metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "_stream.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/heatmap/_stream.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StreamValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the 'stream' property of heatmap traces."""

    def __init__(self, plotly_name="stream", parent_name="heatmap", **kwargs):
        # Pull the overridable pieces out of kwargs first so the remaining
        # kwargs pass straight through to the base validator.
        data_class_str = kwargs.pop("data_class_str", "Stream")
        data_docs = kwargs.pop(
            "data_docs",
            """
            maxpoints
                Sets the maximum number of points to keep on
                the plots from an incoming stream. If
                `maxpoints` is set to 50, only the newest 50
                points will be displayed on the plot.
            token
                The stream id number links a data trace on a
                plot with a stream. See https://chart-
                studio.plotly.com/settings for more details.
""",
        )
        super(StreamValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@heatmap@_stream.py@.PATH_END.py
|
{
"filename": "common.py",
"repo_name": "marblestation/iSpec",
"repo_path": "iSpec_extracted/iSpec-master/ispec/common.py",
"type": "Python"
}
|
#
# This file is part of iSpec.
# Copyright Sergi Blanco-Cuaresma - http://www.blancocuaresma.com/s/
#
# iSpec is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iSpec is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with iSpec. If not, see <http://www.gnu.org/licenses/>.
#
import platform
from scipy.interpolate import UnivariateSpline
import numpy.lib.recfunctions as rfn # Extra functions
import numpy as np
import calendar
import re
import shutil
import gzip
import tempfile
import os, errno
#import ipdb
import random
import sys
from . import log
import logging
import pickle as pickle
import gzip
def is_spectrum_support_enabled():
    """Return True if the compiled 'synthesizer' extension can be imported."""
    try:
        from . import synthesizer as __synthesizer_ignore__
        return True
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # no longer swallowed; any import failure still means "not available".
        return False
def is_turbospectrum_support_enabled():
    """Return True if the Turbospectrum binaries and DATA directory exist."""
    ispec_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
    turbospectrum_dir = ispec_dir + "/synthesizer/turbospectrum/"
    # All of these must be present for Turbospectrum support.
    required_paths = (
        turbospectrum_dir + "bin/bsyn_lu",
        turbospectrum_dir + "bin/eqwidt_lu",
        turbospectrum_dir + "bin/babsma_lu",
        turbospectrum_dir + "/DATA/",
    )
    return all(os.path.exists(path) for path in required_paths)
def is_moog_support_enabled():
    """Return True if the MOOG synthesizer executable exists."""
    ispec_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
    moog_dir = ispec_dir + "/synthesizer/moog/"
    moog_executable = moog_dir + "MOOGSILENT"
    return os.path.exists(moog_executable) and os.path.exists(moog_dir)
def is_width_support_enabled():
    """Return True if the WIDTH9 executable for this platform exists (Linux only)."""
    if platform.system() != "Linux":
        return False
    ispec_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
    atmos_dir = ispec_dir + "/synthesizer/atmos/"
    # Pick the binary matching the interpreter's pointer size.
    if sys.maxsize > 2**32:
        width_executable = atmos_dir + "bin.amd64/width9.exe"
    else:
        width_executable = atmos_dir + "bin.ia32/width9.exe"
    return os.path.exists(width_executable) and os.path.exists(atmos_dir)
def is_ares_support_enabled():
    """Return True if the ARES executable exists."""
    ispec_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
    ares_dir = ispec_dir + "/synthesizer/ARES/"
    return os.path.exists(ares_dir + "bin/ARES")
def is_synthe_support_enabled():
    """Return True if every SYNTHE/ATMOS executable for this platform exists (Linux only)."""
    if platform.system() != "Linux":
        return False
    ispec_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
    atmos_dir = ispec_dir + "/synthesizer/atmos/"
    bin_subdir = "bin.amd64/" if sys.maxsize > 2**32 else "bin.ia32/"
    # rline2.exe is intentionally not checked: it does not exist in the source code!
    executable_names = (
        "xnfpelsyn.exe",
        "synbeg.exe",
        "rgfalllinesnew.exe",
        "rmolecasc.exe",
        "synthe.exe",
        "spectrv.exe",
        "rotate.exe",
        "syntoascanga.exe",
    )
    if not os.path.exists(atmos_dir):
        return False
    return all(os.path.exists(atmos_dir + bin_subdir + name)
               for name in executable_names)
def is_sme_support_enabled():
    """Return True if the SME synthesis shared library for this platform exists (x86 only)."""
    system = platform.system()
    system_64bits = sys.maxsize > 2**32
    ispec_dir = os.path.dirname(os.path.realpath(__file__)) + "/../"
    sme_dir = ispec_dir + "/synthesizer/sme/"
    # Library names per (OS, 64-bit?) combination; only x86 builds are shipped
    # (no ARM, Apple Silicon M1/2).
    lib_names = {
        ('Linux', True): "/sme_synth.so.linux.x86_64.64g",
        ('Linux', False): "/sme_synth.so.linux.x86.32",
        ('Darwin', True): "/sme_synth.so.darwin.x86_64.64g",
        ('Darwin', False): "/sme_synth.so.darwin.i386.32",
        ('Windows', True): "/sme_synth.so.Win32.x86_64.64g",
        ('Windows', False): "/sme_synth.so.Win32.x86.32",
    }
    sme_lib = None
    if 'x86' in platform.machine():
        lib_name = lib_names.get((system, system_64bits))
        if lib_name is not None:
            sme_lib = sme_dir + lib_name
    return sme_lib is not None and os.path.exists(sme_lib)
def save_results(dump_filename, data):
    """
    Serialize 'data' into a gzip-compressed pickle file.

    The file handle is now closed deterministically via a context manager
    (the original left closing to the garbage collector, which can delay the
    final flush). Protocol 2 is kept for backward compatibility with
    previously written dumps.
    """
    with gzip.open(dump_filename, "wb", compresslevel=3) as fileobj:
        pickle.dump(data, fileobj, protocol=2)
def restore_results(dump_filename):
    """
    Load and return an object previously stored with save_results().

    The gzip file handle is now closed deterministically via a context
    manager (the original left closing to the garbage collector).
    """
    with gzip.open(dump_filename, "rb") as fileobj:
        return pickle.load(fileobj)
def report_progress(current_work_progress, last_reported_progress):
    """
    :returns:
        True every 10% of progress.
    """
    # Always report the very first update and completion.
    if last_reported_progress < 0:
        return True
    if current_work_progress == 100:
        return True
    # Otherwise report only on a 10% boundary, and only if we have advanced
    # more than 10 points since the last report.
    on_decade = int(current_work_progress) % 10 == 0
    advanced_enough = current_work_progress - last_reported_progress > 10
    return on_decade and advanced_enough
def mkdir_p(path):
    """
    Creates a directory. Same behaviour as 'mkdir -p'.
    """
    try:
        os.makedirs(path)
    except FileExistsError:
        # Already existing path is fine ('mkdir -p' semantics).
        # FileExistsError is the OSError subclass with errno == EEXIST, so this
        # matches exactly what the original errno check accepted; any other
        # OSError (permissions, bad path, ...) propagates as before.
        pass
def find_duplicates(a, key):
    """
    Find duplicates in a column of a recarray. This is a simplified version of:
    ::

        import numpy.lib.recfunctions as rfn
        rfn.find_duplicates(...)

    :returns: tuple (duplicated records, their indices in the original array),
        both ordered by the sorted values of the selected column.
    """
    a = np.asanyarray(a).ravel()
    values = a[key]                       # column used to detect duplicates
    order = values.argsort()              # indices that sort the column
    sorted_values = values[order]
    # Mark every element equal to its predecessor in sorted order...
    repeated = np.concatenate(([False], sorted_values[:-1] == sorted_values[1:]))
    # ...and extend the mark to the first element of each run of equal values
    # (else we would miss it).
    repeated[:-1] = repeated[:-1] + repeated[1:]
    return (a[order][repeated], order[repeated])
def interquartile_range_filtering(data, k=1.5):
    """
    Interquartile range (IQR) is used to find outliers in data. By default, outliers
    are observations that fall below Quartile1 - k*(IQR) or above Quartile3 + k*(IQR).

    * k = 1.5 represents +/-2.698 * sigma (or standard dev) of a gaussian\
    distribution, which includes the 99.3% of the data.

    :returns: tuple (filtered data, boolean mask of kept elements).
    """
    # First and third quartile (the second would be the median).
    q1, q3 = np.percentile(data, 25), np.percentile(data, 75)
    iqr = q3 - q1
    lower_bound = q1 - k * iqr
    upper_bound = q3 + k * iqr
    # Keep strictly-inside values only (same strict comparisons as before).
    sfilter = np.logical_and(data > lower_bound, data < upper_bound)
    return data[sfilter], sfilter
def sigma_clipping(data, sig=3, meanfunc=np.mean):
    """
    Identify outliers considering the mean (if meanfunc=np.mean) or median
    (if meanfunc=np.median) value and sig sigma (sig*stdev), iterating until
    convergence.

    :returns: tuple (filtered data, boolean mask of kept elements).
    """
    # First pass uses the statistics of the whole sample.
    sfilter = np.abs(data - meanfunc(data)) < sig * np.std(data)
    previous_total = len(data)
    current_total = int(np.sum(sfilter))
    # Re-clip against the statistics of the surviving points until no more
    # points are removed.
    while previous_total > current_total:
        previous_total = current_total
        survivors = data[sfilter]
        sfilter = np.abs(data - meanfunc(survivors)) < sig * np.std(survivors)
        current_total = int(np.sum(sfilter))
    return data[sfilter], sfilter
def find_max_win(x, span=3):
    """
    For an array of values, find local maximum values considering a window
    of "span" elements.

    :param x: sequence of comparable values
    :param span: window width in elements (half-window = (span+1)//2)
    :returns: numpy array with the indices of the local maxima
    """
    ret = []
    n = len(x)
    # BUG FIX: the original used '(span + 1)/ 2', which is float division in
    # Python 3 and turned every derived index into a float, breaking x[j]
    # indexing (TypeError on lists, IndexError on numpy arrays).
    dist = (span + 1) // 2
    for i in np.arange(n):
        l_min = np.max([i-dist+1, 0])
        l_max = i-1
        r_min = i+1
        r_max = np.min([i+dist-1, n-1])
        is_max = True
        # left side: any strictly larger neighbour disqualifies i
        j = l_min
        while j <= l_max:
            if x[j] > x[i]:
                is_max = False
                break
            j += 1
        # right side (only checked if the left side did not disqualify i)
        if is_max:
            j = r_min
            while j <= r_max:
                if x[j] > x[i]:
                    is_max = False
                    break
                j += 1
        if is_max:
            ret.append(i)
    return np.asarray(ret)
def find_min_win(x, span=3):
    """
    For an array of values, find local minimum values considering a window
    of "span" elements.

    :param x: sequence of comparable values
    :param span: window width in elements (half-window = (span+1)//2)
    :returns: numpy array with the indices of the local minima
    """
    ret = []
    n = len(x)
    # BUG FIX: the original used '(span + 1)/ 2', which is float division in
    # Python 3 and turned every derived index into a float, breaking x[j]
    # indexing (TypeError on lists, IndexError on numpy arrays).
    dist = (span + 1) // 2
    for i in np.arange(n):
        l_min = np.max([i-dist+1, 0])
        l_max = i-1
        r_min = i+1
        r_max = np.min([i+dist-1, n-1])
        is_min = True
        # left side: any strictly smaller neighbour disqualifies i
        j = l_min
        while j <= l_max:
            if x[j] < x[i]:
                is_min = False
                break
            j += 1
        # right side (only checked if the left side did not disqualify i)
        if is_min:
            j = r_min
            while j <= r_max:
                if x[j] < x[i]:
                    is_min = False
                    break
                j += 1
        if is_min:
            ret.append(i)
    return np.asarray(ret)
# Try to use the Cython-compiled helpers from common_c; if pyximport or the
# compiled module is unavailable, fall back to the pure Python implementations
# defined in the 'except' branch below.
try:
    import pyximport
    import numpy as np
    pyximport.install(setup_args={'include_dirs':[np.get_include()]})
    from .common_c import find_local_max_values
    from .common_c import find_local_min_values
except:
    print("*********************************************************************")
    print("Not optimized version loaded!")
    print("*********************************************************************")

    def find_local_max_values(x):
        """
        For an array of values, find the position of local maximum values considering only
        the next and previous elements, except they have the same value.
        In that case, the next/previous different value is checked. Therefore,
        ::

            find_local_max([1,2,3,3,2,1,4,3])

        would return:
        ::

            [2, 3, 6]
        """
        ret = []
        n = len(x)
        m = 0;
        for i in np.arange(n):
            l_min = np.max([i-1, 0])
            #l_max = i-1
            #r_min = i+1
            #r_max = np.min([i+1, n-1])
            r_min = np.min([i+1, n-1])
            is_max = True
            # left side
            j = l_min
            # If value is equal, search for the last different value
            while j >= 0 and x[j] == x[i]:
                j -= 1
            # i > 0 guard: the first element cannot be disqualified from the left
            if (j < 0 or x[j] > x[i]) and i > 0:
                is_max = False
            # right side
            if is_max:
                j = r_min
                # If value is equal, search for the next different value
                while j < n and x[j] == x[i]:
                    j += 1
                # i < n-1 guard: the last element cannot be disqualified from the right
                if (j >= n or x[j] > x[i]) and i < n-1:
                    is_max = False
            if is_max:
                ret.append(i)
        return np.asarray(ret)

    def find_local_min_values(x):
        """
        For an array of values, find the position of local minimum values considering only
        the next and previous elements, except they have the same value.
        In that case, the next/previous different value is checked. Therefore,
        ::

            find_local_min([10,9,3,3,9,10,4,30])

        would return:
        ::

            [2, 3, 6]
        """
        ret = []
        n = len(x)
        m = 0;
        for i in np.arange(n):
            l_min = np.max([i-1, 0])
            #l_max = i-1
            #r_min = i+1
            #r_max = np.min([i+1, n-1])
            r_min = np.min([i+1, n-1])
            is_min = True
            # left side
            j = l_min
            # If value is equal, search for the last different value
            while j >= 0 and x[j] == x[i]:
                j -= 1
            # NOTE(review): unlike the max variant above, there is no 'and i > 0'
            # guard here, so boundary elements are treated slightly differently —
            # kept as-is to preserve behavior.
            if j < 0 or x[j] < x[i]:
                is_min = False
            # right side
            if is_min:
                j = r_min
                # If value is equal, search for the next different value
                while j < n and x[j] == x[i]:
                    j += 1
                if j >= n or x[j] < x[i]:
                    is_min = False
            if is_min:
                ret.append(i)
        return np.asarray(ret)
############## [start] Barycentric vel
def __precession_matrix(equinox1, equinox2, fk4=False):
    """
    Return the precession matrix needed to go from EQUINOX1 (i.e. 1950.0) to EQUINOX2 (i.e. 1975.0).

    The code has been copied from: `astrolib <http://code.google.com/p/astrolibpy/source/browse/trunk/astrolib/>`_

    :param equinox1: starting equinox (Julian epoch, e.g. 1950.0)
    :param equinox2: target equinox (Julian epoch, e.g. 1975.0)
    :param fk4: if True use the FK4 constant set (reference epoch 1900),
        otherwise the FK5 set (reference epoch 2000)
    :returns: 3x3 numpy rotation matrix
    """
    deg_to_rad = np.pi/ 180.0e0
    sec_to_rad = deg_to_rad/ 3600.e0  # arcseconds -> radians
    # Interval between the two equinoxes, in thousands of years.
    t = 0.001e0 * (equinox2 - equinox1)
    if not fk4:
        # Offset of the starting equinox from the FK5 reference epoch (J2000),
        # in thousands of years.
        st = 0.001e0 * (equinox1 - 2000.e0)
        # Compute 3 rotation angles
        a = sec_to_rad * t * (23062.181e0 + st * (139.656e0 + 0.0139e0 * st) + t * (30.188e0 - 0.344e0 * st + 17.998e0 * t))
        b = sec_to_rad * t * t * (79.280e0 + 0.410e0 * st + 0.205e0 * t) + a
        c = sec_to_rad * t * (20043.109e0 - st * (85.33e0 + 0.217e0 * st) + t * (-42.665e0 - 0.217e0 * st - 41.833e0 * t))
    else:
        # FK4: reference epoch 1900.
        st = 0.001e0 * (equinox1 - 1900.e0)
        # Compute 3 rotation angles
        a = sec_to_rad * t * (23042.53e0 + st * (139.75e0 + 0.06e0 * st) + t * (30.23e0 - 0.27e0 * st + 18.0e0 * t))
        b = sec_to_rad * t * t * (79.27e0 + 0.66e0 * st + 0.32e0 * t) + a
        c = sec_to_rad * t * (20046.85e0 - st * (85.33e0 + 0.37e0 * st) + t * (-42.67e0 - 0.37e0 * st - 41.8e0 * t))
    # Build the rotation matrix from the three angles (presumably the standard
    # precession angles zeta, z, theta — TODO confirm against the astrolib source).
    sina = np.sin(a)
    sinb = np.sin(b)
    sinc = np.sin(c)
    cosa = np.cos(a)
    cosb = np.cos(b)
    cosc = np.cos(c)
    r = np.zeros((3, 3))
    r[0,:] = np.array([cosa * cosb * cosc - sina * sinb, sina * cosb + cosa * sinb * cosc, cosa * sinc])
    r[1,:] = np.array([-cosa * sinb - sina * cosb * cosc, cosa * cosb - sina * sinb * cosc, -sina * sinc])
    r[2,:] = np.array([-cosb * sinc, -sinb * sinc, cosc])
    return r
def __baryvel(datetime, deq=0):
    """
    Calculates heliocentric and barycentric velocity components of Earth.

    The code has been copied from: `astrolib <http://code.google.com/p/astrolibpy/source/browse/astrolib/baryvel.py>`_

    :param datetime: calendar date/time sequence, forwarded to calendar_to_jd()
        to obtain the Julian ephemeris date.
    :param deq: epoch of mean equinox for the returned velocities; 0 means
        "use the Julian ephemeris date itself as the epoch".
    :returns: tuple (dvelh, dvelb) of 3-element arrays with the heliocentric
        and barycentric velocity components — presumably km/s, since 'au'
        below is the astronomical unit in km (TODO confirm).
    """
    #dje = astropysics.obstools.calendar_to_jd(datetime) # Julian ephemeris date.
    dje = calendar_to_jd(datetime) # Julian ephemeris date
    #Define constants
    dc2pi = 2 * np.pi
    cc2pi = 2 * np.pi
    dc1 = 1.0e0
    dcto = 2415020.0e0
    dcjul = 36525.0e0 #days in Julian year
    dcbes = 0.313e0
    dctrop = 365.24219572e0 #days in tropical year (...572 insig)
    dc1900 = 1900.0e0
    au = 1.4959787e8
    #Constants dcfel(i,k) of fast changing elements.
    dcfel = np.array([1.7400353e00, 6.2833195099091e02, 5.2796e-6, 6.2565836e00, 6.2830194572674e02, -2.6180e-6, 4.7199666e00, 8.3997091449254e03, -1.9780e-5, 1.9636505e-1, 8.4334662911720e03, -5.6044e-5, 4.1547339e00, 5.2993466764997e01, 5.8845e-6, 4.6524223e00, 2.1354275911213e01, 5.6797e-6, 4.2620486e00, 7.5025342197656e00, 5.5317e-6, 1.4740694e00, 3.8377331909193e00, 5.6093e-6])
    dcfel = np.reshape(dcfel, (8, 3))
    #constants dceps and ccsel(i,k) of slowly changing elements.
    dceps = np.array([4.093198e-1, -2.271110e-4, -2.860401e-8])
    ccsel = np.array([1.675104e-2, -4.179579e-5, -1.260516e-7, 2.220221e-1, 2.809917e-2, 1.852532e-5, 1.589963e00, 3.418075e-2, 1.430200e-5, 2.994089e00, 2.590824e-2, 4.155840e-6, 8.155457e-1, 2.486352e-2, 6.836840e-6, 1.735614e00, 1.763719e-2, 6.370440e-6, 1.968564e00, 1.524020e-2, -2.517152e-6, 1.282417e00, 8.703393e-3, 2.289292e-5, 2.280820e00, 1.918010e-2, 4.484520e-6, 4.833473e-2, 1.641773e-4, -4.654200e-7, 5.589232e-2, -3.455092e-4, -7.388560e-7, 4.634443e-2, -2.658234e-5, 7.757000e-8, 8.997041e-3, 6.329728e-6, -1.939256e-9, 2.284178e-2, -9.941590e-5, 6.787400e-8, 4.350267e-2, -6.839749e-5, -2.714956e-7, 1.348204e-2, 1.091504e-5, 6.903760e-7, 3.106570e-2, -1.665665e-4, -1.590188e-7])
    ccsel = np.reshape(ccsel, (17, 3))
    #Constants of the arguments of the short-period perturbations.
    dcargs = np.array([5.0974222e0, -7.8604195454652e2, 3.9584962e0, -5.7533848094674e2, 1.6338070e0, -1.1506769618935e3, 2.5487111e0, -3.9302097727326e2, 4.9255514e0, -5.8849265665348e2, 1.3363463e0, -5.5076098609303e2, 1.6072053e0, -5.2237501616674e2, 1.3629480e0, -1.1790629318198e3, 5.5657014e0, -1.0977134971135e3, 5.0708205e0, -1.5774000881978e2, 3.9318944e0, 5.2963464780000e1, 4.8989497e0, 3.9809289073258e1, 1.3097446e0, 7.7540959633708e1, 3.5147141e0, 7.9618578146517e1, 3.5413158e0, -5.4868336758022e2])
    dcargs = np.reshape(dcargs, (15, 2))
    #Amplitudes ccamps(n,k) of the short-period perturbations.
    ccamps = np.array([-2.279594e-5, 1.407414e-5, 8.273188e-6, 1.340565e-5, -2.490817e-7, -3.494537e-5, 2.860401e-7, 1.289448e-7, 1.627237e-5, -1.823138e-7, 6.593466e-7, 1.322572e-5, 9.258695e-6, -4.674248e-7, -3.646275e-7, 1.140767e-5, -2.049792e-5, -4.747930e-6, -2.638763e-6, -1.245408e-7, 9.516893e-6, -2.748894e-6, -1.319381e-6, -4.549908e-6, -1.864821e-7, 7.310990e-6, -1.924710e-6, -8.772849e-7, -3.334143e-6, -1.745256e-7, -2.603449e-6, 7.359472e-6, 3.168357e-6, 1.119056e-6, -1.655307e-7, -3.228859e-6, 1.308997e-7, 1.013137e-7, 2.403899e-6, -3.736225e-7, 3.442177e-7, 2.671323e-6, 1.832858e-6, -2.394688e-7, -3.478444e-7, 8.702406e-6, -8.421214e-6, -1.372341e-6, -1.455234e-6, -4.998479e-8, -1.488378e-6, -1.251789e-5, 5.226868e-7, -2.049301e-7, 0.e0, -8.043059e-6, -2.991300e-6, 1.473654e-7, -3.154542e-7, 0.e0, 3.699128e-6, -3.316126e-6, 2.901257e-7, 3.407826e-7, 0.e0, 2.550120e-6, -1.241123e-6, 9.901116e-8, 2.210482e-7, 0.e0, -6.351059e-7, 2.341650e-6, 1.061492e-6, 2.878231e-7, 0.e0])
    ccamps = np.reshape(ccamps, (15, 5))
    #Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
    ccsec3 = -7.757020e-8
    ccsec = np.array([1.289600e-6, 5.550147e-1, 2.076942e00, 3.102810e-5, 4.035027e00, 3.525565e-1, 9.124190e-6, 9.990265e-1, 2.622706e00, 9.793240e-7, 5.508259e00, 1.559103e01])
    ccsec = np.reshape(ccsec, (4, 3))
    #Sidereal rates.
    dcsld = 1.990987e-7 #sidereal rate in longitude
    ccsgd = 1.990969e-7 #sidereal rate in mean anomaly
    #Constants used in the calculation of the lunar contribution.
    cckm = 3.122140e-5
    ccmld = 2.661699e-6
    ccfdi = 2.399485e-7
    #Constants dcargm(i,k) of the arguments of the perturbations of the motion
    # of the moon.
    dcargm = np.array([5.1679830e0, 8.3286911095275e3, 5.4913150e0, -7.2140632838100e3, 5.9598530e0, 1.5542754389685e4])
    dcargm = np.reshape(dcargm, (3, 2))
    #Amplitudes ccampm(n,k) of the perturbations of the moon.
    ccampm = np.array([1.097594e-1, 2.896773e-7, 5.450474e-2, 1.438491e-7, -2.223581e-2, 5.083103e-8, 1.002548e-2, -2.291823e-8, 1.148966e-2, 5.658888e-8, 8.249439e-3, 4.063015e-8])
    ccampm = np.reshape(ccampm, (3, 4))
    #ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon)
    ccpamv = np.array([8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12])
    dc1mme = 0.99999696e0
    #Time arguments.
    dt = (dje - dcto)/ dcjul
    tvec = np.array([1e0, dt, dt * dt])
    #Values of all elements for the instant(aneous?) dje.
    temp = (np.transpose(np.dot(np.transpose(tvec), np.transpose(dcfel)))) % dc2pi
    dml = temp[0]
    forbel = temp[1:8]
    g = forbel[0] #old fortran equivalence
    deps = (tvec * dceps).sum() % dc2pi
    sorbel = (np.transpose(np.dot(np.transpose(tvec), np.transpose(ccsel)))) % dc2pi
    e = sorbel[0] #old fortran equivalence
    #Secular perturbations in longitude.
    dummy = np.cos(2.0)  # unused leftover from the IDL/Fortran translation
    sn = np.sin((np.transpose(np.dot(np.transpose(tvec[0:2]), np.transpose(ccsec[:,1:3])))) % cc2pi)
    #Periodic perturbations of the emb (earth-moon barycenter).
    pertl = (ccsec[:,0] * sn).sum() + dt * ccsec3 * sn[2]
    pertld = 0.0
    pertr = 0.0
    pertrd = 0.0
    for k in range(0, 15):
        a = (dcargs[k,0] + dt * dcargs[k,1]) % dc2pi
        cosa = np.cos(a)
        sina = np.sin(a)
        pertl = pertl + ccamps[k,0] * cosa + ccamps[k,1] * sina
        pertr = pertr + ccamps[k,2] * cosa + ccamps[k,3] * sina
        if k < 11:
            pertld = pertld + (ccamps[k,1] * cosa - ccamps[k,0] * sina) * ccamps[k,4]
            pertrd = pertrd + (ccamps[k,3] * cosa - ccamps[k,2] * sina) * ccamps[k,4]
    #Elliptic part of the motion of the emb.
    phi = (e * e/ 4e0) * (((8e0/ e) - e) * np.sin(g) + 5 * np.sin(2 * g) + (13/ 3e0) * e * np.sin(3 * g))
    f = g + phi
    sinf = np.sin(f)
    cosf = np.cos(f)
    dpsi = (dc1 - e * e)/ (dc1 + e * cosf)
    phid = 2 * e * ccsgd * ((1 + 1.5 * e * e) * cosf + e * (1.25 - 0.5 * sinf * sinf))
    psid = ccsgd * e * sinf/ np.sqrt(dc1 - e * e)
    #Perturbed heliocentric motion of the emb.
    d1pdro = dc1 + pertr
    drd = d1pdro * (psid + dpsi * pertrd)
    drld = d1pdro * dpsi * (dcsld + phid + pertld)
    dtl = (dml + phi + pertl) % dc2pi
    dsinls = np.sin(dtl)
    dcosls = np.cos(dtl)
    dxhd = drd * dcosls - drld * dsinls
    dyhd = drd * dsinls + drld * dcosls
    #Influence of eccentricity, evection and variation on the geocentric
    # motion of the moon.
    pertl = 0.0
    pertld = 0.0
    pertp = 0.0
    pertpd = 0.0
    for k in range(0, 3):
        a = (dcargm[k,0] + dt * dcargm[k,1]) % dc2pi
        sina = np.sin(a)
        cosa = np.cos(a)
        pertl = pertl + ccampm[k,0] * sina
        pertld = pertld + ccampm[k,1] * cosa
        pertp = pertp + ccampm[k,2] * cosa
        pertpd = pertpd - ccampm[k,3] * sina
    #Heliocentric motion of the earth.
    tl = forbel[1] + pertl
    sinlm = np.sin(tl)
    coslm = np.cos(tl)
    sigma = cckm/ (1.0 + pertp)
    a = sigma * (ccmld + pertld)
    b = sigma * pertpd
    dxhd = dxhd + a * sinlm + b * coslm
    dyhd = dyhd - a * coslm + b * sinlm
    dzhd = -sigma * ccfdi * np.cos(forbel[2])
    #Barycentric motion of the earth.
    dxbd = dxhd * dc1mme
    dybd = dyhd * dc1mme
    dzbd = dzhd * dc1mme
    for k in range(0, 4):
        plon = forbel[k + 3]
        pomg = sorbel[k + 1]
        pecc = sorbel[k + 9]
        tl = (plon + 2.0 * pecc * np.sin(plon - pomg)) % cc2pi
        dxbd = dxbd + ccpamv[k] * (np.sin(tl) + pecc * np.sin(pomg))
        dybd = dybd - ccpamv[k] * (np.cos(tl) + pecc * np.cos(pomg))
        dzbd = dzbd - ccpamv[k] * sorbel[k + 13] * np.cos(plon - sorbel[k + 5])
    #Transition to mean equator of date.
    dcosep = np.cos(deps)
    dsinep = np.sin(deps)
    dyahd = dcosep * dyhd - dsinep * dzhd
    dzahd = dsinep * dyhd + dcosep * dzhd
    dyabd = dcosep * dybd - dsinep * dzbd
    dzabd = dsinep * dybd + dcosep * dzbd
    #Epoch of mean equinox (deq) of zero implies that we should use
    # Julian ephemeris date (dje) as epoch of mean equinox.
    if deq == 0:
        dvelh = au * (np.array([dxhd, dyahd, dzahd]))
        dvelb = au * (np.array([dxbd, dyabd, dzabd]))
        return (dvelh,dvelb)
    #General precession from epoch dje to deq.
    deqdat = (dje - dcto - dcbes)/ dctrop + dc1900
    prema = __precession_matrix(deqdat, deq, fk4=True)
    dvelh = au * (np.transpose(np.dot(np.transpose(prema), np.transpose(np.array([dxhd, dyahd, dzahd])))))
    dvelb = au * (np.transpose(np.dot(np.transpose(prema), np.transpose(np.array([dxbd, dyabd, dzabd])))))
    return (dvelh, dvelb)
def calculate_barycentric_velocity_correction(datetime, coordinates, deq=0):
    """
    Calculates barycentric velocity correction for a given star.

    The code is based on: `astrolib <http://code.google.com/p/astrolibpy/source/browse/astrolib/baryvel.py>`_

    :param datetime: sequence (year, month, day, hour, minute, second), each
        element castable to float.
    :param coordinates: (ra_hours, ra_minutes, ra_seconds, dec_degrees,
        dec_minutes, dec_seconds) of the star.
    :param deq: NOTE(review): this parameter is currently ignored — the epoch
        is hard-coded to J2000.0 in the __baryvel call below. Verify whether
        it should be forwarded.
    :returns: barycentric velocity correction (sign-flipped projected velocity,
        rounded to 2 decimals) — km/s, following __baryvel's units.
    """
    datetime = list(map(float, datetime))
    coordinates = list(map(float, coordinates))
    dvelh, dvelb = __baryvel(datetime, deq=2000) # J2000.0
    # Calculate velocity toward a star in a given position
    ra_hours, ra_minutes, ra_seconds, dec_degrees, dec_minutes, dec_seconds = coordinates
    ra = (ra_hours + ra_minutes/60. + ra_seconds/(60.*60)) # hours
    ra = ra * 360./24 # degrees
    ra = ra * ((2*np.pi) / 360.) # radians
    dec = (dec_degrees + dec_minutes/60. + dec_seconds/(60.*60)) # degrees
    dec = dec * ((2*np.pi) / 360.) # radians
    # Project velocity toward star
    barycentric_vel = dvelb[0]*np.cos(dec)*np.cos(ra) + dvelb[1]*np.cos(dec)*np.sin(ra) + dvelb[2]*np.sin(dec) # km/s
    barycentric_vel = np.round(barycentric_vel, 2) # km/s
    # Correction in the opposite sense
    barycentric_correction = -1*barycentric_vel
    return barycentric_correction
############## [end] Barycentric vel
################################################################################
#### [start] Copied from astropysics.obsutils (if not, pyinstaller fails)
# http://packages.python.org/Astropysics/
# https://github.com/eteq/astropysics/blob/master/astropysics/obstools.py
def jd_to_calendar(jd,rounding=1000000,output='datetime',gregorian=None,mjd=False):
    """
    Converts a julian date to a calendar date and time.

    This piece of code has been copied from astropysics.obsutils:
    * `Astropysics <http://packages.python.org/Astropysics/>`_
    * `obstools module <https://github.com/eteq/astropysics/blob/master/astropysics/obstools.py>`_

    :param jd: julian date (scalar or array); None means "now".
    :param rounding: microseconds-based rounding granularity; must not exceed
        1000000 (one second). <= 0 disables rounding.
    :param output: 'datetime' (UTC datetime objects), 'array'
        (year/month/day/h/m/s/usec rows) or 'fracarray' (year, month,
        fractional day).
    :param gregorian: True to always apply the Gregorian correction, False
        never, None to switch at the standard changeover JD (2299161).
    :param mjd: if True, 'jd' is interpreted as Modified Julian Date.
    """
    import datetime
    from dateutil import tz
    if jd is None:
        jd = calendar_to_jd(datetime.datetime.now(tz.tzlocal()))
    jd = np.array(jd,copy=True,dtype=float)
    scalar = jd.shape == ()
    jd = jd.ravel()
    if mjd:
        # NOTE(review): 'mjdoffset' is not defined anywhere in this chunk —
        # presumably a module-level constant (2400000.5) defined elsewhere
        # in the file; verify, otherwise mjd=True raises NameError.
        jd += mjdoffset
    if rounding > 1000000:
        raise ValueError('rounding cannot exceed a second')
    elif rounding <= 0:
        jd += .5
    else:
        rounding = int(rounding)
        roundingfrac = rounding/86400000000
        jd += .5 + roundingfrac
    # NOTE(review): 'roundingfrac' is only bound when rounding > 0, but the
    # 'fracarray' branch below reads it unconditionally — potential NameError
    # when rounding <= 0 and output == 'fracarray'.
    z = np.floor(jd).astype(int)
    dec = jd - z #fractional piece
    #fix slight floating-point errors if they happen TODO:check
    dgtr1 = dec>=1.0
    dec[dgtr1] -= 1.0
    z[dgtr1] += 1
    if gregorian is None:
        # Standard Julian->Gregorian changeover threshold (JD number).
        gregorian = 2299161
    if gregorian is True:
        alpha = ((z-1867216.25)/36524.25).astype(int)
        z += 1 + alpha - alpha//4
    elif gregorian is False:
        pass
    else:
        # Apply the Gregorian correction only at/after the changeover JD.
        gmask = z >= gregorian
        alpha = ((z[gmask]-1867216.25)/36524.25).astype(int)
        z[gmask] += 1 + alpha - alpha//4
    # Meeus-style conversion from JD number to calendar year/month/day.
    b = z + 1524
    c = ((b-122.1)/365.25).astype(int)
    d = (365.25*c).astype(int)
    e = ((b-d)/30.6001).astype(int)
    day = b - d - (30.6001*e).astype(int)
    mmask = e<14
    month = e
    month[mmask] -= 1
    month[~mmask] -= 13
    year = c
    year[month>2] -= 4716
    year[month<=2] -= 4715
    if output == 'fracarray':
        dec = dec-roundingfrac
        dec[dec<0]=0
        return np.array((year,month,day+dec)).T
    if rounding == 1000000:
        # Whole-second rounding: split the fractional day into h/m/s.
        secdec = dec*86400
        sec = secdec.astype(int)
        min = sec//60
        sec -= 60*min
        hr = min//60
        min -= 60*hr
        #sec[sec==secdec] -= 1
        msec = None
    else:
        # Microsecond resolution, then quantize to the requested rounding.
        msec = (dec*86400000000.).astype('int64')
        if rounding > 0:
            div = (msec//1000000)*1000000
            toround = (msec - div)<(2*rounding)
            msec[toround] = div + rounding
            msec -= rounding
        sec = msec//1000000
        msec -= 1000000*sec
        min = sec//60
        sec -= 60*min
        hr = min//60
        min -= 60*hr
    if output == 'datetime':
        tzi = tz.tzutc()
        if msec is None:
            ts = (year,month,day,hr%24,min%60,sec%60)
        else:
            ts = (year,month,day,hr%24,min%60,sec%60,msec%1000000)
        res = [datetime.datetime(*t,**dict(tzinfo=tzi)) for t in zip(*ts)]
    elif output == 'array':
        msec = np.zeros_like(sec) if msec is None else msec
        res = np.array([year,month,day,hr%24,min%60,sec%60,msec]).T
    else:
        raise ValueError('invlid output form '+str(output))
    if scalar:
        return res[0]
    else:
        return res
def calendar_to_jd(caltime,tz=None,gregorian=True,mjd=False):
    """
    Convert a calendar date and time to julian date.

    This piece of code has been copied from astropysics.obsutils:
    * `Astropysics <http://packages.python.org/Astropysics/>`_
    * `obstools module <https://github.com/eteq/astropysics/blob/master/astropysics/obstools.py>`_

    :param caltime: a datetime/date object, a sequence of them, None (meaning
        "now"), or a sequence (year, month, day[, hour, minute, second,
        microsecond]); with only (y, m, d) the time defaults to 12:00 noon.
    :param tz: None, a timezone name string, a tzinfo instance, or a numeric
        UTC offset in hours.
    :param gregorian: True to always apply the Gregorian offset, False never,
        None or a (year, month, day) tuple for a custom changeover date.
    :param mjd: if True, return Modified Julian Date instead of JD.
    :returns: scalar float for scalar input, numpy array otherwise.
    """
    #Adapted from xidl jdcnv.pro
    from datetime import datetime,date,tzinfo
    if caltime is None:
        from dateutil.tz import tzlocal
        datetimes = [datetime.now(tzlocal())]
        scalarout = True
    elif isinstance(caltime,datetime) or isinstance(caltime,date):
        datetimes = [caltime]
        scalarout = True
    elif all([isinstance(ct,datetime) or isinstance(ct,date) for ct in caltime]):
        datetimes = caltime
        scalarout = False
    else:
        datetimes = None
        caltime = list(caltime)
        if not (3 <= len(caltime) < 8):
            raise ValueError('caltime input sequence is invalid size')
        while len(caltime) < 7:
            if len(caltime) == 3:
                #make hours 12 (noon) when only a date is given
                caltime.append(12*np.ones_like(caltime[-1]))
            else:
                caltime.append(np.zeros_like(caltime[-1]))
        yr,month,day,hr,min,sec,msec = caltime
        # '==' instead of the original "is tuple()": empty-tuple identity is a
        # CPython implementation detail, equality is the portable check.
        scalarout = all([np.shape(v) == tuple() for v in caltime])
    #if input objects are datetime objects, generate arrays
    if datetimes is not None:
        yr,month,day,hr,min,sec,msec = [],[],[],[],[],[],[]
        for dt in datetimes:
            if not hasattr(dt,'hour'):
                dt = datetime(dt.year,dt.month,dt.day,12)
            if tz is None:
                off = dt.utcoffset()
                if off is not None:
                    dt = dt - off
            yr.append(dt.year)
            month.append(dt.month)
            day.append(dt.day)
            hr.append(dt.hour)
            min.append(dt.minute)
            sec.append(dt.second)
            msec.append(dt.microsecond)
    # np.asarray instead of np.array(..., copy=False): under NumPy 2.0
    # copy=False *raises* when a copy is required (e.g. from these lists),
    # while asarray keeps the original "copy only if needed" semantics.
    yr = np.asarray(yr,dtype='int64').ravel()
    month = np.asarray(month,dtype='int64').ravel()
    day = np.asarray(day,dtype='int64').ravel()
    hr = np.asarray(hr,dtype=float).ravel()
    min = np.asarray(min,dtype=float).ravel()
    sec = np.asarray(sec,dtype=float).ravel()
    msec = np.asarray(msec,dtype=float).ravel()
    #do tz conversion if tz is provided
    if isinstance(tz,str) or isinstance(tz,tzinfo):
        if isinstance(tz,str):
            # BUG FIX: the original did 'from dateutil import tz' here, which
            # rebound the local name 'tz' to the module and then called
            # tz.gettz(tz) with the *module* as argument, losing the zone name.
            from dateutil import tz as _tzmod
            tzi = _tzmod.gettz(tz)
        else:
            tzi = tz
        utcoffset = []
        for t in zip(yr,month,day,hr,min,sec,msec):
            #microsecond from float component of seconds
            dt = datetime(*[int(ti) for ti in t],**dict(tzinfo=tzi))
            utcdt = dt.utcoffset()
            if utcdt is None:
                utcoffset.append(0)
            else:
                utcoffset.append(utcdt.days*24 + (utcdt.seconds + utcdt.microseconds*1e-6)/3600)
    else:
        utcoffset = tz
    #this algorithm from meeus 2ed
    m3 = month < 3
    yr[m3] -= 1
    month[m3] += 12
    cen = yr//100
    if gregorian is None:
        gregorian = (1582,10,4)
    if gregorian is True:
        gregoffset = 2 - cen + cen//4
    elif gregorian is False:
        gregoffset = 0
    else:
        gregoffset = 2 - cen + cen//4
        gmask = (yr>gregorian[0])&(month>gregorian[1])&(day>gregorian[2])
        gregoffset[~gmask] = 0
    jdn = (365.25*(yr+4716)).astype(int) + \
          (30.6001*(month + 1)).astype(int) + \
          day + gregoffset - 1524.5
    # NOTE: microseconds are intentionally not folded into the JD, matching
    # the original astropysics behavior.
    res = jdn + hr/24.0 + min/1440.0 + sec/86400.0
    if mjd:
        # NOTE(review): 'mjdoffset' is expected to be a module-level constant
        # (2400000.5) defined elsewhere in this file — verify it exists.
        res -= mjdoffset
    if np.any(utcoffset):
        res -= np.array(utcoffset)/24.0
    if scalarout:
        return res[0]
    else:
        return res
#### [end] Astropysics
def estimate_vmic(teff, logg, feh):
    """
    Estimate Microturbulence velocity (Vmic) by using an empirical relation
    considering the effective temperature, surface gravity and metallicity.

    The relation was constructed based on the UVES Gaia ESO Survey iDR1 data,
    results for the benchmark stars (Jofre et al. 2013),
    and globular cluster data from external literature sources.

    Source: http://great.ast.cam.ac.uk/GESwiki/GesWg/GesWg11/Microturbulence
    """
    t0 = 5500
    g0 = 4.0
    if logg < 3.5:
        # giants (RGB/AGB)
        vmic = 1.25 + 4.01e-4*(teff-t0) + 3.1e-7*(teff-t0)**2 - 0.14*(logg-g0) - 0.05e-1*(logg-g0)**2 + 0.05*feh + 0.01*feh**2
    else:
        # main sequence and subgiants (RGB); the relation is clamped at
        # Teff = 5000 K for cooler dwarfs (same coefficients, fixed Teff)
        t = teff if teff >= 5000 else 5000
        vmic = 1.05 + 2.51e-4*(t-t0) + 1.5e-7*(t-t0)**2 - 0.14*(logg-g0) - 0.05e-1*(logg-g0)**2 + 0.05*feh + 0.01*feh**2
    return float("%.2f" % vmic)
def _estimate_vmac_doyle2014(teff, logg, feh):
"""
Estimate Macroturbulence velocity (Vmac) by using an empirical relation
considering the effective temperature, surface gravity and metallicity.
The relation was constructed by Doyle et al. (2014), which is only valid
for the Teff range 5200 to 6400 K, and the log g range 4.0 to 4.6 dex.
"""
t0 = 5777
g0 = 4.44
if logg >= 3.5:
if teff >= 5000:
# main sequence and subgiants (RGB)
vmac = 3.21 + 2.33e-3*(teff-t0) + 2e-6*(teff-t0)**2 - 2*(logg-g0)
else:
# main sequence
vmac = 3.21 + 2.33e-3*(teff-t0) + 2e-6*(teff-t0)**2 - 2*(logg-g0)
else:
# Out of the calibrated limits
vmac = 0.
return vmac
def _estimate_vmac_ges(teff, logg, feh):
"""
Estimate Microturbulence velocity (Vmic) by using an empirical relation
considering the effective temperature, surface gravity and metallicity.
The relation was constructed by Maria Bergemann for the Gaia ESO Survey.
"""
t0 = 5500
g0 = 4.0
if logg >= 3.5:
if teff >= 5000:
# main sequence and subgiants (RGB)
vmac = 3*(1.15 + 7e-4*(teff-t0) + 1.2e-6*(teff-t0)**2 - 0.13*(logg-g0) + 0.13*(logg-g0)**2 - 0.37*feh - 0.07*feh**2)
else:
# main sequence
vmac = 3*(1.15 + 2e-4*(teff-t0) + 3.95e-7*(teff-t0)**2 - 0.13*(logg-g0) + 0.13*(logg-g0)**2)
else:
# giants (RGB/AGB)
vmac = 3*(1.15 + 2.2e-5*(teff-t0) - 0.5e-7*(teff-t0)**2 - 0.1*(logg-g0) + 0.04*(logg-g0)**2 - 0.37*feh - 0.07*feh**2)
return vmac
def estimate_vmac(teff, logg, feh, relation='GES'):
    """
    Estimate macroturbulence velocity (Vmac) by using an empirical relation
    considering the effective temperature, surface gravity and metallicity.

    By default, the selected relation was constructed by Maria Bergemann
    for the Gaia ESO Survey. Alternatively, "relation='Doyle2014'" implements
    a relation for dwarfs (Doyle et al, 2014); any other value falls back to
    the GES relation.
    """
    if relation == 'Doyle2014':
        vmac = _estimate_vmac_doyle2014(teff, logg, feh)
    else:
        vmac = _estimate_vmac_ges(teff, logg, feh)
    # Round to two decimals (same convention as the Vmic estimate).
    vmac = float("%.2f" % vmac)
    return vmac
def estimate_mass_radius(teff, logg, feh):
    """
    Estimate the stellar mass and radius (in solar units) with the Torres
    et al. (2010) empirical calibration [1], given the effective temperature,
    surface gravity and metallicity.

    NOTE: The errors in the empirical calibration are of ulog M = 0.027 and
    ulog R = 0.014 (6.4 and 3.2%, respectively) for main-sequence and evolved
    stars above 0.6 M_SUN.

    [1] http://adsabs.harvard.edu/abs/2010A%26ARv..18...67T
    """
    # Torres et al. 2010: Table 4 (Page 110) coefficients for the
    # calibration equations (mass first, radius second).
    mass_coeffs = (1.5689, 1.3787, 0.4243, 1.139, -0.14250, 0.01969, 0.10100)
    radius_coeffs = (2.4427, 0.6679, 0.1771, 0.705, -0.21415, 0.02306, 0.04173)

    # Warn (but do not fail) when the input is outside the calibration sample.
    # Teff and logg: http://vizier.cfa.harvard.edu/viz-bin/VizieR-3?-source=J/other/A%2bARV/18.67/table1
    if not (3120 <= teff <= 38000):
        logging.warning("Teff {} is out of the Teff range used to derive the empirical relation".format(teff))
    if not (2.122 <= logg <= 5.009):
        logging.warning("logg {} is out of the logg range used to derive the empirical relation".format(logg))
    # Metallicities: http://vizier.cfa.harvard.edu/viz-bin/VizieR-3?-source=J/other/A%2bARV/18.67/table2
    if not (-0.60 <= feh <= 0.40):
        logging.warning("Metallicity {} is out of the metallicity range used to derive the empirical relation".format(feh))

    X = np.log10(teff) - 4.1

    def _calibration(c):
        # log10 of the calibrated quantity (Torres et al. 2010, Eqs. 2-3).
        return (c[0] + c[1]*X + c[2]*X**2 + c[3]*X**3
                + c[4]*logg**2 + c[5]*logg**3 + c[6]*feh)

    mass = 10**_calibration(mass_coeffs)      # M_SUN
    radius = 10**_calibration(radius_coeffs)  # R_SUN
    return mass, radius
def which(program):
    """
    Locate an executable like the Unix `which` command.

    If ``program`` contains a directory component, it is checked directly;
    otherwise every directory in the PATH environment variable is searched.
    Returns the full path of the executable, or None when not found.
    """
    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        # Explicit path given: accept it only if it is executable.
        if _is_executable(program):
            return program
        return None
    for search_dir in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(search_dir.strip('""'), program)
        if _is_executable(candidate):
            return candidate
    return None
|
marblestationREPO_NAMEiSpecPATH_START.@iSpec_extracted@iSpec-master@ispec@common.py@.PATH_END.py
|
{
"filename": "misc_functions.py",
"repo_name": "ireis/PRF",
"repo_path": "PRF_extracted/PRF-master/PRF/misc_functions.py",
"type": "Python"
}
|
import numpy
from numba import jit
from scipy.stats import norm
# Enable on-disk caching for the numba-compiled functions below.
cache = True

############################################################
############################################################
############ Propagate Probabilities functions ############
############################################################
############################################################

# Precomputed standard-normal CDF lookup table, used to convert a
# standardized distance from a split threshold into a split probability.
N_SIGMA = 1
# Standardized distances covering [-N_SIGMA, N_SIGMA) in steps of 0.1.
X_GAUS = numpy.arange(-N_SIGMA,N_SIGMA,0.1)
#X_GAUS = numpy.append(X_GAUS, N_SIGMA)
GAUS = numpy.array(norm(0,1).cdf(X_GAUS))
# Append 1 so that lookups past the end of the table map to probability 1.
GAUS = numpy.append(GAUS, 1)
@jit(cache=cache, nopython=True)
def split_probability(value, delta, threshold):
    """
    Calculate split probability for a single object.

    Models the feature as Normal(value, delta) and returns
    P(feature > threshold), looked up from the precomputed GAUS CDF table
    (clipped to 0/1 beyond +/- N_SIGMA). A NaN value returns NaN so the
    caller can impute it later. When delta <= 0 the split is deterministic:
    1 if value > threshold, else 0.
    """
    if numpy.isnan(value):
        # Missing feature value: defer the decision to the caller.
        return numpy.nan
    if delta > 0:
        # Standardize the threshold with respect to the object's uncertainty.
        normalized_threshold = (threshold - value)/delta
        if (normalized_threshold <= -N_SIGMA):
            split_proba = 0
        elif (normalized_threshold >= N_SIGMA):
            split_proba = 1
        else:
            # CDF table lookup: split_proba = P(feature <= threshold).
            x = numpy.searchsorted(a=X_GAUS, v=normalized_threshold,)
            #x = numpy.argmax(X_GAUS > normalized_threshold)
            split_proba = GAUS[x]
    else:
        # Zero (or negative) uncertainty: deterministic comparison.
        if (threshold - value) >= 0:
            # split_proba = 0.5
            #elif (threshold - value) > 0:
            split_proba = 1
        elif (threshold - value) < 0:
            split_proba = 0
    # split_proba is P(feature <= threshold); return the complement.
    return 1-split_proba
@jit(cache=cache, nopython=True)
def split_probability_all(values, deltas, threshold):
    """
    Calculate split probabilities for all rows in values.

    values, deltas : 1-D arrays with one feature value and its uncertainty
    per object. Returns a numpy array of per-object probabilities
    (NaN where the feature value is missing).
    """
    nof_objcts = values.shape[0]
    ps = [split_probability(values[i], deltas[i], threshold) for i in range(nof_objcts)]
    ps = numpy.array(ps)
    return ps
@jit(cache=cache, nopython=True)
def return_class_probas(pnode, pY):
    """
    The leaf probabilities for each class.

    pnode : (n_objects,) probability of each object reaching this leaf.
    pY    : (n_objects, n_classes) per-object class probability matrix.

    NOTE(review): the accumulated vector is normalized by the number of
    objects (len(pnode)) rather than by the total probability mass
    (sum(pnode)); the latter is kept commented out below — confirm this is
    the intended normalization.
    """
    nof_objects = pY.shape[0]
    nof_classes = pY.shape[1]
    class_probas = numpy.zeros(nof_classes)
    for i in range(nof_objects):
        # Weight each object's class distribution by its leaf probability.
        class_probas += pnode[i] * pY[i,:]
    #class_probas = class_probas/numpy.sum(pnode)
    class_probas = class_probas/len(pnode)
    #class_probas = pY
    return class_probas
############################################################
############################################################
############################ MISC #########################
############################################################
############################################################
@jit(cache=True, nopython=True)
def get_split_objects(pnode, p_split_right, p_split_left, is_max, n_objects_node, keep_proba):
    """
    Propagate per-object node probabilities through a split and decide which
    objects are carried into each child.

    NOTE: p_split_right and p_split_left are modified IN PLACE for objects
    whose split probability is NaN (missing feature value): they are imputed
    with the batch-level fraction of probability mass that flowed to each
    side.

    Returns (pnode_right, pnode_left, best_right, best_left, is_max_right,
    is_max_left, p_split_right_batch), where best_* are the indices of the
    objects kept in each child and is_max_* flags which of those carry the
    object's maximum-probability path.
    """
    # Probability mass each object carries into the right / left child.
    pnode_right = pnode*p_split_right
    pnode_left = pnode*p_split_left
    # Total (NaN-ignoring) mass per side, used for the NaN imputation below.
    pnode_right_tot = numpy.nansum(pnode_right)
    pnode_left_tot = numpy.nansum(pnode_left)
    pnode_tot = pnode_right_tot + pnode_left_tot
    is_nan = numpy.isnan(p_split_right)
    p_split_right_batch = pnode_right_tot / pnode_tot
    p_split_right[is_nan] = p_split_right_batch
    pnode_right[is_nan] = pnode[is_nan] * p_split_right[is_nan]
    p_split_left_batch = pnode_left_tot / pnode_tot
    p_split_left[is_nan] = p_split_left_batch
    pnode_left[is_nan] = pnode[is_nan] * p_split_left[is_nan]
    # Lists are seeded with a dummy 0 entry (stripped via [1:] on return),
    # presumably so numba can infer the list element type — TODO confirm.
    best_right = [0]
    best_left = [0]
    is_max_right = [0]
    is_max_left = [0]
    for i in range(n_objects_node):
        #if is_nan[i]:
        #    best_right.append(i)
        #    best_left.append(i)
        #    if (is_max[i] == 1):
        #        if (p_split_right_batch > p_split_left_batch):
        #            is_max_right.append(1)
        #            is_max_left.append(0)
        #        else:
        #            is_max_right.append(0)
        #            is_max_left.append(1)
        #else:
        # An object on its maximum-probability path (is_max) follows the
        # side with probability >= 0.5 (ties go right: note >= vs >); it is
        # additionally kept on any side where its propagated probability
        # exceeds keep_proba.
        if (p_split_right[i] >= 0.5 and is_max[i] == 1):
            best_right.append(i)
            is_max_right.append(1)
        elif pnode_right[i] > keep_proba:
            best_right.append(i)
            is_max_right.append(0)
        if (p_split_left[i] > 0.5 and is_max[i] == 1):
            best_left.append(i)
            is_max_left.append(1)
        elif pnode_left[i] > keep_proba:
            best_left.append(i)
            is_max_left.append(0)
    best_right = numpy.array(best_right)
    best_left = numpy.array(best_left)
    is_max_right = numpy.array(is_max_right)
    is_max_left = numpy.array(is_max_left)
    # Restrict the child probability vectors to the selected objects.
    pnode_right, _ = pull_values(pnode_right, best_right[1:], best_left[1:])
    _, pnode_left = pull_values(pnode_left, best_right[1:], best_left[1:])
    return pnode_right, pnode_left, best_right[1:], best_left[1:], is_max_right[1:], is_max_left[1:], p_split_right_batch
#@jit(cache=True, nopython=True)
def choose_features(nof_features, max_features):
    """
    Randomly choose the order in which features are examined for a split.

    NOTE(review): despite the name, ``max_features`` is currently ignored —
    all ``nof_features`` feature indices are returned, in a random order
    (a subsampling variant with size=max_features was left commented out
    in the original source).
    """
    candidate_indices = numpy.arange(nof_features)
    #numpy.random.seed()
    #features_chosen = numpy.random.choice(candidate_indices, size=max_features, replace = True)
    shuffled = numpy.random.choice(candidate_indices, size=nof_features, replace = False)
    return shuffled
@jit(cache=True, nopython=True)
def pull_values(A, right, left):
    """
    Splits an array A to two according to lists of indices given in
    right and left.

    right, left : integer index arrays. Returns (A[right], A[left]);
    fancy indexing produces copies, so A itself is untouched.
    """
    A_left = A[left]
    A_right = A[right]
    return A_right, A_left
def get_pY(pY_true, y_fake):
"""
Recieves a vector with the probability to be true (pY_true)
returns a matrix with the probability to be in each class
we put pY_true as the probability of the true class
and (1-pY_true)/(nof_lables-1) for all other classes
"""
nof_objects = len(pY_true)
all_labels = numpy.unique(y_fake)
label_dict = {i:a for i,a in enumerate(all_labels) }
nof_labels = len(all_labels)
pY = numpy.zeros([nof_objects, nof_labels])
for o in range(nof_objects):
for c_idx, c in enumerate(all_labels):
if y_fake[o] == c:
pY[o,c_idx] = pY_true[o]
else:
pY[o,c_idx] = float(1 - pY_true[o])/(nof_labels - 1)
return pY, label_dict
|
ireisREPO_NAMEPRFPATH_START.@PRF_extracted@PRF-master@PRF@misc_functions.py@.PATH_END.py
|
{
"filename": "dcr_metric.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/metrics/dcr_metric.py",
"type": "Python"
}
|
__all__ = ("DcrPrecisionMetric",)
import numpy as np
import rubin_scheduler.utils as utils
import rubin_sim.maf.utils as mafUtils
from .base_metric import BaseMetric
class DcrPrecisionMetric(BaseMetric):
    """Determine how precise a DCR correction could be made.

    Parameters
    ----------
    atm_err : `float`
        Minimum error in photometry centroids introduced by the atmosphere
        (arcseconds). Default 0.01.
    rmag : `float`
        r-band magnitude of the fiducial star. Default 20.0.
    sed_template : `str`
        'flat' uses ``rmag`` in every band; any other value is passed to
        ``utils.stellarMags`` to derive per-band magnitudes.
    """

    def __init__(
        self,
        metric_name="DCRprecision",
        seeing_col="seeingFwhmGeom",
        m5_col="fiveSigmaDepth",
        ha_col="HA",
        pa_col="paraAngle",
        filter_col="filter",
        atm_err=0.01,
        sed_template="flat",
        rmag=20.0,
        **kwargs,
    ):
        # NOTE(review): ha_col is accepted but never stored or used below.
        self.m5_col = m5_col
        self.filter_col = filter_col
        self.pa_col = pa_col
        self.seeing_col = seeing_col
        # Per-band magnitudes of the fiducial star.
        self.mags = {}
        self.filters = ["u", "g", "r", "i", "z", "y"]
        if sed_template == "flat":
            # Flat SED: same magnitude in every band.
            for f in self.filters:
                self.mags[f] = rmag
        else:
            self.mags = utils.stellarMags(sed_template, rmag=rmag)
        # Columns the metric needs from each visit's data slice.
        cols = [
            "ra_dcr_amp",
            "dec_dcr_amp",
            seeing_col,
            m5_col,
            filter_col,
            "zenithDistance",
            pa_col,
        ]
        units = "arcseconds"
        self.atm_err = atm_err
        super(DcrPrecisionMetric, self).__init__(cols, metric_name=metric_name, units=units, **kwargs)

    def run(self, data_slice, slice_point=None):
        # SNR of the fiducial star in each visit, band by band.
        snr = np.zeros(len(data_slice), dtype="float")
        for filt in self.filters:
            in_filt = np.where(data_slice[self.filter_col] == filt)
            snr[in_filt] = mafUtils.m52snr(self.mags[filt], data_slice[self.m5_col][in_filt])
        # Centroid uncertainty: seeing/SNR term plus the atmospheric floor,
        # added in quadrature.
        position_errors = np.sqrt(
            mafUtils.astrom_precision(data_slice[self.seeing_col], snr) ** 2 + self.atm_err**2
        )
        # DCR abscissae: tan(zd)*sin(PA) and tan(zd)*cos(PA) per visit.
        x_coord = np.tan(np.radians(data_slice["zenithDistance"])) * np.sin(
            np.radians(data_slice[self.pa_col])
        )
        x_coord2 = np.tan(np.radians(data_slice["zenithDistance"])) * np.cos(
            np.radians(data_slice[self.pa_col])
        )
        # Things should be the same for RA and dec.
        # Now I want to compute the error if I interpolate/extrapolate to +/-1.
        # function is of form, y=ax. a=y/x. da = dy/x.
        # Only strictly true if we know the unshifted position.
        # But this should be a reasonable approx.
        slope_uncerts = position_errors / x_coord
        slope_uncerts2 = position_errors / x_coord2
        # Combine all visits as independent measurements of the slope.
        total_slope_uncert = 1.0 / np.sqrt(np.sum(1.0 / slope_uncerts**2) + np.sum(1.0 / slope_uncerts2**2))
        # So, this will be the uncertainty in the RA or Dec offset at
        # x= +/- 1. A.K.A., the uncertainty in the slope
        # of the line made by tan(zd)*sin(PA) vs RA offset
        # or the line tan(zd)*cos(PA) vs Dec offset
        # Assuming we know the unshfted position of the object
        # (or there's little covariance if we are fitting for both)
        result = total_slope_uncert
        return result
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@metrics@dcr_metric.py@.PATH_END.py
|
{
"filename": "ernie.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/embeddings/ernie.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.embeddings import ErnieEmbeddings
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"ErnieEmbeddings": "ErnieEmbeddings"[:0] + "langchain_community.embeddings"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically (module-level __getattr__, PEP 562)."""
    return _import_attribute(name)


__all__ = [
    "ErnieEmbeddings",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@embeddings@ernie.py@.PATH_END.py
|
{
"filename": "parallel.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/joblib/joblib/parallel.py",
"type": "Python"
}
|
"""
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
from math import sqrt
import functools
import collections
import time
import threading
import itertools
from uuid import uuid4
from numbers import Integral
import warnings
import queue
import weakref
from contextlib import nullcontext
from multiprocessing import TimeoutError
from ._multiprocessing_helpers import mp
from .logger import Logger, short_format_time
from .disk import memstr_to_bytes
from ._parallel_backends import (FallbackToBackend, MultiprocessingBackend,
ThreadingBackend, SequentialBackend,
LokyBackend)
from ._utils import eval_expr, _Sentinel
# Make sure that those two classes are part of the public joblib.parallel API
# so that 3rd party backend implementers can import them from here.
from ._parallel_backends import AutoBatchingMixin # noqa
from ._parallel_backends import ParallelBackendBase # noqa
# True when running under the PyPy interpreter.
IS_PYPY = hasattr(sys, "pypy_version_info")

# Registry of available backend factories, keyed by backend name.
BACKENDS = {
    'threading': ThreadingBackend,
    'sequential': SequentialBackend,
}
# name of the backend used by default by Parallel outside of any context
# managed by ``parallel_config`` or ``parallel_backend``.
# threading is the only backend that is always everywhere
DEFAULT_BACKEND = 'threading'

# Backends that may exist depending on the platform/installation.
MAYBE_AVAILABLE_BACKENDS = {'multiprocessing', 'loky'}

# if multiprocessing is available, so is loky, we set it as the default
# backend
if mp is not None:
    BACKENDS['multiprocessing'] = MultiprocessingBackend
    from .externals import loky
    BACKENDS['loky'] = LokyBackend
    DEFAULT_BACKEND = 'loky'

DEFAULT_THREAD_BACKEND = 'threading'

# Thread local value that can be overridden by the ``parallel_config`` context
# manager
_backend = threading.local()
def _register_dask():
    """Register Dask Backend if called with parallel_config(backend="dask")"""
    try:
        from ._dask import DaskDistributedBackend
        register_parallel_backend('dask', DaskDistributedBackend)
    except ImportError as e:
        # Chain the original ImportError so the user can see whether `dask`
        # or `distributed` is the missing piece.
        msg = ("To use the dask.distributed backend you must install both "
               "the `dask` and distributed modules.\n\n"
               "See https://dask.pydata.org/en/latest/install.html for more "
               "information.")
        raise ImportError(msg) from e
# Backends that are registered lazily, on first use, by calling the
# associated registration function.
EXTERNAL_BACKENDS = {
    'dask': _register_dask,
}

# Sentinels for the default values of the Parallel constructor and
# the parallel_config and parallel_backend context managers
default_parallel_config = {
    "backend": _Sentinel(default_value=None),
    "n_jobs": _Sentinel(default_value=None),
    "verbose": _Sentinel(default_value=0),
    "temp_folder": _Sentinel(default_value=None),
    "max_nbytes": _Sentinel(default_value="1M"),
    "mmap_mode": _Sentinel(default_value="r"),
    "prefer": _Sentinel(default_value=None),
    "require": _Sentinel(default_value=None),
}

# Accepted values for the `prefer` hint and the `require` constraint.
VALID_BACKEND_HINTS = ('processes', 'threads', None)
VALID_BACKEND_CONSTRAINTS = ('sharedmem', None)
def _get_config_param(param, context_config, key):
    """Resolve the effective value of one parallel-configuration parameter.

    Resolution order: a value explicitly set on the ``Parallel`` call wins,
    then a value set by an enclosing ``parallel_config``/``parallel_backend``
    context manager, then the built-in default.
    """
    sentinel = default_parallel_config[key]
    if param is not sentinel:
        # Explicitly set on the Parallel call: highest priority.
        return param
    context_value = context_config[key]
    if context_value is not sentinel:
        # Set by an active context manager.
        return context_value
    # Neither was set: fall back to the sentinel's default value.
    return param.default_value
def get_active_backend(
    prefer=default_parallel_config["prefer"],
    require=default_parallel_config["require"],
    verbose=default_parallel_config["verbose"],
):
    """Return the active default backend and its effective ``n_jobs``."""
    backend, config = _get_active_backend(prefer, require, verbose)
    # Resolve n_jobs from the active configuration (context manager value
    # or the built-in default).
    n_jobs = _get_config_param(
        default_parallel_config['n_jobs'], config, "n_jobs"
    )
    return backend, n_jobs
def _get_active_backend(
    prefer=default_parallel_config["prefer"],
    require=default_parallel_config["require"],
    verbose=default_parallel_config["verbose"],
):
    """Return the active backend instance and the active config mapping.

    Resolves ``prefer``/``require``/``verbose`` against any enclosing
    ``parallel_config`` context, validates them, and substitutes a
    thread-based backend when shared-memory semantics are required (or
    threads are preferred) but the selected backend cannot provide them.
    """
    backend_config = getattr(_backend, "config", default_parallel_config)
    backend = _get_config_param(
        default_parallel_config['backend'], backend_config, "backend"
    )
    prefer = _get_config_param(prefer, backend_config, "prefer")
    require = _get_config_param(require, backend_config, "require")
    verbose = _get_config_param(verbose, backend_config, "verbose")
    if prefer not in VALID_BACKEND_HINTS:
        raise ValueError(
            f"prefer={prefer} is not a valid backend hint, "
            f"expected one of {VALID_BACKEND_HINTS}"
        )
    if require not in VALID_BACKEND_CONSTRAINTS:
        raise ValueError(
            f"require={require} is not a valid backend constraint, "
            f"expected one of {VALID_BACKEND_CONSTRAINTS}"
        )
    if prefer == 'processes' and require == 'sharedmem':
        raise ValueError(
            "prefer == 'processes' and require == 'sharedmem'"
            " are inconsistent settings"
        )
    explicit_backend = True
    if backend is None:
        # We are either outside of the scope of any parallel_(config/backend)
        # context manager or the context manager did not set a backend.
        # create the default backend instance now.
        backend = BACKENDS[DEFAULT_BACKEND](nesting_level=0)
        explicit_backend = False
    # Try to use the backend set by the user with the context manager.
    nesting_level = backend.nesting_level
    uses_threads = getattr(backend, 'uses_threads', False)
    supports_sharedmem = getattr(backend, 'supports_sharedmem', False)
    # Force to use thread-based backend if the provided backend does not
    # match the shared memory constraint or if the backend is not explicitly
    # given and threads are preferred.
    force_threads = (require == 'sharedmem' and not supports_sharedmem)
    force_threads |= (
        not explicit_backend and prefer == 'threads' and not uses_threads
    )
    if force_threads:
        # This backend does not match the shared memory constraint:
        # fallback to the default thread-based backend.
        sharedmem_backend = BACKENDS[DEFAULT_THREAD_BACKEND](
            nesting_level=nesting_level
        )
        # Warn the user if we forced the backend to thread-based, while the
        # user explicitly specified a non-thread-based backend.
        if verbose >= 10 and explicit_backend:
            print(
                f"Using {sharedmem_backend.__class__.__name__} as "
                f"joblib backend instead of {backend.__class__.__name__} "
                "as the latter does not provide shared memory semantics."
            )
        # Force to n_jobs=1 by default
        thread_config = backend_config.copy()
        thread_config['n_jobs'] = 1
        return sharedmem_backend, thread_config
    return backend, backend_config
class parallel_config:
    """Set the default backend or configuration for :class:`~joblib.Parallel`.

    This is an alternative to directly passing keyword arguments to the
    :class:`~joblib.Parallel` class constructor. It is particularly useful when
    calling into library code that uses joblib internally but does not expose
    the various parallel configuration arguments in its own API.

    Parameters
    ----------
    backend: str or ParallelBackendBase instance, default=None
        If ``backend`` is a string it must match a previously registered
        implementation using the :func:`~register_parallel_backend` function.

        By default the following backends are available:

        - 'loky': single-host, process-based parallelism (used by default),
        - 'threading': single-host, thread-based parallelism,
        - 'multiprocessing': legacy single-host, process-based parallelism.

        'loky' is recommended to run functions that manipulate Python objects.
        'threading' is a low-overhead alternative that is most efficient for
        functions that release the Global Interpreter Lock: e.g. I/O-bound
        code or CPU-bound code in a few calls to native code that explicitly
        releases the GIL. Note that on some rare systems (such as pyodide),
        multiprocessing and loky may not be available, in which case joblib
        defaults to threading.

        In addition, if the ``dask`` and ``distributed`` Python packages are
        installed, it is possible to use the 'dask' backend for better
        scheduling of nested parallel calls without over-subscription and
        potentially distribute parallel calls over a networked cluster of
        several hosts.

        It is also possible to use the distributed 'ray' backend for
        distributing the workload to a cluster of nodes. See more details
        in the Examples section below.

        Alternatively the backend can be passed directly as an instance.

    n_jobs: int, default=None
        The maximum number of concurrently running jobs, such as the number
        of Python worker processes when ``backend="loky"`` or the size of the
        thread-pool when ``backend="threading"``.
        This argument is converted to an integer, rounded below for float.
        If -1 is given, `joblib` tries to use all CPUs. The number of CPUs
        ``n_cpus`` is obtained with :func:`~cpu_count`.
        For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance,
        using ``n_jobs=-2`` will result in all CPUs but one being used.
        This argument can also go above ``n_cpus``, which will cause
        oversubscription. In some cases, slight oversubscription can be
        beneficial, e.g., for tasks with large I/O operations.
        If 1 is given, no parallel computing code is used at all, and the
        behavior amounts to a simple python `for` loop. This mode is not
        compatible with `timeout`.
        None is a marker for 'unset' that will be interpreted as n_jobs=1
        unless the call is performed under a :func:`~parallel_config`
        context manager that sets another value for ``n_jobs``.
        If n_jobs = 0 then a ValueError is raised.

    verbose: int, default=0
        The verbosity level: if non zero, progress messages are
        printed. Above 50, the output is sent to stdout.
        The frequency of the messages increases with the verbosity level.
        If it more than 10, all iterations are reported.

    temp_folder: str or None, default=None
        Folder to be used by the pool for memmapping large arrays
        for sharing memory with worker processes. If None, this will try in
        order:

        - a folder pointed by the ``JOBLIB_TEMP_FOLDER`` environment
          variable,
        - ``/dev/shm`` if the folder exists and is writable: this is a
          RAM disk filesystem available by default on modern Linux
          distributions,
        - the default system temporary folder that can be
          overridden with ``TMP``, ``TMPDIR`` or ``TEMP`` environment
          variables, typically ``/tmp`` under Unix operating systems.

    max_nbytes: int, str, or None, optional, default='1M'
        Threshold on the size of arrays passed to the workers that
        triggers automated memory mapping in temp_folder. Can be an int
        in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
        Use None to disable memmapping of large arrays.

    mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r'
        Memmapping mode for numpy arrays passed to workers. None will
        disable memmapping, other modes defined in the numpy.memmap doc:
        https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
        Also, see 'max_nbytes' parameter documentation for more details.

    prefer: str in {'processes', 'threads'} or None, default=None
        Soft hint to choose the default backend.
        The default process-based backend is 'loky' and the default
        thread-based backend is 'threading'. Ignored if the ``backend``
        parameter is specified.

    require: 'sharedmem' or None, default=None
        Hard constraint to select the backend. If set to 'sharedmem',
        the selected backend will be single-host and thread-based.

    inner_max_num_threads: int, default=None
        If not None, overwrites the limit set on the number of threads
        usable in some third-party library threadpools like OpenBLAS,
        MKL or OpenMP. This is only used with the ``loky`` backend.

    backend_params: dict
        Additional parameters to pass to the backend constructor when
        backend is a string.

    Notes
    -----
    Joblib tries to limit the oversubscription by limiting the number of
    threads usable in some third-party library threadpools like OpenBLAS, MKL
    or OpenMP. The default limit in each worker is set to
    ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be
    overwritten with the ``inner_max_num_threads`` argument which will be used
    to set this limit in the child processes.

    .. versionadded:: 1.3

    Examples
    --------
    >>> from operator import neg
    >>> with parallel_config(backend='threading'):
    ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
    ...
    [-1, -2, -3, -4, -5]

    To use the 'ray' joblib backend add the following lines:

    >>> from ray.util.joblib import register_ray  # doctest: +SKIP
    >>> register_ray()  # doctest: +SKIP
    >>> with parallel_config(backend="ray"):  # doctest: +SKIP
    ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
    [-1, -2, -3, -4, -5]
    """

    def __init__(
        self,
        backend=default_parallel_config["backend"],
        *,
        n_jobs=default_parallel_config["n_jobs"],
        verbose=default_parallel_config["verbose"],
        temp_folder=default_parallel_config["temp_folder"],
        max_nbytes=default_parallel_config["max_nbytes"],
        mmap_mode=default_parallel_config["mmap_mode"],
        prefer=default_parallel_config["prefer"],
        require=default_parallel_config["require"],
        inner_max_num_threads=None,
        **backend_params
    ):
        # Save the parallel info and set the active parallel config
        self.old_parallel_config = getattr(
            _backend, "config", default_parallel_config
        )

        backend = self._check_backend(
            backend, inner_max_num_threads, **backend_params
        )

        new_config = {
            "n_jobs": n_jobs,
            "verbose": verbose,
            "temp_folder": temp_folder,
            "max_nbytes": max_nbytes,
            "mmap_mode": mmap_mode,
            "prefer": prefer,
            "require": require,
            "backend": backend
        }
        self.parallel_config = self.old_parallel_config.copy()
        # Only override the parameters that were explicitly passed (i.e. are
        # no longer the _Sentinel placeholders).
        self.parallel_config.update({
            k: v for k, v in new_config.items()
            if not isinstance(v, _Sentinel)
        })

        # Activate this configuration for the current thread.
        setattr(_backend, "config", self.parallel_config)

    def _check_backend(self, backend, inner_max_num_threads, **backend_params):
        """Validate ``backend`` and instantiate it when given as a string."""
        if backend is default_parallel_config['backend']:
            # No backend requested: the extra backend-specific parameters
            # would be silently dropped, so reject them explicitly.
            if inner_max_num_threads is not None or len(backend_params) > 0:
                raise ValueError(
                    "inner_max_num_threads and other constructor "
                    "parameters backend_params are only supported "
                    "when backend is not None."
                )
            return backend

        if isinstance(backend, str):
            # Handle non-registered or missing backends
            if backend not in BACKENDS:
                if backend in EXTERNAL_BACKENDS:
                    register = EXTERNAL_BACKENDS[backend]
                    register()
                elif backend in MAYBE_AVAILABLE_BACKENDS:
                    warnings.warn(
                        f"joblib backend '{backend}' is not available on "
                        f"your system, falling back to {DEFAULT_BACKEND}.",
                        UserWarning,
                        stacklevel=2
                    )
                    BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND]
                else:
                    raise ValueError(
                        f"Invalid backend: {backend}, expected one of "
                        f"{sorted(BACKENDS.keys())}"
                    )

            backend = BACKENDS[backend](**backend_params)

        if inner_max_num_threads is not None:
            msg = (
                f"{backend.__class__.__name__} does not accept setting the "
                "inner_max_num_threads argument."
            )
            assert backend.supports_inner_max_num_threads, msg
            backend.inner_max_num_threads = inner_max_num_threads

        # If the nesting_level of the backend is not set previously, use the
        # nesting level from the previous active_backend to set it
        if backend.nesting_level is None:
            parent_backend = self.old_parallel_config['backend']
            if parent_backend is default_parallel_config['backend']:
                nesting_level = 0
            else:
                nesting_level = parent_backend.nesting_level
            backend.nesting_level = nesting_level

        return backend

    def __enter__(self):
        return self.parallel_config

    def __exit__(self, type, value, traceback):
        self.unregister()

    def unregister(self):
        # Restore the configuration that was active before this context.
        setattr(_backend, "config", self.old_parallel_config)
class parallel_backend(parallel_config):
    """Change the default backend used by Parallel inside a with block.

    .. warning::
        It is advised to use the :class:`~joblib.parallel_config` context
        manager instead, which allows more fine-grained control over the
        backend configuration.

    If ``backend`` is a string it must match a previously registered
    implementation using the :func:`~register_parallel_backend` function.

    By default the following backends are available:

    - 'loky': single-host, process-based parallelism (used by default),
    - 'threading': single-host, thread-based parallelism,
    - 'multiprocessing': legacy single-host, process-based parallelism.

    'loky' is recommended to run functions that manipulate Python objects.
    'threading' is a low-overhead alternative that is most efficient for
    functions that release the Global Interpreter Lock: e.g. I/O-bound code or
    CPU-bound code in a few calls to native code that explicitly releases the
    GIL. Note that on some rare systems (such as Pyodide),
    multiprocessing and loky may not be available, in which case joblib
    defaults to threading.

    You can also use the `Dask <https://docs.dask.org/en/stable/>`_ joblib
    backend to distribute work across machines. This works well with
    scikit-learn estimators with the ``n_jobs`` parameter, for example::

    >>> import joblib  # doctest: +SKIP
    >>> from sklearn.model_selection import GridSearchCV  # doctest: +SKIP
    >>> from dask.distributed import Client, LocalCluster # doctest: +SKIP

    >>> # create a local Dask cluster
    >>> cluster = LocalCluster()  # doctest: +SKIP
    >>> client = Client(cluster)  # doctest: +SKIP
    >>> grid_search = GridSearchCV(estimator, param_grid, n_jobs=-1)
    ... # doctest: +SKIP
    >>> with joblib.parallel_backend("dask", scatter=[X, y]):  # doctest: +SKIP
    ...     grid_search.fit(X, y)

    It is also possible to use the distributed 'ray' backend for distributing
    the workload to a cluster of nodes. To use the 'ray' joblib backend add
    the following lines::

    >>> from ray.util.joblib import register_ray  # doctest: +SKIP
    >>> register_ray()  # doctest: +SKIP
    >>> with parallel_backend("ray"):  # doctest: +SKIP
    ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
    [-1, -2, -3, -4, -5]

    Alternatively the backend can be passed directly as an instance.

    By default all available workers will be used (``n_jobs=-1``) unless the
    caller passes an explicit value for the ``n_jobs`` parameter.

    This is an alternative to passing a ``backend='backend_name'`` argument to
    the :class:`~Parallel` class constructor. It is particularly useful when
    calling into library code that uses joblib internally but does not expose
    the backend argument in its own API.

    >>> from operator import neg
    >>> with parallel_backend('threading'):
    ...     print(Parallel()(delayed(neg)(i + 1) for i in range(5)))
    ...
    [-1, -2, -3, -4, -5]

    Joblib also tries to limit the oversubscription by limiting the number of
    threads usable in some third-party library threadpools like OpenBLAS, MKL
    or OpenMP. The default limit in each worker is set to
    ``max(cpu_count() // effective_n_jobs, 1)`` but this limit can be
    overwritten with the ``inner_max_num_threads`` argument which will be used
    to set this limit in the child processes.

    .. versionadded:: 0.10

    See Also
    --------
    joblib.parallel_config: context manager to change the backend
        configuration.
    """

    def __init__(self, backend, n_jobs=-1, inner_max_num_threads=None,
                 **backend_params):
        super().__init__(
            backend=backend,
            n_jobs=n_jobs,
            inner_max_num_threads=inner_max_num_threads,
            **backend_params
        )

        # Keep the legacy (backend, n_jobs) tuples around: this class'
        # __enter__ returns a tuple instead of the full config dict.
        if self.old_parallel_config is None:
            self.old_backend_and_jobs = None
        else:
            self.old_backend_and_jobs = (
                self.old_parallel_config["backend"],
                self.old_parallel_config["n_jobs"],
            )
        self.new_backend_and_jobs = (
            self.parallel_config["backend"],
            self.parallel_config["n_jobs"],
        )

    def __enter__(self):
        return self.new_backend_and_jobs
# Under Linux or OS X the default start method of multiprocessing
# can cause third party libraries to crash. Under Python 3.4+ it is possible
# to set an environment variable to switch the default start method from
# 'fork' to 'forkserver' or 'spawn' to avoid this issue albeit at the cost
# of causing semantic changes and some additional pool instantiation overhead.
DEFAULT_MP_CONTEXT = None
if hasattr(mp, 'get_context'):
    # mp is None when multiprocessing is disabled; hasattr(None, ...) is
    # False, so this branch is skipped in that case.
    method = os.environ.get('JOBLIB_START_METHOD', '').strip() or None
    if method is not None:
        DEFAULT_MP_CONTEXT = mp.get_context(method=method)
class BatchedCalls(object):
    """Wrap a sequence of (func, args, kwargs) tuples as a single callable"""

    def __init__(self, iterator_slice, backend_and_jobs, reducer_callback=None,
                 pickle_cache=None):
        # Materialize the slice of tasks so the batch can be pickled and
        # shipped to a worker.
        self.items = list(iterator_slice)
        self._size = len(self.items)
        self._reducer_callback = reducer_callback
        if isinstance(backend_and_jobs, tuple):
            self._backend, self._n_jobs = backend_and_jobs
        else:
            # this is for backward compatibility purposes. Before 0.12.6,
            # nested backends were returned without n_jobs indications.
            self._backend, self._n_jobs = backend_and_jobs, None
        self._pickle_cache = pickle_cache if pickle_cache is not None else {}

    def __call__(self):
        # Run the tasks with self._backend set as the default nested backend,
        # so nested Parallel calls inside the tasks inherit it.
        with parallel_config(backend=self._backend, n_jobs=self._n_jobs):
            return [func(*args, **kwargs)
                    for func, args, kwargs in self.items]

    def __reduce__(self):
        # Custom pickling: invoke the reducer callback (if any) just before
        # serialization, and rebuild the batch without it on the other side.
        if self._reducer_callback is not None:
            self._reducer_callback()
        # no need to pickle the callback.
        return (
            BatchedCalls,
            (self.items, (self._backend, self._n_jobs), None,
             self._pickle_cache)
        )

    def __len__(self):
        return self._size
# Possible exit status for a task
TASK_DONE = "Done"        # result successfully retrieved and registered
TASK_ERROR = "Error"      # task raised; the exception is stored as the result
TASK_PENDING = "Pending"  # task submitted but its outcome not yet registered
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count(only_physical_cores=False):
    """Return the number of CPUs.

    This delegates to loky.cpu_count that takes into account additional
    constraints such as Linux CFS scheduler quotas (typically set by container
    runtimes such as docker) and CPU affinity (for instance using the taskset
    command on Linux).

    If only_physical_cores is True, do not take hyperthreading / SMT logical
    cores into account.
    """
    if mp is not None:
        return loky.cpu_count(only_physical_cores=only_physical_cores)
    # Multiprocessing has been disabled (mp is None): report a single CPU.
    return 1
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
def delayed(function):
    """Decorator used to capture the arguments of a function."""
    def delayed_function(*args, **kwargs):
        # Defer execution: hand back the (func, args, kwargs) triple.
        return function, args, kwargs
    try:
        # Preserve the wrapped function's metadata when possible.
        delayed_function = functools.wraps(function)(delayed_function)
    except AttributeError:
        # functools.wraps fails on some callable objects; fall back to the
        # undecorated closure.
        pass
    return delayed_function
###############################################################################
class BatchCompletionCallBack(object):
    """Callback to keep track of completed results and schedule the next tasks.

    This callable is executed by the parent process whenever a worker process
    has completed a batch of tasks.

    It is used for progress reporting, to update estimate of the batch
    processing duration and to schedule the next batch of tasks to be
    processed.

    It is assumed that this callback will always be triggered by the backend
    right after the end of a task, in case of success as well as in case of
    failure.
    """

    ##########################################################################
    #                   METHODS CALLED BY THE MAIN THREAD                    #
    ##########################################################################
    def __init__(self, dispatch_timestamp, batch_size, parallel):
        self.dispatch_timestamp = dispatch_timestamp
        self.batch_size = batch_size
        self.parallel = parallel
        # Snapshot the call id so that a late callback, triggered after the
        # `parallel` instance has been reset for a new call, can be ignored.
        self.parallel_call_id = parallel._call_id

        # Internals to keep track of the status and outcome of the task.

        # Used to hold a reference to the future-like object returned by the
        # backend after launching this task
        # This will be set later when calling `register_job`, as it is only
        # created once the task has been submitted.
        self.job = None

        if not parallel._backend.supports_retrieve_callback:
            # The status is only used for asynchronous result retrieval in the
            # callback.
            self.status = None
        else:
            # The initial status for the job is TASK_PENDING.
            # Once it is done, it will be either TASK_DONE, or TASK_ERROR.
            self.status = TASK_PENDING

    def register_job(self, job):
        """Register the object returned by `apply_async`."""
        self.job = job

    def get_result(self, timeout):
        """Returns the raw result of the task that was submitted.

        If the task raised an exception rather than returning, this same
        exception will be raised instead.

        If the backend supports the retrieval callback, it is assumed that this
        method is only called after the result has been registered. It is
        ensured by checking that `self.status(timeout)` does not return
        TASK_PENDING. In this case, `get_result` directly returns the
        registered result (or raise the registered exception).

        For other backends, there are no such assumptions, but `get_result`
        still needs to synchronously retrieve the result before it can
        return it or raise. It will block at most `self.timeout` seconds
        waiting for retrieval to complete, after that it raises a TimeoutError.
        """

        backend = self.parallel._backend

        if backend.supports_retrieve_callback:
            # We assume that the result has already been retrieved by the
            # callback thread, and is stored internally. It's just waiting to
            # be returned.
            return self._return_or_raise()

        # For other backends, the main thread needs to run the retrieval step.
        try:
            if backend.supports_timeout:
                result = self.job.get(timeout=timeout)
            else:
                result = self.job.get()
            outcome = dict(result=result, status=TASK_DONE)
        except BaseException as e:
            # Store the exception as the outcome: it will be re-raised in the
            # caller's thread by `_return_or_raise`.
            outcome = dict(result=e, status=TASK_ERROR)
        self._register_outcome(outcome)

        return self._return_or_raise()

    def _return_or_raise(self):
        # Hand the registered outcome back to the caller: re-raise a stored
        # exception, return anything else. The stored result is deleted
        # afterwards to release the reference as early as possible.
        try:
            if self.status == TASK_ERROR:
                raise self._result
            return self._result
        finally:
            del self._result

    def get_status(self, timeout):
        """Get the status of the task.

        This function also checks if the timeout has been reached and register
        the TimeoutError outcome when it is the case.
        """
        if timeout is None or self.status != TASK_PENDING:
            return self.status

        # The computation are running and the status is pending.
        # Check that we did not wait for this jobs more than `timeout`.
        now = time.time()
        if not hasattr(self, "_completion_timeout_counter"):
            # Lazily start the timeout clock on the first status poll.
            self._completion_timeout_counter = now

        if (now - self._completion_timeout_counter) > timeout:
            outcome = dict(result=TimeoutError(), status=TASK_ERROR)
            self._register_outcome(outcome)

        return self.status

    ##########################################################################
    #                     METHODS CALLED BY CALLBACK THREADS                 #
    ##########################################################################
    def __call__(self, out):
        """Function called by the callback thread after a job is completed."""

        # If the backend doesn't support callback retrievals, the next batch of
        # tasks is dispatched regardless. The result will be retrieved by the
        # main thread when calling `get_result`.
        if not self.parallel._backend.supports_retrieve_callback:
            self._dispatch_new()
            return

        # If the backend supports retrieving the result in the callback, it
        # registers the task outcome (TASK_ERROR or TASK_DONE), and schedules
        # the next batch if needed.
        with self.parallel._lock:
            # Edge case where while the task was processing, the `parallel`
            # instance has been reset and a new call has been issued, but the
            # worker managed to complete the task and trigger this callback
            # call just before being aborted by the reset.
            if self.parallel._call_id != self.parallel_call_id:
                return

            # When aborting, stop as fast as possible and do not retrieve the
            # result as it won't be returned by the Parallel call.
            if self.parallel._aborting:
                return

            # Retrieves the result of the task in the main process and dispatch
            # a new batch if needed.
            job_succeeded = self._retrieve_result(out)

            if not self.parallel.return_ordered:
                # Append the job to the queue in the order of completion
                # instead of submission.
                self.parallel._jobs.append(self)

        if job_succeeded:
            self._dispatch_new()

    def _dispatch_new(self):
        """Schedule the next batch of tasks to be processed."""

        # This steps ensure that auto-batching works as expected.
        this_batch_duration = time.time() - self.dispatch_timestamp
        self.parallel._backend.batch_completed(self.batch_size,
                                               this_batch_duration)

        # Schedule the next batch of tasks.
        with self.parallel._lock:
            self.parallel.n_completed_tasks += self.batch_size
            self.parallel.print_progress()
            if self.parallel._original_iterator is not None:
                self.parallel.dispatch_next()

    def _retrieve_result(self, out):
        """Fetch and register the outcome of a task.

        Return True if the task succeeded, False otherwise.
        This function is only called by backends that support retrieving
        the task result in the callback thread.
        """
        try:
            result = self.parallel._backend.retrieve_result_callback(out)
            outcome = dict(status=TASK_DONE, result=result)
        except BaseException as e:
            # Avoid keeping references to parallel in the error.
            e.__traceback__ = None
            outcome = dict(result=e, status=TASK_ERROR)
        self._register_outcome(outcome)

        return outcome['status'] != TASK_ERROR

    ##########################################################################
    #            This method can be called either in the main thread         #
    #                        or in the callback thread.                      #
    ##########################################################################
    def _register_outcome(self, outcome):
        """Register the outcome of a task.

        This method can be called only once, future calls will be ignored.
        """
        # Covers the edge case where the main thread tries to register a
        # `TimeoutError` while the callback thread tries to register a result
        # at the same time.
        with self.parallel._lock:
            if self.status not in (TASK_PENDING, None):
                # An outcome was already registered: ignore this one.
                return
            self.status = outcome["status"]

        self._result = outcome["result"]

        # Once the result and the status are extracted, the last reference to
        # the job can be deleted.
        self.job = None

        # As soon as an error as been spotted, early stopping flags are sent to
        # the `parallel` instance.
        if self.status == TASK_ERROR:
            self.parallel._exception = True
            self.parallel._aborting = True
###############################################################################
def register_parallel_backend(name, factory, make_default=False):
    """Register a new Parallel backend factory.

    The new backend can then be selected by passing its name as the backend
    argument to the :class:`~Parallel` class. Moreover, the default backend can
    be overwritten globally by setting make_default=True.

    The factory can be any callable that takes no argument and return an
    instance of ``ParallelBackendBase``.

    Warning: this function is experimental and subject to change in a future
    version of joblib.

    .. versionadded:: 0.10
    """
    BACKENDS[name] = factory
    if make_default:
        # Subsequent Parallel calls with no explicit backend will use it.
        global DEFAULT_BACKEND
        DEFAULT_BACKEND = name
def effective_n_jobs(n_jobs=-1):
    """Determine the number of jobs that can actually run in parallel

    n_jobs is the number of workers requested by the callers. Passing n_jobs=-1
    means requesting all available workers for instance matching the number of
    CPU cores on the worker host(s).

    This method should return a guesstimate of the number of workers that can
    actually perform work concurrently with the currently enabled default
    backend. The primary use case is to make it possible for the caller to know
    in how many chunks to slice the work.

    In general working on larger data chunks is more efficient (less scheduling
    overhead and better use of CPU cache prefetching heuristics) as long as all
    the workers have enough work to do.

    Warning: this function is experimental and subject to change in a future
    version of joblib.

    .. versionadded:: 0.10
    """
    if n_jobs == 1:
        # Sequential execution requested: no need to query the backend.
        return 1

    active_backend, context_n_jobs = get_active_backend()
    requested = context_n_jobs if n_jobs is None else n_jobs
    return active_backend.effective_n_jobs(n_jobs=requested)
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Read more in the :ref:`User Guide <parallel>`.
Parameters
----------
n_jobs: int, default=None
The maximum number of concurrently running jobs, such as the number
of Python worker processes when ``backend="loky"`` or the size of
the thread-pool when ``backend="threading"``.
This argument is converted to an integer, rounded below for float.
If -1 is given, `joblib` tries to use all CPUs. The number of CPUs
``n_cpus`` is obtained with :func:`~cpu_count`.
For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. For instance,
using ``n_jobs=-2`` will result in all CPUs but one being used.
This argument can also go above ``n_cpus``, which will cause
oversubscription. In some cases, slight oversubscription can be
beneficial, e.g., for tasks with large I/O operations.
If 1 is given, no parallel computing code is used at all, and the
behavior amounts to a simple python `for` loop. This mode is not
compatible with ``timeout``.
None is a marker for 'unset' that will be interpreted as n_jobs=1
unless the call is performed under a :func:`~parallel_config`
context manager that sets another value for ``n_jobs``.
If n_jobs = 0 then a ValueError is raised.
backend: str, ParallelBackendBase instance or None, default='loky'
Specify the parallelization backend implementation.
Supported backends are:
- "loky" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the worker Python processes. On some rare
systems (such as Pyiodide), the loky backend may not be
available.
- "multiprocessing" previous process-based backend based on
`multiprocessing.Pool`. Less robust than `loky`.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
- finally, you can register backends by calling
:func:`~register_parallel_backend`. This will allow you to
implement a backend of your liking.
It is not recommended to hard-code the backend name in a call to
:class:`~Parallel` in a library. Instead it is recommended to set
soft hints (prefer) or hard constraints (require) so as to make it
possible for library users to change the backend from the outside
using the :func:`~parallel_config` context manager.
return_as: str in {'list', 'generator', 'generator_unordered'}, default='list'
If 'list', calls to this instance will return a list, only when
all results have been processed and retrieved.
If 'generator', it will return a generator that yields the results
as soon as they are available, in the order the tasks have been
submitted with.
If 'generator_unordered', the generator will immediately yield
available results independently of the submission order. The output
order is not deterministic in this case because it depends on the
concurrency of the workers.
prefer: str in {'processes', 'threads'} or None, default=None
Soft hint to choose the default backend if no specific backend
was selected with the :func:`~parallel_config` context manager.
The default process-based backend is 'loky' and the default
thread-based backend is 'threading'. Ignored if the ``backend``
parameter is specified.
require: 'sharedmem' or None, default=None
Hard constraint to select the backend. If set to 'sharedmem',
the selected backend will be single-host and thread-based even
if the user asked for a non-thread based backend with
:func:`~joblib.parallel_config`.
verbose: int, default=0
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
timeout: float or None, default=None
        Timeout limit for each task to complete. If any task takes longer,
        a TimeoutError will be raised. Only applied when n_jobs != 1.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}, default='2*n_jobs'
The number of batches (of tasks) to be pre-dispatched.
Default is '2*n_jobs'. When batch_size="auto" this is reasonable
default and the workers should never starve. Note that only basic
arithmetics are allowed here and no modules can be used in this
expression.
batch_size: int or 'auto', default='auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, dispatching
calls to workers can be slower than sequential computation because
of the overhead. Batching fast computations together can mitigate
this.
The ``'auto'`` strategy keeps track of the time it takes for a
batch to complete, and dynamically adjusts the batch size to keep
the time on the order of half a second, using a heuristic. The
initial batch size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str or None, default=None
Folder to be used by the pool for memmapping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment
variable,
- /dev/shm if the folder exists and is writable: this is a
RAM disk filesystem available by default on modern Linux
distributions,
- the default system temporary folder that can be
overridden with TMP, TMPDIR or TEMP environment
variables, typically /tmp under Unix operating systems.
Only active when ``backend="loky"`` or ``"multiprocessing"``.
    max_nbytes: int, str, or None, optional, default='1M'
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmapping of large arrays.
Only active when ``backend="loky"`` or ``"multiprocessing"``.
mmap_mode: {None, 'r+', 'r', 'w+', 'c'}, default='r'
Memmapping mode for numpy arrays passed to workers. None will
disable memmapping, other modes defined in the numpy.memmap doc:
https://numpy.org/doc/stable/reference/generated/numpy.memmap.html
Also, see 'max_nbytes' parameter documentation for more details.
Notes
-----
This object uses workers to compute in parallel the application of a
function to many different arguments. The main functionality it brings
in addition to using the raw multiprocessing or concurrent.futures API
are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Note that the intended usage is to run one call at a time. Multiple
calls to the same Parallel object will result in a ``RuntimeError``
Examples
--------
A simple example:
>>> from math import sqrt
>>> from joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages:
>>> from time import sleep
>>> from joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=10)(
... delayed(sleep)(.2) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 tasks | elapsed: 0.6s
[Parallel(n_jobs=2)]: Done 4 tasks | elapsed: 0.8s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 1.4s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process:
>>> from heapq import nlargest
>>> from joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(
... delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3))
... # doctest: +SKIP
-----------------------------------------------------------------------
Sub-process traceback:
-----------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
_______________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly:
>>> from math import sqrt
>>> from joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
''' # noqa: E501
    def __init__(
        self,
        n_jobs=default_parallel_config["n_jobs"],
        backend=default_parallel_config['backend'],
        return_as="list",
        verbose=default_parallel_config["verbose"],
        timeout=None,
        pre_dispatch='2 * n_jobs',
        batch_size='auto',
        temp_folder=default_parallel_config["temp_folder"],
        max_nbytes=default_parallel_config["max_nbytes"],
        mmap_mode=default_parallel_config["mmap_mode"],
        prefer=default_parallel_config["prefer"],
        require=default_parallel_config["require"],
    ):
        # Initiate parent Logger class state
        super().__init__()

        # Interpret n_jobs=None as 'unset'
        if n_jobs is None:
            n_jobs = default_parallel_config["n_jobs"]

        # Resolve the backend and configuration from any enclosing
        # parallel_config / parallel_backend context manager.
        active_backend, context_config = _get_active_backend(
            prefer=prefer, require=require, verbose=verbose
        )

        nesting_level = active_backend.nesting_level

        self.verbose = _get_config_param(verbose, context_config, "verbose")
        self.timeout = timeout
        self.pre_dispatch = pre_dispatch

        if return_as not in {"list", "generator", "generator_unordered"}:
            raise ValueError(
                'Expected `return_as` parameter to be a string equal to "list"'
                f',"generator" or "generator_unordered", but got {return_as} '
                "instead."
            )
        self.return_as = return_as
        # `return_generator` is True for both generator flavors;
        # `return_ordered` is False only for "generator_unordered".
        self.return_generator = return_as != "list"
        self.return_ordered = return_as != "generator_unordered"

        # Check if we are under a parallel_config or parallel_backend
        # context manager and use the config from the context manager
        # for arguments that are not explicitly set.
        self._backend_args = {
            k: _get_config_param(param, context_config, k) for param, k in [
                (max_nbytes, "max_nbytes"),
                (temp_folder, "temp_folder"),
                (mmap_mode, "mmap_mode"),
                (prefer, "prefer"),
                (require, "require"),
                (verbose, "verbose"),
            ]
        }

        if isinstance(self._backend_args["max_nbytes"], str):
            # Convert human readable sizes such as '1M' into a byte count.
            self._backend_args["max_nbytes"] = memstr_to_bytes(
                self._backend_args["max_nbytes"]
            )
        # Backend verbosity starts 50 levels below the Parallel verbosity.
        self._backend_args["verbose"] = max(
            0, self._backend_args["verbose"] - 50
        )

        if DEFAULT_MP_CONTEXT is not None:
            # Honor the JOBLIB_START_METHOD override if set.
            self._backend_args['context'] = DEFAULT_MP_CONTEXT
        elif hasattr(mp, "get_context"):
            self._backend_args['context'] = mp.get_context()

        if backend is default_parallel_config['backend'] or backend is None:
            # No explicit backend requested: use the active one.
            backend = active_backend

        elif isinstance(backend, ParallelBackendBase):
            # Use provided backend as is, with the current nesting_level if it
            # is not set yet.
            if backend.nesting_level is None:
                backend.nesting_level = nesting_level

        elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
            # Make it possible to pass a custom multiprocessing context as
            # backend to change the start method to forkserver or spawn or
            # preload modules on the forkserver helper process.
            self._backend_args['context'] = backend
            backend = MultiprocessingBackend(nesting_level=nesting_level)

        elif backend not in BACKENDS and backend in MAYBE_AVAILABLE_BACKENDS:
            # Known backend name that is not installed on this system:
            # warn and fall back to the default backend.
            warnings.warn(
                f"joblib backend '{backend}' is not available on "
                f"your system, falling back to {DEFAULT_BACKEND}.",
                UserWarning,
                stacklevel=2)
            BACKENDS[backend] = BACKENDS[DEFAULT_BACKEND]
            backend = BACKENDS[DEFAULT_BACKEND](nesting_level=nesting_level)

        else:
            # Look the backend name up in the registry.
            try:
                backend_factory = BACKENDS[backend]
            except KeyError as e:
                raise ValueError("Invalid backend: %s, expected one of %r"
                                 % (backend, sorted(BACKENDS.keys()))) from e
            backend = backend_factory(nesting_level=nesting_level)

        n_jobs = _get_config_param(n_jobs, context_config, "n_jobs")
        if n_jobs is None:
            # No specific context override and no specific value request:
            # default to the default of the backend.
            n_jobs = backend.default_n_jobs
        try:
            n_jobs = int(n_jobs)
        except ValueError:
            raise ValueError("n_jobs could not be converted to int")
        self.n_jobs = n_jobs

        if (require == 'sharedmem' and
                not getattr(backend, 'supports_sharedmem', False)):
            raise ValueError("Backend %s does not support shared memory"
                             % backend)

        if (batch_size == 'auto' or isinstance(batch_size, Integral) and
                batch_size > 0):
            self.batch_size = batch_size
        else:
            raise ValueError(
                "batch_size must be 'auto' or a positive integer, got: %r"
                % batch_size)

        if not isinstance(backend, SequentialBackend):
            if self.return_generator and not backend.supports_return_generator:
                raise ValueError(
                    "Backend {} does not support "
                    "return_as={}".format(backend, return_as)
                )
            # This lock is used to coordinate the main thread of this process
            # with the async callback thread of our the pool.
            self._lock = threading.RLock()
            self._jobs = collections.deque()
            self._pending_outputs = list()
            self._ready_batches = queue.Queue()
            self._reducer_callback = None

        # Internal variables
        self._backend = backend
        self._running = False
        self._managed_backend = False
        self._id = uuid4().hex
        self._call_ref = None
    def __enter__(self):
        """Context manager entry: eagerly initialize the worker pool."""
        self._managed_backend = True
        self._calling = False
        self._initialize_backend()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        """Context manager exit: stop pending work and release the workers."""
        self._managed_backend = False
        if self.return_generator and self._calling:
            # A call returning a generator is still in flight: abort it
            # before tearing the backend down.
            self._abort()
        self._terminate_and_reset()
    def _initialize_backend(self):
        """Build a process or thread pool and return the number of workers"""
        try:
            n_jobs = self._backend.configure(n_jobs=self.n_jobs, parallel=self,
                                             **self._backend_args)
            if self.timeout is not None and not self._backend.supports_timeout:
                # `timeout` would be silently ignored by this backend:
                # warn the user instead of failing.
                warnings.warn(
                    'The backend class {!r} does not support timeout. '
                    "You have set 'timeout={}' in Parallel but "
                    "the 'timeout' parameter will not be used.".format(
                        self._backend.__class__.__name__,
                        self.timeout))

        except FallbackToBackend as e:
            # Recursively initialize the backend in case of requested fallback.
            self._backend = e.backend
            n_jobs = self._initialize_backend()

        return n_jobs
def _effective_n_jobs(self):
if self._backend:
return self._backend.effective_n_jobs(self.n_jobs)
return 1
    def _terminate_and_reset(self):
        # Notify the backend that the current call is over (only when the
        # backend exposes the optional `stop_call` hook and a call is
        # actually in progress).
        if hasattr(self._backend, 'stop_call') and self._calling:
            self._backend.stop_call()
        self._calling = False
        if not self._managed_backend:
            # The backend is owned by this call (not by an enclosing context
            # manager): release its workers.
            self._backend.terminate()
    def _dispatch(self, batch):
        """Queue the batch for computing, with or without multiprocessing

        WARNING: this method is not thread-safe: it should be only called
        indirectly via dispatch_one_batch.

        """
        # If job.get() catches an exception, it closes the queue:
        if self._aborting:
            return

        batch_size = len(batch)

        # Bookkeeping used by print_progress and the batching heuristics.
        self.n_dispatched_tasks += batch_size
        self.n_dispatched_batches += 1

        dispatch_timestamp = time.time()

        batch_tracker = BatchCompletionCallBack(
            dispatch_timestamp, batch_size, self
        )

        if self.return_ordered:
            self._jobs.append(batch_tracker)

        # If return_ordered is False, the batch_tracker is not stored in the
        # jobs queue at the time of submission. Instead, it will be appended to
        # the queue by itself as soon as the callback is triggered to be able
        # to return the results in the order of completion.

        job = self._backend.apply_async(batch, callback=batch_tracker)
        batch_tracker.register_job(job)
    def dispatch_next(self):
        """Dispatch more data for parallel processing

        This method is meant to be called concurrently by the multiprocessing
        callback. We rely on the thread-safety of dispatch_one_batch to protect
        against concurrent consumption of the unprotected iterator.

        """
        if not self.dispatch_one_batch(self._original_iterator):
            # The iterator is exhausted: record that dispatching is over and
            # drop the reference to the consumed iterator.
            self._iterating = False
            self._original_iterator = None
    def dispatch_one_batch(self, iterator):
        """Prefetch the tasks for the next batch and dispatch them.

        The effective size of the batch is computed here.
        If there are no more jobs to dispatch, return False, else return True.

        The iterator consumption and dispatching is protected by the same
        lock so calling this function should be thread safe.

        """

        if self._aborting:
            return False

        batch_size = self._get_batch_size()

        with self._lock:
            # to ensure an even distribution of the workload between workers,
            # we look ahead in the original iterators more than batch_size
            # tasks - However, we keep consuming only one batch at each
            # dispatch_one_batch call. The extra tasks are stored in a local
            # queue, _ready_batches, that is looked-up prior to re-consuming
            # tasks from the original iterator.
            try:
                tasks = self._ready_batches.get(block=False)
            except queue.Empty:
                # slice the iterator n_jobs * batchsize items at a time. If the
                # slice returns less than that, then the current batchsize puts
                # too much weight on a subset of workers, while other may end
                # up starving. So in this case, re-scale the batch size
                # accordingly to distribute evenly the last items between all
                # workers.
                n_jobs = self._cached_effective_n_jobs
                big_batch_size = batch_size * n_jobs

                try:
                    islice = list(itertools.islice(iterator, big_batch_size))
                except Exception as e:
                    # Handle the fact that the generator of task raised an
                    # exception. As this part of the code can be executed in
                    # a thread internal to the backend, register a task with
                    # an error that will be raised in the user's thread.
                    if isinstance(e.__context__, queue.Empty):
                        # Suppress the cause of the exception if it is
                        # queue.Empty to avoid cluttered traceback. Only do it
                        # if the __context__ is really empty to avoid messing
                        # with causes of the original error.
                        e.__cause__ = None
                    batch_tracker = BatchCompletionCallBack(
                        0, batch_size, self
                    )
                    self._jobs.append(batch_tracker)
                    batch_tracker._register_outcome(dict(
                        result=e, status=TASK_ERROR
                    ))
                    return True

                if len(islice) == 0:
                    return False
                elif (iterator is self._original_iterator and
                      len(islice) < big_batch_size):
                    # We reached the end of the original iterator (unless
                    # iterator is the ``pre_dispatch``-long initial slice of
                    # the original iterator) -- decrease the batch size to
                    # account for potential variance in the batches running
                    # time.
                    final_batch_size = max(1, len(islice) // (10 * n_jobs))
                else:
                    final_batch_size = max(1, len(islice) // n_jobs)

                # enqueue n_jobs batches in a local queue
                for i in range(0, len(islice), final_batch_size):
                    tasks = BatchedCalls(islice[i:i + final_batch_size],
                                         self._backend.get_nested_backend(),
                                         self._reducer_callback,
                                         self._pickle_cache)
                    self._ready_batches.put(tasks)

            # finally, get one task.
            tasks = self._ready_batches.get(block=False)
        if len(tasks) == 0:
            # No more tasks available in the iterator: tell caller to stop.
            return False
        else:
            self._dispatch(tasks)
            return True
def _get_batch_size(self):
    """Return the effective batch size used for dispatching tasks.

    When ``batch_size`` is ``'auto'`` the backend computes a value
    dynamically; otherwise the user-supplied fixed size is returned.
    """
    if self.batch_size != 'auto':
        # Fixed batch size strategy: trust the user-provided value.
        return self.batch_size
    return self._backend.compute_batch_size()
def _print(self, msg):
    """Write *msg* to stdout or stderr depending on the verbosity level."""
    # XXX: Not using the logger framework: need to
    # learn to use logger better.
    if not self.verbose:
        return
    # Very verbose runs (>= 50) report on stdout, otherwise on stderr.
    stream_write = sys.stdout.write if self.verbose >= 50 else sys.stderr.write
    stream_write(f"[{self}]: {msg}\n")
def _is_completed(self):
    """Return True once every dispatched task has completed and no
    dispatching or aborting is still in progress."""
    if self._iterating or self._aborting:
        return False
    return self.n_completed_tasks == self.n_dispatched_tasks
def print_progress(self):
    """Display the process of the parallel execution only a fraction
    of time, controlled by self.verbose.
    """
    if not self.verbose:
        return

    elapsed_time = time.time() - self._start_time

    if self._is_completed():
        # Make sure that we get a last message telling us we are done
        self._print(
            f"Done {self.n_completed_tasks:3d} out of "
            f"{self.n_completed_tasks:3d} | elapsed: "
            f"{short_format_time(elapsed_time)} finished"
        )
        return

    # Original job iterator becomes None once it has been fully
    # consumed: at this point we know the total number of jobs and we are
    # able to display an estimation of the remaining time based on already
    # completed jobs. Otherwise, we simply display the number of completed
    # tasks.
    elif self._original_iterator is not None:
        # Rate-limit the messages according to the verbosity level.
        if _verbosity_filter(self.n_dispatched_batches, self.verbose):
            return
        self._print(
            f"Done {self.n_completed_tasks:3d} tasks | elapsed: "
            f"{short_format_time(elapsed_time)}"
        )
    else:
        index = self.n_completed_tasks
        # We are finished dispatching
        total_tasks = self.n_dispatched_tasks
        # We always display the first loop
        if not index == 0:
            # Display depending on the number of remaining items
            # A message as soon as we finish dispatching, cursor is 0
            cursor = (total_tasks - index + 1 -
                      self._pre_dispatch_amount)
            frequency = (total_tasks // self.verbose) + 1
            is_last_item = (index + 1 == total_tasks)
            if (is_last_item or cursor % frequency):
                return
        # NOTE(review): if index == 0 this divides by zero; presumably
        # this method is only called after at least one completion on
        # this code path -- confirm against the callback call sites.
        remaining_time = (elapsed_time / index) * \
            (self.n_dispatched_tasks - index * 1.0)
        # only display status if remaining time is greater or equal to 0
        self._print(
            f"Done {index:3d} out of {total_tasks:3d} | elapsed: "
            f"{short_format_time(elapsed_time)} remaining: "
            f"{short_format_time(remaining_time)}"
        )
def _abort(self):
    """Stop dispatching new jobs and, if the backend supports it,
    cancel or kill the tasks that are still running."""
    # Stop dispatching new jobs in the async callback thread
    self._aborting = True

    # If the backend allows it, cancel or kill remaining running
    # tasks without waiting for the results as we will raise
    # the exception we got back to the caller instead of returning
    # any result.
    backend = self._backend
    if (not self._aborted and hasattr(backend, 'abort_everything')):
        # If the backend is managed externally we need to make sure
        # to leave it in a working state to allow for future jobs
        # scheduling.
        ensure_ready = self._managed_backend
        backend.abort_everything(ensure_ready=ensure_ready)
    self._aborted = True
def _start(self, iterator, pre_dispatch):
    """Dispatch the initial batch(es) of tasks and set the iterating flag.

    Only set self._iterating to True if at least a batch
    was dispatched. In particular this covers the edge
    case of Parallel used with an exhausted iterator. If
    self._original_iterator is None, then this means either
    that pre_dispatch == "all", n_jobs == 1 or that the first batch
    was very quick and its callback already dispatched all the
    remaining jobs.
    """
    self._iterating = False
    if self.dispatch_one_batch(iterator):
        self._iterating = self._original_iterator is not None

    # Keep dispatching until the (possibly pre_dispatch-limited)
    # iterator is exhausted.
    while self.dispatch_one_batch(iterator):
        pass

    if pre_dispatch == "all":
        # The iterable was consumed all at once by the above for loop.
        # No need to wait for async callbacks to trigger to
        # consumption.
        self._iterating = False
def _get_outputs(self, iterator, pre_dispatch):
    """Iterator returning the tasks' output as soon as they are ready.

    This generator drives the whole asynchronous dispatch/retrieval
    machinery: the first ``yield`` returns None and only serves to start
    dispatching; subsequent items are the actual task results.
    """
    dispatch_thread_id = threading.get_ident()
    detach_generator_exit = False
    try:
        self._start(iterator, pre_dispatch)
        # first yield returns None, for internal use only. This ensures
        # that we enter the try/except block and start dispatching the
        # tasks.
        yield

        with self._backend.retrieval_context():
            yield from self._retrieve()

    except GeneratorExit:
        # The generator has been garbage collected before being fully
        # consumed. This aborts the remaining tasks if possible and warn
        # the user if necessary.
        self._exception = True

        # In some interpreters such as PyPy, GeneratorExit can be raised in
        # a different thread than the one used to start the dispatch of the
        # parallel tasks. This can lead to hang when a thread attempts to
        # join itself. As workaround, we detach the execution of the
        # aborting code to a dedicated thread. We then need to make sure
        # the rest of the function does not call `_terminate_and_reset`
        # in finally.
        if dispatch_thread_id != threading.get_ident():
            if not IS_PYPY:
                # FIX: removed the stray "-" that garbled the warning
                # message ("major -issues" -> "major issues").
                warnings.warn(
                    "A generator produced by joblib.Parallel has been "
                    "gc'ed in an unexpected thread. This behavior should "
                    "not cause major issues but to make sure, please "
                    "report this warning and your use case at "
                    "https://github.com/joblib/joblib/issues so it can "
                    "be investigated."
                )

            detach_generator_exit = True
            _parallel = self

            class _GeneratorExitThread(threading.Thread):
                def run(self):
                    _parallel._abort()
                    if _parallel.return_generator:
                        _parallel._warn_exit_early()
                    _parallel._terminate_and_reset()

            _GeneratorExitThread(
                name="GeneratorExitThread"
            ).start()
            return

        # Otherwise, we are in the thread that started the dispatch: we can
        # safely abort the execution and warn the user.
        self._abort()
        if self.return_generator:
            self._warn_exit_early()

        raise

    # Note: we catch any BaseException instead of just Exception instances
    # to also include KeyboardInterrupt
    except BaseException:
        self._exception = True
        self._abort()
        raise
    finally:
        # Store the unconsumed tasks and terminate the workers if necessary
        _remaining_outputs = ([] if self._exception else self._jobs)
        self._jobs = collections.deque()
        self._running = False
        if not detach_generator_exit:
            self._terminate_and_reset()

    # Deterministically flush the already-computed results that were not
    # consumed in the retrieval loop (only on the no-exception path).
    while len(_remaining_outputs) > 0:
        batched_results = _remaining_outputs.popleft()
        batched_results = batched_results.get_result(self.timeout)
        for result in batched_results:
            yield result
def _wait_retrieval(self):
    """Return True while there are still task results to retrieve."""
    # Tasks are still being dispatched from the input iterator: their
    # results will have to be fetched later on.
    if self._iterating:
        return True

    # Some dispatched tasks have not finished computing yet: wait before
    # starting (or resuming) retrieval.
    if self.n_completed_tasks < self.n_dispatched_tasks:
        return True

    # Backends without asynchronous callback-based retrieval require the
    # main thread to carefully drain `self._jobs` while the backend is
    # still alive. For the other backends retrieval happens in the
    # callback thread and any leftover results are collected in the
    # generator's `finally` step after backend termination.
    return (not self._backend.supports_retrieve_callback
            and len(self._jobs) > 0)
def _retrieve(self):
    """Yield task results in dispatch order as they become available."""
    while self._wait_retrieval():

        # If the callback thread of a worker has signaled that its task
        # triggered an exception, or if the retrieval loop has raised an
        # exception (e.g. `GeneratorExit`), exit the loop and surface the
        # worker traceback.
        if self._aborting:
            self._raise_error_fast()
            break

        # If the next job is not ready for retrieval yet, we just wait for
        # async callbacks to progress.
        if ((len(self._jobs) == 0) or
            (self._jobs[0].get_status(
                timeout=self.timeout) == TASK_PENDING)):
            time.sleep(0.01)
            continue

        # We need to be careful: the job list can be filling up as
        # we empty it and Python list are not thread-safe by
        # default hence the use of the lock
        with self._lock:
            batched_results = self._jobs.popleft()

        # Flatten the batched results to output one output at a time
        batched_results = batched_results.get_result(self.timeout)
        for result in batched_results:
            # Track consumption to build the early-exit warning message.
            self._nb_consumed += 1
            yield result
def _raise_error_fast(self):
    """If we are aborting, raise if a job caused an error."""
    # Scan the job list under the lock for the first errored job, if any.
    with self._lock:
        error_job = None
        for job in self._jobs:
            if job.status == TASK_ERROR:
                error_job = job
                break

    # Calling get_result on an errored job re-raises its exception
    # immediately. No such job exists when abort has been called
    # directly or when the generator was gc'ed.
    if error_job is not None:
        error_job.get_result(self.timeout)
def _warn_exit_early(self):
    """Warn the user if the generator is gc'ed before being consumed."""
    # Results that were computed but never iterated over by the user.
    ready_outputs = self.n_completed_tasks - self._nb_consumed
    is_completed = self._is_completed()
    msg = ""
    if ready_outputs:
        # NOTE(review): the joined message renders with a double space
        # ("executed  but not used"); left unchanged as it is runtime text.
        msg += (
            f"{ready_outputs} tasks have been successfully executed "
            " but not used."
        )
        if not is_completed:
            msg += " Additionally, "

    if not is_completed:
        msg += (
            f"{self.n_dispatched_tasks - self.n_completed_tasks} tasks "
            "which were still being processed by the workers have been "
            "cancelled."
        )

    if msg:
        msg += (
            " You could benefit from adjusting the input task "
            "iterator to limit unnecessary computation time."
        )

        warnings.warn(msg)
def _get_sequential_output(self, iterable):
    """Separate loop for sequential output.

    This simplifies the traceback in case of errors and reduces the
    overhead of calling sequential tasks with `joblib`.
    """
    try:
        self._iterating = True
        self._original_iterator = iterable
        batch_size = self._get_batch_size()

        if batch_size != 1:
            # Re-chunk then flatten the input so that the progress
            # accounting matches the batched parallel code path.
            it = iter(iterable)
            iterable_batched = iter(
                lambda: tuple(itertools.islice(it, batch_size)), ()
            )
            iterable = (
                task for batch in iterable_batched for task in batch
            )

        # first yield returns None, for internal use only. This ensures
        # that we enter the try/except block and setup the generator.
        yield None

        # Sequentially call the tasks and yield the results.
        for func, args, kwargs in iterable:
            self.n_dispatched_batches += 1
            self.n_dispatched_tasks += 1
            res = func(*args, **kwargs)
            self.n_completed_tasks += 1
            self.print_progress()
            yield res
            # Only incremented once the consumer resumes the generator.
            self._nb_consumed += 1
    except BaseException:
        self._exception = True
        self._aborting = True
        self._aborted = True
        raise
    finally:
        # Flush a final progress message and reset the run state.
        self.print_progress()
        self._running = False
        self._iterating = False
        self._original_iterator = None
def _reset_run_tracking(self):
    """Reset the counters and flags used to track the execution."""

    # Makes sure the parallel instance was not previously running in a
    # thread-safe way.
    with getattr(self, '_lock', nullcontext()):
        if self._running:
            msg = 'This Parallel instance is already running !'
            if self.return_generator is True:
                msg += (
                    " Before submitting new tasks, you must wait for the "
                    "completion of all the previous tasks, or clean all "
                    "references to the output generator."
                )
            raise RuntimeError(msg)
        self._running = True

    # Counter to keep track of the task dispatched and completed.
    self.n_dispatched_batches = 0
    self.n_dispatched_tasks = 0
    self.n_completed_tasks = 0

    # Following count is incremented by one each time the user iterates
    # on the output generator, it is used to prepare an informative
    # warning message in case the generator is deleted before all the
    # dispatched tasks have been consumed.
    self._nb_consumed = 0

    # Following flags are used to synchronize the threads in case one of
    # the tasks error-out to ensure that all workers abort fast and that
    # the backend terminates properly.

    # Set to True as soon as a worker signals that a task errors-out
    self._exception = False

    # Set to True in case of early termination following an incident
    self._aborting = False

    # Set to True after abortion is complete
    self._aborted = False
def __call__(self, iterable):
    """Main function to dispatch parallel tasks.

    Parameters
    ----------
    iterable : iterable of ``(func, args, kwargs)`` triples
        The delayed calls to execute.

    Returns
    -------
    list of results, or a generator over the results when
    ``self.return_generator`` is True.
    """
    self._reset_run_tracking()
    self._start_time = time.time()

    if not self._managed_backend:
        n_jobs = self._initialize_backend()
    else:
        n_jobs = self._effective_n_jobs()

    if n_jobs == 1:
        # If n_jobs==1, run the computation sequentially and return
        # immediately to avoid overheads.
        output = self._get_sequential_output(iterable)
        next(output)
        return output if self.return_generator else list(output)

    # Let's create an ID that uniquely identifies the current call. If the
    # call is interrupted early and that the same instance is immediately
    # re-used, this id will be used to prevent workers that were
    # concurrently finalizing a task from the previous call to run the
    # callback.
    with self._lock:
        self._call_id = uuid4().hex

    # self._effective_n_jobs should be called in the Parallel.__call__
    # thread only -- store its value in an attribute for further queries.
    # FIX: this assignment was duplicated further down; the redundant
    # second copy (and its duplicated comment) has been removed.
    self._cached_effective_n_jobs = n_jobs

    if isinstance(self._backend, LokyBackend):
        # For the loky backend, we add a callback executed when reducing
        # BatchCalls, that makes the loky executor use a temporary folder
        # specific to this Parallel object when pickling temporary memmaps.
        # This callback is necessary to ensure that several Parallel
        # objects using the same reusable executor don't use the same
        # temporary resources.

        def _batched_calls_reducer_callback():
            # Relevant implementation detail: the following lines, called
            # when reducing BatchedCalls, are called in a thread-safe
            # situation, meaning that the context of the temporary folder
            # manager will not be changed in between the callback execution
            # and the end of the BatchedCalls pickling. The reason is that
            # pickling (the only place where set_current_context is used)
            # is done from a single thread (the queue_feeder_thread).
            self._backend._workers._temp_folder_manager.set_current_context(  # noqa
                self._id
            )
        self._reducer_callback = _batched_calls_reducer_callback

    backend_name = self._backend.__class__.__name__
    if n_jobs == 0:
        raise RuntimeError("%s has no active worker." % backend_name)

    self._print(
        f"Using backend {backend_name} with {n_jobs} concurrent workers."
    )
    if hasattr(self._backend, 'start_call'):
        self._backend.start_call()

    # Following flag prevents double calls to `backend.stop_call`.
    self._calling = True

    iterator = iter(iterable)
    pre_dispatch = self.pre_dispatch

    if pre_dispatch == 'all':
        # prevent further dispatch via multiprocessing callback thread
        self._original_iterator = None
        self._pre_dispatch_amount = 0
    else:
        self._original_iterator = iterator
        if hasattr(pre_dispatch, 'endswith'):
            # e.g. pre_dispatch="2 * n_jobs": substitute and evaluate.
            pre_dispatch = eval_expr(
                pre_dispatch.replace("n_jobs", str(n_jobs))
            )
        self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)

        # The main thread will consume the first pre_dispatch items and
        # the remaining items will later be lazily dispatched by async
        # callbacks upon task completions.

        # TODO: this iterator should be batch_size * n_jobs
        iterator = itertools.islice(iterator, self._pre_dispatch_amount)

    # Use a caching dict for callables that are pickled with cloudpickle to
    # improve performances. This cache is used only in the case of
    # functions that are defined in the __main__ module, functions that
    # are defined locally (inside another function) and lambda expressions.
    self._pickle_cache = dict()

    output = self._get_outputs(iterator, pre_dispatch)
    self._call_ref = weakref.ref(output)

    # The first item from the output is blank, but it makes the interpreter
    # progress until it enters the Try/Except block of the generator and
    # reaches the first `yield` statement. This starts the asynchronous
    # dispatch of the tasks to the workers.
    next(output)

    return output if self.return_generator else list(output)
def __repr__(self):
    """Return a short ``ClassName(n_jobs=...)`` representation."""
    return f"{type(self).__name__}(n_jobs={self.n_jobs})"
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@joblib@joblib@parallel.py@.PATH_END.py
|
{
"filename": "test_numpy_compat.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/test_numpy_compat.py",
"type": "Python"
}
|
import numpy as np
import pytest
from pandas import (
CategoricalIndex,
DatetimeIndex,
Index,
PeriodIndex,
TimedeltaIndex,
isna,
)
import pandas._testing as tm
from pandas.api.types import (
is_complex_dtype,
is_numeric_dtype,
)
from pandas.core.arrays import BooleanArray
from pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin
def test_numpy_ufuncs_out(index):
result = index == index
out = np.empty(index.shape, dtype=bool)
np.equal(index, index, out=out)
tm.assert_numpy_array_equal(out, result)
if not index._is_multi:
# same thing on the ExtensionArray
out = np.empty(index.shape, dtype=bool)
np.equal(index.array, index.array, out=out)
tm.assert_numpy_array_equal(out, result)
@pytest.mark.parametrize(
    "func",
    [
        np.exp,
        np.exp2,
        np.expm1,
        np.log,
        np.log2,
        np.log10,
        np.log1p,
        np.sqrt,
        np.sin,
        np.cos,
        np.tan,
        np.arcsin,
        np.arccos,
        np.arctan,
        np.sinh,
        np.cosh,
        np.tanh,
        np.arcsinh,
        np.arccosh,
        np.arctanh,
        np.deg2rad,
        np.rad2deg,
    ],
    ids=lambda x: x.__name__,
)
def test_numpy_ufuncs_basic(index, func):
    # test ufuncs of numpy, see:
    # https://numpy.org/doc/stable/reference/ufuncs.html
    if isinstance(index, DatetimeIndexOpsMixin):
        # Datetime-like indexes reject these transcendental ufuncs.
        with tm.external_error_raised((TypeError, AttributeError)):
            with np.errstate(all="ignore"):
                func(index)
    elif is_numeric_dtype(index) and not (
        is_complex_dtype(index) and func in [np.deg2rad, np.rad2deg]
    ):
        # coerces to float (e.g. np.sin)
        with np.errstate(all="ignore"):
            result = func(index)
            arr_result = func(index.values)
            if arr_result.dtype == np.float16:
                # Index has no float16 dtype; the expected result upcasts.
                arr_result = arr_result.astype(np.float32)
            exp = Index(arr_result, name=index.name)

        tm.assert_index_equal(result, exp)
        if isinstance(index.dtype, np.dtype) and is_numeric_dtype(index):
            if is_complex_dtype(index):
                assert result.dtype == index.dtype
            elif index.dtype in ["bool", "int8", "uint8"]:
                # Whether float16 is produced (and then upcast) depends on
                # the numpy version, hence the two accepted dtypes.
                assert result.dtype in ["float16", "float32"]
            elif index.dtype in ["int16", "uint16", "float32"]:
                assert result.dtype == "float32"
            else:
                assert result.dtype == "float64"
        else:
            # e.g. np.exp with Int64 -> Float64
            assert type(result) is Index
    # raise AttributeError or TypeError
    elif len(index) == 0:
        pass
    else:
        with tm.external_error_raised((TypeError, AttributeError)):
            with np.errstate(all="ignore"):
                func(index)
@pytest.mark.parametrize(
    "func", [np.isfinite, np.isinf, np.isnan, np.signbit], ids=lambda x: x.__name__
)
def test_numpy_ufuncs_other(index, func):
    # test ufuncs of numpy, see:
    # https://numpy.org/doc/stable/reference/ufuncs.html
    if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
        if func in (np.isfinite, np.isinf, np.isnan):
            # numpy 1.18 changed isinf and isnan to not raise on dt64/td64
            result = func(index)
            assert isinstance(result, np.ndarray)

            out = np.empty(index.shape, dtype=bool)
            func(index, out=out)
            tm.assert_numpy_array_equal(out, result)
        else:
            # np.signbit is not defined for datetime-like values.
            with tm.external_error_raised(TypeError):
                func(index)

    elif isinstance(index, PeriodIndex):
        # None of these ufuncs are defined for Period values.
        with tm.external_error_raised(TypeError):
            func(index)

    elif is_numeric_dtype(index) and not (
        is_complex_dtype(index) and func is np.signbit
    ):
        # Results in bool array
        result = func(index)
        if not isinstance(index.dtype, np.dtype):
            # e.g. Int64 we expect to get BooleanArray back
            assert isinstance(result, BooleanArray)
        else:
            assert isinstance(result, np.ndarray)

        out = np.empty(index.shape, dtype=bool)
        func(index, out=out)
        if not isinstance(index.dtype, np.dtype):
            # The out= buffer holds the raw bool data of the masked array.
            tm.assert_numpy_array_equal(out, result._data)
        else:
            tm.assert_numpy_array_equal(out, result)

    elif len(index) == 0:
        pass
    else:
        with tm.external_error_raised(TypeError):
            func(index)
@pytest.mark.parametrize("func", [np.maximum, np.minimum])
def test_numpy_ufuncs_reductions(index, func, request):
# TODO: overlap with tests.series.test_ufunc.test_reductions
if len(index) == 0:
pytest.skip("Test doesn't make sense for empty index.")
if isinstance(index, CategoricalIndex) and index.dtype.ordered is False:
with pytest.raises(TypeError, match="is not ordered for"):
func.reduce(index)
return
else:
result = func.reduce(index)
if func is np.maximum:
expected = index.max(skipna=False)
else:
expected = index.min(skipna=False)
# TODO: do we have cases both with and without NAs?
assert type(result) is type(expected)
if isna(result):
assert isna(expected)
else:
assert result == expected
@pytest.mark.parametrize("func", [np.bitwise_and, np.bitwise_or, np.bitwise_xor])
def test_numpy_ufuncs_bitwise(func):
    # https://github.com/pandas-dev/pandas/issues/46769
    left = Index([1, 2, 3, 4], dtype="int64")
    right = Index([3, 4, 5, 6], dtype="int64")

    # Bitwise ufuncs on Index must work without emitting any warning.
    with tm.assert_produces_warning(None):
        outcome = func(left, right)

    tm.assert_index_equal(outcome, Index(func(left.values, right.values)))
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@indexes@test_numpy_compat.py@.PATH_END.py
|
{
"filename": "autocorr.py",
"repo_name": "HETDEX/elixer",
"repo_path": "elixer_extracted/elixer-main/elixer/emcee/autocorr.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import logging
import numpy as np
__all__ = ["function_1d", "integrated_time", "AutocorrError"]
logger = logging.getLogger(__name__)
def next_pow_two(n):
    """Return the smallest power of two greater than or equal to ``n``."""
    power = 1
    while power < n:
        power *= 2
    return power
def function_1d(x):
    """Estimate the normalized autocorrelation function of a 1-D series.

    Args:
        x: The series as a 1-D numpy array.

    Returns:
        array: The autocorrelation function of the time series.
    """
    x = np.atleast_1d(x)
    if x.ndim != 1:
        raise ValueError("invalid dimensions for 1D autocorrelation function")

    # Zero-pad to twice the next power of two so the FFT is fast and the
    # correlation is linear rather than circular.
    n = next_pow_two(len(x))
    transform = np.fft.fft(x - np.mean(x), n=2 * n)
    acf = np.fft.ifft(transform * np.conjugate(transform))[: len(x)].real

    # Normalize so that acf[0] == 1.
    return acf / acf[0]
def auto_window(taus, c):
    """Sokal's automated windowing: return the smallest index M such that
    M >= c * taus[M], or the last index when no window qualifies."""
    mask = np.arange(len(taus)) < c * taus
    if not np.any(mask):
        return len(taus) - 1
    # argmin on a boolean array gives the position of the first False.
    return np.argmin(mask)
def integrated_time(x, c=5, tol=50, quiet=False):
    """Estimate the integrated autocorrelation time of a time series.

    This estimate uses the iterative procedure described on page 16 of
    `Sokal's notes <https://www.semanticscholar.org/paper/Monte-Carlo-Methods-in-Statistical-Mechanics%3A-and-Sokal/0bfe9e3db30605fe2d4d26e1a288a5e2997e7225>`_ to
    determine a reasonable window size.

    Args:
        x: The time series. If multidimensional, set the time axis using the
            ``axis`` keyword argument and the function will be computed for
            every other axis.
        c (Optional[float]): The step size for the window search. (default:
            ``5``)
        tol (Optional[float]): The minimum number of autocorrelation times
            needed to trust the estimate. (default: ``50``)
        quiet (Optional[bool]): This argument controls the behavior when the
            chain is too short. If ``True``, give a warning instead of raising
            an :class:`AutocorrError`. (default: ``False``)

    Returns:
        float or array: An estimate of the integrated autocorrelation time of
            the time series ``x`` computed along the axis ``axis``.

    Raises:
        AutocorrError: If the autocorrelation time can't be reliably estimated
            from the chain and ``quiet`` is ``False``. This normally means
            that the chain is too short.
    """
    x = np.atleast_1d(x)
    # Promote the input to shape (n_steps, n_walkers, n_dims).
    if len(x.shape) == 1:
        x = x[:, np.newaxis, np.newaxis]
    if len(x.shape) == 2:
        x = x[:, :, np.newaxis]
    if len(x.shape) != 3:
        raise ValueError("invalid dimensions")

    n_t, n_w, n_d = x.shape
    tau_est = np.empty(n_d)
    windows = np.empty(n_d, dtype=int)

    # Loop over parameters
    for d in range(n_d):
        # Average the autocorrelation function over all walkers.
        f = np.zeros(n_t)
        for k in range(n_w):
            f += function_1d(x[:, k, d])
        f /= n_w
        # Running estimate tau(M) = 1 + 2 * sum_{t<=M} rho(t).
        taus = 2.0 * np.cumsum(f) - 1.0
        # Truncate the noisy tail with Sokal's automated windowing.
        windows[d] = auto_window(taus, c)
        tau_est[d] = taus[windows[d]]

    # Check convergence: the chain must be at least tol * tau steps long.
    flag = tol * tau_est > n_t

    # Warn or raise in the case of non-convergence
    if np.any(flag):
        msg = (
            "The chain is shorter than {0} times the integrated "
            "autocorrelation time for {1} parameter(s). Use this estimate "
            "with caution and run a longer chain!\n"
        ).format(tol, np.sum(flag))
        msg += "N/{0} = {1:.0f};\ntau: {2}".format(tol, n_t / tol, tau_est)
        if not quiet:
            raise AutocorrError(tau_est, msg)
        logger.warning(msg)

    return tau_est
class AutocorrError(Exception):
    """Raised if the chain is too short to estimate an autocorrelation time.

    The current estimate of the autocorrelation time can be accessed via the
    ``tau`` attribute of this exception.
    """

    def __init__(self, tau, *args, **kwargs):
        # Keep the partial estimate around so callers can still inspect it.
        self.tau = tau
        super().__init__(*args, **kwargs)
|
HETDEXREPO_NAMEelixerPATH_START.@elixer_extracted@elixer-main@elixer@emcee@autocorr.py@.PATH_END.py
|
{
"filename": "plot_stars.py",
"repo_name": "vblanka24/Ba_star_classification_PaperIII",
"repo_path": "Ba_star_classification_PaperIII_extracted/Ba_star_classification_PaperIII-main/data_processing_and_plotting/plot_stars.py",
"type": "Python"
}
|
import sys, os
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from process_data_lib import *
# Figure size in inches (width, height).
FIGSIZE = [20, 8]
# Edge and face colors for the observed-data star markers.
EDGECOLOR = "midnightblue"
MARKERCOLOR = "yellow"
# Base font, marker and line sizes shared by all panels.
FONTSIZE = 18
MARKERSIZE = 16
LINESIZE = 2
# Lowest atomic number included in the plots (Z = 6).
ZMIN = 6
# Atomic number of iron, used as the cutoff when LIMIT_DIL is enabled.
IRONZ = 26
# If True, restrict the dilution element list to Z > IRONZ.
LIMIT_DIL = False
def get_dict_predicted(files):
    """Parse the classification output files into one nested dictionary.

    Parameters
    ----------
    files : list of str
        Paths to classification result files. Files with "cm" in their
        name use the alternative line format expected by get_clean_lnlst.

    Returns
    -------
    dict
        ``{"fruity": {star: [(model_label, dilution), ...]},
           "monash": {star: [...]}}`` where duplicated models are skipped
        and the dilution is converted to float.
    """
    # Initialize dictionary, one sub-dict per model family
    dict_ = {}
    dict_["fruity"] = {}
    dict_["monash"] = {}

    # Keep an inventory of repeated models
    repeated = {}
    repeated["fruity"] = {}
    repeated["monash"] = {}

    # Go for file and line
    for file_ in files:
        # BUGFIX: compute the flag per file. Previously ``cm`` was set to
        # True once and never reset, so a single "cm" file forced
        # cm-parsing on every subsequent file. get_clean_lnlst requires a
        # different input format for CM files.
        cm = "cm" in file_
        with open(file_, "r") as fread:
            for line in fread:
                lnlst = get_clean_lnlst(line, cm)

                # Skip lines without content
                if lnlst is None:
                    continue

                # Read the star name and add the sets in
                # the dictionaries if they were not there
                if "star" in lnlst:
                    star_name = lnlst[-1][:-1]
                    if star_name not in dict_["fruity"]:
                        # The starname set
                        dict_["fruity"][star_name] = []
                        dict_["monash"][star_name] = []

                        # Add the list to the repeated models
                        repeated["fruity"][star_name] = []
                        repeated["monash"][star_name] = []

                # Add this line in fruity or monash
                else:  # lnlst[0]: label; lnlst[1]: dil; lnlst[2]: GoF; lnlst[3]: proba
                    if "fruity" in lnlst[0]:
                        type_ = "fruity"
                    elif "monash" in lnlst[0]:
                        type_ = "monash"

                    # Check if repeated to skip
                    if lnlst[0] in repeated[type_][star_name]:
                        continue

                    # Add this model here to avoid repeating it
                    repeated[type_][star_name].append(lnlst[0])

                    # Add (label, dilution) to the set, with dil as float
                    lnlst[1] = float(lnlst[1])
                    dict_[type_][star_name].append(tuple(lnlst[:-2]))

    return dict_
def plot_this_data(data, name_z, ax1, ax2, fmt, fmtMk="", label=None, mec=None,
                   mfc=None, error=False, data_compare=None, opacity=1.0):
    '''
    Plot a specific diluted model (or the observed data when error=True).

    data: dict of "<El>/Fe" abundances; "-" marks a missing value.
    name_z: dict mapping atomic number -> element name (plot order).
    ax1, ax2: abundance panel and residual panel axes.
    error: if True, plot with errorbars from "<El>/Fe_err" keys
        (0.5 dex is assumed when the error entry is "-").
    data_compare: reference abundances used to draw residuals on ax2.
    '''
    if data is None:
        return  # exit the function if data is all zeroes

    # Order data as lists-of-lists so missing elements break the line
    values = [[]]
    x_axis = [[]]
    vals_err = [[]]
    for z in name_z:
        # This part prepares the keys to be looked for in data
        name = name_z[z]
        key = name + "/Fe"
        if error:
            key_err = key + "_err"

        # Here the values are retrieved, giving the 0.5 errorbar if not
        # present
        if key in data:
            val = data[key]
            if error:
                val_err = data[key_err]
                if val_err == "-":
                    val_err = 0.5

            # If the value does not exist, leave a space
            if val == "-":
                x_axis.append([])
                values.append([])
                vals_err.append([])
            else:
                x_axis[-1].append(z)
                values[-1].append(val)
                if error:
                    vals_err[-1].append(val_err)
                else:
                    vals_err[-1].append(0)

    # Plot the model
    color = None
    # Flatten the segments, inserting NaNs at the gaps so matplotlib
    # breaks the line where elements are missing.
    x_line, y_line, y_err = [], [], []
    for ii in range(len(x_axis)):
        x_line.extend(x_axis[ii])
        y_line.extend(values[ii])
        y_err.extend(vals_err[ii])
        if ii != (len(x_axis)-1) and len(x_axis[ii]) != 0:
            x_line.append(x_axis[ii][-1]+1)
            y_line.append(np.nan)
            y_err.append(np.nan)

    # Repeating arguments
    args = {"mec": mec, "ms": MARKERSIZE, "lw": LINESIZE, "mfc": mfc}
    if not error:
        args["ms"] /= 3

    # Plot
    if not error:
        # Plotting model
        # NOTE(review): color is always None at this point, so the else
        # branch below looks unreachable -- presumably left over from a
        # previous per-segment loop; confirm before removing.
        if color is None:
            # First pass draws the interpolated connecting line, second
            # pass adds the markers with the same auto-assigned color.
            y_line = pd.Series(y_line)
            line = ax1.plot(x_line, y_line.interpolate(), fmtMk + fmt,
                            mec=mec, ms=0, lw=LINESIZE, mfc=mfc, alpha=opacity)
            color = line[-1].get_color()
            line = ax1.plot(x_line, y_line, fmtMk + fmt, label=label,
                            **args, alpha=opacity, color=color)
        else:
            ax1.plot(x_line, y_line, fmtMk + fmt, color=color, **args)
    else:
        # Plotting data
        ax1.errorbar(x_line, y_line, yerr=y_err, fmt=fmtMk + fmt,
                     ecolor=mec, label=label, capsize=3, zorder=5, **args)
        label = None

    # Plot residuals
    if data_compare is not None:
        # Dictionaries for keys so that they are easy to compare
        names_to_z = {name_z[z] + "/Fe": z for z in name_z}
        names_Fe = names_to_z.keys()

        # Make sure that it's the data we want to compare
        for name in data_compare:
            if name in names_Fe and name in data:
                # Positive residuals only
                try:
                    residual = data_compare[name] - data[name]
                except TypeError:
                    # "-" entries cannot be subtracted; skip them.
                    continue
                except:
                    raise

                # Plot it, reducing a bit the markersize with respect to data
                ax2.plot(names_to_z[name], residual, fmtMk, color=color,
                         ms=MARKERSIZE / 2)
def plot_results(predicted_models_dict, fruity_models_dict,
                 monash_models_dict, dict_data, red_elements, pathn):
    """
    Plot the results, star by star.

    For every star in dict_data, draw the diluted model patterns, the
    observed abundances with errorbars and a residuals panel, then save
    the figure as <pathn>/<star>.png.
    """
    # Get the short names
    fullnames, shortnames = new_names()
    short_names_dict = {full: short for full, short in
                        zip(fullnames, shortnames)}
    # NOTE(review): full_names_dict is built but never used below.
    full_names_dict = {short: full for short, full in
                       zip(shortnames, fullnames)}

    # Grab the data (element name / atomic number table)
    name_z = np.loadtxt(os.path.join("data_for_plot", "atomic_nums.dat"),
                        dtype=str)

    # Create dictionaries keyed by atomic number
    red_elements = {int(z): name for name, z in name_z
                    if name in red_elements}
    name_z = {int(z): name for name, z in name_z
              if name != "element" and int(z) >= ZMIN}

    # Create names list for dilution.
    names_dil = []
    for z in name_z:
        name = name_z[z]

        # To limit to anything above Fe
        if LIMIT_DIL and z > IRONZ:
            names_dil.append(name + "/Fe")
        elif not LIMIT_DIL:
            names_dil.append(name + "/Fe")

    # Each key in dict_data is a star
    for key in dict_data:
        # Figure specification
        fig = plt.figure(figsize=FIGSIZE)
        spec = gridspec.GridSpec(8, 1)

        # Axes for abundances
        ax1 = plt.subplot(spec[:4, :])
        ax1.set_title(key, size=FONTSIZE * 1.5)
        #ax1.set_title((key+", XGB"), size=FONTSIZE * 1.5, weight='bold')
        ax1.set_ylabel("[X/Fe]", size=FONTSIZE)

        # Axes for residual
        ax2 = plt.subplot(spec[4:6, :], sharex=ax1)
        ax2.set_ylabel("residuals", size=FONTSIZE)

        # Remove vertical space between plots
        fig.subplots_adjust(hspace=0)

        # Plot the fruity and monash models
        mod_type = ["fruity", "monash"]
        n_plots, n_mon, n_fru = 0, 0, 0
        for type_ in mod_type:
            # Retrieve name and dilution
            for model_name, dil in predicted_models_dict[type_][key]:
                # Get model and dilute; each family has its own line and
                # marker style.
                if type_ == "fruity":
                    model = fruity_models_dict[model_name]
                    fmt = "-"
                    fmtMk = "v"
                    n_fru += 1
                    n_curr = n_fru
                elif type_ == "monash":
                    model = monash_models_dict[model_name]
                    fmt = "--"
                    fmtMk = "o"
                    n_mon += 1
                    n_curr = n_mon
                else:
                    raise Exception("Only types implemented: fruity and monash")

                diluted_model = apply_dilution(model, dil, names_dil)

                # plot; the opacity fades with the model's rank so earlier
                # matches stand out.
                n_plots += 1
                tot_plots = len(predicted_models_dict[type_][key])
                short_name = short_names_dict[model_name]
                plot_this_data(diluted_model, name_z, ax1, ax2,
                               label=short_name, fmt=fmt, fmtMk=fmtMk,
                               data_compare=dict_data[key], opacity=0.2+0.8*(tot_plots-n_curr)/tot_plots)

        # Plot data and errorbars
        plot_this_data(dict_data[key], name_z, ax1, ax2, label="Data",
                       fmt="*", mec=EDGECOLOR, mfc=MARKERCOLOR, error=True)

        # Red elements (highlighted subset drawn on top in red)
        plot_this_data(dict_data[key], red_elements, ax1, ax2,
                       fmt="*", mec=EDGECOLOR, mfc="red", error=True)

        # set vertical lines
        for ii in range(ZMIN, max(name_z.keys()) + 1, 4):
            ax1.axvline(ii, ls="-", color="lightgray", zorder=0)
            ax2.axvline(ii, ls="-", color="lightgray", zorder=0)
        for ii in range(ZMIN + 2, max(name_z.keys()) + 1, 4):
            ax1.axvline(ii, ls="--", color="lightgray", zorder=0)
            ax2.axvline(ii, ls="--", color="lightgray", zorder=0)

        # Set horizontal lines (residual guides at +-0.2 and +-0.4)
        ax1.axhline(ls="--", color="silver", zorder=0)
        ax2.axhline(ls="-", color="k", zorder=0)
        ax2.axhline(0.2, ls="--", color="k", zorder=0)
        ax2.axhline(-0.2, ls="--", color="k", zorder=0)
        ax2.axhline(0.4, ls=":", color="k", zorder=0)
        ax2.axhline(-0.4, ls=":", color="k", zorder=0)

        # Adjust axes
        ax1.set_xlim([min(name_z.keys()) - 1, max(name_z.keys()) + 1])
        ax2.set_ylim([-0.72, 0.72])
        ax1.tick_params(which="both", right=True, labelsize=FONTSIZE)
        ax1.minorticks_on()
        ax2.tick_params(which="both", right=True, labelsize=FONTSIZE)
        ax2.minorticks_on()

        # Change x-axis. Use only odd numbers. 6, 10, ... for major
        # 8, 12... for minor
        x_axis_maj = [z for z in name_z.keys() if (z - 2) % 4 == 0]
        x_axis_maj_labs = [name_z[z] for z in name_z.keys() if (z - 2) % 4 == 0]
        x_axis_min = [z for z in name_z.keys() if z % 4 == 0]
        x_axis_min_labs = [name_z[z] for z in name_z.keys() if z % 4 == 0]

        # Major ticks
        ax2.set_xticks(x_axis_maj)
        ax2.set_xticklabels(x_axis_maj_labs, size=FONTSIZE)

        # Minor ticks
        ax2.set_xticks(x_axis_min, minor=True)
        ax2.set_xticklabels(x_axis_min_labs, minor=True, size=FONTSIZE)

        # Displace the minor ticks down
        ax2.xaxis.set_tick_params(which="minor", pad=20)

        # Align y-labels
        fig.align_ylabels()

        # Put legend outside of plot
        ncol = min(6, n_plots + 1)
        print(f"Plotting: {key}")
        ax1.legend(loc="upper center", bbox_to_anchor=(0.5, -0.75), ncol=ncol,
                   fontsize=FONTSIZE)

        # Create plotting directory if it does not exist
        if not os.path.isdir(pathn):
            os.mkdir(pathn)

        # Save the plot
        #filename = os.path.join(pathn, key + ".pdf")
        #plt.savefig(filename)
        filename = os.path.join(pathn, key + ".png")
        plt.savefig(filename)

        # Close the figure to save memory
        plt.close()
def main():
    """
    Program for plotting the outputs from the classification.

    Usage: python3 plot_stars.py <file1> [file2 ...] <directory>

    Every argument but the last is a classification output file; the
    last argument is the directory where plots are written.
    """
    if len(sys.argv) < 3:
        s = "Incorrect number of arguments. "
        s += f"Use: python3 {sys.argv[0]} <file1> [file2 ...] <directory>"
        sys.exit(s)
    # Save files with data and directory
    files = sys.argv[1:-1]
    pathn = sys.argv[-1]
    # Define all the directories.  Fix: "~" is NOT expanded by open() or
    # os.path.join, so expand it explicitly here (no-op for absolute paths).
    dir_data = os.path.expanduser(
        "~/Ba_star_classification_VB/Ba_star_classification_data")
    fruity_mods = "models_fruity_dec"  # decayed fruity models
    monash_mods = "models_monash"
    #data_file = "all_abund_and_masses.dat"
    data_file = "all_data_w_err.dat"
    fruity_dir = os.path.join(dir_data, fruity_mods)
    monash_dir = os.path.join(dir_data, monash_mods)
    data_file = os.path.join(dir_data, data_file)
    # Sort files in fruity and monash
    predicted_models_dict = get_dict_predicted(files)
    # Load the stellar abundances
    dict_data = get_data_values(data_file)
    # The fruity and monash models
    fruity_models_dict = get_data_fruity(fruity_dir)
    monash_models_dict = get_data_monash(monash_dir)
    # Load the red elements: first non-comment, non-empty line of the file
    red_elements = None
    with open("element_set.dat", "r") as fread:
        for line in fread:
            lnlst = line.split()
            # Skip comments and empty lines
            if len(lnlst) == 0 or "#" in lnlst[0]:
                continue
            red_elements = lnlst
            break
    # Fix: previously a file without any valid line raised a bare NameError
    if red_elements is None:
        sys.exit("No element set found in element_set.dat")
    # Remove the "/Fe" suffixes and drop iron itself
    red_elements = [elem.split("/")[0] for elem in red_elements]
    red_elements.remove("Fe")
    # Now that all the data is loaded, we can plot it
    plot_results(predicted_models_dict, fruity_models_dict,
                 monash_models_dict, dict_data, red_elements, pathn)
if __name__ == "__main__":
    main()
|
vblanka24REPO_NAMEBa_star_classification_PaperIIIPATH_START.@Ba_star_classification_PaperIII_extracted@Ba_star_classification_PaperIII-main@data_processing_and_plotting@plot_stars.py@.PATH_END.py
|
{
"filename": "getcollisionsY1_ran.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/getcollisionsY1_ran.py",
"type": "Python"
}
|
'''
Find all of the collisions for Y1 randoms
'''
import numpy as np
import os
from astropy.table import Table
import argparse
from fiberassign.hardware import load_hardware
from fiberassign.tiles import load_tiles
from fiberassign.targets import Targets, TargetsAvailable, LocationsAvailable, create_tagalong, load_target_file, targets_in_tiles
from fiberassign.assign import Assignment
from fiberassign.utils import Logger
import fitsio
import LSS.common_tools as common
# Command line: which program's tiles to process
parser = argparse.ArgumentParser()
parser.add_argument("--prog", choices=['DARK','BRIGHT'])
args = parser.parse_args()
# Y1 tile table for the chosen program
tiletab = Table.read('/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/tiles-'+args.prog+'.fits')
# Focal-plane exclusion margins passed to load_hardware
# (presumably in mm -- TODO confirm against fiberassign docs)
margins = dict(pos=0.05,
               petal=0.4,
               gfa=0.4)
#def main():
#    from LSS.mkCat_singletile.fa4lsscat import getfatiles
#    getfatiles()
#    return
log = Logger.get()
# Module-level state read by getcoll(): ``rann`` is the random-catalog
# realization index, reassigned in the __main__ loop below; ``n`` appears
# unused in the visible code.
rann = 0
n = 0
#for tile in t['TILEID']:
def getcoll(ind):
    """Find fiber-assignment collisions for one tile of the current random.

    Re-runs the fiberassign availability calculation for tile
    ``tiletab[ind]`` using the hardware state at the tile's original
    RUNDATE, collects the (location, targetid) pairs flagged as colliding,
    and matches them back to the FAVAIL extension of the original fba file.

    Relies on the module globals ``tiletab``, ``args``, ``margins`` and
    ``rann`` (the random realization index set in the __main__ loop).

    Parameters
    ----------
    ind : int
        Row index into ``tiletab``.

    Returns
    -------
    astropy.table.Table
        The colliding rows of the original FAVAIL table with a TILEID
        column added; empty (with the FAVAIL schema) when the tile has
        no collisions.
    """
    tile = tiletab[ind]['TILEID']
    ts = '%06i' % tile
    # Header of the original fiberassign file: gives the run date and
    # field setup actually used for this tile
    fbah = fitsio.read_header('/global/cfs/cdirs/desi/target/fiberassign/tiles/trunk/'+ts[:3]+'/fiberassign-'+ts+'.fits.gz')
    dt = fbah['RUNDATE']#[:19]
    pr = args.prog
    # Build a one-row tile table mirroring the original observing setup
    t = Table(tiletab[ind])
    t['OBSCONDITIONS'] = 516
    t['IN_DESI'] = 1
    t['MTLTIME'] = fbah['MTLTIME']
    t['FA_RUN'] = fbah['FA_RUN']
    t['PROGRAM'] = pr
    obsha = fbah['FA_HA']
    obstheta = fbah['FIELDROT']
    # Hardware state as of the original run date, with exclusion margins
    hw = load_hardware(rundate=dt, add_margins=margins)
    t.write(os.environ['SCRATCH']+'/rantiles/'+str(tile)+'-'+str(rann)+'-tiles.fits', overwrite=True)
    tiles = load_tiles(
        tiles_file=os.environ['SCRATCH']+'/rantiles/'+str(tile)+'-'+str(rann)+'-tiles.fits',obsha=obsha,obstheta=obstheta,
        select=[tile])
    tids = tiles.id
    print('Tile ids:', tids)
    I = np.flatnonzero(np.array(tids) == tile)
    assert(len(I) == 1)
    i = I[0]
    tile_ra = tiles.ra[i]
    tile_dec = tiles.dec[i]
    # Create empty target list
    tgs = Targets()
    # Create structure for carrying along auxiliary target data not needed by C++.
    plate_radec = True
    tagalong = create_tagalong(plate_radec=plate_radec)
    # Load the random targets generated for this tile
    load_target_file(tgs, tagalong, '/global/cfs/cdirs/desi/survey/catalogs/main/LSS/random'+str(rann)+'/tilenofa-%i.fits' % tile)
    # Find targets within tiles, and project their RA,Dec positions
    # into focal-plane coordinates.
    tile_targetids, tile_x, tile_y, tile_xy_cs5 = targets_in_tiles(hw, tgs, tiles, tagalong)
    # Compute the targets available to each fiber for each tile.
    tgsavail = TargetsAvailable(hw, tiles, tile_targetids, tile_x, tile_y)
    # Compute the fibers on all tiles available for each target and sky
    favail = LocationsAvailable(tgsavail)
    # FAKE stucksky
    stucksky = {}
    # Create assignment object
    asgn = Assignment(tgs, tgsavail, favail, stucksky)
    # coll: dict (loc, targetid) -> bitmask
    coll = asgn.check_avail_collisions(tile)
    print('N collisions:', len(coll))
    if len(coll) > 0:
        kl = np.array(list(coll.keys())).transpose()
        locs = kl[0]
        ids = kl[1]
        # Encode each (location, targetid) pair as a single integer key
        locids = ids*10000+locs
    else:
        # Fix: transposing an empty key list previously made kl[0] raise
        # IndexError for tiles without any collision
        locids = np.array([], dtype=np.int64)
    # Match the colliding pairs back to the original FAVAIL rows
    forig = fitsio.read('/global/cfs/cdirs/desi/survey/catalogs/main/LSS/random'+str(rann)+'/fba-'+ts+'.fits',ext='FAVAIL')
    locidsin = np.isin(forig['LOCATION']+10000*forig['TARGETID'],locids)
    print('N collisions original:',np.sum(locidsin))
    colltab = Table(forig[locidsin])
    colltab['TILEID'] = tile
    return colltab
if __name__ == '__main__':
    from multiprocessing import Pool
    tls = list(tiletab['TILEID'])#[:10])
    inds = np.arange(len(tls))
    # Loop over the 18 random realizations.  getcoll() reads the
    # module-level ``rann`` set here.
    # NOTE(review): this relies on workers inheriting ``rann`` via the
    # 'fork' start method -- would break under 'spawn'; confirm platform.
    for rann in range(0,18):
        with Pool(processes=128) as pool:
            res = pool.map(getcoll, inds)
        # Stack the per-tile collision tables and write one file per random
        colltot = np.concatenate(res)
        common.write_LSS(colltot,'/global/cfs/cdirs/desi/survey/catalogs/Y1/LSS/random'+str(rann)+'collisions-'+args.prog+'.fits')
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@getcollisionsY1_ran.py@.PATH_END.py
|
{
"filename": "hb.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/io/harwell_boeing/hb.py",
"type": "Python"
}
|
"""
Implementation of Harwell-Boeing read/write.
At the moment not the full Harwell-Boeing format is supported. Supported
features are:
- assembled, non-symmetric, real matrices
- integer for pointer/indices
- exponential format for float values, and int format
"""
from __future__ import division, print_function, absolute_import
# TODO:
# - Add more support (symmetric/complex matrices, non-assembled matrices ?)
# XXX: reading is reasonably efficient (>= 85 % is in numpy.fromstring), but
# takes a lot of memory. Being faster would require compiled code.
# write is not efficient. Although not a terribly exciting task,
# having reusable facilities to efficiently read/write fortran-formatted files
# would be useful outside this module.
import warnings
import numpy as np
from scipy.sparse import csc_matrix
from scipy.io.harwell_boeing._fortran_format_parser import \
FortranFormatParser, IntFormat, ExpFormat
__all__ = ["MalformedHeader", "hb_read", "hb_write", "HBInfo", "HBFile",
"HBMatrixType"]
class MalformedHeader(Exception):
    """Raised when a Harwell-Boeing header does not follow the format."""
    pass
class LineOverflow(Warning):
    """Warned when written data overflows a fixed-width header field."""
    pass
def _nbytes_full(fmt, nlines):
"""Return the number of bytes to read to get every full lines for the
given parsed fortran format."""
return (fmt.repeat * fmt.width + 1) * (nlines - 1)
class HBInfo(object):
    """Holds the metadata of a Harwell-Boeing file header.

    Instances are meant to be built through the ``from_data`` /
    ``from_file`` class constructors, not the raw ``__init__``.
    """
    @classmethod
    def from_data(cls, m, title="Default title", key="0", mxtype=None, fmt=None):
        """Create a HBInfo instance from an existing sparse matrix.

        Parameters
        ----------
        m : sparse matrix
            the HBInfo instance will derive its parameters from m
        title : str
            Title to put in the HB header
        key : str
            Key
        mxtype : HBMatrixType
            type of the input matrix
        fmt : dict
            not implemented

        Returns
        -------
        hb_info : HBInfo instance
        """
        m = m.tocsc(copy=False)
        pointer = m.indptr
        indices = m.indices
        values = m.data
        nrows, ncols = m.shape
        nnon_zeros = m.nnz
        if fmt is None:
            # +1 because HB use one-based indexing (Fortran), and we will write
            # the indices /pointer as such
            pointer_fmt = IntFormat.from_number(np.max(pointer+1))
            indices_fmt = IntFormat.from_number(np.max(indices+1))
            # Negated max abs value so the derived format reserves room
            # for a sign character
            if values.dtype.kind in np.typecodes["AllFloat"]:
                values_fmt = ExpFormat.from_number(-np.max(np.abs(values)))
            elif values.dtype.kind in np.typecodes["AllInteger"]:
                values_fmt = IntFormat.from_number(-np.max(np.abs(values)))
            else:
                raise NotImplementedError("type %s not implemented yet" % values.dtype.kind)
        else:
            raise NotImplementedError("fmt argument not supported yet.")
        if mxtype is None:
            if not np.isrealobj(values):
                raise ValueError("Complex values not supported yet")
            if values.dtype.kind in np.typecodes["AllInteger"]:
                tp = "integer"
            elif values.dtype.kind in np.typecodes["AllFloat"]:
                tp = "real"
            else:
                raise NotImplementedError("type %s for values not implemented"
                                          % values.dtype)
            mxtype = HBMatrixType(tp, "unsymmetric", "assembled")
        else:
            raise ValueError("mxtype argument not handled yet.")

        def _nlines(fmt, size):
            # Number of data lines needed to hold `size` items with
            # fmt.repeat items per line (last line may be partial)
            nlines = size // fmt.repeat
            if nlines * fmt.repeat != size:
                nlines += 1
            return nlines

        pointer_nlines = _nlines(pointer_fmt, pointer.size)
        indices_nlines = _nlines(indices_fmt, indices.size)
        values_nlines = _nlines(values_fmt, values.size)
        total_nlines = pointer_nlines + indices_nlines + values_nlines
        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   pointer_fmt.fortran_format, indices_fmt.fortran_format,
                   values_fmt.fortran_format)

    @classmethod
    def from_file(cls, fid):
        """Create a HBInfo instance from a file object containing a matrix in the
        HB format.

        Parameters
        ----------
        fid : file-like matrix
            File or file-like object containing a matrix in the HB format.

        Returns
        -------
        hb_info : HBInfo instance
        """
        # First line: 72-char title + key
        line = fid.readline().strip("\n")
        # NOTE(review): the check requires strictly MORE than 72 characters
        # (title plus at least one key char) while the message says "at
        # least 72" -- confirm intended behavior before changing.
        if not len(line) > 72:
            raise ValueError("Expected at least 72 characters for first line, "
                             "got: \n%s" % line)
        title = line[:72]
        key = line[72:]
        # Second line: line counts in fixed 14-char fields
        line = fid.readline().strip("\n")
        if not len(line.rstrip()) >= 56:
            raise ValueError("Expected at least 56 characters for second line, "
                             "got: \n%s" % line)
        total_nlines = _expect_int(line[:14])
        pointer_nlines = _expect_int(line[14:28])
        indices_nlines = _expect_int(line[28:42])
        values_nlines = _expect_int(line[42:56])
        rhs_nlines = line[56:72].strip()
        if rhs_nlines == '':
            rhs_nlines = 0
        else:
            rhs_nlines = _expect_int(rhs_nlines)
        if not rhs_nlines == 0:
            raise ValueError("Only files without right hand side supported for "
                             "now.")
        # Third line: matrix type code + dimensions
        line = fid.readline().strip("\n")
        if not len(line) >= 70:
            raise ValueError("Expected at least 72 character for third line, got:\n"
                             "%s" % line)
        mxtype_s = line[:3].upper()
        if not len(mxtype_s) == 3:
            raise ValueError("mxtype expected to be 3 characters long")
        mxtype = HBMatrixType.from_fortran(mxtype_s)
        if mxtype.value_type not in ["real", "integer"]:
            raise ValueError("Only real or integer matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.structure == "unsymmetric":
            raise ValueError("Only unsymmetric matrices supported for "
                             "now (detected %s)" % mxtype)
        if not mxtype.storage == "assembled":
            raise ValueError("Only assembled matrices supported for now")
        # Columns 4-14 of line 3 must be blank per the HB spec
        if not line[3:14] == " " * 11:
            raise ValueError("Malformed data for third line: %s" % line)
        nrows = _expect_int(line[14:28])
        ncols = _expect_int(line[28:42])
        nnon_zeros = _expect_int(line[42:56])
        nelementals = _expect_int(line[56:70])
        if not nelementals == 0:
            raise ValueError("Unexpected value %d for nltvl (last entry of line 3)"
                             % nelementals)
        # Fourth line: the three fortran formats (pointer, indices, values)
        line = fid.readline().strip("\n")
        ct = line.split()
        if not len(ct) == 3:
            raise ValueError("Expected 3 formats, got %s" % ct)
        return cls(title, key,
                   total_nlines, pointer_nlines, indices_nlines, values_nlines,
                   mxtype, nrows, ncols, nnon_zeros,
                   ct[0], ct[1], ct[2],
                   rhs_nlines, nelementals)

    def __init__(self, title, key,
                 total_nlines, pointer_nlines, indices_nlines, values_nlines,
                 mxtype, nrows, ncols, nnon_zeros,
                 pointer_format_str, indices_format_str, values_format_str,
                 right_hand_sides_nlines=0, nelementals=0):
        """Do not use this directly, but the class ctrs (from_* functions)."""
        self.title = title
        self.key = key
        # NOTE(review): the normalized title/key below are local variables
        # only and are never stored back on self, so self.title may still
        # be None here while dump() uses self.title -- confirm upstream.
        if title is None:
            title = "No Title"
        if len(title) > 72:
            raise ValueError("title cannot be > 72 characters")
        if key is None:
            key = "|No Key"
        if len(key) > 8:
            warnings.warn("key is > 8 characters (key is %s)" % key, LineOverflow)
        self.total_nlines = total_nlines
        self.pointer_nlines = pointer_nlines
        self.indices_nlines = indices_nlines
        self.values_nlines = values_nlines
        parser = FortranFormatParser()
        pointer_format = parser.parse(pointer_format_str)
        if not isinstance(pointer_format, IntFormat):
            raise ValueError("Expected int format for pointer format, got %s"
                             % pointer_format)
        indices_format = parser.parse(indices_format_str)
        if not isinstance(indices_format, IntFormat):
            raise ValueError("Expected int format for indices format, got %s" %
                             indices_format)
        values_format = parser.parse(values_format_str)
        if isinstance(values_format, ExpFormat):
            if mxtype.value_type not in ["real", "complex"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            values_dtype = np.float64
        elif isinstance(values_format, IntFormat):
            if mxtype.value_type not in ["integer"]:
                raise ValueError("Inconsistency between matrix type %s and "
                                 "value type %s" % (mxtype, values_format))
            # XXX: fortran int -> dtype association ?
            values_dtype = int
        else:
            raise ValueError("Unsupported format for values %r" % (values_format,))
        self.pointer_format = pointer_format
        self.indices_format = indices_format
        self.values_format = values_format
        self.pointer_dtype = np.int32
        self.indices_dtype = np.int32
        self.values_dtype = values_dtype
        # nbytes_full: bytes spanned by the complete (newline-terminated)
        # data lines of each section, used for bulk reads
        self.pointer_nlines = pointer_nlines
        self.pointer_nbytes_full = _nbytes_full(pointer_format, pointer_nlines)
        self.indices_nlines = indices_nlines
        self.indices_nbytes_full = _nbytes_full(indices_format, indices_nlines)
        self.values_nlines = values_nlines
        self.values_nbytes_full = _nbytes_full(values_format, values_nlines)
        self.nrows = nrows
        self.ncols = ncols
        self.nnon_zeros = nnon_zeros
        self.nelementals = nelementals
        self.mxtype = mxtype

    def dump(self):
        """Gives the header corresponding to this instance as a string."""
        header = [self.title.ljust(72) + self.key.ljust(8)]
        header.append("%14d%14d%14d%14d" %
                      (self.total_nlines, self.pointer_nlines,
                       self.indices_nlines, self.values_nlines))
        header.append("%14s%14d%14d%14d%14d" %
                      (self.mxtype.fortran_format.ljust(14), self.nrows,
                       self.ncols, self.nnon_zeros, 0))
        pffmt = self.pointer_format.fortran_format
        iffmt = self.indices_format.fortran_format
        vffmt = self.values_format.fortran_format
        header.append("%16s%16s%20s" %
                      (pffmt.ljust(16), iffmt.ljust(16), vffmt.ljust(20)))
        return "\n".join(header)
def _expect_int(value, msg=None):
try:
return int(value)
except ValueError:
if msg is None:
msg = "Expected an int, got %s"
raise ValueError(msg % value)
def _read_hb_data(content, header):
    """Read the data section of an HB file and return a csc_matrix.

    *content* is a file-like object positioned just past the header;
    *header* is the corresponding HBInfo instance.
    """
    # XXX: look at a way to reduce memory here (big string creation)
    # Each section is read as: all full lines in one bulk read, plus one
    # readline() for the trailing partial line.
    ptr_string = "".join([content.read(header.pointer_nbytes_full),
                          content.readline()])
    # NOTE(review): np.fromstring text mode is deprecated in newer numpy;
    # left as-is since this is vendored py2-era code.
    ptr = np.fromstring(ptr_string,
                        dtype=int, sep=' ')
    ind_string = "".join([content.read(header.indices_nbytes_full),
                          content.readline()])
    ind = np.fromstring(ind_string,
                        dtype=int, sep=' ')
    val_string = "".join([content.read(header.values_nbytes_full),
                          content.readline()])
    val = np.fromstring(val_string,
                        dtype=header.values_dtype, sep=' ')
    try:
        # -1 converts Fortran one-based indices/pointers to zero-based
        return csc_matrix((val, ind-1, ptr-1),
                          shape=(header.nrows, header.ncols))
    except ValueError as e:
        raise e
def _write_data(m, fid, header):
    """Write the HB header and data of sparse matrix *m* to file object *fid*.

    *header* must be an HBInfo instance consistent with *m*.
    """
    m = m.tocsc(copy=False)

    def write_array(f, ar, nlines, fmt):
        # ar_nlines is the number of full lines, n is the number of items per
        # line, ffmt the fortran format
        pyfmt = fmt.python_format
        pyfmt_full = pyfmt * fmt.repeat
        # for each array to write, we first write the full lines, and special
        # case for partial line
        full = ar[:(nlines - 1) * fmt.repeat]
        for row in full.reshape((nlines-1, fmt.repeat)):
            f.write(pyfmt_full % tuple(row) + "\n")
        nremain = ar.size - full.size
        if nremain > 0:
            f.write((pyfmt * nremain) % tuple(ar[ar.size - nremain:]) + "\n")

    fid.write(header.dump())
    fid.write("\n")
    # +1 is for fortran one-based indexing
    write_array(fid, m.indptr+1, header.pointer_nlines,
                header.pointer_format)
    write_array(fid, m.indices+1, header.indices_nlines,
                header.indices_format)
    write_array(fid, m.data, header.values_nlines,
                header.values_format)
class HBMatrixType(object):
    """Represents the three-character Harwell-Boeing matrix type code.

    The code combines a value type (real/complex/pattern/integer), a
    structure (symmetric, unsymmetric, ...) and a storage scheme
    (assembled or elemental).
    """
    # q2f* translate qualified names to their single fortran character
    _q2f_type = {
        "real": "R",
        "complex": "C",
        "pattern": "P",
        "integer": "I",
    }
    _q2f_structure = {
        "symmetric": "S",
        "unsymmetric": "U",
        "hermitian": "H",
        "skewsymmetric": "Z",
        "rectangular": "R"
    }
    _q2f_storage = {
        "assembled": "A",
        "elemental": "E",
    }
    # f2q* are the inverse mappings (fortran character -> qualified name)
    _f2q_type = {char: name for name, char in _q2f_type.items()}
    _f2q_structure = {char: name for name, char in _q2f_structure.items()}
    _f2q_storage = {char: name for name, char in _q2f_storage.items()}

    @classmethod
    def from_fortran(cls, fmt):
        """Build an instance from a three-character fortran type code."""
        if len(fmt) != 3:
            raise ValueError("Fortran format for matrix type should be 3 "
                             "characters long")
        try:
            return cls(cls._f2q_type[fmt[0]],
                       cls._f2q_structure[fmt[1]],
                       cls._f2q_storage[fmt[2]])
        except KeyError:
            raise ValueError("Unrecognized format %s" % fmt)

    def __init__(self, value_type, structure, storage="assembled"):
        self.value_type = value_type
        self.structure = structure
        self.storage = storage
        # Validate after assignment (matches historical behavior)
        if value_type not in self._q2f_type:
            raise ValueError("Unrecognized type %s" % value_type)
        if structure not in self._q2f_structure:
            raise ValueError("Unrecognized structure %s" % structure)
        if storage not in self._q2f_storage:
            raise ValueError("Unrecognized storage %s" % storage)

    @property
    def fortran_format(self):
        """The three-character fortran code for this matrix type."""
        parts = (self._q2f_type[self.value_type],
                 self._q2f_structure[self.structure],
                 self._q2f_storage[self.storage])
        return "".join(parts)

    def __repr__(self):
        return "HBMatrixType({0}, {1}, {2})".format(
            self.value_type, self.structure, self.storage)
class HBFile(object):
    """Ties a file object to its parsed (or supplied) HB header metadata."""
    def __init__(self, file, hb_info=None):
        """Create a HBFile instance.

        Parameters
        ----------
        file : file-object
            StringIO work as well
        hb_info : HBInfo, optional
            Should be given as an argument for writing, in which case the file
            should be writable.
        """
        self._fid = file
        if hb_info is None:
            # Reading mode: parse the header from the file itself
            self._hb_info = HBInfo.from_file(file)
        else:
            #raise IOError("file %s is not writable, and hb_info "
            #              "was given." % file)
            self._hb_info = hb_info

    @property
    def title(self):
        # Title string from the HB header
        return self._hb_info.title

    @property
    def key(self):
        # Key string from the HB header
        return self._hb_info.key

    @property
    def type(self):
        # Value type: "real", "integer", ...
        return self._hb_info.mxtype.value_type

    @property
    def structure(self):
        # Matrix structure: "unsymmetric", "symmetric", ...
        return self._hb_info.mxtype.structure

    @property
    def storage(self):
        # Storage scheme: "assembled" or "elemental"
        return self._hb_info.mxtype.storage

    def read_matrix(self):
        """Read and return the matrix as a scipy.sparse.csc_matrix."""
        return _read_hb_data(self._fid, self._hb_info)

    def write_matrix(self, m):
        """Write sparse matrix *m* to the underlying file."""
        return _write_data(m, self._fid, self._hb_info)
def hb_read(path_or_open_file):
    """Read HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise it is opened
        before reading.

    Returns
    -------
    data : scipy.sparse.csc_matrix instance
        The data read from the HB file as a sparse matrix.

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    def _get_matrix(fid):
        hb = HBFile(fid)
        return hb.read_matrix()

    # Duck-type on .read so StringIO and open files both work
    if hasattr(path_or_open_file, 'read'):
        return _get_matrix(path_or_open_file)
    else:
        with open(path_or_open_file) as f:
            return _get_matrix(f)
def hb_write(path_or_open_file, m, hb_info=None):
    """Write HB-format file.

    Parameters
    ----------
    path_or_open_file : path-like or file-like
        If a file-like object, it is used as-is. Otherwise it is opened
        before writing.
    m : sparse-matrix
        the sparse matrix to write
    hb_info : HBInfo
        contains the meta-data for write

    Returns
    -------
    None

    Notes
    -----
    At the moment not the full Harwell-Boeing format is supported. Supported
    features are:

        - assembled, non-symmetric, real matrices
        - integer for pointer/indices
        - exponential format for float values, and int format
    """
    m = m.tocsc(copy=False)
    # Derive the header from the matrix itself when none is supplied
    if hb_info is None:
        hb_info = HBInfo.from_data(m)

    def _set_matrix(fid):
        hb = HBFile(fid, hb_info)
        return hb.write_matrix(m)

    # Duck-type on .write so StringIO and open files both work
    if hasattr(path_or_open_file, 'write'):
        return _set_matrix(path_or_open_file)
    else:
        with open(path_or_open_file, 'w') as f:
            return _set_matrix(f)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@io@harwell_boeing@hb.py@.PATH_END.py
|
{
"filename": "test_background_tasks.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/test_background_tasks.py",
"type": "Python"
}
|
import asyncio
from datetime import timedelta
from pathlib import Path
from typing import TYPE_CHECKING, AsyncGenerator, Iterable, Tuple
from unittest import mock
import pytest
import prefect.results
from prefect import Task, task, unmapped
from prefect.blocks.core import Block
from prefect.client.orchestration import get_client
from prefect.client.schemas import TaskRun
from prefect.filesystems import LocalFileSystem
from prefect.results import ResultStore, get_or_create_default_task_scheduling_storage
from prefect.server.api.task_runs import TaskQueue
from prefect.server.schemas.core import TaskRun as ServerTaskRun
from prefect.settings import (
PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK,
temporary_settings,
)
if TYPE_CHECKING:
from prefect.client.orchestration import PrefectClient
async def result_store_from_task(task) -> ResultStore:
    """Build a ResultStore for *task* backed by the default task-scheduling storage."""
    return await ResultStore(
        result_storage=await get_or_create_default_task_scheduling_storage()
    ).update_for_task(task)
@pytest.fixture
def local_filesystem(tmp_path):
    """A LocalFileSystem block saved under the name ``test-fs``."""
    block = LocalFileSystem(basepath=tmp_path)
    block.save("test-fs", overwrite=True)
    return block
@pytest.fixture(autouse=True)
async def clear_scheduled_task_queues():
    """Reset the server-side task queues before and after every test."""
    TaskQueue.reset()
    yield
    TaskQueue.reset()
@pytest.fixture(autouse=True)
async def clear_cached_filesystems():
    """Clear the cached default result storages before and after every test."""
    prefect.results._default_storages.clear()
    yield
    prefect.results._default_storages.clear()
@pytest.fixture
def foo_task() -> Task:
    """A simple synchronous task that prints and returns its argument."""
    @task
    def foo(x: int) -> int:
        print(x)
        return x

    return foo
@pytest.fixture
def async_foo_task() -> Task:
    """A simple asynchronous task that prints and returns its argument."""
    @task
    async def async_foo(x: int) -> int:
        print(x)
        return x

    return async_foo
@pytest.fixture
def foo_task_with_result_storage(foo_task, local_filesystem):
    """``foo_task`` configured to store results on the local filesystem block."""
    return foo_task.with_options(result_storage=local_filesystem)
@pytest.fixture
def async_foo_task_with_result_storage(async_foo_task, local_filesystem):
    """``async_foo_task`` configured to store results on the local filesystem block."""
    return async_foo_task.with_options(result_storage=local_filesystem)
async def test_task_submission_with_parameters_uses_default_storage(
    foo_task, prefect_client
):
    """Deferred submission without explicit storage falls back to the default."""
    foo_task_without_result_storage = foo_task.with_options(result_storage=None)
    task_run_future = foo_task_without_result_storage.apply_async((42,))
    task_run = await prefect_client.read_task_run(task_run_future.task_run_id)
    result_store = await result_store_from_task(foo_task)
    # Raises if the parameters were not written to the default storage
    await result_store.read_parameters(task_run.state.state_details.task_parameters_id)
async def test_task_submission_with_parameters_reuses_default_storage_block(
    foo_task: Task, tmp_path: Path, prefect_client
):
    """Two submissions share the configured default storage block unchanged."""
    with temporary_settings(
        {
            PREFECT_TASK_SCHEDULING_DEFAULT_STORAGE_BLOCK: "local-file-system/my-tasks",
        }
    ):
        block = LocalFileSystem(basepath=tmp_path / "some-storage")
        await block.save("my-tasks", overwrite=True)
        foo_task_without_result_storage = foo_task.with_options(result_storage=None)
        task_run_future_a = foo_task_without_result_storage.apply_async((42,))
        # The configured block must not be replaced by the first submission
        storage_before = await Block.load("local-file-system/my-tasks")
        assert isinstance(storage_before, LocalFileSystem)
        assert storage_before.basepath == str(tmp_path / "some-storage")
        foo_task_without_result_storage = foo_task.with_options(result_storage=None)
        task_run_future_b = foo_task_without_result_storage.apply_async((24,))
        storage_after = await Block.load("local-file-system/my-tasks")
        assert isinstance(storage_after, LocalFileSystem)
        result_store = await result_store_from_task(foo_task)
        task_run_a = await prefect_client.read_task_run(task_run_future_a.task_run_id)
        task_run_b = await prefect_client.read_task_run(task_run_future_b.task_run_id)
        # Both runs' parameters are readable from the same storage block
        assert await result_store.read_parameters(
            task_run_a.state.state_details.task_parameters_id
        ) == {"parameters": {"x": 42}, "context": mock.ANY}
        assert await result_store.read_parameters(
            task_run_b.state.state_details.task_parameters_id
        ) == {"parameters": {"x": 24}, "context": mock.ANY}
async def test_task_submission_creates_a_scheduled_task_run(
    foo_task_with_result_storage, prefect_client
):
    """apply_async creates a SCHEDULED, deferred run with stored parameters."""
    task_run_future = foo_task_with_result_storage.apply_async((42,))
    task_run = await prefect_client.read_task_run(task_run_future.task_run_id)
    assert task_run.state.is_scheduled()
    assert task_run.state.state_details.deferred is True
    result_store = await result_store_from_task(foo_task_with_result_storage)
    parameters = await result_store.read_parameters(
        task_run.state.state_details.task_parameters_id
    )
    assert parameters == {"parameters": {"x": 42}, "context": mock.ANY}
async def test_sync_task_not_awaitable_in_async_context(foo_task, prefect_client):
    """A sync task deferred from async code still schedules correctly."""
    task_run_future = foo_task.apply_async((42,))
    task_run = await prefect_client.read_task_run(task_run_future.task_run_id)
    assert task_run.state.is_scheduled()
    result_store = await result_store_from_task(foo_task)
    parameters = await result_store.read_parameters(
        task_run.state.state_details.task_parameters_id
    )
    assert parameters == {"parameters": {"x": 42}, "context": mock.ANY}
async def test_async_task_submission_creates_a_scheduled_task_run(
    async_foo_task_with_result_storage, prefect_client
):
    """Deferring an async task creates a SCHEDULED run with stored parameters."""
    task_run_future = async_foo_task_with_result_storage.apply_async((42,))
    task_run = await prefect_client.read_task_run(task_run_future.task_run_id)
    assert task_run.state.is_scheduled()
    result_store = await result_store_from_task(async_foo_task_with_result_storage)
    parameters = await result_store.read_parameters(
        task_run.state.state_details.task_parameters_id
    )
    assert parameters == {"parameters": {"x": 42}, "context": mock.ANY}
async def test_scheduled_tasks_are_enqueued_server_side(
    foo_task_with_result_storage: Task,
    in_memory_prefect_client: "PrefectClient",
    monkeypatch,
):
    """A deferred run also lands in the server-side task queue, equal to the client copy."""
    # Need to mock `get_client` to return the in-memory client because we are directly inspecting
    # changes in the server-side task queue. Ideally, we'd be able to inspect the task queue via
    # the REST API for this test, but that's not currently possible.
    # TODO: Add ways to inspect the task queue via the REST API
    monkeypatch.setattr(prefect.tasks, "get_client", lambda: in_memory_prefect_client)
    task_run_future = foo_task_with_result_storage.apply_async((42,))
    task_run = await in_memory_prefect_client.read_task_run(task_run_future.task_run_id)
    client_run: TaskRun = task_run
    assert client_run.state.is_scheduled()
    enqueued_run: ServerTaskRun = await TaskQueue.for_key(client_run.task_key).get()
    # The server-side task run in the queue should be the same as the one returned
    # to the client, but some of the calculated fields will be populated server-side
    # after orchestration in a way that differs by microseconds, or the
    # created/updated dates are populated.
    assert client_run.estimated_start_time_delta is not None
    assert enqueued_run.estimated_start_time_delta is not None
    assert (
        client_run.estimated_start_time_delta - enqueued_run.estimated_start_time_delta
        < timedelta(seconds=10)
    )
    client_run.estimated_start_time_delta = enqueued_run.estimated_start_time_delta
    enqueued_run_dict = enqueued_run.model_dump()
    client_run_dict = client_run.model_dump()
    client_run_dict["state"].pop("created")
    client_run_dict["state"].pop("updated")
    assert enqueued_run_dict == client_run_dict
async def test_tasks_are_not_enqueued_server_side_when_executed_directly(
    foo_task: Task,
):
    """Direct execution must not also enqueue the task server-side."""
    # Regression test for https://github.com/PrefectHQ/prefect/issues/13674
    # where executing a task would cause it to be enqueue server-side
    # and executed twice.
    foo_task(x=42)
    with pytest.raises(asyncio.QueueEmpty):
        TaskQueue.for_key(foo_task.task_key).get_nowait()
@pytest.fixture
async def prefect_client() -> AsyncGenerator["PrefectClient", None]:
    """An orchestration client scoped to the test."""
    async with get_client() as client:
        yield client
class TestCall:
    """Direct (non-deferred) invocation of async tasks."""
    async def test_call(self, async_foo_task):
        result = await async_foo_task(42)
        assert result == 42

    async def test_call_with_return_state(self, async_foo_task):
        state = await async_foo_task(42, return_state=True)
        assert state.is_completed()
        assert await state.result() == 42
class TestMap:
    """Deferred ``Task.map`` submissions and their stored parameters."""
    async def test_map(self, async_foo_task):
        task_runs = async_foo_task.map([1, 2, 3], deferred=True)
        assert len(task_runs) == 3
        result_store = await result_store_from_task(async_foo_task)
        for i, task_run in enumerate(task_runs):
            assert task_run.state.is_scheduled()
            assert await result_store.read_parameters(
                task_run.state.state_details.task_parameters_id
            ) == {"parameters": {"x": i + 1}, "context": mock.ANY}

    async def test_map_with_implicitly_unmapped_kwargs(self):
        # A plain scalar kwarg is broadcast to every mapped run
        @task
        def bar(x: int, unmappable: int) -> Tuple[int, int]:
            return (x, unmappable)

        task_runs = bar.map([1, 2, 3], unmappable=42, deferred=True)
        assert len(task_runs) == 3
        result_store = await result_store_from_task(bar)
        for i, task_run in enumerate(task_runs):
            assert task_run.state.is_scheduled()
            assert await result_store.read_parameters(
                task_run.state.state_details.task_parameters_id
            ) == {"parameters": {"x": i + 1, "unmappable": 42}, "context": mock.ANY}

    async def test_async_map_with_implicitly_unmapped_kwargs(self):
        @task
        async def bar(x: int, unmappable: int) -> Tuple[int, int]:
            return (x, unmappable)

        task_runs = bar.map([1, 2, 3], unmappable=42, deferred=True)
        assert len(task_runs) == 3
        result_store = await result_store_from_task(bar)
        for i, task_run in enumerate(task_runs):
            assert task_run.state.is_scheduled()
            assert await result_store.read_parameters(
                task_run.state.state_details.task_parameters_id
            ) == {"parameters": {"x": i + 1, "unmappable": 42}, "context": mock.ANY}

    async def test_map_with_explicit_unmapped_kwargs(self):
        # unmapped(...) keeps an iterable argument from being mapped over
        @task
        def bar(x: int, mappable: Iterable) -> Tuple[int, Iterable]:
            return (x, mappable)

        task_runs = bar.map(
            [1, 2, 3], mappable=unmapped(["some", "iterable"]), deferred=True
        )
        assert len(task_runs) == 3
        result_store = await result_store_from_task(bar)
        for i, task_run in enumerate(task_runs):
            assert task_run.state.is_scheduled()
            assert await result_store.read_parameters(
                task_run.state.state_details.task_parameters_id
            ) == {
                "parameters": {"x": i + 1, "mappable": ["some", "iterable"]},
                "context": mock.ANY,
            }

    async def test_async_map_with_explicit_unmapped_kwargs(self):
        @task
        async def bar(x: int, mappable: Iterable) -> Tuple[int, Iterable]:
            return (x, mappable)

        task_runs = bar.map(
            [1, 2, 3], mappable=unmapped(["some", "iterable"]), deferred=True
        )
        assert len(task_runs) == 3
        result_store = await result_store_from_task(bar)
        for i, task_run in enumerate(task_runs):
            assert task_run.state.is_scheduled()
            assert await result_store.read_parameters(
                task_run.state.state_details.task_parameters_id
            ) == {
                "parameters": {"x": i + 1, "mappable": ["some", "iterable"]},
                "context": mock.ANY,
            }
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@test_background_tasks.py@.PATH_END.py
|
{
"filename": "_showticksuffix.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermapbox/marker/colorbar/_showticksuffix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``showticksuffix`` property of
    ``scattermapbox.marker.colorbar``.

    Accepts one of "all", "first", "last" or "none".
    """
    def __init__(
        self,
        plotly_name="showticksuffix",
        parent_name="scattermapbox.marker.colorbar",
        **kwargs,
    ):
        super(ShowticksuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", ["all", "first", "last", "none"]),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermapbox@marker@colorbar@_showticksuffix.py@.PATH_END.py
|
{
"filename": "_lightposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/volume/_lightposition.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Lightposition(_BaseTraceHierarchyType):
    """Compound hierarchy object for ``volume.lightposition`` holding the
    numeric ``x``, ``y`` and ``z`` components of the light position."""

    # class properties
    # --------------------
    _parent_path_str = "volume"
    _path_str = "volume.lightposition"
    _valid_props = {"x", "y", "z"}
    # x
    # -
    @property
    def x(self):
        """
        Numeric vector, representing the X coordinate for each vertex.
        The 'x' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]
        Returns
        -------
        int|float
        """
        return self["x"]
    @x.setter
    def x(self, val):
        self["x"] = val
    # y
    # -
    @property
    def y(self):
        """
        Numeric vector, representing the Y coordinate for each vertex.
        The 'y' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]
        Returns
        -------
        int|float
        """
        return self["y"]
    @y.setter
    def y(self, val):
        self["y"] = val
    # z
    # -
    @property
    def z(self):
        """
        Numeric vector, representing the Z coordinate for each vertex.
        The 'z' property is a number and may be specified as:
          - An int or float in the interval [-100000, 100000]
        Returns
        -------
        int|float
        """
        return self["z"]
    @z.setter
    def z(self, val):
        self["z"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        x
            Numeric vector, representing the X coordinate for each
            vertex.
        y
            Numeric vector, representing the Y coordinate for each
            vertex.
        z
            Numeric vector, representing the Z coordinate for each
            vertex.
        """
    def __init__(self, arg=None, x=None, y=None, z=None, **kwargs):
        """
        Construct a new Lightposition object
        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.volume.Lightposition`
        x
            Numeric vector, representing the X coordinate for each
            vertex.
        y
            Numeric vector, representing the Y coordinate for each
            vertex.
        z
            Numeric vector, representing the Z coordinate for each
            vertex.
        Returns
        -------
        Lightposition
        """
        super(Lightposition, self).__init__("lightposition")
        # Internal fast path: when a parent is supplied directly no
        # further validation or population is performed.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # Shallow-copy so popping consumed keys below does not
            # mutate the caller's dict.
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.volume.Lightposition
constructor must be a dict or
an instance of :class:`plotly.graph_objs.volume.Lightposition`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in arg.
        _v = arg.pop("x", None)
        _v = x if x is not None else _v
        if _v is not None:
            self["x"] = _v
        _v = arg.pop("y", None)
        _v = y if y is not None else _v
        if _v is not None:
            self["y"] = _v
        _v = arg.pop("z", None)
        _v = z if z is not None else _v
        if _v is not None:
            self["z"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@volume@_lightposition.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "simonsobs/socs",
"repo_path": "socs_extracted/socs-main/socs/agents/labjack/__init__.py",
"type": "Python"
}
|
simonsobsREPO_NAMEsocsPATH_START.@socs_extracted@socs-main@socs@agents@labjack@__init__.py@.PATH_END.py
|
|
{
"filename": "itep.py",
"repo_name": "astrocatalogs/supernovae",
"repo_path": "supernovae_extracted/supernovae-master/tasks/itep.py",
"type": "Python"
}
|
"""Import tasks for ITEP.
Import tasks for the Sternberg Astronomical Institute's Supernova Light
Curve Catalog, from the ITEP-SAI group.
"""
import csv
import os
import re
from collections import OrderedDict
from html import unescape
from astrocats.catalog.utils import jd_to_mjd, pbar, uniq_cdl
from astrocats.catalog.photometry import PHOTOMETRY
from decimal import Decimal
from ..supernova import SUPERNOVA
def do_itep(catalog):
    """Import data from ITEP.

    Parses the Sternberg Astronomical Institute (ITEP-SAI) supernova
    light-curve catalogue and adds its photometry to ``catalog``.

    :param catalog: the astrocats catalog object being populated.
    """
    task_str = catalog.get_current_task_str()
    # Bibcodes whose photometry should not be imported from this catalogue.
    itepignoresources = [
        '2004ApJ...602..571B', '2013NewA...20...30M', '1999AJ....117..707R',
        '2006AJ....131..527J', '1994AJ....108.2233W', '1993AJ....105.2236S',
        '1995AJ....110.2868B', '2004ApJ...616..339W', '2002ApJ...573..144D',
        '2000MNRAS.318.1093F', '2005MNRAS.360..950P', '2004A&A...426..963E',
        '1999AJ....117.1175S', '1999MNRAS.305..811B', '2001MNRAS.321..254S',
        '1995A&A...293..723C', '2004AJ....127.1664K', '2001AJ....122.1616K',
        '2004MNRAS.349.1344A', '2006AJ....131.1639K', '2004AJ....128.3034K']
    # Events whose ITEP photometry should be skipped entirely.
    itepignorephot = ['SN2006gy', 'SN1999aw', 'SN1995N']
    needsbib = []
    # Build the reference-string -> bibcode map; the refs file alternates
    # bibcode and reference lines.
    with open(os.path.join(catalog.get_current_task_repo(),
                           'itep-refs.txt'), 'r') as refs_file:
        refrep = refs_file.read().splitlines()
    refrepf = dict(list(zip(refrep[1::2], refrep[::2])))
    fname = os.path.join(catalog.get_current_task_repo(),
                         'itep-lc-cat-28dec2015.txt')
    tsvin = list(csv.reader(open(fname, 'r'),
                            delimiter='|', skipinitialspace=True))
    curname = ''
    for rr, row in enumerate(pbar(tsvin, task_str)):
        # Skip the two header rows and any malformed (short) rows.
        if rr <= 1 or len(row) < 7:
            continue
        oldname = 'SN' + row[0].strip()
        mjd = str(jd_to_mjd(Decimal(row[1].strip())))
        band = row[2].strip()
        magnitude = row[3].strip()
        e_magnitude = row[4].strip()
        reference = row[6].strip().strip(',')
        if curname != oldname:
            # First row for a new event: create the entry and attach the
            # secondary (catalogue) source plus basic metadata.
            curname = oldname
            name = catalog.add_entry(oldname)
            sec_reference = ('Sternberg Astronomical Institute '
                             'Supernova Light Curve Catalogue')
            sec_refurl = 'http://dau.itep.ru/sn/node/72'
            sec_source = catalog.entries[name].add_source(
                name=sec_reference, url=sec_refurl, secondary=True)
            catalog.entries[name].add_quantity(
                SUPERNOVA.ALIAS, oldname, sec_source)
            year = re.findall(r'\d+', name)[0]
            catalog.entries[name].add_quantity(
                SUPERNOVA.DISCOVER_DATE, year, sec_source)
        # BUGFIX: reset the bibcode on every row. Previously `bibcode`
        # kept its value from an earlier row (and was unbound if the very
        # first data row had no known reference), so the ignore check
        # below could misfire or raise a NameError.
        bibcode = ''
        if reference in refrepf:
            bibcode = unescape(refrepf[reference])
            source = catalog.entries[name].add_source(bibcode=bibcode)
        else:
            needsbib.append(reference)
            source = catalog.entries[name].add_source(
                name=reference) if reference else ''
        if oldname in itepignorephot or bibcode in itepignoresources:
            continue
        photodict = {
            PHOTOMETRY.TIME: mjd,
            PHOTOMETRY.U_TIME: 'MJD',
            PHOTOMETRY.MAGNITUDE: magnitude,
            PHOTOMETRY.SOURCE: uniq_cdl([sec_source, source])
        }
        if e_magnitude:
            photodict[PHOTOMETRY.E_MAGNITUDE] = e_magnitude
        if band.endswith('_SDSS'):
            photodict[PHOTOMETRY.BAND_SET] = 'SDSS'
            photodict[PHOTOMETRY.SYSTEM] = 'SDSS'
            band = band.replace('_SDSS', "'")
        photodict[PHOTOMETRY.BAND] = band
        catalog.entries[name].add_photometry(**photodict)
        if catalog.args.travis and rr >= catalog.TRAVIS_QUERY_LIMIT:
            break

    # Write out references that could use a bibcode.
    needsbib = list(OrderedDict.fromkeys(needsbib))
    with open('../itep-needsbib.txt', 'w') as bib_file:
        # BUGFIX: the format string was '%ss\n', which appended a stray
        # 's' to every reference written out.
        bib_file.writelines(['%s\n' % ii for ii in needsbib])

    catalog.journal_entries()
    return
|
astrocatalogsREPO_NAMEsupernovaePATH_START.@supernovae_extracted@supernovae-master@tasks@itep.py@.PATH_END.py
|
{
"filename": "test_shell_util.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/shell/tests/test_shell_util.py",
"type": "Python"
}
|
###############################################################################
#
# Crossbar.io Shell
# Copyright (c) typedef int GmbH. Licensed under EUPLv1.2.
#
###############################################################################
from crossbar.shell.util import localnow
def test_localnow():
now = localnow()
assert isinstance(now, str)
class TestClass(object):
def test_one(self):
assert True
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@shell@tests@test_shell_util.py@.PATH_END.py
|
{
"filename": "MakeTable1.ipynb",
"repo_name": "kgullikson88/Companion-Finder",
"repo_path": "Companion-Finder_extracted/Companion-Finder-master/MakeTable1.ipynb",
"type": "Jupyter Notebook"
}
|
# Making Table 1 for the paper
Table 1 contains the radial velocity measurements shifted to the reference frame of the binary system velocity. Therefore I need a bit of code to make the table.
```python
import numpy as np
import pandas as pd
import MassRatio_Fitter
import matplotlib.pyplot as plt
%matplotlib inline
```
```python
# Read in the data and format the time column to 2 decimal places
t, rv1, rv1_err, rv2, rv2_err = np.loadtxt('data/rv_data.txt')
rv_df = pd.DataFrame(data=dict(t=t, rv1=rv1, rv1_err=rv1_err, rv2=rv2, rv2_err=rv2_err,
rv1_raw=rv1, rv1_err_raw=rv1_err, rv2_raw=rv2, rv2_err_raw=rv2_err))
rv_df = rv_df.convert_objects()
rv_df['JD'] = rv_df['t'].map(lambda t: '{:.2f}'.format(t))
rv_df.head()
```
<div style="max-height:1000px;max-width:1500px;overflow:auto;">
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>rv1</th>
<th>rv1_err</th>
<th>rv1_err_raw</th>
<th>rv1_raw</th>
<th>rv2</th>
<th>rv2_err</th>
<th>rv2_err_raw</th>
<th>rv2_raw</th>
<th>t</th>
<th>JD</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>1.92651</td>
<td>0.01275</td>
<td>0.01275</td>
<td>1.92651</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2451809.659569</td>
<td>2451809.66</td>
</tr>
<tr>
<th>1</th>
<td>1.92852</td>
<td>0.01427</td>
<td>0.01427</td>
<td>1.92852</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2451809.673975</td>
<td>2451809.67</td>
</tr>
<tr>
<th>2</th>
<td>1.84140</td>
<td>0.01186</td>
<td>0.01186</td>
<td>1.84140</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2452142.680492</td>
<td>2452142.68</td>
</tr>
<tr>
<th>3</th>
<td>2.43256</td>
<td>0.01140</td>
<td>0.01140</td>
<td>2.43256</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2453319.639237</td>
<td>2453319.64</td>
</tr>
<tr>
<th>4</th>
<td>2.55912</td>
<td>0.00992</td>
<td>0.00992</td>
<td>2.55912</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2453585.853915</td>
<td>2453585.85</td>
</tr>
</tbody>
</table>
</div>
```python
# Subtract the primary velocity from the secondary velocities to get it in an inertial frame
samples = np.load('data/SB2_samples.npy')
K1, K2, P, T0, w, e, dv1, dv2, lnf, noise = np.median(samples, axis=0)
rv1_pred = MassRatio_Fitter.get_rv(T0=T0, P=P, e=e, K1=K1, w=w, t=rv_df['t'].values)
rv_df['rv2'] += rv1_pred
```
```python
# Apply the additive and multiplicative constants that we fit
rv_df['rv1'] -= dv1
rv_df['rv2'] -= dv2
rv_df['rv2_err'] *= np.exp(lnf/2.0)
rv_df.head()
```
<div style="max-height:1000px;max-width:1500px;overflow:auto;">
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>rv1</th>
<th>rv1_err</th>
<th>rv1_err_raw</th>
<th>rv1_raw</th>
<th>rv2</th>
<th>rv2_err</th>
<th>rv2_err_raw</th>
<th>rv2_raw</th>
<th>t</th>
<th>JD</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>-2.174239</td>
<td>0.01275</td>
<td>0.01275</td>
<td>1.92651</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2451809.659569</td>
<td>2451809.66</td>
</tr>
<tr>
<th>1</th>
<td>-2.172229</td>
<td>0.01427</td>
<td>0.01427</td>
<td>1.92852</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2451809.673975</td>
<td>2451809.67</td>
</tr>
<tr>
<th>2</th>
<td>-2.259349</td>
<td>0.01186</td>
<td>0.01186</td>
<td>1.84140</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2452142.680492</td>
<td>2452142.68</td>
</tr>
<tr>
<th>3</th>
<td>-1.668189</td>
<td>0.01140</td>
<td>0.01140</td>
<td>2.43256</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2453319.639237</td>
<td>2453319.64</td>
</tr>
<tr>
<th>4</th>
<td>-1.541629</td>
<td>0.00992</td>
<td>0.00992</td>
<td>2.55912</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>NaN</td>
<td>2453585.853915</td>
<td>2453585.85</td>
</tr>
</tbody>
</table>
</div>
```python
# Check to make sure it looks right
plt.errorbar(rv_df['t'], rv_df['rv1'], yerr=rv_df['rv1_err'], fmt='k^')
plt.errorbar(rv_df['t'], rv_df['rv2'], yerr=rv_df['rv2_err'], fmt='ro')
```
<Container object of 3 artists>

```python
# Cut the measurements down to 3 sig figs
rv_df['rv1'] = rv_df['rv1'].map(lambda v: '{:.3f}'.format(float(v)))
rv_df['rv1_err'] = rv_df['rv1_err'].map(lambda v: '{:.3f}'.format(float(v)))
rv_df['rv2'] = rv_df['rv2'].map(lambda v: '{:.2f}'.format(float(v)) if pd.notnull(v) else v)
rv_df['rv2_err'] = rv_df['rv2_err'].map(lambda v: '{:.2f}'.format(float(v)) if pd.notnull(v) else v)
rv_df['rv1_raw'] = rv_df['rv1_raw'].map(lambda v: '{:.3f}'.format(float(v)))
rv_df['rv1_err_raw'] = rv_df['rv1_err_raw'].map(lambda v: '{:.3f}'.format(float(v)))
rv_df['rv2_raw'] = rv_df['rv2_raw'].map(lambda v: '{:.2f}'.format(float(v)) if pd.notnull(v) else v)
rv_df['rv2_err_raw'] = rv_df['rv2_err_raw'].map(lambda v: '{:.2f}'.format(float(v)) if pd.notnull(v) else v)
```
```python
# Print out the table. Copy-paste into latex.
print rv_df[['JD', 'rv1_raw', 'rv1', 'rv1_err', 'rv2_raw', 'rv2', 'rv2_err']].to_latex(header=True, index=False, na_rep='\\nodata', escape=False)
```
\begin{tabular}{lllllll}
\toprule
JD & rv1_raw & rv1 & rv1_err & rv2_raw & rv2 & rv2_err \\
\midrule
2451809.66 & 1.927 & -2.174 & 0.013 & \nodata & \nodata & \nodata \\
2451809.67 & 1.929 & -2.172 & 0.014 & \nodata & \nodata & \nodata \\
2452142.68 & 1.841 & -2.259 & 0.012 & \nodata & \nodata & \nodata \\
2453319.64 & 2.433 & -1.668 & 0.011 & \nodata & \nodata & \nodata \\
2453585.85 & 2.559 & -1.542 & 0.010 & \nodata & \nodata & \nodata \\
2453585.88 & 2.550 & -1.551 & 0.011 & \nodata & \nodata & \nodata \\
2453634.64 & 2.654 & -1.446 & 0.011 & \nodata & \nodata & \nodata \\
2453635.62 & 2.554 & -1.547 & 0.009 & \nodata & \nodata & \nodata \\
2453655.64 & 2.711 & -1.390 & 0.009 & \nodata & \nodata & \nodata \\
2453655.64 & 2.780 & -1.321 & 0.027 & \nodata & \nodata & \nodata \\
2453689.54 & 2.665 & -1.436 & 0.008 & \nodata & \nodata & \nodata \\
2453907.85 & 2.960 & -1.141 & 0.011 & \nodata & \nodata & \nodata \\
2453928.80 & 2.858 & -1.243 & 0.012 & \nodata & \nodata & \nodata \\
2454019.60 & 2.930 & -1.171 & 0.012 & \nodata & \nodata & \nodata \\
2454279.75 & 3.068 & -1.033 & 0.011 & \nodata & \nodata & \nodata \\
2454279.76 & 3.056 & -1.044 & 0.010 & \nodata & \nodata & \nodata \\
2454309.79 & 3.021 & -1.080 & 0.013 & \nodata & \nodata & \nodata \\
2454345.63 & 3.270 & -0.830 & 0.010 & \nodata & \nodata & \nodata \\
2454401.56 & 3.155 & -0.945 & 0.009 & \nodata & \nodata & \nodata \\
2454662.93 & 3.349 & -0.752 & 0.015 & -4.34 & 0.36 & 0.37 \\
2454665.77 & 3.486 & -0.615 & 0.014 & -3.95 & 0.75 & 0.35 \\
2454665.77 & 3.492 & -0.609 & 0.015 & -4.06 & 0.64 & 0.36 \\
2454730.71 & 3.457 & -0.644 & 0.014 & -4.09 & 0.68 & 0.36 \\
2454750.64 & 3.439 & -0.662 & 0.019 & \nodata & \nodata & \nodata \\
2454750.64 & 3.424 & -0.676 & 0.018 & \nodata & \nodata & \nodata \\
2454750.65 & 3.425 & -0.675 & 0.017 & \nodata & \nodata & \nodata \\
2454750.65 & 3.406 & -0.694 & 0.015 & \nodata & \nodata & \nodata \\
2454750.66 & 3.436 & -0.665 & 0.019 & \nodata & \nodata & \nodata \\
2454750.66 & 3.425 & -0.676 & 0.023 & \nodata & \nodata & \nodata \\
2454750.67 & 3.412 & -0.689 & 0.020 & \nodata & \nodata & \nodata \\
2454750.68 & 3.423 & -0.677 & 0.019 & \nodata & \nodata & \nodata \\
2454750.68 & 3.419 & -0.681 & 0.019 & \nodata & \nodata & \nodata \\
2455100.57 & 3.875 & -0.226 & 0.016 & -5.14 & 0.04 & 0.44 \\
2455100.58 & 3.891 & -0.210 & 0.014 & -5.02 & 0.16 & 0.44 \\
2455398.75 & 4.209 & 0.108 & 0.015 & -6.49 & -0.89 & 0.51 \\
2455790.72 & 4.977 & 0.876 & 0.021 & -8.65 & -2.34 & 0.67 \\
2455869.58 & 5.211 & 1.111 & 0.017 & -8.14 & -1.66 & 0.60 \\
2455910.57 & 5.321 & 1.221 & 0.018 & -8.40 & -1.82 & 0.63 \\
2455992.02 & 5.538 & 1.437 & 0.012 & -9.01 & -2.22 & 0.65 \\
2456016.93 & 5.659 & 1.558 & 0.014 & -9.23 & -2.37 & 0.62 \\
2456106.78 & 5.784 & 1.683 & 0.015 & -10.36 & -3.24 & 0.67 \\
2456138.84 & 5.944 & 1.844 & 0.020 & -10.73 & -3.51 & 0.73 \\
2456145.65 & 5.947 & 1.846 & 0.025 & -11.38 & -4.14 & 0.80 \\
2456145.66 & 5.929 & 1.828 & 0.018 & -11.43 & -4.19 & 0.79 \\
2456145.66 & 5.965 & 1.864 & 0.018 & -11.15 & -3.91 & 0.79 \\
2456173.73 & 5.955 & 1.854 & 0.018 & -10.82 & -3.48 & 0.75 \\
2456401.97 & 6.964 & 2.864 & 0.014 & -14.11 & -5.87 & 0.69 \\
2456401.97 & 6.941 & 2.841 & 0.012 & -14.39 & -6.15 & 0.68 \\
2456433.74 & 7.238 & 3.138 & 0.013 & -14.54 & -6.14 & 0.62 \\
2456433.74 & 7.209 & 3.108 & 0.012 & -14.61 & -6.21 & 0.65 \\
2456435.87 & 7.208 & 3.108 & 0.015 & -14.73 & -6.33 & 0.64 \\
2456435.87 & 7.205 & 3.104 & 0.015 & -15.08 & -6.68 & 0.64 \\
2456461.87 & 7.358 & 3.257 & 0.012 & -14.96 & -6.42 & 0.63 \\
2456461.88 & 7.351 & 3.250 & 0.015 & -14.65 & -6.11 & 0.65 \\
2456461.88 & 7.326 & 3.225 & 0.016 & -14.60 & -6.06 & 0.61 \\
2456465.80 & 7.297 & 3.196 & 0.014 & -14.74 & -6.18 & 0.53 \\
2456497.86 & 7.574 & 3.473 & 0.019 & -15.86 & -7.13 & 0.73 \\
2456519.62 & 7.765 & 3.664 & 0.015 & -16.78 & -7.93 & 0.59 \\
2456525.66 & 7.725 & 3.624 & 0.017 & -16.27 & -7.38 & 0.64 \\
2456560.58 & 7.812 & 3.711 & 0.013 & -16.32 & -7.22 & 0.90 \\
2456564.59 & 7.781 & 3.680 & 0.015 & -16.18 & -7.05 & 0.86 \\
2456613.55 & 8.089 & 3.988 & 0.016 & -15.96 & -6.51 & 0.91 \\
2456614.58 & 8.139 & 4.038 & 0.012 & -16.52 & -7.06 & 0.85 \\
2456755.98 & 9.308 & 5.208 & 0.014 & -21.44 & -10.84 & 0.74 \\
2456759.97 & 9.366 & 5.265 & 0.015 & -21.91 & -11.28 & 0.76 \\
2456784.84 & 9.603 & 5.502 & 0.017 & -22.56 & -11.69 & 0.81 \\
2456816.67 & 9.895 & 5.794 & 0.014 & -23.36 & -12.17 & 0.74 \\
2456816.67 & 9.907 & 5.806 & 0.015 & -23.68 & -12.49 & 0.72 \\
2456860.73 & 10.402 & 6.301 & 0.016 & -25.64 & -13.97 & 0.88 \\
2456860.73 & 10.421 & 6.321 & 0.015 & -25.77 & -14.11 & 0.83 \\
2456885.62 & 10.607 & 6.507 & 0.015 & -27.23 & -15.29 & 0.84 \\
2456938.63 & 11.189 & 7.089 & 0.016 & -29.30 & -16.77 & 1.03 \\
2456938.64 & 11.173 & 7.072 & 0.015 & -28.99 & -16.45 & 0.96 \\
2457092.02 & 12.114 & 8.013 & 0.015 & -31.57 & -18.09 & 1.00 \\
2457109.85 & 12.077 & 7.976 & 0.015 & -32.79 & -19.40 & 1.10 \\
2457118.96 & 11.987 & 7.886 & 0.016 & -31.14 & -17.82 & 0.90 \\
2457150.92 & 11.685 & 7.584 & 0.017 & \nodata & \nodata & \nodata \\
2457174.96 & 11.267 & 7.167 & 0.017 & -28.58 & -16.13 & 0.81 \\
2457214.83 & 10.240 & 6.139 & 0.017 & -24.68 & -13.16 & 0.82 \\
2457214.84 & 10.253 & 6.152 & 0.016 & -25.36 & -13.85 & 0.90 \\
2457216.73 & 10.220 & 6.119 & 0.016 & -25.13 & -13.66 & 0.86 \\
2457216.73 & 10.228 & 6.128 & 0.015 & -25.15 & -13.69 & 0.80 \\
2457245.60 & 9.302 & 5.201 & 0.016 & -22.21 & -11.52 & 0.77 \\
2457245.61 & 9.299 & 5.199 & 0.016 & -21.85 & -11.16 & 0.72 \\
2457248.61 & 9.338 & 5.237 & 0.017 & -21.66 & -11.05 & 0.70 \\
\bottomrule
\end{tabular}
```python
```
|
kgullikson88REPO_NAMECompanion-FinderPATH_START.@Companion-Finder_extracted@Companion-Finder-master@MakeTable1.ipynb@.PATH_END.py
|
{
"filename": "spinOS_io.py",
"repo_name": "matthiasfabry/spinOS",
"repo_path": "spinOS_extracted/spinOS-master/modules/spinOS_io.py",
"type": "Python"
}
|
"""
Copyright 2020-2024 Matthias Fabry
This file is part of spinOS.
spinOS is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
spinOS is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with spinOS. If not, see <https://www.gnu.org/licenses/>.
Module that handles the loading of the relevant data for the solver.
"""
import numpy as np
def guess_loader(wd: str, guessfile: str) -> dict:
    """
    Parses the guess file and determines values and flags for each guess.

    :param wd: the working directory
    :param guessfile: pathname (relative to wd) pointing to the file
        containing guesses
    :return: dictionary mapping each parameter name to a (value, flag) tuple
    """
    wd = check_slash(wd)
    guesses = np.genfromtxt(wd + guessfile, dtype=None, filling_values=np.nan,
                            usecols=(0, 1, 2), encoding='utf-8')
    # Iterate over every row of the file rather than a hard-coded count of
    # 12, so guess files with a different number of parameters also load.
    guessdict = dict()
    for row in guesses:
        guessdict[row[0]] = (row[1], row[2])
    return guessdict
def guess_saver(wd: str, name: str, guess_dict: dict) -> None:
    """
    Writes the guesses to a text file, one 'param value flag' line each.

    :param wd: working directory
    :param name: file name (without the '.txt' extension)
    :param guess_dict: guesses to save
    """
    wd = check_slash(wd)
    path = wd + name + '.txt'
    with open(path, 'w') as out:
        out.writelines(
            '{} {} {}\n'.format(param, guess[0], str(guess[1]))
            for param, guess in guess_dict.items()
        )
def data_loader(wd: str, filetypes: list, filenames: list) -> dict:
    """
    Loads data from files into a dictionary.

    :param wd: working directory where the files are
    :param filetypes: data types to load, must be 'RV1file', 'RV2file',
        or 'ASfile'
    :param filenames: names of the files in question
    :return: data in a dictionary keyed by 'RV1', 'RV2' and/or 'AS'
    """
    wd = check_slash(wd)
    data_dict = dict()
    for filetype, filename in zip(filetypes, filenames):
        if filetype == 'RV1file':
            data_dict['RV1'] = _load_rv_file(wd + filename)
        elif filetype == 'RV2file':
            data_dict['RV2'] = _load_rv_file(wd + filename)
        elif filetype == 'ASfile':
            data = np.loadtxt(wd + filename)
            data_dict['AS'] = dict()
            data_dict['AS']['hjds'] = data[:, 0]
            data_dict['AS']['majors'] = data[:, 3]
            data_dict['AS']['minors'] = data[:, 4]
            data_dict['AS']['pas'] = data[:, 5]
            data_dict['AS']['eastsorsep'] = data[:, 1]
            data_dict['AS']['northsorpa'] = data[:, 2]
    return data_dict


def _load_rv_file(path: str) -> dict:
    """
    Loads a single radial-velocity data file.

    Columns are HJD, RV and (optionally) the RV error; when the error
    column is absent a 5% dummy error is substituted.

    :param path: full path of the file to load
    :return: dictionary with 'hjds', 'RVs' and 'errors' arrays
    """
    data = np.loadtxt(path)
    rv = dict()
    rv['hjds'] = data[:, 0]
    rv['RVs'] = data[:, 1]
    try:
        rv['errors'] = data[:, 2]
    except IndexError:
        # No error column in the data: fall back to a 5% dummy error.
        # BUGFIX: take the absolute value so negative RVs do not produce
        # negative error bars.
        rv['errors'] = np.abs(data[:, 1]) * 0.05
    return rv
def convert_error_ellipse(major, minor, angle):
    """
    Converts an error ellipse to east and north errors by drawing
    monte-carlo samples along its principal axes and taking the standard
    deviation of the rotated samples in the east and north directions.

    :param major: length of the major axis of the error ellipse
    :param minor: length of the minor axis of the error ellipse
    :param angle: position angle east of north of the major axis
    :return: east and north error
    """
    n_samples = 1000
    # Gaussian scatter along the ellipse's principal axes.
    along_major = major * np.random.randn(n_samples)
    along_minor = minor * np.random.randn(n_samples)
    # Rotate the samples from the ellipse frame into east/north coordinates.
    cos_a = np.cos(angle)
    sin_a = np.sin(angle)
    rotation = np.array([[cos_a, sin_a], [-sin_a, cos_a]])
    east, north = rotation @ np.array([along_major, along_minor])
    return np.std(east), np.std(north)
def check_slash(wd):
    """Return *wd* with a trailing '/' appended, unless *wd* is empty or
    already ends with one."""
    if not wd:
        return wd
    return wd if wd.endswith('/') else wd + '/'
|
matthiasfabryREPO_NAMEspinOSPATH_START.@spinOS_extracted@spinOS-master@modules@spinOS_io.py@.PATH_END.py
|
{
"filename": "noxfile.py",
"repo_name": "PlasmaPy/PlasmaPy",
"repo_path": "PlasmaPy_extracted/PlasmaPy-main/noxfile.py",
"type": "Python"
}
|
"""
Nox is an automation tool used by PlasmaPy to run tests, build
documentation, and perform other checks. Nox sessions are defined in
noxfile.py.
Running `nox` without arguments will run tests with the version of
Python that `nox` is installed under, skipping slow tests. To invoke a
nox session, enter the top-level directory of this repository and run
`nox -s "<session>"`, where <session> is replaced with the name of the
session. To list available sessions, run `nox -l`.
The tests can be run with the following options:
* "all": run all tests
* "skipslow": run tests, except tests decorated with `@pytest.mark.slow`
* "cov": run all tests with code coverage checks
* "lowest-direct" : run all tests with lowest version of direct dependencies
Doctests are run only for the most recent versions of Python and
PlasmaPy dependencies, and not when code coverage checks are performed.
Some of the checks require the most recent supported version of Python
to be installed.
"""
# Documentation: https://nox.thea.codes
import os
import pathlib
import platform
import re
import sys
from typing import Literal
import nox
# SPEC 0 indicates that scientific Python packages should support
# versions of Python that have been released in the last 3 years, or
# equivalently the most three recently released versions of Python.
# The minimum version of Python should be incremented immediately
# following the first release after October of each year.
supported_python_versions: tuple[str, ...] = ("3.11", "3.12", "3.13")
supported_operating_systems: tuple[str, ...] = ("linux", "macos", "windows")
# NOTE: lexicographic max/min — correct while all minor versions have the
# same number of digits (e.g. breaks at a hypothetical "3.100").
maxpython = max(supported_python_versions)
minpython = min(supported_python_versions)
# Version of the interpreter nox itself is running under.
current_python = f"{sys.version_info.major}.{sys.version_info.minor}"
# The documentation should always be built using the same version of
# Python, which should be the latest version of Python supported by Read
# the Docs. Because Read the Docs takes some time to support new
# releases of Python, we should not link docpython to maxpython.
docpython = "3.12"
# Default when running bare `nox`: skip slow tests on the current interpreter.
nox.options.sessions: list[str] = [f"tests-{current_python}(skipslow)"]
nox.options.default_venv_backend = "uv|virtualenv"
# Truthy on CI services that set the CI environment variable.
running_on_ci = os.getenv("CI")
def _get_requirements_filepath(
category: Literal["docs", "tests", "all"],
version: Literal["3.11", "3.12", "3.13"],
resolution: Literal["highest", "lowest-direct", "lowest"] = "highest",
os_platform: Literal["linux", "macos", "windows"] | None = None,
) -> str:
"""
Return the file path to the requirements file.
Parameters
----------
category : str
The name of the optional dependency set, as defined in
:file:`pyproject.toml`.
version : str
The supported version of Python.
resolution : str
The resolution strategy used by uv.
os_platform : str, optional
The name of the target platform. By default, it will attempt to find the
requirement file associated with the current platform.
"""
if os_platform is None:
current_platform = platform.system().lower()
os_platform = (
current_platform
if current_platform in supported_operating_systems
else "linux"
)
requirements_directory = "ci_requirements"
specifiers = [category, version, os_platform]
if resolution != "highest":
specifiers.append(resolution)
return f"{requirements_directory}/{'-'.join(specifiers)}.txt"
@nox.session
def requirements(session) -> None:
    """
    Regenerate the pinned requirements files used in CI.

    This session uses `uv pip compile` to regenerate the pinned
    requirements files in `ci_requirements/` for use by the Nox sessions
    for running tests, building documentation, and performing other
    continuous integration checks.
    """
    session.install("uv")

    # (category, python version, resolution strategy) combinations to pin.
    combos: list[tuple[str, str, str]] = [
        ("tests", version, "highest") for version in supported_python_versions
    ]
    combos += [
        ("tests", minpython, "lowest-direct"),
        ("docs", maxpython, "highest"),
        ("all", maxpython, "highest"),
    ]

    # Flags selecting the optional dependency set for each category.
    extras_for: dict[str, tuple[str, ...]] = {
        "all": ("--all-extras",),
        "docs": ("--extra", "docs"),
        "tests": ("--extra", "tests"),
    }

    base_command: tuple[str, ...] = (
        "python",
        "-m",
        "uv",
        "pip",
        "compile",
        "pyproject.toml",
        "--upgrade",
        "--quiet",
        "--custom-compile-command",  # defines command to be included in file header
        "nox -s requirements",
    )

    for os_platform in supported_operating_systems:
        for category, version, resolution in combos:
            outfile = _get_requirements_filepath(
                category, version, resolution, os_platform
            )
            session.run(
                *base_command,
                "--python-version",
                version,
                *extras_for[category],
                "--output-file",
                outfile,
                "--resolution",
                resolution,
                *session.posargs,
                "--python-platform",
                os_platform,
            )
# Base pytest invocation shared by the test sessions; -n and --dist are
# pytest-xdist options for parallel execution grouped by file.
pytest_command: tuple[str, ...] = (
    "pytest",
    "--pyargs",
    "--durations=5",
    "--tb=short",
    "-n=auto",
    "--dist=loadfile",
)
# Extra flags that enable doctest collection.
with_doctests: tuple[str, ...] = ("--doctest-modules", "--doctest-continue-on-failure")
# Extra flags for code-coverage runs.
with_coverage: tuple[str, ...] = (
    "--cov=plasmapy",
    "--cov-report=xml",
    "--cov-config=pyproject.toml",
    "--cov-append",
    "--cov-report",
    "xml:coverage.xml",
)
# Deselect tests marked as slow.
skipslow: tuple[str, ...] = ("-m", "not slow")
# Parametrized flavors of the `tests` session. NOTE: the *value* strings
# (e.g. "with code coverage") are what the session body compares against;
# the ids (e.g. "cov") only name the generated nox sessions.
test_specifiers: list = [
    nox.param("run all tests", id="all"),
    nox.param("skip slow tests", id="skipslow"),
    nox.param("with code coverage", id="cov"),
    nox.param("lowest-direct", id="lowest-direct"),
]
@nox.session(python=supported_python_versions)
@nox.parametrize("test_specifier", test_specifiers)
def tests(session: nox.Session, test_specifier: nox._parametrize.Param) -> None:
    """
    Run tests with pytest.

    The parametrized ``test_specifier`` carries the param *value* string
    ("run all tests", "skip slow tests", "with code coverage",
    "lowest-direct"), not the nox session id.
    """
    resolution = "lowest-direct" if test_specifier == "lowest-direct" else "highest"
    requirements = _get_requirements_filepath(
        category="tests",
        version=session.python,
        resolution=resolution,
    )
    options: list[str] = []
    if test_specifier == "skip slow tests":
        options += skipslow
    if test_specifier == "with code coverage":
        options += with_coverage
    # Doctests are only run with the most recent versions of Python and
    # other dependencies because there may be subtle differences in the
    # output between different versions of Python, NumPy, and Astropy.
    # They are also skipped for coverage and lowest-direct runs.
    # BUGFIX: the comparison must use the param *value* "with code
    # coverage", not the nox id "cov"; comparing against "cov" silently
    # enabled doctests during coverage runs.
    if session.python == maxpython and test_specifier not in {
        "lowest-direct",
        "with code coverage",
    }:
        options += with_doctests
    if gh_token := os.getenv("GH_TOKEN"):
        session.env["GH_TOKEN"] = gh_token
    session.install("-r", requirements, ".[tests]")
    session.run(*pytest_command, *options, *session.posargs)
@nox.session(python=maxpython)
@nox.parametrize(
    ["repository"],
    [
        nox.param("numpy", id="numpy"),
        nox.param("https://github.com/astropy/astropy", id="astropy"),
        nox.param("https://github.com/pydata/xarray", id="xarray"),
        nox.param("https://github.com/lmfit/lmfit-py", id="lmfit"),
        nox.param("https://github.com/pandas-dev/pandas", id="pandas"),
    ],
)
def run_tests_with_dev_version_of(session: nox.Session, repository: str) -> None:
    """
    Run tests against the development branch of a dependency.

    Running this session helps us catch problems resulting from breaking
    changes in an upstream dependency before its official release.
    """
    if repository == "numpy":
        # NumPy nightlies come from the scientific-python wheel index.
        # From: https://numpy.org/doc/1.26/dev/depending_on_numpy.html
        session.run_install(
            "uv",
            "pip",
            "install",
            "-U",
            "--pre",
            "--only-binary",
            ":all:",
            "-i",
            "https://pypi.anaconda.org/scientific-python-nightly-wheels/simple",
            "numpy",
        )
    else:
        session.install(f"git+{repository}")
    session.install(".[tests]")
    session.run(*pytest_command, *session.posargs)
# Common Sphinx invocation; --fail-on-warning plus --keep-going reports
# every warning while still failing the build.
sphinx_commands: tuple[str, ...] = (
    "sphinx-build",
    "docs/",
    "docs/build/html",
    "--nitpicky",
    "--fail-on-warning",
    "--keep-going",
    "-q",
)
# Builder-selection flags appended to sphinx_commands.
build_html: tuple[str, ...] = ("--builder", "html")
check_hyperlinks: tuple[str, ...] = ("--builder", "linkcheck")
# Pinned documentation requirements for the newest supported Python.
docs_requirements = _get_requirements_filepath(category="docs", version=maxpython)
# Help text shown when documentation builds run on CI.
doc_troubleshooting_message = """
π Tips for troubleshooting common documentation build failures are in
PlasmaPy's documentation guide at:
π https://docs.plasmapy.org/en/latest/contributing/doc_guide.html#troubleshooting
"""
@nox.session(python=docpython)
def docs(session: nox.Session) -> None:
    """
    Build documentation with Sphinx.

    This session may require installation of pandoc and graphviz.
    """
    if running_on_ci:
        session.debug(doc_troubleshooting_message)
    session.install("-r", docs_requirements, ".[docs]")
    session.run(*sphinx_commands, *build_html, *session.posargs)
    landing_page = (
        pathlib.Path(session.invoked_from) / "docs" / "build" / "html" / "index.html"
    )
    if running_on_ci:
        return
    # For local builds, point the user at the rendered landing page (or
    # note that it was not produced).
    if landing_page.exists():
        session.debug(f"The documentation may be previewed at {landing_page}")
    else:
        session.debug(f"Documentation preview landing page not found: {landing_page}")
@nox.session(python=docpython)
@nox.parametrize(
    ["site", "repository"],
    [
        nox.param("github", "sphinx-doc/sphinx", id="sphinx"),
        nox.param("github", "readthedocs/sphinx_rtd_theme", id="sphinx_rtd_theme"),
        nox.param("github", "spatialaudio/nbsphinx", id="nbsphinx"),
    ],
)
def build_docs_with_dev_version_of(
    session: nox.Session, site: str, repository: str
) -> None:
    """
    Build documentation against the development branch of a dependency.

    The purpose of this session is to catch bugs and breaking changes
    so that they can be fixed or updated earlier rather than later.
    """
    dev_source = f"git+https://{site}.com/{repository}"
    session.install(dev_source, ".[docs]")
    session.run(*sphinx_commands, *build_html, *session.posargs)
# Hint printed in CI when the `linkcheck` session runs; explains how to
# silence false positives via the Sphinx configuration.
LINKCHECK_TROUBLESHOOTING = """
The Sphinx configuration variables `linkcheck_ignore` and
`linkcheck_allowed_redirects` in `docs/conf.py` can be used to specify
hyperlink patterns to be ignored along with allowed redirects. For more
information, see:
π https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-linkcheck_ignore
π https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-linkcheck_allowed_redirects
These variables are in the form of Python regular expressions:
π https://docs.python.org/3/howto/regex.html
"""
@nox.session(python=docpython)
def linkcheck(session: nox.Session) -> None:
    """Check hyperlinks in documentation."""
    if running_on_ci:
        session.debug(LINKCHECK_TROUBLESHOOTING)
    session.install("-r", docs_requirements, ".[docs]")
    # Same Sphinx invocation as the docs session, with the linkcheck builder.
    full_command = (*sphinx_commands, *check_hyperlinks, *session.posargs)
    session.run(*full_command)
# Hint printed in CI before static type checking; points to mypy docs and
# the project's autotyping helper session.
MYPY_TROUBLESHOOTING = """
π‘ To learn more about type hints, check out mypy's cheat sheet at:
https://mypy.readthedocs.io/en/stable/cheat_sheet_py3.html
For more details about specific mypy errors, go to:
π https://mypy.readthedocs.io/en/stable/error_codes.html
πͺ§ Especially difficult errors can be ignored with an inline comment of
the form: `# type: ignore[error]`, where `error` is replaced with the
mypy error code. Please use sparingly!
π To automatically add type hints for common patterns, run:
nox -s 'autotyping(safe)'
"""
@nox.session(python=maxpython)
def mypy(session: nox.Session) -> None:
    """Perform static type checking."""
    if running_on_ci:
        session.debug(MYPY_TROUBLESHOOTING)
    # Check the whole repository; install missing type-stub packages without
    # prompting so the session can run unattended.
    MYPY_COMMAND: tuple[str, ...] = (
        "mypy",
        ".",
        "--install-types",
        "--non-interactive",
        "--show-error-context",
        "--show-error-code-links",
        "--pretty",
    )
    # Use the pinned highest-resolution test requirements so type-checking
    # results are reproducible across environments.
    requirements = _get_requirements_filepath(
        category="tests",
        version=session.python,
        resolution="highest",
    )
    session.install("pip")
    session.install("-r", requirements, ".[tests]")
    session.run(*MYPY_COMMAND, *session.posargs)
@nox.session(name="import")
def try_import(session: nox.Session) -> None:
    """Install PlasmaPy and import it."""
    session.install(".")
    # A bare import is the cheapest smoke test of a fresh installation.
    import_check = ("python", "-c", "import plasmapy")
    session.run(*import_check, *session.posargs)
@nox.session
def validate_requirements(session: nox.Session) -> None:
    """Verify that the pinned requirements are consistent with pyproject.toml."""
    requirements_file = _get_requirements_filepath(
        category="all",
        version=maxpython,
        resolution="highest",
    )
    session.install("uv")
    session.debug(
        "π‘ If this check fails, regenerate the pinned requirements files "
        "with `nox -s requirements` (see `ci_requirements/README.md`)."
    )
    # A dry-run install resolves the full dependency set against the pinned
    # file without modifying the environment; a conflict fails the session.
    session.run(
        "uv",
        "pip",
        "install",
        "-r",
        requirements_file,
        ".[docs,tests]",
        "--dry-run",
    )
@nox.session
def build(session: nox.Session) -> None:
    """Build & verify the source distribution and wheel."""
    session.install("twine", "build")
    # Build each distribution artifact, then let twine validate the results.
    for artifact_flag in ("--sdist", "--wheel"):
        session.run("python", "-m", "build", artifact_flag)
    session.run("twine", "check", "dist/*", *session.posargs)
# Conservative autotyping flags: these add only hints that are very unlikely
# to be wrong, so they are safe to apply automatically (e.g. in CI).
AUTOTYPING_SAFE: tuple[str, ...] = (
    "--none-return",
    "--scalar-return",
    "--annotate-magics",
)
# Aggressive flags: a superset of the safe set that may infer incorrect
# parameter types, so the results require careful code review.
AUTOTYPING_RISKY: tuple[str, ...] = (
    *AUTOTYPING_SAFE,
    "--bool-param",
    "--int-param",
    "--float-param",
    "--str-param",
    "--bytes-param",
    "--annotate-imprecise-magics",
)
@nox.session
@nox.parametrize("final", [nox.param(False, id="draft"), nox.param(True, id="final")])
def changelog(session: nox.Session, final: str) -> None:
    """
    Build the changelog with towncrier.

    - 'final': build the combined changelog for the release, and delete
      the individual changelog entries in `changelog`.
    - 'draft': print the draft changelog to standard output, without
      writing to files.

    When executing this session, provide the version of the release, as
    in this example:

        nox -s 'changelog(final)' -- 2024.7.0
    """
    if len(session.posargs) != 1:
        raise TypeError(
            "Please provide the version of PlasmaPy to be released "
            "(i.e., `nox -s changelog -- 2024.9.0`"
        )
    source_directory = pathlib.Path("./changelog")
    # Materialize the glob before testing it: a generator object is always
    # truthy, so `if final and extraneous_files` could never be skipped and
    # the f-string below would print the generator's repr instead of the
    # matching file names. The pattern is applied relative to
    # `source_directory`, so it must not repeat the "changelog/" prefix.
    extraneous_files = sorted(source_directory.glob("*[0-9]*.*.rst?*"))
    if final and extraneous_files:
        session.error(
            "Please delete the following extraneous files before "
            "proceeding, as the presence of these files may cause "
            f"towncrier errors: {extraneous_files}"
        )
    version = session.posargs[0]
    # Accept calendar versions of the form YYYY.M.PATCH starting in 2024.
    year_pattern = r"(202[4-9]|20[3-9][0-9]|2[1-9][0-9]{2}|[3-9][0-9]{3,})"
    month_pattern = r"(1[0-2]|[1-9])"
    patch_pattern = r"(0?[0-9]|[1-9][0-9])"
    version_pattern = rf"^{year_pattern}\.{month_pattern}\.{patch_pattern}$"
    if not re.match(version_pattern, version):
        raise ValueError(
            "Please provide a version of the form YYYY.M.PATCH, where "
            "YYYY is the year past 2024, M is the one or two digit month, "
            "and PATCH is a non-negative integer."
        )
    session.install(".", "towncrier")
    # A final build consumes the changelog fragments; a draft keeps them.
    options = ("--yes",) if final else ("--draft", "--keep")
    session.run(
        "towncrier",
        "build",
        "--config",
        "pyproject.toml",
        "--dir",
        ".",
        "--version",
        version,
        *options,
    )
    if final:
        # Move the freshly built changelog into the documentation tree.
        original_file = pathlib.Path("./CHANGELOG.rst")
        destination = pathlib.Path(f"./docs/changelog/{version}.rst")
        original_file.rename(destination)
@nox.session
@nox.parametrize(
    "options",
    [
        nox.param(AUTOTYPING_SAFE, id="safe"),
        nox.param(AUTOTYPING_RISKY, id="aggressive"),
    ],
)
def autotyping(session: nox.Session, options: tuple[str, ...]) -> None:
    """
    Automatically add type hints with autotyping.
    The `safe` option generates very few incorrect type hints, and can
    be used in CI. The `aggressive` option may add type hints that are
    incorrect, so please perform a careful code review when using this
    option.
    To check specific files, pass them after a `--`, such as:
        nox -s 'autotyping(safe)' -- noxfile.py
    """
    session.install(".[tests,docs]", "autotyping", "typing_extensions")
    # Default to the main source/tooling trees unless paths follow `--`.
    DEFAULT_PATHS = ("src", "tests", "tools", "*.py", ".github", "docs/*.py")
    paths = session.posargs or DEFAULT_PATHS
    session.run("python", "-m", "autotyping", *options, *paths)
@nox.session
def monkeytype(session: nox.Session) -> None:
    """
    Add type hints to a module based on variable types from running pytest.
    Examples
    --------
    nox -s monkeytype -- plasmapy.particles.atomic
    """
    if not session.posargs:
        session.error(
            "Please add at least one module using a command like: "
            "`nox -s monkeytype -- plasmapy.particles.atomic`"
        )
    session.install(".[tests]")
    session.install("MonkeyType", "pytest-monkeytype", "pre-commit")
    # Reuse an existing trace database when present; otherwise generate one
    # by running the test suite under the pytest-monkeytype plugin.
    database = pathlib.Path("./monkeytype.sqlite3")
    if not database.exists():
        session.log(f"File {database.absolute()} not found. Running MonkeyType.")
        session.run("pytest", f"--monkeytype-output={database.absolute()}")
    else:
        session.log(f"File {database.absolute()} found.")
    for module in session.posargs:
        session.run("monkeytype", "apply", module)
    # Autoformat the newly annotated files so they pass the linters.
    session.run("pre-commit", "run", "ruff", "--all-files")
    session.run("pre-commit", "run", "ruff-format", "--all-files")
    session.log("Please inspect newly added type hints for correctness.")
    session.log("Check new type hints with `nox -s mypy`.")
@nox.session
def cff(session: nox.Session) -> None:
    """Validate CITATION.cff against the metadata standard."""
    session.install("cffconvert")
    validate = ("cffconvert", "--validate")
    session.run(*validate, *session.posargs)
@nox.session
def manifest(session: nox.Session) -> None:
    """
    Check for missing files in MANIFEST.in.
    When run outside of CI, this check may report files that were
    locally created but not included in version control. These false
    positives can be ignored by adding file patterns and paths to
    `ignore` under `[tool.check-manifest]` in `pyproject.toml`.
    """
    # check-manifest compares the sdist contents against version control.
    session.install("check-manifest")
    session.run("check-manifest", *session.posargs)
@nox.session
def lint(session: nox.Session) -> None:
    """Run all pre-commit hooks on all files."""
    session.install("pre-commit")
    # Show diffs on failure so logs reveal exactly what each hook changed.
    hook_args = ("run", "--all-files", "--show-diff-on-failure")
    session.run("pre-commit", *hook_args, *session.posargs)
|
PlasmaPyREPO_NAMEPlasmaPyPATH_START.@PlasmaPy_extracted@PlasmaPy-main@noxfile.py@.PATH_END.py
|
{
"filename": "test_c_implementation_simplified.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/test/suite/compile_tests/test_c_implementation_simplified.py",
"type": "Python"
}
|
import unittest
import numpy
from amuse.support.interface import InCodeComponentImplementation
from amuse.test.amusetest import TestWithMPI
from amuse.test import compile_tools
from amuse.support import exceptions
import os
import time
from amuse.units import nbody_system
from amuse.units import units
from amuse import datamodel
from amuse.rfi.tools import create_c
from amuse.rfi import channel
from amuse.rfi.core import *
codestring = """
#include <stdio.h>
#include <stdlib.h>
int echo_int(int int_in, int * int_out) {
*int_out = int_in;
if(int_in < 0) {
return -1;
} else {
return 0;
}
}
int echo_long_long_int(long long int int_in, long long int * int_out) {
*int_out = int_in;
if(int_in < 0) {
return -1;
} else {
return 0;
}
}
int echo_double(double in, double * out) {
*out = in;
return 0;
}
int echo_float(float in, float * out) {
*out = in;
return 0;
}
int echo_string(char * in, char ** out) {
*out = in;
return 0;
}
int echo_string_int(int inint, char * in, char ** out) {
*out = in;
return 0;
}
int echo_string_two(char * in1, char * in2, char ** out1, char ** out2) {
*out1 = in1;
*out2 = in2;
return 0;
}
int print_string(char * in) {
fprintf(stdout, "%s\\n", in);
return 0;
}
int print_error_string(char * in) {
fprintf(stderr, "%s\\n", in);
return 0;
}
int echo_strings(char ** inout1, char ** inout2) {
char * tmp;
tmp = *inout1;
*inout1 = *inout2;
*inout2 = tmp;
return 0;
}
void echo_array(int * in, int * out, int len) {
int i = 0;
for(i = 0; i < len; i++) {
out[i] = in[i];
}
}
int echo_array_with_result(int * in, int *out, int len) {
int i = 0;
for(i = 0; i < len; i++) {
out[i] = in[i];
}
return -1;
}
int echo_2_int(int * int_in1, int * int_in2, int * int_out1, int * int_out2, int len) {
int i = 0;
for(i = 0; i < len; i++) {
int_out1[i] = int_in1[i];
int_out2[i] = int_in2[i];
}
return len;
}
int echo_3_int(int * i, int * j, int * k, int * l, int * m, int * int_out, int len) {
int x = 0;
for(x = 0; x < len; x++) {
int_out[x] = i;
}
return len;
}
int dummy_3_int(int i, int j, int k) {
return 0;
}
int echo_inout_array_with_result(int * inout, int len) {
int i = 0;
for(i = 0; i < len; i++) {
inout[i] = inout[i] + 10;
}
return 11;
}
int echo_logical(int in, int * out) {
*out = in;
return 0;
}
/*
int echo_string_array(char ** in, char ** out, int len) {
int x = 0;
for(x = 0; x < len; x++) {
out[x] = in[x];
}
return len;
}
*/
"""
class ForTestingInterface(CodeInterface):
    """
    Low-level AMUSE interface to the compiled C worker in ``codestring``.

    Each ``@remote_function`` below mirrors a C function of the same name and
    declares its argument/result dtypes for the AMUSE RPC layer.
    """
    def __init__(self, exefile, **options):
        CodeInterface.__init__(self, exefile, **options)
    # Scalar echo functions; the C side returns -1 for a negative int input.
    @remote_function(can_handle_array=True)
    def echo_int(int_in='int32'):
        returns(int_out='int32')
    @remote_function(can_handle_array=True)
    def echo_long_long_int(int_in='int64'):
        returns(int_out='int64')
    @remote_function(can_handle_array=True)
    def echo_double(double_in='float64'):
        returns(double_out='float64')
    @remote_function(can_handle_array=True)
    def echo_float(float_in='float32'):
        returns(float_out='float32')
    @remote_function(can_handle_array=True)
    def echo_string(string_in='string'):
        returns(string_out='string')
    @remote_function(can_handle_array=True)
    def echo_strings(string_inout1='string', string_inout2='string'):
        returns(string_inout1='string', string_inout2='string')
    # String parameters with default values (e.g. `ins` defaults to 'echo').
    @remote_function(can_handle_array=True)
    def echo_string_int(inint='int32', ins='echo'):
        returns(out='string')
    @remote_function(can_handle_array=True)
    def echo_string_two(in1='s', in2='echo'):
        returns(out1='s', out2='s')
    # must_handle_array functions always receive whole arrays plus a length.
    @remote_function(must_handle_array=True)
    def echo_array(len, int_in='int32'):
        returns(int_out='int32', __result=None)
    @remote_function(must_handle_array=True)
    def echo_array_with_result(len, int_in='int32'):
        returns(int_out='int32')
    # ~ #@legacy_function
    # ~ def return_string():
    # ~ function = LegacyFunctionSpecification()
    # ~ function.addParameter('string_in', dtype='string', direction=function.IN)
    # ~ function.result_type = 'string'
    # ~ function.can_handle_array = True
    # ~ return function
    @remote_function(must_handle_array=True)
    def echo_2_int(N, int_in1='int32', int_in2=numpy.int32(1)):
        returns(int_out1='int32', int_out2='int32')
    @remote_function(must_handle_array=True)
    def echo_3_int(i='int32', j='int32', k='int32', l=numpy.int32(0), m=numpy.int32(1)):
        returns(int_out='int32')
    @remote_function(must_handle_array=True)
    def echo_inout_array_with_result(in_out='int32'):
        returns(in_out='int32')
    @remote_function(can_handle_array=True)
    def echo_logical(input='bool'):
        returns(output='bool')
    @remote_function(can_handle_array=True)
    def print_string(string_in='string'):
        pass
    @remote_function
    def dummy_3_int(i='i', j='i', k='i'):
        pass
    @remote_function(can_handle_array=True)
    def print_error_string(string_in='string'):
        pass
class ForTesting(InCodeComponentImplementation):
    """
    High-level wrapper around ForTestingInterface that attaches units and
    error-code handling to selected interface methods.
    """
    def __init__(self, exefile, **options):
        InCodeComponentImplementation.__init__(self, ForTestingInterface(exefile, **options), **options)
    def define_methods(self, object):
        # Give echo_int a unit (meters) and map its integer return value
        # onto AMUSE's error-code machinery (negative -> AmuseException).
        object.add_method(
            'echo_int',
            (units.m,),
            (
                units.m,
                object.ERROR_CODE,
            )
        )
class TestCImplementationInterface(TestWithMPI):
    """
    End-to-end tests of the AMUSE C worker interface: the worker defined in
    ``codestring`` is compiled once per class, then each test exchanges
    scalars, arrays, strings, and booleans with it over the RPC channel.
    """
    @classmethod
    def setup_class(cls):
        # Compile the C worker executable once for all tests in this class.
        print("building...")
        cls.check_can_compile_modules()
        try:
            cls.exefile = compile_tools.build_worker(codestring, cls.get_path_to_results(),
                ForTestingInterface, write_header=False)
        except Exception as ex:
            print(ex)
            raise
        print("done")
    # --- scalar echo round-trips ---
    def test1(self):
        instance = ForTestingInterface(self.exefile)
        int_out, error = instance.echo_int(10)
        instance.stop()
        self.assertEqual(int_out, 10)
        self.assertEqual(error, 0)
    def test2(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_double(4.0)
        instance.stop()
        self.assertEqual(out, 4.0)
        self.assertEqual(error, 0)
    # --- array-valued calls via can_handle_array ---
    def test3(self):
        instance = ForTestingInterface(self.exefile)
        input = [1, 2, 3, 4]
        output, errors = instance.echo_int(input)
        instance.stop()
        self.assertEqual(len(errors), 4)
        for actual, expected in zip(output, input):
            self.assertEqual(actual, expected)
    def test4(self):
        instance = ForTestingInterface(self.exefile)
        input = [1.0, 2.1, 3.3, 4.2]
        output, errors = instance.echo_double(input)
        instance.stop()
        self.assertEqual(len(errors), 4)
        for actual, expected in zip(output, input):
            self.assertEqual(actual, expected)
    def test5(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_float(4.0)
        instance.stop()
        self.assertEqual(out, 4.0)
        self.assertEqual(error, 0)
    # --- string round-trips, scalar and array ---
    def test6(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_string("abc")
        instance.stop()
        self.assertEqual(error, 0)
        self.assertEqual(out, "abc")
    def test7(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_string(["abc", "def"])
        instance.stop()
        self.assertEqual(error[0], 0)
        self.assertEqual(error[1], 0)
        self.assertEqual(out[0], "abc")
        self.assertEqual(out[1], "def")
    def test8(self):
        # echo_strings swaps its two inout arguments on the C side.
        instance = ForTestingInterface(self.exefile)
        out1, out2, error = instance.echo_strings("abc", "def")
        instance.stop()
        self.assertEqual(error, 0)
        self.assertEqual(out1, "def")
        self.assertEqual(out2, "abc")
    def test9(self):
        instance = ForTestingInterface(self.exefile)
        str1_out, str2_out, error = instance.echo_strings(["abc", "def"], ["ghi", "jkl"])
        instance.stop()
        self.assertEqual(error[0], 0)
        self.assertEqual(error[1], 0)
        self.assertEqual(str1_out[0], "ghi")
        self.assertEqual(str1_out[1], "jkl")
        self.assertEqual(str2_out[0], "abc")
        self.assertEqual(str2_out[1], "def")
    def xtest10(self):
        """test for ticket #74, 'running out of os file descriptors'
        Note: this test takes a very long time, to enable it
        remove the 'X' in front of the test name, to disable it
        add an 'X'.
        Also note: to test this, you best set the ulimit
        to a low number (but not too low), for example
        ulimit -n 400
        """
        for x in range(400):
            instance = ForTestingInterface(self.exefile)
            out, error = instance.echo_float(4.0)
            if x % 100 == 0:
                print("x:", x)
            instance.stop()
    # --- must_handle_array calls ---
    def test11(self):
        instance = ForTestingInterface(self.exefile)
        (output_ints,) = instance.echo_array([4, 5, 6])
        instance.stop()
        print(output_ints)
        self.assertEqual(output_ints[0], 4)
        self.assertEqual(output_ints[1], 5)
        self.assertEqual(output_ints[2], 6)
    def test12(self):
        # The C function always returns -1; each element gets that code.
        instance = ForTestingInterface(self.exefile)
        (output_ints, error) = instance.echo_array_with_result([4, 5, 6])
        instance.stop()
        self.assertEqual(output_ints[0], 4)
        self.assertEqual(output_ints[1], 5)
        self.assertEqual(output_ints[2], 6)
        self.assertEqual(error[0], -1)
        self.assertEqual(error[1], -1)
        self.assertEqual(error[2], -1)
    # --- error handling through the high-level ForTesting wrapper ---
    def test13(self):
        instance = ForTesting(self.exefile)
        self.assertRaises(exceptions.AmuseException, instance.echo_int, [-1, -2] | units.m,
            expected_message="Error when calling 'echo_int' of a 'ForTesting', errorcode is -1")
        instance.stop()
    def test14(self):
        # An unknown function id must raise a CodeException but leave the
        # worker usable once the original id is restored.
        instance = ForTesting(self.exefile)
        self.assertRaises(exceptions.CodeException, lambda: instance.echo_int())
        old_id = instance.legacy_interface.echo_int.specification.id
        instance.legacy_interface.echo_int.specification.id = -9
        self.assertRaises(exceptions.CodeException, lambda: instance.echo_int(1 | units.m))
        instance.legacy_interface.echo_int.specification.id = old_id
        instance.echo_int(1 | units.m)
        instance.stop()
    def test15(self):
        # Omitted array arguments fall back to the declared default (1).
        instance = ForTesting(self.exefile)
        output_ints1, output_ints2 = instance.echo_2_int([1, 2], [3, 4])
        output_ints3, output_ints4 = instance.echo_2_int([1, 2, 3])
        output_ints5, output_ints6 = instance.echo_2_int([5], [0])
        output_ints7, output_ints8 = instance.echo_2_int([5])
        instance.stop()
        self.assertEqual(output_ints1[1], 2)
        self.assertEqual(output_ints2[0], 3)
        self.assertEqual(output_ints2[1], 4)
        for i in range(3):
            self.assertEqual(output_ints3[i], i + 1)
            self.assertEqual(output_ints4[i], 1)
        self.assertEqual(output_ints5[0], 5)
        self.assertEqual(output_ints6[0], 0)
        self.assertEqual(output_ints7[0], 5)
        self.assertEqual(output_ints8[0], 1)
    def test16(self):
        instance = ForTesting(self.exefile)
        self.assertRaises(exceptions.AmuseException, lambda: instance.echo_int([]))
        instance.stop()
    def test17(self):
        instance = ForTestingInterface(self.exefile)
        (output_ints, error) = instance.echo_inout_array_with_result([4, 5, 6])
        instance.stop()
        self.assertEqual(output_ints[0], 14)
        self.assertEqual(output_ints[1], 15)
        self.assertEqual(output_ints[2], 16)
        self.assertEqual(error[0], 11)
        self.assertEqual(error[1], 11)
        self.assertEqual(error[2], 11)
    def test18(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_logical([True, False, True])
        instance.stop()
        self.assertEqual(out, [True, False, True])
        self.assertEqual(error, 0)
    def test19(self):
        # 64-bit integers must survive the round trip unchanged.
        instance = ForTestingInterface(self.exefile)
        print(3935559000370003845)
        int_out, error = instance.echo_long_long_int(3935559000370003845)
        instance.stop()
        self.assertEqual(int_out, 3935559000370003845)
        self.assertEqual(error, 0)
    def xtest20(self):
        #
        # TURNED OFF support for redirection,
        # by default output is redirected to /dev/null
        # if you need file, use the support from your mpi implementation
        #
        if os.path.exists("pout.000"):
            os.remove("pout.000")
        if os.path.exists("perr.000"):
            os.remove("perr.000")
        x = ForTesting(self.exefile, redirect_stderr_file='perr', redirect_stdout_file='pout', redirection="file")
        x.print_string("abc")
        x.print_error_string("exex")
        x.stop()
        time.sleep(0.2)
        self.assertTrue(os.path.exists("pout.000"))
        with open("pout.000", "r") as f:
            content = f.read()
        self.assertEqual(content.strip(), "abc")
        self.assertTrue(os.path.exists("perr.000"))
        with open("perr.000", "r") as f:
            content = f.read()
        self.assertEqual(content.strip(), "exex")
        x = ForTesting(self.exefile, redirect_stderr_file='pout', redirect_stdout_file='pout', redirection="file")
        x.print_string("def")
        x.print_error_string("exex")
        x.stop()
        time.sleep(0.2)
        self.assertTrue(os.path.exists("pout.000"))
        with open("pout.000", "r") as f:
            content = f.read()
        self.assertEqual(content.strip(), "abc\ndef\nexex")
    # --- internal channel options (polling interval, ports) ---
    def test21(self):
        instance = ForTestingInterface(self.exefile)
        (output1, error1) = instance.internal__get_message_polling_interval()
        error2 = instance.internal__set_message_polling_interval(1234)
        (output2, error3) = instance.internal__get_message_polling_interval()
        instance.internal__set_message_polling_interval(0)
        instance.stop()
        self.assertEqual(error1, 0)
        self.assertEqual(output1, 0)
        self.assertEqual(error2, 0)
        self.assertEqual(error3, 0)
        self.assertEqual(output2, 1234)
    def test22(self):
        self.check_for_mpi()
        instance = ForTestingInterface(self.exefile)
        t0 = time.time()
        (output1, error1) = instance.internal__get_message_polling_interval()
        t1 = time.time()
        error2 = instance.internal__set_message_polling_interval(500 * 1000)
        t2 = time.time()
        (output2, error3) = instance.internal__get_message_polling_interval()
        t3 = time.time()
        instance.stop()
        self.assertEqual(error1, 0)
        self.assertEqual(output1, 0)
        self.assertEqual(error2, 0)
        self.assertEqual(error3, 0)
        self.assertEqual(output2, 500 * 1000)
        # ~ print t1 - t0, t3 - t2
        # ~ self.assertTrue((t3 - t2) > 0.25)
    # --- default-argument handling for string parameters ---
    def test23(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_string_int(1)
        instance.stop()
        self.assertEqual(error, 0)
        self.assertEqual(out, "echo")
    def test24(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_string_int(1, "abc")
        instance.stop()
        self.assertEqual(error, 0)
        self.assertEqual(out, "abc")
    def test25(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_string_int([1, 2])
        instance.stop()
        self.assertEqual(error, 0)
        self.assertEqual(out[0], "echo")
        self.assertEqual(out[1], "echo")
    def test26(self):
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_string_int([1, 2], ["abc", "def"])
        instance.stop()
        self.assertEqual(error, 0)
        self.assertEqual(out[0], "abc")
        self.assertEqual(out[1], "def")
    def test27(self):
        # A scalar string argument is broadcast across the array call.
        instance = ForTestingInterface(self.exefile)
        out, error = instance.echo_string_int([1, 2], "abc")
        instance.stop()
        self.assertEqual(error, 0)
        self.assertEqual(out[0], "abc")
        self.assertEqual(out[1], "abc")
    def test28(self):
        instance = ForTestingInterface(self.exefile)
        out1, out2, error = instance.echo_string_two(["one", "two"], "three")
        instance.stop()
        self.assertEqual(error, 0)
        self.assertEqual(out1[0], "one")
        self.assertEqual(out1[1], "two")
        self.assertEqual(out2[0], "three")
        self.assertEqual(out2[1], "three")
    def test29(self):
        # Two workers can connect to each other through an MPI port.
        self.check_for_mpi()
        instance1 = ForTestingInterface(self.exefile)
        instance2 = ForTestingInterface(self.exefile)
        portname, error = instance1.internal__open_port()
        self.assertTrue(len(portname) > 0)
        request1 = instance1.internal__accept_on_port.asynchronous(portname)
        request2 = instance2.internal__connect_to_port.asynchronous(portname)
        request1.wait()
        request2.wait()
        port_id1, error1 = request1.result()
        port_id2, error2 = request2.result()
        self.assertTrue(port_id1 >= 0)
        self.assertTrue(port_id2 >= 0)
        self.assertEqual(error1, 0)
        self.assertEqual(error2, 0)
    def test30(self):
        from amuse.support.interface import ConvertArgumentsException
        instance = ForTesting(self.exefile)
        self.assertRaises(ConvertArgumentsException, instance.dummy_3_int, 2, 3, i=1, expected_message="got multiple values for argument 'i' of method dummy_3_int")
        instance.stop()
    @unittest.skip
    def test31(self):
        # Rough timing comparison between the low- and high-level interfaces;
        # skipped by default because it is a benchmark, not a correctness test.
        import time
        instance = ForTestingInterface(self.exefile)
        N = 5000
        t1 = time.time()
        for i in range(N):
            res, err = instance.echo_int([i])
        t2 = time.time()
        print("1 time:", t2-t1, (t2-t1)/N)
        instance.stop()
        instance = ForTesting(self.exefile)
        N = 5000
        t1 = time.time()
        for i in range(N):
            res = instance.echo_int([i] | units.m)
        t2 = time.time()
        print("2 time:", t2-t1, (t2-t1)/N)
        instance.stop()
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@test@suite@compile_tests@test_c_implementation_simplified.py@.PATH_END.py
|
{
"filename": "test_lg.py",
"repo_name": "icrar/daliuge",
"repo_path": "daliuge_extracted/daliuge-master/daliuge-translator/test/dropmake/test_lg.py",
"type": "Python"
}
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2024
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
import json
import pickle
import unittest
from dlg.common import CategoryType
from dlg.dropmake import path_utils
from dlg.dropmake.lg import LG
NODES = 'nodeDataArray'
LINKS = 'linkDataArray'
TEST_SSID = 'test_pg_gen' # Needed to match output files generated in test_pg_gen.py
class TestLGInit(unittest.TestCase):
    """Verify that constructing an LG loads the expected number of nodes."""
    # Maps logical-graph filename -> expected number of keys in LG._done_dict.
    lg_names = {
        "HelloWorld_simple.graph": 2,
        "eagle_gather_empty_update.graph": 11,
        "eagle_gather_simple_update.graph": 18,
        "eagle_gather_update.graph": 14,
        "testLoop.graph": 4,
        "cont_img_mvp.graph": 45,
        "test_grpby_gather.graph": 21,
        "chiles_simple.graph": 22,
        "Plasma_test.graph": 6,
    }
    def test_lg_init(self):
        # Each graph is loaded from the repository's test data directory.
        for lg_name, num_keys in self.lg_names.items():
            fp = path_utils.get_lg_fpath("logical_graphs", lg_name)
            lg = LG(fp, ssid=TEST_SSID)
            self.assertEqual(num_keys,
                             len(lg._done_dict.keys()),
                             f"Incorrect number of elements when constructing LG "
                             f"object using: {lg_name}")
def _calc_num_drops(drop_values):
"""
Get the number of drops created during the lgn_to_pgn method.
The drops are stored in dictionaries of the original LG node, so we
iterate through them and get the number of Physical Graph Nodes for every one of
the Logical Graph nodes.
"""
return sum(len(drop_list) for drop_list in drop_values)
class TestLGNToPGN(unittest.TestCase):
    """
    Verify any changes to the LGN to PGN method produce the same PGT structure.
    Intended as regression testing to ensure backwards compatibility with existing
    PGT behaviour.
    # Note that currently, LGN to PGN creates all the physical graph nodes,
    but doesn't get rid of construct nodes (these are removed by the unroll_to_tpl)
    """
    def test_loop_graph(self):
        """
        More complex looping graph
        """
        graph_name = "testLoop.graph"
        graph_information = {"num_keys": 11}
        lg = LG(path_utils.get_lg_fpath('logical_graphs', graph_name), ssid="TEST")
        # Mirror unroll_to_tpl: translate every start node, then count drops.
        for lgn in lg._start_list:
            lg.lgn_to_pgn(lgn)
        self.assertEqual(graph_information["num_keys"],
                         _calc_num_drops(lg._drop_dict.values()))
    def test_scatter_graph_graph(self):
        """
        Test scatter gather constructs
        """
        graph_name = "eagle_gather_update.graph"
        graph_information = {"num_keys": 31}
        lg = LG(path_utils.get_lg_fpath('logical_graphs', graph_name), ssid="TEST")
        for lgn in lg._start_list:
            lg.lgn_to_pgn(lgn)
        self.assertEqual(graph_information["num_keys"],
                         _calc_num_drops(lg._drop_dict.values()))
    def test_non_recursive(self):
        """
        This tests that we can generate the correct number of expected 'unrolled' drops
        using a non-recursive implementation of the lgn_to_pgn translation method.
        We want to get the number of drops created during lgn_to_pgn, which is the
        intermediate representation of drops prior to calling 'unroll_to_tpl'.
        To test the call to lgn_to_pgn, we use the same structure as in unroll_to_tpl,
        and iterate through the _start_list identified during the LG class __init__.
        """
        lg_names = {"testLoop.graph": {"num_pgt_drops": 11},
                    "eagle_gather_update.graph": {"num_pgt_drops": 31}}
        # Run the recursive and non-recursive translations on fresh LG objects
        # and require both to produce the same drop count.
        for graph_name, test_dict in lg_names.items():
            expected_drops = test_dict['num_pgt_drops']
            lg_recursive = LG(path_utils.get_lg_fpath("logical_graphs", graph_name),
                              ssid="TEST")
            for lgn in lg_recursive._start_list:
                lg_recursive.lgn_to_pgn(lgn)
            self.assertEqual(
                expected_drops,
                _calc_num_drops(lg_recursive._drop_dict.values())
            )
            lg_non_recursive = LG(
                path_utils.get_lg_fpath("logical_graphs", graph_name), ssid="TEST"
            )
            for lgn in lg_non_recursive._start_list:
                lg_non_recursive.lgn_to_pgn(lgn, recursive=False)
            self.assertEqual(
                expected_drops,
                _calc_num_drops(lg_non_recursive._drop_dict.values())
            )
class TestLGNodeLoading(unittest.TestCase):
    def test_LGNode_SubgraphData(self):
        """
        Test that when we initially construct the logical graph, the SubGraph data node
        that is added to the graph stores the sub-graph nodes and links.
        """
        fname = path_utils.get_lg_fpath("logical_graphs", "ExampleSubgraphSimple.graph")
        lg = LG(fname)
        # Key hard-coded to match the SubGraph data node in the test graph file.
        subgraph_data_node_key = "bb9b78bc-b725-4b61-a12a-413bdcef7690"
        self.assertIsNotNone(lg._done_dict[subgraph_data_node_key].subgraph)
class TestLGUnroll(unittest.TestCase):
"""
Test that the LG unrolls as expected
Uses test/dropmake/pickles as test data
Note: This is a regression testing class. These tests are based on graphs that were
generated using the code they are testing. If the LG class and it's methods change
in the future, test data may need to be re-generated (provided test failures are
caused by known-breaking changes, as opposed to legitimate bugs!).
We no longer compare directly the output, as this causes errors with UIDs/OID
conflicts. What we care about in this scenario is that twe have the correct nu
"""
lg_names = {
"HelloWorld_simple.graph": {"nodes": 2, "edges": 1},
"eagle_gather_empty_update.graph": {"nodes": 22, "edges": 24},
"eagle_gather_simple_update.graph": {"nodes": 42, "edges": 55},
"eagle_gather_update.graph": {"nodes": 29, "edges": 30},
"testLoop.graph": {"nodes": 11, "edges": 10},
"cont_img_mvp.graph": {"nodes": 144, "edges": 188},
"test_grpby_gather.graph": {"nodes": 15, "edges": 14},
"chiles_simple.graph": {"nodes": 22, "edges": 21},
"Plasma_test.graph": {"nodes": 6, "edges": 5},
"SharedMemoryTest_update.graph": {"nodes": 8, "edges": 7},
# "simpleMKN_update.graph", # Currently broken
}
def test_lg_unroll(self):
"""
Basic verification that we can unroll a list of dropdicts from a logical graph
lg_names = { "logical_graph_file.graph": num_keys_in_drop_list, ...}
"""
# TODO These are number of logical graph nodes! Make this exclusive to LG init
for lg_name, num_keys in self.lg_names.items():
fp = path_utils.get_lg_fpath("logical_graphs", lg_name)
lg = LG(fp, ssid=TEST_SSID)
drop_list = lg.unroll_to_tpl()
with open(path_utils.get_lg_fpath('pickle', lg_name), 'rb') as pk_fp:
test_unroll = pickle.load(pk_fp)
# It is worth mentioning that we do not get an accurate number of links
# from the LG, as it is not tracked after the initial graph_loading.
self.assertEqual(len(test_unroll), len(drop_list))
self.assertEqual(num_keys['nodes'], len(drop_list))
# Confirm number of output/consumers and inputs/producers are the same
for i, drop in enumerate(drop_list):
if 'outputs' in drop:
expected = test_unroll[i]['outputs']
actual = drop['outputs']
self.assertEqual(len(expected), len(actual))
if 'inputs' in drop:
expected = test_unroll[i]['inputs']
actual = drop['inputs']
self.assertEqual(len(expected), len(actual))
if 'producers' in drop:
expected = test_unroll[i]['producers']
actual = drop['producers']
self.assertEqual(len(expected), len(actual))
if 'consumers' in drop:
expected = test_unroll[i]['consumers']
actual = drop['consumers']
self.assertEqual(len(expected), len(actual))
def test_lg_unroll_sharedmemory(self):
    """
    Confirm the SharedMemory data type is correctly unrolled.
    """
    graph_file = "SharedMemoryTest_update.graph"
    expected_elements = 8
    graph_path = path_utils.get_lg_fpath("logical_graphs", graph_file)
    logical_graph = LG(graph_path, ssid=TEST_SSID)
    self.assertEqual(
        expected_elements,
        len(logical_graph._done_dict.keys()),
        f"Incorrect number of elements when constructing LG "
        f"object using: {graph_file}",
    )
    # Every data drop produced by the unroll must use SharedMemory storage.
    for drop in logical_graph.unroll_to_tpl():
        if drop["categoryType"] in [CategoryType.DATA, "data"]:
            self.assertEqual("SharedMemory", drop["category"])
|
icrarREPO_NAMEdaliugePATH_START.@daliuge_extracted@daliuge-master@daliuge-translator@test@dropmake@test_lg.py@.PATH_END.py
|
{
"filename": "firefly-js-modules.md",
"repo_name": "Caltech-IPAC/firefly",
"repo_path": "firefly_extracted/firefly-master/docs/firefly-js-modules.md",
"type": "Markdown"
}
|
### Modules in use for the firefly js environment
**React.js**, see: http://facebook.github.io/react/docs/getting-started.html
* the core UI library
**underscore.string**, see: http://epeli.github.io/underscore.string/
* complete string manipulation operations
**lodash**, see https://lodash.com
* useful utility functions, superset of underscore
**validator**, see: https://www.npmjs.com/package/validator
* String validation and sanitization
**numeraljs**, see: http://numeraljs.com/
* format and manipulate numbers
**icepick**, see: https://www.npmjs.com/package/icepick
* make object immutable
**react-grid-layout**, see: https://github.com/STRML/react-grid-layout
* grid layout system
**uniq**, see: https://www.npmjs.com/package/uniq
* Removes duplicates from a sorted array in place
**enum**, see: https://www.npmjs.com/package/enum
* Enum is a javascript module that introduces the Enum Type
**jsonp**, see: https://www.npmjs.com/package/jsonp
* handle jsonp request
**debug**, see: https://www.npmjs.com/package/debug
* provides logging functionality similar to Java's logging frameworks
#### Modules being evaluated
**ampersand-state**, http://ampersandjs.com/docs#ampersand-state
* model object, has change events
### Licenses for all JS modules
All license files for JavaScript dependencies are in firefly/node_modules. All licenses allow reuse.
To view:
`cd firefly/node_modules; find . -name 'LIC*' -exec more {} \;`
|
Caltech-IPACREPO_NAMEfireflyPATH_START.@firefly_extracted@firefly-master@docs@firefly-js-modules.md@.PATH_END.py
|
{
"filename": "test_assumed_shape.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/f2py/tests/test_assumed_shape.py",
"type": "Python"
}
|
import os
import pytest
import tempfile
from . import util
class TestAssumedShapeSumExample(util.F2PyTest):
    """Compile the assumed-shape Fortran sources and exercise every sum wrapper."""

    sources = [
        util.getpath("tests", "src", "assumed_shape", "foo_free.f90"),
        util.getpath("tests", "src", "assumed_shape", "foo_use.f90"),
        util.getpath("tests", "src", "assumed_shape", "precision.f90"),
        util.getpath("tests", "src", "assumed_shape", "foo_mod.f90"),
        util.getpath("tests", "src", "assumed_shape", ".f2py_f2cmap"),
    ]

    @pytest.mark.slow
    def test_all(self):
        # Each wrapped routine must sum [1, 2] to 3.
        wrappers = (
            self.module.fsum,
            self.module.sum,
            self.module.sum_with_use,
            self.module.mod.sum,
            self.module.mod.fsum,
        )
        for wrapper in wrappers:
            assert wrapper([1, 2]) == 3
class TestF2cmapOption(TestAssumedShapeSumExample):
    """Run the same checks, but supply the f2c map through the ``--f2cmap`` option."""

    def setup_method(self):
        # Replace the in-tree .f2py_f2cmap source with a copy held in a
        # uniquely named temporary file, and pass that file via --f2cmap.
        self.sources = list(self.sources)
        original_map = self.sources.pop(-1)
        tmp = tempfile.NamedTemporaryFile(delete=False)
        with open(original_map, "rb") as src:
            tmp.write(src.read())
        tmp.close()
        self.f2cmap_file = tmp
        self.sources.append(tmp.name)
        self.options = ["--f2cmap", tmp.name]
        super().setup_method()

    def teardown_method(self):
        # Remove the temporary map file created in setup_method().
        os.unlink(self.f2cmap_file.name)
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@f2py@tests@test_assumed_shape.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/stream/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# On Python < 3.7 (no module-level __getattr__, PEP 562) or while a static
# type checker is looking, import the validator classes eagerly so the
# names resolve normally.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._token import TokenValidator
    from ._maxpoints import MaxpointsValidator
else:
    # Otherwise defer loading: relative_import builds __all__, a module-level
    # __getattr__ and __dir__ that import each validator on first access.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@stream@__init__.py@.PATH_END.py
|
{
"filename": "policies.py",
"repo_name": "DLR-RM/stable-baselines3",
"repo_path": "stable-baselines3_extracted/stable-baselines3-master/stable_baselines3/common/policies.py",
"type": "Python"
}
|
"""Policies: abstract base class and concrete implementations."""
import collections
import copy
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Optional, TypeVar, Union
import numpy as np
import torch as th
from gymnasium import spaces
from torch import nn
from stable_baselines3.common.distributions import (
BernoulliDistribution,
CategoricalDistribution,
DiagGaussianDistribution,
Distribution,
MultiCategoricalDistribution,
StateDependentNoiseDistribution,
make_proba_distribution,
)
from stable_baselines3.common.preprocessing import get_action_dim, is_image_space, maybe_transpose, preprocess_obs
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
MlpExtractor,
NatureCNN,
create_mlp,
)
from stable_baselines3.common.type_aliases import PyTorchObs, Schedule
from stable_baselines3.common.utils import get_device, is_vectorized_observation, obs_as_tensor
SelfBaseModel = TypeVar("SelfBaseModel", bound="BaseModel")
class BaseModel(nn.Module):
    """
    The base model object: makes predictions in response to observations.

    In the case of policies, the prediction is an action. In the case of critics, it is the
    estimated value of the observation.

    :param observation_space: The observation space of the environment
    :param action_space: The action space of the environment
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments
        to pass to the features extractor.
    :param features_extractor: Network to extract features
        (a CNN when using images, a nn.Flatten() layer otherwise)
    :param normalize_images: Whether to normalize images or not,
         dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """

    # Not created here: concrete subclasses build it (e.g. in
    # ActorCriticPolicy._build); declared so type checkers know it exists.
    optimizer: th.optim.Optimizer

    def __init__(
        self,
        observation_space: spaces.Space,
        action_space: spaces.Space,
        features_extractor_class: type[BaseFeaturesExtractor] = FlattenExtractor,
        features_extractor_kwargs: Optional[dict[str, Any]] = None,
        features_extractor: Optional[BaseFeaturesExtractor] = None,
        normalize_images: bool = True,
        optimizer_class: type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[dict[str, Any]] = None,
    ):
        super().__init__()

        if optimizer_kwargs is None:
            optimizer_kwargs = {}

        if features_extractor_kwargs is None:
            features_extractor_kwargs = {}

        self.observation_space = observation_space
        self.action_space = action_space
        self.features_extractor = features_extractor
        self.normalize_images = normalize_images

        self.optimizer_class = optimizer_class
        self.optimizer_kwargs = optimizer_kwargs

        self.features_extractor_class = features_extractor_class
        self.features_extractor_kwargs = features_extractor_kwargs
        # Automatically deactivate dtype and bounds checks
        if not normalize_images and issubclass(features_extractor_class, (NatureCNN, CombinedExtractor)):
            self.features_extractor_kwargs.update(dict(normalized_image=True))

    def _update_features_extractor(
        self,
        net_kwargs: dict[str, Any],
        features_extractor: Optional[BaseFeaturesExtractor] = None,
    ) -> dict[str, Any]:
        """
        Update the network keyword arguments and create a new features extractor object if needed.
        If a ``features_extractor`` object is passed, then it will be shared.

        :param net_kwargs: the base network keyword arguments, without the ones
            related to features extractor
        :param features_extractor: a features extractor object.
            If None, a new object will be created.
        :return: The updated keyword arguments
        """
        # Copy so the caller's dict is never mutated.
        net_kwargs = net_kwargs.copy()
        if features_extractor is None:
            # The features extractor is not shared, create a new one
            features_extractor = self.make_features_extractor()
        net_kwargs.update(dict(features_extractor=features_extractor, features_dim=features_extractor.features_dim))
        return net_kwargs

    def make_features_extractor(self) -> BaseFeaturesExtractor:
        """Helper method to create a features extractor."""
        return self.features_extractor_class(self.observation_space, **self.features_extractor_kwargs)

    def extract_features(self, obs: PyTorchObs, features_extractor: BaseFeaturesExtractor) -> th.Tensor:
        """
        Preprocess the observation if needed and extract features.

        :param obs: Observation
        :param features_extractor: The features extractor to use.
        :return: The extracted features
        """
        preprocessed_obs = preprocess_obs(obs, self.observation_space, normalize_images=self.normalize_images)
        return features_extractor(preprocessed_obs)

    def _get_constructor_parameters(self) -> dict[str, Any]:
        """
        Get data that need to be saved in order to re-create the model when loading it from disk.

        :return: The dictionary to pass to the as kwargs constructor when reconstruction this model.
        """
        return dict(
            observation_space=self.observation_space,
            action_space=self.action_space,
            # Passed to the constructor by child class
            # squash_output=self.squash_output,
            # features_extractor=self.features_extractor
            normalize_images=self.normalize_images,
        )

    @property
    def device(self) -> th.device:
        """Infer which device this policy lives on by inspecting its parameters.
        If it has no parameters, the 'cpu' device is used as a fallback.

        :return:"""
        # Assumes all parameters live on the same device: the first one decides.
        for param in self.parameters():
            return param.device
        return get_device("cpu")

    def save(self, path: str) -> None:
        """
        Save model to a given location.

        :param path:
        """
        # Weights plus constructor kwargs, so load() can rebuild the object.
        th.save({"state_dict": self.state_dict(), "data": self._get_constructor_parameters()}, path)

    @classmethod
    def load(cls: type[SelfBaseModel], path: str, device: Union[th.device, str] = "auto") -> SelfBaseModel:
        """
        Load model from path.

        :param path:
        :param device: Device on which the policy should be loaded.
        :return:
        """
        device = get_device(device)
        # Note(antonin): we cannot use `weights_only=True` here because we need to allow
        # gymnasium imports for the policy to be loaded successfully
        saved_variables = th.load(path, map_location=device, weights_only=False)

        # Create policy object
        model = cls(**saved_variables["data"])
        # Load weights
        model.load_state_dict(saved_variables["state_dict"])
        model.to(device)
        return model

    def load_from_vector(self, vector: np.ndarray) -> None:
        """
        Load parameters from a 1D vector.

        :param vector:
        """
        th.nn.utils.vector_to_parameters(th.as_tensor(vector, dtype=th.float, device=self.device), self.parameters())

    def parameters_to_vector(self) -> np.ndarray:
        """
        Convert the parameters to a 1D vector.

        :return:
        """
        return th.nn.utils.parameters_to_vector(self.parameters()).detach().cpu().numpy()

    def set_training_mode(self, mode: bool) -> None:
        """
        Put the policy in either training or evaluation mode.

        This affects certain modules, such as batch normalisation and dropout.

        :param mode: if true, set to training mode, else set to evaluation mode
        """
        self.train(mode)

    def is_vectorized_observation(self, observation: Union[np.ndarray, dict[str, np.ndarray]]) -> bool:
        """
        Check whether or not the observation is vectorized,
        apply transposition to image (so that they are channel-first) if needed.
        This is used in DQN when sampling random action (epsilon-greedy policy)

        :param observation: the input observation to check
        :return: whether the given observation is vectorized or not
        """
        vectorized_env = False
        if isinstance(observation, dict):
            assert isinstance(
                self.observation_space, spaces.Dict
            ), f"The observation provided is a dict but the obs space is {self.observation_space}"
            # A dict observation counts as vectorized if ANY of its entries is.
            for key, obs in observation.items():
                obs_space = self.observation_space.spaces[key]
                vectorized_env = vectorized_env or is_vectorized_observation(maybe_transpose(obs, obs_space), obs_space)
        else:
            vectorized_env = is_vectorized_observation(
                maybe_transpose(observation, self.observation_space), self.observation_space
            )
        return vectorized_env

    def obs_to_tensor(self, observation: Union[np.ndarray, dict[str, np.ndarray]]) -> tuple[PyTorchObs, bool]:
        """
        Convert an input observation to a PyTorch tensor that can be fed to a model.
        Includes sugar-coating to handle different observations (e.g. normalizing images).

        :param observation: the input observation
        :return: The observation as PyTorch tensor
            and whether the observation is vectorized or not
        """
        vectorized_env = False
        if isinstance(observation, dict):
            assert isinstance(
                self.observation_space, spaces.Dict
            ), f"The observation provided is a dict but the obs space is {self.observation_space}"
            # need to copy the dict as the dict in VecFrameStack will become a torch tensor
            observation = copy.deepcopy(observation)
            for key, obs in observation.items():
                obs_space = self.observation_space.spaces[key]
                if is_image_space(obs_space):
                    obs_ = maybe_transpose(obs, obs_space)
                else:
                    obs_ = np.array(obs)
                vectorized_env = vectorized_env or is_vectorized_observation(obs_, obs_space)
                # Add batch dimension if needed
                observation[key] = obs_.reshape((-1, *self.observation_space[key].shape))  # type: ignore[misc]

        elif is_image_space(self.observation_space):
            # Handle the different cases for images
            # as PyTorch use channel first format
            observation = maybe_transpose(observation, self.observation_space)

        else:
            observation = np.array(observation)

        if not isinstance(observation, dict):
            # Dict obs need to be handled separately
            vectorized_env = is_vectorized_observation(observation, self.observation_space)
            # Add batch dimension if needed
            observation = observation.reshape((-1, *self.observation_space.shape))  # type: ignore[misc]

        obs_tensor = obs_as_tensor(observation, self.device)
        return obs_tensor, vectorized_env
class BasePolicy(BaseModel, ABC):
    """The base policy object.

    Parameters are mostly the same as `BaseModel`; additions are documented below.

    :param args: positional arguments passed through to `BaseModel`.
    :param kwargs: keyword arguments passed through to `BaseModel`.
    :param squash_output: For continuous actions, whether the output is squashed
        or not using a ``tanh()`` function.
    """

    # Declared (not created) here; concrete policies assign it.
    features_extractor: BaseFeaturesExtractor

    def __init__(self, *args, squash_output: bool = False, **kwargs):
        super().__init__(*args, **kwargs)
        self._squash_output = squash_output

    @staticmethod
    def _dummy_schedule(progress_remaining: float) -> float:
        """(float) Useful for pickling policy."""
        del progress_remaining
        return 0.0

    @property
    def squash_output(self) -> bool:
        """(bool) Getter for squash_output."""
        return self._squash_output

    @staticmethod
    def init_weights(module: nn.Module, gain: float = 1) -> None:
        """
        Orthogonal initialization (used in PPO and A2C)

        :param module: Module to initialize (only Linear/Conv2d are touched)
        :param gain: Scaling factor passed to ``nn.init.orthogonal_``
        """
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            nn.init.orthogonal_(module.weight, gain=gain)
            if module.bias is not None:
                module.bias.data.fill_(0.0)

    @abstractmethod
    def _predict(self, observation: PyTorchObs, deterministic: bool = False) -> th.Tensor:
        """
        Get the action according to the policy for a given observation.

        By default provides a dummy implementation -- not all BasePolicy classes
        implement this, e.g. if they are a Critic in an Actor-Critic method.

        :param observation:
        :param deterministic: Whether to use stochastic or deterministic actions
        :return: Taken action according to the policy
        """

    def predict(
        self,
        observation: Union[np.ndarray, dict[str, np.ndarray]],
        state: Optional[tuple[np.ndarray, ...]] = None,
        episode_start: Optional[np.ndarray] = None,
        deterministic: bool = False,
    ) -> tuple[np.ndarray, Optional[tuple[np.ndarray, ...]]]:
        """
        Get the policy action from an observation (and optional hidden state).
        Includes sugar-coating to handle different observations (e.g. normalizing images).

        :param observation: the input observation
        :param state: The last hidden states (can be None, used in recurrent policies)
        :param episode_start: The last masks (can be None, used in recurrent policies)
            this correspond to beginning of episodes,
            where the hidden states of the RNN must be reset.
        :param deterministic: Whether or not to return deterministic actions.
        :return: the model's action and the next hidden state
            (used in recurrent policies)
        """
        # Switch to eval mode (this affects batch norm / dropout)
        self.set_training_mode(False)

        # Check for common mistake that the user does not mix Gym/VecEnv API
        # Tuple obs are not supported by SB3, so we can safely do that check
        if isinstance(observation, tuple) and len(observation) == 2 and isinstance(observation[1], dict):
            raise ValueError(
                "You have passed a tuple to the predict() function instead of a Numpy array or a Dict. "
                "You are probably mixing Gym API with SB3 VecEnv API: `obs, info = env.reset()` (Gym) "
                "vs `obs = vec_env.reset()` (SB3 VecEnv). "
                "See related issue https://github.com/DLR-RM/stable-baselines3/issues/1694 "
                "and documentation for more information: https://stable-baselines3.readthedocs.io/en/master/guide/vec_envs.html#vecenv-api-vs-gym-api"
            )

        obs_tensor, vectorized_env = self.obs_to_tensor(observation)

        # No gradients needed for inference.
        with th.no_grad():
            actions = self._predict(obs_tensor, deterministic=deterministic)
        # Convert to numpy, and reshape to the original action shape
        actions = actions.cpu().numpy().reshape((-1, *self.action_space.shape))  # type: ignore[misc, assignment]

        if isinstance(self.action_space, spaces.Box):
            if self.squash_output:
                # Rescale to proper domain when using squashing
                actions = self.unscale_action(actions)  # type: ignore[assignment, arg-type]
            else:
                # Actions could be on arbitrary scale, so clip the actions to avoid
                # out of bound error (e.g. if sampling from a Gaussian distribution)
                actions = np.clip(actions, self.action_space.low, self.action_space.high)  # type: ignore[assignment, arg-type]

        # Remove batch dimension if needed
        if not vectorized_env:
            assert isinstance(actions, np.ndarray)
            actions = actions.squeeze(axis=0)

        return actions, state  # type: ignore[return-value]

    def scale_action(self, action: np.ndarray) -> np.ndarray:
        """
        Rescale the action from [low, high] to [-1, 1]
        (no need for symmetric action space)

        :param action: Action to scale
        :return: Scaled action
        """
        assert isinstance(
            self.action_space, spaces.Box
        ), f"Trying to scale an action using an action space that is not a Box(): {self.action_space}"
        low, high = self.action_space.low, self.action_space.high
        return 2.0 * ((action - low) / (high - low)) - 1.0

    def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:
        """
        Rescale the action from [-1, 1] to [low, high]
        (no need for symmetric action space)

        :param scaled_action: Action to un-scale
        :return: Action rescaled back to the space's [low, high] bounds
        """
        assert isinstance(
            self.action_space, spaces.Box
        ), f"Trying to unscale an action using an action space that is not a Box(): {self.action_space}"
        low, high = self.action_space.low, self.action_space.high
        return low + (0.5 * (scaled_action + 1.0) * (high - low))
class ActorCriticPolicy(BasePolicy):
"""
Policy class for actor-critic algorithms (has both policy and value prediction).
Used by A2C, PPO and the likes.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param ortho_init: Whether to use or not orthogonal initialization
:param use_sde: Whether to use State Dependent Exploration or not
:param log_std_init: Initial value for the log standard deviation
:param full_std: Whether to use (n_features x n_actions) parameters
for the std instead of only (n_features,) when using gSDE
:param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure
a positive standard deviation (cf paper). It allows to keep variance
above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
:param squash_output: Whether to squash the output using a tanh function,
this allows to ensure boundaries when using gSDE.
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param share_features_extractor: If True, the features extractor is shared between the policy and value networks.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: spaces.Space,
action_space: spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[Union[list[int], dict[str, list[int]]]] = None,
activation_fn: type[nn.Module] = nn.Tanh,
ortho_init: bool = True,
use_sde: bool = False,
log_std_init: float = 0.0,
full_std: bool = True,
use_expln: bool = False,
squash_output: bool = False,
features_extractor_class: type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[dict[str, Any]] = None,
share_features_extractor: bool = True,
normalize_images: bool = True,
optimizer_class: type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[dict[str, Any]] = None,
):
if optimizer_kwargs is None:
optimizer_kwargs = {}
# Small values to avoid NaN in Adam optimizer
if optimizer_class == th.optim.Adam:
optimizer_kwargs["eps"] = 1e-5
super().__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
squash_output=squash_output,
normalize_images=normalize_images,
)
if isinstance(net_arch, list) and len(net_arch) > 0 and isinstance(net_arch[0], dict):
warnings.warn(
(
"As shared layers in the mlp_extractor are removed since SB3 v1.8.0, "
"you should now pass directly a dictionary and not a list "
"(net_arch=dict(pi=..., vf=...) instead of net_arch=[dict(pi=..., vf=...)])"
),
)
net_arch = net_arch[0]
# Default network architecture, from stable-baselines
if net_arch is None:
if features_extractor_class == NatureCNN:
net_arch = []
else:
net_arch = dict(pi=[64, 64], vf=[64, 64])
self.net_arch = net_arch
self.activation_fn = activation_fn
self.ortho_init = ortho_init
self.share_features_extractor = share_features_extractor
self.features_extractor = self.make_features_extractor()
self.features_dim = self.features_extractor.features_dim
if self.share_features_extractor:
self.pi_features_extractor = self.features_extractor
self.vf_features_extractor = self.features_extractor
else:
self.pi_features_extractor = self.features_extractor
self.vf_features_extractor = self.make_features_extractor()
self.log_std_init = log_std_init
dist_kwargs = None
assert not (squash_output and not use_sde), "squash_output=True is only available when using gSDE (use_sde=True)"
# Keyword arguments for gSDE distribution
if use_sde:
dist_kwargs = {
"full_std": full_std,
"squash_output": squash_output,
"use_expln": use_expln,
"learn_features": False,
}
self.use_sde = use_sde
self.dist_kwargs = dist_kwargs
# Action distribution
self.action_dist = make_proba_distribution(action_space, use_sde=use_sde, dist_kwargs=dist_kwargs)
self._build(lr_schedule)
def _get_constructor_parameters(self) -> dict[str, Any]:
data = super()._get_constructor_parameters()
default_none_kwargs = self.dist_kwargs or collections.defaultdict(lambda: None) # type: ignore[arg-type, return-value]
data.update(
dict(
net_arch=self.net_arch,
activation_fn=self.activation_fn,
use_sde=self.use_sde,
log_std_init=self.log_std_init,
squash_output=default_none_kwargs["squash_output"],
full_std=default_none_kwargs["full_std"],
use_expln=default_none_kwargs["use_expln"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
ortho_init=self.ortho_init,
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
def reset_noise(self, n_envs: int = 1) -> None:
"""
Sample new weights for the exploration matrix.
:param n_envs:
"""
assert isinstance(self.action_dist, StateDependentNoiseDistribution), "reset_noise() is only available when using gSDE"
self.action_dist.sample_weights(self.log_std, batch_size=n_envs)
def _build_mlp_extractor(self) -> None:
"""
Create the policy and value networks.
Part of the layers can be shared.
"""
# Note: If net_arch is None and some features extractor is used,
# net_arch here is an empty list and mlp_extractor does not
# really contain any layers (acts like an identity module).
self.mlp_extractor = MlpExtractor(
self.features_dim,
net_arch=self.net_arch,
activation_fn=self.activation_fn,
device=self.device,
)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the networks and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self._build_mlp_extractor()
latent_dim_pi = self.mlp_extractor.latent_dim_pi
if isinstance(self.action_dist, DiagGaussianDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
self.action_net, self.log_std = self.action_dist.proba_distribution_net(
latent_dim=latent_dim_pi, latent_sde_dim=latent_dim_pi, log_std_init=self.log_std_init
)
elif isinstance(self.action_dist, (CategoricalDistribution, MultiCategoricalDistribution, BernoulliDistribution)):
self.action_net = self.action_dist.proba_distribution_net(latent_dim=latent_dim_pi)
else:
raise NotImplementedError(f"Unsupported distribution '{self.action_dist}'.")
self.value_net = nn.Linear(self.mlp_extractor.latent_dim_vf, 1)
# Init weights: use orthogonal initialization
# with small initial weight for the output
if self.ortho_init:
# TODO: check for features_extractor
# Values from stable-baselines.
# features_extractor/mlp values are
# originally from openai/baselines (default gains/init_scales).
module_gains = {
self.features_extractor: np.sqrt(2),
self.mlp_extractor: np.sqrt(2),
self.action_net: 0.01,
self.value_net: 1,
}
if not self.share_features_extractor:
# Note(antonin): this is to keep SB3 results
# consistent, see GH#1148
del module_gains[self.features_extractor]
module_gains[self.pi_features_extractor] = np.sqrt(2)
module_gains[self.vf_features_extractor] = np.sqrt(2)
for module, gain in module_gains.items():
module.apply(partial(self.init_weights, gain=gain))
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs) # type: ignore[call-arg]
def forward(self, obs: th.Tensor, deterministic: bool = False) -> tuple[th.Tensor, th.Tensor, th.Tensor]:
"""
Forward pass in all the networks (actor and critic)
:param obs: Observation
:param deterministic: Whether to sample or use deterministic actions
:return: action, value and log probability of the action
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
if self.share_features_extractor:
latent_pi, latent_vf = self.mlp_extractor(features)
else:
pi_features, vf_features = features
latent_pi = self.mlp_extractor.forward_actor(pi_features)
latent_vf = self.mlp_extractor.forward_critic(vf_features)
# Evaluate the values for the given observations
values = self.value_net(latent_vf)
distribution = self._get_action_dist_from_latent(latent_pi)
actions = distribution.get_actions(deterministic=deterministic)
log_prob = distribution.log_prob(actions)
actions = actions.reshape((-1, *self.action_space.shape)) # type: ignore[misc]
return actions, values, log_prob
def extract_features( # type: ignore[override]
self, obs: PyTorchObs, features_extractor: Optional[BaseFeaturesExtractor] = None
) -> Union[th.Tensor, tuple[th.Tensor, th.Tensor]]:
"""
Preprocess the observation if needed and extract features.
:param obs: Observation
:param features_extractor: The features extractor to use. If None, then ``self.features_extractor`` is used.
:return: The extracted features. If features extractor is not shared, returns a tuple with the
features for the actor and the features for the critic.
"""
if self.share_features_extractor:
return super().extract_features(obs, self.features_extractor if features_extractor is None else features_extractor)
else:
if features_extractor is not None:
warnings.warn(
"Provided features_extractor will be ignored because the features extractor is not shared.",
UserWarning,
)
pi_features = super().extract_features(obs, self.pi_features_extractor)
vf_features = super().extract_features(obs, self.vf_features_extractor)
return pi_features, vf_features
def _get_action_dist_from_latent(self, latent_pi: th.Tensor) -> Distribution:
"""
Retrieve action distribution given the latent codes.
:param latent_pi: Latent code for the actor
:return: Action distribution
"""
mean_actions = self.action_net(latent_pi)
if isinstance(self.action_dist, DiagGaussianDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std)
elif isinstance(self.action_dist, CategoricalDistribution):
# Here mean_actions are the logits before the softmax
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, MultiCategoricalDistribution):
# Here mean_actions are the flattened logits
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, BernoulliDistribution):
# Here mean_actions are the logits (before rounding to get the binary actions)
return self.action_dist.proba_distribution(action_logits=mean_actions)
elif isinstance(self.action_dist, StateDependentNoiseDistribution):
return self.action_dist.proba_distribution(mean_actions, self.log_std, latent_pi)
else:
raise ValueError("Invalid action distribution")
def _predict(self, observation: PyTorchObs, deterministic: bool = False) -> th.Tensor:
"""
Get the action according to the policy for a given observation.
:param observation:
:param deterministic: Whether to use stochastic or deterministic actions
:return: Taken action according to the policy
"""
return self.get_distribution(observation).get_actions(deterministic=deterministic)
def evaluate_actions(self, obs: PyTorchObs, actions: th.Tensor) -> tuple[th.Tensor, th.Tensor, Optional[th.Tensor]]:
"""
Evaluate actions according to the current policy,
given the observations.
:param obs: Observation
:param actions: Actions
:return: estimated value, log likelihood of taking those actions
and entropy of the action distribution.
"""
# Preprocess the observation if needed
features = self.extract_features(obs)
if self.share_features_extractor:
latent_pi, latent_vf = self.mlp_extractor(features)
else:
pi_features, vf_features = features
latent_pi = self.mlp_extractor.forward_actor(pi_features)
latent_vf = self.mlp_extractor.forward_critic(vf_features)
distribution = self._get_action_dist_from_latent(latent_pi)
log_prob = distribution.log_prob(actions)
values = self.value_net(latent_vf)
entropy = distribution.entropy()
return values, log_prob, entropy
def get_distribution(self, obs: PyTorchObs) -> Distribution:
"""
Get the current policy distribution given the observations.
:param obs:
:return: the action distribution.
"""
features = super().extract_features(obs, self.pi_features_extractor)
latent_pi = self.mlp_extractor.forward_actor(features)
return self._get_action_dist_from_latent(latent_pi)
def predict_values(self, obs: PyTorchObs) -> th.Tensor:
    """
    Estimate the values for the given observations under the current policy.

    :param obs: Observation
    :return: the estimated values.
    """
    vf_features = super().extract_features(obs, self.vf_features_extractor)
    return self.value_net(self.mlp_extractor.forward_critic(vf_features))
class ActorCriticCnnPolicy(ActorCriticPolicy):
    """
    CNN-based policy class for actor-critic algorithms (has both policy and
    value prediction), used by A2C, PPO and the likes.

    This subclass only changes the default ``features_extractor_class`` to
    :class:`NatureCNN`; every argument is forwarded unchanged to
    :class:`ActorCriticPolicy`.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param ortho_init: Whether to use or not orthogonal initialization
    :param use_sde: Whether to use State Dependent Exploration or not
    :param log_std_init: Initial value for the log standard deviation
    :param full_std: Whether to use (n_features x n_actions) parameters
        for the std instead of only (n_features,) when using gSDE
    :param use_expln: Use ``expln()`` instead of ``exp()`` when using gSDE, to
        keep the standard deviation positive without letting it grow too fast.
    :param squash_output: Whether to squash the output using a tanh function,
        this allows to ensure boundaries when using gSDE.
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments
        to pass to the features extractor.
    :param share_features_extractor: If True, the features extractor is shared
        between the policy and value networks.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use, ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """

    def __init__(
        self,
        observation_space: spaces.Space,
        action_space: spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[list[int], dict[str, list[int]]]] = None,
        activation_fn: type[nn.Module] = nn.Tanh,
        ortho_init: bool = True,
        use_sde: bool = False,
        log_std_init: float = 0.0,
        full_std: bool = True,
        use_expln: bool = False,
        squash_output: bool = False,
        features_extractor_class: type[BaseFeaturesExtractor] = NatureCNN,
        features_extractor_kwargs: Optional[dict[str, Any]] = None,
        share_features_extractor: bool = True,
        normalize_images: bool = True,
        optimizer_class: type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[dict[str, Any]] = None,
    ):
        # Forward everything by keyword for readability/robustness.
        super().__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            ortho_init=ortho_init,
            use_sde=use_sde,
            log_std_init=log_std_init,
            full_std=full_std,
            use_expln=use_expln,
            squash_output=squash_output,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            share_features_extractor=share_features_extractor,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
        )
class MultiInputActorCriticPolicy(ActorCriticPolicy):
    """
    Multi-input (Dict observation) policy class for actor-critic algorithms
    (has both policy and value prediction), used by A2C, PPO and the likes.

    This subclass only changes the default ``features_extractor_class`` to
    :class:`CombinedExtractor` and expects a ``spaces.Dict`` observation
    space; every argument is forwarded unchanged to
    :class:`ActorCriticPolicy`.

    :param observation_space: Observation space (Dict)
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param ortho_init: Whether to use or not orthogonal initialization
    :param use_sde: Whether to use State Dependent Exploration or not
    :param log_std_init: Initial value for the log standard deviation
    :param full_std: Whether to use (n_features x n_actions) parameters
        for the std instead of only (n_features,) when using gSDE
    :param use_expln: Use ``expln()`` instead of ``exp()`` when using gSDE, to
        keep the standard deviation positive without letting it grow too fast.
    :param squash_output: Whether to squash the output using a tanh function,
        this allows to ensure boundaries when using gSDE.
    :param features_extractor_class: Uses the CombinedExtractor
    :param features_extractor_kwargs: Keyword arguments
        to pass to the features extractor.
    :param share_features_extractor: If True, the features extractor is shared
        between the policy and value networks.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use, ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    """

    def __init__(
        self,
        observation_space: spaces.Dict,
        action_space: spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[list[int], dict[str, list[int]]]] = None,
        activation_fn: type[nn.Module] = nn.Tanh,
        ortho_init: bool = True,
        use_sde: bool = False,
        log_std_init: float = 0.0,
        full_std: bool = True,
        use_expln: bool = False,
        squash_output: bool = False,
        features_extractor_class: type[BaseFeaturesExtractor] = CombinedExtractor,
        features_extractor_kwargs: Optional[dict[str, Any]] = None,
        share_features_extractor: bool = True,
        normalize_images: bool = True,
        optimizer_class: type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[dict[str, Any]] = None,
    ):
        # Forward everything by keyword for readability/robustness.
        super().__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            ortho_init=ortho_init,
            use_sde=use_sde,
            log_std_init=log_std_init,
            full_std=full_std,
            use_expln=use_expln,
            squash_output=squash_output,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            share_features_extractor=share_features_extractor,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
        )
class ContinuousCritic(BaseModel):
    """
    Critic network(s) for DDPG/SAC/TD3, modelling the action-state value
    function Q(s, a).

    Unlike A2C/PPO critics, the continuous action is concatenated with the
    extracted state features and fed through each Q-network, which outputs a
    single scalar. Several Q-networks (two by default) are built so that
    algorithms such as SAC/TD3 can use clipped Q-learning to reduce
    overestimation (cf TD3 paper).

    :param observation_space: Observation space
    :param action_space: Action space
    :param net_arch: Network architecture
    :param features_extractor: Network to extract features
        (a CNN when using images, a nn.Flatten() layer otherwise)
    :param features_dim: Number of features
    :param activation_fn: Activation function
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param n_critics: Number of critic networks to create.
    :param share_features_extractor: Whether the features extractor is shared
        between the actor and the critic (this saves computation time)
    """

    features_extractor: BaseFeaturesExtractor

    def __init__(
        self,
        observation_space: spaces.Space,
        action_space: spaces.Box,
        net_arch: list[int],
        features_extractor: BaseFeaturesExtractor,
        features_dim: int,
        activation_fn: type[nn.Module] = nn.ReLU,
        normalize_images: bool = True,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        super().__init__(
            observation_space,
            action_space,
            features_extractor=features_extractor,
            normalize_images=normalize_images,
        )

        self.share_features_extractor = share_features_extractor
        self.n_critics = n_critics
        self.q_networks: list[nn.Module] = []
        # Each critic is an independent MLP mapping (features, action) -> scalar.
        q_input_dim = features_dim + get_action_dim(self.action_space)
        for q_idx in range(n_critics):
            q_net = nn.Sequential(*create_mlp(q_input_dim, 1, net_arch, activation_fn))
            self.add_module(f"qf{q_idx}", q_net)
            self.q_networks.append(q_net)

    def forward(self, obs: th.Tensor, actions: th.Tensor) -> tuple[th.Tensor, ...]:
        # A shared features extractor is trained through the actor loss only,
        # so gradients are blocked here in that case.
        with th.set_grad_enabled(not self.share_features_extractor):
            features = self.extract_features(obs, self.features_extractor)
        q_input = th.cat([features, actions], dim=1)
        return tuple(q_net(q_input) for q_net in self.q_networks)

    def q1_forward(self, obs: th.Tensor, actions: th.Tensor) -> th.Tensor:
        """
        Predict the Q-value with the first critic network only.

        Cheaper than :meth:`forward` when a single estimate suffices
        (e.g. when updating the policy in TD3).
        """
        with th.no_grad():
            features = self.extract_features(obs, self.features_extractor)
        return self.q_networks[0](th.cat([features, actions], dim=1))
|
DLR-RMREPO_NAMEstable-baselines3PATH_START.@stable-baselines3_extracted@stable-baselines3-master@stable_baselines3@common@policies.py@.PATH_END.py
|
{
"filename": "nm.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/experimental/sparse/nm.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""N:M-sparsity associated primitives."""
from jax._src import core
from jax._src import dispatch
from jax._src.lax.lax import DotDimensionNumbers
from jax._src.lib import gpu_sparse
from jax._src.lib.mlir.dialects import mhlo
from jax._src.typing import Array, DTypeLike
from jax.interpreters import mlir
import jax.numpy as jnp
import numpy as np
# --------------------------------------------------------------------
# nm_spmm

# JAX primitive implementing a dot product where one operand is N:M sparse.
nm_spmm_p = core.Primitive("sparse_dense_matmul")

# Input dtypes accepted by the sparse dot kernel.
_supported_input_types = (jnp.int8, jnp.int16, jnp.float16, jnp.bfloat16)
# Output/accumulation dtypes accepted by the sparse dot kernel.
_supported_output_types = (jnp.bfloat16, jnp.float32)
def nm_spmm(
    lhs: Array,
    rhs: Array,
    metadata: Array,
    dimension_numbers: DotDimensionNumbers = (((1,), (0,)), (tuple(), tuple())),
    sparse_operand_idx: int = 0,
    output_dtype: DTypeLike = jnp.bfloat16,
) -> Array:
  """Dot operation in which one operand carries N:M structured sparsity.

  Args:
    lhs: An ndarray (first dot operand).
    rhs: An ndarray (second dot operand).
    metadata: Structured-sparsity metadata for the contracting dimension of
      the sparse operand. For 2:4 sparsity each metadata element packs (N=2)
      two-bit index values per (M=4) element group.
    dimension_numbers: a tuple of tuples of the form ``((lhs_contracting_dims,
      rhs_contracting_dims), (lhs_batch_dims, rhs_batch_dims))``.
    sparse_operand_idx: which operand is sparse (0 for lhs, 1 for rhs).
    output_dtype: dtype of the result.

  Returns:
    A dense ndarray containing the result of the contraction.
  """
  bind_params = dict(
      dimension_numbers=dimension_numbers,
      sparse_operand_idx=sparse_operand_idx,
      output_dtype=output_dtype,
  )
  return nm_spmm_p.bind(lhs, rhs, metadata, **bind_params)
def _calc_groups_per_element(n, m):
group_bits = n * (m.bit_length() - 1) # 4 bits per group for 2:4
return 16 // group_bits
def _validate_dnums(rank, contract, batch, name):
non_contract = tuple(sorted(set(range(rank)) - set(contract + batch)))
if sorted(non_contract + contract + batch) != list(range(rank)):
raise TypeError(f"Incorrect dimension numbers for {name}")
return non_contract
def _validate_metadata(lhs, rhs, metadata, dimension_numbers, index, n=2, m=4):
assert index in (0, 1)
size_factor = n * _calc_groups_per_element(n, m)
sparse = [lhs, rhs][index]
sparse_contract = dimension_numbers[0][index]
if metadata.dtype != np.uint16:
raise TypeError(f"Metadata must be uint16, got {metadata.dtype}")
if sparse_contract[0] != sparse.ndim - 1:
raise TypeError("Contracting dimension must be the minor one")
if metadata.shape[:-1] != sparse.shape[:-1]:
raise TypeError(
"Metadata shape must match the operand shape (except for the"
" contracting dimension)"
)
if metadata.shape[-1] * size_factor != sparse.shape[-1]:
raise TypeError(
f"Metadata must be exactly {size_factor} times less than the"
f" contracting dimension for {n}:{m} structured sparsity (expected"
f" {sparse.shape[-1] // size_factor}, got {metadata.shape[-1]})"
)
if sparse.shape[-1] % size_factor != 0:
raise NotImplementedError("Metadata with padding is not supported")
dense = [lhs, rhs][1 - index]
dense_contract = dimension_numbers[0][1 - index]
a, b = sparse.shape[sparse_contract[0]], dense.shape[dense_contract[0]]
if n * b != m * a:
raise TypeError(
f"Contracting dimension sizes should have {n}:{m} ratio, got {a}:{b}"
)
def _infer_result_shape(lhs, rhs, dimension_numbers):
((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)) = dimension_numbers
if len(lhs_contract) != 1 or len(rhs_contract) != 1:
raise TypeError("Only single contracting dimension is supported")
lhs_dims = _validate_dnums(lhs.ndim, lhs_contract, lhs_batch, "lhs")
rhs_dims = _validate_dnums(rhs.ndim, rhs_contract, rhs_batch, "rhs")
if len(lhs_dims) != 1 or len(rhs_dims) != 1:
raise TypeError("Only single non-contracting dimension is supported")
batch = [lhs.shape[i] for i in lhs_batch]
if batch != [rhs.shape[i] for i in rhs_batch]:
raise TypeError("Batch dimension sizes do not match")
return tuple(batch + [lhs.shape[lhs_dims[0]], rhs.shape[rhs_dims[0]]])
def _nm_spmm_default_lowering(*_args, **_kwargs):
raise NotImplementedError("Sparse N:M matmul is only implemented on GPU")
def _nm_spmm_gpu_lowering(
    ctx,
    lhs,
    rhs,
    metadata,
    *,
    dimension_numbers,
    sparse_operand_idx,
    output_dtype,
):
  """Lower ``nm_spmm`` to an MHLO ``SparseDotOp`` (registered for CUDA/ROCm).

  ``output_dtype`` is unused here: the result type is taken from the abstract
  value in ``ctx.avals_out``, which already carries it.
  """
  assert sparse_operand_idx in (0, 1)
  # 2:4 sparsity descriptor attached to the contracting dimension of the
  # sparse operand (n/m are hard-wired to 2:4 here).
  sparsity_descriptor = mhlo.SparsityDescriptor.get(
      dimension=dimension_numbers[0][sparse_operand_idx][0], n=2, m=4
  )
  # NOTE(review): when sparse_operand_idx == 1 the lhs/rhs dimension-number
  # slots below are swapped while the lhs/rhs operands themselves are not —
  # confirm this matches SparseDotOp's expectations.
  dot_dnums = mhlo.DotDimensionNumbers.get(
      lhs_batching_dimensions=dimension_numbers[1][sparse_operand_idx],
      rhs_batching_dimensions=dimension_numbers[1][1 - sparse_operand_idx],
      lhs_contracting_dimensions=dimension_numbers[0][sparse_operand_idx],
      rhs_contracting_dimensions=dimension_numbers[0][1 - sparse_operand_idx],
  )
  dot_type = ctx.avals_out[0]
  # Attach the descriptor to whichever side is sparse.
  key = ["lhs_sparsity", "rhs_sparsity"][sparse_operand_idx]
  kwargs = {key: sparsity_descriptor}
  op = mhlo.SparseDotOp(
      mlir.aval_to_ir_type(dot_type), lhs, rhs, [metadata], dot_dnums, **kwargs
  )
  return op.results
@nm_spmm_p.def_abstract_eval
def _nm_spmm_abstract_eval(
    lhs, rhs, metadata, *, dimension_numbers, sparse_operand_idx, output_dtype
):
  """Shape/dtype inference for ``nm_spmm``; also validates the metadata."""
  for operand_name, operand in (("lhs", lhs), ("rhs", rhs)):
    if operand.dtype not in _supported_input_types:
      raise TypeError(f"Unsupported {operand_name} input type: {operand.dtype}")
  if output_dtype not in _supported_output_types:
    raise TypeError(f"Unsupported output type: {output_dtype}")

  res_shape = _infer_result_shape(lhs, rhs, dimension_numbers)
  _validate_metadata(lhs, rhs, metadata, dimension_numbers, sparse_operand_idx)
  return core.ShapedArray(res_shape, output_dtype)
# Register the default lowering (which raises) everywhere, then override it
# with the real GPU lowering on platforms that support sparse kernels.
mlir.register_lowering(nm_spmm_p, _nm_spmm_default_lowering)
dispatch.simple_impl(nm_spmm_p)
if gpu_sparse.cuda_is_supported:
    mlir.register_lowering(nm_spmm_p, _nm_spmm_gpu_lowering, platform="cuda")
if gpu_sparse.rocm_is_supported:
    mlir.register_lowering(nm_spmm_p, _nm_spmm_gpu_lowering, platform="rocm")
# --------------------------------------------------------------------
# nm_pack

# JAX primitive that packs a boolean N:M sparsity mask into uint16 metadata.
nm_pack_p = core.Primitive("sparse_pack_nm")
def nm_pack(mask: Array, n=2, m=4) -> Array:
  """Generate the metadata tensor for an N:M sparsity mask.

  Args:
    mask: Boolean predicates grouped along the minor dimension; every group
      of ``m`` entries must contain exactly ``n`` True values marking the
      data elements that are kept.
    n: Number of non-zero elements in a group.
    m: Group size.

  Returns:
    An ndarray of uint16 metadata words encoding the kept positions.
  """
  return nm_pack_p.bind(mask, n=n, m=m)
def _compress(data, n, m, k):
result = []
expected = n * (k // m)
for i in range(0, len(data), k):
index = tuple(jnp.nonzero(data[i : i + k], size=expected)[0] % m)
value = sum(j * pow(m, i) for i, j in enumerate(index))
result.append(value)
return jnp.array(result, dtype=np.uint16)
@nm_pack_p.def_impl
def _nm_pack_impl(mask, *, n, m):
  """Reference (pure JAX) implementation: compress each minor-axis row."""
  row_span = m * _calc_groups_per_element(n, m)
  return jnp.apply_along_axis(lambda row: _compress(row, n, m, row_span), -1, mask)
@nm_pack_p.def_abstract_eval
def _nm_pack_abstract_eval(mask, *, n, m):
  """Shape/dtype inference for ``nm_pack``: shrink the minor dimension."""
  # One uint16 output element covers ``size_factor`` mask entries.
  size_factor = m * _calc_groups_per_element(n, m)
  if mask.dtype != bool:
    raise TypeError(f"Mask should be bool, got {mask.dtype}")
  if mask.shape[-1] % size_factor != 0:
    raise TypeError(
        f"Inner dimension size should be divisible by {size_factor}, got"
        f" {mask.shape}"
    )
  res_shape = mask.shape[:-1] + (mask.shape[-1] // size_factor,)
  return core.ShapedArray(res_shape, np.uint16)
# ``nm_pack`` has no dedicated kernel: lower it through its Python impl.
_nm_pack_lowering = mlir.lower_fun(_nm_pack_impl, multiple_results=False)
mlir.register_lowering(nm_pack_p, _nm_pack_lowering)
dispatch.simple_impl(nm_pack_p)
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@experimental@sparse@nm.py@.PATH_END.py
|
{
"filename": "example_HelicalTangled_jet.ipynb",
"repo_name": "me-manu/gammaALPs",
"repo_path": "gammaALPs_extracted/gammaALPs-master/notebooks/example_HelicalTangled_jet.ipynb",
"type": "Jupyter Notebook"
}
|
This example shows the HelicalTangled magnetic field class, for a blazar jet magnetic field comprised of a helical and a tangled component. Values used for the jet properties come from the best fit of the Potter & Cotter model: Potter & Cotter 2015, https://ui.adsabs.harvard.edu/abs/2015MNRAS.453.4070P/
## Imports
```python
from gammaALPs.core import Source, ALP, ModuleList
from gammaALPs.base import environs, transfer
import numpy as np
import matplotlib.pyplot as plt
from ebltable.tau_from_model import OptDepth
from astropy import constants as c
from matplotlib.patheffects import withStroke
from astropy import units as u
from glob import glob
from IPython.display import Image, display
import time
```
```python
effect = dict(path_effects=[withStroke(foreground="w", linewidth=2)])
```
```python
%matplotlib inline
```
## Define the source
We will use Markarian 501 as the example.
```python
src = Source(z = 0.034 , ra = '16h53m52.2s', dec = '+39d45m37s',
) # Mrk501
print (src.z)
print (src.ra, src.dec)
print (src.l, src.b)
```
0.034
253.4675 39.76027777777778
63.60017233364852 38.859218799553474
## Init the module list
Set up the module list with arbitrary ALP parameters. Energy range is roughly CTA energies.
```python
EGeV = np.logspace(0.,5.,2000)
```
Unpolarised initial beam.
```python
pin = np.diag((1.,1.,0.)) * 0.5
```
```python
m = ModuleList(ALP(m = 1, g = 2), src, pin = pin, EGeV = EGeV, seed = 0, log_level="info")
```
Add the jet module. We will ignore any other magnetic fields. Here we use a field with 70% magnetic energy density in the tangled component, and a helical component which is a purely toroidal field. Values come from Potter & Cotter best fit. For the tangled field coherence length, we use a uniform distribution between 0.1 and 1. times the jet width at each point. This is chosen with the "l_tcor='jetwidth'" and "jwf_dist = 'Uniform'" options. l_tcor can also be given as a constant in parsecs, or the keyword 'jetdom' can be used to use the jet field domains as the tangled domains.
```python
m.add_propagation("JetHelicalTangled",
0, # position of module counted from the source.
ndom = 400,
ft = 0.7, # fraction of magnetic field energy density in tangled field
Bt_exp = -1., # exponent of the transverse component of the helical field
r_T = 0.3, # radius at which helical field becomes toroidal in pc
r0 = 0.3, # radius where B field is equal to b0 in pc
B0 = 0.8, # Bfield strength in G
g0 = 9., # jet Lorentz factor at r0
n0 = 1.e4, # electron density at r0 in cm**-3
rjet = 98.3e+3, # jet length in pc
rvhe = 0.3, # distance of gamma-ray emission region from BH in pc
alpha = 1.68, # power-law index of electron energy distribution function
l_tcor='jetwidth', # tangled field coherence average length in pc if a constant, or keyword
#jwf = 1., # jet width factor used when calculating l_tcor = jwf*jetwidth
jwf_dist = 'Uniform' # type of distribution for jet width factors (jwf)
)
```
[0;36m jet.py:[0;35m 559[0;0m --- [1;31mWARNING[1;0m: Not resolving tangled field: min z step is 0.009857915102843562pc but min tangled length is 0.0010982609912778551 pc
[0;36m jet.py:[0;35m 560[0;0m --- [1;31mWARNING[1;0m: # of z doms is 399 but # tangled doms is 176
[0;36m jet.py:[0;35m 574[0;0m --- [1;36mINFO[1;0m: rerunning with 701 domains. new min z step is 0.00027456524781943603 pc
[0;36menvirons.py:[0;35m 798[0;0m --- [1;36mINFO[1;0m: Using inputted chi
The default number of log-spaced field domains is 400 (this is enough to resolve the field for ALPs). In this case, however, this was not enough to resolve the tangled field. Therefore the number of domains was increased until the resolution was right and the module was re-run with 701 field domains, making sure that the edges line up with the 176 tangled component domains.
### Peek at the electron density and the magnetic field
The overall shape of the magnetic field strength in the jet comes from the Potter & Cotter model
```python
plt.loglog(m.modules[0]._r, m.modules[0].B)
plt.grid(True)
plt.xlabel('Distance from BH [pc]')
plt.ylabel('Transverse B field [$\mu$G]')
```
Text(0, 0.5, 'Transverse B field [$\\mu$G]')

The electron density used to propagate the ALP-photon beam is not the actual electron density, but rather the effective electron density of a cold plasma that would give the same effective photon mass as the non-thermal plasma of the jet. That is why it appears lower here than the actual electron density inputted.
```python
plt.loglog(m.modules["JetHelicalTangled"]._r, m.modules["JetHelicalTangled"].nel, color = plt.cm.tab10(0.1), ls = '--')
plt.grid(True)
plt.ylabel('Effective Electron dentity [cm$^{-3}$]')
```
Text(0, 0.5, 'Effective Electron dentity [cm$^{-3}$]')

### Compute the photon-ALP mixing probability
```python
m.alp.m = 40.
m.alp.g = 0.6
```
```python
px,py,pa = m.run(multiprocess=2)
```
### Plot the output
```python
pgg = px + py # the total photon survival probability
print (pgg.shape)
print (px[0,-1], py[0,-1])
for p in pgg: # plot all realizations
plt.plot(m.EGeV, p )
plt.grid(True, lw = 0.2)
plt.grid(True, which = 'minor', axis = 'y', lw = 0.2)
plt.xlabel('Energy (GeV)')
plt.ylabel(r'Photon survival probability')
plt.gca().set_xscale('log')
plt.annotate(r'$m_a = {0:.1f}\,\mathrm{{neV}}, g_{{a\gamma}} = {1:.2f} \times 10^{{-11}}\,\mathrm{{GeV}}^{{-1}}$'.format(m.alp.m,m.alp.g),
xy = (0.05,0.1), size = 'large', xycoords = 'axes fraction', ha = 'left',**effect)
# plt.axvline(transfer.EminGeV(m.alp.m, m.alp.g, m.modules[0].nel[0], m.modules[0].B[0]), ls = '--',label='Emin Base')
plt.axvline(transfer.EminGeV(m.alp.m, m.alp.g, m.modules[-1].nel[-1], m.modules[-1].B[-1]), ls = ':',label='Emin Jet End')
plt.axvline(transfer.EmaxGeV(m.alp.g, m.modules[0].B[0]), ls = '-.',color='red',label='Emax Jet Base')
# plt.axvline(transfer.EmaxGeV(m.alp.g, m.modules[-1].B[-1]), ls = '-',color='red',label='Emax End')
plt.gca().set_ylim(0.5,1.)
plt.gca().set_xlim(min(m.EGeV),max(m.EGeV))
plt.gca().set_yscale('log')
plt.subplots_adjust(left = 0.2)
plt.legend(loc = 'upper left', fontsize = 'medium')
```
(1, 2000)
0.38052400646502554 0.4517655942887826
<matplotlib.legend.Legend at 0x1842d12c90>

```python
```
|
me-manuREPO_NAMEgammaALPsPATH_START.@gammaALPs_extracted@gammaALPs-master@notebooks@example_HelicalTangled_jet.ipynb@.PATH_END.py
|
{
"filename": "typing.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/io/typing.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Type annotations for ``astropy.io``.
These are type annotations for I/O-related functions and classes. Some of the type
objects can also be used as runtime-checkable :class:`~typing.Protocol` objects.
"""
from __future__ import annotations
__all__ = ["PathLike", "ReadableFileLike", "WriteableFileLike"]
import os
from typing import TYPE_CHECKING, Protocol, TypeVar, runtime_checkable
if TYPE_CHECKING:
from typing import TypeAlias
_T_co = TypeVar("_T_co", covariant=True)
_T_contra = TypeVar("_T_contra", contravariant=True)
PathLike: TypeAlias = str | bytes | os.PathLike
"""Type alias for a path-like object.
This is a union of :class:`str`, :class:`bytes`, and :class:`~os.PathLike`.
"""
@runtime_checkable
class ReadableFileLike(Protocol[_T_co]):
    """File-like object providing a ``read`` method.

    As a runtime-checkable :class:`~typing.Protocol`, this class can both
    annotate readable file-like objects and be used with :func:`isinstance`.
    See :func:`~typing.runtime_checkable` for the semantics (and limits) of
    such structural runtime checks.
    """

    def read(self) -> _T_co:
        ...
@runtime_checkable
class WriteableFileLike(Protocol[_T_contra]):
    """File-like object providing a ``write`` method.

    As a runtime-checkable :class:`~typing.Protocol`, this class can both
    annotate writeable file-like objects and be used with :func:`isinstance`.
    See :func:`~typing.runtime_checkable` for the semantics (and limits) of
    such structural runtime checks.
    """

    def write(self, data: _T_contra) -> None:
        ...
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@io@typing.py@.PATH_END.py
|
{
"filename": "test_determine_distortion.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/gnirs/tests/crossdispersed/test_determine_distortion.py",
"type": "Python"
}
|
#!/usr/bin/env python3
"""
Test related to GNIRS Cross-dispersed spectroscopy arc primitives.
Notes
-----
- The `indirect` argument on `@pytest.mark.parametrize` fixture forces the
`ad` and `ad_ref` fixtures to be called and the AstroData object returned.
"""
import numpy as np
import os
import pytest
import astrodata, gemini_instruments
import geminidr
from geminidr.gnirs.primitives_gnirs_crossdispersed import GNIRSCrossDispersed
from recipe_system.testing import ref_ad_factory
# Test parameters -------------------------------------------------------------
# Parameters held fixed for every determineDistortion() call in this module,
# so each regression case varies only the input file.
fixed_parameters_for_determine_distortion = {
    "fwidth": None,
    "id_only": False,
    "max_missed": 5,
    "max_shift": 0.05,
    "min_snr": 5.,
    "nsum": 10,
    "spectral_order": 4,
    "min_line_length": 0.,
    "debug_reject_bad": True
}

# (Input file, extra params) pairs fed to the parametrized regression test.
# Each file is a GNIRS cross-dispersed arc processed up to
# wavelengthSolutionDetermined; the per-case params dict is currently empty.
input_pars = [
    # Process Arcs: GNIRS
    # (Input File, params)
    # 10 l/mm Longblue SXD
    ('N20170511S0269_wavelengthSolutionDetermined.fits', dict()),
    # 10 l/mm Longblue LXD
    ('N20130821S0301_wavelengthSolutionDetermined.fits', dict()),
    # 32 l/mm Shortblue SXD
    ('N20210129S0324_wavelengthSolutionDetermined.fits', dict()),
    # 111 l/mm Shortblue SXD
    ('N20231030S0034_wavelengthSolutionDetermined.fits', dict()),
    # 32 l/mm Longblue LXD
    ('N20201223S0216_wavelengthSolutionDetermined.fits', dict()),
    # 32 l/mm Shortblue SXD
    ('S20060507S0070_wavelengthSolutionDetermined.fits', dict()),
    # 111 l/mm Shortblue SXD
    ('S20060311S0321_wavelengthSolutionDetermined.fits', dict()),
]
# Tests -----------------------------------------------------------------------
@pytest.mark.gnirsxd
@pytest.mark.preprocessed_data
@pytest.mark.regression
@pytest.mark.parametrize("ad,params", input_pars, indirect=['ad'])
def test_regression_for_determine_distortion_using_wcs(
        ad, params, change_working_dir, ref_ad_factory):
    """
    Regression test: run determineDistortion on a pre-processed GNIRS XD arc
    and compare the fitted distortion model against the stored reference.

    NOTE(review): ``params`` is not referenced in the body; it appears to be
    kept only so the (file, params) tuples in ``input_pars`` unpack cleanly.
    """
    with change_working_dir():
        p = GNIRSCrossDispersed([ad])
        p.determineDistortion(**fixed_parameters_for_determine_distortion)
        distortion_determined_ad = p.writeOutputs().pop()

    ref_ad = ref_ad_factory(distortion_determined_ad.filename)
    # The distortion map is the third step of the "pixels" ->
    # "distortion_corrected" transform in the output WCS.
    model = distortion_determined_ad[0].wcs.get_transform(
        "pixels", "distortion_corrected")[2]
    ref_model = ref_ad[0].wcs.get_transform("pixels", "distortion_corrected")[2]

    # Otherwise we're doing something wrong!
    assert model.__class__.__name__ == ref_model.__class__.__name__ == "Chebyshev2D"

    # Compare the evaluated models over the full frame.
    X, Y = np.mgrid[:ad[0].shape[0], :ad[0].shape[1]]
    np.testing.assert_allclose(model(X, Y), ref_model(X, Y), atol=0.05)
# Local Fixtures and Helper Functions ------------------------------------------
@pytest.fixture(scope='function')
def ad(path_to_inputs, request):
    """
    Returns the pre-processed spectrum file.

    Parameters
    ----------
    path_to_inputs : pytest.fixture
        Fixture defined in :mod:`astrodata.testing` with the path to the
        pre-processed input file.
    request : pytest.fixture
        PyTest built-in fixture containing information about parent test.

    Returns
    -------
    AstroData
        Input spectrum processed up to right before the `distortionDetermine`
        primitive.
    """
    path = os.path.join(path_to_inputs, request.param)
    # Fail fast with an explicit error when the cached input is missing.
    if not os.path.exists(path):
        raise FileNotFoundError(path)
    return astrodata.open(path)
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@gnirs@tests@crossdispersed@test_determine_distortion.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/image/legendgrouptitle/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Import eagerly on Python < 3.7 (no module-level __getattr__, PEP 562) and
# for static type checkers; otherwise install a lazy loader so the submodule
# is only imported on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._font import Font
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@image@legendgrouptitle@__init__.py@.PATH_END.py
|
{
"filename": "Surface.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/functional/Surfaces/Surface.py",
"type": "Python"
}
|
#ATS:t1 = test( SELF, "--CRKSPH True --cfl 0.25 --clearDirectories True --steps=2 --nx=50 --ny=50 --checkAnswer=True --detectSurfaces=True --detectThreshold=0.99 --sweepAngle=pi/4.0 --detectRange=2.0", label="Surface Detection Test -- 2-D (serial)")
from math import *
import mpi
import os, sys, shutil
from Spheral2d import *
from SpheralTestUtilities import *
from findLastRestart import *
import SpheralPointmeshSiloDump
from GenerateNodeDistribution2d import *
title("Surface Detection Test")
class Rejecter(object):
    """
    Node-generator rejecter that carves a circular hole of the given radius
    centered on the origin: only generator points with r > radius survive.

    Instances are called with parallel lists of positions (x, y), masses and
    H tensors, and return the filtered parallel lists.
    """

    def __init__(self, radius):
        # Points with sqrt(x^2 + y^2) <= radius are discarded.
        self.radius = radius

    def __call__(self, x, y, m, H):
        """
        Filter the parallel lists (x, y, m, H), keeping only entries whose
        position lies outside the hole.

        Returns the tuple (nX, nY, nM, nH) of surviving values.
        """
        nX, nY, nM, nH = [], [], [], []
        # zip iterates the four parallel lists together -- clearer than
        # indexing each of them with range(len(x)).
        for xi, yi, mi, Hi in zip(x, y, m, H):
            if sqrt(xi*xi + yi*yi) > self.radius:
                nX.append(xi)
                nY.append(yi)
                nM.append(mi)
                nH.append(Hi)
        return nX, nY, nM, nH
class dSurface(object):
    """
    Periodic diagnostic that writes per-node surface-detection statistics to a
    text file (overwritten on each invocation). Instances are invoked as
    ``callback(cycle, time, dt)`` by the integrator's periodic work machinery.

    Fixes: the output file handles were previously opened and never closed;
    both writers now use ``with`` blocks.
    """

    def __init__(self, nodes, db, Kern, Bf, Sf, hydro, file):
        self.nodes = nodes   # NodeList to diagnose
        self.db = db         # DataBase (provides the connectivity map)
        self.Kern = Kern     # interpolation kernel
        self.Bf = Bf         # unused here; kept for interface compatibility
        self.Sf = Sf         # unused here; kept for interface compatibility
        self.hydro = hydro   # hydro package (volumes and m0/m1 moments)
        self.file = file     # output file name

    def __call__(self, cycle, time, dt):
        # Only the moment diagnostic is active; the renormalization-matrix
        # variant is kept for manual use.
        #self.renormMat()
        self.momentNorm()

    def renormMat(self):
        """Write min |eigenvalue| of the inverted renormalization matrix per node."""
        with open(self.file, 'w') as f:
            f.write("i\tSi\txi\n")
            self.db.updateConnectivityMap(True)
            cm = self.db.connectivityMap()
            for i in range(self.nodes.numInternalNodes):
                xi = self.nodes.positions()[i]
                neighbors = cm.connectivityForNode(self.nodes, i)
                # NOTE(review): ``Tensor.zero`` looks like a shared constant;
                # if ``+=`` mutates in place this would corrupt it -- confirm
                # the binding returns a fresh tensor per addition.
                Bi = Tensor.zero
                for j in neighbors[0]:
                    xj = self.nodes.positions()[j]
                    xij = xj - xi
                    Hj = self.nodes.Hfield()[j]
                    Vj = self.hydro.volume()[0][j]  # this could be done better
                    gWj = Hj*xij.unitVector()*self.Kern.gradValue((Hj*xij).magnitude(), Hj.Determinant())
                    Bi += gWj.dyad(xij)*Vj
                Bi = Bi.Inverse()
                Ei = Bi.eigenValues()
                Si = min(abs(Ei[0]), abs(Ei[1]))
                f.write("%d\t%f\t%f\n" % (i, Si, xi.magnitude()))

    def momentNorm(self):
        """Write the hydro zeroth (m0) and first (m1) moments per node."""
        with open(self.file, 'w') as f:
            f.write("i\tSi\txi\tSSi\n")
            for i in range(self.nodes.numInternalNodes):
                xi = self.nodes.positions()[i]
                m0i = self.hydro.m0()[0][i]
                m1i = self.hydro.m1()[0][i]
                f.write("%d\t%f\t%f\t%f\n" % (i, m0i, xi.magnitude(), m1i.magnitude()))
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
# commandLine() promotes each keyword below to a module-level variable and
# allows each to be overridden on the command line (see the ATS invocation at
# the top of this file).  NOTE: 'filter' shadows the builtin from here on.
commandLine(lattice = True,
            nx = 50,                 # lattice resolution in x
            ny = 50,                 # lattice resolution in y
            rmin = 0.0,
            rmax = 1.0,
            nPerh = 1.01,            # nodes per smoothing scale
            rho0 = 1.0,              # initial mass density
            eps0 = 0.0,              # initial specific thermal energy
            gamma = 5.0/3.0,         # gamma-law EOS exponent
            mu = 1.0,                # mean molecular weight
            rhomin = 1.0e-8,
            holeRadius = 0.5,        # radius of the evacuated central hole (see Rejecter)
            ASPH = False,
            CRKSPH = True,
            SPH = True,
            XSPH = False,
            filter = 0,
            KernelConstructor = NBSplineKernel,
            order = 7,

            # Hydro
            Qconstructor = MonaghanGingoldViscosity2d,
            correctionOrder = LinearOrder,
            Cl = 1.0,                # linear artificial-viscosity coefficient
            Cq = 2.0,                # quadratic artificial-viscosity coefficient
            Qlimiter = False,
            balsaraCorrection = False,
            epsilon2 = 1e-4,
            negligibleSoundSpeed = 1e-5,
            csMultiplier = 0.1,
            hmin = 0.004,
            hmax = 10.0,
            hminratio = 0.1,
            compatibleEnergy = False,
            gradhCorrection = False,
            HEvolution = IdealH,
            sumForMassDensity = RigorousSumDensity,
            densityUpdate = RigorousSumDensity,
            HUpdate = IdealH,
            linearInExpansion = False,
            volumeType = RKVoronoiVolume,

            # Timestep constraints
            cfl = 0.5,
            deltaPhi = 0.01,
            domainIndependent = False,

            # Integrator
            IntegratorConstructor = CheapSynchronousRK2Integrator,
            goalTime = 1.0,
            dt = 0.0001,
            dtMin = 1.0e-5,
            dtMax = 1.0e5,
            dtGrowth = 2.0,
            maxSteps = None,
            steps = None,
            statsStep = 10,
            redistributeStep = 500,
            restartStep = 500,
            restoreCycle = None,
            smoothIters = 0,
            rigorousBoundaries = True,
            dtverbose = False,

            # Output / visualization
            vizCycle = 1,
            vizTime = 1.0e5,
            vizMethod = SpheralPointmeshSiloDump.dumpPhysicsState,
            clearDirectories = False,
            renormFile = "renorm.txt",

            # Surface-detection parameters (forwarded to the CRKSPH hydro)
            detectSurfaces = False,
            detectRange = 2.0,
            sweepAngle = pi/4.0,
            detectThreshold = 0.99,
            checkAnswer = False,
            )
# Select the hydro implementation (and matching viscosity) from the flags.
if CRKSPH:
    Qconstructor = LimitedMonaghanGingoldViscosity2d
    if ASPH:
        HydroConstructor = ACRKSPHHydro
    else:
        HydroConstructor = CRKSPHHydro
else:
    if ASPH:
        HydroConstructor = ASPHHydro
    else:
        HydroConstructor = SPHHydro

# Output/restart/viz directory layout encodes the key run parameters.
dataDir = "surface-%i-%i" % (nx,ny)
dataDir = os.path.join(dataDir, "CRK=%s-nPerh=%f" % (CRKSPH,nPerh))
dataDir = os.path.join(dataDir, "Cl=%f-Cq=%f" % (Cl,Cq))
restartBaseName = "%s/SurfaceTest-%i-%i" % (dataDir,nx,ny)
vizDir = os.path.join(dataDir, "visit")
vizBaseName = "SurfaceTest"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist.  If not, create them.
#-------------------------------------------------------------------------------
import os, sys   # NOTE(review): redundant — both are imported at the top of the file.
# Only rank 0 touches the filesystem; all ranks synchronize at the barrier.
if mpi.rank == 0:
    if clearDirectories and os.path.exists(dataDir):
        shutil.rmtree(dataDir)
    if not os.path.exists(dataDir):
        os.makedirs(dataDir)
    if not os.path.exists(vizDir):
        os.makedirs(vizDir)
mpi.barrier()

#-------------------------------------------------------------------------------
# If we're restarting, find the set of most recent restart files.
#-------------------------------------------------------------------------------
if restoreCycle is None:
    restoreCycle = findLastRestart(restartBaseName)

#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eos = GammaLawGasMKS(gamma, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
if KernelConstructor==NBSplineKernel:
    Wbase = NBSplineKernel(order)
else:
    Wbase = KernelConstructor()
# NOTE(review): Wbase appears unused below — WT/WTPi are built from
# KernelConstructor(order) directly, which passes 'order' even for kernel
# types whose constructors take no argument.  Confirm intended.
WT = TableKernel(KernelConstructor(order), 1000)
WTPi = TableKernel(KernelConstructor(order), 1000)
output('WT')
output('WTPi')
kernelExtent = WT.kernelExtent
output("WT")   # NOTE(review): duplicates the output('WT') above.
# Single fluid node list holding the punctured lattice of gas nodes.
nodes1 = makeFluidNodeList("nodes1", eos,
                           hmin = hmin,
                           hmax = hmax,
                           nPerh = nPerh,
                           kernelExtent = kernelExtent,
                           rhoMin = rhomin)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
pos = nodes1.positions()
vel = nodes1.velocity()
mass = nodes1.mass()
eps = nodes1.specificThermalEnergy()
H = nodes1.Hfield()

if restoreCycle is None:
    if lattice == True:
        xmin = (-1.0, -1.0)
        xmax = (1.0, 1.0)
        # Drop generator points inside the central hole (see Rejecter above).
        myRejecter = Rejecter(holeRadius)
        generator = GenerateNodeDistribution2d(nx, ny, rho0, "lattice",
                                               rmin = rmin,
                                               rmax = rmax,
                                               xmin = xmin,
                                               xmax = xmax,
                                               theta = 2*pi,
                                               nNodePerh = nPerh,
                                               SPH = (not ASPH),
                                               rejecter = myRejecter)

        # BUG FIX: the parallel branch previously imported the misspelled name
        # 'distribueNodes2d', leaving distributeNodes2d undefined (NameError)
        # whenever mpi.procs > 1.
        if mpi.procs > 1:
            from VoronoiDistributeNodes import distributeNodes2d
        else:
            from DistributeNodes import distributeNodes2d
        distributeNodes2d((nodes1, generator))
        output("mpi.reduce(nodes1.numInternalNodes, mpi.MIN)")
        output("mpi.reduce(nodes1.numInternalNodes, mpi.MAX)")
        output("mpi.reduce(nodes1.numInternalNodes, mpi.SUM)")

    # Seed the specific thermal energy.
    for nodeID in range(nodes1.numInternalNodes):
        eps[nodeID] = eps0
#-------------------------------------------------------------------------------
# Construct a DataBase to hold our node list
#-------------------------------------------------------------------------------
db = DataBase()
output("db")
output("db.appendNodeList(nodes1)")
output("db.numNodeLists")
output("db.numFluidNodeLists")

# Field lists used by the (currently disabled) dSurface diagnostics below.
Bf = db.newFluidTensorFieldList(Tensor.zero, "Normalization")
Sf = db.newFluidScalarFieldList(0.0, "Surface")

#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq, linearInExpansion)
q.epsilon2 = epsilon2
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
output("q")
output("q.Cl")
output("q.Cq")
output("q.epsilon2")
output("q.limiter")
output("q.balsaraShearCorrection")
output("q.linearInExpansion")
output("q.quadraticInExpansion")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
if CRKSPH:
    # The CRKSPH variant carries the surface-detection parameters under test.
    hydro = HydroConstructor(W = WT,
                             WPi = WTPi,
                             Q = q,
                             filter = filter,
                             cfl = cfl,
                             compatibleEnergyEvolution = compatibleEnergy,
                             XSPH = XSPH,
                             densityUpdate = densityUpdate,
                             correctionOrder = correctionOrder,
                             volumeType = volumeType,
                             HUpdate = HEvolution,
                             detectSurfaces = detectSurfaces,
                             detectThreshold = detectThreshold,
                             sweepAngle = sweepAngle,
                             detectRange = detectRange)
else:
    # NOTE(review): evolveTotalEnergy and correctVelocityGradient are not
    # defined by the commandLine() block above, so this branch raises a
    # NameError when run with --CRKSPH False — confirm and add defaults.
    hydro = HydroConstructor(W = WT,
                             Q = q,
                             cfl = cfl,
                             compatibleEnergyEvolution = compatibleEnergy,
                             evolveTotalEnergy = evolveTotalEnergy,
                             gradhCorrection = gradhCorrection,
                             correctVelocityGradient = correctVelocityGradient,
                             densityUpdate = densityUpdate,
                             XSPH = XSPH,
                             HUpdate = HEvolution)
output("hydro")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.cfl")
output("hydro.compatibleEnergyEvolution")
output("hydro.XSPH")
output("hydro.densityUpdate")
output("hydro.HEvolution")
packages = [hydro]
#-------------------------------------------------------------------------------
# Construct the surface detection periodic work function
#-------------------------------------------------------------------------------
#ds = detectSurface(nodes1,db,WT,Bf,Sf,hydro,renormFile)
#dsFreq = 1

#-------------------------------------------------------------------------------
# Construct a time integrator, and add the one physics package.
#-------------------------------------------------------------------------------
integrator = IntegratorConstructor(db)
for p in packages:
    integrator.appendPhysicsPackage(p)
integrator.lastDt = dt
if dtMin:
    integrator.dtMin = dtMin
if dtMax:
    integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.dtGrowth")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")

#-------------------------------------------------------------------------------
# Build the controller.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
                            statsStep = statsStep,
                            restartStep = restartStep,
                            restartBaseName = restartBaseName,
                            restoreCycle = restoreCycle,
                            vizMethod = vizMethod,
                            vizBaseName = "surface-test-%ix%i" % (nx, ny),
                            vizDir = vizDir,
                            vizStep = vizCycle,
                            vizTime = vizTime,
                            SPH = (not ASPH))
output("control")
#control.appendPeriodicWork(ds,dsFreq)

#-------------------------------------------------------------------------------
# Finally run the problem and plot the results.
#-------------------------------------------------------------------------------
if not steps is None:
    control.step(steps)
else:
    control.advance(goalTime,maxSteps)

# Regression check: the reference run flags exactly 212 surface points
# (matches the ATS invocation at the top of the file).
if checkAnswer:
    sp = hydro.surfacePoint()
    count = 0
    for i in range(nodes1.numInternalNodes):
        if sp[0][i] == 1:
            count += 1
    if not count == 212:
        raise ValueError("The surface detection algorithm failed!")
    else:
        print("Surface Detection PASSED.")
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@functional@Surfaces@Surface.py@.PATH_END.py
|
{
"filename": "test_nlargest.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/series/methods/test_nlargest.py",
"type": "Python"
}
|
"""
Note: for naming purposes, most tests are titled as e.g. "test_nlargest_foo"
but are implicitly also testing nsmallest_foo.
"""
import numpy as np
import pytest
import pandas as pd
from pandas import Series
import pandas._testing as tm
def assert_check_nselect_boundary(vals, dtype, method):
    """Shared helper for the 'test_boundary_{dtype}' tests.

    Builds a Series from *vals* with the given dtype, applies *method*
    ("nsmallest" or "nlargest") with n=3, and checks the result equals the
    positionally-expected slice of the original Series.
    """
    ser = Series(vals, dtype=dtype)
    idxr = range(3) if method == "nsmallest" else range(3, 0, -1)
    tm.assert_series_equal(getattr(ser, method)(3), ser.loc[idxr])
class TestSeriesNLargestNSmallest:
    """Behavioral tests for Series.nlargest/nsmallest across dtypes, edge
    values of n, duplicate handling ('keep'), and nullable/boolean dtypes."""

    @pytest.mark.parametrize(
        "r",
        [
            Series([3.0, 2, 1, 2, "5"], dtype="object"),
            Series([3.0, 2, 1, 2, 5], dtype="object"),
            # not supported on some archs
            # Series([3., 2, 1, 2, 5], dtype='complex256'),
            Series([3.0, 2, 1, 2, 5], dtype="complex128"),
            Series(list("abcde")),
            Series(list("abcde"), dtype="category"),
        ],
    )
    @pytest.mark.parametrize("method", ["nlargest", "nsmallest"])
    @pytest.mark.parametrize("arg", [2, 5, 0, -1])
    def test_nlargest_error(self, r, method, arg):
        # Unsupported dtypes (object/complex/category) must raise TypeError.
        dt = r.dtype
        msg = f"Cannot use method 'n(largest|smallest)' with dtype {dt}"
        with pytest.raises(TypeError, match=msg):
            getattr(r, method)(arg)

    @pytest.mark.parametrize(
        "data",
        [
            pd.to_datetime(["2003", "2002", "2001", "2002", "2005"]),
            pd.to_datetime(["2003", "2002", "2001", "2002", "2005"], utc=True),
            pd.to_timedelta(["3D", "2D", "1D", "2D", "5D"]),
            np.array([3, 2, 1, 2, 5], dtype="int8"),
            np.array([3, 2, 1, 2, 5], dtype="int16"),
            np.array([3, 2, 1, 2, 5], dtype="int32"),
            np.array([3, 2, 1, 2, 5], dtype="int64"),
            np.array([3, 2, 1, 2, 5], dtype="uint8"),
            np.array([3, 2, 1, 2, 5], dtype="uint16"),
            np.array([3, 2, 1, 2, 5], dtype="uint32"),
            np.array([3, 2, 1, 2, 5], dtype="uint64"),
            np.array([3, 2, 1, 2, 5], dtype="float32"),
            np.array([3, 2, 1, 2, 5], dtype="float64"),
        ],
    )
    def test_nsmallest_nlargest(self, data):
        # float, int, datetime64 (use i8), timedelta64 (same),
        # object that are numbers, object that are strings
        ser = Series(data)
        tm.assert_series_equal(ser.nsmallest(2), ser.iloc[[2, 1]])
        tm.assert_series_equal(ser.nsmallest(2, keep="last"), ser.iloc[[2, 3]])

        # n <= 0 yields an empty Series of the same dtype.
        empty = ser.iloc[0:0]
        tm.assert_series_equal(ser.nsmallest(0), empty)
        tm.assert_series_equal(ser.nsmallest(-1), empty)
        tm.assert_series_equal(ser.nlargest(0), empty)
        tm.assert_series_equal(ser.nlargest(-1), empty)

        # n >= len(ser) is clipped to the full, fully-sorted Series.
        tm.assert_series_equal(ser.nsmallest(len(ser)), ser.sort_values())
        tm.assert_series_equal(ser.nsmallest(len(ser) + 1), ser.sort_values())
        tm.assert_series_equal(ser.nlargest(len(ser)), ser.iloc[[4, 0, 1, 3, 2]])
        tm.assert_series_equal(ser.nlargest(len(ser) + 1), ser.iloc[[4, 0, 1, 3, 2]])

    def test_nlargest_misc(self):
        # NaNs are placed last regardless of direction.
        ser = Series([3.0, np.nan, 1, 2, 5])
        result = ser.nlargest()
        expected = ser.iloc[[4, 0, 3, 2, 1]]
        tm.assert_series_equal(result, expected)
        result = ser.nsmallest()
        expected = ser.iloc[[2, 3, 0, 4, 1]]
        tm.assert_series_equal(result, expected)

        # Invalid 'keep' values raise ValueError.
        msg = 'keep must be either "first", "last"'
        with pytest.raises(ValueError, match=msg):
            ser.nsmallest(keep="invalid")
        with pytest.raises(ValueError, match=msg):
            ser.nlargest(keep="invalid")

        # GH#15297: all-equal values resolve ties by position via 'keep'.
        ser = Series([1] * 5, index=[1, 2, 3, 4, 5])
        expected_first = Series([1] * 3, index=[1, 2, 3])
        expected_last = Series([1] * 3, index=[5, 4, 3])

        result = ser.nsmallest(3)
        tm.assert_series_equal(result, expected_first)

        result = ser.nsmallest(3, keep="last")
        tm.assert_series_equal(result, expected_last)

        result = ser.nlargest(3)
        tm.assert_series_equal(result, expected_first)

        result = ser.nlargest(3, keep="last")
        tm.assert_series_equal(result, expected_last)

    @pytest.mark.parametrize("n", range(1, 5))
    def test_nlargest_n(self, n):
        # GH 13412: results must agree with sort_values().head(n),
        # including with a duplicated index.
        ser = Series([1, 4, 3, 2], index=[0, 0, 1, 1])
        result = ser.nlargest(n)
        expected = ser.sort_values(ascending=False).head(n)
        tm.assert_series_equal(result, expected)

        result = ser.nsmallest(n)
        expected = ser.sort_values().head(n)
        tm.assert_series_equal(result, expected)

    def test_nlargest_boundary_integer(self, nselect_method, any_int_numpy_dtype):
        # GH#21426: dtype min/max values must not overflow the selection.
        dtype_info = np.iinfo(any_int_numpy_dtype)
        min_val, max_val = dtype_info.min, dtype_info.max
        vals = [min_val, min_val + 1, max_val - 1, max_val]
        assert_check_nselect_boundary(vals, any_int_numpy_dtype, nselect_method)

    def test_nlargest_boundary_float(self, nselect_method, float_numpy_dtype):
        # GH#21426
        dtype_info = np.finfo(float_numpy_dtype)
        min_val, max_val = dtype_info.min, dtype_info.max
        min_2nd, max_2nd = np.nextafter([min_val, max_val], 0, dtype=float_numpy_dtype)
        vals = [min_val, min_2nd, max_2nd, max_val]
        assert_check_nselect_boundary(vals, float_numpy_dtype, nselect_method)

    @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
    def test_nlargest_boundary_datetimelike(self, nselect_method, dtype):
        # GH#21426
        # use int64 bounds and +1 to min_val since true minimum is NaT
        # (include min_val/NaT at end to maintain same expected_idxr)
        dtype_info = np.iinfo("int64")
        min_val, max_val = dtype_info.min, dtype_info.max
        vals = [min_val + 1, min_val + 2, max_val - 1, max_val, min_val]
        assert_check_nselect_boundary(vals, dtype, nselect_method)

    def test_nlargest_duplicate_keep_all_ties(self):
        # see GH#16818: keep="all" returns every member of a tied boundary group.
        ser = Series([10, 9, 8, 7, 7, 7, 7, 6])
        result = ser.nlargest(4, keep="all")
        expected = Series([10, 9, 8, 7, 7, 7, 7])
        tm.assert_series_equal(result, expected)

        result = ser.nsmallest(2, keep="all")
        expected = Series([6, 7, 7, 7, 7], index=[7, 3, 4, 5, 6])
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "data,expected", [([True, False], [True]), ([True, False, True, True], [True])]
    )
    def test_nlargest_boolean(self, data, expected):
        # GH#26154 : ensure True > False
        ser = Series(data)
        result = ser.nlargest(1)
        expected = Series(expected)
        tm.assert_series_equal(result, expected)

    def test_nlargest_nullable(self, any_numeric_ea_dtype):
        # GH#42816: NA entries are excluded from the selection.
        dtype = any_numeric_ea_dtype
        if dtype.startswith("UInt"):
            # Can't cast from negative float to uint on some platforms
            arr = np.random.default_rng(2).integers(1, 10, 10)
        else:
            arr = np.random.default_rng(2).standard_normal(10)
        arr = arr.astype(dtype.lower(), copy=False)

        ser = Series(arr, dtype=dtype, copy=True)
        ser[1] = pd.NA
        result = ser.nlargest(5)
        expected = (
            Series(np.delete(arr, 1), index=ser.index.delete(1))
            .nlargest(5)
            .astype(dtype)
        )
        tm.assert_series_equal(result, expected)

    def test_nsmallest_nan_when_keep_is_all(self):
        # GH#46589: keep="all" must still drop NaN/None from the result
        # unless fewer than n non-null values exist.
        s = Series([1, 2, 3, 3, 3, None])
        result = s.nsmallest(3, keep="all")
        expected = Series([1.0, 2.0, 3.0, 3.0, 3.0])
        tm.assert_series_equal(result, expected)

        s = Series([1, 2, None, None, None])
        result = s.nsmallest(3, keep="all")
        expected = Series([1, 2, None, None, None])
        tm.assert_series_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@series@methods@test_nlargest.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "shadden/celmech",
"repo_path": "celmech_extracted/celmech-master/setup.py",
"type": "Python"
}
|
# Build-time prelude: resolve setuptools/distutils, the extension suffix,
# the embedded git hash, and platform-specific link arguments.
try:
    from setuptools import setup, Extension
except ImportError:
    from distutils.core import setup, Extension
from codecs import open   # NOTE: shadows the builtin open() for this module.
import os
import sys
import sysconfig

# Shared-library suffix for the compiled extension (e.g. '.cpython-311-...so').
suffix = sysconfig.get_config_var('EXT_SUFFIX')
if suffix is None:
    suffix = ".so"

# Try to get git hash
try:
    import subprocess
    ghash = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii")
    ghash_arg = "-DCELMECHGITHASH="+ghash.strip()
except Exception:
    # BUG FIX: was a bare 'except:', which also swallowed SystemExit and
    # KeyboardInterrupt; fall back to the baked-in hash for non-git checkouts.
    ghash_arg = "-DCELMECHGITHASH=485554c96625e8d3f90d5dd7cfac3e6a29eea763" #GITHASHAUTOUPDATE

extra_link_args=[]
if sys.platform == 'darwin':
    # NOTE(review): this rebinds 'sysconfig' to the distutils module, shadowing
    # the stdlib import above for the rest of the file — confirm intentional.
    from distutils import sysconfig
    vars = sysconfig.get_config_vars()
    vars['LDSHARED'] = vars['LDSHARED'].replace('-bundle', '-shared')
    extra_link_args=['-Wl,-install_name,@rpath/libcelmech'+suffix]
# C extension holding the compiled celmech kernels (disturbing function,
# Poisson series, FMFT).  The LIBCELMECH macro guards library-only code paths;
# ghash_arg embeds the git revision computed above.
libcelmechmodule = Extension('libcelmech',
                             sources = [
                                 'src/disturbing_function.c',
                                 'src/poisson_series.c',
                                 'src/fmft.c',
                                 'src/fmftPy.c',
                                 'src/nrutil.c'
                             ],
                             include_dirs = ['src'],
                             define_macros=[ ('LIBCELMECH', None) ],
                             # Removed '-march=native' for now.
                             extra_compile_args=['-fstrict-aliasing', '-O3','-std=c99','-Wno-unknown-pragmas', ghash_arg, '-DLIBCELMECH', '-D_GNU_SOURCE', '-fPIC'],
                             extra_link_args=extra_link_args,
                             )
# Runtime dependencies are skipped on Read the Docs builds.
if not os.getenv('READTHEDOCS'):
    packages = ['exoplanet-core>=0.3.0','pytensor>=2.18' ,'sympy>=1.1.1', 'numpy', 'scipy>=1.2.0', 'reboundx>=4.0.0', 'rebound>=4.0.1', 'mpmath>=1.0.0']
    # NOTE(review): 'install_requires' is never defined before this point, so
    # the 'try' arm always raises NameError and the 'except' arm runs; the
    # resulting list is then unused because setup() below hard-codes its own
    # install_requires — confirm whether this block is dead code.
    try:
        install_requires += packages
    except:
        install_requires = packages

setup(name='celmech',
      version='1.5.2',
      description='Open source tools for celestial mechanics',
      url='http://github.com/shadden/celmech',
      author='Dan Tamayo, Sam Hadden',
      author_email='tamayo.daniel@gmail.com, shadden1107@gmail.com',
      license='GPL',
      classifiers=[
          # How mature is this project? Common values are
          #   3 - Alpha
          #   4 - Beta
          #   5 - Production/Stable
          'Development Status :: 3 - Alpha',

          # Indicate who your project is intended for
          'Intended Audience :: Science/Research',
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Build Tools',
          'Topic :: Scientific/Engineering :: Astronomy',

          # Pick your license as you wish (should match "license" above)
          'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',

          # Specify the Python versions you support here. In particular, ensure
          # that you indicate whether you support Python 2, Python 3 or both.
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 3',
      ],
      keywords='astronomy astrophysics celestial-mechanics orbits orbital-mechanics',
      packages=['celmech'],
      install_requires=['exoplanet-core>=0.3.0rc2','pytensor>=2.18', 'mpmath>=1.0.0', 'sympy>=1.1.1', 'rebound>=4.0.1', 'reboundx>=4.0.0', 'numpy', 'scipy>=1.2.0'],
      test_suite="celmech.test",
      ext_modules = [libcelmechmodule],
      zip_safe=False)
|
shaddenREPO_NAMEcelmechPATH_START.@celmech_extracted@celmech-master@setup.py@.PATH_END.py
|
{
"filename": "truthloader.py",
"repo_name": "LSSTDESC/elasticc",
"repo_path": "elasticc_extracted/elasticc-main/tom_management/truthloader.py",
"type": "Python"
}
|
raise RuntimeError( "Deprecated. See elasticc2/management/commands/load_snana_fits.py in desc-tom" )
import gzip
import json
import logging
import sys
import time

import numpy
import pandas

from tomconnection import TomConnection
class TruthLoader(TomConnection):
    """Load truth-table CSV files and POST them to the TOM in fixed-size batches.

    converters -- column converters passed straight to pandas.read_csv (required).
    urlend     -- endpoint path appended to self.urlbase for the POST (required).
    renames    -- optional column-rename mapping applied after reading.
    sep        -- CSV field separator.
    """

    def __init__( self, *args, converters=None, urlend=None, renames=None, sep=',', **kwargs ):
        super().__init__( *args, **kwargs )
        if converters is None or urlend is None:
            raise RuntimeError( "Must give converters and url" )
        self.converters = converters
        self.urlend = urlend
        self.renames = renames
        self.sep = sep
        self.cache = []           # rows buffered for the next POST
        self.tot_n_loaded = 0
        self.tot_missing = 0
        self.cache_size = 1000    # flush threshold (rows)

    def load_csv( self, filename ):
        """Read one (optionally gzipped) CSV and queue its rows for upload."""
        # BUG FIX: the f-string previously contained no placeholder and always
        # logged "(unknown)"; log the actual file being read.
        self.logger.info( f"****** Reading {filename} ******" )
        if ( len(filename) >= 3 ) and ( filename[-3:] == ".gz" ):
            ifp = gzip.open( filename )
        else:
            ifp = open( filename )
        df = pandas.read_csv( ifp, skipinitialspace=True, comment='#', skip_blank_lines=True, sep=self.sep,
                              converters=self.converters )
        ifp.close()
        if self.renames is not None:
            df.rename( self.renames, axis=1, inplace=True )
        # import pdb; pdb.set_trace()
        for i, row in df.iterrows():
            # Dealing with Pandas NaN and JSON is painful: convert NaN to None
            # so json.dumps emits null.
            d = dict(row)
            for key, val in d.items():
                if numpy.isnan( val ): d[key] = None
            self.cache.append( d )
            if len( self.cache ) >= self.cache_size:
                self.flush_cache()
        self.flush_cache()

    def flush_cache( self ):
        """POST the buffered rows, retrying until the server accepts them."""
        if len( self.cache ) > 0:
            self.logger.debug( f"Posting {sys.getsizeof(json.dumps(self.cache))/1024:.2f} kiB "
                               f"for {len(self.cache)} truth values" )
            # Keep resending until we get a good result. The code on the server
            # should be smart enough to not load duplicates, so we should be
            # safe just resending.
            ok = False
            while not ok:
                resp = self.rqs.post( f'{self.urlbase}/{self.urlend}', json=self.cache )
                if resp.status_code != 200:
                    self.logger.error( f"ERROR : got status code {resp.status_code}; retrying after 1s..." )
                    # BUG FIX: 'time' was never imported, so this retry path
                    # raised NameError ('import time' added at top of file).
                    time.sleep(1)
                else:
                    ok = True
            rjson = json.loads( resp.text )
            if rjson['status'] != 'ok':
                outlines = [ f"ERROR: got status {rjson['status']}" ]
                for key, val in rjson.items():
                    if key != 'status':
                        outlines.append( f"  {key} : {val}\n" )
                self.logger.error( "\n".join( outlines ) )
            else:
                if 'missing' in rjson:
                    if len( rjson['missing'] ) > 0:
                        self.logger.warning( f'Server told us the following was missing: '
                                             f'{" ".join( [ str(i) for i in rjson["missing"] ] )}' )
                    self.tot_missing += len( rjson['missing'] )
                self.tot_n_loaded += len( rjson["message"] )
                self.logger.info( f'Loaded {len(rjson["message"])} truth values, '
                                  f'cumulative {self.tot_n_loaded} (with {self.tot_missing} missing)\n' )
            self.cache = []
|
LSSTDESCREPO_NAMEelasticcPATH_START.@elasticc_extracted@elasticc-main@tom_management@truthloader.py@.PATH_END.py
|
{
"filename": "mwm_rv.py",
"repo_name": "sdss/target_selection",
"repo_path": "target_selection_extracted/target_selection-main/python/target_selection/cartons/mwm_rv.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Pramod Gupta (psgupta@uw.edu)
# @Date: 2020-06-10
# @Filename: mwm_rv.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import math
import peewee
from sdssdb.peewee.sdss5db.catalogdb import (
Catalog,
CatalogToGaia_DR3,
CatalogToTIC_v8,
CatalogToTwoMassPSC,
Gaia_DR3,
SDSS_DR17_APOGEE_Allstarmerge,
TIC_v8,
TwoMassPSC,
)
from target_selection.cartons import BaseCarton
# See catalog.py for the name of peewee model names corresponding
# to postgres table names:
# https://github.com/sdss/sdssdb/blob/master/python/sdssdb/peewee/sdss5db/catalogdb.py
# 2.2.1. Long Baseline (Legacy Targets)
# Shorthand name: mwm_rv_long (Not an actual target class.
# Defined as a parent sample to select from for 2.2.1.x sections below)
# Simplified Description of selection criteria:
# APOGEE-1&-2 "main survey" targets with at least 3 previous epochs
# brighter than H of 12.8 that have a Gaia parallax.
# Wiki page: Binaries and Substellar Companions
# Additional source catalogs needed: Just need sdss_dr17_apogee_allstarmerge
# Additional cross-matching needed: (None)
# Return columns: apogee_id, nvisits, ra, dec, pmra, pmdec, h, baseline, fields
# cadence options for these targets (list all options,
# even though no single target will receive more than one):
# (See entries for this in 2.2.1.x below)
# Pseudo SQL (optional): SELECT apogee_id, nvisits, ra, dec,
# pmra, pmdec, h, baseline, fields FROM sdss_dr17_apogee_allstarmerge
# WHERE h<12.8 AND nvisits>=3 AND dist_src == 'GaiaDR3' AND
# [targflags contains 'APOGEE_SHORT' OR 'APOGEE_INTERMEDIATE' OR
# 'APOGEE_LONG' OR '*APOGEE2_*BIN_*'] AS mwm_rv_long
# Implementation:
# Non-SQL implementation:
# lead contact: Nicholas Troup
# We associate each APOGEE target with a 2MASS source since the
# vast majority of APOGEE targets were selected from the 2MASS PSC.
# APOGEE_ID is essentially the same as
# the βdesignationβ column of the 2MASS PSC;
# For sdss_dr17_apogee_allstarmerge we
# have to strip off the β2Mβ in the APOGEE_ID.
# However, some apogee_id values do not have 2M in the front.
# (old: for sdss_apogeeallstarmerge_r13 we also had to strip off the
# β2Mβ in the APOGEE_ID.)
# For example:
# sdss5db=# select designation from catalogdb.twomass_psc limit 2;
# designation
# ------------------
# 12034281-5740002
# 12032366-5738380
# (2 rows)
#
# sdss5db=# select apogee_id from catalogdb.SDSS_DR17_APOGEE_Allstarmerge
# where apogee_id like '2M%';
# apogee_id
# ---------------------
# 2M00000002+7417074
# 2M00000019-1924498
# etc.
# sdss5db=# select apogee_id from catalogdb.SDSS_DR17_APOGEE_Allstarmerge
# where apogee_id not like '2M%';
# apogee_id
# --------------------
# 19140272-1554055
# 19155129-1617591
#
# #### start comment for old sdss_apogeeallstarmerge_r13 ###################
#
# sdss5db=# select replace(apogee_id,'2M', '') from
# catalogdb.sdss_apogeeallstarmerge_r13 limit 2;
# replace
# ------------------
# 14044120-1550575
# 14033676-1554164
# (2 rows)
#
#
# sdss5db=# select count(1) from
# catalogdb.sdss_apogeeallstarmerge_r13 where dist_src='gaia';
# count
# -------
# 0
# (1 row)
#
# Hence use trim(dist_src) like below:
#
# sdss5db=# select count(1) from
# catalogdb.sdss_apogeeallstarmerge_r13 where trim(dist_src)='gaia';
# count
# --------
# 487508
# (1 row)
#
# ######### end comment for old sdss_apogeeallstarmerge_r13 ##########
# Below we use GaiaEDR3 and not GaiaDR3 since the dist_src column
# does not have GaiaDR3
# Shared parent-sample cuts for the mwm_rv_long cartons (see pseudo-SQL in the
# comments above): bright H, enough prior APOGEE visits, a Gaia EDR3 distance
# source, and main-survey / binary targeting flags.  peewee's '%' operator is
# SQL LIKE.
mwm_rv_long_condition = (
    SDSS_DR17_APOGEE_Allstarmerge.h < 11.5,  # old 12.8,
    SDSS_DR17_APOGEE_Allstarmerge.nvisits >= 6,  # old 3,
    peewee.fn.trim(SDSS_DR17_APOGEE_Allstarmerge.dist_src) == "GaiaEDR3",
    (SDSS_DR17_APOGEE_Allstarmerge.targflags % "%APOGEE_SHORT%")
    | (SDSS_DR17_APOGEE_Allstarmerge.targflags % "%APOGEE_INTERMEDIATE%")
    | (SDSS_DR17_APOGEE_Allstarmerge.targflags % "%APOGEE_LONG%")
    | (SDSS_DR17_APOGEE_Allstarmerge.targflags % "%APOGEE2_%BIN_%"),
)
# old class name MWM_RV_Long_FPS_Carton(BaseCarton):
class MWM_bin_rv_long_apogee_Carton(BaseCarton):
    """3.2.1.3. Long Baseline Targets for FPS

    Shorthand name: mwm_bin_rv_long_apogee (old name mwm_rv_long_fps)
    Simplified Description of selection criteria:
    Select from long-baseline targets (above) with H brighter than 11.5
    Wiki page: Binaries and Substellar Companions (Page Under Construction)
    Additional source catalogs needed: Select from mwm_rv_long (2.2.1)
    Additional cross-matching needed: (None)
    Return columns: apogee_id, nvisits, ra, dec,
    pmra, pmdec, h, baseline, fields (Same as 2.2.1 above)
    cadence options for these targets
    (list all options, even though no single target will receive more than one):
    If H>10.8 then use mwm_rv_<nn>x2, otherwise use mwm_rv_<nn>x1,
    where <nn> = 3*ceiling((18-nvisits)/3)
    if <nn> is less than 6 then
    set <nn> = 6
    Pseudo SQL (optional): SELECT apogee_id, nvisits, ra, dec,
    pmra, pmdec, h, baseline, fields FROM mwm_rv_long WHERE h<11.5
    Implementation:
    Non-SQL implementation:
    lead contact: Nicholas Troup
    """

    # cadence must be None here so that
    # it can be set in post_process().
    # If cadence is not None here then
    # it cannot be set in post_process().
    name = "mwm_bin_rv_long_apogee"  # old name = 'mwm_rv_long_fps'
    category = "science"
    instrument = "APOGEE"
    cadence = None  # cadence is set in post_process()
    program = "mwm_bin"
    mapper = "MWM"
    priority = 1810
    can_offset = True

    # peewee Model name ---> postgres table name
    # SDSS_DR17_APOGEE_Allstarmerge(CatalogdbModel)--->'sdss_dr17_apogee_allstarmerge'

    # There is no gaia_dr3 in the below query so for v1.0 we do not have to do
    # a major modification of the v0.5 query.

    # In the below query, we use replace() instead of ltrim() since
    # ltrim('2M20', '2M') will also trim the second 2.
    def build_query(self, version_id, query_region=None):
        """Return the peewee query selecting this carton's candidate targets.

        Chain: Catalog -> CatalogToTIC_v8 -> TIC_v8 -> TwoMassPSC ->
        SDSS_DR17_APOGEE_Allstarmerge, matching the 2MASS designation against
        apogee_id with its optional '2M' prefix stripped, then applying the
        shared mwm_rv_long_condition cuts plus this carton's H < 11.5 cut.
        """
        query = (
            Catalog.select(
                CatalogToTIC_v8.catalogid,
                SDSS_DR17_APOGEE_Allstarmerge.apogee_id,
                SDSS_DR17_APOGEE_Allstarmerge.nvisits,
                SDSS_DR17_APOGEE_Allstarmerge.ra.alias("allstarmerge_ra"),
                SDSS_DR17_APOGEE_Allstarmerge.dec.alias("allstarmerge_dec"),
                SDSS_DR17_APOGEE_Allstarmerge.pmra.alias("allstarmerge_pmra"),
                SDSS_DR17_APOGEE_Allstarmerge.pmdec.alias("allstarmerge_pmdec"),
                SDSS_DR17_APOGEE_Allstarmerge.h,
                SDSS_DR17_APOGEE_Allstarmerge.baseline,
                SDSS_DR17_APOGEE_Allstarmerge.fields,
                SDSS_DR17_APOGEE_Allstarmerge.teff_avg,  # old teff
                SDSS_DR17_APOGEE_Allstarmerge.logg_avg,  # old logg
                SDSS_DR17_APOGEE_Allstarmerge.dist,
                SDSS_DR17_APOGEE_Allstarmerge.dist_src,
            )
            .join(CatalogToTIC_v8, on=(Catalog.catalogid == CatalogToTIC_v8.catalogid))
            .join(TIC_v8, on=(CatalogToTIC_v8.target_id == TIC_v8.id))
            .join(TwoMassPSC, on=(TIC_v8.twomass_psc == TwoMassPSC.designation))
            .join(
                SDSS_DR17_APOGEE_Allstarmerge,
                on=(
                    TwoMassPSC.designation
                    == peewee.fn.replace(SDSS_DR17_APOGEE_Allstarmerge.apogee_id, "2M", "")
                ),
            )
            .where(
                CatalogToTIC_v8.version_id == version_id,
                CatalogToTIC_v8.best >> True,
                *mwm_rv_long_condition,
                SDSS_DR17_APOGEE_Allstarmerge.h < 11.5,
            )
        )
        # Below ra, dec and radius are in degrees
        # query_region[0] is ra of center of the region
        # query_region[1] is dec of center of the region
        # query_region[2] is radius of the region
        if query_region:
            query = query.where(
                peewee.fn.q3c_radial_query(
                    Catalog.ra,
                    Catalog.dec,
                    query_region[0],
                    query_region[1],
                    query_region[2],
                )
            )
        return query

    def post_process(self, model):
        """
        For cadence:
        If H>10.8 then use bright_<nn>x2, otherwise use bright_<nn>x1,
        where <nn> = 3*ceiling((18-nvisits)/3)
        if <nn> is less than 6 then
        set <nn> = 6
        """
        # teff_avg and logg_avg are from SDSS_DR17_APOGEE_Allstarmerge
        # old name was teff, logg
        cursor = self.database.execute_sql(
            "select catalogid, nvisits, h, teff_avg, logg_avg from "
            + " sandbox.temp_mwm_bin_rv_long_apogee ;"
        )
        output = cursor.fetchall()
        # Assign a per-target cadence and write it back to the sandbox table.
        for i in range(len(output)):
            current_catalogid = output[i][0]
            current_nvisits = output[i][1]
            current_h = output[i][2]
            # current_teff_avg = output[i][3]
            # current_logg_avg = output[i][4]
            nn = 3 * math.ceil((18 - current_nvisits) / 3)
            if nn < 6:
                nn = 6
            if current_h > 10.8:
                current_cadence = "bright_" + str(nn) + "x2"
            else:
                current_cadence = "bright_" + str(nn) + "x1"
            if current_cadence is not None:
                self.database.execute_sql(
                    " update sandbox.temp_mwm_bin_rv_long_apogee "
                    + " set cadence = '"
                    + current_cadence
                    + "'"
                    " where catalogid = " + str(current_catalogid) + ";"
                )
# Below is from the comments for
# MWM_bin_rv_short_Base_Carton further below.
# It is put here for reference.
#
# v1.0
# AND tm.j_msigcom <= 0.1
# AND tm.h_msigcom <= 0.1
# AND tm.k_msigcom <= 0.1
# AND ( tm.ph_qual='AAA'
# OR tm.ph_qual='AAB'
# OR tm.ph_qual='ABA'
# OR tm.ph_qual='BAA'
# OR tm.ph_qual='ABB'
# OR tm.ph_qual='BAB'
# OR tm.ph_qual='BBA'
# OR tm.ph_qual='BBB')
# AND tm.prox >= 6
# AND tm.cc_flg='000'
# AND tm.gal_contam='000'
# AND ( tm.rd_flg='111'
# OR tm.rd_flg='112'
# OR tm.rd_flg='121'
# OR tm.rd_flg='211'
# OR tm.rd_flg='122'
# OR tm.rd_flg='212'
# OR tm.rd_flg='221'
# OR tm.rd_flg='222')
# AND g.parallax_error/g.parallax < 0.2
# 2MASS photometric-quality cuts shared by the mwm_bin_rv_short_* cartons
# (see the pseudo-SQL in the comment block above).
mwm_rv_short_condition = (
    TwoMassPSC.j_msigcom <= 0.1,
    TwoMassPSC.h_msigcom <= 0.1,
    TwoMassPSC.k_msigcom <= 0.1,
    # Photometric quality: at worst 'B' in each of J/H/K.
    (
        (TwoMassPSC.ph_qual == "AAA")
        | (TwoMassPSC.ph_qual == "AAB")
        | (TwoMassPSC.ph_qual == "ABA")
        | (TwoMassPSC.ph_qual == "BAA")
        | (TwoMassPSC.ph_qual == "ABB")
        | (TwoMassPSC.ph_qual == "BAB")
        | (TwoMassPSC.ph_qual == "BBA")
        | (TwoMassPSC.ph_qual == "BBB")
    ),
    TwoMassPSC.prox >= 6,
    TwoMassPSC.cc_flg == "000",
    # NOTE(review): the pseudo-SQL above says gal_contam='000' but this
    # compares against integer 0 — confirm the column type/intent.
    TwoMassPSC.gal_contam == 0,
    # Read flag: detected ('1' or '2') in all three bands.
    (
        (TwoMassPSC.rd_flg == "111")
        | (TwoMassPSC.rd_flg == "112")
        | (TwoMassPSC.rd_flg == "121")
        | (TwoMassPSC.rd_flg == "211")
        | (TwoMassPSC.rd_flg == "122")
        | (TwoMassPSC.rd_flg == "212")
        | (TwoMassPSC.rd_flg == "221")
        | (TwoMassPSC.rd_flg == "222")
    ),
)
class MWM_bin_rv_short_Base_Carton(BaseCarton):
    # old class name MWM_RV_Short_FPS_Carton(BaseCarton):
    """
    This base carton contains the first part of the below query,
    which is common to all the mwm_bin_rv_short* cartons.

    The middle part (i.e. the second part) is different for the different cartons.
    The third part is not in mwm_bin_rv_short_mdwarf.
    The third part is in mwm_bin_rv_short_subgiant and mwm_bin_rv_short_rgb.
    The third part corresponds to mwm_rv_short_condition above.

    The below information is for 5.1.25. mwm_bin_rv_short_mdwarf.
    It is put here for reference for MWM_bin_rv_short_Base_Carton.
    The below information is again put further below in 5.1.25. mwm_bin_rv_short_mdwarf.

    5.1.25. mwm_bin_rv_short_mdwarf
    This carton contains the M Dwarf stars originally in mwm_bin_rv_short
    Shorthand name: mwm_bin_rv_short_mdwarf (Formally mwm_rv_short_fps/mwm_bin_rv_short)
    Existing carton code :
    https://github.com/sdss/target_selection/blob/main/python/target_selection/cartons/mwm_rv.py
    Simplified Description of selection criteria :
    SELECT catalog.catalogid, catalog.ra, catalog.dec, catalog.pmra, catalog.pmdec,
    g.ref_epoch, g.source_id, g.parallax, g.parallax_error,
    g.phot_g_mean_mag,g.phot_bp_mean_mag,
    g.phot_rp_mean_mag,g.logg_gspphot,g.teff_gspphot,tm.j_m,tm.h_m,tm.k_m
    FROM catalog
    JOIN gaia_dr3_source AS g,twomass_psc AS tm
    WHERE tm.h_m < 11.5
    AND tm.h_m > 7
    AND g.parallax>0
    AND g.phot_rp_mean_mag-tm.k_m>2 and
    AND (g.phot_rp_mean_mag-tm.k_m)*2+0.6<tm.h_m-5*log10(1000/g.parallax)+5
    Gaia DR2 parameters to be converted to Gaia DR3 :
    coordinates, proper motions, parallax
    Return columns (again ok to copy from v0.5 version) : (See pseudo-SQL above)
    Metadata:
    can_offset = False
    Cadence options: bright_18x1
    priority: priority = 2515
    Lead contact: Nicholas Troup Nathan De Lee
    """

    # Below query does not use SDSS_DR17_APOGEE_Allstarmerge.
    # There is gaia_dr3 in the below query so we have done
    # major modification of the old v0.5 query.
    def build_query(self, version_id, query_region=None):
        # Common SELECT for all mwm_bin_rv_short* cartons: catalog positions
        # and proper motions, Gaia DR3 astrometry/photometry/GSP-Phot
        # parameters, and 2MASS photometry, joined through the catalog
        # cross-match tables.  Subclasses append the carton-specific cuts.
        query = (
            Catalog.select(
                Catalog.catalogid,
                Catalog.ra,
                Catalog.dec,
                Catalog.pmra,
                Catalog.pmdec,
                Gaia_DR3.ref_epoch,
                Gaia_DR3.source_id,
                Gaia_DR3.parallax,
                Gaia_DR3.parallax_error,
                Gaia_DR3.phot_g_mean_mag,
                Gaia_DR3.phot_bp_mean_mag,
                Gaia_DR3.phot_rp_mean_mag,
                Gaia_DR3.logg_gspphot,
                Gaia_DR3.teff_gspphot,
                TwoMassPSC.j_m,
                TwoMassPSC.h_m,
                TwoMassPSC.k_m,
                # Gaia coordinates are also exported under aliased names so
                # they do not clash with the Catalog columns above.
                Gaia_DR3.ra.alias("gaia_dr3_ra"),
                Gaia_DR3.dec.alias("gaia_dr3_dec"),
                Gaia_DR3.pmra.alias("gaia_dr3_pmra"),
                Gaia_DR3.pmdec.alias("gaia_dr3_pmdec"),
            )
            .join(CatalogToGaia_DR3, on=(Catalog.catalogid == CatalogToGaia_DR3.catalogid))
            .join(Gaia_DR3, on=(CatalogToGaia_DR3.target_id == Gaia_DR3.source_id))
            .switch(Catalog)
            .join(CatalogToTwoMassPSC, on=(Catalog.catalogid == CatalogToTwoMassPSC.catalogid))
            .join(TwoMassPSC, on=(CatalogToTwoMassPSC.target_id == TwoMassPSC.pts_key))
            .where(
                # Restrict both cross-match tables to the requested version
                # and to the best match only ('>>' is peewee's IS operator).
                CatalogToTwoMassPSC.version_id == version_id,
                CatalogToTwoMassPSC.best >> True,
                CatalogToGaia_DR3.version_id == version_id,
                CatalogToGaia_DR3.best >> True,
                # ext_key IS NULL -- presumably rejects sources with a 2MASS
                # extended-source association; confirm against the schema.
                TwoMassPSC.ext_key >> None,
                # Positive, non-null parallax: the mdwarf subclass computes
                # log10(1000 / parallax), which requires parallax > 0.
                Gaia_DR3.parallax.is_null(False),
                Gaia_DR3.parallax > 0,
            )
        )

        # Below ra, dec and radius are in degrees
        # query_region[0] is ra of center of the region
        # query_region[1] is dec of center of the region
        # query_region[2] is radius of the region
        if query_region:
            query = query.where(
                peewee.fn.q3c_radial_query(
                    Catalog.ra,
                    Catalog.dec,
                    query_region[0],
                    query_region[1],
                    query_region[2],
                )
            )
        return query
class MWM_bin_rv_short_mdwarf_Base_Carton(MWM_bin_rv_short_Base_Carton):
    """5.1.25. mwm_bin_rv_short_mdwarf
    This base carton contains the M Dwarf stars originally in mwm_bin_rv_short
    Shorthand name: mwm_bin_rv_short_mdwarf (Formally mwm_rv_short_fps/mwm_bin_rv_short)
    Existing carton code :
    https://github.com/sdss/target_selection/blob/main/python/target_selection/cartons/mwm_rv.py
    Simplified Description of selection criteria :
    SELECT catalog.catalogid, catalog.ra, catalog.dec, catalog.pmra, catalog.pmdec,
    g.ref_epoch, g.source_id, g.parallax, g.parallax_error,
    g.phot_g_mean_mag,g.phot_bp_mean_mag,
    g.phot_rp_mean_mag,g.logg_gspphot,g.teff_gspphot,tm.j_m,tm.h_m,tm.k_m
    FROM catalog
    JOIN gaia_dr3_source AS g,twomass_psc AS tm
    WHERE tm.h_m < 11.5
    AND tm.h_m > 7
    AND g.parallax>0
    AND g.phot_rp_mean_mag-tm.k_m>2 and
    AND (g.phot_rp_mean_mag-tm.k_m)*2+0.6<tm.h_m-5*log10(1000/g.parallax)+5

    The mwm_bin_rv_short_mdwarf carton has the above query with only two parts.
    The mwm_bin_rv_short_subgiant and mwm_bin_rv_short_rgb cartons
    have queries with three parts.
    The third part corresponds to mwm_rv_short_condition above.

    Gaia DR2 parameters to be converted to Gaia DR3 :
    coordinates, proper motions, parallax
    Return columns (again ok to copy from v0.5 version) : (See pseudo-SQL above)
    Metadata:
    can_offset = False
    Cadence options: bright_18x1
    priority: priority = 2515
    Lead contact: Nicholas Troup Nathan De Lee
    """

    def build_query(self, version_id, query_region=None):
        # Start from the shared catalog/Gaia-DR3/2MASS base query.
        query = super().build_query(version_id, query_region)
        # M-dwarf cuts: H-band magnitude window, a red Rp - Ks colour, and a
        # colour-dependent absolute-magnitude cut, where the right-hand side
        # h - 5*log10(1000/parallax) + 5 is the parallax-based absolute H
        # magnitude (parallax > 0 is guaranteed by the base query).
        query = query.where(
            TwoMassPSC.h_m > 7,
            TwoMassPSC.h_m < 11.5,
            Gaia_DR3.phot_rp_mean_mag - TwoMassPSC.k_m > 2,
            ((Gaia_DR3.phot_rp_mean_mag - TwoMassPSC.k_m) * 2 + 0.6)
            < (TwoMassPSC.h_m - 5 * peewee.fn.log10(1000 / Gaia_DR3.parallax) + 5),
        )
        return query
class MWM_bin_rv_short_mdwarf_apogee_18epoch(MWM_bin_rv_short_mdwarf_Base_Carton):
    # 18-visit variant of the M-dwarf short-baseline RV carton.  Target
    # selection is fully inherited; only cadence/priority differ between the
    # *_18epoch/_12epoch/_08epoch cartons.
    # NOTE(review): the base-class docstring quotes priority = 2515 while
    # these cartons use 1305-1307 -- confirm which value is current.
    name = "mwm_bin_rv_short_mdwarf_apogee_18epoch"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_18x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 1305
    can_offset = False
class MWM_bin_rv_short_mdwarf_apogee_12epoch(MWM_bin_rv_short_mdwarf_Base_Carton):
    # 12-visit variant of the M-dwarf short-baseline RV carton; selection is
    # fully inherited, only cadence and priority differ from the 18-epoch one.
    name = "mwm_bin_rv_short_mdwarf_apogee_12epoch"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_12x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 1306
    can_offset = False
class MWM_bin_rv_short_mdwarf_apogee_08epoch(MWM_bin_rv_short_mdwarf_Base_Carton):
    # 8-visit variant of the M-dwarf short-baseline RV carton; selection is
    # fully inherited, only cadence and priority differ from the 18-epoch one.
    name = "mwm_bin_rv_short_mdwarf_apogee_08epoch"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_8x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 1307
    can_offset = False
class MWM_bin_rv_short_subgiant_apogee_Carton(MWM_bin_rv_short_Base_Carton):
    """5.1.26. mwm_bin_rv_short_subgiant_apogee
    This carton contains subgiant stars and lower red giant branch stars
    originally in mwm_bin_rv_short
    Shorthand name: mwm_bin_rv_short_subgiant (Formally mwm_rv_short_fps/mwm_bin_rv_short)
    Existing carton code :
    https://github.com/sdss/target_selection/blob/main/python/target_selection/cartons/mwm_rv.py
    Simplified Description of selection criteria :
    SELECT catalog.catalogid, catalog.ra, catalog.dec, catalog.pmra, catalog.pmdec,
    g.ref_epoch, g.source_id, g.parallax, g.parallax_error,
    g.phot_g_mean_mag,g.phot_bp_mean_mag,
    g.phot_rp_mean_mag,g.logg_gspphot,g.teff_gspphot,tm.j_m,tm.h_m,tm.k_m
    FROM catalog
    JOIN gaia_dr3_source AS g,twomass_psc AS tm
    WHERE tm.h_m <10.8
    AND tm.h_m > 7
    AND g.logg_gspphot > 3.0
    AND g.logg_gspphot < 4.1
    AND g.teff_gspphot < 7000
    AND g.teff_gspphot > 4500
    AND (tm.j_msigcom <= 0.1 AND tm.h_msigcom<=0.1 AND tm.k_msigcom <= 0.1)
    AND(tm.ph_qual= 'AAA' OR tm.ph_qual='AAB' OR tm.ph_qual='ABA'
    OR tm.ph_qual='BAA' OR tm.ph_qual= 'ABB' OR tm.ph_qual='BAB'
    OR tm.ph_qual='BBA' OR tm.ph_qual ='BBB')
    AND tm.prox >= 6 AND tm.cc_flg='000' AND tm.gal_contam='000'
    AND (tm.rd_flg='111' OR tm.rd_flg='112' OR tm.rd_flg='121'
    OR tm.rd_flg='211' OR tm.rd_flg='122' OR tm.rd_flg='212'
    OR tm.rd_flg='221' OR tm.rd_flg='222')
    AND g.parallax_error/g.parallax < 0.2
    Gaia DR2 parameters to be converted to Gaia DR3 :
    coordinates, proper motions, parallax
    Return columns (again ok to copy from v0.5 version) : (See pseudo-SQL above)
    Metadata:
    can_offset = False
    Cadence options: bright_18x1
    priority: priority = 2525
    Lead contact: Nicholas Troup Nathan De Lee
    """

    name = "mwm_bin_rv_short_subgiant_apogee"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_18x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 2525
    can_offset = False

    def build_query(self, version_id, query_region=None):
        # Start from the shared catalog/Gaia-DR3/2MASS base query.
        query = super().build_query(version_id, query_region)
        # Subgiant / lower-RGB cuts: H-band magnitude window plus Gaia
        # GSP-Phot logg/teff windows and an explicit <20% relative parallax
        # error.  The shared 2MASS photometric-quality cuts come in via
        # *mwm_rv_short_condition (defined at module level above).
        query = query.where(
            TwoMassPSC.h_m > 7,
            TwoMassPSC.h_m < 10.8,
            Gaia_DR3.logg_gspphot > 3.0,
            Gaia_DR3.logg_gspphot < 4.1,
            Gaia_DR3.teff_gspphot < 7000,
            Gaia_DR3.teff_gspphot > 4500,
            Gaia_DR3.parallax_error / Gaia_DR3.parallax < 0.2,
            *mwm_rv_short_condition,
        )
        return query
class MWM_bin_rv_short_rgb_apogee_Carton(MWM_bin_rv_short_Base_Carton):
    """5.1.27. mwm_bin_rv_short_rgb_apogee
    This carton contains the red clump and higher red giant stars
    originally in mwm_bin_rv_short
    Shorthand name: mwm_bin_rv_short_rgb (Formally mwm_rv_short_fps/mwm_bin_rv_short)
    Existing carton code :
    https://github.com/sdss/target_selection/blob/main/python/target_selection/cartons/mwm_rv.py
    Simplified Description of selection criteria :
    SELECT catalog.catalogid, catalog.ra, catalog.dec, catalog.pmra, catalog.pmdec,
    g.ref_epoch, g.source_id, g.parallax, g.parallax_error,
    g.phot_g_mean_mag,g.phot_bp_mean_mag,
    g.phot_rp_mean_mag,g.logg_gspphot,g.teff_gspphot,tm.j_m,tm.h_m,tm.k_m
    FROM catalog
    JOIN gaia_dr3_source AS g,twomass_psc AS tm
    WHERE tm.h_m<10.8
    AND tm.h_m > 7
    AND g.logg_gspphot > 1.2
    AND g.logg_gspphot <= 3.0
    AND g.teff_gspphot < 5500
    AND (tm.j_msigcom <= 0.1 AND tm.h_msigcom<=0.1 AND tm.k_msigcom <= 0.1)
    AND(tm.ph_qual= 'AAA' OR tm.ph_qual='AAB' OR tm.ph_qual='ABA'
    OR tm.ph_qual='BAA' OR tm.ph_qual= 'ABB' OR tm.ph_qual='BAB'
    OR tm.ph_qual='BBA' OR tm.ph_qual ='BBB')
    AND tm.prox >= 6 AND tm.cc_flg='000' AND tm.gal_contam='000'
    AND (tm.rd_flg='111' OR tm.rd_flg='112' OR tm.rd_flg='121'
    OR tm.rd_flg='211' OR tm.rd_flg='122' OR tm.rd_flg='212'
    OR tm.rd_flg='221' OR tm.rd_flg='222')
    AND g.parallax_error/g.parallax < 0.2
    Gaia DR2 parameters to be converted to Gaia DR3 :
    coordinates, proper motions, parallax
    Return columns (again ok to copy from v0.5 version) : (See pseudo-SQL above)
    Metadata:
    can_offset = False
    Cadence options: bright_18x1
    priority: priority = 2535
    Lead contact: Nicholas Troup Nathan De Lee
    """

    name = "mwm_bin_rv_short_rgb_apogee"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_18x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 2535
    can_offset = False

    def build_query(self, version_id, query_region=None):
        # Start from the shared catalog/Gaia-DR3/2MASS base query.
        query = super().build_query(version_id, query_region)
        # Red-clump / upper-RGB cuts: H-band magnitude window, Gaia GSP-Phot
        # logg in (1.2, 3.0] and teff below 5500 K, plus an explicit <20%
        # relative parallax error.  The shared 2MASS photometric-quality cuts
        # come in via *mwm_rv_short_condition (module level above).
        query = query.where(
            TwoMassPSC.h_m > 7,
            TwoMassPSC.h_m < 10.8,
            Gaia_DR3.logg_gspphot > 1.2,
            Gaia_DR3.logg_gspphot <= 3.0,
            Gaia_DR3.teff_gspphot < 5500,
            Gaia_DR3.parallax_error / Gaia_DR3.parallax < 0.2,
            *mwm_rv_short_condition,
        )
        return query
class MWM_bin_rv_short_rgb_apogee_08epoch_Carton(MWM_bin_rv_short_rgb_apogee_Carton):
    """mwm_bin_rv_short_rgb_apogee_08epoch -
    same as mwm_bin_rv_short_rgb_apogee but
    with cadence=bright_8x1 and priority=2537
    """

    # NOTE(review): can_offset flips from False (base carton) to True here --
    # confirm that the offset change for the epoch variants is intentional.
    name = "mwm_bin_rv_short_rgb_apogee_08epoch"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_8x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 2537
    can_offset = True
class MWM_bin_rv_short_rgb_apogee_12epoch_Carton(MWM_bin_rv_short_rgb_apogee_Carton):
    """mwm_bin_rv_short_rgb_apogee_12epoch -
    same as mwm_bin_rv_short_rgb_apogee but
    with cadence=bright_12x1 and priority=2536
    """

    # Selection inherited unchanged; only cadence/priority/can_offset differ.
    name = "mwm_bin_rv_short_rgb_apogee_12epoch"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_12x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 2536
    can_offset = True
class MWM_bin_rv_short_subgiant_apogee_08epoch_Carton(MWM_bin_rv_short_subgiant_apogee_Carton):
    """mwm_bin_rv_short_subgiant_apogee_08epoch -
    same as mwm_bin_rv_short_subgiant_apogee but
    with cadence=bright_8x1 and priority=2527
    """

    # Selection inherited unchanged; only cadence/priority/can_offset differ.
    name = "mwm_bin_rv_short_subgiant_apogee_08epoch"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_8x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 2527
    can_offset = True
class MWM_bin_rv_short_subgiant_apogee_12epoch_Carton(MWM_bin_rv_short_subgiant_apogee_Carton):
    """mwm_bin_rv_short_subgiant_apogee_12epoch -
    same as mwm_bin_rv_short_subgiant_apogee but
    with cadence=bright_12x1 and priority=2526
    """

    # Selection inherited unchanged; only cadence/priority/can_offset differ.
    name = "mwm_bin_rv_short_subgiant_apogee_12epoch"
    category = "science"
    instrument = "APOGEE"
    cadence = "bright_12x1"
    program = "mwm_bin"
    mapper = "MWM"
    priority = 2526
    can_offset = True
|
sdssREPO_NAMEtarget_selectionPATH_START.@target_selection_extracted@target_selection-main@python@target_selection@cartons@mwm_rv.py@.PATH_END.py
|
{
"filename": "_hoveron.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter/_hoveron.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoveronValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Validator for the ``hoveron`` flaglist attribute of ``scatter`` traces."""

    def __init__(self, plotly_name="hoveron", parent_name="scatter", **kwargs):
        # Pull the defaulted options out of kwargs before delegating, so an
        # explicit caller-supplied value still wins over the defaults.
        edit_type = kwargs.pop("edit_type", "style")
        flags = kwargs.pop("flags", ["points", "fills"])
        super(HoveronValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            flags=flags,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter@_hoveron.py@.PATH_END.py
|
{
"filename": "055_markdown.md",
"repo_name": "CosmicFish/CosmicFish",
"repo_path": "CosmicFish_extracted/CosmicFish-master/bundled/doxygen/testing/055_markdown.md",
"type": "Markdown"
}
|
<!--
// objective: test markdown parsing
// check: md_055_markdown.xml
-->
# Foo
## Bar
[Inline link](http://example.com/inline)
[Reference link][1]
[1]: http://example.com/reference
## Baz
More text
[Upper-cased reference link on last line][U]
[U]: http://example.com/last-line
|
CosmicFishREPO_NAMECosmicFishPATH_START.@CosmicFish_extracted@CosmicFish-master@bundled@doxygen@testing@055_markdown.md@.PATH_END.py
|
{
"filename": "test_serpapi.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/utilities/test_serpapi.py",
"type": "Python"
}
|
"""Integration test for SerpAPI."""
from langchain_community.utilities import SerpAPIWrapper
def test_call() -> None:
    """Check that a live SerpAPI query returns the expected answer."""
    wrapper = SerpAPIWrapper()  # type: ignore[call-arg]
    answer = wrapper.run("What was Obama's first name?")
    assert answer == "Barack Hussein Obama II"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@utilities@test_serpapi.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcats/line/colorbar/tickformatstop/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Auto-generated validator package initialiser (plotly codegen pattern):
# on Python < 3.7, or whenever a static type checker is looking, import the
# validator classes eagerly; otherwise install lazy attribute access so each
# submodule is only imported on first use (module __getattr__, PEP 562).
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._value import ValueValidator
    from ._templateitemname import TemplateitemnameValidator
    from ._name import NameValidator
    from ._enabled import EnabledValidator
    from ._dtickrange import DtickrangeValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import builds __all__, __getattr__ and __dir__ from the dotted
    # "submodule.ClassName" paths below; the list order determines __all__.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._value.ValueValidator",
            "._templateitemname.TemplateitemnameValidator",
            "._name.NameValidator",
            "._enabled.EnabledValidator",
            "._dtickrange.DtickrangeValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcats@line@colorbar@tickformatstop@__init__.py@.PATH_END.py
|
{
"filename": "_functions.py",
"repo_name": "AOtools/aotools",
"repo_path": "aotools_extracted/aotools-main/aotools/functions/_functions.py",
"type": "Python"
}
|
import numpy
def gaussian2d(size, width, amplitude=1., cent=None):
    '''
    Generates 2D gaussian distribution

    Args:
        size (tuple, float): Dimensions of Array to place gaussian (y, x)
        width (tuple, float): Width of distribution.
            Accepts tuple for x and y values in order (y, x).
        amplitude (float, optional): Amplitude of gaussian distribution. default is 1.
        cent (tuple, optional): Centre of distribution on grid in order (y, x). Default is middle

    Returns:
        ndarray: 2D array of shape (y, x) containing the gaussian
    '''
    # Scalars are accepted for size and width; a failed indexing attempt
    # means the same value is used for both axes.
    try:
        y_size, x_size = size[0], size[1]
    except (TypeError, IndexError):
        y_size = x_size = size

    try:
        y_width, x_width = float(width[0]), float(width[1])
    except (TypeError, IndexError):
        y_width = x_width = float(width)

    # Default centre is the middle of the array.
    if cent is None:
        y_cent, x_cent = y_size / 2., x_size / 2.
    else:
        y_cent, x_cent = cent[0], cent[1]

    # Pixel coordinate grids: grid_x varies along columns, grid_y along rows.
    grid_x, grid_y = numpy.meshgrid(range(0, x_size), range(0, y_size))

    # Normalised (per-axis) offsets from the centre.
    norm_x = (grid_x - x_cent) / x_width
    norm_y = (grid_y - y_cent) / y_width

    return amplitude * numpy.exp(-0.5 * (norm_x ** 2 + norm_y ** 2))
|
AOtoolsREPO_NAMEaotoolsPATH_START.@aotools_extracted@aotools-main@aotools@functions@_functions.py@.PATH_END.py
|
{
"filename": "_hovertemplatesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/pie/_hovertemplatesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``hovertemplatesrc`` source attribute of ``pie`` traces."""

    def __init__(self, plotly_name="hovertemplatesrc", parent_name="pie", **kwargs):
        # Resolve the defaulted edit type first so that an explicit
        # caller-supplied value still takes precedence.
        edit_type = kwargs.pop("edit_type", "none")
        super(HovertemplatesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@pie@_hovertemplatesrc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "alejandrobll/py-sphviewer",
"repo_path": "py-sphviewer_extracted/py-sphviewer-master/sphviewer/__init__.py",
"type": "Python"
}
|
"""
Py-SPHViewer is an object-oriented rendering library. It was developed mainly
for rendering cosmological Smoothed Particles Hydrodynamical (SPH) simulations of galaxy formation, but in its current version, it can renderize any set of particles.
Author: Alejandro Benitez-Llambay
E-mail: If you have any question, or you want to report bugs, issues, etc., please contact me at bllalejandro@gmail.com
Acknowledgment: Many thanks to Pablo Benitez-Llambay. He has improved the original idea a lot, and without his help, Py-SPHViewer would not be what it is.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib.pyplot as plt
from .Particles import Particles
from .Camera import Camera
from .Scene import Scene
from .Render import Render
from .version import __version__
|
alejandrobllREPO_NAMEpy-sphviewerPATH_START.@py-sphviewer_extracted@py-sphviewer-master@sphviewer@__init__.py@.PATH_END.py
|
{
"filename": "stn_alt_az_integration_test.py",
"repo_name": "creaneroDIAS/beamModelTester",
"repo_path": "beamModelTester_extracted/beamModelTester-master/WIP/stn_alt_az_integration_test.py",
"type": "Python"
}
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed May 30 19:17:51 2018
@author: creanero
"""
# NOTE(review): work-in-progress integration snippet.  Several names used
# below (stnid, merge_df, np, modes, m_keys, animated_plot, four_var_plot)
# are neither defined nor imported in this file -- it appears intended to be
# run inside a session where they already exist.  Confirm before running
# stand-alone.
from horizontalAzEl2stnAzEl import horizon_to_station

# Convert horizontal (az, alt) coordinates into the station frame for
# station `stnid`.  Judging by the indexing below (and the commented-out
# unpacking), horizon_to_station returns (station_az, station_alt).
stn_alt_az=horizon_to_station(stnid, merge_df.az, merge_df.alt)
#stn_alt_az_t=zip(*stn_alt_az)
#
#stn_alt=np.array(stn_alt_az_t[1])*180/np.pi
#stn_az=np.array(stn_alt_az_t[0])*180/np.pi

# stn_alt_az[0] -> azimuth column, stn_alt_az[1] -> altitude column.
merge_df['stn_alt']=np.array(stn_alt_az[1])
merge_df['stn_az']=np.array(stn_alt_az[0])

# Per-frame delay in milliseconds, derived from the requested frame rate.
time_delay = 1000.0/modes['frame_rate']
source = 'scope'

animated_plot(merge_df, modes, 'stn_az', m_keys, "Freq", source,
              time_delay)

four_var_plot(merge_df,modes,"stn_az","Freq",'Q',"stn_alt",source)
|
{
"filename": "DoubleSeq.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/acspycommon/src/ACSImpl/DoubleSeq.py",
"type": "Python"
}
|
# @(#) $Id: DoubleSeq.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $
#
# Copyright (C) 2001
# Associated Universities, Inc. Washington DC, USA.
#
# Produced for the ALMA project
#
# This library is free software; you can redistribute it and/or modify it under
# the terms of the GNU Library General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Library General Public License for more
# details.
#
# You should have received a copy of the GNU Library General Public License
# along with this library; if not, write to the Free Software Foundation, Inc.,
# 675 Massachusetts Ave, Cambridge, MA 02139, USA. Correspondence concerning
# ALMA should be addressed as follows:
#
# Internet email: alma-sw-admin@nrao.edu
# "@(#) $Id: DoubleSeq.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $"
#
# who when what
# -------- ---------- ----------------------------------------------
# dfugate 2004/07/21 Created.
#------------------------------------------------------------------------------
'''
This module provides an implementation of the PdoubleSeq IDL interface:
'''
__version__ = "$Id: DoubleSeq.py,v 1.1.1.1 2012/03/07 17:40:45 acaproni Exp $"
#--REGULAR IMPORTS-------------------------------------------------------------
from traceback import print_exc
#--CORBA STUBS-----------------------------------------------------------------
import ACS__POA
from ACS import CBDescOut
#--ACS Imports-----------------------------------------------------------------
from ACSImpl.GenericProperty import GenericProperty
from ACSImpl.Monitors import Monitordouble
#--GLOBALS---------------------------------------------------------------------
#------------------------------------------------------------------------------
#--P property------------------------------------------------------------------
#------------------------------------------------------------------------------
class PdoubleSeq(GenericProperty):
    '''
    Properties can be derived from PdoubleSeq only if their IDL derives from
    ACS::PdoubleSeq.

    The property value is a sequence of doubles; all CDB-derived
    characteristics below are coerced to float before being returned.
    '''
    #--------------------------------------------------------------------------
    def __init__(self, name, charCompRef, devIORef):
        '''
        Constructor

        Params:
        - name is quite literally the name of the property
        - charCompRef is the characteristic component object which contains
        this property
        - devIORef is a reference to a DevIO to be used with this property

        Returns: Nothing

        Raises: Nothing.
        '''
        GenericProperty.__init__(self, name, charCompRef, devIORef)
        return
    #--------------------------------------------------------------------------
    def coerceToPropertyType(self, value=None):
        '''
        Overriden.

        Converts the stringified CDB value (e.g. "1.5, 2, 3") into a tuple
        of floats.  Returns an empty sequence when the value is missing or
        cannot be parsed.
        '''
        # Something went wrong upstream and no value was provided:
        # return the default value, an empty sequence.
        # (Was "value == None"; an identity test is the correct check.)
        if value is None:
            return []
        try:
            # NOTE(review): eval() of CDB-supplied text executes arbitrary
            # Python if the CDB is not trusted; kept for backward
            # compatibility with existing CDB entries such as "1,2,3".
            retVal = eval("[" + value + "]")
            # Coerce every element into a double type (handles entries
            # written like "123" without a decimal point).
            return tuple(float(item) for item in retVal)
        except Exception:
            # Warn them about CDB access (narrowed from a bare except, which
            # would also have swallowed KeyboardInterrupt/SystemExit).
            self.getLogger().logAlert("Unable to coerce '" + str(value) + "' into the correct type!")
            print_exc()
            # Return an acceptable default value instead...an empty sequence.
            return []
    #--------------------------------------------------------------------------
    def getMonitorObject(self, scheduler, timeoutID):
        '''
        Helper method used to return a monitor of the correct type.
        '''
        return Monitordouble(scheduler, timeoutID)
    #--------------------------------------------------------------------------
    #--Overriden methods because BACI is inconsistent--------------------------
    #--------------------------------------------------------------------------
    def _get_min_delta_trigger(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) min_delta_trigger;
        '''
        try:
            return float(str(self.getCDBDict()['min_delta_trig']))
        except Exception:
            # Warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # Return an acceptable default value instead.  It's up to the
            # overriden coerceToPropertyType method to decide what an
            # acceptable default value is!
            return 0.0
    #--------------------------------------------------------------------------
    def _get_default_value(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) default_value;
        '''
        try:
            return float(str(self.getCDBDict()['default_value']))
        except Exception:
            # Warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # Return an acceptable default value instead.  It's up to the
            # overriden coerceToPropertyType method to decide what an
            # acceptable default value is!
            return 0.0
    #--------------------------------------------------------------------------
    def _get_graph_min(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) graph_min;
        '''
        try:
            return float(str(self.getCDBDict()['graph_min']))
        except Exception:
            # Warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # Return an acceptable default value instead.
            return 0.0
    #--------------------------------------------------------------------------
    def _get_graph_max(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) graph_max;
        '''
        try:
            return float(str(self.getCDBDict()['graph_max']))
        except Exception:
            # Warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # Return an acceptable default value instead.
            return 1000.0
    #--------------------------------------------------------------------------
    def _get_min_step(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) min_step;
        '''
        try:
            return float(str(self.getCDBDict()['min_step']))
        except Exception:
            # Warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # Return an acceptable default value instead.  It's up to the
            # overriden coerceToPropertyType method to decide what an
            # acceptable default value is!
            return 0.0
#------------------------------------------------------------------------------
#--RO property-----------------------------------------------------------------
#------------------------------------------------------------------------------
class ROdoubleSeq(ACS__POA.ROdoubleSeq, PdoubleSeq):
    '''
    Properties can be derived from ROdoubleSeq only if their IDL derives from
    ACS::ROdoubleSeq.
    '''
    #--------------------------------------------------------------------------
    def __init__(self, name, charCompRef, devIORef=None):
        '''
        Constructor

        Params:
        - name is the quite literally the name of the property
        - charCompRef is the characteristic component object which contains this
        property
        - devIORef is a reference to a DevIO to be used with this property

        Returns: Nothing

        Raises: Nothing.
        '''
        PdoubleSeq.__init__(self, name, charCompRef, devIORef)
        return
    #--------------------------------------------------------------------------
    #--Overriden methods because BACI is inconsistent--------------------------
    #--------------------------------------------------------------------------
    # Each alarm getter below reads one key from the CDB characteristics
    # dictionary and coerces it to float; on any failure the access problem
    # is logged and a hard-coded default is returned instead.
    def _get_alarm_low_on(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) alarm_low_on;
        '''
        try:
            return float(str(self.getCDBDict()['alarm_low_on']))
        except:
            # warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # return an acceptable default value instead.
            return 0.0
    #--------------------------------------------------------------------------
    def _get_alarm_low_off(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) alarm_low_off;
        '''
        try:
            return float(str(self.getCDBDict()['alarm_low_off']))
        except:
            # warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # return an acceptable default value instead.
            return 0.0
    #--------------------------------------------------------------------------
    def _get_alarm_high_on(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) alarm_high_on;
        '''
        try:
            return float(str(self.getCDBDict()['alarm_high_on']))
        except:
            # warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # return an acceptable default value instead.
            return 0.0
    #--------------------------------------------------------------------------
    def _get_alarm_high_off(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) alarm_high_off;
        '''
        try:
            return float(str(self.getCDBDict()['alarm_high_off']))
        except:
            # warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # return an acceptable default value instead.
            return 0.0
    #--------------------------------------------------------------------------
    def get_sync(self):
        '''
        Overriden.

        Ensures the value element of the returned pair is always a list:
        a non-iterable scalar read is wrapped into a one-element list.
        '''
        # GenericProperty.get_sync presumably returns (value, completion) --
        # confirm; only the first element is normalised here.
        retVal = list(GenericProperty.get_sync(self))
        try:
            retVal[0] = list(retVal[0])
        except:
            # value was not iterable: wrap the scalar.
            retVal[0] = [retVal[0]]
        return tuple(retVal)
#-----------------------------------------------------------------------------
#--RW property----------------------------------------------------------------
#-----------------------------------------------------------------------------
class RWdoubleSeq(ACS__POA.RWdoubleSeq, ROdoubleSeq):
    '''
    Properties can be derived from RWdoubleSeq only if their IDL derives from
    ACS::RWdoubleSeq.
    '''
    # NOTE: this module is Python 2 only -- see the 0L long literals and the
    # reliance on map() returning a list in increment/decrement below.
    #-------------------------------------------------------------------------
    def __init__(self, name, charCompRef, devIORef=None):
        '''
        Constructor

        Params:
        - name is the quite literally the name of the property
        - charCompRef is the characteristic component object which contains this
        property
        - devIORef is a reference to a DevIO to be used with this property

        Returns: Nothing

        Raises: Nothing.
        '''
        ROdoubleSeq.__init__(self, name, charCompRef, devIORef)
        return
    #--------------------------------------------------------------------------
    #--Overriden methods because BACI is inconsistent--------------------------
    #--------------------------------------------------------------------------
    def _get_min_value(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) min_value;
        '''
        try:
            return float(str(self.getCDBDict()['min_value']))
        except:
            # warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # return an acceptable default value instead.
            return 0.0
    #--------------------------------------------------------------------------
    def _get_max_value(self):
        '''
        Implementation of the IDL attribute.
        readonly attribute (unknown type) max_value;
        '''
        try:
            return float(str(self.getCDBDict()['max_value']))
        except:
            # warn them about CDB access
            self.getLogger().logInfo("Some problem occurred when attempting to retrieve data from the ACS CDB")
            print_exc()
            # return an acceptable default value instead.
            return 1000.0
    #--------------------------------------------------------------------------
    def increment(self, cb, desc):
        '''
        Implementation of the IDL method.
        void increment (in CBvoid cb, in CBDescIn desc);
        '''
        # Add one to every element of the current sequence value, write the
        # new sequence back synchronously, then report completion through
        # the supplied CORBA callback.
        compl = self.set_sync(map(lambda x: x+1,self.get_sync()[0]))
        cb.done(compl, CBDescOut(0L, desc.id_tag))
        return
    #--------------------------------------------------------------------------
    def decrement(self, cb, desc):
        '''
        Implementation of the IDL method.
        void decrement (in CBvoid cb, in CBDescIn desc);
        '''
        # Subtract one from every element of the current sequence value and
        # report completion through the supplied CORBA callback.
        compl = self.set_sync(map(lambda x: x-1,self.get_sync()[0]))
        cb.done(compl, CBDescOut(0L, desc.id_tag))
        return
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@acspycommon@src@ACSImpl@DoubleSeq.py@.PATH_END.py
|
{
"filename": "compiler.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/cairosvg_ES/cssselect2/compiler.py",
"type": "Python"
}
|
import re
from urllib.parse import urlparse
from tinycss2.nth import parse_nth
from webencodings import ascii_lower
from . import parser
from .parser import SelectorError
# http://dev.w3.org/csswg/selectors/#whitespace
split_whitespace = re.compile('[^ \t\r\n\f]+').findall
def compile_selector_list(input, namespaces=None):
    """Compile a (comma-separated) list of selectors.

    :param input:
        A string, or an iterable of tinycss2 component values such as
        the :attr:`tinycss2.ast.QualifiedRule.prelude` of a style rule.
    :param namespaces:
        A optional dictionary of all `namespace prefix declarations
        <http://www.w3.org/TR/selectors/#nsdecl>`_ in scope for this selector.
        Keys are namespace prefixes as strings, or ``None`` for the default
        namespace.
        Values are namespace URLs as strings.
        If omitted, assume that no prefix is declared.
    :returns:
        A list of opaque :class:`compiler.CompiledSelector` objects.

    """
    parsed_selectors = parser.parse(input, namespaces)
    return [CompiledSelector(parsed) for parsed in parsed_selectors]
class CompiledSelector:
    """Abstract representation of a selector.

    Compiles the parsed selector tree into a Python boolean function
    (``self.test``) and caches a few fast-path attributes (id, class,
    local name, namespace) for matching shortcuts.
    """
    def __init__(self, parsed_selector):
        source = _compile_node(parsed_selector.parsed_tree)
        self.never_matches = source == '0'
        # Build the matcher as a one-argument lambda over the generated
        # source; the globals expose the helpers the source may reference.
        self.test = eval(
            'lambda el: ' + source,
            {
                'split_whitespace': split_whitespace,
                'ascii_lower': ascii_lower,
                'urlparse': urlparse,
            },
            {})
        self.specificity = parsed_selector.specificity
        self.pseudo_element = parsed_selector.pseudo_element
        # Fast-path attributes, filled in from the rightmost compound
        # selector below when present.
        self.id = None
        self.class_name = None
        self.local_name = None
        self.lower_local_name = None
        self.namespace = None
        self.requires_lang_attr = False
        node = parsed_selector.parsed_tree
        if isinstance(node, parser.CombinedSelector):
            node = node.right
        for simple in node.simple_selectors:
            if isinstance(simple, parser.IDSelector):
                self.id = simple.ident
            elif isinstance(simple, parser.ClassSelector):
                self.class_name = simple.class_name
            elif isinstance(simple, parser.LocalNameSelector):
                self.local_name = simple.local_name
                self.lower_local_name = simple.lower_local_name
            elif isinstance(simple, parser.NamespaceSelector):
                self.namespace = simple.namespace
            elif isinstance(simple, parser.AttributeSelector):
                if simple.name == 'lang':
                    self.requires_lang_attr = True
def _compile_node(selector):
    """Return a boolean expression, as a Python source string.
    When evaluated in a context where the `el` variable is an
    :class:`cssselect2.tree.Element` object, tells whether the element is a
    subject of `selector`.
    """
    # To avoid precedence-related bugs, any sub-expression that is passed
    # around must be "atomic": add parentheses when the top-level would be
    # an operator. Bare literals and function calls are fine.
    # 1 and 0 are used for True and False to avoid global lookups.
    # Combined selector (e.g. "A > B"): compile both sides and glue them
    # with an expression that walks ancestors/siblings as the combinator
    # requires.
    if isinstance(selector, parser.CombinedSelector):
        left_inside = _compile_node(selector.left)
        if left_inside == '0':
            return '0'  # 0 and x == 0
        elif left_inside == '1':
            # 1 and x == x, but the element matching 1 still needs to exist.
            if selector.combinator in (' ', '>'):
                left = 'el.parent is not None'
            elif selector.combinator in ('~', '+'):
                left = 'el.previous is not None'
            else:
                raise SelectorError('Unknown combinator', selector.combinator)
        # Rebind the `el` name inside a generator-expressions (in a new scope)
        # so that 'left_inside' applies to different elements.
        elif selector.combinator == ' ':
            left = f'any(({left_inside}) for el in el.ancestors)'
        elif selector.combinator == '>':
            left = (
                f'next(el is not None and ({left_inside}) '
                'for el in [el.parent])')
        elif selector.combinator == '+':
            left = (
                f'next(el is not None and ({left_inside}) '
                'for el in [el.previous])')
        elif selector.combinator == '~':
            left = f'any(({left_inside}) for el in el.previous_siblings)'
        else:
            raise SelectorError('Unknown combinator', selector.combinator)
        right = _compile_node(selector.right)
        if right == '0':
            return '0'  # 0 and x == 0
        elif right == '1':
            return left  # 1 and x == x
        else:
            # Evaluate combinators right to left
            return f'({right}) and ({left})'
    # Compound selector: logical AND of all its simple selectors.
    elif isinstance(selector, parser.CompoundSelector):
        sub_expressions = [
            expr for expr in map(_compile_node, selector.simple_selectors)
            if expr != '1']
        if len(sub_expressions) == 1:
            test = sub_expressions[0]
        elif '0' in sub_expressions:
            test = '0'
        elif sub_expressions:
            test = ' and '.join(f'({e})' for e in sub_expressions)
        else:
            test = '1'  # all([]) == True
        return test
    # :not(...) — matches when none of the inner selectors match.
    elif isinstance(selector, parser.NegationSelector):
        sub_expressions = [
            expr for expr in [
                _compile_node(selector.parsed_tree)
                for selector in selector.selector_list]
            if expr != '1']
        if not sub_expressions:
            return '0'
        return f'not ({" or ".join(f"({expr})" for expr in sub_expressions)})'
    # :has(...) — relative selectors evaluated against descendants or
    # siblings of the current element.
    elif isinstance(selector, parser.RelationalSelector):
        sub_expressions = []
        for relative_selector in selector.selector_list:
            expression = _compile_node(relative_selector.selector.parsed_tree)
            if expression == '0':
                continue
            if relative_selector.combinator == ' ':
                elements = 'list(el.iter_subtree())[1:]'
            elif relative_selector.combinator == '>':
                elements = 'el.iter_children()'
            elif relative_selector.combinator == '+':
                elements = 'list(el.iter_next_siblings())[:1]'
            elif relative_selector.combinator == '~':
                elements = 'el.iter_next_siblings()'
            # NOTE(review): there is no fallback branch for an unknown
            # combinator here — 'elements' would be unbound. Presumably the
            # parser only ever emits these four; confirm against parser.py.
            sub_expressions.append(f'(any({expression} for el in {elements}))')
        return ' or '.join(sub_expressions)
    # :is()/:where() — logical OR of the inner selectors.
    elif isinstance(selector, (
            parser.MatchesAnySelector, parser.SpecificityAdjustmentSelector)):
        sub_expressions = [
            expr for expr in [
                _compile_node(selector.parsed_tree)
                for selector in selector.selector_list]
            if expr != '0']
        if not sub_expressions:
            return '0'
        return ' or '.join(f'({expr})' for expr in sub_expressions)
    # Type selector (element name); HTML documents compare case-folded.
    elif isinstance(selector, parser.LocalNameSelector):
        if selector.lower_local_name == selector.local_name:
            return f'el.local_name == {selector.local_name!r}'
        else:
            return (
                f'el.local_name == ({selector.lower_local_name!r} '
                f'if el.in_html_document else {selector.local_name!r})')
    elif isinstance(selector, parser.NamespaceSelector):
        return f'el.namespace_url == {selector.namespace!r}'
    elif isinstance(selector, parser.ClassSelector):
        return f'{selector.class_name!r} in el.classes'
    elif isinstance(selector, parser.IDSelector):
        return f'el.id == {selector.ident!r}'
    # Attribute selectors: [attr], [attr=v], [attr~=v], [attr|=v],
    # [attr^=v], [attr$=v], [attr*=v].
    elif isinstance(selector, parser.AttributeSelector):
        if selector.namespace is not None:
            if selector.namespace:
                if selector.name == selector.lower_name:
                    key = repr(f'{{{selector.namespace}}}{selector.name}')
                else:
                    lower = f'{{{selector.namespace}}}{selector.lower_name}'
                    name = f'{{{selector.namespace}}}{selector.name}'
                    key = f'({lower!r} if el.in_html_document else {name!r})'
            else:
                if selector.name == selector.lower_name:
                    key = repr(selector.name)
                else:
                    lower, name = selector.lower_name, selector.name
                    key = f'({lower!r} if el.in_html_document else {name!r})'
            value = selector.value
            attribute_value = f'el.etree_element.get({key}, "")'
            if selector.case_sensitive is False:
                value = value.lower()
                attribute_value += '.lower()'
            if selector.operator is None:
                return f'{key} in el.etree_element.attrib'
            elif selector.operator == '=':
                return (
                    f'{key} in el.etree_element.attrib and '
                    f'{attribute_value} == {value!r}')
            elif selector.operator == '~=':
                return (
                    '0' if len(value.split()) != 1 or value.strip() != value
                    else f'{value!r} in split_whitespace({attribute_value})')
            elif selector.operator == '|=':
                return (
                    f'{key} in el.etree_element.attrib and '
                    f'{attribute_value} == {value!r} or '
                    f'{attribute_value}.startswith({(value + "-")!r})')
            elif selector.operator == '^=':
                if value:
                    return f'{attribute_value}.startswith({value!r})'
                else:
                    return '0'
            elif selector.operator == '$=':
                return (
                    f'{attribute_value}.endswith({value!r})' if value else '0')
            elif selector.operator == '*=':
                return f'{value!r} in {attribute_value}' if value else '0'
            else:
                raise SelectorError(
                    'Unknown attribute operator', selector.operator)
        else:  # In any namespace
            raise NotImplementedError  # TODO
    # Simple (non-functional) pseudo-classes.
    elif isinstance(selector, parser.PseudoClassSelector):
        if selector.name in ('link', 'any-link', 'local-link'):
            test = html_tag_eq('a', 'area', 'link')
            test += ' and el.etree_element.get("href") is not None '
            if selector.name == 'local-link':
                test += 'and not urlparse(el.etree_element.get("href")).scheme'
            return test
        elif selector.name == 'enabled':
            input = html_tag_eq(
                'button', 'input', 'select', 'textarea', 'option')
            group = html_tag_eq('optgroup', 'menuitem', 'fieldset')
            a = html_tag_eq('a', 'area', 'link')
            return (
                f'({input} and el.etree_element.get("disabled") is None'
                ' and not el.in_disabled_fieldset) or'
                f'({group} and el.etree_element.get("disabled") is None) or '
                f'({a} and el.etree_element.get("href") is not None)')
        elif selector.name == 'disabled':
            input = html_tag_eq(
                'button', 'input', 'select', 'textarea', 'option')
            group = html_tag_eq('optgroup', 'menuitem', 'fieldset')
            return (
                f'({input} and (el.etree_element.get("disabled") is not None'
                ' or el.in_disabled_fieldset)) or'
                f'({group} and el.etree_element.get("disabled") is not None)')
        elif selector.name == 'checked':
            input = html_tag_eq('input', 'menuitem')
            option = html_tag_eq('option')
            return (
                f'({input} and el.etree_element.get("checked") is not None and'
                ' ascii_lower(el.etree_element.get("type", "")) '
                ' in ("checkbox", "radio")) or ('
                f'{option} and el.etree_element.get("selected") is not None)')
        elif selector.name in (
                'visited', 'hover', 'active', 'focus', 'focus-within',
                'focus-visible', 'target', 'target-within', 'current', 'past',
                'future', 'playing', 'paused', 'seeking', 'buffering',
                'stalled', 'muted', 'volume-locked', 'user-valid',
                'user-invalid'):
            # Not applicable in a static context: never match.
            return '0'
        elif selector.name in ('root', 'scope'):
            return 'el.parent is None'
        elif selector.name == 'first-child':
            return 'el.index == 0'
        elif selector.name == 'last-child':
            return 'el.index + 1 == len(el.etree_siblings)'
        elif selector.name == 'first-of-type':
            return (
                'all(s.tag != el.etree_element.tag'
                ' for s in el.etree_siblings[:el.index])')
        elif selector.name == 'last-of-type':
            return (
                'all(s.tag != el.etree_element.tag'
                ' for s in el.etree_siblings[el.index + 1:])')
        elif selector.name == 'only-child':
            return 'len(el.etree_siblings) == 1'
        elif selector.name == 'only-of-type':
            return (
                'all(s.tag != el.etree_element.tag or i == el.index'
                ' for i, s in enumerate(el.etree_siblings))')
        elif selector.name == 'empty':
            return 'not (el.etree_children or el.etree_element.text)'
        else:
            raise SelectorError('Unknown pseudo-class', selector.name)
    # Functional pseudo-classes: :lang() and the :nth-*() family.
    elif isinstance(selector, parser.FunctionalPseudoClassSelector):
        if selector.name == 'lang':
            langs = []
            tokens = [
                token for token in selector.arguments
                if token.type not in ('whitespace', 'comment')]
            while tokens:
                token = tokens.pop(0)
                if token.type == 'ident':
                    langs.append(token.lower_value)
                elif token.type == 'string':
                    langs.append(ascii_lower(token.value))
                else:
                    raise SelectorError('Invalid arguments for :lang()')
                if tokens:
                    token = tokens.pop(0)
                    if token.type != 'ident' and token.value != ',':
                        raise SelectorError('Invalid arguments for :lang()')
            return ' or '.join(
                f'el.lang == {lang!r} or el.lang.startswith({(lang + "-")!r})'
                for lang in langs)
        else:
            # Split the arguments into the An+B part and the optional
            # "of <selector-list>" part.
            nth = []
            selector_list = []
            current_list = nth
            for argument in selector.arguments:
                if argument.type == 'ident' and argument.value == 'of':
                    if current_list is nth:
                        current_list = selector_list
                        continue
                current_list.append(argument)
            if selector_list:
                test = ' and '.join(
                    _compile_node(selector.parsed_tree)
                    for selector in parser.parse(selector_list))
                if selector.name == 'nth-child':
                    count = (
                        f'sum(1 for el in el.previous_siblings if ({test}))')
                elif selector.name == 'nth-last-child':
                    count = (
                        'sum(1 for el in'
                        ' tuple(el.iter_siblings())[el.index + 1:]'
                        f' if ({test}))')
                elif selector.name == 'nth-of-type':
                    count = (
                        'sum(1 for s in ('
                        '    el for el in el.previous_siblings'
                        f'    if ({test}))'
                        '  if s.etree_element.tag == el.etree_element.tag)')
                elif selector.name == 'nth-last-of-type':
                    count = (
                        'sum(1 for s in ('
                        '    el for el in'
                        '    tuple(el.iter_siblings())[el.index + 1:]'
                        f'    if ({test}))'
                        '  if s.etree_element.tag == el.etree_element.tag)')
                else:
                    raise SelectorError('Unknown pseudo-class', selector.name)
                count += f'if ({test}) else float("nan")'
            else:
                if current_list is selector_list:
                    raise SelectorError(
                        f'Invalid arguments for :{selector.name}()')
                if selector.name == 'nth-child':
                    count = 'el.index'
                elif selector.name == 'nth-last-child':
                    count = 'len(el.etree_siblings) - el.index - 1'
                elif selector.name == 'nth-of-type':
                    count = (
                        'sum(1 for s in el.etree_siblings[:el.index]'
                        ' if s.tag == el.etree_element.tag)')
                elif selector.name == 'nth-last-of-type':
                    count = (
                        'sum(1 for s in el.etree_siblings[el.index + 1:]'
                        ' if s.tag == el.etree_element.tag)')
                else:
                    raise SelectorError('Unknown pseudo-class', selector.name)
            result = parse_nth(nth)
            if result is None:
                raise SelectorError(
                    f'Invalid arguments for :{selector.name}()')
            a, b = result
            # x is the number of siblings before/after the element
            # Matches if a positive or zero integer n exists so that:
            # x = a*n + b-1
            # x = a*n + B
            B = b - 1
            if a == 0:
                # x = B
                return f'({count}) == {B}'
            else:
                # n = (x - B) / a
                return (
                    'next(r == 0 and n >= 0'
                    f'     for n, r in [divmod(({count}) - {B}, {a})])')
    else:
        raise TypeError(type(selector), selector)
def html_tag_eq(*local_names):
    """Generate an expression testing an element against HTML local names.

    Returns Python source for a boolean test on ``el``: in an HTML document
    the local name is compared directly, otherwise the fully namespaced
    etree tag (XHTML namespace) is compared.
    """
    xhtml = '{http://www.w3.org/1999/xhtml}'
    if len(local_names) == 1:
        name = local_names[0]
        return (
            f'((el.local_name == {name!r}) if el.in_html_document '
            f'else (el.etree_element.tag == {(xhtml + name)!r}))')
    names = ', '.join(repr(name) for name in local_names)
    tags = ', '.join(repr(xhtml + name) for name in local_names)
    return (
        f'((el.local_name in ({names})) if el.in_html_document '
        f'else (el.etree_element.tag in ({tags})))')
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@cairosvg_ES@cssselect2@compiler.py@.PATH_END.py
|
{
"filename": "test_decomp_cholesky.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/linalg/tests/test_decomp_cholesky.py",
"type": "Python"
}
|
import pytest
import numpy as np
from numpy.testing import assert_array_almost_equal
from pytest import raises as assert_raises
from numpy import array, transpose, dot, conjugate, zeros_like, empty
from numpy.random import random
from scipy.linalg import (cholesky, cholesky_banded, cho_solve_banded,
cho_factor, cho_solve)
from scipy.linalg._testutils import assert_no_overwrite
class TestCholesky:
    """Tests for scipy.linalg.cholesky on dense matrices."""
    def test_simple(self):
        # Round-trip a small SPD matrix: C^T C == a (upper factor), then
        # the transposed factor against lower=1.
        a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
        c = cholesky(a)
        assert_array_almost_equal(dot(transpose(c), c), a)
        c = transpose(c)
        a = dot(c, transpose(c))
        assert_array_almost_equal(cholesky(a, lower=1), c)
    def test_check_finite(self):
        # Same round-trip with the finiteness check disabled.
        a = [[8, 2, 3], [2, 9, 3], [3, 3, 6]]
        c = cholesky(a, check_finite=False)
        assert_array_almost_equal(dot(transpose(c), c), a)
        c = transpose(c)
        a = dot(c, transpose(c))
        assert_array_almost_equal(cholesky(a, lower=1, check_finite=False), c)
    def test_simple_complex(self):
        # Hermitian positive definite input: C^H C == a.
        m = array([[3+1j, 3+4j, 5], [0, 2+2j, 2+7j], [0, 0, 7+4j]])
        a = dot(transpose(conjugate(m)), m)
        c = cholesky(a)
        a1 = dot(transpose(conjugate(c)), c)
        assert_array_almost_equal(a, a1)
        c = transpose(c)
        a = dot(c, transpose(conjugate(c)))
        assert_array_almost_equal(cholesky(a, lower=1), c)
    def test_random(self):
        # Random SPD matrices (diagonally dominated to guarantee
        # positive-definiteness).
        n = 20
        for k in range(2):
            m = random([n, n])
            for i in range(n):
                m[i, i] = 20*(.1+m[i, i])
            a = dot(transpose(m), m)
            c = cholesky(a)
            a1 = dot(transpose(c), c)
            assert_array_almost_equal(a, a1)
            c = transpose(c)
            a = dot(c, transpose(c))
            assert_array_almost_equal(cholesky(a, lower=1), c)
    def test_random_complex(self):
        # Random Hermitian positive definite matrices.
        n = 20
        for k in range(2):
            m = random([n, n])+1j*random([n, n])
            for i in range(n):
                m[i, i] = 20*(.1+abs(m[i, i]))
            a = dot(transpose(conjugate(m)), m)
            c = cholesky(a)
            a1 = dot(transpose(conjugate(c)), c)
            assert_array_almost_equal(a, a1)
            c = transpose(c)
            a = dot(c, transpose(conjugate(c)))
            assert_array_almost_equal(cholesky(a, lower=1), c)
    @pytest.mark.xslow
    def test_int_overflow(self):
        # regression test for
        # https://github.com/scipy/scipy/issues/17436
        # the problem was an int overflow in zeroing out
        # the unused triangular part
        n = 47_000
        x = np.eye(n, dtype=np.float64, order='F')
        x[:4, :4] = np.array([[4, -2, 3, -1],
                              [-2, 4, -3, 1],
                              [3, -3, 5, 0],
                              [-1, 1, 0, 5]])
        cholesky(x, check_finite=False, overwrite_a=True)  # should not segfault
    @pytest.mark.parametrize('dt', [int, float, np.float32, complex, np.complex64])
    @pytest.mark.parametrize('dt_b', [int, float, np.float32, complex, np.complex64])
    def test_empty(self, dt, dt_b):
        # Empty (0x0) input must yield an empty factor with the same dtype
        # that a non-empty input of the same dtype would produce, and
        # cho_solve must handle empty right-hand sides consistently.
        a = empty((0, 0), dtype=dt)
        c = cholesky(a)
        assert c.shape == (0, 0)
        assert c.dtype == cholesky(np.eye(2, dtype=dt)).dtype
        c_and_lower = (c, True)
        b = np.asarray([], dtype=dt_b)
        x = cho_solve(c_and_lower, b)
        assert x.shape == (0,)
        assert x.dtype == cho_solve((np.eye(2, dtype=dt), True),
                                    np.ones(2, dtype=dt_b)).dtype
        b = empty((0, 0), dtype=dt_b)
        x = cho_solve(c_and_lower, b)
        assert x.shape == (0, 0)
        assert x.dtype == cho_solve((np.eye(2, dtype=dt), True),
                                    np.ones(2, dtype=dt_b)).dtype
        # Malformed empties (1-D, ragged, plain lists) must be rejected.
        a1 = array([])
        a2 = array([[]])
        a3 = []
        a4 = [[]]
        for x in ([a1, a2, a3, a4]):
            assert_raises(ValueError, cholesky, x)
class TestCholeskyBanded:
    """Tests for cholesky_banded() and cho_solve_banded."""
    def test_check_finite(self):
        # Symmetric positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, 0.2],
                   [0.0, 0.0, 0.2, 4.0]])
        # Banded storage form of `a`.
        ab = array([[-1.0, 1.0, 0.5, 0.2],
                    [4.0, 4.0, 4.0, 4.0]])
        c = cholesky_banded(ab, lower=False, check_finite=False)
        # Reconstruct the dense upper factor from the banded result and
        # verify a == U^T U.
        ufac = zeros_like(a)
        ufac[list(range(4)), list(range(4))] = c[-1]
        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
        assert_array_almost_equal(a, dot(ufac.T, ufac))
        b = array([0.0, 0.5, 4.2, 4.2])
        x = cho_solve_banded((c, False), b, check_finite=False)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
    def test_upper_real(self):
        # Symmetric positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, 0.2],
                   [0.0, 0.0, 0.2, 4.0]])
        # Banded storage form of `a`.
        ab = array([[-1.0, 1.0, 0.5, 0.2],
                    [4.0, 4.0, 4.0, 4.0]])
        c = cholesky_banded(ab, lower=False)
        ufac = zeros_like(a)
        ufac[list(range(4)), list(range(4))] = c[-1]
        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
        assert_array_almost_equal(a, dot(ufac.T, ufac))
        b = array([0.0, 0.5, 4.2, 4.2])
        x = cho_solve_banded((c, False), b)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
    def test_upper_complex(self):
        # Hermitian positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, -0.2j],
                   [0.0, 0.0, 0.2j, 4.0]])
        # Banded storage form of `a`.
        ab = array([[-1.0, 1.0, 0.5, -0.2j],
                    [4.0, 4.0, 4.0, 4.0]])
        c = cholesky_banded(ab, lower=False)
        ufac = zeros_like(a)
        ufac[list(range(4)), list(range(4))] = c[-1]
        ufac[(0, 1, 2), (1, 2, 3)] = c[0, 1:]
        # Complex case uses the conjugate transpose: a == U^H U.
        assert_array_almost_equal(a, dot(ufac.conj().T, ufac))
        b = array([0.0, 0.5, 4.0-0.2j, 0.2j + 4.0])
        x = cho_solve_banded((c, False), b)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
    def test_lower_real(self):
        # Symmetric positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, 0.2],
                   [0.0, 0.0, 0.2, 4.0]])
        # Banded storage form of `a`.
        ab = array([[4.0, 4.0, 4.0, 4.0],
                    [1.0, 0.5, 0.2, -1.0]])
        c = cholesky_banded(ab, lower=True)
        # Reconstruct the dense lower factor and verify a == L L^T.
        lfac = zeros_like(a)
        lfac[list(range(4)), list(range(4))] = c[0]
        lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]
        assert_array_almost_equal(a, dot(lfac, lfac.T))
        b = array([0.0, 0.5, 4.2, 4.2])
        x = cho_solve_banded((c, True), b)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0, 1.0])
    def test_lower_complex(self):
        # Hermitian positive definite banded matrix `a`
        a = array([[4.0, 1.0, 0.0, 0.0],
                   [1.0, 4.0, 0.5, 0.0],
                   [0.0, 0.5, 4.0, -0.2j],
                   [0.0, 0.0, 0.2j, 4.0]])
        # Banded storage form of `a`.
        ab = array([[4.0, 4.0, 4.0, 4.0],
                    [1.0, 0.5, 0.2j, -1.0]])
        c = cholesky_banded(ab, lower=True)
        lfac = zeros_like(a)
        lfac[list(range(4)), list(range(4))] = c[0]
        lfac[(1, 2, 3), (0, 1, 2)] = c[1, :3]
        assert_array_almost_equal(a, dot(lfac, lfac.conj().T))
        b = array([0.0, 0.5j, 3.8j, 3.8])
        x = cho_solve_banded((c, True), b)
        assert_array_almost_equal(x, [0.0, 0.0, 1.0j, 1.0])
    @pytest.mark.parametrize('dt', [int, float, np.float32, complex, np.complex64])
    @pytest.mark.parametrize('dt_b', [int, float, np.float32, complex, np.complex64])
    def test_empty(self, dt, dt_b):
        # Empty banded input: the factor is empty and dtypes match what a
        # small non-empty problem of the same dtype produces.
        ab = empty((0, 0), dtype=dt)
        cb = cholesky_banded(ab)
        assert cb.shape == (0, 0)
        m = cholesky_banded(np.array([[0, 0], [1, 1]], dtype=dt))
        assert cb.dtype == m.dtype
        cb_and_lower = (cb, True)
        b = np.asarray([], dtype=dt_b)
        x = cho_solve_banded(cb_and_lower, b)
        assert x.shape == (0,)
        dtype_nonempty = cho_solve_banded((m, True), np.ones(2, dtype=dt_b)).dtype
        assert x.dtype == dtype_nonempty
        b = empty((0, 0), dtype=dt_b)
        x = cho_solve_banded(cb_and_lower, b)
        assert x.shape == (0, 0)
        assert x.dtype == dtype_nonempty
class TestOverwrite:
    """Check that the Cholesky routines do not overwrite their inputs."""
    def test_cholesky(self):
        assert_no_overwrite(cholesky, [(3, 3)])
    def test_cho_factor(self):
        assert_no_overwrite(cho_factor, [(3, 3)])
    def test_cho_solve(self):
        piv = cho_factor(array([[2, -1, 0], [-1, 2, -1], [0, -1, 2]]))
        assert_no_overwrite(lambda rhs: cho_solve(piv, rhs), [(3,)])
    def test_cholesky_banded(self):
        assert_no_overwrite(cholesky_banded, [(2, 3)])
    def test_cho_solve_banded(self):
        cb = cholesky_banded(array([[0, -1, -1], [2, 2, 2]]))
        assert_no_overwrite(lambda rhs: cho_solve_banded((cb, False), rhs),
                            [(3,)])
class TestChoFactor:
@pytest.mark.parametrize('dt', [int, float, np.float32, complex, np.complex64])
def test_empty(self, dt):
a = np.empty((0, 0), dtype=dt)
x, lower = cho_factor(a)
assert x.shape == (0, 0)
xx, lower = cho_factor(np.eye(2, dtype=dt))
assert x.dtype == xx.dtype
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@linalg@tests@test_decomp_cholesky.py@.PATH_END.py
|
{
"filename": "test_classes.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/polynomial/tests/test_classes.py",
"type": "Python"
}
|
"""Test inter-conversion of different polynomial classes.
This tests the convert and cast methods of all the polynomial classes.
"""
from __future__ import division, absolute_import, print_function
import operator as op
from numbers import Number
import numpy as np
from numpy.polynomial import (
Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE)
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
run_module_suite)
from numpy.compat import long
# All concrete polynomial classes under test; every check below is run
# against each of these (and, for conversion/cast, each ordered pair).
classes = (
    Polynomial, Legendre, Chebyshev, Laguerre,
    Hermite, HermiteE)
def test_class_methods():
    """Yield (check, args...) tuples for every class/check combination.

    Nose-style test generator: pairwise conversion/cast checks over every
    ordered pair of classes, then each single-class check for each class.
    """
    for Poly1 in classes:
        for Poly2 in classes:
            yield check_conversion, Poly1, Poly2
            yield check_cast, Poly1, Poly2
    # Single-class checks, in the same order as before.
    single_class_checks = (
        check_call, check_identity, check_basis, check_fromroots, check_fit,
        check_equal, check_not_equal, check_add, check_sub, check_mul,
        check_floordiv, check_truediv, check_mod, check_divmod, check_pow,
        check_integ, check_deriv, check_roots, check_linspace, check_mapparms,
        check_degree, check_copy, check_cutdeg, check_truncate, check_trim,
        check_ufunc_override)
    for Poly in classes:
        for check in single_class_checks:
            yield check, Poly
#
# helper functions
#
# Shorthand used throughout the checks below for uniform samples in [0, 1).
random = np.random.random
def assert_poly_almost_equal(p1, p2, msg=""):
    """Assert that two polynomial objects are almost equal.

    Compares domain, window and coefficients; on mismatch raises an
    AssertionError whose message includes both operands (appended to any
    caller-supplied *msg*).
    """
    try:
        assert_(np.all(p1.domain == p2.domain))
        assert_(np.all(p1.window == p2.window))
        assert_almost_equal(p1.coef, p2.coef)
    except AssertionError:
        # Bug fix: the original built a tuple
        # ("Result: ...", (p1, p2)) instead of %-formatting the string,
        # and silently discarded the caller's msg.
        msg = "%s\nResult: %s\nTarget: %s" % (msg, p1, p2)
        raise AssertionError(msg)
#
# conversion methods that depend on two classes
#
def check_conversion(Poly1, Poly2):
    """convert() preserves values and applies the requested domain/window."""
    x = np.linspace(0, 1, 10)
    coef = random((3,))
    domain1 = Poly1.domain + random((2,))*.25
    window1 = Poly1.window + random((2,))*.25
    source = Poly1(coef, domain=domain1, window=window1)
    domain2 = Poly2.domain + random((2,))*.25
    window2 = Poly2.window + random((2,))*.25
    converted = source.convert(kind=Poly2, domain=domain2, window=window2)
    assert_almost_equal(converted.domain, domain2)
    assert_almost_equal(converted.window, window2)
    assert_almost_equal(converted(x), source(x))
def check_cast(Poly1, Poly2):
    """cast() preserves values and applies the requested domain/window."""
    x = np.linspace(0, 1, 10)
    coef = random((3,))
    domain1 = Poly1.domain + random((2,))*.25
    window1 = Poly1.window + random((2,))*.25
    source = Poly1(coef, domain=domain1, window=window1)
    domain2 = Poly2.domain + random((2,))*.25
    window2 = Poly2.window + random((2,))*.25
    casted = Poly2.cast(source, domain=domain2, window=window2)
    assert_almost_equal(casted.domain, domain2)
    assert_almost_equal(casted.window, window2)
    assert_almost_equal(casted(x), source(x))
#
# methods that depend on one class
#
def check_identity(Poly):
    """identity() evaluates to x on its domain, with the requested mapping."""
    domain = Poly.domain + random((2,))*.25
    window = Poly.window + random((2,))*.25
    sample = np.linspace(domain[0], domain[1], 11)
    ident = Poly.identity(domain=domain, window=window)
    assert_equal(ident.domain, domain)
    assert_equal(ident.window, window)
    assert_almost_equal(ident(sample), sample)
def check_basis(Poly):
    """basis(5) is the degree-5 basis element with the requested mapping."""
    domain = Poly.domain + random((2,))*.25
    window = Poly.window + random((2,))*.25
    basis5 = Poly.basis(5, domain=domain, window=window)
    assert_equal(basis5.domain, domain)
    assert_equal(basis5.window, window)
    assert_equal(basis5.coef, [0]*5 + [1])
def check_fromroots(Poly):
    """fromroots builds a polynomial of the right degree vanishing at the roots."""
    domain = Poly.domain + random((2,))*.25
    window = Poly.window + random((2,))*.25
    roots = random((5,))
    poly = Poly.fromroots(roots, domain=domain, window=window)
    # Degree, mapping and the roots themselves must all round-trip.
    assert_equal(poly.degree(), len(roots))
    assert_equal(poly.domain, domain)
    assert_equal(poly.window, window)
    assert_almost_equal(poly(roots), 0)
    # In the power basis the leading coefficient must be one (monic).
    as_power = Polynomial.cast(
        poly, domain=Polynomial.domain, window=Polynomial.window)
    assert_almost_equal(as_power.coef[-1], 1)
def check_fit(Poly):
    """Exercise Poly.fit: default and explicit domain/window, degree given
    as an int or as a list of terms, and weighted fits."""
    def f(x):
        return x*(x - 1)*(x - 2)
    x = np.linspace(0, 3)
    y = f(x)
    # check default value of domain and window
    p = Poly.fit(x, y, 3)
    assert_almost_equal(p.domain, [0, 3])
    assert_almost_equal(p(x), y)
    assert_equal(p.degree(), 3)
    # check with given domains and window
    d = Poly.domain + random((2,))*.25
    w = Poly.window + random((2,))*.25
    p = Poly.fit(x, y, 3, domain=d, window=w)
    assert_almost_equal(p(x), y)
    assert_almost_equal(p.domain, d)
    assert_almost_equal(p.window, w)
    # Degree may also be given as an explicit list of terms.
    p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w)
    assert_almost_equal(p(x), y)
    assert_almost_equal(p.domain, d)
    assert_almost_equal(p.window, w)
    # check with class domain default
    p = Poly.fit(x, y, 3, [])
    assert_equal(p.domain, Poly.domain)
    assert_equal(p.window, Poly.window)
    p = Poly.fit(x, y, [0, 1, 2, 3], [])
    assert_equal(p.domain, Poly.domain)
    assert_equal(p.window, Poly.window)
    # check that fit accepts weights.
    w = np.zeros_like(x)
    z = y + random(y.shape)*.25
    w[::2] = 1
    # Zero-weighted points must be ignored: fitting every other point with
    # weight one is equivalent to fitting only those points.
    p1 = Poly.fit(x[::2], z[::2], 3)
    p2 = Poly.fit(x, z, 3, w=w)
    p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w)
    assert_almost_equal(p1(x), p2(x))
    assert_almost_equal(p2(x), p3(x))
def check_equal(Poly):
    """Equality requires identical coef, domain and window."""
    base = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
    diff_coef = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
    diff_domain = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
    diff_window = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
    assert_(base == base)
    for other in (diff_coef, diff_domain, diff_window):
        assert_(not base == other)
def check_not_equal(Poly):
    """Inequality triggers on any difference in coef, domain or window."""
    base = Poly([1, 2, 3], domain=[0, 1], window=[2, 3])
    diff_coef = Poly([1, 1, 1], domain=[0, 1], window=[2, 3])
    diff_domain = Poly([1, 2, 3], domain=[1, 2], window=[2, 3])
    diff_window = Poly([1, 2, 3], domain=[0, 1], window=[1, 2])
    assert_(not base != base)
    for other in (diff_coef, diff_domain, diff_window):
        assert_(base != other)
def check_add(Poly):
    # This checks commutation with coefficient-like operands, not
    # numerical correctness of the sum itself.
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = p1 + p2
    assert_poly_almost_equal(p2 + p1, p3)
    for coef_like in (c2, tuple(c2), np.array(c2)):
        assert_poly_almost_equal(p1 + coef_like, p3)
        assert_poly_almost_equal(coef_like + p1, p3)
    # Mixing domains, windows or polynomial classes must be rejected.
    assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1))
    other_class = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.add, p1, other_class([0]))
def check_sub(Poly):
    # This checks commutation with coefficient-like operands, not
    # numerical correctness of the difference itself.
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = p1 - p2
    assert_poly_almost_equal(p2 - p1, -p3)
    for coef_like in (c2, tuple(c2), np.array(c2)):
        assert_poly_almost_equal(p1 - coef_like, p3)
        assert_poly_almost_equal(coef_like - p1, -p3)
    # Mixing domains, windows or polynomial classes must be rejected.
    assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1))
    other_class = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.sub, p1, other_class([0]))
def check_mul(Poly):
    """Multiplication commutes with coefficient-like and scalar operands."""
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = p1 * p2
    assert_poly_almost_equal(p2 * p1, p3)
    for coef_like in (c2, tuple(c2), np.array(c2)):
        assert_poly_almost_equal(p1 * coef_like, p3)
        assert_poly_almost_equal(coef_like * p1, p3)
    # Scalar multiplication matches multiplying by the constant polynomial.
    assert_poly_almost_equal(p1 * 2, p1 * Poly([2]))
    assert_poly_almost_equal(2 * p1, p1 * Poly([2]))
    # Mixing domains, windows or polynomial classes must be rejected.
    assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1))
    other_class = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.mul, p1, other_class([0]))
def check_floordiv(Poly):
    """Floor division recovers the quotient for every operand combination."""
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    c3 = list(random((2,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = Poly(c3)
    p4 = p1 * p2 + p3
    c4 = list(p4.coef)
    assert_poly_almost_equal(p4 // p2, p1)
    for numerator, denominator in (
            (p4, c2), (c4, p2), (p4, tuple(c2)), (tuple(c4), p2),
            (p4, np.array(c2)), (np.array(c4), p2)):
        assert_poly_almost_equal(numerator // denominator, p1)
    assert_poly_almost_equal(2 // p2, Poly([0]))
    assert_poly_almost_equal(p2 // 2, 0.5*p2)
    # Mixing domains, windows or polynomial classes must be rejected.
    assert_raises(
        TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(
        TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1))
    other_class = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.floordiv, p1, other_class([0]))
def check_truediv(Poly):
    # True division is only defined for a scalar Number denominator,
    # excluding python bools; anything else must raise TypeError.
    p1 = Poly([1, 2, 3])
    p2 = p1 * 5
    for scalar_type in np.ScalarType:
        if not issubclass(scalar_type, Number) or issubclass(scalar_type, bool):
            continue
        divisor = scalar_type(5)
        assert_poly_almost_equal(op.truediv(p2, divisor), p1)
        assert_raises(TypeError, op.truediv, divisor, p2)
    for scalar_type in (int, long, float):
        divisor = scalar_type(5)
        assert_poly_almost_equal(op.truediv(p2, divisor), p1)
        assert_raises(TypeError, op.truediv, divisor, p2)
    divisor = complex(5, 0)
    assert_poly_almost_equal(op.truediv(p2, divisor), p1)
    assert_raises(TypeError, op.truediv, divisor, p2)
    for non_scalar in [tuple(), list(), dict(), bool(), np.array([1])]:
        assert_raises(TypeError, op.truediv, p2, non_scalar)
        assert_raises(TypeError, op.truediv, non_scalar, p2)
    for poly_class in classes:
        assert_raises(TypeError, op.truediv, p2, poly_class(1))
def check_mod(Poly):
    # This checks commutation with coefficient-like operands, not
    # numerical correctness of the remainder itself.
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    c3 = list(random((2,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = Poly(c3)
    p4 = p1 * p2 + p3
    c4 = list(p4.coef)
    assert_poly_almost_equal(p4 % p2, p3)
    for numerator, denominator in (
            (p4, c2), (c4, p2), (p4, tuple(c2)), (tuple(c4), p2),
            (p4, np.array(c2)), (np.array(c4), p2)):
        assert_poly_almost_equal(numerator % denominator, p3)
    assert_poly_almost_equal(2 % p2, Poly([2]))
    assert_poly_almost_equal(p2 % 2, Poly([0]))
    # Mixing domains, windows or polynomial classes must be rejected.
    assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1))
    other_class = Chebyshev if Poly is Polynomial else Polynomial
    assert_raises(TypeError, op.mod, p1, other_class([0]))
def check_divmod(Poly):
    """Check that divmod commutes with coefficient-container operands.

    Builds p4 = p1*p2 + p3 so divmod(p4, p2) must give quotient p1 and
    remainder p3 for every supported operand form.  Only commutation is
    exercised here, not numerical correctness.
    """
    c1 = list(random((4,)) + .5)
    c2 = list(random((3,)) + .5)
    c3 = list(random((2,)) + .5)
    p1 = Poly(c1)
    p2 = Poly(c2)
    p3 = Poly(c3)
    p4 = p1 * p2 + p3
    c4 = list(p4.coef)
    # dividend/divisor may each be a Poly, list, tuple or ndarray
    operand_pairs = [
        (p4, p2),
        (p4, c2),
        (c4, p2),
        (p4, tuple(c2)),
        (tuple(c4), p2),
        (p4, np.array(c2)),
        (np.array(c4), p2),
    ]
    for dividend, divisor in operand_pairs:
        quo, rem = divmod(dividend, divisor)
        assert_poly_almost_equal(quo, p1)
        assert_poly_almost_equal(rem, p3)
    # scalar divisor/dividend
    quo, rem = divmod(p2, 2)
    assert_poly_almost_equal(quo, 0.5*p2)
    assert_poly_almost_equal(rem, Poly([0]))
    quo, rem = divmod(2, p2)
    assert_poly_almost_equal(quo, Poly([0]))
    assert_poly_almost_equal(rem, Poly([2]))
    # mismatched domain/window or mixed basis classes must raise
    assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1))
    assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1))
    if Poly is Polynomial:
        assert_raises(TypeError, divmod, p1, Chebyshev([0]))
    else:
        assert_raises(TypeError, divmod, p1, Polynomial([0]))
def check_roots(Poly):
    """Check that roots() recovers the roots used by fromroots()."""
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    expected = np.sort(random((5,)))
    # perturbed domain and window
    recovered = np.sort(Poly.fromroots(expected, domain=dom, window=win).roots())
    assert_almost_equal(recovered, expected)
    # default domain and window
    recovered = np.sort(Poly.fromroots(expected).roots())
    assert_almost_equal(recovered, expected)
def check_degree(Poly):
    """Check that the n-th basis polynomial reports degree n."""
    assert_equal(Poly.basis(5).degree(), 5)
def check_copy(Poly):
    """Check that copy() is equal but shares no state with the original."""
    original = Poly.basis(5)
    duplicate = original.copy()
    assert_(original == duplicate)
    assert_(original is not duplicate)
    # coefficient array, domain and window must all be fresh objects
    for attr in ('coef', 'domain', 'window'):
        assert_(getattr(original, attr) is not getattr(duplicate, attr))
def check_integ(Poly):
    """Check integration against the equivalent power-series result.

    Each case converts a power series into the Poly basis, integrates,
    and converts back to compare against hand-computed power-series
    antiderivatives of 2 + 6x + 12x**2.
    """
    P = Polynomial
    # Check defaults: single and double integration, zero constants
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    p1 = P.cast(p0.integ())
    p2 = P.cast(p0.integ(2))
    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
    # Check with k: integration constants shift the low-order terms
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    p1 = P.cast(p0.integ(k=1))
    p2 = P.cast(p0.integ(2, k=[1, 1]))
    assert_poly_almost_equal(p1, P([1, 2, 3, 4]))
    assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1]))
    # Check with lbnd: antiderivative must vanish at the lower bound x=1
    p0 = Poly.cast(P([1*2, 2*3, 3*4]))
    p1 = P.cast(p0.integ(lbnd=1))
    p2 = P.cast(p0.integ(2, lbnd=1))
    assert_poly_almost_equal(p1, P([-9, 2, 3, 4]))
    assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1]))
    # Check scaling: a non-default domain must not change the result
    d = 2*Poly.domain
    p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d)
    p1 = P.cast(p0.integ())
    p2 = P.cast(p0.integ(2))
    assert_poly_almost_equal(p1, P([0, 2, 3, 4]))
    assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1]))
def check_deriv(Poly):
    """Check that deriv() inverts integ().

    Integration itself is assumed correct (checked elsewhere); here we
    only verify the round trip, once with a perturbed domain/window and
    once with the class defaults.
    """
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    for kwargs in ({'domain': dom, 'window': win}, {}):
        p1 = Poly([1, 2, 3], **kwargs)
        p2 = p1.integ(2, k=[1, 2])
        p3 = p1.integ(1, k=[1])
        # one derivative undoes one integration, two undo two
        assert_almost_equal(p2.deriv(1).coef, p3.coef)
        assert_almost_equal(p2.deriv(2).coef, p1.coef)
def check_linspace(Poly):
    """Check linspace() sampling on the default and an explicit domain."""
    dom = Poly.domain + random((2,))*.25
    win = Poly.window + random((2,))*.25
    p = Poly([1, 2, 3], domain=dom, window=win)
    # default domain: the polynomial's own
    xtgt = np.linspace(dom[0], dom[1], 20)
    xres, yres = p.linspace(20)
    assert_almost_equal(xres, xtgt)
    assert_almost_equal(yres, p(xtgt))
    # explicitly supplied domain
    xtgt = np.linspace(0, 2, 20)
    xres, yres = p.linspace(20, domain=[0, 2])
    assert_almost_equal(xres, xtgt)
    assert_almost_equal(yres, p(xtgt))
def check_pow(Poly):
    """Check that ``tst**i`` equals i repeated multiplications of tst.

    Also checks that non-integral and negative exponents raise
    ValueError.
    """
    d = Poly.domain + random((2,))*.25
    w = Poly.window + random((2,))*.25
    tgt = Poly([1], domain=d, window=w)
    tst = Poly([1, 2, 3], domain=d, window=w)
    # tgt accumulates tst**i one factor at a time
    for i in range(5):
        assert_poly_almost_equal(tst**i, tgt)
        tgt = tgt * tst
    # default domain and window
    tgt = Poly([1])
    tst = Poly([1, 2, 3])
    for i in range(5):
        assert_poly_almost_equal(tst**i, tgt)
        tgt = tgt * tst
    # check error for invalid powers
    assert_raises(ValueError, op.pow, tgt, 1.5)
    assert_raises(ValueError, op.pow, tgt, -1)
def check_call(Poly):
    """Check evaluation against the equivalent power-series value."""
    d = Poly.domain
    x = np.linspace(d[0], d[1], 11)
    # cast the power series 1 + 2x + 3x**2 into this basis
    p = Poly.cast(Polynomial([1, 2, 3]))
    # compare against the Horner form of the same series
    assert_almost_equal(p(x), 1 + x*(2 + 3*x))
def check_cutdeg(Poly):
    """Check cutdeg() argument validation and resulting series length."""
    p = Poly([1, 2, 3])
    assert_raises(ValueError, p.cutdeg, .5)   # degree must be integral
    assert_raises(ValueError, p.cutdeg, -1)   # degree must be non-negative
    # cutting never lengthens: len stays min(deg + 1, original length)
    for deg, expected_len in [(3, 3), (2, 3), (1, 2), (0, 1)]:
        assert_equal(len(p.cutdeg(deg)), expected_len)
def check_truncate(Poly):
    """Check truncate() argument validation and resulting series length."""
    p = Poly([1, 2, 3])
    assert_raises(ValueError, p.truncate, .5)  # size must be integral
    assert_raises(ValueError, p.truncate, 0)   # size must be positive
    # truncation never lengthens: len is min(size, original length)
    for size, expected_len in [(4, 3), (3, 3), (2, 2), (1, 1)]:
        assert_equal(len(p.truncate(size)), expected_len)
def check_trim(Poly):
    """Check that trim() drops trailing coefficients below tolerance."""
    coef = [1, 1e-6, 1e-12, 0]
    p = Poly(coef)
    # default tolerance removes only exact trailing zeros;
    # larger tolerances remove progressively more trailing terms
    assert_equal(p.trim().coef, coef[:3])
    for tol, expected in [(1e-10, coef[:2]), (1e-5, coef[:1])]:
        assert_equal(p.trim(tol).coef, expected)
def check_mapparms(Poly):
    """Check the (offset, scale) pair returned by mapparms()."""
    # check with defaults. Should be identity.
    d = Poly.domain
    w = Poly.window
    p = Poly([1], domain=d, window=w)
    assert_almost_equal([0, 1], p.mapparms())
    # mapping d onto 2*d + 1 is the affine map x -> 1 + 2*x
    w = 2*d + 1
    p = Poly([1], domain=d, window=w)
    assert_almost_equal([1, 2], p.mapparms())
def check_ufunc_override(Poly):
    """Check that ufuncs refuse to mix arrays with polynomial objects."""
    p = Poly([1, 2, 3])
    arr = np.ones(3)
    # both operand orders must be rejected
    for left, right in ((p, arr), (arr, p)):
        assert_raises(TypeError, np.add, left, right)
class TestInterpolate(object):
    """Tests for ``Chebyshev.interpolate``."""

    def f(self, x):
        # cubic with roots at 0, 1 and 2, used as the target function
        return x * (x - 1) * (x - 2)

    def test_raises(self):
        # degree must be a non-negative integer
        assert_raises(ValueError, Chebyshev.interpolate, self.f, -1)
        assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.)

    def test_dimensions(self):
        # the interpolant must have exactly the requested degree
        for deg in range(1, 5):
            assert_(Chebyshev.interpolate(self.f, deg).degree() == deg)

    def test_approximation(self):

        def powx(x, p):
            return x**p

        x = np.linspace(0, 2, 10)
        # x**t is reproduced essentially exactly whenever deg >= t
        for deg in range(0, 10):
            for t in range(0, deg + 1):
                p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,))
                assert_almost_equal(p(x), powx(x, t), decimal=12)
if __name__ == "__main__":
    # Run all tests in this module through numpy's test runner.
    run_module_suite()
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@polynomial@tests@test_classes.py@.PATH_END.py
|
{
"filename": "tf2migration_fuzz.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/fuzzing/tf2migration_fuzz.py",
"type": "Python"
}
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This is a Python API fuzzer for v1 vs v2 API comparison."""
import atheris
with atheris.instrument_imports():
import sys
from python_fuzzing import FuzzingHelper
import tensorflow as tf
@atheris.instrument_func
def TestOneInput(input_bytes):
    """Test randomized integer fuzzing input for v1 vs v2 APIs.

    One fuzzing iteration: derives random dtypes, shapes and strings
    from ``input_bytes`` and checks that selected tf.compat.v1 APIs
    agree with their TF2 counterparts, both in results and in raised
    exceptions.
    """
    fh = FuzzingHelper(input_bytes)

    # Comparing tf.math.angle with tf.compat.v1.angle.
    input_supported_dtypes = [tf.float32, tf.float64]
    random_dtype_index = fh.get_int(min_int=0, max_int=1)
    input_dtype = input_supported_dtypes[random_dtype_index]
    input_shape = fh.get_int_list(
        min_length=0, max_length=6, min_int=0, max_int=10)
    seed = fh.get_int()
    input_tensor = tf.random.uniform(
        shape=input_shape, dtype=input_dtype, seed=seed, maxval=10)
    name = fh.get_string(5)
    v2_output = tf.math.angle(input=input_tensor, name=name)
    v1_output = tf.compat.v1.angle(input=input_tensor, name=name)
    try:
        # values and static shapes must match exactly
        tf.debugging.assert_equal(v1_output, v2_output)
        tf.debugging.assert_equal(v1_output.shape, v2_output.shape)
    except Exception as e:  # pylint: disable=broad-except
        # dump the full context before failing so the crash is reproducible
        print("Input tensor: {}".format(input_tensor))
        print("Input dtype: {}".format(input_dtype))
        print("v1_output: {}".format(v1_output))
        print("v2_output: {}".format(v2_output))
        raise e

    # Comparing tf.debugging.assert_integer with tf.compat.v1.assert_integer.
    x_supported_dtypes = [
        tf.float16, tf.float32, tf.float64, tf.int32, tf.int64, tf.string
    ]
    random_dtype_index = fh.get_int(min_int=0, max_int=5)
    x_dtype = x_supported_dtypes[random_dtype_index]
    x_shape = fh.get_int_list(min_length=0, max_length=6, min_int=0, max_int=10)
    seed = fh.get_int()
    try:
        x = tf.random.uniform(shape=x_shape, dtype=x_dtype, seed=seed, maxval=10)
    except ValueError:
        # tf.random.uniform cannot produce strings; fall back to a constant
        x = tf.constant(["test_string"])
    message = fh.get_string(128)
    name = fh.get_string(128)
    # both APIs are expected to either succeed or raise the same exception
    try:
        v2_output = tf.debugging.assert_integer(x=x, message=message, name=name)
    except Exception as e:  # pylint: disable=broad-except
        v2_output = e
    try:
        v1_output = tf.compat.v1.assert_integer(x=x, message=message, name=name)
    except Exception as e:  # pylint: disable=broad-except
        v1_output = e
    if v1_output and v2_output:
        # captured exceptions must agree in type and arguments
        assert type(v2_output) == type(v1_output)  # pylint: disable=unidiomatic-typecheck
        assert v2_output.args == v1_output.args
def main():
    # Hand control to atheris: parse argv, enable Python coverage
    # collection, and enter the fuzzing loop (does not return normally).
    atheris.Setup(sys.argv, TestOneInput, enable_python_coverage=True)
    atheris.Fuzz()


if __name__ == "__main__":
    main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@fuzzing@tf2migration_fuzz.py@.PATH_END.py
|
{
"filename": "rayleigh_benard.py",
"repo_name": "minkailin/stratsi",
"repo_path": "stratsi_extracted/stratsi-master/dedalus_repo/examples/ivp/2d_rayleigh_benard/rayleigh_benard.py",
"type": "Python"
}
|
"""
Dedalus script for 2D Rayleigh-Benard convection.
This script uses a Fourier basis in the x direction with periodic boundary
conditions. The equations are scaled in units of the buoyancy time (Fr = 1).
This script can be run serially or in parallel, and uses the built-in analysis
framework to save data snapshots in HDF5 files. The `merge_procs` command can
be used to merge distributed analysis sets from parallel runs, and the
`plot_slices.py` script can be used to plot the snapshots.
To run, merge, and plot using 4 processes, for instance, you could use:
$ mpiexec -n 4 python3 rayleigh_benard.py
$ mpiexec -n 4 python3 -m dedalus merge_procs snapshots
$ mpiexec -n 4 python3 plot_slices.py snapshots/*.h5
This script can restart the simulation from the last save of the original
output to extend the integration. This requires that the output files from
the original simulation are merged, and the last is symlinked or copied to
`restart.h5`.
To run the original example and the restart, you could use:
$ mpiexec -n 4 python3 rayleigh_benard.py
$ mpiexec -n 4 python3 -m dedalus merge_procs snapshots
$ ln -s snapshots/snapshots_s2.h5 restart.h5
$ mpiexec -n 4 python3 rayleigh_benard.py
The simulations should take a few process-minutes to run.
"""
import numpy as np
from mpi4py import MPI
import time
import pathlib
from dedalus import public as de
from dedalus.extras import flow_tools
import logging
logger = logging.getLogger(__name__)
# Parameters
Lx, Lz = (4., 1.)          # box aspect ratio 4:1
Prandtl = 1.
Rayleigh = 1e6

# Create bases and domain: Fourier (periodic) in x, Chebyshev in z
x_basis = de.Fourier('x', 256, interval=(0, Lx), dealias=3/2)
z_basis = de.Chebyshev('z', 64, interval=(-Lz/2, Lz/2), dealias=3/2)
domain = de.Domain([x_basis, z_basis], grid_dtype=np.float64)

# 2D Boussinesq hydrodynamics, non-dimensionalized on the buoyancy time:
# P = (Ra Pr)^(-1/2) is the thermal diffusivity, R = (Ra/Pr)^(-1/2) the
# viscosity, F the background buoyancy gradient.
problem = de.IVP(domain, variables=['p','b','u','w','bz','uz','wz'])
problem.meta['p','b','u','w']['z']['dirichlet'] = True
problem.parameters['P'] = (Rayleigh * Prandtl)**(-1/2)
problem.parameters['R'] = (Rayleigh / Prandtl)**(-1/2)
problem.parameters['F'] = F = 1
problem.add_equation("dx(u) + wz = 0")
problem.add_equation("dt(b) - P*(dx(dx(b)) + dz(bz)) - F*w = -(u*dx(b) + w*bz)")
problem.add_equation("dt(u) - R*(dx(dx(u)) + dz(uz)) + dx(p) = -(u*dx(u) + w*uz)")
problem.add_equation("dt(w) - R*(dx(dx(w)) + dz(wz)) + dz(p) - b = -(u*dx(w) + w*wz)")
# first-order reductions in z for the Chebyshev direction
problem.add_equation("bz - dz(b) = 0")
problem.add_equation("uz - dz(u) = 0")
problem.add_equation("wz - dz(w) = 0")
# no-slip, fixed-buoyancy walls; the pressure gauge is set on the
# horizontally-averaged (nx == 0) mode instead of a redundant w BC
problem.add_bc("left(b) = 0")
problem.add_bc("left(u) = 0")
problem.add_bc("left(w) = 0")
problem.add_bc("right(b) = 0")
problem.add_bc("right(u) = 0")
problem.add_bc("right(w) = 0", condition="(nx != 0)")
problem.add_bc("right(p) = 0", condition="(nx == 0)")

# Build solver
solver = problem.build_solver(de.timesteppers.RK222)
logger.info('Solver built')

# Initial conditions or restart
if not pathlib.Path('restart.h5').exists():

    # Initial conditions
    x = domain.grid(0)
    z = domain.grid(1)
    b = solver.state['b']
    bz = solver.state['bz']

    # Random perturbations, initialized globally for same results in parallel
    gshape = domain.dist.grid_layout.global_shape(scales=1)
    slices = domain.dist.grid_layout.slices(scales=1)
    rand = np.random.RandomState(seed=42)
    noise = rand.standard_normal(gshape)[slices]

    # Linear background + perturbations damped at walls
    zb, zt = z_basis.interval
    pert = 1e-3 * noise * (zt - z) * (z - zb)
    b['g'] = F * pert
    b.differentiate('z', out=bz)

    # Timestepping and output
    dt = 0.125
    stop_sim_time = 25
    fh_mode = 'overwrite'

else:
    # Restart: resume from the last write and append to existing output
    write, last_dt = solver.load_state('restart.h5', -1)

    # Timestepping and output
    dt = last_dt
    stop_sim_time = 50
    fh_mode = 'append'

# Integration parameters
solver.stop_sim_time = stop_sim_time

# Analysis: snapshot the full state every 0.25 sim-time units
snapshots = solver.evaluator.add_file_handler('snapshots', sim_dt=0.25, max_writes=50, mode=fh_mode)
snapshots.add_system(solver.state)

# CFL: adaptive timestep from the velocity field
CFL = flow_tools.CFL(solver, initial_dt=dt, cadence=10, safety=1,
                     max_change=1.5, min_change=0.5, max_dt=0.125, threshold=0.05)
CFL.add_velocities(('u', 'w'))

# Flow properties: monitor the Reynolds number
flow = flow_tools.GlobalFlowProperty(solver, cadence=10)
flow.add_property("sqrt(u*u + w*w) / R", name='Re')

# Main loop
try:
    logger.info('Starting loop')
    start_time = time.time()
    while solver.proceed:
        dt = CFL.compute_dt()
        dt = solver.step(dt)
        if (solver.iteration-1) % 10 == 0:
            logger.info('Iteration: %i, Time: %e, dt: %e' %(solver.iteration, solver.sim_time, dt))
            logger.info('Max Re = %f' %flow.max('Re'))
except:
    logger.error('Exception raised, triggering end of main loop.')
    raise
finally:
    # always report timing, even on failure
    end_time = time.time()
    logger.info('Iterations: %i' %solver.iteration)
    logger.info('Sim end time: %f' %solver.sim_time)
    logger.info('Run time: %.2f sec' %(end_time-start_time))
    logger.info('Run time: %f cpu-hr' %((end_time-start_time)/60/60*domain.dist.comm_cart.size))
|
minkailinREPO_NAMEstratsiPATH_START.@stratsi_extracted@stratsi-master@dedalus_repo@examples@ivp@2d_rayleigh_benard@rayleigh_benard.py@.PATH_END.py
|
{
"filename": "_templateitemname.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/splom/marker/colorbar/tickformatstop/_templateitemname.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TemplateitemnameValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for `templateitemname` on
    `splom.marker.colorbar.tickformatstop`.
    """

    def __init__(
        self,
        plotly_name="templateitemname",
        parent_name="splom.marker.colorbar.tickformatstop",
        **kwargs,
    ):
        # callers may override the edit type; "colorbars" is the default
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(TemplateitemnameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@splom@marker@colorbar@tickformatstop@_templateitemname.py@.PATH_END.py
|
{
"filename": "example_poincare.py",
"repo_name": "GalacticDynamics-Oxford/Agama",
"repo_path": "Agama_extracted/Agama-master/py/example_poincare.py",
"type": "Python"
}
|
#!/usr/bin/python
'''
This interactive example shows the meridional plane {x,z} (left panel) and the Poincare
surface of section {x,v_x} for an axisymmetric potential, where x is either cylindrical
radius R when L_z>0 or the x coordinate otherwise, and points on the SoS are placed
when passing through the z=0 plane with v_z>0.
Upon right-clicking at any point inside the zero-velocity curve on the surface of section,
a new orbit starting from these initial conditions is added to the plot.
The parameters of the potential, energy and L_z are specified at the beginning of the script.
'''
import agama, numpy, scipy.optimize, matplotlib, matplotlib.pyplot as plt
plt.rc('axes', linewidth=0.5)
plt.rc('font', size=8)
# consider motion in the x-z plane of an axisymmetric potential
# (with Lz=0 for motion in a flattened 2d potential, or Lz>0 for the motion in the meridional plane)
#pot = agama.Potential(type='spheroid', gamma=1.5, q=0.5)
pot = agama.Potential(type='disk', scaleheight=0.1)
rmax = 2.0
# total energy fixed to the potential value at (rmax, 0, 0)
E = pot.potential(rmax,0,0)
# angular momentum of the circular orbit with this energy
Lzmax= 2*numpy.pi * pot.Rcirc(E=E)**2 / pot.Tcirc(E)
Lz = 0.1 * Lzmax
def init_axes(arg=None):
    """(Re)initialize both panels: clear them, set labels/limits and draw
    the accessible-region boundaries for the current E and Lz.

    The unused *arg* lets this double as a matplotlib button callback.
    """
    axorb.cla()
    axpss.cla()
    # for Lz>0 the radial coordinate starts at 0; otherwise show both signs
    axorb.set_xlim(0 if Lz>0 else -rmax, rmax)
    axorb.set_aspect('equal')
    axpss.set_xlim(axorb.get_xlim())
    axorb.set_xlabel('$x$', fontsize=12)
    axorb.set_ylabel('$z$', fontsize=12)
    axpss.set_xlabel('$x$', fontsize=12)
    axpss.set_ylabel('$p_x$', fontsize=12)
    # plot boundaries of orbit plane and surface of section
    Rp,Ra= pot.Rperiapo(E, Lz)
    xmin = -Ra if Lz==0 else Rp
    xmax = Ra
    # smoothstep-spaced grid in x, denser near the endpoints
    grid = numpy.linspace(0, 1, 100)
    grid = grid * grid * (3-2*grid) * (xmax-xmin) + xmin
    # zero-velocity curve in the SoS: v_x where v_z = 0 at z = 0
    vval = numpy.maximum(0, 2*E - 2*pot.potential(numpy.column_stack((grid,grid*0,grid*0))) - (Lz/grid)**2)**0.5
    # boundary of the accessible region in the orbit plane: z where
    # Phi(x,0,z) + Lz^2/(2 x^2) = E
    zval = numpy.hstack([0,
        numpy.array([ scipy.optimize.brentq(lambda z: pot.potential(xx,0,z) - E + 0.5*(Lz/xx)**2, 0, xmax) for xx in grid[1:-1]]),
        0])
    # mirror the upper boundary to get the closed curve in each panel
    axorb.plot(numpy.hstack((grid[:-1], grid[::-1])), numpy.hstack((zval[:-1], -zval[::-1])), color='k', lw=0.5)
    axpss.plot(numpy.hstack((grid[:-1], grid[::-1])), numpy.hstack((vval[:-1], -vval[::-1])), color='k', lw=0.5)
    axorb.text(0.5, 1.01, 'orbit plane', ha='center', va='bottom', transform=axorb.transAxes, fontsize=10)
    axpss.text(0.5, 1.01, 'surface of section', ha='center', va='bottom', transform=axpss.transAxes, fontsize=10)
    plt.draw()
def run_orbit(ic):
    """Integrate one orbit from initial conditions *ic* (6d phase-space
    point) and add it to both panels in a random colour.
    """
    color = numpy.random.random(size=3)*0.8
    # create an orbit represented by a spline interpolator
    orbit = agama.orbit(ic=ic, potential=pot, time=100*pot.Tcirc(ic), dtype=object)
    # get all crossing points with z=0
    timecross = orbit.z.roots()
    # select those at which vz>=0 (upward crossings define the SoS)
    timecross = timecross[orbit.z(timecross, der=1) >= 0]
    # get recorded trajectory sampled at every timestep...
    traj = orbit(orbit)
    # ...and at all crossing times
    trajcross = orbit(timecross)
    if Lz==0:
        # planar orbit: plot directly in the x-z plane and x-v_x SoS
        axorb.plot(traj[:,0], traj[:,2], color=color, lw=0.5, alpha=0.5)
        axpss.plot(trajcross[:,0], trajcross[:,3], 'o', color=color, mew=0, ms=1.5)
    else:
        # orbit in the R,z plane, and SoS in the R, v_R plane
        axorb.plot((traj[:,0]**2 + traj[:,1]**2)**0.5, traj[:,2], color=color, lw=0.5, alpha=0.5)
        R = (trajcross[:,0]**2 + trajcross[:,1]**2)**0.5
        vR= (trajcross[:,0]*trajcross[:,3] + trajcross[:,1]*trajcross[:,4]) / R
        axpss.plot(R, vR, 'o', color=color, mew=0, ms=1.5)
def add_point(event):
    """Matplotlib callback: a right-click inside the SoS panel starts a
    new orbit from the clicked (x, v_x) point.

    The remaining velocity components are fixed by energy conservation;
    clicks outside the zero-velocity curve (v_z**2 <= 0) are ignored.
    """
    if event.inaxes is not axpss or event.button != 3:
        return
    x, vx = event.xdata, event.ydata
    if x == 0:
        # Lz/x below would divide by zero; x=0 is unreachable anyway
        return
    # kinetic energy left for v_z at z=0: 2(E - Phi) - (Lz/x)^2 - v_x^2
    vz2 = 2 * (E - pot.potential(x,0,0)) - (Lz/x)**2 - vx**2
    if vz2 > 0:
        run_orbit([x, 0, 0, vx, Lz/x, vz2**0.5])
        plt.draw()
# Build the two-panel figure, wire up the mouse and button callbacks,
# and enter the interactive loop.
fig = plt.figure(figsize=(6,3), dpi=200)
axorb = plt.axes([0.08,0.14,0.4,0.8])
axpss = plt.axes([0.58,0.14,0.4,0.8])
button_clear = matplotlib.widgets.Button(plt.axes([0.90,0.88,0.08,0.06]), 'clear')
fig.canvas.mpl_connect('button_press_event', add_point)
button_clear.on_clicked(init_axes)
init_axes()
print('Right-click on the Surface of Section to start an orbit')
plt.show()
|
GalacticDynamics-OxfordREPO_NAMEAgamaPATH_START.@Agama_extracted@Agama-master@py@example_poincare.py@.PATH_END.py
|
{
"filename": "run_3MdB_17.py",
"repo_name": "Morisset/pyCloudy",
"repo_path": "pyCloudy_extracted/pyCloudy-master/pyCloudy/3MdB_17/run_3MdB_17.py",
"type": "Python"
}
|
import pyCloudy as pc
#import numpy as np
from pyCloudy.db import use3MdB
#import pandas as pd
#import pymysql
#import os
#import matplotlib.pyplot as plt
import pyneb as pn
#%%
# Connection and table-name configuration for the 3MdB_17 database.
# NOTE(review): 'getenv' for user_passwd presumably tells the manager to
# read the password from the environment — confirm against use3MdB.
OVN_dic = {'host' : 'nefeles',
           'user_name' : 'OVN_admin',
           'user_passwd' : 'getenv',
           'base_name' : '3MdB_17',
           'tmp_name' : 'OVN_tmp',
           'pending_table' : '`pending_17`',
           'master_table' : '`tab_17`',
           'teion_table' : '`teion_17`',
           'abion_table' : '`abion_17`',
           'temis_table' : '`temis_17`',
           'lines_table' : '`lines_17`',
           'procIDs_table' : '`procIDs`',
           'seds_table': '`seds_17`'
           }
# silence the pyCloudy/pyneb loggers during the batch run
pc.log_.level = -1
pc.MdB.MdBlog_.level = -1
pn.log_.level = -1
# launch the model manager with 7 worker processes
M = use3MdB.manage3MdB(OVN_dic,
                       models_dir='/DATA/',
                       Nprocs=7, clean=True,OK_with_wrong=True)
M.start()
|
MorissetREPO_NAMEpyCloudyPATH_START.@pyCloudy_extracted@pyCloudy-master@pyCloudy@3MdB_17@run_3MdB_17.py@.PATH_END.py
|
{
"filename": "Errors.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/cython/Cython/Compiler/Errors.py",
"type": "Python"
}
|
#
# Errors
#
from __future__ import absolute_import
try:
from __builtin__ import basestring as any_string_type
except ImportError:
any_string_type = (bytes, str)
import sys
from contextlib import contextmanager
from ..Utils import open_new_file
from . import DebugFlags
from . import Options
class PyrexError(Exception):
    """Base class for all user-visible Cython/Pyrex compilation errors."""
    pass
class PyrexWarning(Exception):
    """Base class for Cython/Pyrex compilation warnings."""
    pass
def context(position):
    """Return a source-context snippet for *position*.

    *position* is a (FileSourceDescriptor, line, column) triple; the
    snippet shows up to five lines of source before the error line with
    a caret under the error column, framed by dashed rules.
    """
    source = position[0]
    assert not (isinstance(source, any_string_type)), (
        "Please replace filename strings with Scanning.FileSourceDescriptor instances %r" % source)
    try:
        F = source.get_lines()
    except UnicodeDecodeError:
        # file has an encoding problem
        s = u"[unprintable code]\n"
    else:
        # up to 5 lines of leading context plus the error line itself
        s = u''.join(F[max(0, position[1]-6):position[1]])
        s = u'...\n%s%s^\n' % (s, u' '*(position[2]-1))
    s = u'%s\n%s%s\n' % (u'-'*60, s, u'-'*60)
    return s
def format_position(position):
    """Render a (source, line, column) triple as ``'file:line:col: '``.

    Returns the empty string when *position* is missing.
    """
    if not position:
        return u''
    description = position[0].get_error_description()
    return u"%s:%d:%d: " % (description, position[1], position[2])
def format_error(message, position):
    """Prefix *message* with position info and a source-context snippet.

    Without a position the message is returned unchanged.
    """
    if position:
        prefix = format_position(position)
        snippet = context(position)
        message = u'\nError compiling Cython file:\n%s\n%s%s' % (
            snippet, prefix, message or u'')
    return message
class CompileError(PyrexError):
    """An error in the user's code, carrying position and formatted text."""

    def __init__(self, position = None, message = u""):
        self.position = position
        self.message_only = message
        self.formatted_message = format_error(message, position)
        # set once the error has been printed, to avoid dual reporting
        self.reported = False
    # Deprecated and withdrawn in 2.6:
    #   self.message = message
        Exception.__init__(self, self.formatted_message)
        # Python Exception subclass pickling is broken,
        # see http://bugs.python.org/issue1692335
        self.args = (position, message)

    def __str__(self):
        return self.formatted_message
class CompileWarning(PyrexWarning):
    """A warning about the user's code, prefixed with its position."""

    def __init__(self, position = None, message = ""):
        self.position = position
    # Deprecated and withdrawn in 2.6:
    #   self.message = message
        Exception.__init__(self, format_position(position) + message)
class InternalError(Exception):
    """Raised for conditions that indicate a bug in the compiler itself."""
    # If this is ever raised, there is a bug in the compiler.

    def __init__(self, message):
        self.message_only = message
        super(InternalError, self).__init__(
            u"Internal compiler error: %s" % message)
class AbortError(Exception):
    """Thrown to stop the compilation immediately."""

    def __init__(self, message):
        self.message_only = message
        super(AbortError, self).__init__(u"Abort error: %s" % message)
class CompilerCrash(CompileError):
    """Raised when an unexpected exception occurs in a transform.

    Wraps the original *cause* exception and, when available, its
    traceback, into a single formatted CompileError.
    """
    # raised when an unexpected exception occurs in a transform
    def __init__(self, pos, context, message, cause, stacktrace=None):
        if message:
            message = u'\n' + message
        else:
            message = u'\n'
        self.message_only = message
        if context:
            message = u"Compiler crash in %s%s" % (context, message)
        if stacktrace:
            import traceback
            message += (
                u'\n\nCompiler crash traceback from this point on:\n' +
                u''.join(traceback.format_tb(stacktrace)))
        if cause:
            if not stacktrace:
                message += u'\n'
            message += u'%s: %s' % (cause.__class__.__name__, cause)
        CompileError.__init__(self, pos, message)
        # Python Exception subclass pickling is broken,
        # see http://bugs.python.org/issue1692335
        self.args = (pos, context, message, cause, stacktrace)
class NoElementTreeInstalledException(PyrexError):
    """Raised when ``options.gdb_debug`` is enabled but no ElementTree
    implementation could be imported.
    """
# Module-level error-listing state (reset via open_listing_file/reset).
listing_file = None   # open file receiving the error listing, or None
num_errors = 0        # number of errors reported so far
echo_file = None      # stream errors are echoed to (usually stderr), or None
def open_listing_file(path, echo_to_stderr = 1):
    """Begin a new error listing and reset the error counter.

    If *path* is None no listing file is opened.  When
    *echo_to_stderr* is true, errors are additionally echoed to stderr.
    """
    # Begin a new error listing. If path is None, no file
    # is opened, the error counter is just reset.
    global listing_file, num_errors, echo_file
    if path is not None:
        listing_file = open_new_file(path)
    else:
        listing_file = None
    if echo_to_stderr:
        echo_file = sys.stderr
    else:
        echo_file = None
    num_errors = 0
def close_listing_file():
    """Close and forget the current error-listing file, if one is open."""
    global listing_file
    if listing_file is None:
        return
    listing_file.close()
    listing_file = None
def report_error(err, use_stack=True):
    """Report a CompileError: either capture it on the hold stack or
    write it to the listing/echo streams and bump the error count.

    Raises AbortError when ``Options.fast_fail`` is set.
    """
    if error_stack and use_stack:
        # errors are currently being held (see hold_errors)
        error_stack[-1].append(err)
    else:
        global num_errors
        # See Main.py for why dual reporting occurs. Quick fix for now.
        if err.reported: return
        err.reported = True
        try: line = u"%s\n" % err
        except UnicodeEncodeError:
            # Python <= 2.5 does this for non-ASCII Unicode exceptions
            line = format_error(getattr(err, 'message_only', "[unprintable exception message]"),
                                getattr(err, 'position', None)) + u'\n'
        if listing_file:
            try: listing_file.write(line)
            except UnicodeEncodeError:
                listing_file.write(line.encode('ASCII', 'replace'))
        if echo_file:
            try: echo_file.write(line)
            except UnicodeEncodeError:
                echo_file.write(line.encode('ASCII', 'replace'))
        num_errors += 1
        if Options.fast_fail:
            raise AbortError("fatal errors")
def error(position, message):
    """Create, report and return a CompileError at *position*.

    A missing position indicates a compiler bug and raises
    InternalError instead.
    """
    #print("Errors.error:", repr(position), repr(message)) ###
    if position is None:
        raise InternalError(message)
    err = CompileError(position, message)
    if DebugFlags.debug_exception_on_error: raise Exception(err) # debug
    report_error(err)
    return err
LEVEL = 1 # warn about all errors level 1 or higher


def message(position, message, level=1):
    """Emit an informational note (prefix ``note:``) at *position*.

    Suppressed when *level* is below the module LEVEL threshold.
    Returns the CompileWarning that was written, or None if suppressed.
    """
    if level < LEVEL:
        return
    warn = CompileWarning(position, message)
    line = "note: %s\n" % warn
    if listing_file:
        listing_file.write(line)
    if echo_file:
        echo_file.write(line)
    return warn
def warning(position, message, level=0):
    """Emit a warning (prefix ``warning:``) at *position*.

    Suppressed below the LEVEL threshold; promoted to a hard error when
    ``Options.warning_errors`` is set and a position is available.
    """
    if level < LEVEL:
        return
    if Options.warning_errors and position:
        return error(position, message)
    warn = CompileWarning(position, message)
    line = "warning: %s\n" % warn
    if listing_file:
        listing_file.write(line)
    if echo_file:
        echo_file.write(line)
    return warn
# messages already emitted by warn_once(), keyed by message text
_warn_once_seen = {}


def warn_once(position, message, level=0):
    """Like warning(), but each distinct message is emitted only once."""
    if level < LEVEL or message in _warn_once_seen:
        return
    warn = CompileWarning(position, message)
    line = "warning: %s\n" % warn
    if listing_file:
        listing_file.write(line)
    if echo_file:
        echo_file.write(line)
    _warn_once_seen[message] = True
    return warn
# These functions can be used to momentarily suppress errors.

# Stack of capture lists; while non-empty, report_error() appends to the
# innermost list instead of emitting (see release_errors()).
error_stack = []


def hold_errors():
    """Start capturing reported errors instead of emitting them."""
    error_stack.append([])
def release_errors(ignore=False):
    """Stop capturing errors; re-report the held ones unless *ignore*."""
    pending = error_stack.pop()
    if ignore:
        return
    for err in pending:
        report_error(err)
def held_errors():
    # Peek at the list of errors captured by the innermost hold_errors().
    return error_stack[-1]
# same as context manager:
@contextmanager
def local_errors(ignore=False):
    """Context-manager form of hold_errors()/release_errors().

    Yields the list that captures errors reported inside the block; on
    exit the held errors are re-reported unless *ignore* is true.
    """
    errors = []
    error_stack.append(errors)
    try:
        yield errors
    finally:
        release_errors(ignore=ignore)
# this module needs a redesign to support parallel cythonisation, but
# for now, the following works at least in sequential compiler runs
def reset():
    """Clear the warn-once cache and drop any held-error capture lists."""
    _warn_once_seen.clear()
    del error_stack[:]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@cython@Cython@Compiler@Errors.py@.PATH_END.py
|
{
"filename": "_tickvalssrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/colorbar/_tickvalssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="tickvalssrc", parent_name="surface.colorbar", **kwargs
):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@colorbar@_tickvalssrc.py@.PATH_END.py
|
{
"filename": "fancyfont.py",
"repo_name": "CadenArmstrong/simuTrans",
"repo_path": "simuTrans_extracted/simuTrans-master/fancyfont.py",
"type": "Python"
}
|
class colors(object):
    """ANSI SGR escape-code helpers for colouring terminal output."""

    def __init__(self):
        # escape sequences, one per supported style
        self.HEADER = '\033[95m'
        self.OKBLUE = '\033[94m'
        self.OKGREEN = '\033[92m'
        self.WARNING = '\033[93m'
        self.FAIL = '\033[91m'
        self.ENDC = '\033[0m'
        self.BOLD = "\033[1m"
        self.UNDERLINE = '\033[4m'

    def _wrap(self, code, msg):
        # surround msg with the style code and the reset sequence
        return code + msg + self.ENDC

    def red(self, msg):
        return self._wrap(self.FAIL, msg)

    def green(self, msg):
        return self._wrap(self.OKGREEN, msg)

    def blue(self, msg):
        return self._wrap(self.OKBLUE, msg)

    def yellow(self, msg):
        return self._wrap(self.WARNING, msg)

    def magenta(self, msg):
        return self._wrap(self.HEADER, msg)

    def bold(self, msg):
        return self._wrap(self.BOLD, msg)

    def underline(self, msg):
        return self._wrap(self.UNDERLINE, msg)
if __name__=='__main__':
    # Demo: exercise every style.  Parenthesized single-argument print
    # behaves identically under Python 2 (prints the expression) and is
    # required syntax under Python 3, where the bare `print x` statement
    # is a SyntaxError.
    t=colors()
    print(t.magenta('m'))
    print(t.bold('k'))
    print(t.underline('underline'))
    print(t.red('r'))
    print(t.green('g'))
    print(t.blue('b'))
    print(t.yellow('y'))
|
CadenArmstrongREPO_NAMEsimuTransPATH_START.@simuTrans_extracted@simuTrans-master@fancyfont.py@.PATH_END.py
|
{
"filename": "constants.py",
"repo_name": "sebhoof/bhsr",
"repo_path": "bhsr_extracted/bhsr-main/python/bhsr/constants.py",
"type": "Python"
}
|
####################################
# Physical and other constants #
####################################
import numpy as np
# Units conversions
eV_per_kg = 1.782622e-36                     # kg per eV (via E = mc^2)
inv_eVs = 6.582119e-16                       # hbar in eV s (converts eV^-1 <-> s)
mP_in_GeV = 1.220890e19 # GeV                # Planck mass
mP_in_eV = 1e9*mP_in_GeV # eV
mPred_in_GeV = mP_in_GeV/np.sqrt(8.0*np.pi) # GeV  # reduced Planck mass
mPred_in_eV = 1e9*mPred_in_GeV # eV
# NOTE(review): name misspells "Newton"; kept for backward compatibility
# with existing importers.
GNetwon_in_eV = 1/mP_in_eV**2 # eV^-2        # Newton's constant G = 1/mP^2
# Astro constants
Msol_in_kg = 1.99841e30 # kg                 # solar mass
Msol_in_eV = Msol_in_kg/eV_per_kg # eV
mP2_in_eVMsol = mP_in_eV*(mP_in_eV/Msol_in_eV) # eV Msol
GNewton = 1/mP2_in_eVMsol # eV^-1 Msol^-1    # G in mixed eV/Msol units
tHubble_in_yr = 1.45e10 # yr                 # Hubble time
tSalpeter_in_yr = 4.5e7 # yr                 # Salpeter (Eddington) timescale
tSR_in_yr = tSalpeter_in_yr # yr             # superradiance reference timescale
yr_in_s = 365.25*24*60*60 # s
inv_eVyr = inv_eVs/yr_in_s                   # hbar in eV yr
inv_tSalpeter = inv_eVyr/tSalpeter_in_yr # eV
inv_tSR = inv_eVyr/tSR_in_yr # eV
# Bosenova constants
# Empirical numerical factor from simulations; see https://arxiv.org/abs/1411.2263
c0_n_bose = 5.0
|
sebhoofREPO_NAMEbhsrPATH_START.@bhsr_extracted@bhsr-main@python@bhsr@constants.py@.PATH_END.py
|
{
"filename": "process_wvlsol_volume_fit.py",
"repo_name": "igrins/plp",
"repo_path": "plp_extracted/plp-master/igrins/procedures/process_wvlsol_volume_fit.py",
"type": "Python"
}
|
import numpy as np
import pandas as pd
# import json
from numpy.linalg import lstsq
from .nd_poly import NdPolyNamed
def _get_center(key_list):
key_list = sorted(key_list)
n = len(key_list)
assert divmod(n, 2)[1] == 1
center_key = key_list[divmod(n, 2)[0]]
return center_key
def _append_offset(df):
    """
    input should be indexed with multiple values of 'slit_center'.
    Columns of 'pixel0' and 'offsets' will be appended and returned.

    'pixels0' holds each line's pixel position measured at the central
    slit position; 'offsets' is the shift of the line at every other
    slit position relative to that.  Rows with undefined offsets are
    dropped and the index is reset.
    """
    grouped = df.groupby("slit_center")

    # slit position in the middle of the available slit_center values
    slit_center0 = _get_center(grouped.groups.keys())

    rename_dict = {'pixels': 'pixels0'}
    center = grouped.get_group(slit_center0).rename(columns=rename_dict)

    # align each row with the central-slit pixel position of the same line
    pp = df.join(center["pixels0"])
    pp["offsets"] = pp["pixels"] - pp["pixels0"]
    pp_masked = pp[np.isfinite(pp["offsets"])]

    df_offset = pp_masked.reset_index()

    return df_offset
def _volume_poly_fit(points, scalar, orders, names):
    """Least-squares fit of an n-d polynomial to scattered *scalar* data.

    *points* maps each dimension name to its coordinate values; *orders*
    gives the polynomial order per dimension.  Returns the NdPolyNamed
    instance and the raw lstsq result tuple.
    """
    p = NdPolyNamed(orders, names) # order 2 for all dimension.

    v = p.get_array(points)
    v = np.array(v)

    # errors are not properly handled for now.
    s = lstsq(v.T, scalar, rcond=None)
    return p, s
def _get_df(obsset):
    """Load the fitted sky-line pixel table and append offset columns."""
    payload = obsset.load("SKY_FITTED_PIXELS_JSON")
    table = pd.DataFrame(**payload)
    table = table.set_index(["kind", "order", "wavelength"])[["slit_center",
                                                              "pixels"]]
    return _append_offset(table)
def _filter_points(df, drop=0.10):
ss0 = df.groupby("pixels0")["offsets"]
ss0_std = ss0.transform("std")
ss = ss0.std()
vmin = np.percentile(ss, 100*drop)
vmax = np.percentile(ss, 100*(1 - drop))
msk = (ss0_std > vmin) & (ss0_std < vmax)
return df[msk]
def volume_fit(obsset):
    """Fit a 3-d polynomial to the measured line offsets and store it.

    Loads the fitted sky-line pixel table, filters outlier pixel groups,
    fits offsets/slit_position as a polynomial in (pixel, order, slit),
    and stores the coefficients as "VOLUMEFIT_COEFFS_JSON" on obsset.

    Parameters
    ----------
    obsset : observation set object providing load()/store().
    """
    dd = _get_df(obsset)
    dd = _filter_points(dd)

    names = ["pixel", "order", "slit"]
    orders = [3, 2, 1]

    # Because the offset at the slit center should be 0 by construction,
    # we divide the offsets by the (centered) slit position, fit that
    # ratio, and multiply back by the slit position when evaluating.
    cc0 = dd["slit_center"] - 0.5

    # 3d points : x-pixel, order, location on the slit.
    # Rows exactly at the slit center are excluded to avoid division by 0.
    msk = abs(cc0) > 0.
    points = dict(zip(names, [dd["pixels0"][msk],
                              dd["order"][msk],
                              cc0[msk]]))

    # Scalar is the offset of the measured line from its location at the
    # slit center, normalized by the slit position.
    scalar = dd["offsets"][msk] / cc0[msk]

    poly, params = _volume_poly_fit(points, scalar, orders, names)
    # (A disabled matplotlib debug block that referenced undefined names
    # clf/scatter was removed here.)

    # Save the fitted coefficients.
    out_df = poly.to_pandas(coeffs=params[0])
    out_df = out_df.reset_index()

    d = out_df.to_dict(orient="split")
    obsset.store("VOLUMEFIT_COEFFS_JSON", d)
# from ..libs.recipe_helper import RecipeHelper
# def process_band_make_offset_map(utdate, recipe_name, band,
# obsids, config_name):
# from igrins.libs.recipe_helper import RecipeHelper
# helper = RecipeHelper(config_name, utdate, recipe_name)
|
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@igrins@procedures@process_wvlsol_volume_fit.py@.PATH_END.py
|
{
"filename": "test1a.py",
"repo_name": "teuben/QAC",
"repo_path": "QAC_extracted/QAC-master/ngVLA/test1a.py",
"type": "Python"
}
|
# test1 with more configurations
#
#
# NOTE(review): this is a CASA/QAC driver script; the qac_* and tp2vispl
# helpers are assumed to be supplied by the QAC execution environment.
test = 'test1a'
model = '../models/skymodel.fits' # this has phasecenter with dec=-30 for ALMA sims
phasecenter = 'J2000 180.000000deg 40.000000deg' # so modify this for ngVLA

# pick the piece of the model to image, and at what pixel size
# natively this model is 4096 pixels at 0.05"
imsize_m = 4096
pixel_m = 0.01

# pick the sky imaging parameters (for tclean)
imsize_s = 512
pixel_s = 0.25

# pick a few niter values for tclean to check flux convergence
niter = [0,1000,2000]
#niter = [0]

# pick a cfg
cfg = [0,1]
#cfg = [0,2]

# -- do not change parameters below this ---
import sys

# Allow command-line overrides of the parameters above; the variable
# names must stay stable since each argument is exec'd verbatim.
for arg in qac_argv(sys.argv):
    exec(arg)

ptg = test + '.ptg' # use a single pointing mosaic for the ptg
# Normalize a scalar niter override into a list.
if type(niter) != type([]): niter = [niter]

# report
qac_log("TEST: %s" % test)
qac_begin(test)
qac_version()

# create a single pointing mosaic
qac_ptg(phasecenter, ptg)

# begin clean project
qac_project(test)

# create a MS based on a model and antenna configuration
qac_log("VLA")
ms1 = {}
for c in cfg:
    ms1[c] = qac_vla(test, model, imsize_m, pixel_m, cfg=c, ptg=ptg, phasecenter=phasecenter)
    cdir = test + '/clean1_%d' % c
    qac_clean1(cdir, ms1[c], imsize_s, pixel_s, phasecenter=phasecenter, niter=niter)
    qac_plot(cdir + '/dirtymap.image')

# NOTE(review): in Python 3 this is a dict view, not a list — confirm the
# downstream helpers accept a non-list sequence.
intms = ms1.values()

# clean combined
qac_log("CLEAN %s" % str(intms))
cdir = test+'/clean2'
qac_clean1(cdir, intms, imsize_s, pixel_s, phasecenter=phasecenter, niter=niter)
qac_plot(cdir + '/dirtymap.image')
tp2vispl(intms, outfig=test+'/tp2vispl.png')
|
teubenREPO_NAMEQACPATH_START.@QAC_extracted@QAC-master@ngVLA@test1a.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "sherpa/sherpa",
"repo_path": "sherpa_extracted/sherpa-main/sherpa/utils/__init__.py",
"type": "Python"
}
|
#
# Copyright (C) 2007, 2015, 2016, 2018 - 2024
# Smithsonian Astrophysical Observatory
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""Objects and utilities used by multiple Sherpa subpackages.
Code in this module should not be considered stable, as it may be
moved, changed, or removed.
"""
from collections.abc import Iterable
import inspect
import logging
import operator
import os
import pydoc
import string
import sys
from types import FunctionType, MethodType
from typing import Any, Callable, Generic, Optional, Sequence, \
TypeVar
import warnings
import numpy as np
# Note: _utils.gsl_fcmp and _utils.ndtri are not exported from
# this module; is this intentional?
from ._utils import hist1d, hist2d # type: ignore
from . import _utils # type: ignore
from . import _psf # type: ignore
from .err import IOErr
# We re-export symbols from sherpa.utils modules but this will be
# removed at some point.
#
from .guess import _guess_ampl_scale, get_midpoint, get_peak, \
get_position, get_valley, guess_amplitude, guess_amplitude2d, \
guess_amplitude_at_ref, guess_bounds, guess_fwhm, get_fwhm, \
guess_position, guess_radius, guess_reference, param_apply_limits
from .parallel import multi as _multi, ncpus as _ncpus, \
parallel_map, parallel_map_funcs, run_tasks
from .random import poisson_noise
# Module-level convenience handles into the "sherpa" logger hierarchy.
warning = logging.getLogger("sherpa").warning
debug = logging.getLogger("sherpa").debug

# Explicit public API of this module (the symbols exported by "import *").
__all__ = ('NoNewAttributesAfterInit',
           '_guess_ampl_scale', 'apache_muller', 'bisection', 'bool_cast',
           'calc_ftest', 'calc_mlr', 'calc_total_error', 'create_expr',
           'create_expr_integrated',
           'dataspace1d', 'dataspace2d', 'demuller',
           'erf', 'export_method', 'extract_kernel',
           'filter_bins', 'gamma', 'get_fwhm',
           'get_keyword_defaults', 'get_keyword_names', 'get_midpoint',
           'get_num_args', 'get_peak', 'get_position', 'get_valley',
           'guess_amplitude', 'guess_amplitude2d', 'guess_amplitude_at_ref',
           'guess_bounds', 'guess_fwhm', 'guess_position', 'guess_radius',
           'guess_reference', 'histogram1d', 'histogram2d', 'igam', 'igamc',
           'incbet', 'interpolate', 'is_binary_file', 'Knuth_close',
           'lgam', 'linear_interp', 'nearest_interp',
           'neville', 'neville2d',
           'new_muller', 'normalize',
           'pad_bounding_box', 'parallel_map', 'parallel_map_funcs',
           'param_apply_limits', 'parse_expr', 'poisson_noise',
           'print_fields', 'rebin',
           'sao_arange', 'sao_fcmp', 'send_to_pager',
           'set_origin', 'sum_intervals', 'zeroin',
           'multinormal_pdf', 'multit_pdf', 'get_error_estimates', 'quantile',
           )
###############################################################################
#
# Types
#
###############################################################################
# This logic was found in several modules so centralize it. Note that
# this is not added to __all__.
#
def is_subclass(t1, t2):
    """Is t1 a strict subclass of t2 (a subclass, but not t2 itself)?"""
    return inspect.isclass(t1) and issubclass(t1, t2) and (t1 is not t2)
###############################################################################
class NoNewAttributesAfterInit:
    """
    Prevents attribute deletion and setting of new attributes after
    __init__ has been called. Derived classes must call
    NoNewAttributesAfterInit.__init__ after all other initialization.
    """

    # Class-level default; each instance flips its own copy in __init__.
    __initialized = False  # Use name mangling

    def __init__(self) -> None:
        # From this point on, only existing attributes may be rebound.
        self.__initialized = True

    def __delattr__(self, name: str) -> None:
        # Block deletion of any existing attribute once initialized.
        if self.__initialized and hasattr(self, name):
            raise AttributeError(f"'{type(self).__name__}' object attribute '{name}' "
                                 "cannot be deleted")

        object.__delattr__(self, name)

    def __setattr__(self, name: str, val: Any) -> None:
        # Reject brand-new attributes after initialization.
        if self.__initialized and (not hasattr(self, name)):
            raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")

        if self.__initialized and hasattr(self, name):
            # Preserve the callable/non-callable nature of an existing
            # attribute: a method may not become data, nor data a method.
            cname = callable(getattr(self, name))
            cval = callable(val)
            if cname and not cval:
                raise AttributeError(f"'{type(self).__name__}' object attribute '{name}' "
                                     "cannot be replaced with a non-callable attribute")

            if not cname and cval:
                raise AttributeError(f"'{type(self).__name__}' object attribute '{name}' "
                                     "cannot be replaced with a callable attribute")

        object.__setattr__(self, name, val)
###############################################################################
#
# Compiled Utilities: _utils
#
###############################################################################
def calc_ftest(dof1, stat1, dof2, stat2):
    """Compare two nested models with the F test.

    The test statistic is the ratio of reduced chi-square values,
    (stat1/dof1) / (stat2/dof2), which follows the F-distribution; the
    tail integral is evaluated with the incomplete Beta function.

    Parameters
    ----------
    dof1 : int or sequence of int
        Degrees of freedom of the simple model.
    stat1 : number or sequence of number
        Best-fit chi-square statistic of the simple model.
    dof2 : int or sequence of int
        Degrees of freedom of the complex model.
    stat2 : number or sequence of number
        Best-fit chi-square statistic of the complex model.

    Returns
    -------
    sig : number or ndarray
        The significance (p-value); a common threshold for preferring
        the complex model is sig < 0.05.

    See Also
    --------
    calc_mlr, incbet

    Notes
    -----
    Only valid when the simple model is nested within the complex one,
    the extra parameters are normally distributed and not truncated by
    parameter-space boundaries, and the best-fit statistics are sampled
    from the chi-square distribution; see Protassov et al. 2002
    (http://adsabs.harvard.edu/abs/2002ApJ...571..545P).

    Examples
    --------
    >>> calc_ftest(11, 16.3, 10, 10.2)
    0.03452352914891555
    """
    # Delegate to the compiled implementation.
    return _utils.calc_ftest(dof1, stat1, dof2, stat2)
def calc_mlr(delta_dof, delta_stat):
    """Compare two nested models with the Maximum Likelihood Ratio test.

    Parameters
    ----------
    delta_dof : int or sequence of int
        Change in the number of degrees of freedom.
    delta_stat : number or sequence of number
        Change in the best-fit statistic value.

    Returns
    -------
    sig : number or ndarray
        The significance (p-value); a common threshold for preferring
        the complex model is sig < 0.05.

    See Also
    --------
    calc_ftest

    Notes
    -----
    Only valid when the simple model is nested within the complex one,
    the extra parameters are normally distributed and not truncated by
    parameter-space boundaries, and the best-fit statistics are sampled
    from the chi-square distribution; see Protassov et al. 2002
    (http://adsabs.harvard.edu/abs/2002ApJ...571..545P).

    Examples
    --------
    >>> calc_mlr(2, 3.7)
    0.15723716631362761
    """
    # Delegate to the compiled implementation.
    return _utils.calc_mlr(delta_dof, delta_stat)
def erf(x):
    """Calculate the error function.

    Parameters
    ----------
    x : scalar or array

    Returns
    -------
    val : scalar or array
        The error function evaluated elementwise at the input.

    See Also
    --------
    gamma

    Examples
    --------
    >>> erf(0)
    0.0
    """
    # Thin wrapper over the compiled implementation.
    return _utils.erf(x)
def igamc(a, x):
    """Calculate the complement of the regularized incomplete Gamma function.

    Defined via the lower function and the Gamma function as::

        igamc(a, x) = 1 - igam(a, x)
                    = 1 / gamma(a) Int_x^Inf e^(-t) t^(a-1) dt

    Parameters
    ----------
    a : scalar or array
        a > 0
    x : scalar or array
        x > 0

    Returns
    -------
    val : scalar or array

    See Also
    --------
    gamma, igam

    Notes
    -----
    Provided by the Cephes Math Library (Moshier, 1985-1987); evaluated
    by a power series or continued-fraction expansion depending on the
    relative values of a and x. Both arguments must be positive.

    Examples
    --------
    >>> igamc(1, 2)
    0.1353352832366127
    """
    return _utils.igamc(a, x)
def igam(a, x):
    """Calculate the regularized incomplete Gamma function (lower).

    Defined via the complete Gamma function as::

        igam(a, x) = 1 / gamma(a) Int_0^x e^(-t) t^(a-1) dt

    Parameters
    ----------
    a : scalar or array
        a > 0
    x : scalar or array
        x > 0

    Returns
    -------
    val : scalar or array

    See Also
    --------
    gamma, igamc

    Notes
    -----
    Provided by the Cephes Math Library (Moshier, 1985-1987); evaluated
    by a power series or continued-fraction expansion depending on the
    relative values of a and x. Both arguments must be positive.

    Examples
    --------
    >>> igam(1, 2)
    0.8646647167633873
    """
    return _utils.igam(a, x)
def incbet(a, b, x):
    """Calculate the incomplete Beta function.

    The integral from x to 1 can be obtained using the relation::

        1 - incbet(a, b, x) = incbet(b, a, 1 - x)

    Parameters
    ----------
    a : scalar or array
        a > 0
    b : scalar or array
        b > 0
    x : scalar or array
        0 <= x <= 1

    Returns
    -------
    val : scalar or array

    See Also
    --------
    calc_ftest

    Notes
    -----
    Provided by the Cephes Math Library (Moshier, 1985-1987); evaluated
    by a continued-fraction expansion or, when b*x is small, by a power
    series.

    Examples
    --------
    >>> incbet(0.3, 0.6, 0.5)
    0.68786273145845922
    """
    return _utils.incbet(a, b, x)
def gamma(z):
    """Calculate the Gamma function.

    Parameters
    ----------
    z : scalar or array
        -171 <= z <= 171.6

    Returns
    -------
    val : scalar or array

    See Also
    --------
    igam, lgam

    Notes
    -----
    Provided by the Cephes Math Library (Moshier, 1985-1987). Arguments
    with |x| >= 34 are reduced by recurrence; large arguments use
    Stirling's formula, and large negative arguments a reflection
    formula.

    Examples
    --------
    >>> gamma(2.3)
    1.1667119051981603
    """
    return _utils.gamma(z)
def lgam(z):
    """Calculate the log (base e) of the Gamma function.

    Parameters
    ----------
    z : scalar or array
        0 <= z <= 2.556348e305

    Returns
    -------
    val : scalar or array

    See Also
    --------
    gamma, igam

    Notes
    -----
    Provided by the Cephes Math Library (Moshier, 1985-1987); uses the
    logarithmic form of Stirling's formula for arguments greater than
    13, recurrence plus a rational approximation for [-33, 33], and a
    cosecant reflection formula below -33.

    Examples
    --------
    >>> lgam(104.56)
    380.21387239435785
    """
    return _utils.lgam(z)
def sao_arange(start, stop, step=None):
    """Create a range of values between start and stop.

    See also `numpy.arange` and `numpy.linspace`.

    Parameters
    ----------
    start, stop : float
        The start and stop points.
    step : float or None, optional
        If not given the step size defaults to 1.0.

    Returns
    -------
    vals : NumPy array
        The values start, start + step, ...; the last point is the
        first position where start + n * step >= stop, so the grid can
        extend past stop.

    Examples
    --------
    >>> sao_arange(1, 3)
    array([ 1.,  2.,  3.])

    >>> sao_arange(1, 3, 0.6)
    array([ 1. ,  1.6,  2.2,  2.8,  3.4])
    """
    # The compiled routine supplies the default step when it is omitted.
    args = (start, stop) if step is None else (start, stop, step)
    return _utils.sao_arange(*args)
def sao_fcmp(x, y, tol):
    """Compare y to x, using an absolute tolerance.

    Parameters
    ----------
    x : number or array_like
        The expected value, or values.
    y : number or array_like
        The value, or values, to check; must match the size of x when x
        is an array (a scalar x may be checked against an array y).
    tol : number
        The absolute tolerance used for the comparison.

    Returns
    -------
    flags : int or array_like
        0 where the values match within tol, -1 where the expected
        value (x) is less than y, and +1 where x is larger than y.

    See Also
    --------
    Knuth_close

    Examples
    --------
    >>> sao_fcmp(1, 1.01, 0.01)
    0

    >>> sao_fcmp(1, [0.9, 1, 1.1], 0.01)
    array([ 1,  0, -1], dtype=int32)
    """
    # The comparison itself happens in the compiled extension.
    return _utils.sao_fcmp(x, y, tol)
def sum_intervals(src, indx0, indx1):
    """Sum up data within one or more pairs of indexes.

    Parameters
    ----------
    src : sequence of floats
        The data to be summed.
    indx0, indx1 : scalar or sequence of int
        Matched pairs of start/end indexes into src; each element of
        indx1 must be at least as large as the corresponding element of
        indx0.

    Returns
    -------
    val : scalar or array
        The sum of src over each interval.

    Notes
    -----
    All indexes are assumed to lie in [0, len(src)); this condition is
    not checked.

    Examples
    --------
    >>> sum_intervals([1.1, 2.2, 3.3, 4.4], 1, 2)
    5.5
    """
    return _utils.sum_intervals(src, indx0, indx1)
def rebin(y0, x0lo, x0hi, x1lo, x1hi):
    """Rebin a histogram onto a new grid.

    Parameters
    ----------
    y0 : sequence of numbers
        The Y values of the histogram to rebin.
    x0lo, x0hi : sequence of numbers
        The lower and upper bin edges of the input grid (same size as
        y0).
    x1lo, x1hi : sequence of numbers
        The lower and upper bin edges of the output grid.

    Returns
    -------
    yout : NumPy array of numbers
        The re-binned Y values (same size as x1lo).
    """
    return _utils.rebin(y0, x0lo, x0hi, x1lo, x1hi)
def neville(xout, xin, yin):
    """Polynomial one-dimensional interpolation using Neville's method.

    Parameters
    ----------
    xout : array_like
        The positions at which to interpolate.
    xin : array_like
        The x values of the data; must be sorted so that they are
        monotonically increasing.
    yin : array_like
        The y values of the data (same size as xin).

    Returns
    -------
    yout : NumPy array of numbers
        The interpolated y values (same size as xout).

    See Also
    --------
    interpolate, linear_interp, nearest_interp

    References
    ----------
    http://en.wikipedia.org/wiki/Neville%27s_algorithm

    Examples
    --------
    >>> import numpy as np
    >>> x = [1.2, 3.4, 4.5, 5.2]
    >>> y = [12.2, 14.4, 16.8, 15.5]
    >>> ygrid = neville(np.linspace(2, 5, 5), x, y)
    """
    return _utils.neville(xout, xin, yin)
###############################################################################
#
# Compiled Utilities: _psf
#
###############################################################################
def extract_kernel(kernel, dims_kern, dims_new, center, xlo, xhi, widths,
                   radial):
    """Extract the kernel.

    All arguments are forwarded unchanged to the compiled _psf routine.

    Parameters
    ----------
    kernel
    dims_kern
    dims_new
    center
    xlo
    xhi
    widths
    radial : int
        Set to 1 if using a radial profile, 0 otherwise.

    Returns
    -------
    out, dims, frac, lo, hi
    """
    return _psf.extract_kernel(kernel, dims_kern, dims_new, center,
                               xlo, xhi, widths, radial)
def normalize(xs):
    """Normalize an array.

    Parameters
    ----------
    xs : sequence
        The values to normalize. This must be a 1D array.

    Returns
    -------
    ns : ndarray
        The values of xs divided by the sum of xs.
    """
    # Normalization is performed by the compiled extension.
    return _psf.normalize(xs)
def set_origin(dims, maxindex=None):
    """Return the position of the origin of the kernel.

    Parameters
    ----------
    dims : number or sequence
        The kernel dimensions: a scalar or a one- or two-element
        sequence.
    maxindex : None or int, optional
        When given, this index into the flattened array is used as the
        center; otherwise the center of the grid is used.

    Returns
    -------
    cs : ndarray or number
        The coordinates of the center, matching the input dims format.

    Examples
    --------
    >>> set_origin(12)
    5
    >>> set_origin([12], 4)
    array([4])
    >>> set_origin([12, 13], 42)
    array([6, 3])
    """
    # The compiled routine picks the grid center when maxindex is omitted.
    args = (dims,) if maxindex is None else (dims, maxindex)
    return _psf.set_origin(*args)
def pad_bounding_box(kernel, mask):
    """Expand the kernel to match the mask.

    Parameters
    ----------
    kernel : numeric sequence
        The data to copy (a 1D array).
    mask : int sequence
        Determines the size of the output and where the kernel values
        are placed; the number of non-zero elements is expected to
        match the size of kernel.

    Returns
    -------
    nkernel : ndarray
        The same size as the mask, zero everywhere except the non-zero
        mask cells, which are filled from kernel in order.

    Examples
    --------
    >>> pad_bounding_box([1, 2, 3, 4], [1, 1, 0, 1, 1, 0, 0, 0, 0])
    array([ 1.,  2.,  0.,  3.,  4.,  0.,  0.,  0.,  0.])
    """
    return _psf.pad_bounding_box(kernel, mask)
###############################################################################
#
# Utilities
#
###############################################################################
# At what precision do we assume equality in energy grids?
# (single-precision machine epsilon)
eps = np.finfo(np.float32).eps
def filter_bins(mins: Sequence[Optional[float]],
                maxes: Sequence[Optional[float]],
                axislist: Sequence[Sequence[float]],
                integrated: bool = False
                ) -> Optional[np.ndarray]:
    """Build the boolean mask selected by a set of range filters.

    The ranges are treated as inclusive at both ends if integrated is
    False, the default; otherwise the lower limit is inclusive but the
    upper limit is exclusive.

    Parameters
    ----------
    mins : sequence of values
        The minimum value of each valid range (elements may be None).
    maxes : sequence of values
        The maximum value of each valid range (elements may be None).
    axislist : sequence of arrays
        The axes the ranges apply to; must contain as many elements as
        mins and maxes, and the elements of axislist must agree in
        length.
    integrated : bool, optional
        Set when the data is integrated (low and high bin edges). In
        that case axislist is expected to hold alternating low/high
        edge rows, with the limits given in (None, hi) and (lo, None)
        ordering.

    Returns
    -------
    mask : ndarray or None
        True where a value is included, False where it is excluded;
        None when the input sequences are empty.

    Examples
    --------
    Select the points in xs within the range 1.5 <= x <= 4:

    >>> xs = [1, 2, 3, 4, 5]
    >>> filter_bins([1.5], [4], [xs])
    array([False,  True,  True,  True, False])

    For integrated data, send the edges separately and set the flag:

    >>> xlo = [1, 2, 3, 4, 5]
    >>> xhi = [2, 3, 4, 5, 6]
    >>> filter_bins([None, 2], [4, None], [xlo, xhi], integrated=True)
    array([False,  True,  True, False, False])
    """

    def above_lo(lo, axis):
        # Tolerant comparison: lo <= axis (strict < for integrated data).
        flags = sao_fcmp(lo, axis, eps)
        return flags < 0 if integrated else flags <= 0

    def below_hi(hi, axis):
        # Tolerant comparison: axis <= hi (strict < for integrated data).
        flags = sao_fcmp(hi, axis, eps)
        return flags > 0 if integrated else flags >= 0

    mask = None
    for lo, hi, axis in zip(mins, maxes, axislist):
        if lo is None and hi is None:
            continue

        axis = np.asarray(axis)
        selected = np.ones(axis.size, dtype=bool)
        if lo is not None:
            selected &= above_lo(lo, axis)
        if hi is not None:
            selected &= below_hi(hi, axis)

        mask = selected if mask is None else mask & selected

    return mask
def bool_cast(val):
"""Convert a string to a boolean.
Parameters
----------
val : bool, str or sequence
The input value to decode.
Returns
-------
flag : bool or ndarray
True or False if val is considered to be a true or false term.
If val is a sequence then the return value is an ndarray of
the same size.
Notes
-----
The string is compared in a case-insensitive manner to the
following: 'true', 'on', 'yes', '1', 't', and 'y' for
`True` and 'false', 'off', 'no', '0', 'f', and 'n' for `False`.
If there is no match to the above then the default conversion
provided by the `bool` routine is used.
"""
if type(val) in (tuple, list, np.ndarray):
return np.asarray([bool_cast(item) for item in val], bool)
if type(val) == str:
# since built in bool() only returns false for empty strings
vlo = val.lower()
if vlo in ('false', 'off', 'no', '0', 'f', 'n'):
return False
if vlo in ('true', 'on', 'yes', '1', 't', 'y'):
return True
raise TypeError(f"unknown boolean value: '{val}'")
# use built in bool cast
return bool(val)
# Telling the type system that the signature of meth is "the same" as
# the signature of the return value probably needs Python 3.10 for
# ParamSpec and then maybe Python 3.12 for the generic support.
#
def export_method(meth: Callable,
                  name: Optional[str] = None,
                  modname: Optional[str] = None
                  ) -> Callable:
    """
    Given a bound instance method, return a simple function that wraps
    it. The only difference between the interface of the original
    method and the generated function is that the latter doesn't
    include 'self' in its argument list. This means that when the
    wrapper function is called with an incorrect number of arguments,
    the error message does not include 'self' in argument counts. The
    only reason to generate such a wrapper is to hide from a user the
    fact that they're using an instance method rather than a simple
    function. If meth is not an instance method, it is returned
    unchanged.

    If name is None, the generated function will have the same name as
    the input method. Otherwise, name must be a string containing the
    desired name of the new method. If modname is not None, it must
    be a string and will be used as the module name for the generated
    function. Note that the caller is responsible for assigning the
    returned function to an appropriate name in the calling scope.
    """
    # Only bound methods are wrapped; plain functions pass straight
    # through unchanged.
    if not isinstance(meth, MethodType):
        return meth

    # Most of the functionality here can be provided by
    # functools.wraps (and in fact would add more functionality, such
    # as the ability to handle keyword-only or positional-only
    # arguments). It would also likely require less maintenance.
    # However, it does not handle two important issues:
    #
    # a) when an error is raised it is reported as from Session.<name>
    #    rather than <name>
    #
    #    Attempts to change the __qualname__ field have not been
    #    successful. The wrapped function can include a check for an
    #    exception, manually removing any leading "Session." text
    #    from the message, but it is not particularly good code.
    #
    # b) Error messages related to the number of arguments include the
    #    self argument (i.e. are 1 more than the user is told to
    #    expect), which is one of the main reasons for this routine.
    #
    # The only time name is not None appears to be in the tests, so
    # can this feature be removed?
    #
    if name is None:
        name = meth.__name__

    # Choose a name for the original method inside the generated
    # function's globals that cannot collide with the wrapper's own
    # name (the exec'd code must call the real method, not itself).
    if name == meth.__name__:
        old_name = f'_old_{name}'
    else:
        old_name = meth.__name__

    # Stash the pieces that have to be re-attached to the generated
    # function after exec (exec only sees the bare signature).
    defaults = meth.__defaults__
    doc = meth.__doc__

    # Render a parameter for use in the generated source, restoring
    # the */** markers that inspect strips from the name.
    def tostr(p):
        if p.kind == p.VAR_KEYWORD:
            return f"**{p.name}"
        if p.kind == p.VAR_POSITIONAL:
            return f"*{p.name}"
        return p.name

    # Ideally this would also identify when to add "/" or "*"
    # to indicate positional-only or keyword-only arguments.
    #
    sig = inspect.signature(meth)
    argspec = ",".join([tostr(p) for p in sig.parameters.values()])

    # Create a wrapper function with no default arguments
    g: dict[str, Any] = {old_name: meth}

    # The only time modname is None appears to be the test so can we
    # make it a required argument?
    #
    if modname is not None:
        g['__name__'] = modname

    # Build and exec the wrapper source; 'self' is gone because meth
    # is already bound, so the generated signature omits it.
    fdef = f'def {name}({argspec}): return {old_name}({argspec})'
    exec(fdef, g)

    # Create another new function from the one we just made, this time
    # adding the default arguments, doc string, and any annotations
    # from the original method.
    #
    # Why does this not change the __defaults__ field of new_meth
    # rather than creating a copy of it?
    #
    new_meth = g[name]
    new_meth = FunctionType(new_meth.__code__, new_meth.__globals__,
                            new_meth.__name__, defaults,
                            new_meth.__closure__)
    new_meth.__doc__ = doc
    new_meth.__annotations__ = meth.__annotations__
    return new_meth
def get_keyword_names(func, skip=0):
    """Return the names of the keyword arguments.

    Parameters
    ----------
    func
        The function to query.
    skip : int, optional
        The number of keyword arguments to skip.

    Returns
    -------
    names : list of str
        The names of the keyword arguments. It can be empty.

    See Also
    --------
    get_keyword_defaults, get_num_args
    """
    # This used to use getargspec but was changed to use inspect
    # since the former was removed briefly (circa Python 3.6).
    #
    # Compare the default against the "empty" sentinel by identity,
    # not equality: an equality test can misbehave (or raise) when a
    # default value defines broadcasting __eq__, such as a NumPy
    # array.
    sig = inspect.signature(func)
    kwargs = [p.name
              for p in sig.parameters.values()
              if p.kind == p.POSITIONAL_OR_KEYWORD and
              p.default is not p.empty]
    return kwargs[skip:]
def get_keyword_defaults(func, skip=0):
    """Return the keyword arguments and their default values.

    Note that a similar function `sherpa.plot.backend_utils.get_keyword_defaults`
    exists, which differs from this one in that it deals with keyword-only
    arguments, not all arguments.

    Parameters
    ----------
    func
        The function to query.
    skip : int, optional
        The number of keyword arguments to skip.

    Returns
    -------
    vals : dict
        The keys are names of the keyword arguments, the values are
        the default value for that parameter. It can be empty.

    See Also
    --------
    get_keyword_names, get_num_args,
    `sherpa.plot.backend_utils.get_keyword_defaults`
    """
    # This used to use getargspec but was changed to use inspect
    # since the former was removed briefly (circa Python 3.6).
    #
    # Compare the default against the "empty" sentinel by identity,
    # not equality: an equality test can misbehave (or raise) when a
    # default value defines broadcasting __eq__, such as a NumPy
    # array.
    sig = inspect.signature(func)
    kwargs = [(p.name, p.default)
              for p in sig.parameters.values()
              if p.kind == p.POSITIONAL_OR_KEYWORD and
              p.default is not p.empty]
    return dict(kwargs[skip:])
def get_num_args(func):
    """Return the number of arguments for a function.

    Parameters
    ----------
    func
        The function to query.

    Returns
    -------
    ntotal, npos, nkeyword : int, int, int
        The total number of arguments, the number of positional
        arguments, and the number of keyword arguments.

    See Also
    --------
    get_keyword_defaults, get_keyword_names
    """
    # This used to use getargspec but was changed to use inspect
    # since the former was removed briefly (circa Python 3.6).
    #
    # A single pass over the parameters is enough: each
    # POSITIONAL_OR_KEYWORD parameter is either positional (no
    # default) or a keyword argument. The "empty" sentinel is checked
    # by identity since equality can misbehave for defaults with
    # broadcasting __eq__ (such as a NumPy array).
    sig = inspect.signature(func)
    npos = 0
    nkw = 0
    for p in sig.parameters.values():
        if p.kind != p.POSITIONAL_OR_KEYWORD:
            continue
        if p.default is p.empty:
            npos += 1
        else:
            nkw += 1

    return (npos + nkw, npos, nkw)
def print_fields(names, vals, converters=None):
    """
    Given a list of strings names and mapping vals, where names is a
    subset of vals.keys(), return a listing of name/value pairs
    printed one per line in the format '<name> = <value>'. If a value
    is a NumPy array, print it in the format
    '<data type name>[<array size>]'. Otherwise, use str(value).
    """
    # This is the part of the deprecated typeNA dictionary Sherpa
    # would use up to v4.11.0. We included the dictionaty verbatim,
    # excluding the complex mapping which where wrong in typeNA.
    # Note only the class -> string mappings have been copied over.
    if converters is None:
        converters = {np.bool_: 'Bool',
                      np.bytes_: 'Bytes0',
                      np.complex128: 'Complex128',
                      np.complex64: 'Complex64',
                      np.datetime64: 'Datetime64',
                      np.float16: 'Float16',
                      np.float32: 'Float32',
                      np.float64: 'Float64',
                      np.int16: 'Int16',
                      np.int32: 'Int32',
                      np.int64: 'Int64',
                      np.int8: 'Int8',
                      np.object_: 'Object0',
                      np.str_: 'Str0',
                      np.timedelta64: 'Timedelta64',
                      np.uint16: 'UInt16',
                      np.uint32: 'UInt32',
                      np.uint64: 'UInt64',
                      np.uint8: 'UInt8',
                      np.void: 'Void0'
                      }

        # Extended-precision types are platform dependent, so add
        # them only when this NumPy build provides them.
        for attr, label in (('complex256', 'Complex256'),
                            ('float128', 'Float128')):
            try:
                converters[getattr(np, attr)] = label
            except AttributeError:
                pass

    # Left-justify each name to the width of the longest one so the
    # '=' signs line up.
    width = max(len(n) for n in names)

    out = []
    for field in names:
        value = vals[field]
        if isinstance(value, np.ndarray):
            vstr = f'{converters[value.dtype.type]}[{value.size}]'
        else:
            vstr = str(value)
        out.append(f'{field:<{width}} = {vstr}')

    return '\n'.join(out)
def create_expr(vals, mask=None, format='%s', delim='-'):
    """Create a string representation of a filter.

    Convert the selected values into a comma-separated list of
    ranges, each written as low, delimiter, high (or a single value
    when low equals high). When mask is None the values themselves
    are treated as channel numbers, so two values are contiguous when
    they differ by 1; otherwise the mask - which covers the full,
    unfiltered grid - determines which values are adjacent.

    Parameters
    ----------
    vals : sequence
        The selected values (channel numbers when mask is None).
    mask : sequence of bool or None, optional
        The mask for the full dataset: True marks an included
        element. The number of True entries must match len(vals).
    format : str, optional
        The format used to display each value.
    delim : str, optional
        The separator for a range.

    Raises
    ------
    ValueError
        If the ``vals`` and ``mask`` sequences do not match: the
        length of ``vals`` must equal the number of True values in
        ``mask``.

    See Also
    --------
    create_expr_integrated, parse_expr

    Examples
    --------

    >>> create_expr([1, 2, 4, 5, 7])
    '1-2,4-5,7'

    >>> create_expr([0.1, 0.2, 0.4, 0.8], [True, True, True, False, False, True])
    '0.1-0.4,0.8'
    """
    nvals = len(vals)
    if nvals == 0:
        return ''
    if nvals == 1:
        return format % vals[0]

    if mask is None:
        # The values themselves act as positions on the grid.
        positions = vals
    else:
        # Ensure we have a boolean array to make indexing behave sensibly
        # (NumPy 1.17 or so changed behavior related to this).
        #
        bmask = np.asarray(mask, dtype=bool)
        if sum(bmask) != nvals:
            raise ValueError("mask array mismatch with vals")

        # Only the difference between consecutive selected positions
        # matters, so a simple 0..n-1 index works.
        positions = np.arange(len(bmask))[bmask]

    # Indices (into vals) after which a gap occurs.
    breaks, = np.where(np.diff(positions) != 1)

    # Collect (low, high) pairs, one per contiguous run.
    pairs = []
    lo = vals[0]
    for b in breaks:
        pairs.append((lo, vals[b]))
        lo = vals[b + 1]

    pairs.append((lo, vals[-1]))

    def render(lo, hi):
        label = format % lo
        if lo == hi:
            return label
        return label + delim + (format % hi)

    return ",".join(render(*pair) for pair in pairs)
def create_expr_integrated(lovals, hivals, mask=None,
format='%s', delim='-',
eps=np.finfo(np.float32).eps):
"""Create a string representation of a filter (integrated).
Use the mask to convert the input values into a set of
comma-separated filters - low value and high value, separated by
the delimiter - that represent the data. Unlike `create_expr` this
routine uses the lovals values for the start of the bin and
hivals for the end of each bin, and assumes that contiguous bins
should be combined.
Parameters
----------
lovals, hivals : sequence
The lower and upper values of each bin. It is required that
they are in ascending order and ``lovals`` < ``hivals``.
mask : sequence of bool or None, optional
The mask setting for the full dataset, without any filtering
applied. A value of True indicates the element is included
and False means it is excluded. Note that this is opposite to the
numpy convention in numpy masked arrays.
format : str, optional
The format used to display each value.
delim : str, optional
The separator for a range.
eps : number, optional
This value is unused.
Raises
------
ValueError
If the ``lovals`` and ``hivals`` sequences do not match.
See Also
--------
create_expr, parse_expr
Examples
--------
When there is no mask, or all mask values are True, we just show
the full range:
>>> create_expr_integrated([1, 2, 3, 4], [2, 3, 4, 5])
'1-5'
>>> create_expr_integrated([1, 2, 4, 5, 7], [2, 3, 5, 6, 8])
'1-8'
>>> create_expr_integrated([0.1, 0.2, 0.4, 0.8], [0.2, 0.4, 0.8, 1.0])
'0.1-1.0'
>>> create_expr_integrated([0.1, 0.2, 0.4, 0.8], [0.2, 0.4, 0.6, 1.0], [True, True, True, True])
'0.1-1.0'
If a mask is given then it defines the bins that are grouped
together, even if the bins are not contiguous:
>>> create_expr_integrated([1, 2, 4], [2, 3, 5], [True, True, False, True])
'1-3,4-5'
>>> create_expr_integrated([1, 3, 5], [2, 4, 6], [True, True, False])
'1-4,5-6'
More examples of the mask controlling the grouping:
>>> create_expr_integrated([0.1, 0.2, 0.6, 0.8], [0.2, 0.4, 0.8, 1.0], [True, True, False, True, True])
'0.1-0.4,0.6-1.0'
>>> create_expr_integrated([0.1, 0.2, 0.4, 0.8], [0.2, 0.3, 0.5, 1.0], [True, True, False, True, False, True])
'0.1-0.3,0.4-0.5,0.8-1.0'
>>> create_expr_integrated([0.1, 0.2, 0.4, 0.8], [0.2, 0.3, 0.5, 1.0], [False, True, True, False, True, False, True, False])
'0.1-0.3,0.4-0.5,0.8-1.0'
An interesting case is that you can add a "break" between
contiguous bins (this behavior may be changed):
>>> create_expr_integrated([1, 2, 3, 4], [2, 3, 4, 5], [True, False, True, True, True])
'1-2,2-5'
"""
# Follow create_expr.
#
if len(lovals) != len(hivals):
raise ValueError("hivals array mismatch with lovals")
if len(lovals) == 0:
return ''
# To identify where there's a break we use an array of consecutive
# integers that have missing data masked out.
#
if mask is None:
seq = np.arange(len(lovals))
else:
mask = np.asarray(mask, dtype=bool)
if sum(mask) != len(lovals):
raise ValueError("mask array mismatch with lovals")
seq = np.arange(len(mask))
seq = seq[mask]
out = format % lovals[0]
startbins = lovals[1:]
endbins = hivals[:-1]
diffs = np.diff(seq)
idxs, = np.where(diffs != 1)
for idx in idxs:
out += f"{delim}{format % endbins[idx]},{format % startbins[idx]}"
out += f"{delim}{format % hivals[-1]}"
return out
def parse_expr(expr):
    """Convert a filter expression into its parts.

    This is intended for parsing a notice or ignore expression
    given as a string.

    Parameters
    ----------
    expr : str
        The filter expression, of the form 'a:b' or a single number,
        separated by commas, and white space is ignored. The
        upper or lower limit of a pair may be ignored (e.g. 'a:' or
        ':b').

    Returns
    -------
    filters : list of pairs
        Each pair gives the lower- and upper-edge of the filter,
        using ``None`` to represent no limit.

    See Also
    --------
    create_expr, create_expr_int

    Notes
    -----
    There is no attempt to validate that the expression contains
    strictly ordered pairs, or that the pairs do not overlap, or
    that the lower- and upper-limits are in increasing numerical
    order. That is, the expression '5:7,:2,4:6,5:3' is allowed.

    Examples
    --------

    >>> parse_expr(':2, 4 : 5 ,7:8,10:')
    [(None, 2.0), (4.0, 5.0), (7.0, 8.0), (10.0, None)]

    >>> parse_expr('4')
    [(4.0, 4.0)]
    """
    if expr is None or str(expr).strip() == '':
        return [(None, None)]

    def convert(token, label):
        # Empty strings mean "no limit"; anything else must be a
        # valid float.
        if token is None:
            return None
        try:
            return float(token)
        except ValueError:
            raise TypeError(f"Invalid {label} bound '{token}'") from None

    out = []
    for chunk in str(expr).strip().split(','):
        parts = chunk.strip().split(':')
        nparts = len(parts)
        if nparts == 1:
            # A single value is both the lower and upper limit.
            lostr = parts[0] if parts[0] != '' else None
            histr = lostr
        elif nparts == 2:
            lostr = parts[0] if parts[0] != '' else None
            histr = parts[1] if parts[1] != '' else None
        else:
            # This check exited but was never hit due to the way the
            # code was written. It now errors out if a user gives
            # a:b:c, whereas the old version would have just ignored
            # the ':c' part. Perhaps we should just keep dropping
            # it, in case there's existing code that assumes this?
            #
            raise TypeError("interval syntax requires a tuple, 'lo:hi'")

        out.append((convert(lostr, "lower"), convert(histr, "upper")))

    return out
def calc_total_error(staterror=None, syserror=None):
    """Add statistical and systematic errors in quadrature.

    Parameters
    ----------
    staterror : array, optional
        The statistical error, or ``None``.
    syserror : array, optional
        The systematic error, or ``None``.

    Returns
    -------
    error : array or ``None``
        The errors, added in quadrature. If both ``staterror`` and
        ``syserror`` are ``None`` then the return value is ``None``.
    """
    # Guard clauses: with one (or both) terms missing there is
    # nothing to combine.
    if staterror is None:
        return syserror
    if syserror is None:
        return staterror

    return np.sqrt(staterror * staterror + syserror * syserror)
def quantile(sorted_array, f):
    """Return the quantile element from sorted_array, where f is [0,1]
    using linear interpolation.

    Based on the description of the GSL routine
    gsl_stats_quantile_from_sorted_data - e.g.
    http://www.gnu.org/software/gsl/manual/html_node/Median-and-Percentiles.html
    but all errors are my own.

    sorted_array is assumed to be 1D and sorted.
    """
    sorted_array = np.asarray(sorted_array)

    if len(sorted_array.shape) != 1:
        raise RuntimeError("Error: input array is not 1D")
    n = sorted_array.size

    q = (n - 1) * f
    i = int(np.floor(q))

    # Guard the upper boundary: for f == 1 the interpolation below
    # would index one element past the end of the array, even though
    # that element's weight (delta) is zero.
    if i >= n - 1:
        return 1.0 * sorted_array[n - 1]

    delta = q - i

    return (1.0 - delta) * sorted_array[i] + delta * sorted_array[i + 1]


def get_error_estimates(x, sorted=False):
    """Compute the median and (-1,+1) sigma values for the data.

    Parameters
    ----------
    x : array of numbers
        The input values.
    sorted : bool, optional
        If ``False``, the default, then ``x`` is assumed to not be sorted.

    Returns
    -------
    (median, lsig, usig)
        The median, value that corresponds to -1 sigma, and value that
        is +1 sigma, for the input distribution.

    Examples
    --------
    >>> (m, l, h) = get_error_estimates(x)
    """
    xs = np.asarray(x)
    if not sorted:
        # Sort a copy: np.asarray does not copy an ndarray input, so
        # an in-place sort here would silently reorder the caller's
        # data.
        xs = np.sort(xs)

    # Fraction of a normal distribution within +/- 1 sigma.
    sigfrac = 0.682689
    median = quantile(xs, 0.5)
    lval = quantile(xs, (1 - sigfrac) / 2.0)
    hval = quantile(xs, (1 + sigfrac) / 2.0)

    return (median, lval, hval)
def multinormal_pdf(x, mu, sigma):
    """The PDF of a multivariate-normal distribution.

    Evaluate the probability density function of a multivariate
    normal [1]_ distribution at the point x.

    Parameters
    ----------
    x : array
        An array of length k.
    mu : array
        An array of length k.
    sigma : array
        A matrix of size (k,k). It must be symmetric and positive-definite.

    See Also
    --------
    multit_pdf

    References
    ----------

    .. [1] https://en.wikipedia.org/wiki/Multivariate_normal_distribution

    """
    x = np.asarray(x)
    mu = np.asarray(mu)
    sigma = np.asarray(sigma)
    if x.size != mu.size:
        raise TypeError("x and mu sizes do not match")
    if mu.size != sigma.diagonal().size:
        raise TypeError("sigma shape does not match x")
    if np.min(np.linalg.eigvalsh(sigma)) <= 0:
        raise ValueError("sigma is not positive definite")
    if np.max(np.abs(sigma - sigma.T)) >= 1.e-9:
        raise ValueError("sigma is not symmetric")

    ndim = mu.size
    # Normalization term: (2 pi)^(k/2) sqrt(|det(sigma)|).
    norm = (np.power(2.0 * np.pi, ndim / 2.0) *
            np.sqrt(np.abs(np.linalg.det(sigma))))

    # The matrix multiplication looks backwards, but mu and x
    # are passed in already transposed.
    #
    #  mu = [[a,b,c]]
    #  x  = [[d,e,f]]
    #
    diff = np.asarray(x - mu)
    invsigma = np.asarray(np.linalg.inv(sigma))
    mahalanobis = (diff @ invsigma) @ diff.T

    return float(np.exp(-0.5 * mahalanobis) / norm)
def multit_pdf(x, mu, sigma, dof):
    """The PDF of a multivariate student-t distribution.

    Evaluate the probability density function of a multivariate
    student-t [1]_ distribution at the point x.

    Parameters
    ----------
    x : array
        An array of length k.
    mu : array
        An array of length k.
    sigma : array
        A matrix of size (k,k). It must be symmetric and positive-definite.
    dof : int

    See Also
    --------
    multinormal_pdf

    References
    ----------

    .. [1] https://en.wikipedia.org/wiki/Multivariate_Student_distribution

    """
    nu = float(dof)
    x = np.asarray(x)
    mu = np.asarray(mu)
    sigma = np.asarray(sigma)

    if x.size != mu.size:
        raise TypeError("x and mu sizes do not match")
    if mu.size != sigma.diagonal().size:
        raise TypeError("sigma shape does not match x")
    if np.min(np.linalg.eigvalsh(sigma)) <= 0:
        raise ValueError("sigma is not positive definite")
    if np.max(np.abs(sigma - sigma.T)) >= 1.e-9:
        raise ValueError("sigma is not symmetric")

    ndim = mu.size
    nup = nu + ndim

    # Normalization: Gamma((nu + k)/2) /
    #   (Gamma(nu/2) nu^(k/2) pi^(k/2) sqrt(|det(sigma)|))
    norm = (gamma(nup / 2.0) /
            (gamma(nu / 2.0) * np.power(nu, ndim / 2.0) *
             np.power(np.pi, ndim / 2.0) *
             np.sqrt(np.abs(np.linalg.det(sigma)))))

    # The matrix multiplication looks backwards, but mu and x
    # are passed in already transposed.
    #
    #  mu = [[a,b,c]]
    #  x  = [[d,e,f]]
    #
    diff = np.asarray(x - mu)
    invsigma = np.asarray(np.linalg.inv(sigma))
    quad = 1.0 + 1.0 / nu * ((diff @ invsigma) @ diff.T)

    return float(norm * np.power(quad, -nup / 2.0))
def dataspace1d(start, stop, step=1, numbins=None):
    """
    Populates an integrated grid

    if numbins is None (default) -> numpy.arange(start,stop,step)

    if numbins is not None -> numpy.linspace(start, stop, numbins)

    """
    if start >= stop:
        raise TypeError(f"input should be start < stop, found start={start} stop={stop}")

    if numbins is None:
        # Step-based grid: validate the step before building the
        # edges with sao_arange.
        if step <= 0:
            raise TypeError(f"input should be step > 0, found step={step}")

        if step >= (stop - start):
            raise TypeError(
                f"input has produced less than 2 bins, found start={start} stop={stop} step={step}")

        edges = sao_arange(start, stop, step)
    else:
        # Bin-count grid: numbins bins require numbins + 1 edges.
        if numbins <= 1:
            raise TypeError(
                f"input should be numbins > 1, found numbins={numbins}")

        edges = np.linspace(start, stop, numbins + 1)

    # Convert the edge array into the low/high edges of each bin,
    # with a zeroed dependent axis.
    xlo = np.array(edges[:-1])
    xhi = np.array(edges[1:])
    return xlo, xhi, np.zeros(len(xlo), dtype=float)
def dataspace2d(dim):
    """
    Populates a blank image dataset
    """
    if not np.iterable(dim):
        raise TypeError("dim must be an array of dimensions")

    if len(dim) < 2:
        raise TypeError("dimensions for dataspace2d must be > 1")

    if dim[0] < 1 or dim[1] < 1:
        raise TypeError(f"dimensions should be > 0, found dim0 {dim[0]} dim1 {dim[1]}")

    # Axis values are 1-based pixel coordinates.
    axis0 = 1.0 + np.arange(dim[0], dtype=float)
    axis1 = 1.0 + np.arange(dim[1], dtype=float)

    grid0, grid1 = np.meshgrid(axis0, axis1)
    # Record the 2D shape before flattening the coordinate arrays.
    shape = tuple(grid0.shape)

    return grid0.ravel(), grid1.ravel(), np.zeros(np.prod(dim)), shape
def histogram1d(x, x_lo, x_hi):
    """Create a 1D histogram from a sequence of samples.

    See the `numpy.histogram` routine for a version with more options.

    .. versionchanged:: 4.15.0
       The x_lo and x_hi arguments are no-longer changed (sorted) by
       this routine.

    Parameters
    ----------
    x : sequence of numbers
        The array of samples
    x_lo : sequence of numbers
        The lower-edges of each bin.
    x_hi : sequence of numbers
        The upper-edges of each bin, which must be the same size
        as ``x_lo``.

    Returns
    -------
    y : NumPy array
        The number of samples in each histogram bin defined by
        the ``x_lo`` and ``x_hi`` arrays.

    Examples
    --------

    Calculate the histogram of 1000 random values over [0, 1):

    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> x = rng.random(1000)
    >>> edges = np.linspace(0, 1, 11)
    >>> y = histogram1d(x, edges[:-1], edges[1:])
    """
    # np.sort returns a sorted copy, so the caller's arrays are left
    # untouched.
    lo_edges = np.sort(np.asarray(x_lo))
    hi_edges = np.sort(np.asarray(x_hi))

    return hist1d(np.asarray(x), lo_edges, hi_edges)
def histogram2d(x, y, x_grid, y_grid):
    """Create 2D histogram from a sequence of samples.

    See the `numpy.histogram2d` routine for a version with more options.

    .. versionchanged:: 4.15.0
       The x_grid and y_grid arguments are no-longer changed (sorted)
       by this routine.

    Parameters
    ----------
    x : sequence of numbers
        The array of samples (X coordinate)
    y : sequence of numbers
        The array of samples (Y coordinate), which must have the same
        size as the ``x`` sequence.
    x_grid : sequence of numbers
        The X bin edges.
    y_grid : sequence of numbers
        The Y bin edges.

    Returns
    -------
    y : NumPy array
        The number of samples in each histogram bin defined by
        the ``x_grid`` and ``y_grid`` arrays.

    Examples
    --------

    Given a list of coordinates (``xvals``, ``yvals``), bin
    them up to match a 5 by 10 pixel image data space:

    >>> dataspace2d([5, 10])
    >>> (xgrid, ygrid) = get_axes()
    >>> n = histogram2d(xvals, yvals, xgrid, ygrid)
    >>> set_dep(n)
    """
    # np.sort returns sorted copies, so the caller's grids are left
    # untouched.
    xg = np.sort(np.asarray(x_grid))
    yg = np.sort(np.asarray(y_grid))

    counts = hist2d(np.asarray(x), np.asarray(y), xg, yg)
    return counts.reshape((xg.size, yg.size))
def interp_util(xout, xin, yin):
    # For each output position find the pair of input points that
    # bracket it. Indices are clamped so positions outside the input
    # range (re)use the first or last input interval.
    nin = len(xin)
    idx = np.searchsorted(xin, xout)
    idx[idx == 0] = 1
    idx[idx == nin] = nin - 1

    return xin[idx - 1], xin[idx], yin[idx - 1], yin[idx]


def linear_interp(xout, xin, yin):
    """Linear one-dimensional interpolation.

    Parameters
    ----------
    xout : array_like
        The positions at which to interpolate.
    xin : array_like
        The x values of the data to interpolate. This must be
        sorted so that it is monotonically increasing.
    yin : array_like
        The y values of the data to interpolate (must be the same
        size as ``xin``).

    Returns
    -------
    yout : NumPy array of numbers
        The interpolated y values (same size as ``xout``).

    See Also
    --------
    interpolate, nearest_interp, neville

    Examples
    --------

    >>> import numpy as np
    >>> x = np.asarray([1.2, 3.4, 4.5, 5.2])
    >>> y = np.asarray([12.2, 14.4, 16.8, 15.5])
    >>> xgrid = np.linspace(2, 5, 5)
    >>> ygrid = linear_interp(xgrid, x, y)
    """
    x0, x1, y0, y1 = interp_util(xout, xin, yin)
    out = (xout - x0) / (x1 - x0) * (y1 - y0) + y0
    if np.isnan(out).any():
        # A repeated value in xin makes x0 == x1 and hence a NaN;
        # fall back to nearest-neighbor interpolation in that case.
        return nearest_interp(xout, xin, yin)

    return out


def nearest_interp(xout, xin, yin):
    """Nearest-neighbor one-dimensional interpolation.

    Parameters
    ----------
    xout : array_like
        The positions at which to interpolate.
    xin : array_like
        The x values of the data to interpolate. This must be
        sorted so that it is monotonically increasing.
    yin : array_like
        The y values of the data to interpolate (must be the same
        size as ``xin``).

    Returns
    -------
    yout : NumPy array of numbers
        The interpolated y values (same size as ``xout``).

    See Also
    --------
    interpolate, linear_interp, neville

    Examples
    --------

    >>> import numpy as np
    >>> x = np.asarray([1.2, 3.4, 4.5, 5.2])
    >>> y = np.asarray([12.2, 14.4, 16.8, 15.5])
    >>> xgrid = np.linspace(2, 5, 5)
    >>> ygrid = nearest_interp(xgrid, x, y)
    """
    x0, x1, y0, y1 = interp_util(xout, xin, yin)
    pick_left = np.abs(xout - x0) < np.abs(xout - x1)
    return np.where(pick_left, y0, y1)


def interpolate(xout, xin, yin, function=linear_interp):
    """One-dimensional interpolation.

    Parameters
    ----------
    xout : array_like
        The positions at which to interpolate.
    xin : array_like
        The x values of the data to interpolate. This must be
        sorted so that it is monotonically increasing.
    yin : array_like
        The y values of the data to interpolate (must be the same
        size as ``xin``).
    function : func, optional
        The function to perform the interpolation. It accepts
        the arguments (xout, xin, yin) and returns the interpolated
        values. The default is to use linear interpolation.

    Returns
    -------
    yout : array_like
        The interpolated y values (same size as ``xout``).

    See Also
    --------
    linear_interp, nearest_interp, neville

    Examples
    --------

    >>> import numpy as np
    >>> x = np.asarray([1.2, 3.4, 4.5, 5.2])
    >>> y = np.asarray([12.2, 14.4, 16.8, 15.5])
    >>> xgrid = np.linspace(2, 5, 5)
    >>> ygrid = interpolate(xgrid, x, y)

    Use Neville's algorithm instead of the default:

    >>> ygrid = interpolate(xgrid, x, y, neville)
    """
    if not callable(function):
        raise TypeError(f"input function '{repr(function)}' is not callable")

    return function(xout, xin, yin)
def is_binary_file(filename):
    """Estimate if a file is a binary file.

    Parameters
    ----------
    filename : str
        The name of the file.

    Returns
    -------
    flag : bool
        Returns True if a non-printable character is found in the first
        1024 bytes of the file.

    Notes
    -----
    For this function, "binary" means the file contains a non-ASCII character.
    """
    # Originally "binary" was defined as a character not being in
    # string.printable. With Python 3, we can also use UnicodeDecodeError
    # as an indicator of a "binary" file, but the check against
    # string.printable is kept in, since this is more restrictive
    # than UnicodeDecodeError.
    #
    try:
        with open(filename, 'r') as fh:
            try:
                buffer = fh.readlines(1024)
            except UnicodeDecodeError:
                return True

            # Any non-printable character marks the file as binary;
            # an empty file is treated as text.
            return any(char not in string.printable
                       for line in buffer
                       for char in line)

    except OSError as oe:
        raise IOErr('openfailed', f"unable to open (unknown): {oe}") from oe
################################# Neville2d ###################################
def neville2d(xinterp, yinterp, x, y, fval):
    """Polynomial two-dimensional interpolation using Neville's method.

    The scheme used for interpolation (Neville's method) is described
    at [1]_, where the interpolation is done first over the Y axis
    and then the X axis.

    References
    ----------

    .. [1] http://en.wikipedia.org/wiki/Neville%27s_algorithm

    """
    nrow = fval.shape[0]
    # Interpolate each row along the Y axis, then interpolate the
    # per-row results along the X axis.
    col = np.asarray([neville(yinterp, y, fval[row])
                      for row in range(nrow)])
    return neville(xinterp, x, col)
################################## Hessian ####################################
class NumDeriv:
    """Base class for the numerical-differentiation helpers below.

    Stores the function to differentiate (wrapped so that the number
    of evaluations is counted) and the function value at the
    reference point.
    """

    def __init__(self, func, fval0):
        # func_counter wraps func and exposes the evaluation count
        # through self.nfev.
        self.nfev, self.func = func_counter(func)
        # Function value at the expansion point; may be None for
        # schemes that do not need it.
        self.fval_0 = fval0
class NumDerivCentralOrdinary(NumDeriv):
    """Central-difference approximation to the first derivative.

    Subtracting the Taylor expansions of f(x + h) and f(x - h) gives::

        f'(x) = [f(x + h) - f(x - h)] / (2 h) + O(h^2)

    In addition to the truncation error of order h^2 there is a
    round-off error due to the finite numerical precision,
    ~ r f(x) / h, so the total error behaves like::

        Error ~= r / h + h^2

    Minimizing the error with respect to h gives h ~ r^(1/3).
    """

    def __init__(self, func, fval0=None):
        NumDeriv.__init__(self, func, fval0)

    def __call__(self, x, h):
        # A zero step would divide by zero; signal the failure with inf.
        if 0.0 == h:
            return np.inf
        return (self.func(x + h) - self.func(x - h)) / (2.0 * h)
class NumDerivFowardPartial(NumDeriv):
    """Forward-difference approximation to a mixed second partial
    derivative d2f / (dx_i dx_j).

    NOTE(review): the class name keeps its original (misspelled)
    form for backward compatibility.
    """

    def __init__(self, func, fval0):
        NumDeriv.__init__(self, func, fval0)

    def __call__(self, x, h, *args):
        # Fall back to a step based on single-precision machine
        # epsilon when no step size was supplied.
        if 0.0 == h:
            h = pow(np.finfo(np.float32).eps, 1.0 / 3.0)
        # args holds the indices of the two parameters being varied.
        ith = args[0]
        jth = args[1]
        ei = np.zeros(len(x), float)
        ej = np.zeros(len(x), float)
        # Scale the step to each parameter's magnitude, guarding
        # against a parameter value of zero.
        deltai = h * abs(x[ith])
        if 0.0 == deltai:
            deltai = h
        ei[ith] = deltai
        deltaj = h * abs(x[jth])
        if 0.0 == deltaj:
            deltaj = h
        ej[jth] = deltaj
        # Forward-difference formula:
        # [f(x) + f(x+ei+ej) - f(x+ei) - f(x+ej)] / (deltai * deltaj)
        #
        # NOTE(review): fval starts from self.fval_0 and is updated
        # with augmented assignment, which would modify fval_0 in
        # place if it were an ndarray; presumably it is always a
        # scalar here — confirm against callers.
        fval = self.fval_0
        fval += self.func(x + ei + ej)
        fval -= self.func(x + ei)
        fval -= self.func(x + ej)
        fval /= deltai * deltaj
        return fval
class NumDerivCentralPartial(NumDeriv):
    """Central-difference approximation to a second partial derivative.

    Adding the Taylor expansions of f(x + h) and f(x - h) and solving
    for the second derivative gives::

        f''(x) = [f(x + h) + f(x - h) - 2 f(x)] / h^2 + O(h^2)

    In addition to the truncation error of order h^2 there is a
    round-off error due to the finite numerical precision,
    ~ r f(x) / h^2, so the total error behaves like::

        Error ~= r / h^2 + h^2

    Minimizing the error with respect to h gives h ~ r^(1/4).
    """

    def __init__(self, func, fval0):
        NumDeriv.__init__(self, func, fval0)

    def __call__(self, x, h, *args):
        # Fall back to a step based on single-precision machine
        # epsilon when no step size was supplied.
        if 0.0 == h:
            h = pow(np.finfo(np.float32).eps, 1.0 / 3.0)
        # args holds the indices of the two parameters being varied.
        ith = args[0]
        jth = args[1]
        ei = np.zeros(len(x), float)
        if ith == jth:
            # Diagonal term: plain second derivative in one parameter,
            # with the step scaled to the parameter's magnitude.
            delta = h * abs(x[ith])
            if 0.0 == delta:
                delta = h
            ei[ith] = delta
            fval = - 2.0 * self.fval_0
            fval += self.func(x + ei) + self.func(x - ei)
            fval /= delta * delta
            return fval
        # Off-diagonal term: four-point central-difference stencil.
        ej = np.zeros(len(x), float)
        deltai = h * abs(x[ith])
        if 0.0 == deltai:
            deltai = h
        ei[ith] = deltai
        deltaj = h * abs(x[jth])
        if 0.0 == deltaj:
            deltaj = h
        ej[jth] = deltaj
        fval = self.func(x + ei + ej)
        fval -= self.func(x + ei - ej)
        fval -= self.func(x - ei + ej)
        fval += self.func(x - ei - ej)
        fval /= (4.0 * deltai * deltaj)
        return fval
class NoRichardsonExtrapolation:
def __init__(self, sequence, verbose=False):
self.sequence = sequence
self.verbose = verbose
def __call__(self, x, t, tol, maxiter, h, *args):
self.sequence(x, h, *args)
class RichardsonExtrapolation(NoRichardsonExtrapolation):
    """Accelerate the convergence of the wrapped sequence.

    Richardson extrapolation combines evaluations at successively
    smaller step sizes (h, h/t, h/t^2, ...) to cancel the leading
    truncation-error terms of the underlying finite-difference
    sequence. In the words of Birkhoff and Rota, "... its usefulness
    for practical computations can hardly be overestimated."

    References
    ----------
    1. Richardson, L. F. (1911). "The approximate arithmetical solution
       by finite differences of physical problems including differential
       equations, with an application to the stresses in a masonry dam".
       Philosophical Transactions of the Royal Society of London, Series A 210.
    2. Richardson, L. F. (1927). "The deferred approach to the limit".
       Philosophical Transactions of the Royal Society of London, Series A 226.
    """
    def __call__(self, x, t, tol, maxiter, h, *args):
        # richardson[ii, jj] holds the jj-th order extrapolant built
        # from sequence evaluations at step sizes h / t**ii.
        richardson = np.zeros((maxiter, maxiter), dtype=np.float64)
        richardson[0, 0] = self.sequence(x, h, *args)
        t_sqr = t * t
        for ii in range(1, maxiter):
            h /= t
            richardson[ii, 0] = self.sequence(x, h, *args)
            ii_1 = ii - 1
            for jj in range(1, ii + 1):
                # jjp1 = jj + 1 -- this variable is not used
                jj_1 = jj - 1
                # Standard Richardson recurrence with ratio t^2.
                factor = pow(t_sqr, jj)
                factor_1 = factor - 1
                richardson[ii, jj] = (factor * richardson[ii, jj_1] -
                                      richardson[ii_1, jj_1]) / factor_1
                # NOTE(review): arg_jj / arg_ii are computed but never
                # used -- presumably leftovers of a removed convergence
                # check; confirm before deleting.
                arg_jj = richardson[ii, jj]
                arg_jj -= richardson[ii, jj_1]
                arg_ii = richardson[ii, jj]
                arg_ii -= richardson[ii_1, jj_1]
            # Converged once successive diagonal entries agree to tol.
            if Knuth_close(richardson[ii, ii],
                           richardson[ii_1, ii_1], tol):
                if self.verbose:
                    print_low_triangle(richardson, jj)
                return richardson[ii, ii]
        if self.verbose:
            print_low_triangle(richardson, maxiter - 1)
        return richardson[maxiter - 1, maxiter - 1]
def hessian(func, par, extrapolation, algorithm, maxiter, h, tol, t):
    """Numerically estimate the Hessian matrix of func at par.

    `algorithm` is a NumDeriv-style class (e.g. NumDerivCentralPartial)
    and `extrapolation` a (No)RichardsonExtrapolation-style class; both
    are instantiated here. Only the lower triangle is computed and the
    matrix is symmetrized by copying.
    """
    num_dif = algorithm(func, func(par))
    deriv = extrapolation(num_dif)
    npar = len(par)
    Hessian = np.zeros((npar, npar), dtype=np.float64)
    for ii in range(npar):
        for jj in range(ii + 1):
            answer = deriv(par, t, tol, maxiter, h, ii, jj)
            # NOTE(review): the division by 2 presumably reflects the
            # fit statistic's definition -- confirm against callers.
            Hessian[ii, jj] = answer / 2.0
            Hessian[jj, ii] = Hessian[ii, jj]
    # NOTE(review): nfev is indexed as a list here, but FuncCounter in
    # this module stores an int; this assumes NumDeriv keeps a
    # list-style counter -- verify against the NumDeriv base class.
    return Hessian, num_dif.nfev[0]
def print_low_triangle(matrix, num):
    """Print the lower triangle of `matrix`, one row per line."""
    for row in range(num):
        # Row `row` contains the elements 0..row; each element is
        # followed by a single space (including the last, matching the
        # historical output format), then a newline.
        print(*(matrix[row, col] for col in range(row + 1)), '', sep=' ')
def symmetric_to_low_triangle(matrix, num):
    """Flatten the lower triangle of a symmetric matrix, row by row."""
    return [matrix[row, col]
            for row in range(num)
            for col in range(row + 1)]
############################### Root of all evil ##############################
def printf(format, *args):
    """Format args with the first argument as format string, and write.
    Return the last arg, or format itself if there are no args."""
    sys.stdout.write(str(format) % args)
    # Bug fix: the previous code called an undefined helper `if_`,
    # raising NameError at runtime. Implement the documented intent
    # directly: the last argument when any were given, else the format.
    return args[-1] if args else format
# With ParamSpec, added in Python 3.10, we might be able to annotate
# this so that the arguments accepted by __call__ match those of the
# wrapped `func`; the generics changes in Python 3.12 may make this
# easier still.
#
T = TypeVar("T")


class FuncCounter(Generic[T]):
    """Wrap a callable and count how many times it is invoked.

    The running total is exposed as the ``nfev`` attribute.

    .. versionadded:: 4.17.0
    """

    __slots__ = ("nfev", "func")

    def __init__(self, func: Callable[..., T]) -> None:
        self.func = func
        self.nfev = 0

    def __call__(self, *args) -> T:
        self.nfev += 1
        return self.func(*args)
def func_counter(func):
    """DEPRECATED.

    .. deprecated:: 4.17.0
       Use the `FuncCounter` class instead.
    """
    # This is FutureWarning rather than DeprecationWarning to make
    # sure users see the message.
    #
    warnings.warn("func_counter is deprecated in 4.17.0: use FuncCounter instead",
                  FutureWarning)
    # Bug fix: nfev was referenced without ever being defined, so any
    # call to the wrapper raised NameError. A one-element list acts as
    # a mutable counter shared between the wrapper and the caller.
    nfev = [0]

    def func_counter_wrapper(x, *args):
        nfev[0] += 1
        return func(x, *args)
    return nfev, func_counter_wrapper
def is_in(arg, seq):
    """DEPRECATED.

    .. deprecated:: 4.17.0
       Use the Python `in` operator instead.
    """
    # FutureWarning (not DeprecationWarning) so users actually see it.
    warnings.warn("is_in is deprecated in 4.17.0: use Python's in instead",
                  FutureWarning)
    # Compare each element with == (unlike `in`, which short-circuits
    # on identity first) to preserve the historical semantics.
    return any(arg == item for item in seq)
def is_iterable(arg):
    """Is the argument iterable (list/tuple/ndarray or numpy-iterable)?"""
    if isinstance(arg, (list, tuple, np.ndarray)):
        return True
    return np.iterable(arg)
# Can this return TypeGuard[Sequence]?
def is_iterable_not_str(arg: Any) -> bool:
    """It is iterable but not a string."""
    if isinstance(arg, str):
        return False
    return isinstance(arg, Iterable)
def is_sequence(start, mid, end):
    """Is the triple strictly increasing (start < mid < end)?"""
    return start < mid and mid < end
def Knuth_close(x, y, tol, myop=operator.__or__):
    """Check whether two floating-point numbers are close together.

    Implements D. E. Knuth's relative-tolerance comparison ("The Art of
    Computer Programming", vol II). With the default ``myop`` of
    ``operator.__or__`` this is the weak "close enough" test::

        |x - y| <= tol * |x|  or  |x - y| <= tol * |y|

    Pass ``operator.__and__`` for the strong "very close" test. Both
    relationships are commutative but not transitive, and the strong
    one implies the weak one.

    See Also
    --------
    sao_fcmp

    References
    ----------
    http://www.boost.org/doc/libs/1_35_0/libs/test/doc/components/test_tools/floating_point_comparison.html#Introduction
    """
    delta = abs(x - y)
    # A relative test makes no sense against zero; fall back to an
    # absolute comparison.
    if x == 0.0 or y == 0.0:
        return delta <= tol
    close_to_x = delta <= tol * abs(x)
    close_to_y = delta <= tol * abs(y)
    return myop(close_to_x, close_to_y)
def safe_div(num, denom):
    """Divide num by denom, clamping instead of overflowing or
    underflowing: the result is capped at the largest representable
    double and flushed to 0 when it would underflow."""
    dbl_max = sys.float_info.max
    dbl_min = sys.float_info.min
    # Overflow guard: the quotient would exceed the largest double.
    if denom < 1 and num > denom * dbl_max:
        return dbl_max
    # Underflow guard (or an exactly-zero numerator).
    if num == 0.0 or (denom > 1 and num < denom * dbl_min):
        return 0
    return num / denom
def Knuth_boost_close(x, y, tol, myop=operator.__or__):
    """Knuth's floating-point closeness test, boost-style guarded form.

    Same contract as `Knuth_close`, but the relative differences are
    formed as quotients (via `safe_div`) rather than products::

        |x - y| / |x| <= tol   myop   |x - y| / |y| <= tol

    so the comparison cannot overflow or underflow for extreme values.
    With the default ``myop`` of ``operator.__or__`` this is the weak
    "close enough" test; pass ``operator.__and__`` for the strong
    "very close" test. If either value is exactly zero the comparison
    degenerates to ``|x - y| <= tol``.

    References
    ----------
    http://www.boost.org/doc/libs/1_35_0/libs/test/doc/components/test_tools/floating_point_comparison.html#Introduction
    """
    delta = abs(x - y)
    if x == 0.0 or y == 0.0:
        return delta <= tol
    rel_x = safe_div(delta, x)
    rel_y = safe_div(delta, y)
    return myop(rel_x <= tol, rel_y <= tol)
def list_to_open_interval(arg):
    """Format a two-element interval as '(lo, hi)'; pass scalars through."""
    if np.iterable(arg):
        return f'({arg[0]:e}, {arg[1]:e})'
    return arg
class OutOfBoundErr(Exception):
    """Indicate an out-of-bounds exception in the error analysis"""
    # Raised by statistic callbacks and caught by the root finders
    # below to abort a search gracefully.
    # Should this just move to sherpa.estmethods?
    pass
class QuadEquaRealRoot:
    """Solve for the real roots of the quadratic equation::

        a * x^2 + b * x + c = 0

    Calling the instance returns a two-element list of roots;
    entries are ``None`` when no (unique) real root exists.
    """

    def __call__(self, a, b, c):
        if 0.0 == a:
            # Degenerate case: b * x + c = 0.
            if 0.0 != b:
                # Linear equation; this works even when c == 0.
                answer = - c / b
                return [answer, answer]
            # a == 0 and b == 0: if c == 0 every x is a solution, so
            # returning a single root would be wrong; if c != 0 there
            # is no root at all. Report "no unique root" either way.
            return [None, None]
        if 0.0 == b:
            # a * x^2 + c = 0  =>  x^2 = -c / a.
            if 0.0 == c:
                return [0.0, 0.0]
            if np.sign(a) == np.sign(c):
                # -c/a < 0: no real roots.
                return [None, None]
            # Bug fix: the radicand must be -c/a (positive here);
            # sqrt(c/a) took the square root of a negative number and
            # silently returned nan.
            answer = np.sqrt(- c / a)
            return [-answer, answer]
        if 0.0 == c:
            # a * x^2 + b * x = 0  =>  x (a x + b) = 0.
            return [0.0, - b / a]
        # General case: numerically stable form that avoids the
        # subtractive cancellation of the textbook quadratic formula.
        discriminant = b * b - 4.0 * a * c
        debug("disc=%s", discriminant)
        sqrt_disc = np.sqrt(discriminant)
        t = - (b + np.sign(b) * sqrt_disc) / 2.0
        return [c / t, t / a]
def bisection(fcn, xa, xb, fa=None, fb=None, args=(), maxfev=48, tol=1.0e-6):
    """A basic root finding algorithm that uses standard bisection.

    Bisection is a relatively slow method for root finding, but it is
    guaranteed to work for a continuous function with a root in a
    bracketed interval; in other words the function must undergo a sign
    change between the bracketing values. See
    https://en.wikipedia.org/wiki/Bisection_method for a description.

    Parameters
    ----------
    fcn : callable
        The function with a root. The function signature is ``fcn(x, *args)``.
    xa : float
        Lower limit of the bracketing interval
    xb : float
        Upper limit of the bracketing interval
    fa : float or None
        Function value at ``xa``; computed here when not supplied.
    fb : float or None
        Function value at ``xb``; computed here when not supplied.
    args : tuple
        Additional parameters that will be passed through to ``fcn``.
    maxfev : int
        Maximal number of function evaluations
    tol : float
        The root finding algorithm stops if a value x with
        ``abs(fcn(x)) < tol`` is found.

    Returns
    -------
    out : list
        ``[[x, fcn(x)], [[x1, fcn(x1)], [x2, fcn(x2)]], nfev]`` where
        ``x`` is the root location, ``x1``/``x2`` the final bracket and
        ``nfev`` the total number of function evaluations. Unavailable
        values are returned as ``None``.
    """
    # Count every evaluation so the maxfev budget can be enforced.
    myfcn = FuncCounter(fcn)
    try:
        if fa is None:
            fa = myfcn(xa, *args)
        # Either endpoint may already satisfy the convergence criterion.
        if abs(fa) <= tol:
            return [[xa, fa], [[xa, fa], [xa, fa]], myfcn.nfev]
        if fb is None:
            fb = myfcn(xb, *args)
        if abs(fb) <= tol:
            return [[xb, fb], [[xb, fb], [xb, fb]], myfcn.nfev]
        # The root must be bracketed: f changes sign over [xa, xb].
        if np.sign(fa) == np.sign(fb):
            # TODO: is this a useful message for the user?
            warning('%s: %s fa * fb < 0 is not met', __name__, fcn.__name__)
            return [[None, None], [[None, None], [None, None]],
                    myfcn.nfev]
        while myfcn.nfev < maxfev:
            if abs(fa) > tol and abs(fb) > tol:
                # Halve the bracket, keeping the sign change inside it.
                xc = (xa + xb) / 2.0
                fc = myfcn(xc, *args)
                # Bracket has shrunk below the resolution limit.
                if abs(xa - xb) < min(tol * abs(xb), tol / 10.0):
                    return [[xc, fc], [[xa, fa], [xb, fb]], myfcn.nfev]
                if np.sign(fa) != np.sign(fc):
                    xb, fb = xc, fc
                else:
                    xa, fa = xc, fc
            else:
                # One of the bracket endpoints has converged.
                if abs(fa) <= tol:
                    return [[xa, fa], [[xa, fa], [xb, fb]], myfcn.nfev]
                return [[xb, fb], [[xa, fa], [xb, fb]], myfcn.nfev]
        # Evaluation budget exhausted: return the current midpoint.
        xc = (xa + xb) / 2.0
        fc = myfcn(xc, *args)
        return [[xc, fc], [[xa, fa], [xb, fb]], myfcn.nfev]
    except OutOfBoundErr:
        return [[None, None], [[xa, fa], [xb, fb]], myfcn.nfev]
# Is this used at all?
def quad_coef(x, f):
    """Monomial coefficients of the interpolating quadratic.

    Expands the Newton form

        p(t) = f(xc) + A (t - xc) + B (t - xc)(t - xb)

    into ``[B, C - 2 B xc, f(xc) + xc (B xc - C)]`` -- the
    coefficients of t^2, t and 1 -- where ``[B, C]`` come from
    `transformed_quad_coef` and xc = x[2].
    """
    B, C = transformed_quad_coef(x, f)
    B_xc = B * x[2]
    return [B, C - 2 * B_xc, f[2] + x[2] * (B_xc - C)]
def transformed_quad_coef(x, f):
    """Return ``[B, C]`` for the quadratic through three points.

    The interpolating quadratic through (x[0], f[0]), (x[1], f[1]) and
    (x[2], f[2]) is written about xc = x[2] as::

        p(t) = f(xc) + C (t - xc) + B (t - xc)^2

    where A is the divided difference over the last interval, B the
    second divided difference over all three points, and
    C = A + B (xc - xb). Muller's method uses these coefficients with
    the rationalized quadratic formula::

        t = xc - 2 f(xc) / ( C + sgn(C) sqrt( C^2 - 4 f(xc) B ) )

    choosing the sign that maximizes the denominator to avoid
    subtractive cancellation.

    Note: the abscissae must be pairwise distinct, otherwise a
    ZeroDivisionError is raised.
    """
    xa, xb, xc = x[0], x[1], x[2]
    fa, fb, fc = f[0], f[1], f[2]
    # First divided difference over the most recent interval.
    A = (fc - fb) / (xc - xb)
    # Second divided difference over all three points.
    B = (A - (fb - fa) / (xb - xa)) / (xc - xa)
    # Slope of p at xc.
    C = A + B * (xc - xb)
    return [B, C]
def _get_discriminant(xa, xb, xc, fa, fb, fc):
    """Return (B, C, discriminant) for Muller's quadratic through the
    three points, clamping the discriminant at zero so callers can take
    its square root unconditionally."""
    B, C = transformed_quad_coef([xa, xb, xc], [fa, fb, fc])
    disc = C * C - 4.0 * fc * B
    return B, C, max(disc, 0.0)
def demuller(fcn, xa, xb, xc, fa=None, fb=None, fc=None, args=(),
             maxfev=32, tol=1.0e-6):
    """A root-finding algorithm using Muller's method.

    Each step fits a quadratic through the three most recent points and
    takes its root -- in the rationalized, numerically stable form -- as
    the next iterate; see
    https://en.wikipedia.org/wiki/Muller%27s_method. The convergence
    rate is superlinear, of order ~1.839286755.

    Parameters
    ----------
    fcn : callable
        The function with a root. The function signature is ``fcn(x, *args)``.
    xa, xb, xc : float
        Muller's method requires three initial values.
    fa, fb, fc : float or None
        Function values at ``xa``, ``xb`` and ``xc``; computed here when
        not supplied.
    args : tuple
        Additional parameters that will be passed through to ``fcn``.
    maxfev : int
        Maximal number of function evaluations
    tol : float
        The root finding algorithm stops if a value x with
        ``abs(fcn(x)) < tol`` is found.

    Returns
    -------
    out : list
        ``[[x, fcn(x)], [[x1, fcn(x1)], [x2, fcn(x2)]], nfev]`` where
        ``x`` is the root location and ``nfev`` the total number of
        function evaluations. Unavailable values are ``None``.
    """
    def is_nan(arg):
        # Belt-and-braces NaN test that copes with plain floats and
        # numpy scalars alike.
        if arg != arg:
            return True
        if arg is np.nan:
            return True
        return np.isnan(arg)

    myfcn = FuncCounter(fcn)
    # Bug fix: xd/fd are referenced by the post-loop and exception
    # returns; initialize them so those paths report "no root found"
    # instead of raising UnboundLocalError when no iteration completed.
    xd = fd = None
    try:
        if fa is None:
            fa = myfcn(xa, *args)
        if abs(fa) <= tol:
            return [[xa, fa], [[xa, fa], [xa, fa]], myfcn.nfev]
        if fb is None:
            fb = myfcn(xb, *args)
        if abs(fb) <= tol:
            return [[xb, fb], [[xb, fb], [xb, fb]], myfcn.nfev]
        if fc is None:
            fc = myfcn(xc, *args)
        if abs(fc) <= tol:
            return [[xc, fc], [[xc, fc], [xc, fc]], myfcn.nfev]
        while myfcn.nfev < maxfev:
            B, C, discriminant = _get_discriminant(xa, xb, xc, fa, fb, fc)
            # Give up if the quadratic coefficients are not finite or
            # the stabilized denominator vanishes.
            if is_nan(B) or is_nan(C) or \
                    0.0 == C + np.sign(C) * np.sqrt(discriminant):
                return [[None, None], [[None, None], [None, None]],
                        myfcn.nfev]
            # Rationalized quadratic-formula step about xc.
            xd = xc - 2.0 * fc / (C + np.sign(C) * np.sqrt(discriminant))
            fd = myfcn(xd, *args)
            if abs(fd) <= tol:
                return [[xd, fd], [[None, None], [None, None]],
                        myfcn.nfev]
            # Shift the three-point window towards the new iterate.
            xa = xb
            fa = fb
            xb = xc
            fb = fc
            xc = xd
            fc = fd
        # Evaluation budget exhausted: return the last iterate.
        return [[xd, fd], [[None, None], [None, None]],
                myfcn.nfev]
    except ZeroDivisionError:
        # Degenerate abscissae in the divided differences.
        return [[xd, fd], [[None, None], [None, None]],
                myfcn.nfev]
def new_muller(fcn, xa, xb, fa=None, fb=None, args=(), maxfev=32, tol=1.e-6):
    """Alternative implementation of Muller's method for root finding.

    Unlike `demuller` this variant keeps the root bracketed: each step
    bisects the interval, takes a Muller step from the midpoint, and
    then shrinks the bracket using the sign pattern of the candidates.

    Parameters
    ----------
    fcn : callable
        The function with a root. The function signature is ``fcn(x, *args)``.
    xa, xb : float
        Lower and upper limits of the bracketing interval.
    fa, fb : float or None
        Function values at ``xa`` and ``xb``; computed here when not
        supplied.
    args : tuple
        Additional parameters that will be passed through to ``fcn``.
    maxfev : int
        Maximal number of function evaluations
    tol : float
        The root finding algorithm stops if a value x with
        ``abs(fcn(x)) < tol`` is found.

    Returns
    -------
    out : list
        ``[[x, fcn(x)], [[x1, fcn(x1)], [x2, fcn(x2)]], nfev]`` where
        ``x`` is the root location, ``x1``/``x2`` the final bracket and
        ``nfev`` the total number of function evaluations. Unavailable
        values are ``None``.
    """
    myfcn = FuncCounter(fcn)
    # Bug fix: xd/fd are referenced by the exception and post-loop
    # returns; initialize them so an early failure (e.g. OutOfBoundErr
    # on the first evaluation) does not raise UnboundLocalError.
    xd = fd = None
    try:
        if fa is None:
            fa = myfcn(xa, *args)
        if abs(fa) <= tol:
            return [[xa, fa], [[xa, fa], [xa, fa]], myfcn.nfev]
        if fb is None:
            fb = myfcn(xb, *args)
        if abs(fb) <= tol:
            return [[xb, fb], [[xb, fb], [xb, fb]], myfcn.nfev]
        # The root must be bracketed by [xa, xb].
        if np.sign(fa) == np.sign(fb):
            warning('%s: %s fa * fb < 0 is not met', __name__, fcn.__name__)
            return [[None, None], [[None, None], [None, None]],
                    myfcn.nfev]
        while myfcn.nfev < maxfev:
            # Bisection point gives the third abscissa for the quadratic.
            xc = (xa + xb) / 2.0
            fc = myfcn(xc, *args)
            if abs(fc) <= tol:
                return [[xc, fc], [[xa, fa], [xb, fb]], myfcn.nfev]
            B, C, discriminant = _get_discriminant(xa, xb, xc, fa, fb, fc)
            # Rationalized Muller step from the midpoint.
            xd = xc - 2.0 * fc / (C + np.sign(C) * np.sqrt(discriminant))
            fd = myfcn(xd, *args)
            if abs(fd) <= tol:
                return [[xd, fd], [[xa, fa], [xb, fb]], myfcn.nfev]
            # Shrink the bracket, preferring whichever sub-interval
            # still contains the sign change.
            if np.sign(fa) != np.sign(fc):
                xb, fb = xc, fc
                continue
            if np.sign(fd) != np.sign(fc) and xc < xd:
                xa, fa = xc, fc
                xb, fb = xd, fd
                continue
            if np.sign(fb) != np.sign(fd):
                xa, fa = xd, fd
                continue
            if np.sign(fa) != np.sign(fd):
                xb, fb = xd, fd
                continue
            if np.sign(fc) != np.sign(fd) and xd < xc:
                xa, fa = xd, fd
                xb, fb = xc, fc
                continue
            if np.sign(fc) != np.sign(fd):
                xa, fa = xc, fc
                continue
        # Evaluation budget exhausted: return the last Muller iterate.
        return [[xd, fd], [[xa, fa], [xb, fb]], myfcn.nfev]
    except (ZeroDivisionError, OutOfBoundErr):
        return [[xd, fd], [[xa, fa], [xb, fb]], myfcn.nfev]
#
# /*
# * Licensed to the Apache Software Foundation (ASF) under one or more
# * contributor license agreements. See the NOTICE file distributed with
# * this work for additional information regarding copyright ownership.
# * The ASF licenses this file to You under the Apache License, Version 2.0
# * (the "License"); you may not use this file except in compliance with
# * the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# */
#
def apache_muller(fcn, xa, xb, fa=None, fb=None, args=(), maxfev=32,
                  tol=1.0e-6):
    """An alternative implementation of Muller's method for root finding.

    Unlike the rest of Sherpa, this method is available under the
    Apache Software Foundation (ASF) licence -- see the comment block
    above this function for details. The algorithm combines Muller
    steps with safeguarded bisection so the iterate always stays inside
    the bracketing interval, and it tracks the best (smallest |f|)
    point seen so far.

    Parameters
    ----------
    fcn : callable
        The function with a root. The function signature is ``fcn(x, *args)``.
    xa, xb : float
        Lower and upper limits of the bracketing interval.
    fa, fb : float or None
        Function values at ``xa`` and ``xb``; computed here when not
        supplied.
    args : tuple
        Additional parameters that will be passed through to ``fcn``.
    maxfev : int
        Maximal number of function evaluations
    tol : float
        The root finding algorithm stops if a value x with
        ``abs(fcn(x)) < tol`` is found.

    Returns
    -------
    out : list
        ``[[x, fcn(x)], [[x1, fcn(x1)], [x2, fcn(x2)]], nfev]`` where
        ``x`` is the root location, ``x1``/``x2`` the final bracket and
        ``nfev`` the total number of function evaluations. Unavailable
        values are ``None``.
    """
    myfcn = FuncCounter(fcn)
    # Bug fix: xbest/fbest are referenced in the exception handler;
    # initialize them so a failure before they are first assigned
    # (e.g. OutOfBoundErr on the first evaluation) does not raise
    # UnboundLocalError.
    xbest = fbest = None
    try:
        if fa is None:
            fa = myfcn(xa, *args)
        if abs(fa) <= tol:
            return [[xa, fa], [[xa, fa], [xa, fa]], myfcn.nfev]
        if fb is None:
            fb = myfcn(xb, *args)
        if abs(fb) <= tol:
            return [[xb, fb], [[xb, fb], [xb, fb]], myfcn.nfev]
        # The root must be bracketed by [xa, xb].
        if np.sign(fa) == np.sign(fb):
            warning('%s: %s fa * fb < 0 is not met', __name__, fcn.__name__)
            return [[None, None], [[None, None], [None, None]],
                    myfcn.nfev]
        xc = (xa + xb) / 2.0
        fc = myfcn(xc, *args)
        if abs(fc) <= tol:
            return [[xc, fc], [[xc, fc], [xc, fc]], myfcn.nfev]
        # Track the point with the smallest |f| seen so far, so the
        # budget-exhausted return is as good as possible.
        xbest, fbest = xa, fa
        if abs(fb) < abs(fa):
            xbest, fbest = xb, fb
        if abs(fc) < abs(fbest):
            xbest, fbest = xc, fc
        oldx = 1.0e128
        while myfcn.nfev < maxfev:
            B, C, discriminant = _get_discriminant(xa, xb, xc, fa, fb, fc)
            den = np.sign(C) * np.sqrt(discriminant)
            # Both roots of the local quadratic; prefer the one that
            # lies inside the bracket.
            xplus = xc - 2.0 * fc / (C + den)
            if C != den:
                xminus = xc - 2.0 * fc / (C - den)
            else:
                xminus = 1.0e128
            if is_sequence(xa, xplus, xb):
                x = xplus
            else:
                x = xminus
            # Sanity check: fall back to bisection when neither root is
            # inside the bracket.
            if not is_sequence(xa, x, xb):
                x = (xa + xb) / 2.0
            y = myfcn(x, *args)
            if abs(y) < abs(fbest):
                xbest, fbest = x, y
            tolerance = min(tol * abs(x), tol)
            if abs(y) <= tol or abs(x - oldx) <= tolerance:
                return [[x, y], [[xa, fa], [xb, fb]], myfcn.nfev]
            # Force a bisection step when the bracket is collapsing too
            # slowly onto one end (or the Muller step stalled at xc).
            mybisect = (x < xc and (xc - xa) > 0.95 * (xb - xa)) or \
                       (x > xc and (xb - xc) > 0.95 * (xb - xa)) or \
                       (x == xc)
            if not mybisect:
                if x > xc:
                    xa = xc
                    fa = fc
                if x < xc:
                    xb = xc
                    fb = fc
                xc, fc = x, y
                oldx = x
            else:
                xmid = (xa + xb) / 2.0
                fmid = myfcn(xmid, *args)
                if abs(fmid) < abs(fbest):
                    xbest, fbest = xmid, fmid
                if abs(fmid) <= tol:
                    return [[xmid, fmid], [[xa, fa], [xb, fb]], myfcn.nfev]
                # Keep the half-interval that still brackets the root.
                if np.sign(fa) + np.sign(fmid) == 0:
                    xb = xmid
                    fb = fmid
                else:
                    xa = xmid
                    fa = fmid
                xc = (xa + xb) / 2.0
                fc = myfcn(xc, *args)
                if abs(fc) < abs(fbest):
                    xbest, fbest = xc, fc
                if abs(fc) <= tol:
                    return [[xc, fc], [[xa, fa], [xb, fb]], myfcn.nfev]
                oldx = 1.0e128
        #
        # maxfev has exceeded, return the minimum so far
        #
        return [[xbest, fbest], [[xa, fa], [xb, fb]], myfcn.nfev]
    #
    # Something drastic has happened
    #
    except (ZeroDivisionError, OutOfBoundErr):
        return [[xbest, fbest], [[xa, fa], [xb, fb]], myfcn.nfev]
def zeroin(fcn, xa, xb, fa=None, fb=None, args=(), maxfev=32, tol=1.0e-2):
    """Obtain a zero of a function of one variable using Brent's root finder.

    Return an approximate location for the root with accuracy::

        4*DBL_EPSILON*abs(x) + tol

    using the algorithm from [1]_.

    References
    ----------
    .. [1] G.Forsythe, M.Malcolm, C.Moler, Computer methods for mathematical
           computations. M., Mir, 1980, p.180 of the Russian edition

    Notes
    -----
    The function makes use of a bisection procedure combined with
    a linear or quadratic inverse interpolation.
    At each step the code operates on three abscissae - a, b, and c:

    - b - the last and the best approximation to the root
    - a - the last but one approximation
    - c - the last but one or even an earlier approximation such that:

        1) ``|f(b)| <= |f(c)|``
        2) f(b) and f(c) have opposite signs, i.e. b and c encompass
           the root

    Given these abscissae, the code computes two new approximations,
    one by bisection and the other by interpolation (quadratic when
    a, b and c are all different, linear otherwise). If the
    interpolated point looks reasonable (falls within [b, c], not too
    close to the end points), it is accepted; otherwise the bisection
    result is used.

    Parameters
    ----------
    fcn : callable
        The function with a root. The function signature is ``fcn(x, *args)``.
    xa : float
        Lower limit of the bracketing interval
    xb : float
        Upper limit of the bracketing interval
    fa : float or None
        Function value at ``xa``; computed here when not supplied.
    fb : float or None
        Function value at ``xb``; computed here when not supplied.
    args : tuple
        Additional parameters that will be passed through to ``fcn``.
    maxfev : int
        Maximal number of function evaluations
    tol : float
        The root finding algorithm stops if a value x with
        ``abs(fcn(x)) < tol`` is found.

    Returns
    -------
    out : list
        ``[[x, fcn(x)], [[x1, fcn(x1)], [x2, fcn(x2)]], nfev]`` where
        ``x`` is the root location, ``x1``/``x2`` the companion points
        and ``nfev`` the total number of function evaluations.
        Unavailable values are ``None``.
    """
    myfcn = FuncCounter(fcn)
    # Bug fix: xc/fc are referenced by the exception handler; initialize
    # them so a failure before `xc = xa` below (e.g. OutOfBoundErr on
    # the first evaluation) does not raise UnboundLocalError.
    xc = fc = None
    try:
        if fa is None:
            fa = myfcn(xa, *args)
        if abs(fa) <= tol:
            return [[xa, fa], [[xa, fa], [xb, fb]], myfcn.nfev]
        if fb is None:
            fb = myfcn(xb, *args)
        if abs(fb) <= tol:
            return [[xb, fb], [[xa, fa], [xb, fb]], myfcn.nfev]
        # The root must be bracketed by [xa, xb].
        if np.sign(fa) == np.sign(fb):
            warning('%s: %s fa * fb < 0 is not met', __name__, fcn.__name__)
            return [[None, None], [[None, None], [None, None]], myfcn.nfev]
        # With NumPy 2.0 the casting rules changed, leading to some
        # behavioural changes in this code. The simplest fix was to
        # make sure DBL_EPSILON did not remain a np.float32 value.
        #
        # NOTE(review): despite the name this is the *float32* epsilon;
        # confirm that single-precision tolerance is intended here.
        xc = xa
        fc = fa
        DBL_EPSILON = float(np.finfo(np.float32).eps)
        while myfcn.nfev < maxfev:
            prev_step = xb - xa
            # Keep b as the best approximation: swap so |f(b)| <= |f(c)|.
            if abs(fc) < abs(fb):
                xa, fa = xb, fb
                xb, fb = xc, fc
                xc, fc = xa, fa
            tol_act = 2.0 * DBL_EPSILON * abs(xb) + tol / 2.0
            new_step = (xc - xb) / 2.0
            if abs(fb) <= tol:
                return [[xb, fb], [[xa, fa], [xb, fb]], myfcn.nfev]
            # Interval small enough: polish with Muller before returning.
            if abs(new_step) <= tol_act:
                if np.sign(fb) != np.sign(fa):
                    tmp = apache_muller(fcn, xa, xb, fa, fb, args=args,
                                        maxfev=maxfev - myfcn.nfev,
                                        tol=tol)
                    tmp[-1] += myfcn.nfev
                    return tmp
                if np.sign(fb) != np.sign(fc):
                    tmp = apache_muller(fcn, xb, xc, fb, fc, args=args,
                                        maxfev=maxfev - myfcn.nfev,
                                        tol=tol)
                    tmp[-1] += myfcn.nfev
                    return tmp
                return [[xb, fb], [[xa, fa], [xb, fb]], myfcn.nfev]
            # Try inverse interpolation when the previous step was large
            # enough and b improved on a.
            if abs(prev_step) >= tol_act and abs(fa) > abs(fb):
                cb = xc - xb
                if xa == xc:
                    # Only two distinct points: linear interpolation.
                    t1 = fb / fa
                    p = cb * t1
                    q = 1.0 - t1
                else:
                    # Three distinct points: inverse quadratic.
                    t1 = fb / fc
                    t2 = fb / fa
                    q = fa / fc
                    p = t2 * (cb * q * (q - t1) - (xb - xa) * (t1 - 1.0))
                    q = (q - 1.0) * (t1 - 1.0) * (t2 - 1.0)
                # Arrange so p >= 0 and q carries the sign.
                if p > 0:
                    q = -q
                else:
                    p = -p
                # Accept the interpolated step only if it stays within
                # the bracket and shrinks faster than bisection.
                if 2 * p < (1.5 * cb * q - abs(tol_act * q)) and \
                        2 * p < abs(prev_step * q):
                    new_step = p / q
            # Never step by less than the attainable tolerance.
            if abs(new_step) < tol_act:
                if new_step > 0:
                    new_step = tol_act
                else:
                    new_step = -tol_act
            xa = xb
            fa = fb
            xb += new_step
            fb = myfcn(xb, *args)
            # Re-establish the bracketing invariant when b and c end up
            # on the same side of the root.
            if fb > 0 and fc > 0 or fb < 0 and fc < 0:
                xc = xa
                fc = fa
        return [[xb, fb], [[xa, fa], [xc, fc]], myfcn.nfev]
    except (ZeroDivisionError, OutOfBoundErr):
        return [[xb, fb], [[xa, fa], [xc, fc]], myfcn.nfev]
def public(f):
    """Decorator that registers *f* in its module's ``__all__``.

    Avoids retyping function/class names when declaring the public API.

    * Based on an idea by Duncan Booth:
      http://groups.google.com/group/comp.lang.python/msg/11cbb03e09611b8a
    * Improved via a suggestion by Dave Angel:
      http://groups.google.com/group/comp.lang.python/msg/3d400fb22d8a42e1

    See also https://bugs.python.org/issue26632
    """
    module_all = sys.modules[f.__module__].__dict__.setdefault('__all__', [])
    # Guard against duplicates (e.g. when a module is re-run from an IDE).
    if f.__name__ not in module_all:
        module_all.append(f.__name__)
    return f
def send_to_pager(txt: str, filename=None, clobber: bool = False) -> None:
    """Write out the given string, using pagination if supported.

    This used to call out to less/more but is now handled by
    pydoc.pager.

    Parameters
    ----------
    txt : str
        The text to display
    filename : str or StringIO or None, optional
        If not None, write the output to the given file or filelike
        object.
    clobber : bool, optional
        If filename is a string, then - when clobber is set - refuse
        to overwrite the file if it already exists.
    """
    # No destination: page to the terminal.
    if filename is None:
        pydoc.pager(txt)
        return
    # A StringIO-style (file-like) destination.
    if hasattr(filename, 'write'):
        print(txt, file=filename)
        return
    # Otherwise treat filename as a path on disk.
    clobber = bool_cast(clobber)
    if os.path.isfile(filename) and not clobber:
        raise IOErr('filefound', filename)
    with open(filename, 'w', encoding="UTF-8") as fh:
        print(txt, file=fh)
|
sherpaREPO_NAMEsherpaPATH_START.@sherpa_extracted@sherpa-main@sherpa@utils@__init__.py@.PATH_END.py
|
{
"filename": "cube_utils.py",
"repo_name": "radio-astro-tools/spectral-cube",
"repo_path": "spectral-cube_extracted/spectral-cube-master/spectral_cube/cube_utils.py",
"type": "Python"
}
|
import contextlib
import warnings
from copy import deepcopy
import builtins
import dask.array as da
import numpy as np
from astropy.wcs.utils import proj_plane_pixel_area
from astropy.wcs import (WCSSUB_SPECTRAL, WCSSUB_LONGITUDE, WCSSUB_LATITUDE)
from astropy.wcs import WCS
from . import wcs_utils
from .utils import FITSWarning, AstropyUserWarning, WCSCelestialError
from astropy import log
from astropy.io import fits
from astropy.wcs.utils import is_proj_plane_distorted
from astropy.io.fits import BinTableHDU, Column
from astropy import units as u
import itertools
import re
from radio_beam import Beam
def _fix_spectral(wcs):
    """
    Attempt to fix a cube with an invalid spectral axis definition. Only uses
    well-known exceptions, e.g. CTYPE = 'VELOCITY'. For the rest, it will try
    to raise a helpful error.
    """
    axis_info = wcs.get_axis_types()
    coord_kinds = [entry['coordinate_type'] for entry in axis_info]

    if wcs.naxis not in (3, 4):
        raise TypeError("The WCS has {0} axes of types {1}".format(len(coord_kinds),
                                                                   coord_kinds))

    # sanitize noncompliant headers
    if 'spectral' not in coord_kinds:
        log.warning("No spectral axis found; header may be non-compliant.")
        for axis_index, kind in enumerate(coord_kinds):
            if kind in ('celestial', 'stokes'):
                continue
            # Map well-known broken CTYPE values onto their valid
            # spectral equivalents.
            ctype = wcs.wcs.ctype[axis_index]
            if ctype in wcs_utils.bad_spectypes_mapping:
                wcs.wcs.ctype[axis_index] = wcs_utils.bad_spectypes_mapping[ctype]

    return wcs
def _split_stokes(array, wcs, beam_table=None):
    """
    Given a 3- or 4-d data cube with 4-d WCS (spectral cube + stokes) return a
    dictionary of data and WCS objects for each Stokes component

    Parameters
    ----------
    array : `~numpy.ndarray`
        The input 3- or 4-d array with two position dimensions, one spectral
        dimension, and (optionally) a Stokes dimension.
    wcs : `~astropy.wcs.WCS`
        The input 4-d WCS with two position dimensions, one spectral
        dimension, and a Stokes dimension.
    beam_table : `~astropy.io.fits.hdu.table.BinTableHDU`
        When multiple beams are present, a FITS table with the beam information
        can be given to be split into the polarization components, consistent with
        `array`.

    Returns
    -------
    (stokes_arrays, wcs_slice) or (stokes_arrays, wcs_slice, beam_tables):
    a dict of arrays keyed by Stokes name, the 3-d WCS with the Stokes axis
    dropped, and (if `beam_table` was given) the per-Stokes beam tables.
    """
    if array.ndim not in (3,4):
        raise ValueError("Input array must be 3- or 4-dimensional for a"
                         " STOKES cube")

    if wcs.wcs.naxis != 4:
        raise ValueError("Input WCS must be 4-dimensional for a STOKES cube")

    wcs = _fix_spectral(wcs)

    # reverse from wcs -> numpy convention
    axtypes = wcs.get_axis_types()[::-1]

    types = [a['coordinate_type'] for a in axtypes]

    try:
        # Find stokes dimension
        stokes_index = types.index('stokes')
    except ValueError:
        # stokes not in list, but we are 4d
        if types.count('celestial') == 2 and types.count('spectral') == 1:
            if None in types:
                # A blank axis type is assumed to be the Stokes axis.
                stokes_index = types.index(None)
                log.warning("FITS file has no STOKES axis, but it has a blank"
                            " axis type at index {0} that is assumed to be "
                            "stokes.".format(4-stokes_index))
            else:
                # Otherwise take the last axis that is neither celestial
                # nor spectral (loop keeps the final such axis).
                for ii,tp in enumerate(types):
                    if tp not in ('celestial', 'spectral'):
                        stokes_index = ii
                        stokes_type = tp

                log.warning("FITS file has no STOKES axis, but it has an axis"
                            " of type {1} at index {0} that is assumed to be "
                            "stokes.".format(4-stokes_index, stokes_type))
        else:
            raise IOError("There are 4 axes in the data cube but no STOKES "
                          "axis could be identified")

    # TODO: make the stokes names more general
    # NOTE(review): only 4 names are provided; a Stokes axis longer than 4
    # planes would raise IndexError below — confirm upstream data never has
    # more than 4 polarizations.
    stokes_names = ["I", "Q", "U", "V"]

    stokes_arrays = {}

    if beam_table is not None:
        beam_tables = {}

    # Convert the numpy-convention index back to the WCS-convention index
    # before dropping the Stokes axis.
    wcs_slice = wcs_utils.drop_axis(wcs, wcs.naxis - 1 - stokes_index)

    if array.ndim == 4:
        for i_stokes in range(array.shape[stokes_index]):
            # Select plane i_stokes along the Stokes axis, full slices elsewhere.
            array_slice = [i_stokes if idim == stokes_index else slice(None)
                           for idim in range(array.ndim)]

            stokes_arrays[stokes_names[i_stokes]] = array[tuple(array_slice)]

            if beam_table is not None:
                # The beam table rows are matched by their POL column value.
                beam_pol_idx = beam_table['POL'] == i_stokes
                beam_tables[stokes_names[i_stokes]] = beam_table[beam_pol_idx]
    else:
        # 3D array with STOKES as a 4th header parameter
        stokes_arrays['I'] = array
        if beam_table is not None:
            beam_tables['I'] = beam_table

    if beam_table is not None:
        return stokes_arrays, wcs_slice, beam_tables
    else:
        return stokes_arrays, wcs_slice
def _orient(array, wcs):
    """
    Given a 3-d spectral cube and WCS, swap around the axes so that the
    spectral axis cube is the first in Numpy notation, and the last in WCS
    notation.

    Parameters
    ----------
    array : `~numpy.ndarray`
        The input 3-d array with two position dimensions and one spectral
        dimension.
    wcs : `~astropy.wcs.WCS`
        The input 3-d WCS with two position dimensions and one spectral
        dimension.

    Returns
    -------
    (result_array, result_wcs): the array transposed to (spectral, lat, lon)
    order and the WCS reordered to (lon, lat, spectral).
    """
    if array.ndim != 3:
        raise ValueError("Input array must be 3-dimensional")

    if wcs.wcs.naxis != 3:
        raise ValueError("Input WCS must be 3-dimensional")

    # Normalize the spectral CTYPE and flatten any PC/CD matrix to CDELT form.
    wcs = wcs_utils.diagonal_wcs_to_cdelt(_fix_spectral(wcs))

    # reverse from wcs -> numpy convention
    axtypes = wcs.get_axis_types()[::-1]

    types = [a['coordinate_type'] for a in axtypes]

    n_celestial = types.count('celestial')

    if n_celestial == 0:
        raise ValueError('No celestial axes found in WCS')
    elif n_celestial != 2:
        raise ValueError('WCS should contain 2 celestial dimensions but '
                         'contains {0}'.format(n_celestial))

    n_spectral = types.count('spectral')

    if n_spectral == 0:
        raise ValueError('No spectral axes found in WCS')
    elif n_spectral != 1:
        raise ValueError('WCS should contain one spectral dimension but '
                         'contains {0}'.format(n_spectral))

    # 'number' distinguishes longitude (0) from latitude (1) among the
    # celestial axes; non-celestial entries become None.
    nums = [None if a['coordinate_type'] != 'celestial' else a['number']
            for a in axtypes]

    if 'stokes' in types:
        raise ValueError("Input WCS should not contain stokes")

    # Target numpy axis order: (spectral, latitude, longitude).
    t = [types.index('spectral'), nums.index(1), nums.index(0)]
    if t == [0, 1, 2]:
        # Already in the desired order; avoid an unnecessary transpose.
        result_array = array
    else:
        result_array = array.transpose(t)

    result_wcs = wcs.sub([WCSSUB_LONGITUDE, WCSSUB_LATITUDE, WCSSUB_SPECTRAL])

    return result_array, result_wcs
def slice_syntax(f):
    """
    This decorator wraps a function that accepts a tuple of slices.

    After wrapping, the function acts like a property that accepts
    bracket syntax (e.g., p[1:3, :, :])

    Parameters
    ----------
    f : function
        Function taking ``(self, view)`` where ``view`` is a tuple of slices.

    Returns
    -------
    property
        A read-only property returning a `SliceIndexer` bound to ``f``.
    """
    def wrapper(self):
        # The SliceIndexer forwards __getitem__ calls back to f(self, view).
        result = SliceIndexer(f, self)
        result.__doc__ = f.__doc__
        return result

    # Compose the property docstring from the wrapped function's docstring
    # and the module-level slice_doc template.
    wrapper.__doc__ = slice_doc.format(f.__doc__ or '',
                                       f.__name__)

    result = property(wrapper)
    return result
slice_doc = """
{0}
Notes
-----
Supports efficient Numpy slice notation,
like ``{1}[0:3, :, 2:4]``
"""
class SliceIndexer(object):
    """
    Helper returned by :func:`slice_syntax` that forwards bracket syntax
    (``obj.prop[view]``) to a wrapped two-argument function.
    """

    def __init__(self, func, _other):
        self._func = func
        self._other = _other

    def __getitem__(self, view):
        out = self._func(self._other, view)
        # Dask results are computed eagerly so callers always receive
        # concrete data.
        if isinstance(out, da.Array):
            out = out.compute()
        return out

    @property
    def size(self):
        return self._other.size

    @property
    def ndim(self):
        return self._other.ndim

    @property
    def shape(self):
        return self._other.shape

    def __iter__(self):
        raise Exception("You need to specify a slice (e.g. ``[:]`` or "
                        "``[0,:,:]`` in order to access this property.")
# TODO: make this into a proper configuration item
# TODO: make threshold depend on memory?
# Number of array elements above which a cube is considered "huge" and
# should not be loaded into memory wholesale (see `is_huge`).
MEMORY_THRESHOLD=1e8
def is_huge(cube):
    """Return True when `cube` has at least MEMORY_THRESHOLD elements."""
    return cube.size >= MEMORY_THRESHOLD
def iterator_strategy(cube, axis=None):
    """
    Guess the most efficient iteration strategy
    for iterating over a cube, given its size and layout

    Parameters
    ----------
    cube : SpectralCube instance
        The cube to iterate over
    axis : [0, 1, 2]
        For reduction methods, the axis that is
        being collapsed

    Returns
    -------
    strategy : ['cube' | 'ray' | 'slice']
        The recommended iteration strategy.
        *cube* recommends working with the entire array in memory
        *slice* recommends working with one slice at a time
        *ray* recommends working with one ray at a time
    """
    # pretty simple for now
    # Consistency fix: reuse the module-level MEMORY_THRESHOLD instead of a
    # duplicated hard-coded 1e8, so this stays in sync with `is_huge`.
    if cube.size < MEMORY_THRESHOLD:  # smallish
        return 'cube'
    return 'slice'
def try_load_beam(header):
    '''
    Attempt to construct a `~radio_beam.Beam` from a FITS header.

    Returns None when no (parseable) beam information is present, which is a
    valid state for a cube; a warning is issued later when
    `SpectralCube.beam` is accessed and found to be None.
    '''
    try:
        return Beam.from_fits_header(header)
    except Exception:
        # We don't emit a warning if no beam was found since it's ok for
        # cubes to not have beams.
        return None
def try_load_beams(data):
    '''
    Try loading a beam table from a FITS HDU list.

    Parameters
    ----------
    data : BinTableHDU, HDUList, PrimaryHDU, ImageHDU, or Header
        Object to search.  A beam table is identified by a 'BPA' column in a
        BinTableHDU; image HDUs/headers fall back to a single-beam parse.

    Returns
    -------
    A beam table (record array) when one is found; otherwise a
    `~radio_beam.Beam` parsed from a header (or None when parsing fails).

    Raises
    ------
    ValueError
        If a BinTableHDU has no beam columns, or an HDUList contains neither
        a beam table nor an image HDU to fall back on.
    '''
    try:
        from radio_beam import Beam
    except ImportError:
        # BUGFIX: `warnings.warn` requires a Warning subclass as category;
        # passing ImportError itself raised a TypeError at runtime.
        warnings.warn("radio_beam is not installed. No beam "
                      "can be created.",
                      ImportWarning
                      )

    if isinstance(data, fits.BinTableHDU):
        if 'BPA' in data.data.names:
            return data.data
        else:
            raise ValueError("No beam table found")
    elif isinstance(data, fits.HDUList):
        beam = None
        found_image_hdu = False

        for hdu_item in data:
            if isinstance(hdu_item, (fits.PrimaryHDU, fits.ImageHDU)):
                # Remember a header-derived beam in case no table is found.
                beam = try_load_beams(hdu_item.header)
                found_image_hdu = True
            elif isinstance(hdu_item, fits.BinTableHDU):
                if 'BPA' in hdu_item.data.names:
                    return hdu_item.data

        if found_image_hdu:
            # there was a beam in a header, but not a beam table
            return beam
        # the loop completed without finding a beam table or an image HDU
        raise ValueError("No beam table found")
    elif isinstance(data, (fits.PrimaryHDU, fits.ImageHDU)):
        return try_load_beams(data.header)
    elif isinstance(data, fits.Header):
        try:
            return Beam.from_fits_header(data)
        except Exception:
            # Avoid warning since cubes don't have to have a beam;
            # a warning is provided when `SpectralCube.beam` is None.
            return None
    else:
        raise ValueError("How did you get here? This is some sort of error.")
def beams_to_bintable(beams):
    """
    Convert a list of beams to a CASA-style BinTableHDU

    One row is written per channel; BMAJ/BMIN are stored in arcsec and BPA
    in degrees, each as 32-bit floats ('1E'), with integer ('1J') CHAN and
    POL columns.
    """
    c1 = Column(name='BMAJ', format='1E', array=[bm.major.to(u.arcsec).value for bm in beams], unit=u.arcsec.to_string('FITS'))
    c2 = Column(name='BMIN', format='1E', array=[bm.minor.to(u.arcsec).value for bm in beams], unit=u.arcsec.to_string('FITS'))
    c3 = Column(name='BPA', format='1E', array=[bm.pa.to(u.deg).value for bm in beams], unit=u.deg.to_string('FITS'))
    #c4 = Column(name='CHAN', format='1J', array=[bm.meta['CHAN'] if 'CHAN' in bm.meta else 0 for bm in beams])
    # CHAN is simply the sequential channel number; POL defaults to 0 when
    # the beam carries no polarization metadata.
    c4 = Column(name='CHAN', format='1J', array=np.arange(len(beams)))
    c5 = Column(name='POL', format='1J', array=[bm.meta['POL'] if 'POL' in bm.meta else 0 for bm in beams])

    bmhdu = BinTableHDU.from_columns([c1, c2, c3, c4, c5])
    bmhdu.header['EXTNAME'] = 'BEAMS'
    bmhdu.header['EXTVER'] = 1
    bmhdu.header['XTENSION'] = 'BINTABLE'
    bmhdu.header['NCHAN'] = len(beams)
    # Number of distinct polarizations represented among the beams.
    bmhdu.header['NPOL'] = len(set([bm.meta['POL'] for bm in beams if 'POL' in bm.meta]))
    return bmhdu
def beam_props(beams, includemask=None):
    '''
    Returns separate quantities for the major, minor, and PA of a list of
    beams.

    Parameters
    ----------
    beams : iterable of `~radio_beam.Beam`
        Beams to extract properties from.
    includemask : iterable of bool, optional
        Per-beam selection flags; all beams are kept when not given.

    Returns
    -------
    (major, minor, pa) : `~astropy.units.Quantity` triplet, each in degrees.
    '''
    if includemask is None:
        includemask = itertools.cycle([True])

    # BUGFIX: zip the mask against the beams exactly once.  The previous
    # implementation zipped `includemask` three times, so a one-shot
    # iterator mask was exhausted building `major`, leaving `minor` and
    # `pa` empty and inconsistent.
    selected = [bm for bm, incl in zip(beams, includemask) if incl]

    major = u.Quantity([bm.major for bm in selected], u.deg)
    minor = u.Quantity([bm.minor for bm in selected], u.deg)
    pa = u.Quantity([bm.pa for bm in selected], u.deg)

    return major, minor, pa
def largest_beam(beams, includemask=None):
    """
    Returns the largest beam (by area) in a list of beams.
    """
    from radio_beam import Beam

    major, minor, pa = beam_props(beams, includemask)
    # major*minor is proportional to the elliptical beam area.
    idx = (major * minor).argmax()

    return Beam(major=major[idx], minor=minor[idx], pa=pa[idx])
def smallest_beam(beams, includemask=None):
    """
    Returns the smallest beam (by area) in a list of beams.
    """
    from radio_beam import Beam

    major, minor, pa = beam_props(beams, includemask)
    # major*minor is proportional to the elliptical beam area.
    idx = (major * minor).argmin()

    return Beam(major=major[idx], minor=minor[idx], pa=pa[idx])
@contextlib.contextmanager
def _map_context(numcores):
"""
Mapping context manager to allow parallel mapping or regular mapping
depending on the number of cores specified.
The builtin map is overloaded to handle python3 problems: python3 returns a
generator, while ``multiprocessing.Pool.map`` actually runs the whole thing
"""
if numcores is not None and numcores > 1:
try:
from joblib import Parallel, delayed
from joblib.pool import has_shareable_memory
map = lambda x,y: Parallel(n_jobs=numcores)(delayed(has_shareable_memory)(x))(y)
parallel = True
except ImportError:
map = lambda x,y: list(builtins.map(x,y))
warnings.warn("Could not import joblib. "
"map will be non-parallel.",
ImportError
)
parallel = False
else:
parallel = False
map = lambda x,y: list(builtins.map(x,y))
yield map
def convert_bunit(bunit):
    '''
    Convert a BUNIT string to a quantity

    Parameters
    ----------
    bunit : str
        String to convert to an `~astropy.units.Unit`

    Returns
    -------
    unit : `~astropy.unit.Unit`
        Corresponding unit, or None when the string could not be parsed.
    '''
    # special case: CASA (sometimes) makes non-FITS-compliant jy/beam headers
    normalized = re.sub(r"\s", "", bunit.lower())
    if normalized == 'jy/beam':
        return u.Jy / u.beam

    try:
        return u.Unit(bunit)
    except ValueError:
        warnings.warn("Could not parse unit {0}. "
                      "If you know the correct unit, try "
                      "u.add_enabled_units(u.def_unit(['{0}'], represents=u.<correct_unit>))".format(bunit),
                      AstropyUserWarning)
        return None
def world_take_along_axis(cube, position_plane, axis):
    '''
    Convert a 2D plane of pixel positions to the equivalent WCS coordinates.
    For example, this will convert `argmax`
    along the spectral axis to the equivalent spectral value (e.g., velocity at
    peak intensity).

    Parameters
    ----------
    cube : SpectralCube
        A spectral cube.
    position_plane : 2D numpy.ndarray
        2D array of pixel positions along `axis`. For example, `position_plane` can
        be the output of `argmax` or `argmin` along an axis.
    axis : int
        The axis that `position_plane` is collapsed along.

    Returns
    -------
    out : astropy.units.Quantity
        2D array of WCS coordinates.
    '''
    # The lookup below evaluates the world coordinate along `axis` at pixel
    # (0, 0) of the other two axes; this is only valid when `axis` is not
    # correlated with the other image axes in the WCS.
    if wcs_utils.is_pixel_axis_to_wcs_correlated(cube.wcs, axis):
        raise WCSCelestialError("world_take_along_axis requires the celestial axes"
                                " to be aligned along image axes.")

    # Get 1D slice along that axis.
    world_slice = [0, 0]
    world_slice.insert(axis, slice(None))

    world_coords = cube.world[tuple(world_slice)][axis]

    # Build broadcasting index tuples: world_newaxis reshapes the 1D world
    # coordinates to place their length along `axis` (singleton elsewhere);
    # plane_newaxis inserts a singleton into the 2D plane at `axis`.
    world_newaxis = [np.newaxis] * 2
    world_newaxis.insert(axis, slice(None))
    world_newaxis = tuple(world_newaxis)

    plane_newaxis = [slice(None), slice(None)]
    plane_newaxis.insert(axis, np.newaxis)
    plane_newaxis = tuple(plane_newaxis)

    # Gather the world coordinate at each per-pixel position, then drop the
    # now-singleton axis.
    out = np.take_along_axis(world_coords[world_newaxis],
                             position_plane[plane_newaxis], axis=axis)
    out = out.squeeze()

    return out
def _has_beam(obj):
if hasattr(obj, '_beam'):
return obj._beam is not None
else:
return False
def _has_beams(obj):
if hasattr(obj, '_beams'):
return obj._beams is not None
else:
return False
def bunit_converters(obj, unit, equivalencies=(), freq=None):
    '''
    Handler for all brightness unit conversions, including: K, Jy/beam, Jy/pix, Jy/sr.
    This also includes varying resolution spectral cubes, where the beam size varies along
    the frequency axis.

    Parameters
    ----------
    obj : {SpectralCube, LowerDimensionalObject}
        A spectral cube or any other lower dimensional object.
    unit : `~astropy.units.Unit`
        Unit to convert `obj` to.
    equivalencies : tuple, optional
        Initial list of equivalencies.
    freq : `~astropy.unit.Quantity`, optional
        Frequency to use for spectral conversions. If the spectral axis is available, the
        frequencies will already be defined.

    Outputs
    -------
    factor : `~numpy.ndarray` or list
        Conversion factor(s).  A list with one entry per channel is returned
        for varying-resolution (multi-beam) objects; otherwise a single-entry
        array/list element is returned.
    '''
    # Add a simple check it the new unit is already equivalent, and so we don't need
    # any additional unit equivalencies
    if obj.unit.is_equivalent(unit):
        # return equivalencies
        factor = obj.unit.to(unit, equivalencies=equivalencies)
        return np.array([factor])

    # Determine the bunit "type". This will determine what information we need for the unit conversion.
    has_btemp = obj.unit.is_equivalent(u.K) or unit.is_equivalent(u.K)
    has_perbeam = obj.unit.is_equivalent(u.Jy/u.beam) or unit.is_equivalent(u.Jy/u.beam)
    has_perangarea = obj.unit.is_equivalent(u.Jy/u.sr) or unit.is_equivalent(u.Jy/u.sr)
    has_perpix = obj.unit.is_equivalent(u.Jy/u.pix) or unit.is_equivalent(u.Jy/u.pix)

    # Is there any beam object defined?
    has_beam = _has_beam(obj) or _has_beams(obj)

    # Set if this is a varying resolution object
    has_beams = _has_beams(obj)

    # Define freq, if needed:
    if any([has_perangarea, has_perbeam, has_btemp]):
        # Create a beam equivalency for brightness temperature
        # This requires knowing the frequency along the spectral axis.
        if freq is None:
            try:
                freq = obj.with_spectral_unit(u.Hz).spectral_axis
            except AttributeError:
                raise TypeError("Object of type {0} has no spectral "
                                "information. `freq` must be provided for"
                                " unit conversion from Jy/beam"
                                .format(type(obj)))
        else:
            if not freq.unit.is_equivalent(u.Hz):
                raise u.UnitsError("freq must be given in equivalent "
                                   "frequency units.")

            # Flatten so user-supplied scalars/arrays are handled uniformly.
            freq = freq.reshape((-1,))

    else:
        freq = [None]

    # To handle varying resolution objects, loop through "channels"
    # Default to a single iteration for a 2D spatial object or when a beam is not defined
    # This allows handling all 1D, 2D, and 3D data products.
    if has_beams:
        iter = range(len(obj.beams))
        beams = obj.beams
    elif has_beam:
        iter = range(0, 1)
        beams = [obj.beam]
    else:
        iter = range(0, 1)
        beams = [None]

    # Append the unit conversion factors
    factors = []

    # Iterate through spectral channels.
    for ii in iter:

        beam = beams[ii]

        # Use the range of frequencies when the beam does not change. Otherwise, select the
        # frequency corresponding to this beam.
        if has_beams:
            thisfreq = freq[ii]
        else:
            thisfreq = freq

        # Changes in beam require a new equivalency for each.
        this_equivalencies = deepcopy(equivalencies)

        # Equivalencies for Jy per ang area.
        if has_perangarea:
            bmequiv_angarea = u.brightness_temperature(thisfreq)

            this_equivalencies = list(this_equivalencies) + bmequiv_angarea

        # Beam area equivalencies for Jy per beam and/or Jy per ang area
        if has_perbeam:
            # create a beam equivalency for brightness temperature
            bmequiv = beam.jtok_equiv(thisfreq)

            # NOTE: `beamarea_equiv` was included in the radio-beam v0.3.3 release
            # The if/else here handles potential cases where earlier releases are installed.
            if hasattr(beam, 'beamarea_equiv'):
                bmarea_equiv = beam.beamarea_equiv
            else:
                bmarea_equiv = u.beam_angular_area(beam.sr)

            this_equivalencies = list(this_equivalencies) + bmequiv + bmarea_equiv

        # Equivalencies for Jy per pixel area.
        if has_perpix:
            if not obj.wcs.has_celestial:
                raise ValueError("Spatial WCS information is required for unit conversions"
                                 " involving spatial areas (e.g., Jy/pix, Jy/sr)")

            pix_area = (proj_plane_pixel_area(obj.wcs.celestial) * u.deg**2).to(u.sr)

            pix_area_equiv = [(u.Jy / u.pix, u.Jy / u.sr,
                              lambda x: x / pix_area.value,
                              lambda x: x * pix_area.value)]

            this_equivalencies = list(this_equivalencies) + pix_area_equiv

            # Define full from brightness temp to Jy / pix.
            # Otherwise isn't working in 1 step
            if has_btemp:
                if not has_beam:
                    raise ValueError("Conversions between K and Jy/beam or Jy/pix"
                                     "requires the cube to have a beam defined.")

                jtok_factor = beam.jtok(thisfreq) / (u.Jy / u.beam)

                # We're going to do this piecemeal because it's easier to conceptualize
                # We specifically anchor these conversions based on the beam area. So from
                # beam to pix, this is beam -> angular area -> area per pixel
                # Altogether:
                # K -> Jy/beam -> Jy /sr - > Jy / pix
                forward_factor = 1 / (jtok_factor * (beam.sr / u.beam) / (pix_area / u.pix))
                reverse_factor = jtok_factor * (beam.sr / u.beam) / (pix_area / u.pix)

                pix_area_btemp_equiv = [(u.K, u.Jy / u.pix,
                                        lambda x: x * forward_factor.value,
                                        lambda x: x * reverse_factor.value)]

                this_equivalencies = list(this_equivalencies) + pix_area_btemp_equiv

            # Equivalencies between pixel and angular areas.
            if has_perbeam:
                if not has_beam:
                    raise ValueError("Conversions between Jy/beam or Jy/pix"
                                     "requires the cube to have a beam defined.")

                beam_area = beam.sr

                pix_area_btemp_equiv = [(u.Jy / u.pix, u.Jy / u.beam,
                                        lambda x: x * (beam_area / pix_area).value,
                                        lambda x: x * (pix_area / beam_area).value)]

                this_equivalencies = list(this_equivalencies) + pix_area_btemp_equiv

        factor = obj.unit.to(unit, equivalencies=this_equivalencies)
        factors.append(factor)

    # NOTE(review): a list is returned for multi-beam objects, while a single
    # scalar factor is returned otherwise — callers must handle both shapes.
    if has_beams:
        return factors
    else:
        # Slice along first axis to return a 1D array.
        return factors[0]
def combine_headers(header1, header2, **kwargs):
    '''
    Given two Header objects, this function returns a fits Header of the optimal wcs.

    Parameters
    ----------
    header1 : astropy.io.fits.Header
        A Header.
    header2 : astropy.io.fits.Header
        A Header.

    Returns
    -------
    header : astropy.io.fits.Header
        A header object of a field containing both initial headers.
    '''
    from reproject.mosaicking import find_optimal_celestial_wcs

    # Celestial WCS and image shape of each input field
    celestial1 = WCS(header1).celestial
    celestial2 = WCS(header2).celestial
    field_inputs = [(celestial1.array_shape, celestial1),
                    (celestial2.array_shape, celestial2)]

    # Optimal common celestial WCS and array shape covering both fields
    wcs_opt, shape_opt = find_optimal_celestial_wcs(field_inputs, auto_rotate=False,
                                                    **kwargs)

    # Start from header1 so its non-WCS cards and spectral axis carry over,
    # then overwrite the celestial solution with the optimal one.
    header = header1.copy()
    header['NAXIS'] = 3
    header['NAXIS1'] = shape_opt[1]
    header['NAXIS2'] = shape_opt[0]
    header['NAXIS3'] = header1['NAXIS3']
    header.update(wcs_opt.to_header())
    header['WCSAXES'] = 3

    return header
def mosaic_cubes(cubes, spectral_block_size=100, combine_header_kwargs={}, **kwargs):
    '''
    This function reprojects cubes onto a common grid and combines them to a single field.

    Parameters
    ----------
    cubes : iterable
        Iterable list of SpectralCube objects to reproject and add together.
    spectral_block_size : int
        Block size so that reproject does not run out of memory.
    combine_header_kwargs : dict
        Keywords passed to `~reproject.mosaicking.find_optimal_celestial_wcs`
        via `combine_headers`.

    Outputs
    -------
    cube : SpectralCube
        A spectral cube with the list of cubes mosaicked together.
    '''
    # The first cube anchors the spectral axis and output unit;
    # assumes all cubes share a compatible spectral grid — TODO confirm.
    cube1 = cubes[0]
    header = cube1.header

    # Create a header for a field containing all cubes
    for cu in cubes[1:]:
        header = combine_headers(header, cu.header, **combine_header_kwargs)

    # Prepare an array and mask for the final cube
    shape_opt = (header['NAXIS3'], header['NAXIS2'], header['NAXIS1'])
    final_array = np.zeros(shape_opt)
    # 2D coverage count: number of cubes contributing to each spatial pixel.
    mask_opt = np.zeros(shape_opt[1:])

    for cube in cubes:
        # Reproject cubes to the header
        try:
            if spectral_block_size is not None:
                cube_repr = cube.reproject(header,
                                           block_size=[spectral_block_size,
                                                       cube.shape[1],
                                                       cube.shape[2]],
                                           **kwargs)
            else:
                cube_repr = cube.reproject(header, **kwargs)
        except TypeError:
            # Older reproject versions lack `block_size`; retry without it.
            warnings.warn("The block_size argument is not accepted by `reproject`. "
                          "A more recent version may be needed.")
            cube_repr = cube.reproject(header, **kwargs)

        # Create weighting mask (2D) from the first channel's valid pixels
        mask = (cube_repr[0:1].get_mask_array()[0])
        mask_opt += mask.astype(float)

        # Go through each slice of the cube, add it to the final array
        # (NaNs are zeroed so non-overlapping regions do not poison the sum).
        for ii in range(final_array.shape[0]):
            slice1 = np.nan_to_num(cube_repr.unitless_filled_data[ii])
            final_array[ii] = final_array[ii] + slice1

    # Dividing by the mask throws errors where it is zero
    # (uncovered pixels become inf/nan in the output).
    with np.errstate(divide='ignore'):
        # Use weighting mask to average where cubes overlap
        for ss in range(final_array.shape[0]):
            final_array[ss] /= mask_opt

    # Create Cube
    cube = cube1.__class__(data=final_array * cube1.unit, wcs=WCS(header))

    return cube
|
radio-astro-toolsREPO_NAMEspectral-cubePATH_START.@spectral-cube_extracted@spectral-cube-master@spectral_cube@cube_utils.py@.PATH_END.py
|
{
"filename": "DistortedModel.py",
"repo_name": "CU-NESS/pylinex",
"repo_path": "pylinex_extracted/pylinex-master/pylinex/model/DistortedModel.py",
"type": "Python"
}
|
"""
File: pylinex/model/DistortedModel.py
Author: Keith Tauscher
Date: 3 Aug 2018
Description: File containing a class representing a model which is the same as
a different model except with transformed inputs.
"""
import numpy as np
from distpy import TransformList
from .Model import Model
class DistortedModel(Model):
    """
    Class representing a model which is the same as a different model except
    with transformed inputs.
    """
    def __init__(self, model, transform_list):
        """
        Initializes a TransformedModel based around the given underlying model
        and the transform_list which will affect its inputs.

        model: a Model object
        transform_list: either a TransformList object or something which can be
                        cast to a TransformList object with the model's number
                        of parameters. The transform list describes the
                        transform which will be applied before each parameter
                        is passed to the underlying model
        """
        self.model = model
        self.transform_list = transform_list

    @property
    def model(self):
        """
        Property storing the inner model (as a Model object) which is being
        distorted.
        """
        if not hasattr(self, '_model'):
            raise AttributeError("model referenced before it was set.")
        return self._model

    @model.setter
    def model(self, value):
        """
        Setter for the inner model which is being distorted.

        value: a Model object
        """
        if isinstance(value, Model):
            self._model = value
        else:
            raise TypeError("model was set to a non-Model object.")

    @property
    def transform_list(self):
        """
        Property storing the transform_list with which the model underlying
        this one will be distorted. This transform_list is applied to
        parameters before they are passed to the underlying model.
        """
        if not hasattr(self, '_transform_list'):
            raise AttributeError("transform_list referenced before it was " +\
                "set.")
        return self._transform_list

    @transform_list.setter
    def transform_list(self, value):
        """
        Setter for the TransformList which will be applied to the parameters
        before they are passed to the underlying model.

        value: either a TransformList object or an object which can be cast to
               a TransformList object (such as None or a list of strings which
               can each be cast to a Transform object)
        """
        if TransformList.castable(value, num_transforms=self.num_parameters):
            self._transform_list =\
                TransformList.cast(value, num_transforms=self.num_parameters)
        else:
            raise TypeError("transform_list could not be successfully cast " +\
                "to a TransformList object.")

    @property
    def parameters(self):
        """
        Property storing a list of strings associated with the parameters
        necessitated by this model.
        """
        return self.model.parameters

    @property
    def num_channels(self):
        """
        Property storing the number of channels in output
        """
        if not hasattr(self, '_num_channels'):
            self._num_channels = self.model.num_channels
        return self._num_channels

    def __call__(self, parameters):
        """
        Evaluates the model at the given parameters.

        parameters: 1D numpy.ndarray of parameter values

        returns: array of size (num_channels,)
        """
        return self.model(self.transform_list(parameters))

    @property
    def gradient_computable(self):
        """
        Property storing a boolean describing whether the gradient of this
        model is computable.
        """
        return self.model.gradient_computable

    def gradient(self, parameters):
        """
        Evaluates the gradient of the model at the given parameters.

        parameters: 1D numpy.ndarray of parameter values

        returns: array of shape (num_channels, num_parameters)
        """
        transformed_gradient =\
            self.model.gradient(self.transform_list(parameters))
        # Chain rule: de-transform the gradient back into untransformed space.
        return self.transform_list.detransform_gradient(transformed_gradient,\
            parameters, axis=-1)

    @property
    def hessian_computable(self):
        """
        Property storing a boolean describing whether the hessian of this model
        is computable.
        """
        return self.model.gradient_computable and self.model.hessian_computable

    def hessian(self, parameters):
        """
        Evaluates the hessian of this model at the given parameters.

        parameters: 1D numpy.ndarray of parameter values

        returns: array of shape (num_channels, num_parameters, num_parameters)
        """
        transformed_point = self.transform_list(parameters)
        transformed_gradient = self.model.gradient(transformed_point)
        transformed_hessian = self.model.hessian(transformed_point)
        return self.transform_list.detransform_hessian(transformed_hessian,\
            transformed_gradient, parameters, first_axis=-2)

    def fill_hdf5_group(self, group):
        """
        Fills the given hdf5 file group with information about this model.

        group: hdf5 file group to fill with information about this model
        """
        group.attrs['class'] = 'DistortedModel'
        self.model.fill_hdf5_group(group.create_group('model'))
        self.transform_list.fill_hdf5_group(\
            group.create_group('transform_list'))

    def __eq__(self, other):
        """
        Checks for equality with other.

        other: object to check for equality

        returns: True if other is equal to this mode, False otherwise
        """
        if isinstance(other, DistortedModel):
            return (self.model == other.model) and\
                (self.transform_list == other.transform_list)
        else:
            return False

    @property
    def bounds(self):
        """
        Property storing the natural bounds of the parameters of this model,
        determined by "untransforming" the bounds of the underlying model.
        """
        if not hasattr(self, '_bounds'):
            self._bounds = {}
            for (iname, name) in enumerate(self.parameters):
                transform = self.transform_list[iname]
                (lower_bound, upper_bound) = self.model.bounds[name]
                if type(lower_bound) is type(None):
                    lower_bound = -np.inf
                # BUGFIX: the bare `except:` clauses here also swallowed
                # KeyboardInterrupt/SystemExit; only genuine inversion
                # failures (Exception subclasses) should yield None bounds.
                try:
                    lower_bound = transform.apply_inverse(lower_bound)
                except Exception:
                    lower_bound = None
                else:
                    if not np.isfinite(lower_bound):
                        lower_bound = None
                if type(upper_bound) is type(None):
                    upper_bound = np.inf
                try:
                    upper_bound = transform.apply_inverse(upper_bound)
                except Exception:
                    upper_bound = None
                else:
                    if not np.isfinite(upper_bound):
                        upper_bound = None
                self._bounds[name] = (lower_bound, upper_bound)
        return self._bounds

    def quick_fit(self, data, error, quick_fit_parameters=[], prior=None):
        """
        Performs a quick fit to the given data.

        data: curve to fit with the model
        error: noise level in the data
        quick_fit_parameters: quick fit parameters to pass to underlying model
        prior: either None or a GaussianDistribution object containing priors
               (in space of underlying model)

        returns: (parameter_mean, parameter_covariance)
        """
        (transformed_mean, transformed_covariance) =\
            self.model.quick_fit(data, error,\
            quick_fit_parameters=quick_fit_parameters, prior=prior)
        untransformed_mean =\
            self.transform_list.apply_inverse(transformed_mean)
        # First-order (delta-method) propagation of the covariance through
        # the inverse transforms, evaluated at the untransformed mean.
        derivatives = self.transform_list.derivative(untransformed_mean)
        untransformed_covariance = transformed_covariance /\
            (derivatives[:,np.newaxis] * derivatives[np.newaxis,:])
        return (untransformed_mean, untransformed_covariance)

    @property
    def quick_fit_parameters(self):
        """
        Property storing the parameters necessary to call quick_fit.
        """
        if not hasattr(self, '_quick_fit_parameters'):
            self._quick_fit_parameters = self.model.quick_fit_parameters
        return self._quick_fit_parameters
|
CU-NESSREPO_NAMEpylinexPATH_START.@pylinex_extracted@pylinex-master@pylinex@model@DistortedModel.py@.PATH_END.py
|
{
"filename": "models.py",
"repo_name": "OxES/OxKeplerSC",
"repo_path": "OxKeplerSC_extracted/OxKeplerSC-master/src/jc/models.py",
"type": "Python"
}
|
from __future__ import division
from math import log, pi
from numpy import ndarray, array, zeros
from scipy.optimize import minimize
from pyde.de import DiffEvol
from .core import *
from .fmodels import models as fm
# Precomputed ln(2*pi) used in the Gaussian likelihood normalization.
ln_two_pi = log(2*pi)

def lnlikelihood(obs, mod, err):
    """Gaussian (white-noise) log likelihood of `obs` given model `mod`.

    Parameters
    ----------
    obs, mod : ndarray
        Observed and model values, same shape.
    err : float
        Homoscedastic white-noise standard deviation.
    """
    npt = float(obs.size)
    chi2 = ((obs - mod)**2).sum() / err**2
    return -0.5 * (npt * ln_two_pi + npt * log(err**2) + chi2)
class DiscontinuityType(object):
    """
    Base class for parametric models of a light-curve discontinuity.

    Subclasses define `name`, `pnames`, a `model(pv, cad=None)` evaluator,
    `is_inside_bounds(pv)`, and the `_de_bounds`/`_pv0` helpers used by
    `fit`.  `penalty` is an additive handicap applied to the BIC.
    """
    name = ''
    pnames = []
    npar = len(pnames)
    penalty = 0.

    def __init__(self, discontinuity):
        # `discontinuity` supplies the data this type is fit against
        # (cadence, flux, wn_estimate, gp, amplitude, position, npt, use_gp).
        self.discontinuity = self.d = discontinuity
        self.best_fit_pv = None
        self.best_fit_model = None
        self.bic = None
        self._optimization_result = None

    # def __str__(self):
    #     return '{:s} {:4.1f} {:4.1f}'.format(self.name, self.position, self.amplitude)

    # def __repr__(self):
    #     return '{:s}({:4.1f}, {:4.1f})'.format(self.name, self.position, self.amplitude)

    def nlnlike_wn(self, pv):
        # Negative log likelihood under white noise; inf outside the bounds
        # acts as a hard rejection for the optimizer.
        if not self.is_inside_bounds(pv):
            return inf
        else:
            return -lnlikelihood(self.d.flux, self.model(pv), self.d.wn_estimate)

    def nlnlike_gp(self, pv):
        # Negative log likelihood under the discontinuity's GP noise model
        # (kernel hyperparameters frozen).
        if not self.is_inside_bounds(pv):
            return inf
        else:
            return -self.d.gp.lnlikelihood(self.d.cadence, self.d.flux-self.model(pv), freeze_k=True)

    def fit(self, use_de=False, de_npop=30, de_niter=100, method='Powell'):
        """
        Fit the model to the discontinuity data and return the BIC.

        When `use_de` is True, differential evolution seeds the local
        optimizer; otherwise `_pv0` provides the starting point.
        """
        jamp, jpos, fstd = self.d.amplitude, self.d.position, self.d.flux.std()
        nlnlike = self.nlnlike_gp if self.d.use_gp else self.nlnlike_wn

        if use_de:
            self._de = DiffEvol(nlnlike, self._de_bounds(jamp, jpos, fstd), npop=de_npop)
            self._de.optimize(de_niter)
            pv0 = self._de.minimum_location
        else:
            pv0 = self._pv0(jamp, jpos, fstd)

        self._optimization_result = r = minimize(nlnlike, pv0, method=method)
        self.best_fit_pv = r.x
        self.best_fit_model = self.model(r.x)
        # Zero the final two parameters — assumed to be the (constant,
        # slope) baseline pair in every subclass parameterization — to get
        # the discontinuity signal without the baseline.
        xx = r.x.copy()
        xx[-2:] = 0
        self.best_fit_model_wo_baseline = self.model(xx)
        self.bic = self.c_bic(r.fun)
        return self.bic

    def model(self, pv, cad=None):
        # Subclasses must implement the model evaluation.
        raise NotImplementedError

    def c_bic(self, nln):
        # Bayesian information criterion (2*nll + k*ln(n)) plus the
        # per-type penalty.
        return 2*nln + self.npar*log(self.d.npt) + self.penalty
class UnclassifiedDiscontinuity(DiscontinuityType):
    """Placeholder for a discontinuity that has not yet been classified;
    fitting is intentionally unsupported."""
    name = 'Unclassified'
    pnames = []
    npar = len(pnames)

    def fit(self, *nargs, **kwargs):
        raise NotImplementedError
class Slope(DiscontinuityType):
    """Null model: a smooth polynomial trend with no discontinuity."""
    name = 'slope'
    # NOTE(review): only two parameter names are declared (so c_bic uses
    # npar=2), but `fit` seeds with a degree-2 polyfit, i.e. three
    # coefficients — confirm whether the BIC penalty should count 3.
    pnames = 'slope intercept'.split()
    npar = len(pnames)

    def model(self, pv, cad=None):
        # Evaluate the polynomial with coefficients `pv` (highest order first).
        return np.poly1d(pv)(self.d.cadence if cad is None else cad)

    def is_inside_bounds(self, pv):
        # The polynomial model is unconstrained.
        return True

    def fit(self, use_de=False, de_npop=30, de_niter=100, method='Powell'):
        # Overrides the base fit: a least-squares polyfit is a good enough
        # starting point, so no differential-evolution stage is used.
        # NOTE(review): unlike the base class, this does not set
        # `best_fit_model_wo_baseline` — confirm callers tolerate that.
        nlnlike = self.nlnlike_gp if self.d.use_gp else self.nlnlike_wn
        pv0 = np.polyfit(self.d.cadence, self.d.flux, 2)
        self._optimization_result = r = minimize(nlnlike, pv0, method=method)
        self.best_fit_pv = r.x
        self.best_fit_model = self.model(r.x)
        self.bic = self.c_bic(r.fun)
        return self.bic
class Jump(DiscontinuityType):
    """A smooth step (jump) in flux on top of a linear baseline."""
    name = 'jump'
    pnames = 'center width amplitude bl_constant bl_slope'.split()
    npar = len(pnames)

    def model(self, pv, cad=None):
        # Delegates to the compiled Fortran model.
        return fm.m_jump(*pv, cadence=self.d.cadence if cad is None else cad)

    def is_inside_bounds(self, pv):
        # Positive center/width, width in (0.5, 3), and the center within
        # +/-3 cadences of the detected discontinuity position.
        return all(pv[:2] > 0) and (0.5 < pv[1] < 3.0) and (self.d.position-3 <= pv[0] <= self.d.position+3)

    def _de_bounds(self, jamp, jpos, fstd):
        # Differential-evolution search box around the detected jump.
        return [[     jpos-2,     jpos+2],  # 0 - center
                [          1,          3],  # 1 - width
                [  0.75*jamp,  1.25*jamp],  # 2 - amplitude
                [ -0.20*fstd,  0.20*fstd],  # 3 - baseline constant
                [      -1e-3,       1e-3]]  # 4 - baseline slope

    def _pv0(self, jamp, jpos, fstd):
        # Initial guess for the local optimizer.
        return [jpos, 2, jamp, fstd, 0]
class Jump2(DiscontinuityType):
    """Variant jump model ('jumpf', fm.m_jumpf) with two amplitude parameters (famp, jamp)."""
    name = 'jumpf'
    pnames = 'center width famp jamp bl_constant bl_slope'.split()
    npar = len(pnames)

    def model(self, pv, cad=None):
        """Evaluate fm.m_jumpf for parameters *pv* on the given cadence grid."""
        cadence = self.d.cadence if cad is None else cad
        return fm.m_jumpf(*pv, cadence=cadence)

    def is_inside_bounds(self, pv):
        """Validity test: center near the detected position, moderate width, both amplitudes negative with famp below jamp."""
        center, width, famp, jamp_ = pv[0], pv[1], pv[2], pv[3]
        near_position = self.d.position - 3 <= center <= self.d.position + 3
        return (near_position
                and 2.5 < width < 20.
                and famp < 0.
                and jamp_ < 0.
                and famp < jamp_)

    def _de_bounds(self, jamp, jpos, fstd):
        """Differential-evolution search box around the detected jump."""
        return [
            [jpos - 4, jpos + 4],          # center
            [1, 10],                       # width
            [0.75 * jamp, 1.25 * jamp],    # jump amplitude
            [0.75 * jamp, 1.25 * jamp],    # baseline level after jump
            [-0.20 * fstd, 0.20 * fstd],   # baseline constant
            [-1e-3, 1e-3],                 # baseline slope
        ]

    def _pv0(self, jamp, jpos, fstd):
        """Initial parameter vector for the local optimizer."""
        return [jpos, 2, jamp, 0.5 * jamp, fstd, 0]
class Transit(DiscontinuityType):
    """Transit-like dip modelled by fm.m_transit plus a linear baseline."""
    name = 'transit'
    pnames = 'depth center duration bl_constant bl_slope'.split()
    npar = len(pnames)

    def model(self, pv, cad=None):
        """Evaluate fm.m_transit for parameters *pv* on the given cadence grid."""
        cadence = self.d.cadence if cad is None else cad
        return fm.m_transit(*pv, cadence=cadence)

    def is_inside_bounds(self, pv):
        """Positive depth, duration in (1, 50), and the transit (plus margin) inside the window."""
        depth, center, duration = pv[0], pv[1], pv[2]
        cad = self.d.cadence
        fits_window = cad[1] + 0.55 * duration < center < cad[-2] - 0.55 * duration
        return depth > 0. and fits_window and 1. < duration < 50.

    def _de_bounds(self, jamp, jpos, fstd):
        """Differential-evolution search box around the candidate transit."""
        # NOTE(review): the depth bounds [-0.8*jamp, -1.2*jamp] are ascending
        # only when jamp < 0 (a flux drop) -- presumably the detected amplitude
        # of a transit candidate is negative; confirm upstream.
        return [
            [-0.8 * jamp, -1.2 * jamp],   # transit depth
            [jpos - 5, jpos + 5],         # center
            [1.2, 50.],                   # duration
            [-0.2 * fstd, 0.2 * fstd],    # baseline constant
            [-1e-3, 1e-3],                # baseline slope
        ]

    def _pv0(self, jamp, jpos, fstd):
        """Initial parameter vector for the local optimizer."""
        return [-jamp, jpos, 10, fstd, 0]
class Flare(DiscontinuityType):
    """Flare model (fm.m_flare): brightening that starts at pv[0] and decays."""
    name = 'flare'
    pnames = 'start duration amplitude bl_constant bl_slope'.split()
    npar = len(pnames)

    def model(self, pv, cad=None):
        """Evaluate fm.m_flare for parameters *pv* on the given cadence grid."""
        cadence = self.d.cadence if cad is None else cad
        return fm.m_flare(*pv, cadence=cadence)

    def is_inside_bounds(self, pv):
        """Non-negative start/duration/amplitude, start inside the window, duration below 10 cadences."""
        start, duration = pv[0], pv[1]
        cad = self.d.cadence
        return (all(pv[:3] >= 0.)
                and cad[0] < start < cad[-1]
                and duration < 10.)

    def _de_bounds(self, jamp, jpos, fstd):
        """Differential-evolution search box around the candidate flare."""
        return [
            [jpos - 5, jpos + 5],        # flare start
            [1.2, 7.],                   # flare duration
            [0.8 * jamp, 1.2 * jamp],    # amplitude
            [-0.2 * fstd, 0.2 * fstd],   # baseline constant
            [-1e-3, 1e-3],               # baseline slope
        ]

    def _pv0(self, jamp, jpos, fstd):
        """Initial parameter vector for the local optimizer (kept as ndarray)."""
        return array([jpos, 2.5, jamp, fstd, 0])
dmodels = Slope, Jump, Jump2, Transit, Flare
|
OxESREPO_NAMEOxKeplerSCPATH_START.@OxKeplerSC_extracted@OxKeplerSC-master@src@jc@models.py@.PATH_END.py
|
{
"filename": "test_example.py",
"repo_name": "spacetelescope/pystortion",
"repo_path": "pystortion_extracted/pystortion-master/pystortion/tests/test_example.py",
"type": "Python"
}
|
def test_primes():
    """primes(10) must yield the first ten prime numbers."""
    from ..example_mod import primes
    expected = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert primes(10) == expected
def test_deprecation():
    """Emit a DeprecationWarning; must not raise unless deprecations are escalated."""
    import warnings
    message = ("This is deprecated, but shouldn't raise an exception, unless "
               "enable_deprecations_as_exceptions() called from conftest.py")
    warnings.warn(message, DeprecationWarning)
|
spacetelescopeREPO_NAMEpystortionPATH_START.@pystortion_extracted@pystortion-master@pystortion@tests@test_example.py@.PATH_END.py
|
{
"filename": "top__desc.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage-loss-functions/top__desc.md",
"type": "Markdown"
}
|
The number of top samples in a group that is used to calculate the ranking metric. Top samples are those with the largest approx values; when approx values are equal, ties are broken in favour of the samples with the lowest target values.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage-loss-functions@top__desc.md@.PATH_END.py
|
{
"filename": "logger.py",
"repo_name": "cosmostatistics/ebms_mcmc",
"repo_path": "ebms_mcmc_extracted/ebms_mcmc-main/ebms_mcmc/util/logger.py",
"type": "Python"
}
|
import logging
import os
from importlib import reload
def separator() -> None:
    """Log a horizontal-rule line to visually separate log sections."""
    rule = "-----------------------------------------------"
    logging.info(rule)
def init_logger(*, fn: str,
                verbose: bool = True) -> None:
    """
    Initialize the root logger to write to ``<fn>/log`` and, optionally, the console.

    Args:
        fn (str): The path to the log file directory (created if missing).
        verbose (bool, optional): Whether to also log to the console. Defaults to True.
    """
    os.makedirs(fn, exist_ok=True)
    # Reset the logging module so repeated calls start from a clean state.
    reload(logging)
    logger = logging.getLogger()
    logger.handlers.clear()  # Clear existing handlers
    # Single formatter shared by both handlers (the original built a second,
    # identical Formatter for the file handler instead of reusing this one).
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    file_handler = logging.FileHandler(f"{fn}/log", mode='w')
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    if verbose:
        # Console handler (added when verbose is True)
        console_handler = logging.StreamHandler()
        console_handler.setLevel(logging.INFO)
        console_handler.setFormatter(formatter)
        logger.addHandler(console_handler)
    # Set the logging level to INFO for the logger
    logger.setLevel(logging.INFO)
|
cosmostatisticsREPO_NAMEebms_mcmcPATH_START.@ebms_mcmc_extracted@ebms_mcmc-main@ebms_mcmc@util@logger.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "cmccully/astroscrappy",
"repo_path": "astroscrappy_extracted/astroscrappy-master/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
# Make a source checkout of astropy_helpers importable when the docs are
# built from inside the docs/ directory and the package is not installed.
try:
    import astropy_helpers
except ImportError:
    # Building from inside the docs/ directory?
    if os.path.basename(os.getcwd()) == 'docs':
        a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
        if os.path.isdir(a_h_path):
            sys.path.insert(1, a_h_path)
# Load all of the global Astropy configuration.  The star-import defines
# names used below (exclude_patterns, rst_epilog, extensions, ...).
from astropy_helpers.sphinx.conf import *
# Get configuration information from setup.cfg
try:
    # Python 2 module name first, falling back to the Python 3 name.
    from ConfigParser import ConfigParser
except ImportError:
    from configparser import ConfigParser
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# By default, highlight as Python 3.
highlight_language = 'python3'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.  (Currently empty.)
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
    datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.  The package itself is imported so its __version__ is read.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output --------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Please update these texts to match the name of your package.
html_theme_options = {
    'logotext1': 'package', # white, semi-bold
    'logotext2': '-template', # orange, light
    'logotext3': ':docs' # white, light
}
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
                    author, 'manual')]
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
              [author], 1)]
# -- Options for the edit_on_github extension ---------------------------------
# NOTE(review): eval() on a string read from setup.cfg is fragile/unsafe; a
# literal comparison such as `setup_cfg.get('edit_on_github') == 'True'` would
# be safer -- confirm the expected values before changing behavior.
if eval(setup_cfg.get('edit_on_github')):
    extensions += ['astropy_helpers.sphinx.ext.edit_on_github']
    versionmod = __import__(setup_cfg['package_name'] + '.version')
    edit_on_github_project = setup_cfg['github_project']
    if versionmod.version.release:
        edit_on_github_branch = "v" + versionmod.version.version
    else:
        edit_on_github_branch = "master"
    edit_on_github_source_root = ""
    edit_on_github_doc_root = "docs"
# -- Resolving issue number to links in changelog -----------------------------
github_issues_url = 'https://github.com/{0}/issues/'.format(setup_cfg['github_project'])
|
cmccullyREPO_NAMEastroscrappyPATH_START.@astroscrappy_extracted@astroscrappy-master@docs@conf.py@.PATH_END.py
|
{
"filename": "gui.py",
"repo_name": "gammapy/enrico",
"repo_path": "enrico_extracted/enrico-master/enrico/gui.py",
"type": "Python"
}
|
import gtk
import sys,os
import logging
from math import log10,pow
from enrico.extern.configobj import ConfigObj, flatten_errors
from enrico.config import get_config
from enrico.utils import GetIRFS,Checkevtclass
class EnricoGui:
def getirfs(self,widget,data=None):
win = gtk.Window(gtk.WINDOW_TOPLEVEL)
try :
Checkevtclass(self.evclass.get_value())
print(("selection : event class "+ str(self.evclass.get_value())+" and event type "+str(self.evtype.get_value())))
print(("Corresponding IRFs\t=\t",GetIRFS(self.evclass.get_value(),self.evtype.get_value())))
button = gtk.Button("Corresponding IRFs:"+str(GetIRFS(self.evclass.get_value(),self.evtype.get_value())))
except:
button = gtk.Button("evclass value in config file not valid")
button.connect_object("clicked", gtk.Widget.destroy, win)
win.add(button)
button.show()
win.show()
def Launch(self,widget,data=None):
self.save(None,None)
os.system(data+" "+self.infile)
def LaunchEbin(self,widget,data=None):
self.save(None,None)
if self.config["Ebin"]["NumEnergyBins"]>0 :
# os.system("enrico_sed "+self.config["out"]+'/Ebin'+str(self.config["Ebin"]["NumEnergyBins"])+"/*conf" )
os.system("enrico_sed Ebin"+str(self.config["Ebin"]["NumEnergyBins"])+"/*conf" )
def Sync(self, widget, data=None):
self.x.set_value(self.ra.get_value())
self.y.set_value(self.dec.get_value())
    def set_active(self, widget, data):
        """Initialise a toggle *widget* from a 'yes'/'no' config string.

        The widget is first forced inactive, then activated only when *data*
        is exactly 'yes'.  NOTE: kept as two set_active calls; collapsing them
        into one could change "toggled" signal emission -- confirm before
        simplifying.
        """
        widget.set_active(0)
        if data == 'yes':
            widget.set_active(1)
def fct_yesno(self, widget, data=None):
if data=="Spectrum":
self.config["Spectrum"]["FitsGeneration"] = "no"
if widget.get_active():
self.config["Spectrum"]["FitsGeneration"] = "yes"
elif data=="LightCurve":
self.config["LightCurve"]["FitsGeneration"] = "no"
if widget.get_active():
self.config["LightCurve"]["FitsGeneration"] = "yes"
elif data=="AppLC":
self.config["AppLC"]["FitsGeneration"] = "no"
if widget.get_active():
self.config["AppLC"]["FitsGeneration"] = "yes"
elif data=="Ebin":
self.config["Ebin"]["FitsGeneration"] = "no"
if widget.get_active():
self.config["Ebin"]["FitsGeneration"] = "yes"
elif data=="findsrc":
self.config["findsrc"]["FitsGeneration"] = "no"
if widget.get_active():
self.config["findsrc"]["FitsGeneration"] = "yes"
elif data=="verbose":
self.config["verbose"] = "no"
if widget.get_active():
self.config["verbose"] = "yes"
elif data=="clobber":
self.config["clobber"] = "no"
if widget.get_active():
self.config["clobber"] = "yes"
elif data=="Submit":
self.config["Submit"] = "no"
if widget.get_active():
self.config["Submit"] = "yes"
elif data=="refit":
self.config["findsrc"]["Refit"] = "no"
if widget.get_active():
self.config["findsrc"]["Refit"] = "yes"
elif data=="ULenvelope":
self.config["UpperLimit"]["envelope"] = "no"
if widget.get_active():
self.config["UpperLimit"]["envelope"] = "yes"
elif data=="conffile":
self.config["LightCurve"]["MakeConfFile"] = "no"
if widget.get_active():
self.config["LightCurve"]["MakeConfFile"] = "yes"
elif data=="compvar":
self.config["LightCurve"]["ComputeVarIndex"] = "no"
if widget.get_active():
self.config["LightCurve"]["ComputeVarIndex"] = "yes"
elif data=="lcdiagplot":
self.config["LightCurve"]["DiagnosticPlots"] = "no"
if widget.get_active():
self.config["LightCurve"]["DiagnosticPlots"] = "yes"
elif data=="binfromdata":
self.config["AppLC"]["binsFromData"] = "no"
if widget.get_active():
self.config["AppLC"]["binsFromData"] = "yes"
elif data=="tsmap":
self.config["TSMap"]["Re-Fit"] = "no"
if widget.get_active():
self.config["TSMap"]["Re-Fit"] = "yes"
elif data=="removetgr":
self.config["TSMap"]["RemoveTarget"] = "no"
if widget.get_active():
self.config["TSMap"]["RemoveTarget"] = "yes"
elif data=="Diffresp":
self.config["analysis"]["ComputeDiffrsp"] = "no"
if widget.get_active():
self.config["analysis"]["ComputeDiffrsp"] = "yes"
elif data=="roicut":
self.config["analysis"]["roicut"] = "no"
if widget.get_active():
self.config["analysis"]["roicut"] = "yes"
elif data=="fittau":
self.config["target"]["fit_tau"] = "no"
if widget.get_active():
self.config["target"]["fit_tau"] = "yes"
# def fct_rappel(self, widget, data=None):
# print "Le %s a ete %s." % (data, ("desactive", "active")[widget.get_active()])
    def delete(self, widget, event=None):
        """Window delete-event handler: stop the GTK main loop.

        Returning False lets GTK continue with the default destroy handling.
        """
        gtk.main_quit()
        return False
    def reload(self, widget, event=None):
        """Destroy the current window and rebuild the whole GUI from the config file."""
        gtk.Widget.destroy(self.window)
        EnricoGui(self.infile)
        return False
def change(self,widget,data=None):
try :
pixbuf = gtk.gdk.pixbuf_new_from_file(data)
pixbuf = pixbuf.scale_simple(600, 300, gtk.gdk.INTERP_BILINEAR)
self.image.set_from_pixbuf(pixbuf)
except:
logging.error('picture file '+data+' not found.')
def save(self, widget, data=None):
self.config['out'] = self.fout.get_current_folder()
self.config["UpperLimit"]["SpectralIndex"] = self.ULindex.get_value()
self.config["Spectrum"]["FrozenSpectralIndex"] = self.index.get_value()
self.config["Spectrum"]["cl"] = self.ULcl.get_value()
self.config["file"]["xml"] = self.fxml.get_filename()
self.config["file"]["spacecraft"] = self.fsc.get_filename()
self.config["file"]["event"] = self.fevent.get_filename()
self.config["file"]["tag"] = self.ftag.get_text()
self.config["target"]["name"] = self.fname.get_text()
self.config["target"]["ra"] = self.ra.get_value()
self.config["target"]["dec"] = self.dec.get_value()
self.config["target"]["spectrum"] = self.listSpec.entry.get_text()
self.config["target"]["ebl_model"] = int(self.eblmodel.get_value())
self.config["target"]["redshift"] = self.redshift.get_value()
self.config["space"]["xref"] = self.x.get_value()
self.config["space"]["yref"] = self.y.get_value()
self.config["space"]["phibins"] = int(self.phibin.get_value())
self.config["space"]["rad"] = self.rad.get_value()
self.config["space"]["binsz"] = self.binsz.get_value()
self.config["space"]["coordsys"] = self.listSys.entry.get_text()
self.config["space"]["proj"] = self.listProj.entry.get_text()
self.config["energy"]["emin"] = self.emin.get_value()
self.config["energy"]["emax"] = self.emax.get_value()
self.config["energy"]["enumbins_per_decade"] = int(self.nbindec.get_value())
self.config["time"]["tmin"] = self.tmin.get_value()
self.config["time"]["tmax"] = self.tmax.get_value()
if self.ftime.get_filename()==None:
self.config['time']['file'] = ""
else:
self.config['time']['file'] = self.ftime.get_filename()
self.config["time"]["type"] = self.listtime.entry.get_text()
self.config["Ebin"]["NumEnergyBins"] = int(self.nebin.get_value())
self.config["Ebin"]["TSEnergyBins"] = int(self.tsebin.get_value())
self.config["LightCurve"]["NLCbin"] = int(self.nlcbin.get_value())
self.config["LightCurve"]["index"] = self.lcindex.get_value()
self.config["AppLC"]["index"] = self.applcindex.get_value()
self.config["AppLC"]["NLCbin"] = int(self.applcNbin.get_value())
self.config["FoldedLC"]["NLCbin"] =int(self.follcNbin.get_value())
self.config["FoldedLC"]["epoch"] =(self.folepoch.get_value())
self.config["FoldedLC"]["Period"] =(self.folperiod.get_value())
self.config["TSMap"]["npix"] = int(self.tsmapnpix.get_value())
self.config["TSMap"]["method"] = self.listtsmethod.entry.get_text()
self.config["analysis"]["likelihood"] = self.listchain.entry.get_text()
self.config["analysis"]["zmax"] = self.zmax.get_value()
self.config["analysis"]["filter"] = self.filter.get_text()
self.config["event"]["evclass"] = int(self.evclass.get_value())
self.config["event"]["evtype"] = int(self.evtype.get_value())
self.config["event"]["irfs"] = self.irfs.get_text()
self.config["fitting"]["optimizer"] = self.listopt.entry.get_text()
self.config["fitting"]["ftol"] = pow(10,self.ftol.get_value())
self.config["srcprob"]["rad"] = self.radsrcprob.get_value()
self.config["srcprob"]["srclist"] = self.srclist.get_filename()
self.config.write(open(self.infile, 'w'))
# def load(self, widget, data=None):
# print "loading file ",self.fc.get_filename()
# self.config = get_config(self.fc.get_filename())
def AddBlocNotePage(self,lab='bn'):
frame = gtk.Table(6, 5, True)
frame.set_size_request(600, 400)
frame.show()
label = gtk.Label(lab)
self.notebloc.append_page(frame, label)
return frame
def AddSpinButton(self,val,mini,maxi,incre,decimal):
button = gtk.SpinButton(gtk.Adjustment(val, mini, maxi, incre), .5,decimal)
button.set_numeric(True)
button.show()
return button
# def _addconfigBN(self):
# BNPage = self.AddBlocNotePage("Config file")
# self.fc = gtk.FileChooserButton("Config file")
# self.fc.set_title("config file")
# self.fc.set_size_request(600, 400)
# self.fc.show()
# BNPage.attach(self.fc, 4, 8, 0, 1)
# button = gtk.Button("Load config File")
# button.connect("clicked", self.load, None)
# button.show()
# BNPage.attach(button, 5, 8, 6, 8)
def _addFrame(self,title,x,y):
frame = gtk.Frame(title)
table = gtk.Table(x, y, True)
table.show()
frame.add(table)
frame.show()
return frame,table
    def _addTargetSpace(self):
        """Build the "Target/Space" notebook page: target identity/position
        widgets and the spatial-binning widgets, bound to self.config."""
        BNPage = self.AddBlocNotePage("Target/Space")
        # --- Target frame: name, RA/Dec, spectral model, redshift, EBL ---
        frameTarget,tableTarget = self._addFrame("Target",2,3)
        BNPage.attach(frameTarget, 0, 8, 0, 2)
        self.ra = self.AddSpinButton(self.config["target"]["ra"], 0, 360, .01,2)
        self.dec = self.AddSpinButton(self.config["target"]["dec"], -90, 90, .01,2)
        self.fname = gtk.Entry()
        self.fname.set_text(self.config['target']['name'])
        self.fname.show()
        label = gtk.Label("Target name")
        label.show()
        tableTarget.attach(label, 0,1,0,1)
        tableTarget.attach(self.fname, 1,3,0,1)
        label = gtk.Label("RA")
        label.show()
        tableTarget.attach(label, 4, 5,0,1)
        tableTarget.attach(self.ra,5,6,0,1)
        label = gtk.Label("Dec")
        label.show()
        tableTarget.attach(label,4, 5,1, 2)
        tableTarget.attach(self.dec,5,6,1, 2)
        # Spectral-model combo: the configured value is moved to the front so
        # it is pre-selected.
        self.listSpec = gtk.Combo()
        self.listSpec.entry.set_text("Spectrum")
        listoption = [ "PowerLaw", "PowerLaw2", "LogParabola", "PLExpCutoff", "Generic" ]
        listoption.remove(self.config['target']['spectrum'])
        listoption.insert(0,self.config['target']['spectrum'])
        self.listSpec.set_popdown_strings(listoption)
        self.listSpec.show()
        label = gtk.Label("Model")
        label.show()
        tableTarget.attach(label, 0,1,1, 2)
        tableTarget.attach(self.listSpec, 1,3,1, 2)
        self.redshift = self.AddSpinButton(self.config["target"]["redshift"], 0, 7, 0.001, 3)
        label = gtk.Label("redshift")
        label.show()
        tableTarget.attach(label,0,1,2,3)
        tableTarget.attach(self.redshift,1,2,2,3)
        self.eblmodel = self.AddSpinButton(self.config["target"]["ebl_model"], 1, 7, 1, 0)
        label = gtk.Label("EBL model")
        label.show()
        tableTarget.attach(label, 2,3,2,3)
        tableTarget.attach(self.eblmodel,3,4,2,3)
        MethodButton = gtk.CheckButton("")
        MethodButton.connect("toggled", self.fct_yesno, "fittau")
        self.set_active(MethodButton,self.config["target"]["fit_tau"])
        MethodButton.show()
        labts = gtk.Label("fit tau")
        labts.show()
        tableTarget.attach(labts,4,5,2,3)
        tableTarget.attach(MethodButton, 5,6,2,3)
        # --- Space frame: map reference point, ROI, binning, projection ---
        frameSpace,tableSpace = self._addFrame("Space",2,3)
        BNPage.attach(frameSpace, 0, 8, 2, 6)
        self.x = self.AddSpinButton(self.config["space"]["xref"], 0, 360, .01,2)
        self.y = self.AddSpinButton(self.config["space"]["yref"], -90, 90, .01,2)
        label = gtk.Label("Xref")
        label.show()
        tableSpace.attach(label, 0,1,1, 2)
        tableSpace.attach(self.x,1,2,1, 2)
        label = gtk.Label("Yref")
        label.show()
        tableSpace.attach(label,2, 3,1, 2)
        tableSpace.attach(self.y,3,4,1,2)
        # Button copying RA/Dec into Xref/Yref (see Sync).
        syncButton = gtk.Button("sync with RA-Dec")
        # syncButton.set_size_request(20,20)
        syncButton.connect("clicked", self.Sync, "")
        syncButton.show()
        tableSpace.attach(syncButton,2,4,2,3)
        self.rad = self.AddSpinButton(self.config["space"]["rad"],0, 360, .1, 1)
        self.binsz = self.AddSpinButton(self.config["space"]["binsz"], 0, 1, .01, 2)
        self.phibin = self.AddSpinButton(self.config["space"]["phibins"], 0, 40, 1, 0)
        label = gtk.Label("ROI")
        label.show()
        tableSpace.attach(label, 0,1,0, 1)
        tableSpace.attach(self.rad,1,2,0,1)
        label = gtk.Label("Bin size")
        label.show()
        tableSpace.attach(label,2, 3,0,1)
        tableSpace.attach(self.binsz,3,4,0,1)
        label = gtk.Label("Number of bin in phi (default 0)")
        label.show()
        tableSpace.attach(label,0, 3,4,5)
        tableSpace.attach(self.phibin,3,4,4,5)
        self.listProj = gtk.Combo()
        self.listProj.entry.set_text("Projection")
        listoption = [ "AIT","ARC","CAR","GLS","MER","NCP","SIN","STG","TAN" ]
        listoption.remove(self.config['space']['proj'])
        listoption.insert(0,self.config['space']['proj'])
        self.listProj.set_popdown_strings(listoption)
        self.listProj.show()
        label = gtk.Label("Projection")
        label.show()
        tableSpace.attach(label,0,1,3,4)
        tableSpace.attach(self.listProj,1,2,3,4)
        self.listSys = gtk.Combo()
        self.listSys.entry.set_text("Systeme")
        listoption = [ "CEL", "GAL" ]
        listoption.remove(self.config['space']['coordsys'])
        listoption.insert(0,self.config['space']['coordsys'])
        self.listSys.set_popdown_strings(listoption)
        self.listSys.show()
        label = gtk.Label("Systeme")
        label.show()
        tableSpace.attach(label,2,3,3,4)
        tableSpace.attach(self.listSys,3,4,3,4)
    def _addMainOption(self):
        """Build the "Main options" notebook page: general options (output
        directory, verbose/clobber/submit toggles) and one launcher button
        per enrico command-line tool."""
        BNPage = self.AddBlocNotePage("Main options")
        # --- General options frame ---
        frame,table = self._addFrame("General options",2,3)
        self.fout = gtk.FileChooserButton("Config file")
        self.fout.set_current_folder(self.config['out'])
        self.fout.set_action(gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER)
        self.fout.show()
        # self.fout.set_visible(True)
        VerboseButton = gtk.CheckButton("verbose")
        VerboseButton.connect("toggled", self.fct_yesno, "verbose")
        self.set_active(VerboseButton,self.config["verbose"])
        VerboseButton.show()
        ClobberButton = gtk.CheckButton("Clobber")
        ClobberButton.connect("toggled", self.fct_yesno, "clobber")
        self.set_active(ClobberButton,self.config["clobber"])
        ClobberButton.show()
        SubmitButton = gtk.CheckButton("Submit jobs")
        SubmitButton.connect("toggled", self.fct_yesno, "Submit")
        self.set_active(SubmitButton,self.config["Submit"])
        SubmitButton.show()
        label = gtk.Label("Output directory")
        label.show()
        table.attach(label, 0,1, 0, 1)
        table.attach(self.fout, 1,3, 0, 1,gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL, 5, 1)
        table.attach(VerboseButton, 0,1, 1, 2)
        table.attach(ClobberButton,1,2,1, 2)
        table.attach(SubmitButton,2,3,1, 2)
        BNPage.attach(frame, 0, 8, 0, 2)
        # --- Launcher frame: each button runs one enrico tool via Launch ---
        framebut,tablebut = self._addFrame("Launcher",2,5)
        sedbutton = gtk.Button("run enrico_ sed")
        sedbutton.connect("clicked", self.Launch, 'enrico_sed')
        sedbutton.show()
        tablebut.attach(sedbutton,0, 2,0,1)
        lcbutton = gtk.Button("run enrico_ lc")
        lcbutton.connect("clicked", self.Launch, 'enrico_lc')
        lcbutton.show()
        tablebut.attach(lcbutton, 2,4,0,1)
        tsbutton = gtk.Button("run enrico_ tsmap")
        tsbutton.connect("clicked", self.Launch, 'enrico_tsmap')
        tsbutton.show()
        tablebut.attach(tsbutton, 4,6,0,1)
        sedplotbutton = gtk.Button("run enrico_ plot_ sed")
        sedplotbutton.connect("clicked", self.Launch, 'enrico_plot_sed')
        sedplotbutton.show()
        tablebut.attach(sedplotbutton,0, 2,1,2)
        lcplotbutton = gtk.Button("run enrico_ plot_ lc")
        lcplotbutton.connect("clicked", self.Launch, 'enrico_plot_lc')
        lcplotbutton.show()
        tablebut.attach(lcplotbutton, 2,4,1,2)
        tsplotbutton = gtk.Button("run enrico_ plot_ tsmap")
        tsplotbutton.connect("clicked", self.Launch, 'enrico_plot_tsmap')
        tsplotbutton.show()
        tablebut.attach(tsplotbutton, 4,6,1,2)
        sep2 = gtk.HSeparator()
        sep2.show()
        tablebut.attach(sep2, 0,6,2,3)
        xmlbutton = gtk.Button("run enrico_ xml")
        xmlbutton.connect("clicked", self.Launch, 'enrico_xml')
        xmlbutton.show()
        tablebut.attach(xmlbutton,0, 2,3,4)
        modelbutton = gtk.Button("run enrico_ testmodel")
        modelbutton.connect("clicked", self.Launch, 'enrico_testmodel')
        modelbutton.show()
        tablebut.attach(modelbutton, 2,4,3,4)
        findsrcbutton = gtk.Button("run enrico_ findsrc")
        findsrcbutton.connect("clicked", self.Launch, 'enrico_findsrc')
        findsrcbutton.show()
        tablebut.attach(findsrcbutton, 4,6,3,4)
        srcprobbutton = gtk.Button("run enrico_ srcprob")
        srcprobbutton.connect("clicked", self.Launch, 'enrico_srcprob')
        srcprobbutton.show()
        tablebut.attach(srcprobbutton, 2,4,4,5)
        sep2 = gtk.HSeparator()
        sep2.show()
        tablebut.attach(sep2, 0,6,5,6)
        applcbutton = gtk.Button("run enrico_ applc")
        applcbutton.connect("clicked", self.Launch, 'enrico_applc')
        applcbutton.show()
        tablebut.attach(applcbutton,0, 2,6,7)
        foldedlcbutton = gtk.Button("run enrico_ foldedlc")
        foldedlcbutton.connect("clicked", self.Launch, 'enrico_foldedlc')
        foldedlcbutton.show()
        tablebut.attach(foldedlcbutton, 2,4,6,7)
        foldedlcplotbutton = gtk.Button("run enrico_ plot_ foldedlc")
        foldedlcplotbutton.connect("clicked", self.Launch, 'enrico_plot_foldedlc')
        foldedlcplotbutton.show()
        tablebut.attach(foldedlcplotbutton, 4,6,6,7)
        BNPage.attach(framebut, 0, 8, 2,7)
    def _addAnalysis(self):
        """Build the "Analysis" notebook page: likelihood chain and analysis
        options, event-selection widgets, and fitting (optimizer) options."""
        BNPage = self.AddBlocNotePage("Analysis")
        frameAna,tableAna = self._addFrame("Analysis options",2,4)
        frameevt,tableevt = self._addFrame("events selection",2,4)
        frameFit,tableFit = self._addFrame("Fitting options",2,4)
        # --- Analysis options frame ---
        BNPage.attach(frameAna, 0, 8, 0, 4)
        # Likelihood-chain combo with the configured value pre-selected.
        self.listchain = gtk.Combo()
        self.listchain.entry.set_text("Chain")
        listoption = [ "unbinned","binned" ]
        listoption.remove(self.config['analysis']['likelihood'])
        listoption.insert(0,self.config['analysis']['likelihood'])
        self.listchain.set_popdown_strings(listoption)
        self.listchain.show()
        label = gtk.Label("Chain")
        label.show()
        tableAna.attach(label,0,1,0,1)
        tableAna.attach(self.listchain,1,2,0,1)
        FitButton = gtk.CheckButton("")
        FitButton.connect("toggled", self.fct_yesno, "Diffresp")
        self.set_active(FitButton,self.config["analysis"]["ComputeDiffrsp"])
        FitButton.show()
        label = gtk.Label("Compute diffuse response (only for UNBINNED)")
        label.show()
        tableAna.attach(label,0, 3,1,2)
        tableAna.attach(FitButton,3,4,1,2)
        self.zmax = self.AddSpinButton(self.config["analysis"]["zmax"], 50, 180, 1, 0)
        label = gtk.Label("zmax")
        label.show()
        tableAna.attach(label, 0,1,2,3)
        tableAna.attach(self.zmax,1,2,2,3)
        FitButton = gtk.CheckButton("")
        FitButton.connect("toggled", self.fct_yesno, "roicut")
        self.set_active(FitButton,self.config["analysis"]["roicut"])
        FitButton.show()
        label = gtk.Label("ROI cut")
        label.show()
        tableAna.attach(label,2, 3,2,3)
        tableAna.attach(FitButton,3,4,2,3)
        self.filter = gtk.Entry()
        self.filter.set_text(self.config['analysis']['filter'])
        self.filter.show()
        label = gtk.Label("filter")
        label.show()
        tableAna.attach(label, 0,1,3,4)
        tableAna.attach(self.filter, 1,4,3,4)
        # self.convtype = self.AddSpinButton(self.config["analysis"]["convtype"], -1,1, 1, 0)
        # label = gtk.Label("convtype")
        # label.show()
        # tableAna.attach(label, 2,3,4,5)
        # tableAna.attach(self.convtype,3,4,4,5)
        # --- Event-selection frame ---
        BNPage.attach(frameevt, 0, 8, 4, 6)
        self.irfs = gtk.Entry()
        self.irfs.set_text(self.config['event']['irfs'])
        self.irfs.show()
        label = gtk.Label("IRFs")
        label.show()
        tableevt.attach(label, 1,2,0,1)
        tableevt.attach(self.irfs, 3,5,0,1)
        self.evclass = self.AddSpinButton(self.config["event"]["evclass"], 1, 16777216, 1, 0)
        label = gtk.Label("Event Class")
        label.show()
        tableevt.attach(label, 0,1,1,2)
        tableevt.attach(self.evclass,1,2,1,2)
        self.evtype = self.AddSpinButton(self.config["event"]["evtype"], 1, 512, 1, 0)
        label = gtk.Label("Event Type")
        label.show()
        tableevt.attach(label, 2,3,1,2)
        tableevt.attach(self.evtype,3,4,1,2)
        irfsbutton = gtk.Button("Check IRFS")
        irfsbutton.connect("clicked", self.getirfs, "")
        irfsbutton.show()
        tableevt.attach(irfsbutton,5,6,1,2)
        # --- Fitting options frame ---
        BNPage.attach(frameFit, 0, 8, 6, 8)
        self.listopt = gtk.Combo()
        self.listopt.entry.set_text("Optimizer")
        listoption = [ 'MINUIT', 'DRMNGB', 'DRMNFB', 'NEWMINUIT' ]
        listoption.remove(self.config['fitting']['optimizer'])
        listoption.insert(0,self.config['fitting']['optimizer'])
        self.listopt.set_popdown_strings(listoption)
        self.listopt.show()
        label = gtk.Label("Optimizer")
        label.show()
        tableFit.attach(label,0,1,0,1)
        tableFit.attach(self.listopt,1,2,0,1)
        # The tolerance is edited on a log10 scale (see save(): ftol = 10**value).
        self.ftol = self.AddSpinButton(log10(self.config["fitting"]["ftol"]), -9,-3, 1, 0)
        label = gtk.Label("log(tolerance)")
        label.show()
        tableFit.attach(label,0,1,1,2)
        tableFit.attach(self.ftol,1,2,1,2)
def _addFiles(self):
    """Build the "Files" notebook page: choosers for the event, spacecraft
    and XML model files plus a free-text analysis tag.

    Widgets stored on self (fevent, fsc, fxml, ftag) are read back later
    when the configuration is saved.
    """
    BNPage = self.AddBlocNotePage("Files")
    frame,table = self._addFrame("File options",4,2)
    BNPage.attach(frame, 0, 8, 0, 4)
    # each chooser is pre-seeded from the current configuration
    self.fevent = gtk.FileChooserButton("Event file")
    self.fevent.set_title("Event file")
    self.fevent.set_filename(self.config['file']['event'])
    self.fevent.show()
    self.fsc = gtk.FileChooserButton("Spacecraft file")
    self.fsc.set_title("Spacecraft file")
    self.fsc.set_filename(self.config['file']['spacecraft'])
    self.fsc.show()
    self.fxml = gtk.FileChooserButton("XML file")
    self.fxml.set_title("XML file")
    self.fxml.set_filename(self.config['file']['xml'])
    self.fxml.show()
    # lay the three label/chooser rows out on the 4x2 table
    label = gtk.Label("Event file")
    label.show()
    table.attach(label, 0,1, 0, 1)
    table.attach(self.fevent, 1,3, 0, 1,gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL, 5, 1)
    label = gtk.Label("spacecraft file")
    label.show()
    table.attach(label, 0,1, 1,2)
    table.attach(self.fsc, 1,3, 1,2,gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL, 5, 1)
    label = gtk.Label("XML file")
    label.show()
    table.attach(label, 0,1, 2,3)
    table.attach(self.fxml, 1,3, 2,3,gtk.EXPAND | gtk.FILL, gtk.EXPAND | gtk.FILL, 5, 1)
    # free-form tag appended to output file names
    self.ftag = gtk.Entry()
    self.ftag.set_text(self.config['file']['tag'])
    self.ftag.show()
    label = gtk.Label("tag")
    label.show()
    table.attach(label, 0,1,3,4)
    table.attach(self.ftag, 1,2,3,4)
def _addEnergyTime(self):
    """Build the "Energy/Time" notebook page: energy range/binning spinners
    and the observation time window (MET) controls.

    Fix over original: the on-screen label "Time definition file (optinal)"
    had a typo; it now reads "(optional)".
    """
    BNPage = self.AddBlocNotePage("Energy/Time")
    frameEner,tableEner = self._addFrame("Energy",2,2)
    frameTime,tableTime = self._addFrame("Time",3,2)
    BNPage.attach(frameEner, 0, 8, 0, 2)
    # energy range in MeV and number of bins per decade
    self.emin = self.AddSpinButton(self.config["energy"]["emin"], 0, 1e6, 100, 0)
    self.emax = self.AddSpinButton(self.config["energy"]["emax"], 0, 1e6, 100, 0)
    self.nbindec = self.AddSpinButton(self.config["energy"]["enumbins_per_decade"], 0, 40, 1, 0)
    label = gtk.Label("Emin (MeV)")
    label.show()
    tableEner.attach(label, 0,1,0, 1)
    tableEner.attach(self.emin,1,2,0,1)
    label = gtk.Label("Emax (MeV)")
    label.show()
    tableEner.attach(label,2, 3,0,1)
    tableEner.attach(self.emax,3,4,0,1)
    label = gtk.Label("Number of bin per decade (default 10)")
    label.show()
    tableEner.attach(label, 0,3,1, 2)
    tableEner.attach(self.nbindec,3,4,1,2)
    BNPage.attach(frameTime, 0, 8, 2, 5)
    # time window; the lower bound 239557418 is a mission-epoch MET value
    self.tmin = self.AddSpinButton(self.config["time"]["tmin"], 239557418., 1e10, 1, 1)
    self.tmax = self.AddSpinButton(self.config["time"]["tmax"], 239557418., 1e10, 1, 1)
    label = gtk.Label("Tmin")
    label.show()
    tableTime.attach(label, 0,1,0, 1)
    tableTime.attach(self.tmin,1,2,0,1)
    label = gtk.Label("Tmax")
    label.show()
    tableTime.attach(label,2, 3,0,1)
    tableTime.attach(self.tmax,3,4,0,1)
    # optional file listing the time intervals to analyse
    self.ftime = gtk.FileChooserButton("Time definition file")
    self.ftime.set_title("Time definition file")
    self.ftime.set_filename(self.config['time']['file'])
    self.ftime.show()
    label = gtk.Label("Time definition file (optional)")
    label.show()
    tableTime.attach(label,0, 2,1,2)
    tableTime.attach(self.ftime,2,4,1,2)
    # unit selector; the configured value is moved to the top of the list
    self.listtime = gtk.Combo()
    self.listtime.entry.set_text("time")
    listoption = [ "MET", "MJD", "JD" ]
    listoption.remove(self.config['time']['type'])
    listoption.insert(0,self.config['time']['type'])
    self.listtime.set_popdown_strings(listoption)
    self.listtime.show()
    label = gtk.Label("Unit (type)")
    label.show()
    tableTime.attach(label, 0,2, 2,3)
    tableTime.attach(self.listtime, 2,3, 2,3)
def _addSpectrum(self):
    """Build the "Spectrum/Ebin" notebook page: spectrum-generation toggles
    plus the energy-bin options and a button to re-run only the Ebin step."""
    BNPage = self.AddBlocNotePage("Spectrum/Ebin")
    frame,table = self._addFrame("Spectrum options",2,4)
    BNPage.attach(frame, 0, 8, 0, 4)
    # yes/no toggles all route through self.fct_yesno with the config section
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "Spectrum")
    self.set_active(FitButton,self.config["Spectrum"]["FitsGeneration"])
    FitButton.show()
    PlotButton = gtk.CheckButton("")
    PlotButton.connect("toggled", self.fct_yesno, "Spectrum")
    self.set_active(PlotButton,self.config["Spectrum"]["ResultPlots"])
    PlotButton.show()
    self.index = self.AddSpinButton(self.config["Spectrum"]["FrozenSpectralIndex"], 0, 5, .1, 2)
    SummedButton = gtk.CheckButton("")
    SummedButton.connect("toggled", self.fct_yesno, "Spectrum")
    self.set_active(SummedButton,self.config["Spectrum"]["SummedLike"])
    SummedButton.show()
    labfits = gtk.Label("Generation of the fits files")
    labfits.show()
    table.attach(labfits,0, 2,0, 1)
    labplots = gtk.Label("Generation of the plots")
    labplots.show()
    table.attach(labplots,0, 2 ,1, 2)
    labindex = gtk.Label("Frozen Spectral Index value (no effect if 0)")
    labindex.show()
    table.attach(labindex,0, 3 ,2,3)
    labsumm = gtk.Label("Used the summed likelihood")
    labsumm.show()
    table.attach(labsumm,0, 2 ,3, 4)
    table.attach(FitButton, 3,4,0, 1)
    table.attach(PlotButton, 3,4,1, 2)
    table.attach(self.index, 3,4,2,3)
    table.attach(SummedButton, 3,4,3, 4)
    # second frame: energy-bin (Ebin) options
    frameebin,tableebin = self._addFrame("Energy bins options",2,4)
    BNPage.attach(frameebin, 0, 8, 4, 7)
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "Ebin")
    self.set_active(FitButton,self.config["Ebin"]["FitsGeneration"])
    FitButton.show()
    labfits = gtk.Label("Generation of the fits files")
    labfits.show()
    tableebin.attach(labfits,0, 2,0, 1)
    tableebin.attach(FitButton, 3,4,0, 1)
    self.nebin = self.AddSpinButton(self.config["Ebin"]["NumEnergyBins"], 0, 30, 1, 0)
    label = gtk.Label("Number of bins")
    label.show()
    tableebin.attach(label,0, 2,1,2)
    tableebin.attach(self.nebin,3,4,1,2)
    self.tsebin = self.AddSpinButton(self.config["Ebin"]["TSEnergyBins"],0, 1000, .1, 1)
    label = gtk.Label("Minimal TS")
    label.show()
    tableebin.attach(label,0, 2,2,3)
    tableebin.attach(self.tsebin,3,4,2,3)
    # button to trigger only the Ebin computation
    label = gtk.Label("Re-run the Ebin calculation only")
    label.show()
    ebinbutton = gtk.Button("Re-run Ebin")
    ebinbutton.connect("clicked", self.LaunchEbin, '')
    ebinbutton.show()
    tableebin.attach(label,0, 2,3,4)
    tableebin.attach(ebinbutton,3,4,3,4)
def _addUL(self):
    """Build the "Upper Limits" notebook page: assumed index, minimal TS,
    confidence level and the envelope-UL toggle."""
    BNPage = self.AddBlocNotePage("Upper Limits")
    frame,table = self._addFrame("Upper Limits options",2,4)
    BNPage.attach(frame, 0, 8, 0, 4)
    self.ULindex = self.AddSpinButton(self.config["UpperLimit"]["SpectralIndex"], 0, 5, .1,2)
    MethodButton = gtk.CheckButton("")
    MethodButton.connect("toggled", self.fct_yesno, "ULenvelope")
    self.set_active(MethodButton,self.config["UpperLimit"]["envelope"])
    MethodButton.show()
    self.ULcl = self.AddSpinButton(self.config["UpperLimit"]["cl"], 0, 1, .01,2)
    self.TSlim = self.AddSpinButton(self.config["UpperLimit"]["TSlimit"], 0, 1000, .1, 1)
    # labels in column 0-2, controls in column 3-4, one row each
    labindex = gtk.Label("Assumed Spectral Index value")
    labindex.show()
    table.attach(labindex,0, 2 ,0,1)
    labts = gtk.Label("Minimal TS")
    labts.show()
    table.attach(labts,0, 2 ,1,2)
    labts = gtk.Label("Confidence level")
    labts.show()
    table.attach(labts,0, 2 ,2,3)
    labts = gtk.Label("Envelope UL")
    labts.show()
    table.attach(labts,0, 2 ,3,4)
    table.attach(self.ULindex, 3,4,0,1)
    table.attach(self.TSlim, 3,4,1,2)
    table.attach(self.ULcl, 3,4,2,3)
    table.attach(MethodButton, 3,4,3,4)
def _addLC(self):
    """Build the "Light Curves" notebook page: binning, spectral index and
    the various yes/no generation options."""
    BNPage = self.AddBlocNotePage("Light Curves")
    frame,table = self._addFrame("Light Curves options",2,4)
    BNPage.attach(frame, 0, 8, 0, 4)
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "LightCurve")
    self.set_active(FitButton,self.config["LightCurve"]["FitsGeneration"])
    FitButton.show()
    labfits = gtk.Label("Generation of the fits files")
    labfits.show()
    table.attach(labfits,0, 2,0, 1)
    table.attach(FitButton, 3,4,0, 1)
    self.nlcbin = self.AddSpinButton(self.config["LightCurve"]["NLCbin"], 0, 1e6, 1, 0)
    label = gtk.Label("Number of bins")
    label.show()
    table.attach(label,0, 2,1,2)
    table.attach(self.nlcbin,3,4,1,2)
    self.lcindex = self.AddSpinButton(self.config["LightCurve"]["SpectralIndex"], 0, 5, .1, 2)
    label = gtk.Label("Spectral Index")
    label.show()
    table.attach(label,0, 2,2,3)
    table.attach(self.lcindex,3,4,2,3)
    # the FitButton local is reused for each subsequent toggle; only the
    # callback tag and config entry differ
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "conffile")
    self.set_active(FitButton,self.config["LightCurve"]["MakeConfFile"])
    FitButton.show()
    label = gtk.Label("Make config file")
    label.show()
    table.attach(label,0, 2,3,4)
    table.attach(FitButton,3,4,3,4)
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "compvar")
    self.set_active(FitButton,self.config["LightCurve"]["ComputeVarIndex"])
    FitButton.show()
    label = gtk.Label("Compute variability index")
    label.show()
    table.attach(label,0, 2,4,5)
    table.attach(FitButton,3,4,4,5)
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "lcdiagplot")
    self.set_active(FitButton,self.config["LightCurve"]["DiagnosticPlots"])
    FitButton.show()
    label = gtk.Label("Make diagnostic plots")
    label.show()
    table.attach(label,0, 2,5,6)
    table.attach(FitButton,3,4,5,6)
# def _addFoldedLC(self):
# BNPage = self.AddBlocNotePage("Folded LC")
# frame,table = self._addFrame("Folded Light Curves options",2,4)
# BNPage.attach(frame, 0, 8, 0, 4)
def _addAppFoldedLC(self):
    """Build the "Aperture/Folded LC" notebook page: aperture-photometry
    light-curve options and folded light-curve options."""
    BNPage = self.AddBlocNotePage("Aperture/Folded LC")
    frameapp,tableapp = self._addFrame("Aperture photometry options",2,4)
    BNPage.attach(frameapp, 0, 8, 0, 4)
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "AppLC")
    self.set_active(FitButton,self.config["AppLC"]["FitsGeneration"])
    FitButton.show()
    labfits = gtk.Label("Generation of the fits files")
    labfits.show()
    tableapp.attach(labfits,0, 2,0, 1)
    tableapp.attach(FitButton, 3,4,0, 1)
    self.applcindex = self.AddSpinButton(self.config["AppLC"]["index"], 0, 5, .1, 2)
    label = gtk.Label("Spectral Index")
    label.show()
    tableapp.attach(label,0, 2,1,2)
    tableapp.attach(self.applcindex,3,4,1,2)
    self.applcNbin = self.AddSpinButton(self.config["AppLC"]["NLCbin"], 0, 1e6, 1, 0)
    label = gtk.Label("Number of bins")
    label.show()
    tableapp.attach(label,0, 2,2,3)
    tableapp.attach(self.applcNbin,3,4,2,3)
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "binfromdata")
    self.set_active(FitButton,self.config["AppLC"]["binsFromData"])
    FitButton.show()
    labfits = gtk.Label("Make bin from data")
    labfits.show()
    tableapp.attach(labfits,0, 2,3,4)
    tableapp.attach(FitButton, 3,4,3,4)
    # second frame: folded light-curve parameters (bins, epoch, period)
    framefol,tablefol = self._addFrame("Folded LightCurves options",2,4)
    BNPage.attach(framefol, 0, 8, 4, 8)
    self.follcNbin = self.AddSpinButton(self.config["FoldedLC"]["NLCbin"], 0, 1e6, 1, 0)
    label = gtk.Label("Number of bins")
    label.show()
    tablefol.attach(label,0, 2,0,1)
    tablefol.attach(self.follcNbin,3,4,0,1)
    self.folepoch = self.AddSpinButton(self.config["FoldedLC"]["epoch"], 0, 1e10, 1, 0)
    label = gtk.Label("Epoch")
    label.show()
    tablefol.attach(label,0, 2,1,2)
    tablefol.attach(self.folepoch,3,4,1,2)
    self.folperiod = self.AddSpinButton(self.config["FoldedLC"]["Period"], 0, 1e10, 1, 0)
    label = gtk.Label("Period")
    label.show()
    tablefol.attach(label,0, 2,2,3)
    tablefol.attach(self.folperiod,3,4,2,3)
def _addTSMap(self):
    """Build the "TS Map" notebook page: re-fit toggle, map size,
    target removal and the per-row/per-pixel computation method."""
    BNPage = self.AddBlocNotePage("TS Map")
    frame,table = self._addFrame("TS Map options",2,4)
    BNPage.attach(frame, 0, 8, 0, 3)
    reFitButton = gtk.CheckButton("")
    reFitButton.connect("toggled", self.fct_yesno, "tsmap")
    self.set_active(reFitButton,self.config["TSMap"]["Re-Fit"])
    reFitButton.show()
    label = gtk.Label("Re-fit")
    label.show()
    table.attach(label,0, 2,0,1)
    table.attach(reFitButton, 3,4,0,1)
    self.tsmapnpix = self.AddSpinButton(self.config["TSMap"]["npix"], 0, 1000, 1, 0)
    label = gtk.Label("Number of pixel")
    label.show()
    table.attach(label,0, 2,1,2)
    table.attach(self.tsmapnpix,3,4,1,2)
    # reFitButton local is reused for the remove-target toggle
    reFitButton = gtk.CheckButton("")
    reFitButton.connect("toggled", self.fct_yesno, "removetgr")
    self.set_active(reFitButton,self.config["TSMap"]["RemoveTarget"])
    reFitButton.show()
    label = gtk.Label("Remove Target")
    label.show()
    table.attach(label,0, 2,2,3)
    table.attach(reFitButton, 3,4,2,3)
    # combo listing the TS-map methods, configured value first
    self.listtsmethod = gtk.Combo()
    self.listtsmethod.entry.set_text("method")
    listoption = [ "row", "pixel" ]
    listoption.remove(self.config['TSMap']['method'])
    listoption.insert(0,self.config['TSMap']['method'])
    self.listtsmethod.set_popdown_strings(listoption)
    self.listtsmethod.show()
    label = gtk.Label("Method to use")
    label.show()
    table.attach(label, 0, 2,3,4)
    table.attach(self.listtsmethod, 3,4,3,4)
def _addfindsrcsrcprob(self):
    """Build the "Findsrc/Srcprob" notebook page: source-localisation
    options and source-probability (srcprob) options.

    Fix over original: the re-fit checkbox was never initialised -- the
    second set_active call operated on FitButton (again) with the
    FitsGeneration entry, an obvious copy-paste slip.
    """
    BNPage = self.AddBlocNotePage("Findsrc/Srcprob")
    frame,table = self._addFrame("Find source options",2,4)
    BNPage.attach(frame, 0, 8, 0, 2)
    FitButton = gtk.CheckButton("")
    FitButton.connect("toggled", self.fct_yesno, "findsrc")
    self.set_active(FitButton,self.config["findsrc"]["FitsGeneration"])
    FitButton.show()
    labfits = gtk.Label("Generation of the fits files")
    labfits.show()
    table.attach(labfits,0, 2,0, 1)
    table.attach(FitButton, 3,4,0, 1)
    reFitButton = gtk.CheckButton("")
    reFitButton.connect("toggled", self.fct_yesno, "findsrc")
    # BUGFIX: initialise the re-fit widget from its own config entry.
    # NOTE(review): key assumed to be "Refit" in the [findsrc] section --
    # confirm against the enrico configuration schema.
    self.set_active(reFitButton,self.config["findsrc"]["Refit"])
    reFitButton.show()
    label = gtk.Label("Re-fit")
    label.show()
    table.attach(label,0, 2,1,2)
    table.attach(reFitButton, 3,4,1,2)
    # second frame: srcprob options (search radius + source list file)
    frameprob,tableprob = self._addFrame("Srcprob options",2,4)
    BNPage.attach(frameprob, 0, 8, 2, 4)
    self.radsrcprob = self.AddSpinButton(self.config["srcprob"]["rad"],0, 360, .1, 1)
    labrad = gtk.Label("Radius of the search region")
    labrad.show()
    tableprob.attach(labrad,0, 2,0,1)
    tableprob.attach(self.radsrcprob,3,4,0, 1)
    self.srclist = gtk.FileChooserButton("srclist")
    self.srclist.set_title("srclist")
    self.srclist.set_filename(self.config['srcprob']['srclist'])
    self.srclist.show()
    labsrclist = gtk.Label("srclist filename")
    labsrclist.show()
    tableprob.attach(labsrclist,0, 2,1,2)
    tableprob.attach(self.srclist,3,4,1,2)
def _addPlot(self):
    """Build the "Plots" notebook page: an image area plus buttons that
    switch it to the various result plots via self.change."""
    BNPage = self.AddBlocNotePage("Plots")
    frame,table = self._addFrame("Results and debug plots",2,4)
    BNPage.attach(frame, 0, 8, 0, 8)
    # base path shared by all spectrum plot files
    filebase= self.config['out'] + '/Spectrum/SED_' + self.config['target']['name'] +'_'+ self.config['target']['spectrum']
    # placeholder logo, scaled to fit the image area
    self.image = gtk.Image()
    pixbuf = gtk.gdk.pixbuf_new_from_file(os.environ.get('ENRICO_DIR', '')+'/enrico/enrico.jpg')
    pixbuf = pixbuf.scale_simple(250, 300, gtk.gdk.INTERP_BILINEAR)
    self.image.set_from_pixbuf(pixbuf)
    self.image.show()
    table.attach(self.image,0,8,1,7)
    # spectrum-related plot buttons
    sedbutton = gtk.Button("SED")
    sedbutton.connect("clicked", self.change, filebase+".png")
    table.attach(sedbutton, 0,1,0,1)
    sedbutton.show()
    countbutton = gtk.Button("count")
    countbutton.connect("clicked", self.change, filebase+"_CountsPlot.png")
    table.attach(countbutton, 1,2,0,1)
    countbutton.show()
    resbutton = gtk.Button("residuals")
    resbutton.connect("clicked", self.change, filebase+"_ResPlot.png")
    table.attach(resbutton, 2,3,0,1)
    resbutton.show()
    sep = gtk.VSeparator()
    sep.show()
    table.attach(sep, 3,4,0,1)
    # light-curve plot buttons; directory depends on the configured binning
    lcfiles = self.config["out"]+"/LightCurve_"+str(self.config["LightCurve"]["NLCbin"])+"bins/"
    lcbutton = gtk.Button("LC")
    lcbutton.connect("clicked", self.change, lcfiles+"_LC.png")
    table.attach(lcbutton, 4,5,0,1)
    lcbutton.show()
    tsbutton = gtk.Button("TS vs time")
    tsbutton.connect("clicked", self.change, lcfiles+"_TS.png")
    table.attach(tsbutton, 5,7,0,1)
    tsbutton.show()
    npredbutton = gtk.Button("Npred")
    npredbutton.connect("clicked", self.change, lcfiles+"_Npred.png")
    table.attach(npredbutton, 7,8,0,1)
    npredbutton.show()
def __init__(self,infile):
    """Load (or create) the enrico configuration and assemble the GUI.

    infile -- path of the configuration file to edit; if it cannot be
    parsed, a minimal configuration rooted at the current directory is
    created instead.
    """
    self.infile = infile
    # best-effort: fall back to a fresh config if the file is unreadable
    try :
        self.config = get_config(infile)
    except :
        config = ConfigObj(indent_type='\t')
        config['out'] = os.getcwd()
        self.config = get_config(config)
    # make sure the XML model file exists so the file chooser has a target
    try :
        ftmp=open(self.config['file']['xml'],'r')
        ftmp.close()
    except :
        os.system('touch '+self.config['file']['xml'])
    self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
    self.window.connect("delete_event", self.delete)
    self.window.set_border_width(10)
    table = gtk.Table(3,6,False)
    self.window.add(table)
    # notebook holding one page per configuration topic
    self.notebloc = gtk.Notebook()
    self.notebloc.set_tab_pos(gtk.POS_LEFT)
    table.attach(self.notebloc, 0,6,0,1)
    self.notebloc.show()
    self._addMainOption()
    self._addFiles()
    self._addTargetSpace()
    self._addAnalysis()
    self._addEnergyTime()
    self._addSpectrum()
    self._addUL()
    self._addLC()
    # self._addFoldedLC()
    self._addAppFoldedLC()
    # self._addEbin()
    self._addTSMap()
    self._addfindsrcsrcprob()
    self._addPlot()
    self.notebloc.set_current_page(0)
    # bottom row: reload / save / close actions
    ReloadButton = gtk.Button("Reload conf file")
    ReloadButton.connect("clicked", self.reload, "")
    table.attach(ReloadButton, 1,2,1,2)
    ReloadButton.show()
    SaveButton = gtk.Button("Save file")
    SaveButton.connect("clicked", self.save, "")
    table.attach(SaveButton, 3,4,1,2)
    SaveButton.show()
    CloseButton = gtk.Button("Close")
    CloseButton.connect("clicked", self.delete)
    table.attach(CloseButton, 4,5,1,2)
    CloseButton.show()
    table.show()
    self.window.show()
if __name__ == "__main__":
    # Expect the configuration file name as the only command-line argument.
    try:
        infile = sys.argv[1]
    except IndexError:
        # Only catch the missing-argument case; the original bare `except:`
        # would also have hidden unrelated errors.
        logging.error('Config file not found.')
        print(('Usage: '+sys.argv[0]+' <output config file name>'))
        sys.exit(1)
    EnricoGui(infile)
    gtk.main()
|
gammapyREPO_NAMEenricoPATH_START.@enrico_extracted@enrico-master@enrico@gui.py@.PATH_END.py
|
{
"filename": "mpsig.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/signal/tests/mpsig.py",
"type": "Python"
}
|
"""
Some signal functions implemented using mpmath.
"""
from __future__ import division
try:
import mpmath
except ImportError:
mpmath = None
def _prod(seq):
"""Returns the product of the elements in the sequence `seq`."""
p = 1
for elem in seq:
p *= elem
return p
def _relative_degree(z, p):
"""
Return relative degree of transfer function from zeros and poles.
This is simply len(p) - len(z), which must be nonnegative.
A ValueError is raised if len(p) < len(z).
"""
degree = len(p) - len(z)
if degree < 0:
raise ValueError("Improper transfer function. "
"Must have at least as many poles as zeros.")
return degree
def _zpkbilinear(z, p, k, fs):
    """Bilinear transformation to convert a filter from analog to digital."""
    degree = _relative_degree(z, p)

    fs2 = 2 * fs

    # Map each analog zero and pole into the z-plane.
    zd = []
    for zero in z:
        zd.append((fs2 + zero) / (fs2 - zero))
    pd = []
    for pole in p:
        pd.append((fs2 + pole) / (fs2 - pole))

    # Zeros that were at infinity land on the Nyquist frequency (z = -1).
    zd += [-1] * degree

    # Rescale the gain so the digital filter matches the analog one.
    gain_num = _prod(fs2 - zero for zero in z)
    gain_den = _prod(fs2 - pole for pole in p)
    kd = k * gain_num / gain_den

    return zd, pd, kd.real
def _zpklp2lp(z, p, k, wo=1):
    """Transform a lowpass filter to a different cutoff frequency."""
    degree = _relative_degree(z, p)

    # Radial scaling moves the cutoff frequency from 1 to wo.
    scaled_zeros = [wo * zero for zero in z]
    scaled_poles = [wo * pole for pole in p]

    # Each shifted pole divides the gain by wo and each shifted zero
    # multiplies it; wo**degree cancels the net change.
    return scaled_zeros, scaled_poles, k * wo**degree
def _butter_analog_poles(n):
    """
    Poles of an analog Butterworth lowpass filter.

    This is the same calculation as scipy.signal.buttap(n) or
    scipy.signal.butter(n, 1, analog=True, output='zpk'), but mpmath is used,
    and only the poles are returned.
    """
    # Poles are spread uniformly on the left half of the unit circle.
    return [-mpmath.exp(1j*mpmath.pi*m/(2*n)) for m in range(-n+1, n, 2)]
def butter_lp(n, Wn):
    """
    Lowpass Butterworth digital filter design.

    This computes the same result as scipy.signal.butter(n, Wn, output='zpk'),
    but it uses mpmath, and the results are returned in lists instead of numpy
    arrays.
    """
    analog_zeros = []
    analog_poles = _butter_analog_poles(n)
    gain = 1
    sample_rate = 2
    # Pre-warp the digital cutoff onto the analog frequency axis.
    warped = 2 * sample_rate * mpmath.tan(mpmath.pi * Wn / sample_rate)
    z, p, k = _zpklp2lp(analog_zeros, analog_poles, gain, wo=warped)
    z, p, k = _zpkbilinear(z, p, k, fs=sample_rate)
    return z, p, k
def zpkfreqz(z, p, k, worN=None):
    """
    Frequency response of a filter in zpk format, using mpmath.

    This is the same calculation as scipy.signal.freqz, but the input is in
    zpk format, the calculation is performed using mpath, and the results are
    returned in lists instead of numpy arrays.
    """
    # Either generate a uniform grid over [0, pi) or use the given frequencies.
    if worN is None or isinstance(worN, int):
        npoints = worN or 512
        freqs = [mpmath.pi * mpmath.mpf(j) / npoints for j in range(npoints)]
    else:
        freqs = worN

    response = []
    for w in freqs:
        zpoint = mpmath.exp(1j * w)
        num = _prod([zpoint - root for root in z])
        den = _prod([zpoint - root for root in p])
        response.append(k * num / den)
    return freqs, response
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@signal@tests@mpsig.py@.PATH_END.py
|
{
"filename": "lisaxml.py",
"repo_name": "vallis/synthlisa",
"repo_path": "synthlisa_extracted/synthlisa-master/lisasim/lisaxml.py",
"type": "Python"
}
|
# $Id$
# $Date$
# $Author$
# $Revision$
import synthlisa
import sys
import os.path
import time
import string
import re
import math
import numpy
import pyRXPU as pyRXP
# require xmlutils from pyRXP examples
import xmlutils
# begin definitions encoding synthLISA syntax
# Tables mapping synthlisa object types to their constructor-argument lists
# (argumentList) and to the Params emitted into the XSIL output (outputList).
# Each entry is a (name, unit, default-or-None) triple; None means required.
argumentList = {}
outputList = {}
# give parameter name, default unit, default value (or None if parameter is required)
# this list is used to generate xml from the "initsave" arglist of the SWIGged Wave classes
# and also to generate a Wave class from an xml PlaneWave structure
# LISA types
argumentList['OriginalLISA'] = ( ('Armlength1','Second','16.6782'),
                                 ('Armlength2','Second','16.6782'),
                                 ('Armlength3','Second','16.6782') )
outputList['OriginalLISA'] = argumentList['OriginalLISA']
argumentList['ModifiedLISA'] = argumentList['OriginalLISA']
outputList['ModifiedLISA'] = argumentList['ModifiedLISA']
argumentList['CircularRotating'] = ( ('InitialEta','Radian','0'),
                                     ('InitialXi','Radian','0'),
                                     ('ArmSwitch','1','1'),
                                     ('TimeOffset','Second','0') )
outputList['CircularRotating'] = ( ('TimeOffset','Second',None),
                                   ('InitialPosition','Radian',None),
                                   ('InitialRotation','Radian',None),
                                   ('Armlength','Second','16.6782'),
                                   ('OrbitRadius','Second','499.004'),
                                   ('OrbitApproximation','String','CircularRigid') )
argumentList['EccentricInclined'] = ( ('InitialEta','Radian','0'),
                                      ('InitialXi','Radian','0'),
                                      ('ArmSwitch','1','1'),
                                      ('TimeOffset','Second','0') )
outputList['EccentricInclined'] = ( ('TimeOffset','Second',None),
                                    ('InitialPosition','Radian',None),
                                    ('InitialRotation','Radian',None),
                                    ('Armlength','Second','16.6782') )
# PyLISA, AllPyLISA, CacheLISA (special treatment of initargs),
# SimpleLISA, CacheLengthLISA?
# noises
argumentList['PowerLawNoise'] = ( ('Cadence','Second',None),
                                  ('TimeOffset','Second',None),
                                  ('PowerSpectralDensity','(f/Hz)^n/Hz',None),
                                  ('SpectralType','1',None),
                                  ('InterpolatorLength','1',None),
                                  ('PseudoRandomSeed','1',None) )
# need to convert SpectralType from Exponent to String,
# InterpolatorType to Interpolator and InterpolatorWindow
outputList['PowerLawNoise'] = ( ('SpectralType','String',None),
                                ('Cadence','Second',None),
                                ('TimeOffset','Second',None),
                                ('PowerSpectralDensity','(f/Hz)^n/Hz',None),
                                ('PseudoRandomGenerator','String','taus2-gsl1.4'),
                                ('PseudoRandomSeed','1',None),
                                ('Interpolator','String',None),
                                ('InterpolatorWindow','1','None') )
# Wave objects
argumentList['SimpleBinary'] = ( ('Frequency','Hertz',None),
                                 ('InitialPhase','Radian',None),
                                 ('ThetaInclination','Radian',None),
                                 ('Amplitude','1',None),
                                 ('EclipticLatitude','Radian',None),
                                 ('EclipticLongitude','Radian',None),
                                 ('SLPolarization','Radian',None) )
outputList['SimpleBinary'] = ( ('EclipticLatitude','Radian',None),
                               ('EclipticLongitude','Radian',None),
                               ('Polarization','Radian',None),
                               ('Frequency','Hertz',None),
                               ('InitialPhase','Radian',None),
                               ('Inclination','Radian',None),
                               ('Amplitude','1',None) )
# let's not support normalization, right now...
argumentList['SampledWave'] = ( ('hp','numpy',None),
                                ('hc','numpy',None),
                                ('Length','1',None),
                                ('Cadence','Second',None),
                                ('Prebuffer','Second',None),
                                ('Normalization','1','1.0'),
                                ('Filtering','Filter','None'),
                                ('InterpolatorLength','1','1'),
                                ('EclipticLatitude','Radian',None),
                                ('EclipticLongitude','Radian',None),
                                ('SLPolarization','Radian',None) )
outputList['SampledWave'] = ( ('EclipticLatitude','Radian',None),
                              ('EclipticLongitude','Radian',None),
                              ('Polarization','Radian',None),
                              ('Interpolator','String',None),
                              ('InterpolatorWindow','1','None') )
# this is special...
outputList['TimeSeries'] = ( ('TimeOffset','Second',None),
                             ('Cadence','Second',None),
                             ('Length','1',None),
                             ('hc','numpy',None),
                             ('hp','numpy',None) )
# give translations between synthlisa and XML, and backwards
ObjectToXML = {
    'OriginalLISA': 'OriginalLISA',
    'CircularRotating': 'PseudoLISA',
    'EccentricInclined': 'PseudoLISA',
    'PowerLawNoise': 'PseudoRandomNoise',
    'SimpleBinary': 'GalacticBinary',
    'SampledWave': 'SampledPlaneWave'
}
# maps an XML type name to (synthlisa type name, constructor)
XMLToObject = {
    # Synthetic LISA objects
    'OriginalLISA': ('OriginalLISA',synthlisa.OriginalLISA),
    'CircularRotating': ('CircularRotating',synthlisa.CircularRotating),
    'EccentricInclined': ('EccentricInclined',synthlisa.EccentricInclined),
    'SimpleBinary': ('SimpleBinary',synthlisa.SimpleBinary),
    # standard lisaXML objects
    'PseudoLISA': ('EccentricInclined',synthlisa.EccentricInclined),
    'PseudoRandomNoise': ('PowerLawNoise',synthlisa.PowerLawNoise),
    'GalacticBinary': ('SimpleBinary',synthlisa.SimpleBinary),
    'SampledPlaneWave': ('SampledWave',synthlisa.SampledWave)
}
# begin definitions encoding XML syntax
minimumParameterSet = {}
optionalParameterSet = {}
# for the moment, no minimum or optional parameter sets for synthLISA objects
# that are not default lisaXML objects
# we'll let the code discover if any parameters should be given that are not
minimumParameterSet['OriginalLISA'] = []
optionalParameterSet['OriginalLISA'] = []
minimumParameterSet['CircularRotating'] = []
optionalParameterSet['CircularRotating'] = []
minimumParameterSet['EccentricInclined'] = []
optionalParameterSet['EccentricInclined'] = []
# LISA objects (aren't these duplications of what we can get from the above?)
def makeminimum(parlist):
    """Turn a list of required parameter names into validator callables.

    Each callable takes a mapping and returns True when its parameter name
    is present, or the (missing) parameter name otherwise.

    Fix over original: return an explicit list.  Under Python 2 `map`
    already returned a list; the comprehension keeps that behavior under
    Python 3 too, where `map` would yield a one-shot iterator.
    """
    # p=p default-binds the name per element (same effect as the original
    # nested-lambda construction)
    return [lambda s, p=p: p in s or p for p in parlist]
def makeoptional(parlist):
    """Turn a list of (name, default) pairs into validator callables.

    Each callable takes a mapping and returns True when the parameter name
    (pair[0]) is present, or the whole (name, default) pair otherwise so the
    caller can fill in the default.

    Fix over original: return an explicit list (Python 2 `map` behavior,
    preserved under Python 3 where `map` is lazy).
    """
    return [lambda s, p=p: p[0] in s or p for p in parlist]
# Required / optional parameter validators for the standard lisaXML types.
# Optional entries carry their (default value, unit) pair.
minimumParameterSet['PseudoLISA'] = makeminimum(['InitialPosition',
                                                 'InitialRotation',
                                                 'TimeOffset'])
optionalParameterSet['PseudoLISA'] = makeoptional([('Armlength',(16.6782,'Second')),
                                                   ('ArmSwitch',(-1.0,'1'))])
minimumParameterSet['PseudoRandomNoise'] = []
optionalParameterSet['PseudoRandomNoise'] = []
# this construction would replicate the outputList as lambda tests
# map(lambda p: lambda s: p[0] in s or p[0], outputList['PowerLawNoise'])
# for PlaneWave sources only...
# sky position may be given in either ecliptic or equatorial coordinates
standardSourceParameterSet = [
    lambda s: ( (('EclipticLatitude' in s) and ('EclipticLongitude' in s)) or (('RightAscension' in s) and ('Declination' in s))
                or 'EclipticLatitude/EclipticLongitude or RightAscension/Declination' ),
    lambda s: 'Polarization' in s or 'Polarization'
]
# PlaneWave objects
minimumParameterSet['GalacticBinary'] = makeminimum(['Polarization',
                                                     'Amplitude',
                                                     'Inclination',
                                                     'InitialPhase',
                                                     'Frequency'])
optionalParameterSet['GalacticBinary'] = makeoptional([('TimeOffset',('0.0','Second')),
                                                       ('FrequencyDot',('0.0','Hertz/Second')),
                                                       ('FrequencyDotDot',('0.0','Hertz/Second^2')),
                                                       ('Eccentricity',('0.0','1'))])
minimumParameterSet['SampledPlaneWave'] = makeminimum(['TimeOffset',
                                                       'Cadence',
                                                       'Duration'])
optionalParameterSet['SampledPlaneWave'] = []
# default units
defaultUnits = {
    'EclipticLatitude': 'Radian',
    'EclipticLongitude': 'Radian',
    'Polarization': 'Radian',
    'SLPolarization': 'Radian',
    'Amplitude': '1',
    'Inclination': 'Radian',
    'InitialPhase': 'Radian',
    'Frequency': 'Hertz',
    'TimeOffset': 'Second',
    'FrequencyDot': 'Hertz/Second',
    'FrequencyDotDot': 'Hertz/Second^2',
    'Eccentricity': '1',
    'InitialAngularOrbitalPhase': 'Radian',
    'CoalescenceTime': 'Second',
    'InitialPosition': 'Radian',
    'InitialRotation': 'Radian',
    'Armlength': 'Second'
}
from convertunit import convertParameters, convertUnit
class writeXML:
    """Minimal indented-XML writer (Python 2 era).

    Writes to *filename* when given, otherwise to stdout.  Subclasses
    (e.g. lisaXML) build documents with opentag/closetag/coupletag and
    outputrxp.
    """
    def __init__(self,filename):
        if filename:
            self.f = open(filename,'w')
            self.opened = 1
        else:
            # no filename: write to stdout; opened == -1 means "do not close"
            self.f = sys.stdout
            self.opened = -1
    def __del__(self):
        # close the backing file on garbage collection, if we own it
        if self.opened == 1:
            self.close()
    def close(self):
        if self.opened == 1:
            self.f.close()
            self.opened = 0
    # handling of XML indentation
    stdindent = 4    # spaces added per nesting level
    indent = ""      # current prefix; rebound per-instance on first incind()
    def incind(self):
        self.indent += " " * self.stdindent
    def decind(self):
        self.indent = self.indent[0:len(self.indent)-self.stdindent]
    def iprint(self,s):
        # emit one indented line unless the file has been closed
        if self.opened != 0:
            print >> self.f, self.indent + s
    def doattrs(self,attrs):
        # render a dict as XML attribute text (leading space included)
        string = ''
        if len(attrs) > 0:
            # always put 'Name' first
            if 'Name' in attrs.keys():
                string += ' Name="' + attrs['Name'] +'"'
            for attr in attrs.keys():
                if attr != 'Name':
                    string += ' ' + attr + '="' + str(attrs[attr]) + '"'
        return string
    def opentag(self,tag,attrs):
        """Open an XML element; take dictionary for attributes"""
        string = '<' + tag + self.doattrs(attrs) + '>'
        self.iprint(string)
        self.incind()
    def singletag(self,tag,attrs):
        """Do an XML singleton; take dictionary for attributes"""
        string = '<' + tag + self.doattrs(attrs) + '/>'
        self.iprint(string)
    def coupletag(self,tag,attrs,thevalue):
        """Do inline XML open/close tag"""
        string = '<' + tag + self.doattrs(attrs) + '>'
        string += str(thevalue)
        string += '</' + tag + '>'
        self.iprint(string)
    def closetag(self,tag):
        """Close an XML element"""
        string = '</' + tag + '>'
        self.decind()
        self.iprint(string)
    # it would be better to redefine this function in lisaXML
    # to implement the binaryData routine
    def content(self,thevalue):
        """Output XML characters"""
        if isinstance(thevalue,binaryData):
            # NOTE(review): relies on self.filename and self.binaryfiles,
            # which are only set by the lisaXML subclass -- confirm callers.
            filename = re.sub('\.xml$','',self.filename) + '-' + str(self.binaryfiles) + '.bin'
            self.iprint(os.path.basename(filename))
            thevalue.dumpdata(filename)
            self.binaryfiles += 1
        else:
            # try to keep indentation for multiline content
            for line in str(thevalue).split('\n'):
                # look here for content serialization concerns
                self.iprint(line)
    def outputrxp(self,rxpexp):
        """Output RXP tuple-based expression"""
        # rxpexp is (tagname, attr-dict, children); recurse over children
        if rxpexp[2]:
            # have children!
            self.opentag(rxpexp[0],rxpexp[1])
            for elem in rxpexp[2]:
                if type(elem) in (tuple,list):
                    self.outputrxp(elem)
                else:
                    self.content(elem)
            self.closetag(rxpexp[0])
        else:
            # I am a singleton
            self.singletag(rxpexp[0],rxpexp[1])
class typeTimeSeries:
    """Marker type used to tag objects as time-series data (no behavior)."""
    pass
class typeFrequencySeries:
    """Marker type used to tag objects as frequency-series data (no behavior)."""
    pass
class binaryData:
    """A set of equal-length numeric records destined for a raw binary
    file referenced from a lisaXML document.

    Fixes over original: dumpdata now opens the file in binary mode
    ('wb' -- text mode corrupts raw doubles on Windows and raises
    TypeError on Python 3), uses a `with` block so the handle is always
    closed, and calls tobytes() (tostring() was removed in NumPy 2.0;
    both return the same raw buffer).
    """
    def __init__(self,thedata,length):
        # thedata: sequence of array-likes; length: samples kept per record
        self.thedata = thedata
        self.records = len(thedata)
        self.length = length
    def dumpdata(self,filename):
        """Write the records to *filename* as interleaved native doubles."""
        # column i holds record i, so sample j of every record is contiguous
        # in the row-major output
        buffer = numpy.zeros([self.length,self.records],'d')
        for i in range(self.records):
            buffer[:,i] = self.thedata[i][0:self.length]
        with open(filename,'wb') as bfile:
            bfile.write(buffer.tobytes())
class fileData:
    """Lightweight record describing an external binary data file
    referenced from an XML document (path, shape and encoding info)."""
    def __init__(self,filename,basename,length,records,encoding,index):
        # store all descriptor fields verbatim
        (self.filename, self.basename,
         self.length, self.records,
         self.encoding, self.index) = (filename, basename,
                                       length, records,
                                       encoding, index)
def dumpXML(object):
    """Serialize *object* as lisaXML to standard output."""
    # an empty filename makes lisaXML write to stdout
    writer = lisaXML('')
    writer.outputrxp(writer.ProcessObjectData(object))
class lisaXML(writeXML):
    """Create lisaXML file with metadata (author,date,comments);
    date should be given as ISO-8601, or will be set to today"""
    # NOTE(review): this class is Python 2 code (print statements,
    # `raise Exception, msg` syntax); it will not run unmodified on Python 3.
    def __init__(self,filename,author='',date='',comments=''):
        """Create lisaXML file with metadata [author,date,comments];
        date should be given as ISO-8601."""
        self.filename = filename
        # add ".xml" to file if necessary
        if not re.match('.*\.xml$',self.filename) and self.filename != '':
            self.filename += '.xml'
        writeXML.__init__(self,self.filename)
        self.author = author
        # ??? the date should be validated before using it!
        if date:
            self.date = date
        else:
            # default to the current local time
            self.date = time.strftime('%Y-%m-%dT%H:%M:%S%Z',time.localtime())
        self.comments = comments
        # counter used to name the sequence of external binary files
        self.binaryfiles = 0
        # XML fragments / series objects accumulated until close()
        self.theLISAData = []
        self.theNoiseData = []
        self.theSourceData = []
        self.theTDIData = []
    def doComment(self,comments):
        """Wrap a comment string as a pyRXP 'Comment' element tuple."""
        return ('Comment', {}, [comments])
    def ProcessObjectData(self,object,name='',comments=''):
        """Add an object (LISA, Wave,Noise) to an XML output file."""
        # get init arguments and type
        if hasattr(object,'xmlargs'):
            objectarglist = object.xmlargs
        else:
            objectarglist = object.initargs
        if hasattr(object,'xmltype'):
            objecttype = object.xmltype
        else:
            objecttype = object.__class__.__name__
        try:
            defaultarglist = argumentList[objecttype]
        except KeyError:
            raise KeyError, 'lisaXML.ObjectData: unknown object type %s' % objecttype
        # the synthlisa constructor parameters are contained (as a list)
        # in object.xmlargs (preferred) or object.initargs
        # in the order and with the units given by argumentList[objecttype]
        # assign the standard parameters to my structure
        objectpars = {}
        for i in range(len(defaultarglist)):
            param = defaultarglist[i]
            try:
                if param[1]:
                    # if argumentList[objecttype] specifies a standard Unit, use it
                    objectpars[param[0]] = (objectarglist[i],param[1])
                else:
                    # otherwise get it from the element in xmlargs/initargs,
                    # which is a tuple
                    objectpars[param[0]] = objectarglist[i]
            except IndexError:
                if param[2] != None:
                    # if a parameter is missing in xmlargs/initargs, see if
                    # we have a default value and use it
                    objectpars[param[0]] = (param[2],param[1])
                else:
                    raise AttributeError, 'lisaXML.ProcessObjectData(): missing internal parameter %s in object %s' % (param[0],object)
        # translate the object to an XML type
        xmltype = ObjectToXML[objecttype]
        # the Params to be output in the XSIL element are given
        # in outputList[objecttype]; this allows for parameter reordering,
        # for converting to the right units, and for setting some default values
        params = []
        for param in outputList[objecttype]:
            # first, see if we have the parameter...
            if not param[0] in objectpars:
                # try obtaining the parameter from a transformation of the other ones
                try:
                    thisparam = convertParameters(param,objectpars)
                except AttributeError:
                    # try using a default output value if there is one
                    if param[2] != None:
                        thisparam = (param[2],param[1])
                    else:
                        raise AttributeError, 'readXML.ProcessObjectData(): missing external parameter(s) %s for object %s' % (param[0],object)
            else:
                thisparam = objectpars[param[0]]
            # convert to the correct units (if we know how to)
            if param[1]:
                thisparam = convertUnit(thisparam[0],thisparam[1],param[1],param[0])
            # add in pyRXP format
            params.append(('Param', {'Name': param[0], 'Unit': thisparam[1]}, [thisparam[0]]))
        if comments:
            params.append(self.doComment(comments))
        # create the right kind of XSIL element
        if isinstance(object,synthlisa.Wave) and not xmltype == 'SampledPlaneWave':
            xsildict = {'Type': 'PlaneWave'}
            params.append(('Param',{'Name': 'SourceType'},[xmltype]))
        else:
            xsildict = {'Type': xmltype}
        if name:
            xsildict['Name'] = name
        # add a TimeSeries if needed
        if objecttype == 'SampledWave':
            params.append(self.doSampledWaveTimeSeries(objectpars))
        return ('XSIL', xsildict, params)
    def doSampledWaveTimeSeries(self,myobjectpars):
        """Build the pyRXP tuple for the hp,hc TimeSeries embedded in a
        SampledWave object, including its binary data Stream."""
        # resolve the TimeSeries parameters (with unit conversion/defaults)
        objectpars = {}
        for param in outputList['TimeSeries']:
            if not param[0] in myobjectpars:
                try:
                    thisparam = convertParameters(param,myobjectpars)
                except AttributeError:
                    if param[2] != None:
                        thisparam = (param[2],param[1])
                    else:
                        raise AttributeError, 'readXML.doSampledWaveTimeSeries(): missing external parameter(s) %s for object %s' % (param[0],object)
            else:
                thisparam = myobjectpars[param[0]]
            if param[1]:
                thisparam = convertUnit(thisparam[0],thisparam[1],param[1],param[0])
            objectpars[param[0]] = thisparam
        children = []
        for name in ('TimeOffset','Cadence'):
            children.append( ('Param',
                              {'Name': name, 'Unit': objectpars[name][1]},
                              [objectpars[name][0]]) )
        # Duration is derived as Cadence * Length
        children.append( ('Param',
                          {'Name': 'Duration', 'Unit': objectpars['Cadence'][1]},
                          [str(float(objectpars['Cadence'][0]) * float(objectpars['Length'][0]))]) )
        arraycontent = []
        arraycontent.append( ( 'Dim', {'Name': 'Length'}, [objectpars['Length'][0]] ) )
        arraycontent.append( ( 'Dim', {'Name': 'Records'}, [str(2)] ) )
        # check bigendian, littleendian
        if sys.byteorder == 'big':
            encoding = 'Binary,BigEndian'
        elif sys.byteorder == 'little':
            encoding = 'Binary,LittleEndian'
        # the binaryData instance is written out later by the serializer
        arraycontent.append( ('Stream',
                              {'Type': 'Remote', 'Encoding': encoding},
                              [ binaryData( (objectpars['hp'][0], objectpars['hc'][0]),
                                            int(objectpars['Length'][0]) ) ]
                              ) )
        children.append( ('Array',
                          {'Name': 'hp,hc', 'Type': 'double', 'Unit': '1'},
                          arraycontent) )
        return ('XSIL', {'Name': 'hp,hc', 'Type': 'TimeSeries'}, children)
    # the following calls piggyback on ProcessObjectData
    def SourceData(self,source,name='',comments=''):
        """Add a SourceData entry describing a synthlisa source to
        a lisaXML file object."""
        self.theSourceData.append(self.ProcessObjectData(source,name,comments))
    def LISAData(self,lisa,comments=''):
        """Add a LISAData entry describing a synthlisa LISA object to
        a lisaXML file object."""
        # allow only one LISA object
        self.theLISAData = [self.ProcessObjectData(lisa,'LISA',comments)]
    def NoiseData(self,object,name='',comments='',hideseed=0):
        """Add a NoiseData entry describing a synthlisa Noise object to
        a lisaXML file object; if called for a TDInoise object, will
        loop over all component noises and dump an XML description for
        each of them."""
        if object.__class__.__name__ == 'TDInoise':
            # if we get a TDInoise, do the component noises one by one
            if name:
                if comments:
                    comments = comments + '\n(part of %s) ' % name
                else:
                    comments = '(part of %s) ' % name
            if not self.theLISAData:
                self.LISAData(object.initargs[0],comments)
            # proof-mass noise
            map(lambda noise, name: self.NoiseData(noise,name,comments,hideseed),
                object.initargs[1],
                ['pm1','pm1s','pm2','pm2s','pm3','pm3s'])
            # photodetector noise; note mapping per abstract-format.txt
            map(lambda noise, name: self.NoiseData(noise,name,comments,hideseed),
                object.initargs[2],
                ['pdm3','pd3','pdm1','pd1','pdm2','pd2'])
            # laser noise
            map(lambda noise, name: self.NoiseData(noise,name,comments,hideseed),
                object.initargs[3],
                ['C1','C1s','C2','C2s','C3','C3s'])
        elif object.__class__.__name__ == 'SumSignal':
            # composite noise: recurse into both summands under the same name
            self.NoiseData(object.initargs[0],name,comments,hideseed)
            self.NoiseData(object.initargs[1],name,comments,hideseed)
        elif object.__class__.__name__ == 'NoSignal':
            return
        else:
            if hasattr(object,'xmltype') and object.xmltype == 'PowerLawNoise' and hideseed == 1:
                # hide pseudorandom seed if so requested
                object.xmlargs[5] = 0
            self.theNoiseData.append(self.ProcessObjectData(object,name,comments))
    def TDIData(self,data,length,cadence,description,offset=0,encoding='Binary',comments=''):
        """Add a TimeSeries object to a lisaXML file object. Here
        'data' is the numpy array containing the time series
        (simultaneous entries on the same row); 'length' is the desired
        length to be written in the array; 'cadence' is its nominal
        cadence in seconds; 'description' should be a comma-separated
        string listing the TDI observables represented in the time
        series; 'offset' is the nominal initial time for the data
        (currently in seconds); 'encoding' can be 'Binary' for storage
        in a separate binary file, or 'Text' for inline storage in the
        XML file; 'comments' is added to the TimeSeries entry. To
        skip some records at the beginning of the array, use a slicing
        syntax such as data[1:].

        Each lisaXML file can contain several TimeSeries objects,
        all contained in the TDIData block; if binary storage is
        requested, each TimeSeries (or FrequencySeries) object
        corresponds to a separate binary file, named file-0.bin,
        file-1.bin, etc., if the main XML file is file.xml."""
        # mimick the getobsc call, and get description as comma-separated string
        # ??? provide way to read variable names directly from list
        # Should use different names for phase and frequency TDI variables
        TimeSeries = typeTimeSeries()
        try:
            TimeSeries.data = data
            TimeSeries.dim = len(numpy.shape(data))
            TimeSeries.alength = numpy.shape(data)[0]
            if data.dtype.char != 'd':
                raise TypeError
        except:
            print "lisaXML::TDIData: data must be a proper numpy array of doubles"
            raise TypeError
        if TimeSeries.dim == 1:
            TimeSeries.records = 1
        elif TimeSeries.dim == 2:
            TimeSeries.records = numpy.shape(data)[1]
        else:
            print "lisaXML::TDIData: behavior undetermined for arrays of this dimension"
            raise NotImplementedError
        TimeSeries.length = length
        if length > TimeSeries.alength:
            print "lisaXML::TDIData: data shorter than requested output length"
            raise IndexError
        TimeSeries.cadence = cadence
        TimeSeries.duration = cadence * length
        TimeSeries.description = description
        TimeSeries.start = offset
        TimeSeries.encoding = encoding
        TimeSeries.comments = comments
        # actual writing is deferred until close()
        self.theTDIData.append(TimeSeries)
    def writearray(self,data,length,records,description,encoding):
        """Emit an <Array> element for 'data'; binary encodings go to an
        external file-N.bin, 'Text' is written inline."""
        self.opentag('Array',{'Name': description,'Type': 'double'})
        self.coupletag('Dim',{'Name': 'Length'},length)
        self.coupletag('Dim',{'Name': 'Records'},records)
        # ??? support only remote binary, local ascii
        if 'Binary' in encoding:
            # determine the system binary encoding
            if sys.byteorder == 'big':
                encoding = 'Binary,BigEndian'
            elif sys.byteorder == 'little':
                encoding = 'Binary,LittleEndian'
            else:
                print 'lisaXML::writedata: cannot determine binary encoding'
                raise NotImplementedError
            # defaulting to remote storage
            # determine binary filename (base filename + ordinal + '.bin')
            binaryfilename = (re.sub('\.xml$','',self.filename) +
                              '-' + str(self.binaryfiles) + '.bin')
            self.binaryfiles += 1
            self.coupletag('Stream',{'Type': 'Remote','Encoding': encoding},
                           os.path.basename(binaryfilename))
            # NOTE(review): text-mode 'w' -- 'wb' looks intended for raw bytes
            bfile = open(binaryfilename, 'w')
            if len(data) != length:
                bfile.write(data[0:length].tostring())
            else:
                bfile.write(data.tostring())
            bfile.close()
        elif 'Text' in encoding:
            # defaulting to inline storage
            self.opentag('Stream',{'Type': 'Local', 'Encoding': 'Text', 'Delimiter': ' '})
            linepattern = '%le ' * records
            for line in range(0,length):
                self.content(linepattern % tuple(data[line]))
            self.closetag('Stream')
        else:
            print 'lisaXML::writedata: encoding not implemented'
            raise NotImplementedError
        self.closetag('Array')
    def writeTimeSeries(self,TimeSeries):
        """Serialize one typeTimeSeries instance as an XSIL TimeSeries."""
        # write out the TimeSeries defined in TimeSeries
        self.opentag('XSIL',{'Type': 'TimeSeries',
                             'Name': TimeSeries.description})
        if TimeSeries.comments:
            self.opentag('Comment',{})
            self.content(TimeSeries.comments)
            self.closetag('Comment')
        self.coupletag('Param',{'Name': 'TimeOffset','Type': 'Second'},
                       str(TimeSeries.start))
        self.coupletag('Param',{'Name': 'Cadence','Unit': 'Second'},
                       str(TimeSeries.cadence))
        self.coupletag('Param',{'Name': 'Duration','Unit': 'Second'},
                       str(TimeSeries.duration))
        # ??? use <Column> to define columns (not in XSIL, but in BFD)?
        self.writearray(TimeSeries.data,
                        TimeSeries.length,
                        TimeSeries.records,
                        TimeSeries.description,
                        TimeSeries.encoding)
        self.closetag('XSIL')
    def TDISpectraSelfDescribed(self,data,description,encoding='Binary',comments=''):
        """Add a FrequencySeries object to a lisaXML file object.
        Here 'data' is the numpy array containing the time
        series (simultaneous entries on the same row); 'description'
        is a comma-separated string listing the TDI observables
        represented in the time series; 'encoding' can be 'Binary'
        for storage in a separate binary file, or 'Text' for inline
        storage in the XML file; 'comments' is added to the
        FrequencySeries entry.

        The other parameters to TDISpectra are obtained by examining the first
        column of 'data', which is assumed to contain frequencies in Hz."""
        # deltaf from the first two rows, offset from the first row
        return self.TDISpectra(data,
                               len(data),
                               data[1,0]-data[0,0],
                               description,
                               data[0,0],
                               encoding,
                               comments)
    def TDISpectra(self,data,length,deltaf,description,offset=0,encoding='Binary',comments=''):
        """Add a FrequencySeries object to a lisaXML file object.
        Here 'data' is the numpy array containing the time series
        (simultaneous entries on the same row); 'length' is the desired
        length of the array to be written; 'deltaf' is the difference
        between successive records [in Hz]; comma-separated string
        listing the TDI observables represented in the time series;
        'offset' is the initial frequency for the spectra (in Hz);
        'encoding' can be 'Binary' for storage in a separate binary
        file, or 'Text' for inline storage in the XML file; 'comments'
        is added to the FrequencySeries entry. To skip some records
        at the beginning of the array, use a slicing syntax such as
        data[1:].

        Each lisaXML file can contain several FrequencySeries
        objects, all contained in the TDIData block; if binary storage
        is requested, each FrequencySeries (or TimeSeries)
        object corresponds to a separate binary file, named file-0.bin,
        file-1.bin, etc., if the main XML file is file.xml."""
        # mimick the getobsc call, and get description as comma-separated string
        # ??? provide way to read variable names directly from list
        # Should use different names for phase and frequency TDI variables
        FrequencySeries = typeFrequencySeries()
        try:
            FrequencySeries.data = data
            FrequencySeries.dim = len(numpy.shape(data))
            FrequencySeries.alength = numpy.shape(data)[0]
            if data.dtype.char != 'd':
                raise TypeError
        except:
            print "lisaXML::TDISpectra: data must be a proper numpy array of doubles"
            raise TypeError
        if FrequencySeries.dim == 1:
            FrequencySeries.records = 1
        elif FrequencySeries.dim == 2:
            FrequencySeries.records = numpy.shape(data)[1]
        else:
            print "lisaXML::TDISpectra: behavior undetermined for arrays of this dimension"
            raise NotImplementedError
        FrequencySeries.length = length
        if length > FrequencySeries.alength:
            print "lisaXML::TDISpectra: data shorter than requested output length"
            raise IndexError
        # NOTE(review): 'offset' here is used as a record index times deltaf,
        # while the docstring calls it a frequency in Hz -- confirm intent
        FrequencySeries.minf = offset * deltaf
        FrequencySeries.maxf = (offset + length - 1) * deltaf
        FrequencySeries.deltaf = deltaf
        FrequencySeries.description = description
        FrequencySeries.encoding = encoding
        FrequencySeries.comments = comments
        # actual writing is deferred until close()
        self.theTDIData.append(FrequencySeries)
    def writeFrequencySeries(self,FrequencySeries):
        """Serialize one typeFrequencySeries instance as an XSIL FrequencySeries."""
        # write out the FrequencySeries defined in FrequencySeries
        self.opentag('XSIL',{'Type': 'FrequencySeries',
                             'Name': FrequencySeries.description})
        if FrequencySeries.comments:
            self.opentag('Comment',{})
            self.content(FrequencySeries.comments)
            self.closetag('Comment')
        # ??? fix the frequency types to "Hz" (XSIL extension) for the moment
        # ??? provide facility for automatic step specification
        self.coupletag('Param',{'Name': 'MinFreq','Type': 'Hz'},
                       str(FrequencySeries.minf))
        self.coupletag('Param',{'Name': 'MaxFreq','Type': 'Hz'},
                       str(FrequencySeries.maxf))
        self.coupletag('Param',{'Name': 'DeltaFreq','Type': 'Hz'},
                       str(FrequencySeries.deltaf))
        # ??? use <Column> to define columns (not in XSIL, but in BFD)?
        self.writearray(FrequencySeries.data,
                        FrequencySeries.length,
                        FrequencySeries.records,
                        FrequencySeries.description,
                        FrequencySeries.encoding)
        self.closetag('XSIL')
    def close(self):
        """Write the XML file to disk. This happens also on destruction of
        the lisaXML object."""
        if self.opened == 0:
            print "lisaXML::close(): File is closed already"
            raise IOError
        # XML prolog: declaration, DTD, stylesheets
        self.content('<?xml version="1.0"?>')
        self.content('<!DOCTYPE XSIL SYSTEM "http://www.vallis.org/lisa-xml.dtd">')
        self.content('<?xml-stylesheet type="text/xsl" href="lisa-xml.xsl"?>')
        self.content('<?xml-stylesheet type="text/xsl" href="http://www.vallis.org/lisa-xml.xsl"?>')
        self.opentag('XSIL',{})
        self.coupletag('Param',{'Name': 'Author'},self.author)
        self.coupletag('Param',{'Name': 'GenerationDate', 'Type': 'ISO-8601'},
                       self.date)
        # append the generator stamp to the user comments
        if self.comments:
            self.comments += '\n\n'
        self.comments += 'This file produced by Synthetic LISA v. %s\n' % synthlisa.version_short
        self.comments += '(c) 2006 Michele Vallisneri, California Institute of Technology\n'
        self.comments += '---------------------------------------------------------------\n'
        self.comments += synthlisa.version_full
        self.outputrxp(self.doComment(self.comments))
        # dump the accumulated sections in their canonical order
        if self.theLISAData:
            self.opentag('XSIL',{'Type': 'LISAData'})
            for object in self.theLISAData:
                self.outputrxp(object)
            self.closetag('XSIL')
        if self.theNoiseData:
            self.opentag('XSIL',{'Type': 'NoiseData'})
            for object in self.theNoiseData:
                self.outputrxp(object)
            self.closetag('XSIL')
        if self.theSourceData:
            self.opentag('XSIL',{'Type': 'SourceData'})
            for object in self.theSourceData:
                self.outputrxp(object)
            self.closetag('XSIL')
        # do the TDIdata objects (first supported)
        if len(self.theTDIData) > 0:
            self.opentag('XSIL',{'Type': 'TDIData'})
            for object in self.theTDIData:
                if isinstance(object,typeTimeSeries):
                    self.opentag('XSIL',{'Type': 'TDIObservable',
                                         'Name': object.description})
                    self.coupletag('Param',{'Name': 'DataType'},'FractionalFrequency')
                    self.writeTimeSeries(object)
                    self.closetag('XSIL')
                elif isinstance(object,typeFrequencySeries):
                    self.opentag('XSIL',{'Type': 'TDIObservable',
                                         'Name': object.description})
                    self.coupletag('Param',{'Name': 'DataType'},'FractionalFrequency')
                    self.writeFrequencySeries(object)
                    self.closetag('XSIL')
            self.closetag('XSIL')
        self.closetag('XSIL')
        # do the actual writing
        writeXML.close(self)
class readXML:
    """Parse a lisaXML (XSIL) file and reconstruct its contents:
    TDI time/frequency series, LISA geometry, noises and sources.
    NOTE(review): Python 2 code (print statements, `raise E, msg`)."""
    def __init__(self,filename):
        # parse with pyRXP; on DTD-related validation failure, strip the
        # DOCTYPE declaration and retry
        p = pyRXP.Parser()
        f = open(filename)
        lines = f.read()
        f.close()
        try:
            tree = p(lines)
        except pyRXP.error:
            print "XML validation error! (Or perhaps I couldn't access the DTD)."
            print "I'll try to use the file anyway by removing the DTD..."
            lines = re.sub('<!DOCTYPE XSIL SYSTEM ".*">','',lines)
            tree = p(lines)
        if tree[0] != 'XSIL':
            print 'Not a LISA XSIL file!'
            raise TypeError
        # remember the directory, to resolve relative binary-file names
        self.directory = os.path.dirname(filename)
        self.tw = xmlutils.TagWrapper(tree)
    def close(self):
        # nothing to release: the file was read in full by __init__
        pass
    def getTime(self,node):
        """Return a Time node as (string value, Type) -- Type only if present."""
        try:
            # keep Time as string, get Type if provided
            return (str(node),node.Type)
        except AttributeError:
            return (str(node),)
    def getParam(self,node):
        """Return a Param node as [string value, Unit or None]."""
        try:
            # convert Param to float, get Unit if provided
            return [str(node),node.Unit]
        except AttributeError:
            return [str(node),None]
    def getDim(self,node):
        """Return a Dim node's content as int."""
        return int(str(node))
    def processSeries(self,node):
        """Turn a TimeSeries/FrequencySeries XSIL node into a dict with
        Params, Dims, Encoding and the loaded 'Data' numpy array."""
        timeseries = {}
        timeseries['Type'] = node.Type
        # I suppose 'Name' must be provided!
        timeseries['Name'] = node.Name
        timeseries['Vars'] = node.Name.split(',')
        for node2 in node:
            if node2.tagName == 'Time':
                timeseries[node2.Name] = self.getTime(node2)
            elif node2.tagName == 'Param':
                timeseries[node2.Name] = self.getParam(node2)
            elif node2.tagName == 'Array':
                for node3 in node2:
                    if node3.tagName == 'Dim':
                        timeseries[node3.Name] = self.getDim(node3)
                    elif node3.tagName == 'Stream':
                        timeseries['Encoding'] = node3.Encoding
                        if node3.Type == 'Remote':
                            timeseries['Filename'] = str(node3)
                            if 'Binary' in timeseries['Encoding']:
                                # assume length of doubles is 8 (generic?)
                                readlength = 8*timeseries['Length']*timeseries['Records']
                                # need to catch reading errors here
                                if self.directory:
                                    binaryfile = open(self.directory + '/' + timeseries['Filename'],'r')
                                else:
                                    binaryfile = open(timeseries['Filename'],'r')
                                readbuffer = numpy.fromstring(binaryfile.read(readlength),'double')
                                binaryfile.close()
                                # swap bytes if the file endianness differs from the host's
                                if (('BigEndian' in timeseries['Encoding'] and sys.byteorder == 'little') or
                                    ('LittleEndian' in timeseries['Encoding'] and sys.byteorder == 'big')):
                                    readbuffer = readbuffer.byteswap()
                                if timeseries['Records'] == 1:
                                    timeseries['Data'] = readbuffer
                                else:
                                    timeseries['Data'] = numpy.reshape(readbuffer,
                                                                       [timeseries['Length'],timeseries['Records']])
                            else:
                                # remote data, not binary
                                raise NotImplementedError
                        elif node3.Type == 'Local':
                            if 'Text' in timeseries['Encoding']:
                                timeseries['Delimiter'] = node3.Delimiter
                                datastring = str(node3)
                                # normalize every declared delimiter to a space
                                for delchar in timeseries['Delimiter']:
                                    datastring = string.join(datastring.split(delchar),' ')
                                # there may be a more efficient way to initialize an array
                                datavalues = map(float,datastring.split())
                                if timeseries['Records'] == 1:
                                    timeseries['Data'] = numpy.array(datavalues,'d')
                                else:
                                    timeseries['Data'] = numpy.reshape(numpy.array(datavalues,'d'),
                                                                       [timeseries['Length'],timeseries['Records']])
                                # should try different delimiters
                            else:
                                # local data, not textual
                                raise NotImplementedError
        return timeseries
    def getTDITimeSeries(self):
        """Return a list of dicts for every TimeSeries under TDIData/TDIObservable."""
        result = []
        for node in self.tw:
            # outermost XSIL level container
            if node.tagName == 'XSIL':
                if node.Type == 'TDIData':
                    # inside TDIData
                    for node2 in node:
                        if node2.tagName == 'XSIL':
                            if node2.Type == 'TDIObservable':
                                for node3 in node2:
                                    if node3.tagName == 'XSIL':
                                        if node3.Type == 'TimeSeries':
                                            # got a TimeSeries!
                                            result.append(self.processSeries(node3))
        return result
    def getTDIFrequencySeries(self):
        """Return a list of dicts for every FrequencySeries under TDIData/TDIObservable."""
        result = []
        for node in self.tw:
            # outermost XSIL level container
            if node.tagName == 'XSIL':
                if node.Type == 'TDIData':
                    # inside TDIData
                    for node2 in node:
                        if node2.tagName == 'XSIL':
                            if node2.Type == 'TDIObservable':
                                for node3 in node2:
                                    if node3.tagName == 'XSIL':
                                        if node3.Type == 'FrequencySeries':
                                            # got a FrequencySeries!
                                            result.append(self.processSeries(node3))
        return result
    def getLISASampledNoise(self):
        """Return a list of dicts for every TimeSeries inside a NoiseData block
        (matched by Name, unlike the other getters which match Type)."""
        result = []
        for node in self.tw:
            # outermost XSIL level container
            if node.tagName == 'XSIL':
                if node.Name == 'NoiseData':
                    # inside NoiseData
                    for node2 in node:
                        if node2.tagName == 'XSIL':
                            if node2.Type == 'TimeSeries':
                                # got a TimeSeries!
                                result.append(self.processSeries(node2))
        return result
    def getLISASources(self,returnFactory=False):
        """Return instantiated synthlisa sources from the SourceData block,
        or (returnFactory=True) a lazy LISASourceFactory over them."""
        result = []
        for node in self.tw:
            if node.tagName == 'XSIL':
                if node.Type == 'SourceData':
                    # inside SourceData
                    for node2 in node:
                        if node2.tagName == 'XSIL':
                            if node2.Type in ('PlaneWave','SampledPlaneWave'):
                                r = self.processObject(node2)
                                if returnFactory:
                                    result.append(r)
                                else:
                                    result.append(MakeObject(r))
        if returnFactory:
            return LISASourceFactory(result)
        else:
            return result
    def getLISAGeometry(self):
        """Return the (last) PseudoLISA object found in LISAData, or None."""
        result = None
        for node in self.tw:
            if node.tagName == 'XSIL':
                if node.Type == 'LISAData':
                    for node2 in node:
                        if node2.tagName == 'XSIL':
                            if node2.Type == 'PseudoLISA':
                                r = self.processObject(node2)
                                result = MakeObject(r)
        return result
    def getLISANoise(self):
        """Return a list of instantiated PseudoRandomNoise objects from NoiseData."""
        result = []
        for node in self.tw:
            if node.tagName == 'XSIL':
                if node.Type == 'NoiseData':
                    for node2 in node:
                        if node2.tagName == 'XSIL':
                            if node2.Type == 'PseudoRandomNoise':
                                r = self.processObject(node2)
                                result.append(MakeObject(r))
        return result
    def getTDInoise(self):
        """Reassemble a synthlisa.TDInoise from the file's LISA geometry and
        named component noises; missing noises become NoSignal()."""
        try:
            lisa = self.getLISAGeometry()
            if not lisa:
                raise AttributeError
        except:
            raise AttributeError, 'readXML.getTDInoise(): problems reading LISA geometry'
        try:
            noises = self.getLISANoise()
        except:
            raise AttributeError, 'readXML.getTDInoise(): problems reading LISA noises'
        noisedict = {}
        for noise in noises:
            if noise.name in noisedict:
                # handle composite noise
                noisedict[noise.name] = synthlisa.SumSignal(noisedict[noise.name],noise)
            else:
                noisedict[noise.name] = noise
        # undefined noises are replaced with NoSignal()
        checknoise = lambda id: id in noisedict and noisedict[id] or synthlisa.NoSignal()
        pmnoises = [checknoise(id) for id in ('pm1','pm1s','pm2','pm2s','pm3','pm3s')]
        pdnoises = [checknoise(id) for id in ('pdm3','pd3','pdm1','pd1','pdm2','pd2')]
        lsnoises = [checknoise(id) for id in ('C1','C1s','C2','C2s','C3','C3s')]
        return synthlisa.TDInoise(lisa,pmnoises,pdnoises,lsnoises)
    def processArray(self,node,objectparams):
        """Record the Dims of an Array node into objectparams, then map each
        comma-separated variable in the Array's Name to a fileData placeholder
        (column of the remote binary stream)."""
        for node2 in node:
            if node2.tagName == 'Dim':
                objectparams[node2.Name] = (self.getDim(node2),'1')
        for node2 in node:
            if node2.tagName == 'Stream':
                try:
                    encoding = node2.Encoding
                except:
                    raise AttributeError, 'readXML.processArray(): encoding/type not specified in stream'
                if node2.Type == 'Remote':
                    # variable names from the Array Name, whitespace-trimmed
                    vars = map(lambda s: string.lstrip(string.rstrip(s)),string.split(node.Name,','))
                    for v in range(len(vars)):
                        objectparams[vars[v]] = (fileData(self.directory + '/' + str(node2),str(node2),
                                                          int(objectparams['Length'][0]),int(objectparams['Records'][0]),encoding,v),
                                                 'numpy')
                # should handle local data here...
    def processObject(self,node):
        """Validate an XSIL object node against the parameter tables and
        return a factory tuple (class, arglist, name, params) for MakeObject."""
        objectparams = {}
        try:
            objectname = node.Name
        except AttributeError:
            objectname = ''
        objectparams = {}
        # collect all directly attached Params
        for node2 in node:
            if node2.tagName == 'Param':
                objectparams[node2.Name] = self.getParam(node2)
        if node.Type in ('PlaneWave','SampledPlaneWave'):
            for test in standardSourceParameterSet:
                if test(objectparams) != True:
                    raise KeyError, 'readXML.processObject(): need standard parameter(s) %s in source %s' % (test(objectparams),objectname)
        if node.Type == 'PlaneWave':
            try:
                objecttype = objectparams['SourceType'][0]
            except:
                raise KeyError, 'readXML.processObject(): need SourceType for PlaneWave source %s' % objectname
        else:
            objecttype = node.Type
        # look inside included TimeSeries to get more parameters there
        # can only handle the first TimeSeries within each SampledPlaneWave
        # look inside included Array to get more parameters there
        # can only handle the first Array
        if node.Type == 'SampledPlaneWave':
            for node2 in node:
                if node2.tagName == 'XSIL' and node2.Type == 'TimeSeries':
                    for node3 in node2:
                        if node3.tagName == 'Param':
                            objectparams[node3.Name] = self.getParam(node3)
                        elif node3.tagName == 'Array':
                            self.processArray(node3,objectparams)
                    break
        if not objecttype in minimumParameterSet:
            raise NotImplementedError, 'readXML.processObject(): unknown object type %s for object %s' % (objecttype,objectname)
        # check that minimum parameter set is included
        for test in minimumParameterSet[objecttype]:
            if test(objectparams) != True:
                raise KeyError, 'readXML.processObject(): need parameter(s) %s for object %s of type %s' % (test(objectparams),objectname,objecttype)
        # add the default units if not included and if defined
        for param in objectparams:
            if (not param[1]) and (param[0] in defaultUnits):
                param[1] = defaultUnits[param[0]]
        # add default value of optional parameters if not defined
        for test in optionalParameterSet[objecttype]:
            t = test(objectparams)
            if t != True:
                objectparams[t[0]] = t[1]
        # now convert to a synthlisa object; see if we have it defined
        if not objecttype in XMLToObject:
            raise NotImplementedError, 'readXML.processObject(): unknown object %s of type %s' % (objectname,objecttype)
        synthlisatype = XMLToObject[objecttype][0]
        # assemble the argument list
        arglist = []
        for param in argumentList[synthlisatype]:
            # first, see if we have the parameter...
            if not param[0] in objectparams:
                # try obtaining the parameter from the other ones
                try:
                    thisparam = convertParameters(param,objectparams)
                except AttributeError:
                    if param[2]:
                        thisparam = (param[2],param[1])
                    else:
                        raise AttributeError, 'readXML.processObject(): need parameter(s) %s in object %s of type %s' % (param[0],objectname,objecttype)
            else:
                thisparam = objectparams[param[0]]
            # convert to the correct units (if we know how to)
            thisparam = convertUnit(thisparam[0],thisparam[1],param[1],param[0])
            if param[1] == 'String' or param[1] == 'numpy':
                evalparam = thisparam[0]
            else:
                try:
                    # first try converting to an int...
                    evalparam = int(thisparam[0])
                except ValueError:
                    # if it doesn't work, try a float...
                    try:
                        evalparam = float(thisparam[0])
                    except ValueError:
                        # if the float does not work, try calling Python...
                        evalparam = eval(thisparam[0])
            arglist.append(evalparam)
        return (XMLToObject[objecttype][1],arglist,objectname,objectparams)
# for the moment handle only remote files...
def handleFiles(args):
    """Replace every fileData placeholder in args (a list, modified in place)
    with the numpy array column it refers to. Each distinct binary file is
    read and split into per-record columns only once."""
    files = {}
    for i in range(0,len(args)):
        if isinstance(args[i],fileData):
            if not args[i].filename in files:
                # doubles are 8 bytes each
                readlength = 8 * args[i].length * args[i].records
                try:
                    binaryfile = open(args[i].filename,'r')
                    readbuffer = numpy.fromstring(binaryfile.read(readlength),'double')
                    binaryfile.close()
                except:
                    # fall back to the bare filename (current directory)
                    try:
                        binaryfile = open(args[i].basename,'r')
                        readbuffer = numpy.fromstring(binaryfile.read(readlength),'double')
                        binaryfile.close()
                    except:
                        raise IOError, 'handleFiles(): problems reading file %s' % args[i].filename
                # swap bytes if the file endianness differs from the host's
                if (('BigEndian' in args[i].encoding and sys.byteorder == 'little') or
                    ('LittleEndian' in args[i].encoding and sys.byteorder == 'big')):
                    readbuffer = readbuffer.byteswap()
                if args[i].records == 1:
                    files[args[i].filename] = [readbuffer]
                else:
                    readbuffer = numpy.reshape(readbuffer,[args[i].length,args[i].records])
                    files[args[i].filename] = [readbuffer[:,j].copy() for j in range(0,args[i].records)]
            args[i] = files[args[i].filename][args[i].index].copy()
def MakeObject(s):
    """Instantiate a synthlisa object from a factory tuple
    s = (class, arglist, name, params) as returned by readXML.processObject().
    File-backed arguments are loaded on demand; every non-numpy parameter is
    also attached to the instance as an attribute."""
    # if any data needs to be loaded, do so!
    for arg in s[1]:
        if isinstance(arg,fileData):
            handleFiles(s[1])
            break
    ret = (s[0])(*(s[1]))
    ret.name = s[2]
    for param in s[3]:
        if s[3][param][1] != 'numpy':
            ret.__setattr__(param,(s[3][param]))
    return ret
class LISASourceFactory:
    """Lazy sequence of synthlisa sources: items are stored as factory tuples
    and instantiated (including loading of their binary data) only on access."""
    def __init__(self,sourcelist):
        self.sourcelist = sourcelist
        self.sourcenumber = len(sourcelist)
    def __getitem__(self,index):
        return MakeObject(self.sourcelist[index])
    def __getslice__(self,index1,index2):
        # NOTE(review): __getslice__ is Python-2-only; under Python 3 slices
        # would have to be handled in __getitem__
        return map(MakeObject,self.sourcelist[index1:index2])
    def __len__(self):
        return self.sourcenumber
    def __iter__(self):
        return LISASourceIterator(self.sourcelist)
class LISASourceIterator:
    """Iterator companion of LISASourceFactory; instantiates one source per
    step (Python-2 `next` protocol -- Python 3 would need `__next__`)."""
    def __init__(self,sourcelist):
        self.sourcelist = sourcelist
        self.sourcenumber = len(sourcelist)
        self.last = 0          # index of the next source to yield
    def __iter__(self):
        return self
    def next(self):
        if self.last == self.sourcenumber:
            raise StopIteration
        else:
            i = self.sourcelist[self.last]
            self.last = self.last + 1
            return MakeObject(i)
|
vallisREPO_NAMEsynthlisaPATH_START.@synthlisa_extracted@synthlisa-master@lisasim@lisaxml.py@.PATH_END.py
|
{
"filename": "parameters.py",
"repo_name": "mtalapinto/moes",
"repo_path": "platospec/optics/parameters.py",
"type": "Python"
}
|
import numpy as np
def get_name(i):
    """Return the name (first whitespace-separated column) of the i-th
    parameter listed in init_params_siman_a.txt.

    Raises IndexError if i is out of range (the original code raised an
    UnboundLocalError in that case). The original also leaked the file
    handle and scanned all indices to find i; both are fixed here.
    """
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # 'with' guarantees the file is closed even on a parse error
    with open(basedir + 'init_params_siman_a.txt', 'r') as param_file:
        params_names = [line.split()[0] for line in param_file]
    return str(params_names[i])
def load():
    """Load the nominal parameter values (third column of init_params.txt)
    as a list of floats."""
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # 'with' closes the file (the original leaked the handle)
    with open(basedir + 'init_params.txt', 'r') as param_file:
        params = [float(line.split()[2]) for line in param_file]
    return params
def load4plots():
    """Load the parameter values used for plotting (third column of
    init_params_for_plots.txt) as a list of floats."""
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # 'with' closes the file (the original leaked the handle)
    with open(basedir + 'init_params_for_plots.txt', 'r') as param_file:
        params = [float(line.split()[2]) for line in param_file]
    return params
def load_sa(fib):
    """Load the simulated-annealing parameter values (third column) for
    fiber 'a' (fib == 'a') or fiber 'b' (any other value)."""
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # same file layout for both fibers; only the filename differs
    fname = 'init_params_siman_a.txt' if fib == 'a' else 'init_params_siman_b.txt'
    # 'with' closes the file (the original leaked the handle)
    with open(basedir + fname, 'r') as param_file:
        params = [float(line.split()[2]) for line in param_file]
    return params
def load_pymul():
    """Load the pymultinest parameter values (third column of
    init_params_pymul.txt) as a list of floats."""
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # 'with' closes the file (the original leaked the handle)
    with open(basedir + 'init_params_pymul.txt', 'r') as param_file:
        params = [float(line.split()[2]) for line in param_file]
    return params
def load_ini():
    """Load the initial parameter values (third column of init_params.txt)
    as a list of floats. Reads the same file as load()."""
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # 'with' closes the file (the original leaked the handle)
    with open(basedir + 'init_params.txt', 'r') as param_file:
        params = [float(line.split()[2]) for line in param_file]
    return params
def adjust_err_bud(par, i):
    """Return (par, dx): the thermal error-budget perturbation dx for the
    parameter with index i, for a temperature excursion dT = 0.3 K.

    dx is the thermal expansion of the relevant material applied to the
    parameter value (or to the coefficient alone when par == 0.0); for the
    grating constants (i == 11, 23) the expansion is applied to the groove
    spacing 1/par and converted back to a density difference.
    """
    dT = 0.3  # assumed temperature excursion in K
    cte = np.array([
        0.0101*1e-6,   # Zerodur CTE in K-1
        20.3151*1e-6,  # Aluminum-5083 CTE
        9.1*1e-6,      # LF5 grismm in K-1
        8.4*1e-6,      # S-BAM4 glass in C-1
        13.1 * 1e-6,   # S-FPL51 glass in C-1
        14.5 * 1e-6,   # S-FPL53 glass in C-1
        7.2 * 1e-6,    # S-BSL7 glass in C-1
        6.1 * 1e-6,    # S-LAL10 glass in C-1
        0.51 * 1e-6,   # Silica glass in C-1
        8.1e-6,        # stim2
        11.1e-6,       # caf 2
        -0.23e-6,      # infrasil
        6.748e-6,      # sftm16
        4.6e-6         # znse
    ])
    if i in (0, 1, 2, 3, 4, 5, 6, 7):  # slit parameters (Zerodur)
        dx = cte[0] * dT if par == 0.0 else cte[0] * par * dT
    elif i in (8, 15, 18, 26, 30, 36):  # distance parameters (aluminum)
        dx = cte[1] * dT if par == 0.0 else cte[1] * par * dT
    elif i == 11:  # echelle groove density G
        # expand the groove spacing 1/G thermally, then convert back
        daux = 1 / par
        dout = daux + cte[0] * daux * dT
        dx = np.abs(par - 1 / dout)
    elif i == 23:  # grism groove density G (LF5 glass)
        daux = 1 / par
        dout = daux + cte[2] * daux * dT
        dx = np.abs(par - 1 / dout)
    elif i == 24:  # grism apex angle (LF5 glass)
        dx = cte[2] * par * dT
    else:  # angles and decenters (aluminum)
        dx = cte[1] * dT if par == 0.0 else cte[1] * par * dT
    return par, dx
def uniform_adjust_par(pars, i):
    """Return (pars, dx): the step half-width dx used for parameter index i
    (pars is passed through unchanged).

    The original condition chains contained duplicated tests
    (``i == 26 or i == 27`` repeated within one branch, and ``i == 26``
    appearing in two branches) and a dead commented-out block; since every
    overlapping branch assigned the same value 0.1, the table below is
    exactly equivalent.
    """
    if i in (0, 1, 2, 3, 4):    # fiber decenter
        dx = 0.005
    elif i in (5, 6, 7):        # fiber angles
        dx = 0.001
    elif i in (11, 24):         # grating constants
        dx = 0.2
    else:                       # distances, decenters and angles (all 0.1 in mm)
        dx = 0.1
    return pars, dx
def list_main_pars(par_sort):
    """Write the parameter names selected (and ordered) by ``par_sort`` to
    ``params_list_ordered_uniform_y.dat``.

    par_sort -- iterable of rows whose first entry is a parameter index
                into the names listed one-per-line in ``init_params.txt``.

    Fixes: both files are now closed deterministically (the input file was
    leaked), and the O(n) inner scan per row is replaced by a direct index
    lookup with the same skip-on-out-of-range behaviour.
    """
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    with open(basedir+'init_params.txt', 'r') as file_in:
        # first whitespace-separated token of every line is the name
        par_names = [line.split()[0] for line in file_in]
    with open(basedir+'params_list_ordered_uniform_y.dat', 'w') as file_out_0:
        for row in par_sort:
            par_aux = int(row[0])
            if 0 <= par_aux < len(par_names):
                file_out_0.write(par_names[par_aux]+'\n')
def write(params):
    """Write the 44 instrument parameters to ``init_params_crm.txt``.

    params -- sequence of at least 44 values, in the order of ``names``
    below.  Returns a short status string.

    BUG FIX: ``basedir`` was referenced on the ``open`` line before being
    assigned, so this function always raised NameError; the file handle was
    also never closed.
    """
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # parameter names, in the exact order expected in ``params``
    names = ['slit_dec_x_a', 'slit_dec_y_a', 'slit_dec_x_b', 'slit_dec_y_b',
             'slit_defocus', 'slit_tilt_x', 'slit_tilt_y', 'slit_tilt_z',
             'd_slit_col', 'coll_tilt_x', 'coll_tilt_y', 'ech_G', 'ech_blaze',
             'ech_gamma', 'echelle_z', 'd_col_trf', 'trf_mirror_tilt_x',
             'trf_mirror_tilt_y', 'd_col_grm', 'grism_dec_x', 'grism_dec_y',
             'grm_tilt_x', 'grm_tilt_y', 'grm_G', 'grm_apex', 'd_grm_cam',
             'cam_dec_x', 'cam_dec_y', 'cam_tilt_x', 'cam_tilt_y', 'd_cam_ff',
             'ccd_ff_dec_x', 'ccd_ff_dec_y', 'ccd_ff_tilt_x', 'ccd_ff_tilt_y',
             'ccd_ff_tilt_z', 'd_ff_ccd', 'ccd_dec_x', 'ccd_dec_y',
             'ccd_defocus', 'ccd_tilt_x', 'ccd_tilt_y', 'ccd_tilt_z', 'p']
    with open(basedir+'init_params_crm.txt', 'w') as fh:
        # one "name = value" line per parameter, no trailing newline
        fh.write('\n'.join('%s = %s' % (names[i], params[i]) for i in range(44)))
    return 'Parameters saved.'
def write_old(params):
    """Write the 44 instrument parameters to ``init_params_old.txt``.

    params -- sequence of at least 44 values, in the order of ``names``
    below.  Returns a short status string.

    Fix: the file handle was never closed; the 44 near-identical write
    lines are collapsed into a name table.
    """
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # parameter names, in the exact order expected in ``params``
    names = ['slit_dec_x_a', 'slit_dec_y_a', 'slit_dec_x_b', 'slit_dec_y_b',
             'slit_defocus', 'slit_tilt_x', 'slit_tilt_y', 'slit_tilt_z',
             'd_slit_col', 'coll_tilt_x', 'coll_tilt_y', 'ech_G', 'ech_blaze',
             'ech_gamma', 'echelle_z', 'd_col_trf', 'trf_mirror_tilt_x',
             'trf_mirror_tilt_y', 'd_col_grm', 'grism_dec_x', 'grism_dec_y',
             'grm_tilt_x', 'grm_tilt_y', 'grm_G', 'grm_apex', 'd_grm_cam',
             'cam_dec_x', 'cam_dec_y', 'cam_tilt_x', 'cam_tilt_y', 'd_cam_ff',
             'ccd_ff_dec_x', 'ccd_ff_dec_y', 'ccd_ff_tilt_x', 'ccd_ff_tilt_y',
             'ccd_ff_tilt_z', 'd_ff_ccd', 'ccd_dec_x', 'ccd_dec_y',
             'ccd_defocus', 'ccd_tilt_x', 'ccd_tilt_y', 'ccd_tilt_z', 'p']
    with open(basedir+'init_params_old.txt', 'w') as fh:
        fh.write('\n'.join('%s = %s' % (names[i], params[i]) for i in range(44)))
    return 'Parameters saved'
def write_pymul(params):
    """Write the 44 instrument parameters to ``init_params_pymul.txt``.

    params -- sequence of at least 44 values, in the order of ``names``
    below.  Returns a short status string.

    Fix: the file handle was never closed; the 44 near-identical write
    lines are collapsed into a name table.
    """
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    # parameter names, in the exact order expected in ``params``
    names = ['slit_dec_x_a', 'slit_dec_y_a', 'slit_dec_x_b', 'slit_dec_y_b',
             'slit_defocus', 'slit_tilt_x', 'slit_tilt_y', 'slit_tilt_z',
             'd_slit_col', 'coll_tilt_x', 'coll_tilt_y', 'ech_G', 'ech_blaze',
             'ech_gamma', 'echelle_z', 'd_col_trf', 'trf_mirror_tilt_x',
             'trf_mirror_tilt_y', 'd_col_grm', 'grism_dec_x', 'grism_dec_y',
             'grm_tilt_x', 'grm_tilt_y', 'grm_G', 'grm_apex', 'd_grm_cam',
             'cam_dec_x', 'cam_dec_y', 'cam_tilt_x', 'cam_tilt_y', 'd_cam_ff',
             'ccd_ff_dec_x', 'ccd_ff_dec_y', 'ccd_ff_tilt_x', 'ccd_ff_tilt_y',
             'ccd_ff_tilt_z', 'd_ff_ccd', 'ccd_dec_x', 'ccd_dec_y',
             'ccd_defocus', 'ccd_tilt_x', 'ccd_tilt_y', 'ccd_tilt_z', 'p']
    with open(basedir+'init_params_pymul.txt', 'w') as fh:
        fh.write('\n'.join('%s = %s' % (names[i], params[i]) for i in range(44)))
    return 'Parameters saved.'
def write_sim(params, fib):
    """Write the 44 parameters for fibre 'a' or 'b' to the matching
    ``init_params_siman_<fib>.txt`` file.

    params -- sequence of at least 44 values, in the order of ``names``.
    fib    -- fibre label, 'a' or 'b'.

    Raises ValueError for any other fibre label (previously that case
    crashed with NameError on an unbound file handle); the file is now
    also closed deterministically.
    """
    basedir = '/home/eduspec/Documentos/moes/v3.1/vis/parameters/'
    if fib == 'a':
        fname = 'init_params_siman_a.txt'
    elif fib == 'b':
        fname = 'init_params_siman_b.txt'
    else:
        raise ValueError("fib must be 'a' or 'b', got %r" % (fib,))
    # parameter names, in the exact order expected in ``params``
    names = ['slit_dec_x_a', 'slit_dec_y_a', 'slit_dec_x_b', 'slit_dec_y_b',
             'slit_defocus', 'slit_tilt_x', 'slit_tilt_y', 'slit_tilt_z',
             'd_slit_col', 'coll_tilt_x', 'coll_tilt_y', 'ech_G', 'ech_blaze',
             'ech_gamma', 'echelle_z', 'd_col_trf', 'trf_mirror_tilt_x',
             'trf_mirror_tilt_y', 'd_col_grm', 'grism_dec_x', 'grism_dec_y',
             'grm_tilt_x', 'grm_tilt_y', 'grm_G', 'grm_apex', 'd_grm_cam',
             'cam_dec_x', 'cam_dec_y', 'cam_tilt_x', 'cam_tilt_y', 'd_cam_ff',
             'ccd_ff_dec_x', 'ccd_ff_dec_y', 'ccd_ff_tilt_x', 'ccd_ff_tilt_y',
             'ccd_ff_tilt_z', 'd_ff_ccd', 'ccd_dec_x', 'ccd_dec_y',
             'ccd_defocus', 'ccd_tilt_x', 'ccd_tilt_y', 'ccd_tilt_z', 'p']
    with open(basedir+fname, 'w') as fh:
        fh.write('\n'.join('%s = %s' % (names[i], params[i]) for i in range(44)))
    return 'Parameters saved...'
if __name__ == '__main__':
    # Script entry point: load the current parameter set from disk and dump
    # the ordered parameter-name list next to it.
    pars = load()
    list_main_pars(pars)
|
mtalapintoREPO_NAMEmoesPATH_START.@platospec@optics@parameters.py@.PATH_END.py
|
{
"filename": "override.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/setuptools/py3/_distutils_hack/override.py",
"type": "Python"
}
|
__import__('_distutils_hack').do_override()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@setuptools@py3@_distutils_hack@override.py@.PATH_END.py
|
{
"filename": "_bgcolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/parcoords/line/colorbar/_bgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``bgcolor`` property of
    ``parcoords.line.colorbar``."""

    def __init__(
        self, plotly_name="bgcolor", parent_name="parcoords.line.colorbar", **kwargs
    ):
        # default edit type unless the caller overrides it
        kwargs.setdefault("edit_type", "colorbars")
        super(BgcolorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@parcoords@line@colorbar@_bgcolor.py@.PATH_END.py
|
{
"filename": "random_cat.md",
"repo_name": "CosmoStat/shapepipe",
"repo_path": "shapepipe_extracted/shapepipe-master/docs/source/random_cat.md",
"type": "Markdown"
}
|
# Create random catalogue
This section describes how to create a random catalogue from ShapePipe
mask files corresponding to a selection of tiles.
## Set up
First, if it does not exist already, create the file ``tile_numbers.txt`` containing a list of tile IDs,
one per line. This is the same format as the input file to ``get_images_runner``.
For example, link to a patch ID list,
```bash
ln -s tiles_PX.txt tile_numbers.txt
```
Next, set the run and config paths,
```bash
export SP_RUN=.
export SP_CONFIG=/path/to/config-files
```
## Get images or image headers
We need the footprint of the image tiles. If they have been downloaded for a ``ShapePipe`` run,
check that they are accessible as last run of the ``get_images_runner`` module.
If not, we can just download the headers to gain significant download time.
```bash
shapepipe_run -c $SP_CONFIG/config_get_tiles_vos_headers.ini
```
## Check mask files
Make sure that all mask files are present. If they have been downloaded from ``vos`` as ``.tgz`` files,
type
```bash
canfar_avail_results -i tile_numbers.txt --input_path . -v -m -o missing_mask.txt
```
In case of missing mask files, check whether they are present in the ``vos`` remote directory,
```bash
canfar_avail_results -i tile_numbers.txt --input_path vos:cfis/vos-path/to/results -v -m
```
If missing on ``vos``, process those tiles. If processing only up to the mask is necessary,
the following steps can be carried out,
```bash
job_sp -j 7 TILE_ID
job_sp -j 128 TILE_ID
```
The first command processes the tile up to the mask; the second line uploads the mask files
to ``vos``.
Now, download the missing masks with
```bash
canfar_download_results -i missing_mask.txt --input_vos vos-path/to/results -m -v
```
Untar .tgz files if required,
```bash
while read p; do tar xvf pipeline_flag_$p.tgz; done <missing_mask.txt
```
## Prepare combined mask input directory
To combine all mask files into one input directory, easy to find by the subsequent ``ShapePipe`` module
``random_runner``, type
```bash
prepare_tiles_for_final -c flag
```
To make sure everything went well, check the linked .fits files with
```bash
canfar_avail_results -i tile_numbers.txt --input_path output/run_sp_combined_flag/mask_runner/output -x fits -v -m
```
## Create random catalogue
First, create a random catalogue for each input tile and mask file,
```bash
shapepipe_run -c $SP_CONFIG/config_Rc.ini
```
Next, merge those catalogues into a numpy binary (``.npy``) file,
```bash
merge_final_cat -i output/run_sp_Rc/random_cat_runner/output -n random_cat -v
```
## Results
We can plot the random objects,
```bash
python ~/astro/repositories/github/sp_validation/scripts/plot_rand.py
```
and finally compute the effective survey area,
```bash
~/astro/repositories/github/sp_validation/scripts/compute_area.py
```
|
CosmoStatREPO_NAMEshapepipePATH_START.@shapepipe_extracted@shapepipe-master@docs@source@random_cat.md@.PATH_END.py
|
{
"filename": "markup.py",
"repo_name": "dingswin/psrvlbireduce",
"repo_path": "psrvlbireduce_extracted/psrvlbireduce-master/datareduction/markup.py",
"type": "Python"
}
|
# This code is in the public domain, it comes
# with absolutely no warranty and you can do
# absolutely whatever you want with it.
__date__ = '1 October 2012'
__version__ = '1.9'
__doc__= """
This is markup.py - a Python module that attempts to
make it easier to generate HTML/XML from a Python program
in an intuitive, lightweight, customizable and pythonic way.
The code is in the public domain.
Version: %s as of %s.
Documentation and further info is at http://markup.sourceforge.net/
Please send bug reports, feature requests, enhancement
ideas or questions to nogradi at gmail dot com.
Installation: drop markup.py somewhere into your Python path.
""" % ( __version__, __date__ )
# Py2/Py3 compatibility shim: Python 3 has no ``basestring`` and the
# ``string`` module lost its ``lower`` function, so alias both to ``str``.
try:
    basestring
    import string
except NameError:
    # python 3
    basestring = str
    string = str

# tags which are reserved python keywords will be referred
# to by a leading underscore otherwise we end up with a syntax error
import keyword
class element:
    """This class handles the addition of a new element.

    An element is created for every attribute access on a ``page`` (or a
    oneliner); calling it renders the corresponding tag.  When ``parent`` is
    None the rendered string is returned, otherwise it is appended to the
    parent's content list."""

    def __init__( self, tag, case='lower', parent=None ):
        self.parent = parent
        if case == 'upper':
            self.tag = tag.upper( )
        elif case == 'lower':
            self.tag = tag.lower( )
        elif case == 'given':
            self.tag = tag
        else:
            self.tag = tag

    def __call__( self, *args, **kwargs ):
        """Render the tag; at most one positional arg (the enclosed text),
        keyword args become tag attributes."""
        if len( args ) > 1:
            raise ArgumentError( self.tag )

        # if class_ was defined in parent it should be added to every element
        if self.parent is not None and self.parent.class_ is not None:
            if 'class_' not in kwargs:
                kwargs['class_'] = self.parent.class_

        # parentless (oneliner) mode: return the rendered string directly
        if self.parent is None and len( args ) == 1:
            x = [ self.render( self.tag, False, myarg, mydict ) for myarg, mydict in _argsdicts( args, kwargs ) ]
            return '\n'.join( x )
        elif self.parent is None and len( args ) == 0:
            x = [ self.render( self.tag, True, myarg, mydict ) for myarg, mydict in _argsdicts( args, kwargs ) ]
            return '\n'.join( x )

        if self.tag in self.parent.twotags:
            for myarg, mydict in _argsdicts( args, kwargs ):
                self.render( self.tag, False, myarg, mydict )
        elif self.tag in self.parent.onetags:
            if len( args ) == 0:
                for myarg, mydict in _argsdicts( args, kwargs ):
                    self.render( self.tag, True, myarg, mydict ) # here myarg is always None, because len( args ) = 0
            else:
                raise ClosingError( self.tag )
        elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
            raise DeprecationError( self.tag )
        else:
            raise InvalidElementError( self.tag, self.parent.mode )

    def render( self, tag, single, between, kwargs ):
        """Append the actual tags to content (or return the string when
        there is no parent)."""
        out = "<%s" % tag
        for key, value in list( kwargs.items( ) ):
            if value is not None: # when value is None that means stuff like <... checked>
                key = key.strip('_') # strip this so class_ will mean class, etc.
                if key == 'http_equiv': # special cases, maybe change _ to - overall?
                    key = 'http-equiv'
                elif key == 'accept_charset':
                    key = 'accept-charset'
                out = "%s %s=\"%s\"" % ( out, key, escape( value ) )
            else:
                out = "%s %s" % ( out, key )
        if between is not None:
            out = "%s>%s</%s>" % ( out, between, tag )
        else:
            if single:
                out = "%s />" % out
            else:
                out = "%s>" % out
        if self.parent is not None:
            self.parent.content.append( out )
        else:
            return out

    def close( self ):
        """Append a closing tag unless element has only opening tag."""
        if self.tag in self.parent.twotags:
            self.parent.content.append( "</%s>" % self.tag )
        elif self.tag in self.parent.onetags:
            raise ClosingError( self.tag )
        elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
            raise DeprecationError( self.tag )

    def open( self, **kwargs ):
        """Append an opening tag."""
        if self.tag in self.parent.twotags or self.tag in self.parent.onetags:
            self.render( self.tag, False, None, kwargs )
        # BUG FIX: this used to read ``self.mode``, which does not exist on
        # element instances, so deprecated tags raised AttributeError instead
        # of the intended DeprecationError (compare ``close`` above).
        elif self.parent.mode == 'strict_html' and self.tag in self.parent.deptags:
            raise DeprecationError( self.tag )
class page:
    """This is our main class representing a document. Elements are added
    as attributes of an instance of this class."""

    def __init__( self, mode='strict_html', case='lower', onetags=None, twotags=None, separator='\n', class_=None ):
        """Stuff that effects the whole document.

        mode -- 'strict_html' for HTML 4.01 (default)
                'html' alias for 'strict_html'
                'loose_html' to allow some deprecated elements
                'xml' to allow arbitrary elements

        case -- 'lower' element names will be printed in lower case (default)
                'upper' they will be printed in upper case
                'given' element names will be printed as they are given

        onetags -- list or tuple of valid elements with opening tags only
        twotags -- list or tuple of valid elements with both opening and closing tags
                   these two keyword arguments may be used to select
                   the set of valid elements in 'xml' mode
                   invalid elements will raise appropriate exceptions

        separator -- string to place between added elements, defaults to newline

        class_ -- a class that will be added to every element if defined"""

        # HTML 4.01 tag vocabulary used by the 'html' family of modes
        valid_onetags = [ "AREA", "BASE", "BR", "COL", "FRAME", "HR", "IMG", "INPUT", "LINK", "META", "PARAM" ]
        valid_twotags = [ "A", "ABBR", "ACRONYM", "ADDRESS", "B", "BDO", "BIG", "BLOCKQUOTE", "BODY", "BUTTON",
            "CAPTION", "CITE", "CODE", "COLGROUP", "DD", "DEL", "DFN", "DIV", "DL", "DT", "EM", "FIELDSET",
            "FORM", "FRAMESET", "H1", "H2", "H3", "H4", "H5", "H6", "HEAD", "HTML", "I", "IFRAME", "INS",
            "KBD", "LABEL", "LEGEND", "LI", "MAP", "NOFRAMES", "NOSCRIPT", "OBJECT", "OL", "OPTGROUP",
            "OPTION", "P", "PRE", "Q", "SAMP", "SCRIPT", "SELECT", "SMALL", "SPAN", "STRONG", "STYLE",
            "SUB", "SUP", "TABLE", "TBODY", "TD", "TEXTAREA", "TFOOT", "TH", "THEAD", "TITLE", "TR",
            "TT", "UL", "VAR" ]
        deprecated_onetags = [ "BASEFONT", "ISINDEX" ]
        deprecated_twotags = [ "APPLET", "CENTER", "DIR", "FONT", "MENU", "S", "STRIKE", "U" ]

        self.header = [ ]
        self.content = [ ]
        self.footer = [ ]
        self.case = case
        self.separator = separator

        # init( ) sets it to True so we know that </body></html> has to be printed at the end
        self._full = False
        self.class_= class_

        # select the sets of legal one-sided / two-sided tags for the mode;
        # both upper- and lower-case spellings are accepted
        if mode == 'strict_html' or mode == 'html':
            self.onetags = valid_onetags
            self.onetags += list( map( string.lower, self.onetags ) )
            self.twotags = valid_twotags
            self.twotags += list( map( string.lower, self.twotags ) )
            self.deptags = deprecated_onetags + deprecated_twotags
            self.deptags += list( map( string.lower, self.deptags ) )
            self.mode = 'strict_html'
        elif mode == 'loose_html':
            self.onetags = valid_onetags + deprecated_onetags
            self.onetags += list( map( string.lower, self.onetags ) )
            self.twotags = valid_twotags + deprecated_twotags
            self.twotags += list( map( string.lower, self.twotags ) )
            self.mode = mode
        elif mode == 'xml':
            if onetags and twotags:
                self.onetags = onetags
                self.twotags = twotags
            elif ( onetags and not twotags ) or ( twotags and not onetags ):
                raise CustomizationError( )
            else:
                # no restriction: russell( ) claims to contain every tag
                self.onetags = russell( )
                self.twotags = russell( )
            self.mode = mode
        else:
            raise ModeError( mode )

    def __getattr__( self, attr ):
        # tags should start with double underscore
        if attr.startswith("__") and attr.endswith("__"):
            raise AttributeError( attr )
        # tag with single underscore should be a reserved keyword
        if attr.startswith( '_' ):
            attr = attr.lstrip( '_' )
            if attr not in keyword.kwlist:
                raise AttributeError( attr )
        return element( attr, case=self.case, parent=self )

    def __str__( self ):
        # close the document only if init( ) opened <html><body>
        if self._full and ( self.mode == 'strict_html' or self.mode == 'loose_html' ):
            end = [ '</body>', '</html>' ]
        else:
            end = [ ]
        return self.separator.join( self.header + self.content + self.footer + end )

    def __call__( self, escape=False ):
        """Return the document as a string.

        escape -- False print normally
                  True replace < and > by &lt; and &gt;
                  the default escape sequences in most browsers"""
        if escape:
            return _escape( self.__str__( ) )
        else:
            return self.__str__( )

    def add( self, text ):
        """This is an alias to addcontent."""
        self.addcontent( text )

    def addfooter( self, text ):
        """Add some text to the bottom of the document"""
        self.footer.append( text )

    def addheader( self, text ):
        """Add some text to the top of the document"""
        self.header.append( text )

    def addcontent( self, text ):
        """Add some text to the main part of the document"""
        self.content.append( text )

    def init( self, lang='en', css=None, metainfo=None, title=None, header=None,
              footer=None, charset=None, encoding=None, doctype=None, bodyattrs=None, script=None, base=None ):
        """This method is used for complete documents with appropriate
        doctype, encoding, title, etc information. For an HTML/XML snippet
        omit this method.

        lang -- language, usually a two character string, will appear
                as <html lang='en'> in html mode (ignored in xml mode)

        css -- Cascading Style Sheet filename as a string or a list of
               strings for multiple css files (ignored in xml mode)

        metainfo -- a dictionary in the form { 'name':'content' } to be inserted
                    into meta element(s) as <meta name='name' content='content'>
                    (ignored in xml mode)

        base -- set the <base href="..."> tag in <head>

        bodyattrs --a dictionary in the form { 'key':'value', ... } which will be added
                    as attributes of the <body> element as <body key='value' ... >
                    (ignored in xml mode)

        script -- dictionary containing src:type pairs, <script type='text/type' src=src></script>
                  or a list of [ 'src1', 'src2', ... ] in which case 'javascript' is assumed for all

        title -- the title of the document as a string to be inserted into
                 a title element as <title>my title</title> (ignored in xml mode)

        header -- some text to be inserted right after the <body> element
                  (ignored in xml mode)

        footer -- some text to be inserted right before the </body> element
                  (ignored in xml mode)

        charset -- a string defining the character set, will be inserted into a
                   <meta http-equiv='Content-Type' content='text/html; charset=myset'>
                   element (ignored in xml mode)

        encoding -- a string defining the encoding, will be put into to first line of
                    the document as <?xml version='1.0' encoding='myencoding' ?> in
                    xml mode (ignored in html mode)

        doctype -- the document type string, defaults to
                   <!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN'>
                   in html mode (ignored in xml mode)"""

        self._full = True

        if self.mode == 'strict_html' or self.mode == 'loose_html':
            if doctype is None:
                doctype = "<!DOCTYPE HTML PUBLIC '-//W3C//DTD HTML 4.01 Transitional//EN'>"
            self.header.append( doctype )
            self.html( lang=lang )
            self.head( )
            if charset is not None:
                self.meta( http_equiv='Content-Type', content="text/html; charset=%s" % charset )
            if metainfo is not None:
                self.metainfo( metainfo )
            if css is not None:
                self.css( css )
            if title is not None:
                self.title( title )
            if script is not None:
                self.scripts( script )
            if base is not None:
                self.base( href='%s' % base )
            self.head.close()
            if bodyattrs is not None:
                self.body( **bodyattrs )
            else:
                self.body( )
            if header is not None:
                self.content.append( header )
            if footer is not None:
                self.footer.append( footer )

        elif self.mode == 'xml':
            if doctype is None:
                if encoding is not None:
                    doctype = "<?xml version='1.0' encoding='%s' ?>" % encoding
                else:
                    doctype = "<?xml version='1.0' ?>"
            self.header.append( doctype )

    def css( self, filelist ):
        """This convenience function is only useful for html.
        It adds css stylesheet(s) to the document via the <link> element."""
        if isinstance( filelist, basestring ):
            self.link( href=filelist, rel='stylesheet', type='text/css', media='all' )
        else:
            for file in filelist:
                self.link( href=file, rel='stylesheet', type='text/css', media='all' )

    def metainfo( self, mydict ):
        """This convenience function is only useful for html.
        It adds meta information via the <meta> element, the argument is
        a dictionary of the form { 'name':'content' }."""
        if isinstance( mydict, dict ):
            for name, content in list( mydict.items( ) ):
                self.meta( name=name, content=content )
        else:
            raise TypeError( "Metainfo should be called with a dictionary argument of name:content pairs." )

    def scripts( self, mydict ):
        """Only useful in html, mydict is dictionary of src:type pairs or a list
        of script sources [ 'src1', 'src2', ... ] in which case 'javascript' is assumed for type.
        Will be rendered as <script type='text/type' src=src></script>"""
        if isinstance( mydict, dict ):
            for src, type in list( mydict.items( ) ):
                self.script( '', src=src, type='text/%s' % type )
        else:
            try:
                for src in mydict:
                    self.script( '', src=src, type='text/javascript' )
            except:
                raise TypeError( "Script should be given a dictionary of src:type pairs or a list of javascript src's." )
class _oneliner:
"""An instance of oneliner returns a string corresponding to one element.
This class can be used to write 'oneliners' that return a string
immediately so there is no need to instantiate the page class."""
def __init__( self, case='lower' ):
self.case = case
def __getattr__( self, attr ):
# tags should start with double underscore
if attr.startswith("__") and attr.endswith("__"):
raise AttributeError( attr )
# tag with single underscore should be a reserved keyword
if attr.startswith( '_' ):
attr = attr.lstrip( '_' )
if attr not in keyword.kwlist:
raise AttributeError( attr )
return element( attr, case=self.case, parent=None )
# Ready-made oneliner factories, one per tag-case convention.
oneliner = _oneliner( case='lower' )
upper_oneliner = _oneliner( case='upper' )
given_oneliner = _oneliner( case='given' )
def _argsdicts( args, mydict ):
"""A utility generator that pads argument list and dictionary values, will only be called with len( args ) = 0, 1."""
if len( args ) == 0:
args = None,
elif len( args ) == 1:
args = _totuple( args[0] )
else:
raise Exception( "We should have never gotten here." )
mykeys = list( mydict.keys( ) )
myvalues = list( map( _totuple, list( mydict.values( ) ) ) )
maxlength = max( list( map( len, [ args ] + myvalues ) ) )
for i in range( maxlength ):
thisdict = { }
for key, value in zip( mykeys, myvalues ):
try:
thisdict[ key ] = value[i]
except IndexError:
thisdict[ key ] = value[-1]
try:
thisarg = args[i]
except IndexError:
thisarg = args[-1]
yield thisarg, thisdict
def _totuple( x ):
    """Utility stuff to convert string, int, long, float, None or anything
    to a usable tuple: strings and None become one-element tuples, numbers
    are stringified, everything else is passed to tuple( )."""
    # BUG FIX: ``long`` only exists on Python 2; referencing it unguarded
    # raised NameError for every non-string input under Python 3.
    try:
        numeric = ( int, long, float )
    except NameError:  # Python 3
        numeric = ( int, float )
    if isinstance( x, basestring ):
        out = x,
    elif isinstance( x, numeric ):
        out = str( x ),
    elif x is None:
        out = None,
    else:
        out = tuple( x )
    return out
def escape( text, newline=False ):
    """Escape the HTML special characters &, >, <, " and ' in *text*;
    with newline=True also turn newlines into <br> tags.

    Non-string input is returned unchanged.  The entity targets below were
    garbled to identity replacements in this copy (and ' was mapped to the
    double-quote entity); restored to the standard entities."""
    if isinstance( text, basestring ):
        # '&' first, so the ampersands of the entities below survive
        if '&' in text:
            text = text.replace( '&', '&amp;' )
        if '>' in text:
            text = text.replace( '>', '&gt;' )
        if '<' in text:
            text = text.replace( '<', '&lt;' )
        if '\"' in text:
            text = text.replace( '\"', '&quot;' )
        if '\'' in text:
            text = text.replace( '\'', '&#39;' )
        if newline:
            if '\n' in text:
                text = text.replace( '\n', '<br>' )
    return text

_escape = escape
def unescape( text ):
    """Inverse of escape: turn the &amp;, &gt;, &lt; and &quot; entities
    back into their literal characters.  Non-string input is returned
    unchanged.  (The entity sources were garbled to identity replacements
    in this copy; restored.)"""
    if isinstance( text, basestring ):
        if '&amp;' in text:
            text = text.replace( '&amp;', '&' )
        if '&gt;' in text:
            text = text.replace( '&gt;', '>' )
        if '&lt;' in text:
            text = text.replace( '&lt;', '<' )
        if '&quot;' in text:
            text = text.replace( '&quot;', '\"' )
    return text
class dummy:
    """A dummy class for attaching attributes."""
    pass

# Common HTML 4.01 doctype declarations, available as doctype.frameset,
# doctype.strict and doctype.loose.
doctype = dummy( )
doctype.frameset = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">"""
doctype.strict = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">"""
doctype.loose = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">"""
class russell:
    """A dummy class that contains anything."""

    def __contains__( self, anything ):
        # membership is unconditionally true; used as the wildcard tag set
        # for 'xml' mode
        return True
class MarkupError( Exception ):
    """All our exceptions subclass this."""
    def __str__( self ):
        # subclasses assign ``self.message`` in their __init__
        return self.message

class ClosingError( MarkupError ):
    # positional text was passed to an element that has no closing tag
    def __init__( self, tag ):
        self.message = "The element '%s' does not accept non-keyword arguments (has no closing tag)." % tag

class OpeningError( MarkupError ):
    # the element cannot be opened
    def __init__( self, tag ):
        self.message = "The element '%s' can not be opened." % tag

class ArgumentError( MarkupError ):
    # elements accept at most one positional argument (the enclosed text)
    def __init__( self, tag ):
        self.message = "The element '%s' was called with more than one non-keyword argument." % tag

class InvalidElementError( MarkupError ):
    # the tag is not in the valid set for the current page mode
    def __init__( self, tag, mode ):
        self.message = "The element '%s' is not valid for your mode '%s'." % ( tag, mode )

class DeprecationError( MarkupError ):
    # the tag is deprecated in strict mode
    def __init__( self, tag ):
        self.message = "The element '%s' is deprecated, instantiate markup.page with mode='loose_html' to allow it." % tag

class ModeError( MarkupError ):
    # unknown page mode string
    def __init__( self, mode ):
        self.message = "Mode '%s' is invalid, possible values: strict_html, html (alias for strict_html), loose_html, xml." % mode

class CustomizationError( MarkupError ):
    # in xml mode, onetags and twotags must be customized together
    def __init__( self ):
        self.message = "If you customize the allowed elements, you must define both types 'onetags' and 'twotags'."
if __name__ == '__main__':
    import sys
    # running the module directly just prints its usage documentation
    sys.stdout.write( __doc__ )
|
dingswinREPO_NAMEpsrvlbireducePATH_START.@psrvlbireduce_extracted@psrvlbireduce-master@datareduction@markup.py@.PATH_END.py
|
{
"filename": "ModLinAlg.py",
"repo_name": "saopicc/DDFacet",
"repo_path": "DDFacet_extracted/DDFacet-master/DDFacet/Array/ModLinAlg.py",
"type": "Python"
}
|
'''
DDFacet, a facet-based radio imaging package
Copyright (C) 2013-2016 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DDFacet.compatibility import range
import scipy.linalg
import numpy as np
from DDFacet.Other import ModColor
def invertChol(A):
    """Invert a symmetric positive-definite matrix via its Cholesky factor:
    A = L L^T  =>  A^-1 = (L^-1)^T (L^-1).

    NOTE(review): the plain ``.T`` assumes real input; a complex Hermitian
    matrix would need the conjugate transpose — confirm real-only use."""
    lower = np.linalg.cholesky(A)
    lower_inv = np.linalg.inv(lower)
    return np.dot(lower_inv.T, lower_inv)
def invertLU(A):
    """Invert the square matrix ``A`` by LU factorisation (solve A X = I)."""
    factorisation = scipy.linalg.lu_factor(A)
    identity = np.eye(A.shape[0], A.shape[0])
    return scipy.linalg.lu_solve(factorisation, identity)
def sqrtSVD(A):
    """Return a matrix square root of (the symmetrised) ``A`` via SVD.

    A small random jitter (relative scale ``thr``) is added before the SVD
    to regularise near-singular input, so the result is stochastic unless
    the numpy RNG is seeded.  Negative singular values are clipped to 0."""
    #u,s,v=np.linalg.svd(A+np.random.randn(*A.shape)*(1e-6*A.max()))
    sym = (A + A.T) / 2.
    thr = 1e-8
    jitter = np.random.randn(*sym.shape) * (thr * sym.max())
    u, s, v = np.linalg.svd(sym + jitter)
    s = np.where(s < 0., 0., s)
    # u @ diag(sqrt(s)) @ v, written as a column scaling of u
    return np.dot(u * np.sqrt(s), v)
def BatchInverse(A, H=False):
    """Invert an array of 2x2 matrices (any leading shape, trailing 2x2),
    returning an array of the same shape.

    With ``H=True`` the Hermitian transpose of each matrix is inverted
    instead of the matrix itself."""
    out_shape = A.shape
    mats = A.reshape((A.size//4, 2, 2))
    if H:
        a = mats[:, 0, 0].conj()
        d = mats[:, 1, 1].conj()
        b = mats[:, 1, 0].conj()
        c = mats[:, 0, 1].conj()
    else:
        a = mats[:, 0, 0]
        d = mats[:, 1, 1]
        b = mats[:, 0, 1]
        c = mats[:, 1, 0]
    # closed-form 2x2 inverse: [[d,-b],[-c,a]] / det
    inv_det = 1./(a*d - b*c)
    result = np.zeros_like(mats)
    result[:, 0, 0] = d*inv_det
    result[:, 0, 1] = -b*inv_det
    result[:, 1, 0] = -c*inv_det
    result[:, 1, 1] = a*inv_det
    return result.reshape(out_shape)
def BatchH(A):
    """Hermitian (conjugate) transpose of an array of 2x2 matrices,
    preserving the input shape."""
    out_shape = A.shape
    mats = A.reshape((A.size//4, 2, 2))
    result = np.zeros_like(mats)
    # diagonal entries are conjugated in place, off-diagonals swap
    result[:, 0, 0] = mats[:, 0, 0].conj()
    result[:, 1, 1] = mats[:, 1, 1].conj()
    result[:, 0, 1] = mats[:, 1, 0].conj()
    result[:, 1, 0] = mats[:, 0, 1].conj()
    return result.reshape(out_shape)
def BatchDot(A, B):
    """Block-wise product of two stacks of 2x2 matrices (C = A @ B per block).

    Both inputs are interpreted as (..., 2, 2) blocks; the result keeps the
    shape and dtype of A (values are cast on assignment, as in the original
    element-wise formulation).
    """
    out_shape = A.shape
    a_blocks = A.reshape((A.size // 4, 2, 2))
    b_blocks = B.reshape((B.size // 4, 2, 2))
    # zeros_like pins the output dtype to A's dtype; matmul result is cast in
    product = np.zeros_like(a_blocks)
    product[:] = np.matmul(a_blocks, b_blocks)
    return product.reshape(out_shape)
def BatchDot2(A, B):
    """Block-wise 2x2 product for arrays shaped (NDir, nf, na, 4).

    Each trailing length-4 axis is interpreted as a flattened 2x2 block.
    The leading (direction) axes of A and B may differ only if one of them
    broadcasts (size 1); the output keeps A's shape and dtype.
    """
    out_shape = A.shape
    ndir_a, nf, na, _ = out_shape
    a_blocks = A.reshape((ndir_a, nf, na, 2, 2))
    ndir_b, nf, na, _ = B.shape
    b_blocks = B.reshape((ndir_b, nf, na, 2, 2))
    product = np.zeros_like(a_blocks)
    product[:] = np.matmul(a_blocks, b_blocks)
    return product.reshape(out_shape)
def testInvertSVD():
    """Visual sanity check: compare np.linalg.inv against invSVD.

    Plots real/imaginary parts of both inverses and their difference for a
    random complex 10x10 matrix. Requires pylab; interactive use only.
    """
    import pylab
    A=np.random.randn(10,10)+1j*np.random.randn(10,10)
    A0=np.linalg.inv(A)
    A1=invSVD(A)
    A2=A0-A1
    pylab.clf()
    pylab.subplot(3,2,1)
    pylab.imshow(A0.real,interpolation="nearest")
    pylab.colorbar()
    pylab.subplot(3,2,2)
    pylab.imshow(A0.imag,interpolation="nearest")
    pylab.colorbar()
    pylab.subplot(3,2,3)
    pylab.imshow(A1.real,interpolation="nearest")
    pylab.colorbar()
    pylab.subplot(3,2,4)
    pylab.imshow(A1.imag,interpolation="nearest")
    pylab.colorbar()
    pylab.subplot(3,2,5)
    pylab.imshow(A2.real,interpolation="nearest")
    pylab.colorbar()
    pylab.subplot(3,2,6)
    pylab.imshow(A2.imag,interpolation="nearest")
    pylab.colorbar()
    pylab.draw()
    pylab.show(False)
    pylab.pause(0.1)
# def testSVD():
# a=np.
def invSVD(A, Cut=1e-6):
    """Regularised (pseudo-)inverse of A via SVD.

    Singular values below ``Cut * s.max()`` are floored at that threshold
    before inversion, keeping the result finite for (near-)singular input.

    Parameters
    ----------
    A : ndarray
        Square matrix (real or complex) to invert.
    Cut : float
        Relative singular-value floor.

    Returns
    -------
    ndarray
        The regularised inverse of A.
    """
    Ar = A
    try:
        u, s, v = np.linalg.svd(Ar)
    # FIX: catch only the SVD convergence failure instead of a bare except,
    # so unrelated errors (e.g. KeyboardInterrupt, bad input types) propagate.
    except np.linalg.LinAlgError:
        # SVD failed to converge: dump the offending matrix for inspection,
        # then retry in single precision with a tiny cut, which empirically
        # makes such matrices decomposable.
        Name = "errSVDArray_%i" % int(np.random.rand(1)[0] * 10000)
        print(ModColor.Str("Problem inverting Matrix, saving as %s" % Name))
        print(ModColor.Str(" will make it svd-able"))
        np.save(Name, Ar)
        Cut = 1e-20
        u, s, v = np.linalg.svd(np.complex64(Ar))
        u = np.real(u)
        s = np.real(s)
        v = np.real(v)
    s[s < 0.] = Cut
    s[s < Cut * s.max()] = Cut * s.max()
    ssq = (1. / s)
    # v.T scaled column-wise by 1/s == v.T @ diag(1/s); then conj to obtain
    # V^H diag(1/s) U^H without forming the dense diagonal matrix.
    v0 = v.T * ssq.reshape(1, ssq.size)
    Asq = np.conj(np.dot(v0, u.T))
    return Asq
import scipy.sparse.linalg
def invSVD_Lanczos(A):
    """Approximate pseudo-inverse using a truncated (Lanczos) SVD.

    scipy.sparse.linalg.svds returns only the leading singular triplets, and
    a small random perturbation is added first, so the result is approximate
    and not deterministic.
    """
    u,s,v=scipy.sparse.linalg.svds(A+np.random.randn(*A.shape)*(1e-6*A.max()))
    #s[s<0.]=1.e-6
    # floor small singular values to regularise the inversion
    s[s<1.e-6*s.max()]=1.e-6*s.max()
    ssq=(1./s)
    #Asq=np.conj(np.dot(np.dot(v.T,ssq),u.T))
    # v.T scaled column-wise by 1/s == v.T @ diag(1/s)
    v0=v.T*ssq.reshape(1,ssq.size)
    Asq=np.conj(np.dot(v0,u.T))
    return Asq
def SVDw(A):
    """Return u * sqrt(s) * u^T built from the SVD of A.

    NOTE(review): u appears on BOTH sides (not v), so this is a true matrix
    square root only for symmetric positive semi-definite A -- confirm the
    intended use before generalising.
    """
    u, s, _ = np.linalg.svd(A)
    s = np.where(s < 0., 0., s)
    # u scaled column-wise by sqrt(s) == u @ diag(sqrt(s))
    root = u * np.sqrt(s).reshape(1, -1)
    return np.dot(root, u.T)
def EigClean(A):
    """Rebuild A with its negative eigenvalues replaced by 1e-3.

    The matrix is reconstructed as U diag(L) U^T and the real part is kept.
    A second eig call is retained from the original implementation even
    though its result is discarded.
    """
    eigvals, eigvecs = np.linalg.eig(A.copy())
    negative = np.where(eigvals < 0.)[0]
    if negative.shape[0] > 0:
        eigvals[negative] = 1e-3
    rebuilt = np.real(np.dot(np.dot(eigvecs, np.diag(eigvals)), eigvecs.T))
    np.linalg.eig(rebuilt)  # result unused; kept to preserve behaviour
    return rebuilt
def Dot_ListBlockMat_Mat(ListBlocks, Mat):
    """Multiply a block-diagonal matrix by a dense matrix, block-row-wise.

    ListBlocks: list of equal-size square diagonal blocks (n x n each).
    Mat: dense matrix with len(ListBlocks)*n rows.
    Returns an array of the same shape and dtype as Mat.
    """
    block_size = ListBlocks[0].shape[0]
    ncols = Mat.shape[1]
    nblocks = len(ListBlocks)
    rows = Mat.reshape(nblocks, block_size, ncols)
    out = np.zeros_like(rows)
    for i, block in enumerate(ListBlocks):
        # compute in float64, cast back on assignment (matches original)
        out[i] = np.dot(block.astype(np.float64), rows[i].astype(np.float64))
    return out.reshape(nblocks * block_size, ncols)
def Dot_ListBlockMat_Mat_Iregular(ListBlocks, Mat):
    """Block-diagonal times dense product where blocks may differ in size.

    Walks down the rows of Mat, multiplying each slice by the matching
    square block. Returns an array of the same shape and dtype as Mat.
    """
    out = np.zeros_like(Mat)
    row = 0
    for block in ListBlocks:
        nrows = block.shape[0]
        segment = Mat[row:row + nrows, :]
        # compute in float64, cast back on assignment (matches original)
        out[row:row + nrows, :] = np.dot(block.astype(np.float64),
                                         segment.astype(np.float64))
        row += nrows
    return out
def test_Dot_ListBlockMat_Mat():
    """Timing/consistency check of the block products against np.dot.

    Builds a block-diagonal matrix both as a list of blocks and as a dense
    array, times the three implementations, and prints whether they agree.
    Requires the project-local ClassTimeIt module.
    """
    nblocks=50
    n=100
    m=200
    B=np.random.randn(nblocks*n,m)
    ListBlocks=[]
    BlocksMat=np.zeros((nblocks*n,nblocks*n),float)
    for iblock in range(nblocks):
        ThisBlock=np.random.randn(n,n)
        ListBlocks.append(ThisBlock)
        istart=iblock*n
        BlocksMat[istart:istart+n,istart:istart+n]=ThisBlock
    import ClassTimeIt
    T=ClassTimeIt.ClassTimeIt()
    print("Dimensions A[%s], B[%s]"%(BlocksMat.shape,B.shape))
    R0=Dot_ListBlockMat_Mat(ListBlocks,B)
    T.timeit("ListProd")
    R1=np.dot(BlocksMat,B)
    T.timeit("NpProd")
    R2=Dot_ListBlockMat_Mat_Iregular(ListBlocks,B)
    T.timeit("ListProdIrregular")
    print(np.allclose(R0,R1))
    print(np.allclose(R2,R1))
def test_Dot_ListBlockMat_Mat_Big():
nblocks=10*50
n=100
m=200
B=np.random.randn(nblocks*n,m)
ListBlocks=[]
for iblock in range(nblocks):
ThisBlock=np.random.randn(n,n)
ListBlocks.append(ThisBlock)
import ClassTimeIt
T=ClassTimeIt.ClassTimeIt()
print("Dimensions A[%ix%s -> %s], B[%s]"%(nblocks,ThisBlock.shape,(nblocks*n,nblocks*n),B.shape))
R0=Dot_ListBlockMat_Mat(ListBlocks,B)
T.timeit("ListProd")
|
saopiccREPO_NAMEDDFacetPATH_START.@DDFacet_extracted@DDFacet-master@DDFacet@Array@ModLinAlg.py@.PATH_END.py
|
{
"filename": "missing-values__categorical-features-values.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage-missing-values/missing-values__categorical-features-values.md",
"type": "Markdown"
}
|
{{ product }} does not process categorical features in any specific way.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage-missing-values@missing-values__categorical-features-values.md@.PATH_END.py
|
{
"filename": "test_svm_hrcsbc.py",
"repo_name": "spacetelescope/drizzlepac",
"repo_path": "drizzlepac_extracted/drizzlepac-main/tests/hap/test_svm_hrcsbc.py",
"type": "Python"
}
|
""" This module tests full pipeline SVM processing as a demonstration template.
"""
import datetime
import glob
import os
import pytest
import numpy as np
from drizzlepac.haputils import astroquery_utils as aqutils
from drizzlepac import runsinglehap
from astropy.io import fits, ascii
from pathlib import Path
"""
test_svm_demo.py
This test file can be executed in the following manner:
$ pytest -s --basetemp=/internal/hladata/yourUniqueDirectoryHere test_svm.py >& test_svm.log &
$ tail -f test_svm.log
* Note: When running this test, the `--basetemp` directory should be set to a unique
existing directory to avoid deleting previous test output.
* The POLLER_FILE exists in the tests/hap directory.
"""
# Substring expected in the output products' WCSNAME when the GAIA fit succeeded
WCS_SUB_NAME = "FIT_SVM_GAIA"
# Poller file (list of input exposures) residing next to this test module
POLLER_FILE = "acs_hrc_sbc_input.out"
# Gather expected values for pass/fail criteria here
expected_total_point_sources = {'hrc': 268, 'sbc': 65}
expected_total_segment_sources = {'hrc': 642, 'sbc': 250}
# Fractional tolerance applied to the expected source counts
tolerance = 0.25
@pytest.fixture(scope="module")
def read_csv_for_filenames():
    """Return the FLT/FLC filenames listed in the poller file next to this module."""
    # Read the CSV poller file residing in the tests directory to extract the individual visit FLT/FLC filenames
    path = os.path.join(os.path.dirname(__file__), POLLER_FILE)
    table = ascii.read(path, format="no_header")
    # first (unnamed) column holds the filenames
    filename_column = table.colnames[0]
    filenames = list(table[filename_column])
    print("\nread_csv_for_filenames. Filesnames from poller: {}".format(filenames))

    return filenames
@pytest.fixture(scope="module")
def gather_data_for_processing(read_csv_for_filenames, tmp_path_factory):
    """Download the FLC/FLT images named in the poller file into a fresh
    working directory and return the set of filenames to process.

    Unwanted files pulled down by the wildcard retrieval are deleted from
    disk; a failure to delete one file no longer aborts the cleanup of the
    remaining files.
    """
    # create working directory specified for the test
    curdir = tmp_path_factory.mktemp(os.path.basename(__file__))
    os.chdir(curdir)

    # Establish FLC/FLT lists and obtain the requested data
    flc_flag = ""
    flt_flag = ""
    # In order to obtain individual FLC or FLT images from MAST (if the files do not reside on disk) which
    # may be part of an ASN, use only IPPPSS with a wildcard. The unwanted images have to be removed
    # after-the-fact.
    for fn in read_csv_for_filenames:
        if fn.lower().endswith("flc.fits") and flc_flag == "":
            flc_flag = fn[0:6] + "*"
        elif fn.lower().endswith("flt.fits") and flt_flag == "":
            flt_flag = fn[0:6] + "*"
        # If both flags have been set, then break out the loop early. It may be
        # that all files have to be checked which means the for loop continues
        # until its natural completion.
        if flc_flag and flt_flag:
            break

    # Get test data through astroquery - only retrieve the pipeline processed FLC and/or FLT files
    # (e.g., j*_flc.fits) as necessary. The logic here and the above for loop is an attempt to
    # avoid downloading too many images which are not needed for processing.
    flcfiles = []
    fltfiles = []
    if flc_flag:
        flcfiles = aqutils.retrieve_observation(flc_flag, suffix=["FLC"], product_type="pipeline")
    if flt_flag:
        fltfiles = aqutils.retrieve_observation(flt_flag, suffix=["FLT"], product_type="pipeline")

    flcfiles.extend(fltfiles)

    # Keep only the files which exist in BOTH lists for processing
    files_to_process = set(read_csv_for_filenames).intersection(set(flcfiles))

    # Identify unwanted files from the download list and remove from disk.
    # FIX: handle each file individually so one failed os.remove() does not
    # abort the cleanup of the remaining files (previously the try wrapped
    # the whole loop).
    files_to_remove = set(read_csv_for_filenames).symmetric_difference(set(flcfiles))
    for ftr in files_to_remove:
        try:
            os.remove(ftr)
        except Exception as x_cept:
            print("")
            print("Exception encountered: {}.".format(x_cept))
            print("The file {} could not be deleted from disk. ".format(ftr))
            print("Remove files which are not used for processing from disk manually.")

    print("\ngather_data_for_processing. Gathered data: {}".format(files_to_process))

    return files_to_process
@pytest.fixture(scope="module")
def gather_output_data(construct_manifest_filename):
    """Read the manifest file and return the list of output product names."""
    # Determine the filenames of all the output files from the manifest
    print(f"\nManifest Filename: {construct_manifest_filename}")
    with open(construct_manifest_filename, 'r') as manifest:
        files = [line.rstrip('\n') for line in manifest]

    print("\ngather_output_data. Output data files: {}".format(files))

    return files
@pytest.fixture(scope="module")
def construct_manifest_filename(read_csv_for_filenames):
    """Build the expected manifest filename (inst_ppp_ss_manifest.txt) from
    header keywords of the first input exposure."""
    first = read_csv_for_filenames[0]
    inst = fits.getval(first, "INSTRUME", ext=0).lower()
    root = fits.getval(first, "ROOTNAME", ext=0).lower()
    manifest_filename = "_".join((inst, root[1:4], root[4:6], "manifest.txt"))
    print("\nconstruct_manifest_filename. Manifest filename: {}".format(manifest_filename))

    return manifest_filename
@pytest.fixture(scope="module", autouse=True)
def svm_setup(gather_data_for_processing):
    """Run the full SVM pipeline (runsinglehap) on the gathered data.

    Autouse module fixture: executes once before any test in this module.
    This is the time-consuming step.
    """
    # Act: Process the input data by executing runsinglehap - time consuming activity
    current_dt = datetime.datetime.now()
    print(str(current_dt))
    print("\nsvm_setup fixture")

    # Read the "poller file" and download the input files, as necessary
    input_names = gather_data_for_processing

    # Run the SVM processing
    path = os.path.join(os.path.dirname(__file__), POLLER_FILE)

    try:
        status = runsinglehap.perform(path)

    # Catch anything that happens and report it. This is meant to catch unexpected errors and
    # generate sufficient output exception information so algorithmic problems can be addressed.
    except Exception as except_details:
        print(except_details)
        # FIX: pytest.fail() does not format its message -- its second
        # positional argument is `pytrace`, so the path was never shown.
        pytest.fail("\nsvm_setup. Exception Visit: {}\n".format(path))

    current_dt = datetime.datetime.now()
    print(str(current_dt))
# TESTS
def test_svm_manifest_name(construct_manifest_filename):
    """The manifest file derived from the input headers must exist on disk."""
    # Construct the manifest filename from the header of an input file in the list and check it exists.
    manifest_path = Path(construct_manifest_filename)
    print("\ntest_svm_manifest. Filename: {}".format(manifest_path))

    # Ensure the manifest file uses the proper naming convention
    assert manifest_path.is_file()
def test_svm_wcs(gather_output_data):
    """Every total-detection product must carry a GAIA-fit WCS solution."""
    # Check the output primary WCSNAME includes FIT_SVM_GAIA as part of the string value
    tdp_files = [files for files in gather_output_data if
                 files.lower().find("total") > -1 and files.lower().endswith(".fits")]

    for tdp in tdp_files:
        wcsname = fits.getval(tdp, "WCSNAME", ext=1).upper()
        print("\ntest_svm_wcs. WCSNAME: {} Output file: {}".format(wcsname, tdp))
        assert WCS_SUB_NAME in wcsname, f"WCSNAME is not as expected for file {tdp}."
def test_svm_samewcs(gather_output_data):
    """All total products must be aligned to the same reference catalog."""
    # Check that products for both detectors are aligned to the same catalog
    # The assumption is that if they are all aligned to the same catalog, they are
    # correctly aligned to each other.
    tdp_files = [files for files in gather_output_data if
                 files.lower().find("total") > -1 and files.lower().endswith(".fits")]
    print(f'TDP_FILES: \n{tdp_files}')
    # NOTE(review): assumes WCSNAME contains a '-' separating solution and
    # catalog name; an IndexError would result otherwise -- confirm format.
    wcsnames = [fits.getval(tdp, "WCSNAME", ext=1).upper().split('-')[1] for tdp in tdp_files]
    assert len(set(wcsnames)) == 1, f"WCSNAMES are not all the same: {wcsnames}"
def test_svm_empty_cats(gather_output_data):
    """Every output source catalog must contain at least one measured source."""
    # Check the output catalogs should contain > 0 measured sources
    cat_files = [files for files in gather_output_data if files.lower().endswith("-cat.ecsv")]

    valid_tables = {}
    for cat in cat_files:
        table_length = len(ascii.read(cat, format="ecsv"))
        print("\ntest_svm_cat_sources. Number of sources in catalog {} is {}.".format(cat, table_length))
        valid_tables[cat] = table_length > 0
    bad_tables = [cat for cat in cat_files if not valid_tables[cat]]
    assert len(bad_tables) == 0, f"Catalog file(s) {bad_tables} is/are unexpectedly empty"
# Due to the way the catalogs are filtered, check the size of the total catalog and one of the filter
# catalogs separately. The total catalog has the row removed for each source where the constituent
# filter catalogs *ALL* have flag>5 for the source. Rows are NOT removed from the filter table based on
# flag values. NOTE: Filtered catalogs are actually not checked by these tests.
@pytest.mark.skip(reason="Modifying tests and cannot reproduce failed result at this time - need for RC.")
def test_svm_point_total_cat(gather_output_data):
    """Total point-source catalogs must hold the expected source counts
    (within the module-level fractional `tolerance`)."""
    # Check the output catalogs should contain the correct number of sources -- allows for a broad tolerance
    print("\ntest_svm_point_total_cat.")
    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("point-cat.ecsv")]

    num_sources = {tdp:len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}

    valid_cats = {}
    # match each detector key ('hrc'/'sbc') to its catalog file by substring
    for tdp in expected_total_point_sources.keys():
        for file in tdp_files:
            if tdp in file:
                tol_limit = tolerance * expected_total_point_sources[tdp]
                valid_cats[tdp] = (file, np.isclose(expected_total_point_sources[tdp], num_sources[file], atol=tol_limit))
                break
    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
    assert len(bad_cats) == 0, f"Total Point Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_point_sources}"
@pytest.mark.skip(reason="Modifying tests and cannot reproduce failed result at this time. - need for RC")
def test_svm_segment_total_cat(gather_output_data):
    """Total segment catalogs must hold the expected source counts
    (within the module-level fractional `tolerance`)."""
    # Check the output catalogs should contain the correct number of sources -- allows for a broad tolerance
    print("\ntest_svm_segment_total_cat.")
    tdp_files = [files for files in gather_output_data if files.lower().find("total") > -1 and files.lower().endswith("segment-cat.ecsv")]

    num_sources = {tdp:len(ascii.read(tdp, format="ecsv")) for tdp in tdp_files}

    valid_cats = {}
    # match each detector key ('hrc'/'sbc') to its catalog file by substring
    for tdp in expected_total_segment_sources.keys():
        for file in tdp_files:
            if tdp in file:
                tol_limit = tolerance * expected_total_segment_sources[tdp]
                valid_cats[tdp] = (file, np.isclose(expected_total_segment_sources[tdp], num_sources[file], atol=tol_limit))
                break
    bad_cats = [cat for cat in valid_cats if not valid_cats[cat][1]]
    assert len(bad_cats) == 0, f"Total Segment Catalog(s) {bad_cats} had {valid_cats} sources, expected {expected_total_segment_sources}"
|
spacetelescopeREPO_NAMEdrizzlepacPATH_START.@drizzlepac_extracted@drizzlepac-main@tests@hap@test_svm_hrcsbc.py@.PATH_END.py
|
{
"filename": "version.py",
"repo_name": "emerge-erc/ALminer",
"repo_path": "ALminer_extracted/ALminer-main/alminer/version.py",
"type": "Python"
}
|
# Package version string (PEP 440); bump on release.
__version__ = "0.1.3"
|
emerge-ercREPO_NAMEALminerPATH_START.@ALminer_extracted@ALminer-main@alminer@version.py@.PATH_END.py
|
{
"filename": "nuance.py",
"repo_name": "lgrcia/paper-nuance",
"repo_path": "paper-nuance_extracted/paper-nuance-main/workflows/benchmark/scripts/nuance.py",
"type": "Python"
}
|
# Snakemake benchmark script: time nuance's linear and periodic searches on a
# synthetic light curve and dump the timings to YAML.
import yaml
import numpy as np
import os
from multiprocessing import cpu_count

# NOTE(review): cores is hard-wired to 1; the commented expression shows it
# was once taken from the snakemake wildcard.
cores = 1 #int(eval(snakemake.wildcards.cores))
os.environ["XLA_FLAGS"] = f"--xla_force_host_platform_device_count={cores}"
import jax
from time import time as _time
# NOTE(review): jax and os are imported a second time below, and XLA_FLAGS is
# re-set after jax was already imported -- the second assignment presumably
# has no effect on the device count; confirm before relying on it.
import jax
import os

os.environ["XLA_FLAGS"] = f"--xla_force_host_platform_device_count={1}"
from nuance import Nuance

print(jax.device_count())

# search grids: periods from the wildcard spec, durations fixed in [0.01, 0.05]
n_durations = int(eval(snakemake.wildcards.durations))
periods = np.linspace(*[eval(i) for i in snakemake.wildcards.periods.split("_")])
durations = np.linspace(0.01, 0.05, n_durations)

time, flux, error = np.load(snakemake.input[0])
n_points = int(snakemake.wildcards.n_points)

times = {}
nu = Nuance(time, flux, error)

# time the linear search alone, then the cumulative total including the
# periodic search
t0 = _time()
nu.linear_search(time, durations)
times["linear"] = float(_time() - t0)

search = nu.periodic_search(periods)
times["all"] = float(_time() - t0)
times["cpu_counts"] = jax.device_count(), cores

yaml.safe_dump(times, open(snakemake.output[0], "w"))
|
lgrciaREPO_NAMEpaper-nuancePATH_START.@paper-nuance_extracted@paper-nuance-main@workflows@benchmark@scripts@nuance.py@.PATH_END.py
|
{
"filename": "solves.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/lax/control_flow/solves.py",
"type": "Python"
}
|
# Copyright 2022 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for the custom linear solve and utilities."""
import collections
from functools import partial
import operator
from jax.tree_util import (tree_flatten, treedef_children, tree_leaves,
tree_unflatten, treedef_tuple)
from jax._src import ad_util
from jax._src import api
from jax._src import core
from jax._src import custom_derivatives
from jax._src import linear_util as lu
from jax._src.interpreters import ad
from jax._src.interpreters import batching
from jax._src.interpreters import mlir
from jax._src.interpreters import xla
from jax._src.traceback_util import api_boundary
from jax._src.util import split_list, safe_map
import numpy as np
from jax._src.lax.control_flow.common import (
_abstractify,
_check_tree,
_initial_style_jaxpr,
)
_map = safe_map
_RootTuple = collections.namedtuple('_RootTuple', 'f, solve, l_and_s')
def _split_root_args(args, const_lengths):
params_list = split_list(args, list(const_lengths))
return _RootTuple(*params_list[:-1]), params_list[-1]
@api_boundary
def custom_root(f, initial_guess, solve, tangent_solve, has_aux=False):
  """Differentiably solve for the roots of a function.

  This is a low-level routine, mostly intended for internal use in JAX.
  Gradients of custom_root() are defined with respect to closed-over variables
  from the provided function ``f`` via the implicit function theorem:
  https://en.wikipedia.org/wiki/Implicit_function_theorem

  Args:
    f: function for which to find a root. Should accept a single argument,
      return a tree of arrays with the same structure as its input.
    initial_guess: initial guess for a zero of f.
    solve: function to solve for the roots of f. Should take two positional
      arguments, f and initial_guess, and return a solution with the same
      structure as initial_guess such that func(solution) = 0. In other words,
      the following is assumed to be true (but not checked)::

        solution = solve(f, initial_guess)
        error = f(solution)
        assert all(error == 0)

    tangent_solve: function to solve the tangent system. Should take two
      positional arguments, a linear function ``g`` (the function ``f``
      linearized at its root) and a tree of array(s) ``y`` with the same
      structure as initial_guess, and return a solution ``x`` such that
      ``g(x)=y``:

      - For scalar ``y``, use ``lambda g, y: y / g(1.0)``.
      - For vector ``y``, you could use a linear solve with the Jacobian, if
        dimensionality of ``y`` is not too large:
        ``lambda g, y: np.linalg.solve(jacobian(g)(y), y)``.
    has_aux: bool indicating whether the ``solve`` function returns
      auxiliary data like solver diagnostics as a second argument.

  Returns:
    The result of calling solve(f, initial_guess) with gradients defined via
    implicit differentiation assuming ``f(solve(f, initial_guess)) == 0``.
  """
  guess_flat, in_args_tree = tree_flatten((initial_guess,))
  guess_avals = tuple(_map(_abstractify, guess_flat))
  # Trace each callable to a jaxpr so its closed-over constants become
  # explicit inputs (required for differentiating w.r.t. them).
  f_jaxpr, f_consts, out_tree = _initial_style_jaxpr(
      f, in_args_tree, guess_avals)
  in_tree, = treedef_children(in_args_tree)
  _check_tree("f", "initial_guess", out_tree, in_tree, False)

  solve_jaxpr, solve_consts, solution_tree = _initial_style_jaxpr(
      partial(solve, f), in_args_tree, guess_avals)
  _check_tree("solve", "initial_guess", solution_tree, in_tree, has_aux)

  def linearize_and_solve(x, b):
    # Linearize f at x and hand the resulting linear map to the user's
    # tangent solver; traced once as its own jaxpr below.
    unchecked_zeros, f_jvp = api.linearize(f, x)
    return tangent_solve(f_jvp, b)

  l_and_s_jaxpr, l_and_s_consts, out_tree = _initial_style_jaxpr(
      linearize_and_solve, treedef_tuple((in_tree,) * 2), guess_avals * 2)
  _check_tree("tangent_solve", "x", out_tree, in_tree, False)

  all_consts = [f_consts, solve_consts, l_and_s_consts]
  const_lengths = _RootTuple(*_map(len, all_consts))
  jaxprs = _RootTuple(f_jaxpr, solve_jaxpr, l_and_s_jaxpr)

  solution_flat = _custom_root(
      const_lengths, jaxprs, *(_flatten(all_consts) + guess_flat))
  return tree_unflatten(solution_tree, solution_flat)
@partial(custom_derivatives.custom_jvp, nondiff_argnums=(0, 1))
def _custom_root(const_lengths, jaxprs, *args):
  # Primal computation: run the user-provided solver jaxpr on its constants
  # plus the initial guess. Differentiation is supplied separately through
  # the custom_jvp rule (_root_jvp).
  params, initial_guess = _split_root_args(args, const_lengths)
  solution = core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + initial_guess))
  return solution
@_custom_root.defjvp
def _root_jvp(const_lengths, jaxprs, primals, tangents):
  """JVP rule for _custom_root via the implicit function theorem."""
  params, _ = _split_root_args(primals, const_lengths)
  sol = _custom_root(const_lengths, jaxprs, *primals)

  # f produces f_out_vals outputs; anything beyond that in sol is aux data.
  f_out_vals = len(jaxprs.f.out_avals)
  solution, aux = split_list(sol, [f_out_vals])

  params_dot, _ = _split_root_args(tangents, const_lengths)

  # F(m, u) = 0      # system of equations in u, parameterized by m
  #                  # solution is u*(m) defined in a neighborhood
  # F(m, u*(m)) = 0  # satisfied in a neighborhood
  #
  # ∂_0 F(m, u*(m)) + ∂_1 F(m, u*(m)) ∂ u*(m) = 0       # implied by line above
  # ∂ u*(m) = - (∂_1 F(m, u*(m)))^{-1} ∂_0 F(m, u*(m))  # rearrange
  #
  # ∂ u*(m)[v] = - (∂_1 F(m, u*(m)))^{-1} [∂_0 F(m, u*(m))[v]]  # jvp

  f = core.jaxpr_as_fun(jaxprs.f)
  linearize_and_solve = partial(
      core.jaxpr_as_fun(jaxprs.l_and_s), *params.l_and_s)
  f_at_solution = lambda *params: f(*params, *solution)
  _, rhs = ad.jvp(lu.wrap_init(f_at_solution)).call_wrapped(
      params.f, params_dot.f)
  solution_dot = _map(
      operator.neg, linearize_and_solve(*solution, *rhs))
  # append aux, create symbolic zero tangents for the aux values
  solution += aux
  solution_dot += _map(ad_util.zeros_like_jaxval, aux)

  return solution, solution_dot
class _LinearSolveTuple(collections.namedtuple(
'_LinearSolveTuple', 'matvec, vecmat, solve, transpose_solve')):
def transpose(self):
return type(self)(self.vecmat, self.matvec, self.transpose_solve, self.solve)
def _split_linear_solve_args(args, const_lengths):
  """Split the flat argument list into per-jaxpr constants plus b."""
  *const_pieces, b = split_list(args, list(const_lengths))
  return _LinearSolveTuple(*const_pieces), b
def _transpose_one_output(linear_fun, primals):
transpose_fun = api.linear_transpose(linear_fun, primals)
def transposed_fun(x):
(y,) = transpose_fun(x)
return y
return transposed_fun
def _flatten(args):
return [x for arg in args for x in arg]
def _check_shapes(func_name, expected_name, actual, expected):
actual_shapes = _map(np.shape, tree_leaves(actual))
expected_shapes = _map(np.shape, tree_leaves(expected))
if actual_shapes != expected_shapes:
raise ValueError(
f"{func_name}() output shapes must match {expected_name}, "
f"got {actual_shapes} and {expected_shapes}")
@api_boundary
def custom_linear_solve(
    matvec, b, solve, transpose_solve=None, symmetric=False, has_aux=False):
  """Perform a matrix-free linear solve with implicitly defined gradients.

  This function allows for overriding or defining gradients for a linear
  solve directly via implicit differentiation at the solution, rather than by
  differentiating *through* the solve operation. This can sometimes be much faster
  or more numerically stable, or differentiating through the solve operation
  may not even be implemented (e.g., if ``solve`` uses ``lax.while_loop``).

  Required invariant::

      x = solve(matvec, b)  # solve the linear equation
      assert matvec(x) == b  # not checked

  Args:
    matvec: linear function to invert. Must be differentiable.
    b: constant right handle side of the equation. May be any nested structure
      of arrays.
    solve: higher level function that solves for solution to the linear
      equation, i.e., ``solve(matvec, x) == x`` for all ``x`` of the same form
      as ``b``. This function need not be differentiable.
    transpose_solve: higher level function for solving the transpose linear
      equation, i.e., ``transpose_solve(vecmat, x) == x``, where ``vecmat`` is
      the transpose of the linear map ``matvec`` (computed automatically with
      autodiff). Required for backwards mode automatic differentiation, unless
      ``symmetric=True``, in which case ``solve`` provides the default value.
    symmetric: bool indicating if it is safe to assume the linear map
      corresponds to a symmetric matrix, i.e., ``matvec == vecmat``.
    has_aux: bool indicating whether the ``solve`` and ``transpose_solve`` functions
      return auxiliary data like solver diagnostics as a second argument.

  Returns:
    Result of ``solve(matvec, b)``, with gradients defined assuming that the
    solution ``x`` satisfies the linear equation ``matvec(x) == b``.
  """
  if transpose_solve is None and symmetric:
    transpose_solve = solve

  b_flat, in_args_tree = tree_flatten((b,))
  b_avals = tuple(_map(_abstractify, b_flat))

  tree, = treedef_children(in_args_tree)

  def _shape_checked(fun, name, has_aux):
    # Wrap a callable so its primary output shapes are validated against b.
    def f(x):
      y = fun(x)
      _check_shapes(name, "b", y, b_flat)
      return y

    def f_aux(x):
      y, aux = fun(x)
      _check_shapes(name, "b", y, b_flat)
      return y, aux

    return f_aux if has_aux else f

  # no auxiliary data assumed for matvec
  matvec_jaxpr, matvec_consts, out_tree = _initial_style_jaxpr(
      _shape_checked(matvec, "matvec", False), in_args_tree, b_avals,
      'custom_linear_solve')
  _check_tree("matvec", "b", out_tree, tree, False)

  solve_jaxpr, solve_consts, out_tree = _initial_style_jaxpr(
      _shape_checked(partial(solve, matvec), "solve", has_aux), in_args_tree, b_avals,
      'custom_linear_solve')
  _check_tree("solve", "b", out_tree, tree, has_aux)

  if transpose_solve is None:
    # Forward-only: transposition will raise at transpose time if attempted.
    vecmat_jaxpr = tr_solve_jaxpr = None
    vecmat_consts = tr_solve_consts = []
  else:
    if symmetric:
      # reuse the forward operator as its own transpose
      vecmat = matvec
      vecmat_jaxpr = matvec_jaxpr
      vecmat_consts = matvec_consts
    else:
      # derive the transposed operator automatically via linear_transpose
      vecmat = _transpose_one_output(matvec, b)
      vecmat_jaxpr, vecmat_consts, out_tree = _initial_style_jaxpr(
          vecmat, in_args_tree, b_avals, 'custom_linear_solve')
      assert out_tree == tree

    tr_solve_jaxpr, tr_solve_consts, out_tree = _initial_style_jaxpr(
        _shape_checked(partial(transpose_solve, vecmat), "transpose_solve", has_aux),
        in_args_tree, b_avals, 'custom_linear_solve')
    _check_tree("transpose_solve", "b", out_tree, tree, has_aux)

  all_consts = [matvec_consts, vecmat_consts, solve_consts, tr_solve_consts]
  const_lengths = _LinearSolveTuple(*_map(len, all_consts))
  jaxprs = _LinearSolveTuple(
      matvec_jaxpr, vecmat_jaxpr, solve_jaxpr, tr_solve_jaxpr)

  out_flat = linear_solve_p.bind(
      *(_flatten(all_consts) + b_flat),
      const_lengths=const_lengths, jaxprs=jaxprs)

  return tree_unflatten(out_tree, out_flat)
def _linear_solve_abstract_eval(*args, const_lengths, jaxprs):
  """Abstract evaluation: outputs mirror the avals of b, plus any aux values."""
  args_to_raise = args[sum(const_lengths):]

  # raise aux_args to shaped arrays as well if present
  # number of aux args is the difference in out_avals
  # of solve and matvec (since they map to the same vector space)
  num_aux = len(jaxprs.solve.out_avals) - len(jaxprs.matvec.out_avals)
  if num_aux > 0:
    args_to_raise += tuple(jaxprs.solve.out_avals[-num_aux:])
  return args_to_raise
def _custom_linear_solve_impl(*args, const_lengths, jaxprs):
  """Primal evaluation: run the user-supplied solve jaxpr on (consts, b)."""
  params, b = _split_linear_solve_args(args, const_lengths)
  return core.jaxpr_as_fun(jaxprs.solve)(*(params.solve + b))
def _tangent_linear_map(func, params, params_dot, *x):
  """Compute the tangent of a linear map.

  Assuming ``func(*params, *x)`` is linear in ``x`` and computes ``A @ x``,
  this function computes ``∂A @ x``.
  """
  # Callers must skip this when all parameter tangents are symbolic zeros;
  # the result would be identically zero.
  assert any(type(p) is not ad_util.Zero for p in params_dot)
  # x contributes no tangent of its own here, only the parameters do.
  zeros = _map(ad_util.Zero.from_primal_value, x)
  _, out_tangent = ad.jvp(lu.wrap_init(func)).call_wrapped(
      params + list(x), params_dot + zeros)
  return out_tangent
def _custom_linear_solve_jvp(primals, tangents, const_lengths, jaxprs):
  """JVP rule for linear_solve_p via implicit differentiation."""
  # A x - b = 0
  # ∂A x + A ∂x - ∂b = 0
  # ∂x = A^{-1} (∂b - ∂A x)

  kwargs = dict(const_lengths=const_lengths, jaxprs=jaxprs)
  x = linear_solve_p.bind(*primals, **kwargs)

  params, _ = _split_linear_solve_args(primals, const_lengths)
  params_dot, b_dot = _split_linear_solve_args(tangents, const_lengths)

  num_x_leaves = len(b_dot)
  # x is a flat tree with possible aux values appended
  # since x_tree == b_tree == b_dot_tree, we can cut off
  # aux values with len info provided by b_dot tree here
  x_leaves, _ = split_list(x, [num_x_leaves])

  if all(type(p) is ad_util.Zero for p in params_dot.matvec):
    # no need to evaluate matvec_tangents
    rhs = b_dot
  else:
    matvec_tangents = _tangent_linear_map(
        core.jaxpr_as_fun(jaxprs.matvec), params.matvec, params_dot.matvec, *x_leaves)
    rhs = _map(ad.add_tangents, b_dot, _map(operator.neg, matvec_tangents))

  # Solve the same linear system again with the tangent right-hand side.
  x_dot = linear_solve_p.bind(*(_flatten(params) + rhs), **kwargs)

  # split into x tangents and aux tangents (these become zero)
  dx_leaves, daux_leaves = split_list(x_dot, [num_x_leaves])
  daux_leaves = _map(ad_util.Zero.from_primal_value, daux_leaves)

  x_dot = dx_leaves + daux_leaves

  return x, x_dot
def _linear_solve_transpose_rule(cotangent, *primals, const_lengths, jaxprs):
  """Transpose rule: solve the transposed system with the cotangent as rhs."""
  if jaxprs.transpose_solve is None:
    raise TypeError('transpose_solve required for backwards mode automatic '
                    'differentiation of custom_linear_solve')

  params, b = _split_linear_solve_args(primals, const_lengths)
  # split off symbolic zeros in the cotangent if present
  x_cotangent, _ = split_list(cotangent, [len(b)])
  assert all(ad.is_undefined_primal(x) for x in b)
  cotangent_b_full = linear_solve_p.bind(
      *(_flatten(params.transpose()) + x_cotangent),
      const_lengths=const_lengths.transpose(), jaxprs=jaxprs.transpose())
  # drop aux values in cotangent computation
  cotangent_b, _ = split_list(cotangent_b_full, [len(b)])
  # the constants receive no cotangent (None); only b does
  return [None] * sum(const_lengths) + cotangent_b
def _linear_solve_batching_rule(axis_data, args, dims, const_lengths, jaxprs):
  """Batching (vmap) rule for `custom_linear_solve`.

  Runs a fixpoint over "which leaves of x and b carry a batch dimension" so
  that all four jaxprs (matvec, vecmat, solve, transpose_solve) are batched
  consistently, then binds the primitive on front-batched operands.
  """
  orig_bat = [d is not batching.not_mapped for d in dims]

  params, b = _split_linear_solve_args(args, const_lengths)
  params_dims, b_dims = _split_linear_solve_args(dims, const_lengths)
  params_bat, orig_b_bat = _split_linear_solve_args(orig_bat, const_lengths)

  (matvec, vecmat, solve, solve_t) = jaxprs
  (matvec_bat, vecmat_bat, solve_bat, solve_t_bat) = params_bat

  # number of operator out avals is assumed to be the same for matvec/vecmat
  num_operator_out_avals = len(matvec.out_avals)
  num_aux = len(solve.out_avals) - num_operator_out_avals

  # Fixpoint computation of which parts of x and b are batched; we need to
  # ensure this is consistent between all four jaxprs
  b_bat = orig_b_bat
  x_bat = [False] * len(solve.out_avals)
  # The loop bound guarantees termination: each iteration can only flip
  # entries from False to True, and there are only this many entries.
  for i in range(1 + len(orig_b_bat) + len(solve.out_avals)):
    # Apply vecmat and solve -> new batched parts of x
    solve_jaxpr_batched, solve_x_bat = batching.batch_jaxpr(
        solve, axis_data, solve_bat + b_bat, instantiate=x_bat)
    if vecmat is None:
      vecmat_jaxpr_batched = None
      x_bat_out = solve_x_bat
    else:
      vecmat_jaxpr_batched, vecmat_x_bat = batching.batch_jaxpr(
          vecmat, axis_data, vecmat_bat + b_bat, instantiate=b_bat)
      # batch all aux data by default
      x_bat_out = _map(operator.or_, vecmat_x_bat + [True] * num_aux, solve_x_bat)
    # keep a slice of only the linear operator part of solve's avals
    x_bat_noaux = x_bat_out[:num_operator_out_avals]

    # Apply matvec and solve_t -> new batched parts of b
    matvec_jaxpr_batched, matvec_b_bat = batching.batch_jaxpr(
        matvec, axis_data, matvec_bat + x_bat_noaux, instantiate=b_bat)
    if solve_t is None:
      solve_t_jaxpr_batched = None
      b_bat_out = _map(operator.or_, matvec_b_bat, orig_b_bat)
    else:
      solve_t_jaxpr_batched, solve_t_b_aux_bat = batching.batch_jaxpr(
          solve_t, axis_data, solve_t_bat + x_bat_noaux, instantiate=x_bat_out)
      assert len(solve_t_b_aux_bat) == len(orig_b_bat) + num_aux
      solve_t_b_bat, _ = split_list(solve_t_b_aux_bat, [len(orig_b_bat)])
      b_bat_out = _map(lambda m, s, o: m or s or o, matvec_b_bat, solve_t_b_bat,
                       orig_b_bat)
    if x_bat_out == x_bat and b_bat_out == b_bat:
      # Fixpoint reached: batchedness no longer changes.
      break
    else:
      x_bat = x_bat_out
      b_bat = b_bat_out
  else:
    assert False, "Fixedpoint not reached"

  batched_jaxprs = _LinearSolveTuple(matvec_jaxpr_batched, vecmat_jaxpr_batched,
                                     solve_jaxpr_batched, solve_t_jaxpr_batched)

  # Move batched axes to the front
  new_params = [
      batching.moveaxis(x, d, 0)
      if d is not batching.not_mapped and d != 0 else x
      for x, d in zip(_flatten(params), _flatten(params_dims))
  ]
  # Broadcast out b if necessary
  new_b = [
      batching.broadcast(x, axis_data.size, 0) if now_bat and not was_bat else
      batching.moveaxis(x, d, 0) if now_bat and d != 0 else x
      for x, d, was_bat, now_bat in zip(b, b_dims, orig_b_bat, b_bat)
  ]

  outs = linear_solve_p.bind(
      *(new_params + new_b),
      const_lengths=const_lengths,
      jaxprs=batched_jaxprs)
  # Batched outputs carry their batch axis at position 0.
  out_dims = [0 if batched else batching.not_mapped for batched in solve_x_bat]
  return outs, out_dims
# Primitive registration for custom_linear_solve: impl/abstract-eval plus the
# JVP, transpose, and batching rules defined above, lowered by re-tracing the
# impl (initial-style primitive).
linear_solve_p = core.Primitive('custom_linear_solve')
linear_solve_p.multiple_results = True
linear_solve_p.def_impl(_custom_linear_solve_impl)
linear_solve_p.def_abstract_eval(_linear_solve_abstract_eval)
ad.primitive_jvps[linear_solve_p] = _custom_linear_solve_jvp
xla.register_initial_style_primitive(linear_solve_p)
mlir.register_lowering(
    linear_solve_p, mlir.lower_fun(_custom_linear_solve_impl,
                                   multiple_results=True))
ad.primitive_transposes[linear_solve_p] = _linear_solve_transpose_rule
batching.fancy_primitive_batchers[linear_solve_p] = _linear_solve_batching_rule
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@lax@control_flow@solves.py@.PATH_END.py
|
{
"filename": "agreement.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/graphics/agreement.py",
"type": "Python"
}
|
"""
Bland-Altman mean-difference plots
Author: Joses Ho
License: BSD-3
"""
import numpy as np
from . import utils
def mean_diff_plot(
    m1,
    m2,
    sd_limit=1.96,
    ax=None,
    scatter_kwds=None,
    mean_line_kwds=None,
    limit_lines_kwds=None,
):
    """
    Construct a Tukey/Bland-Altman Mean Difference Plot.

    Tukey's Mean Difference Plot (also known as a Bland-Altman plot) is a
    graphical method to analyze the differences between two methods of
    measurement. The mean of the measures is plotted against their difference.

    For more information see
    https://en.wikipedia.org/wiki/Bland-Altman_plot

    Parameters
    ----------
    m1 : array_like
        A 1-d array.
    m2 : array_like
        A 1-d array.
    sd_limit : float
        The limit of agreements expressed in terms of the standard deviation of
        the differences. If `md` is the mean of the differences, and `sd` is
        the standard deviation of those differences, then the limits of
        agreement that will be plotted are md +/- sd_limit * sd.
        The default of 1.96 will produce 95% confidence intervals for the means
        of the differences. If sd_limit = 0, no limits will be plotted, and
        the ylimit of the plot defaults to 3 standard deviations on either
        side of the mean.
    ax : AxesSubplot
        If `ax` is None, then a figure is created. If an axis instance is
        given, the mean difference plot is drawn on the axis.
    scatter_kwds : dict
        Options to style the scatter plot. Accepts any keywords for the
        matplotlib Axes.scatter plotting method.
    mean_line_kwds : dict
        Options to style the mean-difference line. Accepts any keywords for
        the matplotlib Axes.axhline plotting method.
    limit_lines_kwds : dict
        Options to style the limit-of-agreement lines. Accepts any keywords
        for the matplotlib Axes.axhline plotting method.

    Returns
    -------
    Figure
        If `ax` is None, the created figure. Otherwise the figure to which
        `ax` is connected.

    References
    ----------
    Bland JM, Altman DG (1986). "Statistical methods for assessing agreement
    between two methods of clinical measurement"

    Examples
    --------
    Load relevant libraries.

    >>> import statsmodels.api as sm
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt

    Making a mean difference plot.

    >>> # Seed the random number generator.
    >>> # This ensures that the results below are reproducible.
    >>> np.random.seed(9999)
    >>> m1 = np.random.random(20)
    >>> m2 = np.random.random(20)
    >>> f, ax = plt.subplots(1, figsize = (8,5))
    >>> sm.graphics.mean_diff_plot(m1, m2, ax = ax)
    >>> plt.show()

    .. plot:: plots/graphics-mean_diff_plot.py
    """
    fig, ax = utils.create_mpl_ax(ax)

    if len(m1) != len(m2):
        raise ValueError("m1 does not have the same length as m2.")
    if sd_limit < 0:
        raise ValueError(f"sd_limit ({sd_limit}) is less than 0.")

    means = np.mean([m1, m2], axis=0)
    diffs = m1 - m2
    mean_diff = np.mean(diffs)
    std_diff = np.std(diffs, axis=0)

    # Copy caller-supplied styling dicts so defaults are not written back
    # into the caller's objects (the original mutated them in place).
    scatter_kwds = dict(scatter_kwds) if scatter_kwds else {}
    scatter_kwds.setdefault("s", 20)
    mean_line_kwds = dict(mean_line_kwds) if mean_line_kwds else {}
    limit_lines_kwds = dict(limit_lines_kwds) if limit_lines_kwds else {}
    for kwds in (mean_line_kwds, limit_lines_kwds):
        kwds.setdefault("color", "gray")
        kwds.setdefault("linewidth", 1)
    # BUG FIX: the original wrote both linestyle defaults through the leftover
    # loop variable ``kwds`` (i.e. limit_lines_kwds), so the mean line never
    # received its "--" default. Set each dict explicitly instead.
    mean_line_kwds.setdefault("linestyle", "--")
    limit_lines_kwds.setdefault("linestyle", ":")

    ax.scatter(means, diffs, **scatter_kwds)  # Plot the means against the diffs.
    ax.axhline(mean_diff, **mean_line_kwds)  # draw mean line.

    # Annotate mean line with mean difference.
    ax.annotate(
        f"mean diff:\n{mean_diff:0.3g}",
        xy=(0.99, 0.5),
        horizontalalignment="right",
        verticalalignment="center",
        fontsize=14,
        xycoords="axes fraction",
    )

    if sd_limit > 0:
        # Give the limits room: y-range is 1.5x the limit band.
        half_ylim = (1.5 * sd_limit) * std_diff
        ax.set_ylim(mean_diff - half_ylim, mean_diff + half_ylim)
        limit_of_agreement = sd_limit * std_diff
        lower = mean_diff - limit_of_agreement
        upper = mean_diff + limit_of_agreement
        for j, lim in enumerate([lower, upper]):
            ax.axhline(lim, **limit_lines_kwds)
        ax.annotate(
            f"-{sd_limit} SD: {lower:0.2g}",
            xy=(0.99, 0.07),
            horizontalalignment="right",
            verticalalignment="bottom",
            fontsize=14,
            xycoords="axes fraction",
        )
        ax.annotate(
            f"+{sd_limit} SD: {upper:0.2g}",
            xy=(0.99, 0.92),
            horizontalalignment="right",
            fontsize=14,
            xycoords="axes fraction",
        )

    elif sd_limit == 0:
        # No limit lines: default to +/- 3 standard deviations of view.
        half_ylim = 3 * std_diff
        ax.set_ylim(mean_diff - half_ylim, mean_diff + half_ylim)

    ax.set_ylabel("Difference", fontsize=15)
    ax.set_xlabel("Means", fontsize=15)
    ax.tick_params(labelsize=13)
    fig.tight_layout()
    return fig
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@graphics@agreement.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "GeminiDRSoftware/MAROONXDR",
"repo_path": "MAROONXDR_extracted/MAROONXDR-main/maroonxdr/maroonx/recipes/__init__.py",
"type": "Python"
}
|
GeminiDRSoftwareREPO_NAMEMAROONXDRPATH_START.@MAROONXDR_extracted@MAROONXDR-main@maroonxdr@maroonx@recipes@__init__.py@.PATH_END.py
|
|
{
"filename": "_smoothing.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattersmith/line/_smoothing.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SmoothingValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``scattersmith.line.smoothing`` number property."""

    def __init__(
        self, plotly_name="smoothing", parent_name="scattersmith.line", **kwargs
    ):
        # Extract overridable defaults from kwargs before delegating.
        defaults = {
            "edit_type": kwargs.pop("edit_type", "plot"),
            "max": kwargs.pop("max", 1.3),
            "min": kwargs.pop("min", 0),
        }
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattersmith@line@_smoothing.py@.PATH_END.py
|
{
"filename": "_position.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnelarea/title/_position.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class PositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``funnelarea.title.position`` enumerated property."""

    def __init__(
        self, plotly_name="position", parent_name="funnelarea.title", **kwargs
    ):
        # Extract overridable defaults from kwargs before delegating.
        defaults = {
            "edit_type": kwargs.pop("edit_type", "plot"),
            "values": kwargs.pop("values", ["top left", "top center", "top right"]),
        }
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnelarea@title@_position.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/violin/marker/line/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``violin.marker.line.color`` color property."""

    def __init__(self, plotly_name="color", parent_name="violin.marker.line", **kwargs):
        # Extract overridable defaults from kwargs before delegating.
        defaults = {
            "array_ok": kwargs.pop("array_ok", False),
            "edit_type": kwargs.pop("edit_type", "style"),
        }
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@violin@marker@line@_color.py@.PATH_END.py
|
{
"filename": "CAMB.py",
"repo_name": "Valcin/BE_HaPPy",
"repo_path": "BE_HaPPy_extracted/BE_HaPPy-master/coefficients/other neutrinos masses/0.45eV/CAMB.py",
"type": "Python"
}
|
# NOTE(review): this script is Python 2 (print statements below) and drives
# the CAMB Boltzmann code to tabulate matter power spectra and growth rates
# for a massive-neutrino cosmology.
import numpy as np
import camb
import sys,os

################################## INPUT ######################################
# neutrino parameters
hierarchy = 'degenerate' #'degenerate', 'normal', 'inverted'
Mnu       = 0.45  #eV, total neutrino mass
Nnu       = 3     #number of massive neutrinos
Neff      = 3.046
#~ Neff      = 0.00641

# cosmological parameters
h       = 0.6711
# subtract the neutrino contribution (Mnu/(93.14 h^2)) so total matter stays fixed
Omega_c = 0.2685 - Mnu/(93.14*h**2)
Omega_b = 0.049
Omega_k = 0.0
tau     = None

# initial P(k) parameters
ns           = 0.9624
As           = 2.13e-9
pivot_scalar = 0.05
pivot_tensor = 0.05

# redshifts and k-range
redshifts    = [0.0, 0.5, 1, 2, 3, 99]
kmax         = 10.0
k_per_logint = 10

# dz, relative difference dz/z to compute growths
dz = 0.01
###############################################################################
# create a new redshift list to compute growth rates: bracket every requested
# redshift with z +/- dz*(1+z) so f = dln D / dln a can be finite-differenced
zs = []
for z in redshifts:
    dz_abs = (1.0+z)*dz
    if z==0.0:
        zs.append(z); zs.append(z+dz_abs)
    else:
        zs.append(z-dz_abs); zs.append(z); zs.append(z+dz_abs)
z_list = redshifts; redshifts = zs

Omega_cb = Omega_c + Omega_b

pars = camb.CAMBparams()

# set accuracy of the calculation
pars.set_accuracy(AccuracyBoost=5.0, lSampleBoost=5.0,
                  lAccuracyBoost=5.0, HighAccuracyDefault=True,
                  DoLateRadTruncation=True)

# set value of the cosmological parameters
pars.set_cosmology(H0=h*100.0, ombh2=Omega_b*h**2, omch2=Omega_c*h**2,
                   mnu=Mnu, omk=Omega_k,
                   neutrino_hierarchy=hierarchy,
                   num_massive_neutrinos=Nnu,
                   nnu=Neff,
                   tau=tau)

# set the value of the primordial power spectrum parameters
pars.InitPower.set_params(As=As, ns=ns,
                          pivot_scalar=pivot_scalar,
                          pivot_tensor=pivot_tensor)

# set redshifts, k-range and k-sampling
pars.set_matter_power(redshifts=redshifts, kmax=kmax,
                      k_per_logint=k_per_logint)

# compute results
results = camb.get_results(pars)

# get raw matter power spectrum and transfer functions with strange k-binning
#k, zs, Pk = results.get_linear_matter_power_spectrum()
#Tk = (results.get_matter_transfer_data()).transfer_data

# interpolate to get Pmm, Pcc...etc on a regular k-grid
# var codes: 2=cdm, 3=baryons, 6=massive neutrinos, 7=total matter
k, zs, Pkmm = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                npoints=400, var1=7, var2=7,
                                                have_power_spectra=True,
                                                params=None)
k, zs, Pkcc = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                npoints=400, var1=2, var2=2,
                                                have_power_spectra=True,
                                                params=None)
k, zs, Pkbb = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                npoints=400, var1=3, var2=3,
                                                have_power_spectra=True,
                                                params=None)
k, zs, Pkcb = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                npoints=400, var1=2, var2=3,
                                                have_power_spectra=True,
                                                params=None)
# combine auto/cross spectra into the total cdm+baryon spectrum
Pkcb = (Omega_c**2*Pkcc + Omega_b**2*Pkbb +\
        2.0*Omega_b*Omega_c*Pkcb)/Omega_cb**2
k, zs, Pknn = results.get_matter_power_spectrum(minkh=2e-5, maxkh=kmax,
                                                npoints=400, var1=6, var2=6,
                                                have_power_spectra=True,
                                                params=None)

print pars

# get sigma_8 and Hz in km/s/(kpc/h)
s8 = np.array(results.get_sigma8())
Hz = results.hubble_parameter(99.0)
print 'H(z=99)      = %.4f km/s/(kpc/h)'%(Hz/1e3/h)
print 'sigma_8(z=0) = %.4f'%s8[-1]
# do a loop over all redshifts and write one P(k) table per species
for i,z in enumerate(zs):
    fout1 = 'Pk_mm_z=%.3f.txt'%z
    fout2 = 'Pk_cc_z=%.3f.txt'%z
    fout3 = 'Pk_bb_z=%.3f.txt'%z
    fout4 = 'Pk_cb_z=%.3f.txt'%z
    fout5 = 'Pk_nn_z=%.3f.txt'%z
    np.savetxt(fout1,np.transpose([k,Pkmm[i,:]]))
    np.savetxt(fout2,np.transpose([k,Pkcc[i,:]]))
    np.savetxt(fout3,np.transpose([k,Pkbb[i,:]]))
    np.savetxt(fout4,np.transpose([k,Pkcb[i,:]]))
    np.savetxt(fout5,np.transpose([k,Pknn[i,:]]))

    #fout = 'Pk_trans_z=%.3f.txt'%z
    # notice that transfer functions have an inverted order:i=0 ==>z_max
    #np.savetxt(fout,np.transpose([Tk[0,:,i],Tk[1,:,i],Tk[2,:,i],Tk[3,:,i],
    #                              Tk[4,:,i],Tk[5,:,i],Tk[6,:,i]]))

# compute growth rates f = -0.5 (1+z) dln P / dz by central (or forward at
# z=0) finite differences of the bracketing spectra, then delete the
# auxiliary P(k) files used only for the derivative
for z in z_list:
    dz_abs = (1.0+z)*dz
    for suffix in ['mm','cb','nn']:
        fout = 'f%s_z=%.3f.txt'%(suffix,z)
        f2 = 'Pk_%s_z=%.3f.txt'%(suffix,z+dz_abs)
        if z==0.0:
            # forward difference at z=0 (no z-dz file exists)
            f1 = 'Pk_%s_z=%.3f.txt'%(suffix,z); fac = 1.0
        else:
            f1 = 'Pk_%s_z=%.3f.txt'%(suffix,z-dz_abs); fac = 2.0
        k1,Pk1 = np.loadtxt(f1,unpack=True)
        k2,Pk2 = np.loadtxt(f2,unpack=True)
        if np.any(k1!=k2):
            print 'Error!'; sys.exit()
        f = -0.5*(1.0+z)*np.log(Pk2/Pk1)/(fac*dz_abs)
        np.savetxt(fout,np.transpose([k1,f]))
        os.system('rm '+f2)
        if z!=0.0: os.system('rm '+f1)
|
ValcinREPO_NAMEBE_HaPPyPATH_START.@BE_HaPPy_extracted@BE_HaPPy-master@coefficients@other neutrinos masses@0.45eV@CAMB.py@.PATH_END.py
|
{
"filename": "definitions.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/frontends/artio/definitions.py",
"type": "Python"
}
|
# Mapping from yt field names to the corresponding ARTIO variable labels.
yt_to_art = {
    "Density": "HVAR_GAS_DENSITY",
    "TotalEnergy": "HVAR_GAS_ENERGY",
    "GasEnergy": "HVAR_INTERNAL_ENERGY",
    "Pressure": "HVAR_PRESSURE",
    "XMomentumDensity": "HVAR_MOMENTUM_X",
    "YMomentumDensity": "HVAR_MOMENTUM_Y",
    "ZMomentumDensity": "HVAR_MOMENTUM_Z",
    "Gamma": "HVAR_GAMMA",
    "MetalDensitySNIa": "HVAR_METAL_DENSITY_Ia",
    "MetalDensitySNII": "HVAR_METAL_DENSITY_II",
    "Potential": "VAR_POTENTIAL",
    "PotentialHydro": "VAR_POTENTIAL_HYDRO",
    "particle_position_x": "POSITION_X",
    "particle_position_y": "POSITION_Y",
    "particle_position_z": "POSITION_Z",
    "particle_velocity_x": "VELOCITY_X",
    "particle_velocity_y": "VELOCITY_Y",
    "particle_velocity_z": "VELOCITY_Z",
    "particle_mass": "MASS",
    "particle_index": "PID",
    "particle_species": "SPECIES",
    "creation_time": "BIRTH_TIME",
    "particle_mass_initial": "INITIAL_MASS",
    "particle_metallicity1": "METALLICITY_SNIa",
    "particle_metallicity2": "METALLICITY_SNII",
    "stars": "STAR",
    "nbody": "N-BODY",
}
# Inverse mapping, ARTIO label -> yt field name.
art_to_yt = {art: yt for yt, art in yt_to_art.items()}
class ARTIOconstants:
    """Physical and astronomical constants in CGS units for the ARTIO frontend."""

    def __init__(self):
        # Time units [s]
        yr = 365.25 * 86400
        self.yr = yr
        self.Myr = 1.0e6 * yr
        self.Gyr = 1.0e9 * yr

        # Length units [cm]
        pc = 3.0856775813e18
        self.pc = pc
        self.kpc = 1.0e3 * pc
        self.Mpc = 1.0e6 * pc

        # Velocity: km/s expressed in cm/s
        self.kms = 1.0e5

        # Fundamental constants (CGS)
        self.mp = 1.672621637e-24   # proton mass [g]
        self.k = 1.3806504e-16      # Boltzmann constant [erg/K]
        self.G = 6.67428e-8         # gravitational constant
        self.c = 2.99792458e10      # speed of light [cm/s]
        self.eV = 1.602176487e-12   # electron volt [erg]

        # Atomic masses [g]
        amu = 1.660538782e-24
        self.amu = amu
        self.mH = 1.007825 * amu
        self.mHe = 4.002602 * amu

        # Solar quantities: Msun derived from the well-measured GM_sun product
        self.Msun = 1.32712440018e26 / self.G
        self.Zsun = 0.0199

        # Primordial composition: helium mass fraction and derived mean weights
        Yp = 0.24
        self.Yp = Yp
        self.wmu = 4.0 / (8.0 - 5.0 * Yp)
        self.wmu_e = 1.0 / (1.0 - 0.5 * Yp)
        self.XH = 1.0 - Yp
        self.XHe = 0.25 * Yp

        self.gamma = 5.0 / 3.0     # adiabatic index (monatomic ideal gas)
        self.sigmaT = 6.6524e-25   # Thomson cross-section [cm^2]
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@frontends@artio@definitions.py@.PATH_END.py
|
{
"filename": "custom_batching.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/custom_batching.py",
"type": "Python"
}
|
# Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Callable
from typing import Any
import functools
import operator
from jax import lax
from jax._src import api
from jax._src import core
from jax._src import custom_api_util
from jax._src import linear_util as lu
from jax._src import source_info_util
from jax._src import traceback_util
from jax._src import tree_util
from jax._src import util
from jax._src.api_util import flatten_fun_nokwargs, resolve_kwargs
from jax._src.interpreters import ad
from jax._src.interpreters import batching
from jax._src.interpreters.batching import not_mapped
from jax._src.interpreters import mlir
from jax._src.interpreters import partial_eval as pe
from jax._src.interpreters import xla
from jax._src.tree_util import (tree_flatten, tree_map, tree_structure,
tree_unflatten, treedef_tuple)
# Hide this file's frames from user-facing tracebacks and source info.
source_info_util.register_exclusion(__file__)
traceback_util.register_exclusion(__file__)

# Length-checking variants of map/zip; the raw builtins remain available
# under the unsafe_* names.
map, unsafe_map = util.safe_map, map
zip, unsafe_zip = util.safe_zip, zip
@custom_api_util.register_custom_decorator_type
class custom_vmap:
  """Customize the vmap behavior of a JAX-transformable function.

  This decorator is used to customize the behavior of a JAX function under the
  :func:`jax.vmap` transformation. A ``custom_vmap``-decorated function will
  mostly (see below for caveats) have the same behavior as the underlying
  function, except when batched using :py:func:`jax.vmap`. When batched, the
  rule defined using :py:func:`~jax.custom_batching.custom_vmap.def_vmap` will
  be used.

  For example:

  >>> @jax.custom_batching.custom_vmap
  ... def f(x, y):
  ...   return x + y
  ...
  >>> @f.def_vmap
  ... def f_vmap_rule(axis_size, in_batched, xs, ys):
  ...   assert all(in_batched)
  ...   assert xs.shape[0] == axis_size
  ...   assert ys.shape[0] == axis_size
  ...   out_batched = True
  ...   return xs * ys, out_batched
  ...
  >>> xs = jnp.arange(3)
  >>> ys = jnp.arange(1, 4)
  >>> jax.vmap(f)(xs, ys)  # prints xs * ys instead of xs + ys
  Array([0, 2, 6], dtype=int32)

  Of note, ``custom_vmap`` functions do not support reverse-mode autodiff. To
  customize both vmap and reverse-mode autodiff, combine ``custom_vmap`` with
  :py:class:`jax.custom_vjp`. For example:

  >>> @jax.custom_vjp
  ... @jax.custom_batching.custom_vmap
  ... def f(x, y):
  ...   return jnp.sin(x) * y
  ...
  >>> @f.def_vmap
  ... def f_vmap_rule(axis_size, in_batched, xs, ys):
  ...   return jnp.cos(xs) * ys, True
  ...
  >>> def f_fwd(x, y):
  ...   return f(x, y), (jnp.cos(x), jnp.sin(x), y)
  ...
  >>> def f_bwd(res, g):
  ...   cos_x, sin_x, y = res
  ...   return (cos_x * g * y, sin_x * g)
  ...
  >>> f.defvjp(f_fwd, f_bwd)
  >>> jax.vmap(f)(jnp.zeros(3), jnp.ones(3))
  Array([1., 1., 1.], dtype=float32)
  >>> jax.grad(f)(jnp.zeros(()), jnp.ones(()))
  Array(1., dtype=float32)

  Note that the :py:class:`jax.custom_vjp` must be on the outside, wrapping the
  ``custom_vmap``-decorated function.
  """

  # The wrapped function and, once registered via def_vmap, its batching rule.
  fun: Callable[..., Any]
  vmap_rule: Callable[..., tuple[Any, Any]] | None

  def __init__(self, fun: Callable[..., Any]):
    functools.update_wrapper(self, fun)
    self.fun = fun
    self.vmap_rule = None

  __getattr__ = custom_api_util.forward_attr

  def def_vmap(
      self,
      vmap_rule: Callable[..., tuple[Any, Any]],
  ) -> Callable[..., tuple[Any, Any]]:
    """Define the vmap rule for this custom_vmap function.

    Args:
      vmap_rule: A function that implements the vmap rule. This function should
        accept the following arguments: (1) an integer ``axis_size`` as its
        first argument, (2) a pytree of booleans with the same structure as the
        inputs to the function, specifying whether each argument is batched,
        and (3) the batched arguments. It should return a tuple of the batched
        output and a pytree of booleans with the same structure as the output,
        specifying whether each output element is batched. See the documentation
        for :py:func:`jax.custom_batching.custom_vmap` for some examples.

    Returns:
      This method passes the rule through, returning ``vmap_rule`` unchanged.
    """
    self.vmap_rule = vmap_rule
    return vmap_rule

  @traceback_util.api_boundary
  def __call__(self, *args, **kwargs):
    args = resolve_kwargs(self.fun, args, kwargs)
    fun_name = getattr(self.fun, "__name__", str(self.fun))
    if not self.vmap_rule:
      raise AttributeError(
          f"No batching rule defined for custom_vmap function {fun_name} "
          "using def_vmap.")
    args_flat, in_tree = tree_flatten(args)
    flat_fun, out_tree = flatten_fun_nokwargs(lu.wrap_init(self.fun), in_tree)
    in_avals = [core.raise_to_shaped(core.get_aval(x)) for x in args_flat]
    debug = pe.debug_info(self.fun, in_tree, out_tree, False, "custom_vmap")
    # Trace the wrapped function to a jaxpr so the primitive can carry it
    # around; constants become leading arguments of the closed call.
    jaxpr, _, consts, () = pe.trace_to_jaxpr_dynamic(flat_fun, in_avals, debug)
    closed_call = core.ClosedJaxpr(pe.convert_constvars_jaxpr(jaxpr), ())
    # Record consts as an extra leading "argument group" in the input tree.
    in_tree = treedef_tuple((tree_structure(consts), in_tree))
    assert self.vmap_rule is not None
    out_flat = custom_vmap_p.bind(*consts, *args_flat,
                                  call=closed_call,
                                  rule=ClosedRule(self.vmap_rule),
                                  in_tree=in_tree,
                                  out_tree=out_tree())
    return tree_unflatten(out_tree(), out_flat)
### utils
# Define a class, instead of making a function closing over `rule`, so
# that we can override __str__
class ClosedRule:
  """Adapts a user vmap rule to the (consts, args) calling convention of
  the closed call jaxpr; constants must never carry a batch dimension."""

  def __init__(self, rule):
    functools.update_wrapper(self, rule)
    self.rule = rule

  def __call__(self, axis_size, all_in_batched, *all_args):
    consts_batched, in_batched = all_in_batched
    _, args = all_args
    # Constants captured by tracing are required to be unbatched.
    assert not any(tree_util.tree_leaves(consts_batched)), consts_batched
    return call_rule(self.rule, axis_size, in_batched, args)

  def __str__(self):
    return str(self.rule)
def ensure_list(xs):
  """Return ``xs`` itself if it is exactly a list, else a list copy of it."""
  if type(xs) is list:
    return xs
  return list(xs)
def rule_name(rule):
  """Best-effort display name for a rule callable, for error messages."""
  try:
    return rule.__name__
  except AttributeError:
    return '<unnamed rule>'
def call_rule(rule, axis_size, in_batched, args):
  """Invoke ``rule`` with the batching spec coerced to a list."""
  batched_spec = ensure_list(in_batched)
  return rule(axis_size, batched_spec, *args)
def check_vmap_rule_trees(rule, original_out_tree, out_tree, out_batched_tree):
  """Raise ValueError when a vmap rule's output trees are inconsistent,
  either internally (value vs. batching spec) or against the original
  function's output structure."""
  name = rule_name(rule)
  if out_tree != out_batched_tree:
    raise ValueError(
        'structure of output value and output batching specification returned '
        f'by custom vmap rule ({name}) do not match.\n'
        f'Output values: {out_tree}\n'
        f'Batching spec: {out_batched_tree}')
  if out_tree != original_out_tree:
    raise ValueError(
        f'structure of output returned by custom vmap rule ({name}) '
        'does not match that of original custom-vmapped function.\n'
        f'Original output: {original_out_tree}\n'
        f'Rule output: {out_tree}')
# Like batching.bdim_at_front, but doesn't broadcast if not mapped
def maybe_bdim_at_front(x, bdim):
  """Move axis ``bdim`` of ``x`` to the front; pass ``x`` through unmapped."""
  return x if bdim is not_mapped else util.moveaxis(x, bdim, 0)
# Like batching.batch except (a) not curried and (b) returns inferred output
# axes instead of accepting and matching a given spec of output axes. Assumes
# `f` is pytree-flattened
def vmap_unrestricted(f: lu.WrappedFun, *args, in_axes, axis_name, axis_size):
  """Batch ``f`` over ``in_axes`` and return ``(outputs, inferred out_axes)``."""
  axis_data = batching.AxisData(axis_name, axis_size, None)
  tag = core.TraceTag()
  # batch_subtrace yields the transformed fun plus a thunk producing the
  # output axes, valid only after the function has been run.
  f, out_axes = batching.batch_subtrace(f, tag, axis_data, in_axes)
  outs = f.call_wrapped(*args)
  return outs, out_axes()
### custom_vmap_p rules
def custom_vmap_impl(*args, call, rule, in_tree, out_tree):
  """Unbatched evaluation of the primitive: run the stored closed jaxpr."""
  del rule, in_tree, out_tree  # only needed by the batching rule
  fun = core.jaxpr_as_fun(call)
  return fun(*args)
def custom_vmap_batching(args_flat, dims, *, call, rule, in_tree, out_tree):
  """Batching rule for custom_vmap_p: delegate to the user-registered rule."""
  del call
  # All mapped args must share one batch size; the single-element set
  # destructuring enforces that.
  axis_size, = {x.shape[d] for x, d in zip(args_flat, dims) if d is not None}
  # Normalize: batch dim at the front for every mapped argument.
  args_flat = map(maybe_bdim_at_front, args_flat, dims)
  flat_in_batched = [d is not not_mapped for d in dims]

  args = tree_unflatten(in_tree, args_flat)
  in_batched = tree_unflatten(in_tree, flat_in_batched)
  out, out_batched = call_rule(rule, axis_size, in_batched, args)
  flat_outs, tree1 = tree_flatten(out)
  flat_out_batched, tree2 = tree_flatten(out_batched)
  check_vmap_rule_trees(rule, out_tree, tree1, tree2)
  # Outputs the rule declared batched carry their batch axis at position 0.
  flat_out_dims = [0 if b else not_mapped for b in flat_out_batched]
  return flat_outs, flat_out_dims
def custom_vmap_abstract_eval(*in_avals, call, **_):
  """Output avals are exactly those of the traced closed call jaxpr."""
  del in_avals
  return call.out_avals
def custom_vmap_jvp(primals, tangents, *, call, rule, in_tree, out_tree):
  """JVP rule for custom_vmap_p.

  Differentiates through a custom_vmap call by building a new custom_vmap
  whose rule is "the JVP of the user rule": dims batched in both primals and
  tangents are handled by the rule itself inside `jax.jvp`, while dims
  batched in only one of the two are vmapped over on the outside.
  """
  def jvp_of_rule_rule(axis_size, in_batched, primals, tangents):
    in_batched_ps, in_batched_ts = in_batched

    # Split batched-ness three ways: batched in both, only in primals, only
    # in tangents. The mutual part goes to the user rule directly.
    mutually_batched = tree_map(operator.and_, in_batched_ps, in_batched_ts)
    extra_batched_ps = tree_map(lambda pb, tb: 0 if pb and not tb else None,
                                in_batched_ps, in_batched_ts)
    extra_batched_ts = tree_map(lambda pb, tb: 0 if tb and not pb else None,
                                in_batched_ps, in_batched_ts)

    out_mutually_batched = lu.Store()
    flat_ps_ts, tree_ps_ts = tree_flatten((primals, tangents))
    flat_extra_batched_ps_ts, tree_ps_ts2 = tree_flatten(
        (extra_batched_ps, extra_batched_ts),
        is_leaf=lambda x: x is None)

    # TODO(frostig): assert these also equal:
    #   treedef_tuple((in_tree, in_tree))
    # once https://github.com/jax-ml/jax/issues/9066 is fixed
    assert tree_ps_ts == tree_ps_ts2
    del tree_ps_ts2

    def to_jvp(*primals):
      # Run the user rule on the mutually-batched view, stashing its output
      # batching spec for later merging.
      out, out_batched = call_rule(rule, axis_size, mutually_batched, primals)
      check_vmap_rule_trees(
          rule, out_tree, tree_structure(out), tree_structure(out_batched))
      out_mutually_batched.store(out_batched)
      return out

    def to_vmap_over_extra_batched_dims(primals, tangents):
      return api.jvp(to_jvp, primals, tangents)

    to_vmap_over_extra_batched_dims_flat, out_tree2 = flatten_fun_nokwargs(
        lu.wrap_init(to_vmap_over_extra_batched_dims),
        tree_ps_ts)

    # vmap over the dims batched in only primals or only tangents.
    flat_out_ps_ts, flat_out_axes = vmap_unrestricted(
        to_vmap_over_extra_batched_dims_flat, *flat_ps_ts,
        in_axes=flat_extra_batched_ps_ts,
        axis_name=core.no_axis_name, axis_size=axis_size)

    # Outputs come back as primals followed by tangents, in equal halves.
    n, ragged = divmod(len(flat_out_ps_ts), 2)
    assert not ragged
    flat_out_ps, flat_out_ts = flat_out_ps_ts[:n], flat_out_ps_ts[n:]
    flat_out_axes_p, flat_out_axes_t = flat_out_axes[:n], flat_out_axes[n:]
    flat_out_ps = map(maybe_bdim_at_front, flat_out_ps, flat_out_axes_p)
    flat_out_extra_batched_ps = [d is not not_mapped for d in flat_out_axes_p]
    flat_out_ts = map(maybe_bdim_at_front, flat_out_ts, flat_out_axes_t)
    flat_out_extra_batched_ts = [d is not not_mapped for d in flat_out_axes_t]

    out_ps, out_ts = tree_unflatten(
        out_tree2(), [*flat_out_ps, *flat_out_ts])
    out_extra_batched_ps, out_extra_batched_ts = tree_unflatten(
        out_tree2(), [*flat_out_extra_batched_ps, *flat_out_extra_batched_ts])

    # An output is batched if the rule batched it or the outer vmap did.
    out_batched_ps = tree_map(
        operator.or_, out_mutually_batched.val, out_extra_batched_ps)
    out_batched_ts = tree_map(
        operator.or_, out_mutually_batched.val, out_extra_batched_ts)

    return (out_ps, out_ts), (out_batched_ps, out_batched_ts)

  tangents = map(ad.instantiate_zeros, tangents)
  jvp_call, _ = ad.jvp_jaxpr(call, [True] * len(primals), True)
  jvp_in_tree = treedef_tuple((in_tree, in_tree))
  jvp_out_tree = treedef_tuple((out_tree, out_tree))
  outs = custom_vmap_p.bind(
      *primals, *tangents,
      call=jvp_call, rule=jvp_of_rule_rule,
      in_tree=jvp_in_tree, out_tree=jvp_out_tree)
  assert len(outs) % 2 == 0, len(outs)
  out_primals, out_tangents = util.split_list(outs, [len(outs) // 2])
  return out_primals, out_tangents
# Primitive registration: impl/abstract-eval plus the batching and JVP rules
# defined above; lowering simply re-traces the impl (initial-style primitive).
custom_vmap_p = core.Primitive('custom_vmap_call')
custom_vmap_p.multiple_results = True
custom_vmap_p.def_impl(custom_vmap_impl)
custom_vmap_p.def_abstract_eval(custom_vmap_abstract_eval)
batching.primitive_batchers[custom_vmap_p] = custom_vmap_batching
ad.primitive_jvps[custom_vmap_p] = custom_vmap_jvp
xla.register_initial_style_primitive(custom_vmap_p)
mlir.register_lowering(custom_vmap_p, mlir.lower_fun(
    custom_vmap_impl, multiple_results=True))
# -- custom vmap applications
def tree_split(mask, tree):
  """Split ``tree`` by boolean ``mask`` into (selected, rest) trees,
  filling the complementary positions with None."""
  selected = tree_map(lambda keep, leaf: leaf if keep else None, mask, tree)
  rest = tree_map(lambda keep, leaf: None if keep else leaf, mask, tree)
  return selected, rest
def tree_merge(mask, lhs_tree, rhs_tree):
  """Inverse of tree_split: pick leaves from lhs where mask holds, else rhs."""
  def pick(keep, left, right):
    return left if keep else right
  return tree_map(pick, mask, lhs_tree, rhs_tree)
def sequential_vmap(f):
  """A special case of ``custom_vmap`` that uses a loop.

  A function decorated with ``sequential_vmap`` will be called sequentially
  within a loop when batched. This is useful for functions that don't natively
  support batch dimensions.

  For example:

  >>> @jax.custom_batching.sequential_vmap
  ... def f(x):
  ...   jax.debug.print("{}", x)
  ...   return x + 1
  ...
  >>> jax.vmap(f)(jnp.arange(3))
  0
  1
  2
  Array([1, 2, 3], dtype=int32)

  Where the print statements demonstrate that this :py:func:`~jax.vmap` is being
  generated using a loop.

  See the documentation for :py:class:`~jax.custom_batching.custom_vmap` for
  more details.
  """
  f = custom_vmap(f)

  @f.def_vmap
  def rule(axis_size, in_batched, *args):
    del axis_size

    def to_map(mapped_args):
      # Recombine this iteration's mapped slices with the unbatched args.
      args = tree_merge(in_batched, mapped_args, bcast_args)
      return f(*args)

    # Separate batched args (iterated by lax.map) from broadcast args
    # (closed over unchanged).
    mapped_args, bcast_args = tree_split(in_batched, list(args))
    out = lax.map(to_map, mapped_args)
    # lax.map stacks results, so every output leaf is batched.
    out_batched = tree_map(lambda _: True, out)
    return out, out_batched

  return f
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@custom_batching.py@.PATH_END.py
|
{
"filename": "_separatethousands.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/coloraxis/colorbar/_separatethousands.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SeparatethousandsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for ``layout.coloraxis.colorbar.separatethousands``."""

    def __init__(
        self,
        plotly_name="separatethousands",
        parent_name="layout.coloraxis.colorbar",
        **kwargs,
    ):
        # Extract the overridable default from kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@coloraxis@colorbar@_separatethousands.py@.PATH_END.py
|
{
"filename": "manually_test_power_mnu.ipynb",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/benchmarks/data/codes/manually_test_power_mnu.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pyccl as ccl
```
We want to compare the matter power spectrum as output manually from CLASS with massive neutrinos nonzero to the power spectrum as output from CLASS via CCL for the same parameters. This is a sanity check.
```python
# Cosmological parameters shared by the three test models.
OmegaC = 0.25
OmegaB = 0.05
h = 0.7
As = 2.1*10**(-9)
#OmegaL = [0.7, 0.7, 0.7, 0.65, 0.75]
Omegak = [0., 0., 0.]
w0 = [-1., -0.9, -0.9]
wa = [0., 0., 0.1]
mnu = [[0.04,0., 0.], [0.05, 0.01, 0.], [0.03, 0.02, 0.04]]
#N_nu_mass = [1, 2, 3]
#N_nu_rel =[2.0328, 1.0196, 0.00641]
Neff = 3.046
#Neff = [N_nu_rel[i] + N_nu_mass[i] * 0.71611**4 / ((4. / 11.)**(4./3.)) for i in range(0,3)]
# Bug fix: use the print() function so this cell also runs under Python 3
# (prints identically for a single argument under Python 2).
print(Neff)
```
3.046
```python
# Load class linear and nonlinear power spectra for the 5 different models
lin_pk = [0]*3
lin_k = [0]*3
for mi in range(0,3):
lin_k[mi], lin_pk[mi] = np.loadtxt('./model'+str(mi+1)+'_pk.dat', unpack=True)
```
```python
# Loop over models: build one CCL cosmology per model and evaluate the linear
# matter power spectrum on the CLASS k-grid (converted from h/Mpc to 1/Mpc,
# with the result converted back to (Mpc/h)^3 units via h**3).
pk_ccl_lin = [0]*3
for mi in range(0,3):
    # Bug fix: the cosmology was assigned to `p` but used as the undefined
    # name `cosmo` below, raising a NameError.
    cosmo = ccl.Cosmology(Omega_c=OmegaC, Omega_b=OmegaB, Neff = Neff, h=h, A_s=As, n_s=0.96, Omega_k=Omegak[mi], w0=w0[mi], wa=wa[mi], m_nu=mnu[mi])
    pk_ccl_lin[mi] = ccl.power.linear_matter_power(cosmo, lin_k[mi]*h, 1.) * h**3
```
```python
# Get the fractional difference between power spectra for each model
frac_diff_lin = [0]*3
for mi in range(0,3):
frac_diff_lin[mi] = np.abs(pk_ccl_lin[mi] - lin_pk[mi]) / lin_pk[mi]
```
```python
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$P(k)$ [Mpc$^{3}$]',fontsize=22)
plt.loglog(lin_k[0], pk_ccl_lin[0], 'm')
plt.loglog(lin_k[0], lin_pk[0], 'g')
plt.show()
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$\Delta P(k)/P(k)$',fontsize=22)
plt.loglog(lin_k[0], frac_diff_lin[0], 'm')
plt.show()
```


```python
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$P(k)$ [Mpc$^{3}$]',fontsize=22)
plt.loglog(lin_k[1], pk_ccl_lin[1], 'm')
plt.loglog(lin_k[1], lin_pk[1], 'g')
plt.show()
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$\Delta P(k)/P(k)$',fontsize=22)
plt.loglog(lin_k[1], frac_diff_lin[1], 'm')
plt.show()
```


```python
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$P(k)$ [Mpc$^{3}$]',fontsize=22)
plt.loglog(lin_k[2], pk_ccl_lin[2], 'm')
plt.loglog(lin_k[2], lin_pk[2], 'g')
plt.show()
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$\Delta P(k)/P(k)$',fontsize=22)
plt.loglog(lin_k[2], frac_diff_lin[2], 'm')
plt.show()
```


Now do nonlinear case
```python
nl_pk = [0]*3
nl_k = [0]*3
for mi in range(0,3):
nl_k[mi], nl_pk[mi] = np.loadtxt('./model'+str(mi+1)+'_nl_pk.dat', unpack=True)
```
```python
# Same as the linear case but for the nonlinear matter power spectrum.
pk_ccl_nl = [0]*3
for mi in range(0,3):
    # Bug fix: the cosmology was assigned to `p` but used as the undefined
    # name `cosmo` below, raising a NameError.
    cosmo = ccl.Cosmology(Omega_c=OmegaC, Omega_b=OmegaB, Neff = Neff, h=h, A_s=As, n_s=0.96, Omega_k=Omegak[mi], w0=w0[mi], wa=wa[mi], m_nu=mnu[mi])
    pk_ccl_nl[mi] = ccl.power.nonlin_matter_power(cosmo, nl_k[mi]*h, 1.) * h**3
```
```python
# Get the fractional difference between power spectra for each model
frac_diff_nl = [0]*3
for mi in range(0,3):
frac_diff_nl[mi] = np.abs(pk_ccl_nl[mi] - nl_pk[mi]) / nl_pk[mi]
```
```python
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$P(k)$ [Mpc$^{3}$]',fontsize=22)
plt.loglog(nl_k[0], pk_ccl_nl[0], 'm')
plt.loglog(nl_k[0], nl_pk[0], 'g')
plt.show()
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$\Delta P(k)/P(k)$',fontsize=22)
plt.loglog(nl_k[0], frac_diff_nl[0], 'm')
plt.show()
```


```python
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$P(k)$ [Mpc$^{3}$]',fontsize=22)
plt.loglog(nl_k[1], pk_ccl_nl[1], 'm')
plt.loglog(nl_k[1], nl_pk[1], 'g')
plt.show()
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$\Delta P(k)/P(k)$',fontsize=22)
plt.loglog(nl_k[1], frac_diff_nl[1], 'm')
plt.show()
```


```python
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$P(k)$ [Mpc$^{3}$]',fontsize=22)
plt.loglog(nl_k[2], pk_ccl_nl[2], 'm')
plt.loglog(nl_k[2], nl_pk[2], 'g')
plt.show()
plt.figure()
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
plt.xlabel(r'$k$ [Mpc$^{-1}$]',fontsize=22)
plt.ylabel(r'$\Delta P(k)/P(k)$',fontsize=22)
plt.loglog(nl_k[2], frac_diff_nl[2], 'm')
plt.show()
```


|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@benchmarks@data@codes@manually_test_power_mnu.ipynb@.PATH_END.py
|
{
"filename": "test_py_hipert_em.py",
"repo_name": "NumCosmo/NumCosmo",
"repo_path": "NumCosmo_extracted/NumCosmo-master/tests/test_py_hipert_em.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# test_py_hipert_em.py
#
# Tue Apr 16 10:24:29 2024
# Copyright 2024 Sandro Dias Pinto Vitenti
# <vitenti@uel.br>
#
# test_hipert_em.py
# Copyright (C) 2024 Sandro Dias Pinto Vitenti <vitenti@uel.br>
#
# numcosmo is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# numcosmo is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
"""Tests on NcHIPertEM class."""
import math
import pytest
from numpy.testing import assert_allclose
import numpy as np
from numcosmo_py import Ncm, Nc
Ncm.cfg_init()
@pytest.fixture(
    name="pem_vexp",
    params=[
        Nc.HICosmoVexpEMCoupling.NONE,
        Nc.HICosmoVexpEMCoupling.CAUCHY,
        Nc.HICosmoVexpEMCoupling.GAUSS,
    ],
    ids=["none", "cauchy", "gauss"],
)
def fixture_em(request):
    """Parametrized fixture providing a configured (NcHIPertEM, NcHICosmoVexp) pair.

    Runs once per electromagnetic coupling type (none, Cauchy, Gaussian) and
    returns the perturbation object with mode, time limits, vacuum search
    settings and initial-condition type already set.
    """
    pem = Nc.HIPertEM.new()
    vexp = Nc.HICosmoVexp()
    current_set = {
        "alphab": 7.4847e-3,  # Alpha (number of e-folds) at the bounce
        "sigmaphi": 100.0,  # Width of the Gaussian solution for the WdW equation
        "xb": 2.0e36,  # Inverse of the scale factor at the bounce (Initial condition)
        "dphi": -9.0e-4,  # Deviation of the Gaussian solution for the WdW equation
        "OmegaL": 1.0,  # H^2(a when w=-1)/H^2(a0). Basically gives the DE-dominated phase
        "Omegac": 1.0,  # NOTE(review): presumably the dust-like density parameter -- confirm
        "H0": 67.8,  # Hubble parameter today given by CMB observations
        "alphaem": 14.4,  # Amplitude of the EM gaussian coupling
        "betaem": 2.2,  # Width of the EM gaussian coupling
    }
    vexp.set_properties(**current_set)
    vexp.set_em_coupling(request.param)
    # No coupling has F1 exactly zero, so computing F2 numerically can be problematic;
    # using ADIABATIC2 avoids this problem.
    # Gaussians have a very small F1 (suppressed by the Gaussian term), so it is also
    # problematic to compute F2 numerically.
    if request.param == Nc.HICosmoVexpEMCoupling.NONE:
        pem.set_abstol(1.0e-200)
        pem.set_initial_condition_type(Ncm.CSQ1DInitialStateType.ADIABATIC2)
    elif request.param == Nc.HICosmoVexpEMCoupling.GAUSS:
        pem.set_initial_condition_type(Ncm.CSQ1DInitialStateType.ADIABATIC2)
    else:
        pem.set_initial_condition_type(Ncm.CSQ1DInitialStateType.ADIABATIC4)
    pem.set_k(1.0)
    pem.set_ti(vexp.tau_min())
    # Warning: this model has a singularity at the expanding phase, the final time
    # should be set prior to it.
    pem.set_tf(vexp.tau_max())
    pem.set_vacuum_max_time(-1.0e-1)
    pem.set_vacuum_reltol(1.0e-8)
    return pem, vexp
def test_hipert_em_vexp(pem_vexp):
    """Test basic getters and setters of NcHIPertEM."""
    pem, vexp = pem_vexp
    assert_allclose(pem.get_k(), 1.0)
    assert_allclose(pem.get_ti(), vexp.tau_min())
    assert_allclose(pem.get_tf(), vexp.tau_max())
    pem.set_reltol(1.0e-6)
    pem.set_abstol(1.0e-7)
    assert_allclose(pem.get_reltol(), 1.0e-6)
    assert_allclose(pem.get_abstol(), 1.0e-7)
    pem.set_adiab_threshold(1.0e-3)
    pem.set_prop_threshold(1.0e-3)
    assert_allclose(pem.get_adiab_threshold(), 1.0e-3)
    assert_allclose(pem.get_prop_threshold(), 1.0e-3)
    pem.set_save_evol(True)
    assert pem.get_save_evol()
    pem.set_save_evol(False)
    assert not pem.get_save_evol()
    # Every initial-condition type must round-trip through set/get.
    pem.set_initial_condition_type(Ncm.CSQ1DInitialStateType.AD_HOC)
    assert pem.get_initial_condition_type() == Ncm.CSQ1DInitialStateType.AD_HOC
    pem.set_initial_condition_type(Ncm.CSQ1DInitialStateType.ADIABATIC2)
    assert pem.get_initial_condition_type() == Ncm.CSQ1DInitialStateType.ADIABATIC2
    pem.set_initial_condition_type(Ncm.CSQ1DInitialStateType.ADIABATIC4)
    assert pem.get_initial_condition_type() == Ncm.CSQ1DInitialStateType.ADIABATIC4
    pem.set_initial_condition_type(Ncm.CSQ1DInitialStateType.NONADIABATIC2)
    assert pem.get_initial_condition_type() == Ncm.CSQ1DInitialStateType.NONADIABATIC2
def test_initial_conditions_time_vexp(pem_vexp):
    """Test that an adiabatic-vacuum time limit exists for NcHIPertEM."""
    pem, vexp = pem_vexp
    limit_found, t_adiab = pem.find_adiab_time_limit(
        vexp, vexp.tau_min(), pem.get_vacuum_max_time(), 1.0e-6
    )
    assert limit_found
    assert t_adiab >= pem.get_ti()
    assert t_adiab <= pem.get_tf()
    t_min, F1_min, t_lb, t_ub = pem.find_adiab_max(
        vexp, vexp.tau_min(), pem.get_vacuum_max_time(), 1.0e-1
    )
    assert_allclose(F1_min, pem.eval_F1(vexp, t_min))
    # The bracketing times must differ from the extremum by no more than the
    # requested tolerance (1.0e-1, with a small numerical margin).
    assert math.fabs(F1_min - pem.eval_F1(vexp, t_lb)) <= 1.001e-1
    assert math.fabs(F1_min - pem.eval_F1(vexp, t_ub)) <= 1.001e-1
def test_initial_conditions_adiabatic_vexp(pem_vexp):
    """Test NcHIPertEM adiabatic initial conditions over a range of tolerances."""
    pem, vexp = pem_vexp
    state = Ncm.CSQ1DState.new()
    for prec in np.geomspace(1.0e-14, 1.0e-6, 10):
        limit_found, t_adiab = pem.find_adiab_time_limit(
            vexp, vexp.tau_min(), pem.get_vacuum_max_time(), prec
        )
        assert limit_found
        # Getting the adiabatic solution
        state, _alpha_reltol, _dgamma_reltol = pem.compute_adiab(vexp, t_adiab, state)
        pem.change_frame(vexp, state, Ncm.CSQ1DFrame.ORIG)
        phi_vec, Pphi_vec = state.get_phi_Pphi()
        phi = phi_vec[0] + 1.0j * phi_vec[1]
        Pphi = Pphi_vec[0] + 1.0j * Pphi_vec[1]
        # Sanity check only: the field and its momentum must come out finite.
        assert np.isfinite(phi)
        assert np.isfinite(Pphi)
def test_evolution_vexp(pem_vexp):
    """Test the time evolution of NcHIPertEM on the Vexp background."""
    pem, vexp = pem_vexp
    state = Ncm.CSQ1DState.new()
    pem.set_tf(1.0)  # We do not want to evolve through the singularity
    pem.prepare(vexp)
    t_a, _smaller_abst = pem.get_time_array()
    for t in t_a:
        state = pem.eval_at(vexp, t, state)
        phi_vec, Pphi_vec = state.get_phi_Pphi()
        phi = phi_vec[0] + 1.0j * phi_vec[1]
        Pphi = Pphi_vec[0] + 1.0j * Pphi_vec[1]
        # Sanity check only: field, momentum and covariance matrix entries
        # must stay finite along the whole evolution.
        assert np.isfinite(abs(phi))
        assert np.isfinite(abs(Pphi))
        J11, J12, J22 = state.get_J()
        assert np.isfinite(J11)
        assert np.isfinite(J22)
        assert np.isfinite(J12)
def test_evolution_vexp_duplicate(pem_vexp):
    """Test that serialized/duplicated objects evolve like the originals."""
    pem, vexp = pem_vexp
    ser = Ncm.Serialize.new(Ncm.SerializeOpt.CLEAN_DUP)
    pem_dup = ser.dup_obj(pem)
    vexp_dup = ser.dup_obj(vexp)
    # Re-run the evolution test on the duplicated pair.
    test_evolution_vexp((pem_dup, vexp_dup))
def test_evolution_EB_vexp(pem_vexp):
    """Test that the electric and magnetic power spectra stay finite."""
    pem, vexp = pem_vexp
    pem.set_tf(1.0)  # We do not want to evolve through the singularity
    pem.prepare(vexp)
    t_a, _smaller_abst = pem.get_time_array()
    for t in t_a:
        PE, PB = pem.eval_PE_PB(vexp, t)
        assert np.isfinite(PE)
        assert np.isfinite(PB)
def _compute_tau_array(vexp, abs_tau_min=1.0e-10):
"""Compute an array of tau values for testing.
This includes a range from tau_min to -abs_tau_min and from abs_tau_min to tau_max.
This is useful since we want to test the behavior of the model around the bounce.
"""
tau_min = vexp.tau_min() + 1.0
tau_max = vexp.tau_max()
tau_a = np.concatenate(
(
np.geomspace(tau_min, -abs_tau_min, 5000),
np.geomspace(abs_tau_min, tau_max, 5000),
)
)
return tau_a
def test_interface_eval_vexp(pem_vexp):
    """Test the perturbation-interface evaluation methods on the Vexp model.

    NOTE(review): this calls the gravitational-wave interface Nc.HIPertIGW in
    a file otherwise dedicated to NcHIPertEM -- possibly copy-pasted from the
    GW test suite; confirm whether the EM interface was intended here.
    """
    _, vexp = pem_vexp
    tau_a = _compute_tau_array(vexp)
    assert np.isfinite(Nc.HIPertIGW.eval_unit(vexp))
    for tau in tau_a:
        assert np.isfinite(Nc.HIPertIGW.eval_F1(vexp, tau, 1.0))
        assert np.isfinite(Nc.HIPertIGW.eval_m(vexp, tau, 1.0))
        assert np.isfinite(Nc.HIPertIGW.eval_nu(vexp, tau, 1.0))
        assert np.isfinite(Nc.HIPertIGW.eval_xi(vexp, tau, 1.0))
        assert np.isfinite(Nc.HIPertIGW.eval_x(vexp, tau))
|
NumCosmoREPO_NAMENumCosmoPATH_START.@NumCosmo_extracted@NumCosmo-master@tests@test_py_hipert_em.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/cluster/__init__.py",
"type": "Python"
}
|
from .load import load
from pyspedas.utilities.datasets import find_datasets
from .load_csa import load_csa
from typing import List, Union, Optional
from .fgm import fgm
from .aspoc import aspoc
from .cis import cis
from .dwp import dwp
from .edi import edi
from .efw import efw
from .peace import peace
from .rapid import rapid
from .staff import staff
from .wbd import wbd
from .whi import whi
from .datasets import datasets
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@cluster@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/indicator/delta/__init__.py",
"type": "Python"
}
|
import sys
# Python < 3.7: eagerly import every validator at package import time.
if sys.version_info < (3, 7):
    from ._valueformat import ValueformatValidator
    from ._relative import RelativeValidator
    from ._reference import ReferenceValidator
    from ._position import PositionValidator
    from ._increasing import IncreasingValidator
    from ._font import FontValidator
    from ._decreasing import DecreasingValidator
else:
    # Python >= 3.7: expose the validators lazily via PEP 562 module-level
    # __getattr__/__dir__ to keep package import time low.
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._valueformat.ValueformatValidator",
            "._relative.RelativeValidator",
            "._reference.ReferenceValidator",
            "._position.PositionValidator",
            "._increasing.IncreasingValidator",
            "._font.FontValidator",
            "._decreasing.DecreasingValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@indicator@delta@__init__.py@.PATH_END.py
|
{
"filename": "yaml2avsc.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/utility/DP02/yaml2avsc.py",
"type": "Python"
}
|
import json, sys
from yaml import load, dump
from yaml import CLoader as Loader, CDumper as Dumper
def fix_type(typ):
    """Map a YAML/felis column datatype to its Avro equivalent.

    Only ``char`` needs translation (Avro has no char type); every other
    datatype name is passed through unchanged.
    """
    return 'string' if typ == 'char' else typ
def table_schema(table):
    """Convert one table description into a nullable Avro record schema.

    Each column becomes a field whose type is a union of ``null`` and the
    Avro-mapped datatype; the record itself is likewise wrapped in a
    ``["null", record]`` union so the whole table may be absent.
    """
    fields = [
        {
            'name': column['name'],
            'type': ['null', fix_type(column['datatype'])],
            'doc': column['description'],
        }
        for column in table['columns']
    ]
    record = {
        "type": "record",
        "name": table['name'],
        "fields": fields,
    }
    return {"name": table['name'], "type": ['null', record]}
def get_table(tables, table_name):
    """Look up *table_name* in *tables* and return its Avro schema.

    Returns None (after printing an error) when the table is missing, so
    callers must check the result before using it.
    """
    match = next((t for t in tables if t['name'] == table_name), None)
    if match is not None:
        return table_schema(match)
    print('ERROR: table %s not found' % table_name)
#######
# Top-level DP0.2 Avro record; table schemas are appended to "fields" below.
schema = {
    "name": "DP0.2",
    "type": "record",
    "fields": [ ]
}
# Parse the felis/YAML table descriptions arriving on stdin.
data = load(sys.stdin, Loader=Loader)
tables = data['tables']
s = get_table(tables, 'DiaObject')
schema['fields'].append(s)
s = get_table(tables, 'DiaSource')
schema['fields'].append(s)
# Nullable array of DiaSource records (type defined just above by name).
s = {
    "name": "DiaSourceList",
    "type": [ "null", { "type": "array", "items": "DiaSource" } ]
}
schema['fields'].append(s)
s = get_table(tables, 'ForcedSourceOnDiaObject')
schema['fields'].append(s)
# Nullable array of ForcedSourceOnDiaObject records.
s = {
    "name": "ForcedSourceOnDiaObjectList",
    "type": [ "null", { "type": "array", "items": "ForcedSourceOnDiaObject" } ]
}
schema['fields'].append(s)
# Emit the assembled Avro schema as pretty-printed JSON on stdout.
print(json.dumps(schema, indent=2))
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@utility@DP02@yaml2avsc.py@.PATH_END.py
|
{
"filename": "measurement_set.py",
"repo_name": "RTIP/artip",
"repo_path": "artip_extracted/artip-master/src/main/python/models/measurement_set.py",
"type": "Python"
}
|
import casac
import itertools
import numpy
from datetime import datetime, timedelta
from itertools import product
from casa.casa_runner import CasaRunner
from casa.flag_reasons import BAD_ANTENNA, BAD_ANTENNA_TIME, BAD_BASELINE_TIME, BAD_TIME
from casa.flag_recorder import FlagRecorder
from configs import config
from models.antenna import Antenna
from models.antenna_state import AntennaState
from models.baseline import Baseline
from models.phase_set import PhaseSet
from models.visibility_data import VisibilityData
class MeasurementSet:
    """Wrapper around a CASA measurement set (MS).

    Tracks flagged antennas per (polarization, scan), exposes filtered
    visibility/phase data, and records flagging decisions through
    FlagRecorder with the reasons defined in casa.flag_reasons.

    NOTE(review): this module targets Python 2 -- e.g. ``filter(...)[0]`` in
    get_antenna_by_id and reusing a ``map`` result across loop iterations in
    create_antennas rely on map/filter returning lists.
    """

    def __init__(self, dataset_path, output_path):
        self._dataset_path = dataset_path
        self.output_path = output_path
        self.casa_runner = CasaRunner(dataset_path, output_path)
        self.flag_recorder = FlagRecorder()
        self._casac = casac.casac
        self._allow_logs_above_warning_level()
        self._ms = self._casac.ms()
        self._ms.open(dataset_path)
        # NOTE(review): this assignment shadows the _all_antenna_ids() method
        # on the instance; the method is only callable before this line.
        self._all_antenna_ids = self._all_antenna_ids()
        self.flagged_antennas = self._initialize_flag_data()
        self._antennas = self.create_antennas()

    def _allow_logs_above_warning_level(self):
        # Restrict the CASA console log sink to warnings and above.
        sink = self._casac.logsink()
        sink.showconsole(True)
        sink.setglobal(True)
        sink.filter('WARN')

    def __del__(self):
        self._ms.close()

    def get_dataset_path(self):
        return self._dataset_path

    def get_output_path(self):
        return self.output_path

    def _initialize_flag_data(self):
        # One empty set of flagged antenna ids per (polarization, scan) pair.
        flag_data = {
            polarization: {scan_id: set() for scan_id in self.scan_ids()} for
            polarization in
            config.GLOBAL_CONFIGS['polarizations']}
        return flag_data

    def quack(self):
        self.casa_runner.quack()

    def _filter(self, spw, channel, polarization, filters=None):
        """Apply spw/polarization/channel selection, plus optional row filters.

        Bug fix: the ``filters`` default was a shared mutable dict ({});
        None is used instead, which behaves identically under ``if filters:``.
        """
        self._ms.selectinit(reset=True)
        self._ms.msselect({"spw": spw})
        self._ms.selectpolarization(polarization)
        self._ms.selectchannel(**channel)
        if filters: self._ms.select(filters)

    def reload(self):
        # Re-open the MS to discard any active selection state.
        self._ms.close()
        self._ms.open(self._dataset_path)

    def create_antennas(self):
        """Build Antenna objects and attach one AntennaState per (pol, scan)."""
        first_scan_id = self._ms.metadata().scannumbers()[0]
        antenna_ids = self._ms.metadata().antennasforscan(first_scan_id).tolist()
        antennas = map(lambda id: Antenna(id), antenna_ids)
        product_pol_scan_ant = []
        for polarization in config.GLOBAL_CONFIGS['polarizations']:
            scan_ids = self.scan_ids(polarization=polarization)
            product_pol_scan_ant += list(itertools.product([polarization], scan_ids, antennas))
        for polarization, scan_id, antenna in product_pol_scan_ant:
            antennaState = AntennaState(antenna.id, polarization, scan_id)
            antenna.add_state(antennaState)
        return antennas

    def _all_antenna_ids(self):
        first_scan_id = self._ms.metadata().scannumbers()[0]
        return self._ms.metadata().antennasforscan(first_scan_id).tolist()

    def antenna_ids(self, polarization=None, scan_id=None):
        return map(lambda antenna: antenna.id, self.antennas(polarization, scan_id))

    def baselines(self, polarization=None, scan_id=None):
        # Unique unordered antenna pairs (antenna1 < antenna2).
        antennaids = self.antenna_ids(polarization, scan_id)
        baselines = [Baseline(antenna1, antenna2)
                     for antenna1, antenna2 in itertools.product(antennaids, antennaids)
                     if antenna1 < antenna2]
        return baselines

    def get_antenna_by_id(self, id):
        return filter(lambda antenna: antenna.id == id, self.antennas())[0]

    def antennas(self, polarization=None, scan_id=None):
        """Return all antennas, or only those unflagged for (polarization, scan_id)."""
        if not (polarization or scan_id):
            return self._antennas
        return filter(lambda antenna: antenna.id not in self.flagged_antennas[polarization][scan_id], self._antennas)

    def get_data(self, spw, channel, polarization, filters, selection_params):
        """Select data and return the requested columns as VisibilityData."""
        ifraxis = True  # ifraxis=True always inserts a default value for missing rows
        self._filter(spw, channel, polarization, filters)
        data_items = self._ms.getdata(selection_params, ifraxis=ifraxis)
        return VisibilityData(data_items)

    def get_phase_data(self, channel, polarization, filters=None):  # To be removed
        # Bug fix: mutable default ({}) replaced by None (passed through to
        # _filter, which treats None and {} identically).
        return PhaseSet(self.get_data("0", channel, polarization, filters, ['phase'])['phase'][0][0])

    def get_field_name_for(self, field_id):
        return self._ms.metadata().fieldnames()[field_id]

    def source_ids(self):
        return self._ms.metadata().fieldsforspw(int(config.GLOBAL_CONFIGS['default_spw']))

    def _all_scan_ids(self, source_id=None):
        if source_id is None:
            scan_ids = list(self._ms.metadata().scannumbers())
        else:
            scan_ids = self._ms.metadata().scansforfield(source_id)
        return map(lambda scan_id: int(scan_id), scan_ids)

    def _get_unflagged_scan_ids_for(self, source_id, polarization):
        antenna_ids = set(self._all_antenna_ids)

        def _is_flagged(scan_id, polarization):
            # A scan counts as flagged only when every antenna is flagged.
            if not polarization: return False
            return set(self.flagged_antennas[polarization][scan_id]) == antenna_ids

        return filter(lambda scan_id: not _is_flagged(scan_id, polarization),
                      self._all_scan_ids(source_id))

    def scan_ids(self, source_ids=None, polarization=None):
        if not source_ids: source_ids = self.source_ids()
        scan_ids = []
        for source_id in source_ids:
            scan_ids += self._get_unflagged_scan_ids_for(source_id, polarization)
        return scan_ids

    def baselines_for(self, antenna, polarization, scan_id):
        """Return every baseline containing *antenna*, pairs sorted by antenna id."""
        antennas = list(self.antennas(polarization, scan_id))
        antennas.remove(antenna)
        baselines = list(itertools.product(antennas, [antenna]))

        def sort_antennas(baseline):
            sorted_baseline = tuple(sorted(list(baseline), key=lambda antenna: antenna.id))
            return sorted_baseline

        return map(sort_antennas, baselines)

    def antenna_count(self, polarization, scan_id):
        return len(self.antennas(polarization, scan_id))

    def timesforscan(self, scan_id, formatted=True):
        """Return scan times, either raw or formatted as 'ymd' strings."""
        times = self._ms.metadata().timesforscan(scan_id)
        if not formatted: return times
        quanta = self._casac.quanta()
        times_with_second = map(lambda time: str(time) + 's', times)
        return numpy.array(
            map(lambda time: quanta.time(quanta.quantity(time), form='ymd'), times_with_second)).flatten()

    def get_completely_flagged_antennas(self, polarization):
        # Antennas flagged in every scan for this polarization.
        return list(set.intersection(*self.flagged_antennas[polarization].values()))

    def make_entry_in_flag_file(self, flag_file, polarizations, scan_ids, antenna_ids):
        if antenna_ids:
            self.flag_recorder.mark_entry(flag_file,
                                          {'mode': 'manual', 'antenna': ','.join(map(str, antenna_ids)),
                                           'reason': BAD_ANTENNA, 'correlation': ','.join(map(str, polarizations)),
                                           'scan': ','.join(map(str, scan_ids))})

    def flag_antennas(self, flag_file, polarizations, scan_ids, antenna_ids):
        """Record the flag entry and update the in-memory flagged-antenna sets."""
        self.make_entry_in_flag_file(flag_file, polarizations, scan_ids, antenna_ids)
        for polarization, scan_id in product(polarizations, scan_ids):
            self.flagged_antennas[polarization][scan_id] = self.flagged_antennas[polarization][scan_id].union(
                set(antenna_ids))

    def flag_bad_antennas(self, flag_file, sources):
        """Flag every antenna state marked bad for the given sources."""
        source_scan_ids = self.scan_ids(sources)
        all_scan_ids = self.scan_ids()
        for antenna in self._antennas:
            for state in antenna.get_states(source_scan_ids):
                if state.scan_id in all_scan_ids and state.is_bad():
                    self.flag_antennas(flag_file, [state.polarization], [state.scan_id], [antenna.id])

    def _get_timerange_for_flagging(self, timerange):
        """Widen a (start, end) timerange by one second on each side."""
        datetime_format = '%Y/%m/%d/%H:%M:%S'
        time_delta = timedelta(seconds=1)
        start = datetime.strptime(timerange[0], datetime_format)
        end = datetime.strptime(timerange[1], datetime_format)
        start_with_delta = datetime.strftime(start - time_delta, datetime_format)
        end_with_delta = datetime.strftime(end + time_delta, datetime_format)
        return start_with_delta, end_with_delta

    def flag_bad_time(self, flag_file, polarization, scan_id, timerange):
        timerange_for_flagging = self._get_timerange_for_flagging(timerange)
        self.flag_recorder.mark_entry(flag_file,
                                      {'mode': 'manual', 'reason': BAD_TIME, 'correlation': polarization,
                                       'scan': scan_id, 'timerange': '~'.join(timerange_for_flagging)})

    def flag_bad_antenna_time(self, flag_file, polarization, scan_id, antenna_id, timerange):
        timerange_for_flagging = self._get_timerange_for_flagging(timerange)
        self.flag_recorder.mark_entry(flag_file,
                                      {'mode': 'manual', 'antenna': antenna_id, 'reason': BAD_ANTENNA_TIME,
                                       'correlation': polarization,
                                       'scan': scan_id, 'timerange': '~'.join(timerange_for_flagging)})

    def flag_bad_baseline_time(self, flag_file, polarization, scan_id, baseline, timerange):
        timerange_for_flagging = self._get_timerange_for_flagging(timerange)
        self.flag_recorder.mark_entry(flag_file,
                                      {'mode': 'manual', 'antenna': str(baseline), 'reason': BAD_BASELINE_TIME,
                                       'correlation': polarization,
                                       'scan': scan_id, 'timerange': '~'.join(timerange_for_flagging)})

    def get_bad_antennas_with_scans_for(self, polarization, source_id):
        """Map each flagged antenna to the list of scans it is flagged in."""
        scan_ids = self.scan_ids(source_id, polarization)
        bad_antennas_with_scans = {}
        for scan_id in scan_ids:
            bad_antennas = self.flagged_antennas[polarization][scan_id]
            for antenna in bad_antennas:
                if not antenna in bad_antennas_with_scans: bad_antennas_with_scans[antenna] = []
                bad_antennas_with_scans[antenna].append(scan_id)
        return bad_antennas_with_scans

    def split(self, output_ms, filters):
        self.casa_runner.split(output_ms, filters)

    def generate_flag_summary(self, reason):
        self.casa_runner.generate_flag_summary(reason, self.scan_ids())
|
RTIPREPO_NAMEartipPATH_START.@artip_extracted@artip-master@src@main@python@models@measurement_set.py@.PATH_END.py
|
{
"filename": "martini_simba.ipynb",
"repo_name": "kyleaoman/martini",
"repo_path": "martini_extracted/martini-main/examples/martini_simba.ipynb",
"type": "Jupyter Notebook"
}
|
In this short tutorial we will install and use [MARTINI](https://martini.readthedocs.io/en/latest/), an analysis package for creating mock HI-data cubes similar to radio interferometer data, written by Kyle Oman (kyle.a.oman@durham.ac.uk). This example uses input from the [Simba](https://ui.adsabs.harvard.edu/abs/2019MNRAS.486.2827D/abstract) simulations. The data are publicly available and hosted at [simba.roe.ac.uk](http://simba.roe.ac.uk).

MARTINI is a modular package for the creation of synthetic resolved HI line observations (data cubes) of smoothed-particle hydrodynamics simulations of galaxies. The various aspects of the mock-observing process are divided logically into sub-modules handling the data cube, source, beam, noise, spectral model and SPH kernel. MARTINI is object-oriented: each sub-module provides a class (or classes) which can be configured as desired. For most sub-modules, base classes are provided to allow for straightforward customization. Instances of each sub-module class are given as parameters to the Martini class; a mock observation is then constructed by calling a handful of functions to execute the desired steps in the mock-observing process.
This tutorial focuses on particulars related to working with the Simba simulations. More general information is available in the MARTINI documentation, [hosted on ReadTheDocs](https://martini.readthedocs.io/en/latest/).
## Installation
MARTINI requires `python3` version `3.9` or higher.
The following command will use `pip` to download and install [MARTINI from pypi](https://pypi.org/project/astromartini/):
```python
import sys
!{sys.executable} -m pip install "astromartini[simbasource]==2.1.9"
```
Requirement already satisfied: astromartini==2.1.9 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astromartini[simbasource]==2.1.9) (2.1.9)
Requirement already satisfied: numpy in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astromartini==2.1.9->astromartini[simbasource]==2.1.9) (1.26.4)
Requirement already satisfied: scipy in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astromartini==2.1.9->astromartini[simbasource]==2.1.9) (1.14.0)
Requirement already satisfied: astropy in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astromartini==2.1.9->astromartini[simbasource]==2.1.9) (6.1.1)
Requirement already satisfied: tqdm in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astromartini==2.1.9->astromartini[simbasource]==2.1.9) (4.66.4)
Requirement already satisfied: h5py in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astromartini[simbasource]==2.1.9) (3.11.0)
Requirement already satisfied: pyerfa>=2.0.1.1 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astropy->astromartini==2.1.9->astromartini[simbasource]==2.1.9) (2.0.1.4)
Requirement already satisfied: astropy-iers-data>=0.2024.5.27.0.30.8 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astropy->astromartini==2.1.9->astromartini[simbasource]==2.1.9) (0.2024.7.15.0.31.42)
Requirement already satisfied: PyYAML>=3.13 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astropy->astromartini==2.1.9->astromartini[simbasource]==2.1.9) (6.0.1)
Requirement already satisfied: packaging>=19.0 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from astropy->astromartini==2.1.9->astromartini[simbasource]==2.1.9) (24.1)
If you do not have superuser permissions or use a virtual environment, you may wish to add the --user flag.
With this command required dependencies will be fetched and installed automatically. Watch for error messages during installation. For greater control you may also install the dependencies by hand. These are: numpy, astropy, scipy and h5py.
We'll also install a few other packages used in this notebook:
```python
!{sys.executable} -m pip install requests matplotlib
```
Requirement already satisfied: requests in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (2.32.3)
Requirement already satisfied: matplotlib in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (3.9.1)
Requirement already satisfied: charset-normalizer<4,>=2 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from requests) (3.3.2)
Requirement already satisfied: idna<4,>=2.5 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from requests) (3.7)
Requirement already satisfied: urllib3<3,>=1.21.1 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from requests) (2.2.2)
Requirement already satisfied: certifi>=2017.4.17 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from requests) (2024.7.4)
Requirement already satisfied: contourpy>=1.0.1 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (1.2.1)
Requirement already satisfied: cycler>=0.10 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (0.12.1)
Requirement already satisfied: fonttools>=4.22.0 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (4.53.1)
Requirement already satisfied: kiwisolver>=1.3.1 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (1.4.5)
Requirement already satisfied: numpy>=1.23 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (1.26.4)
Requirement already satisfied: packaging>=20.0 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (24.1)
Requirement already satisfied: pillow>=8 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (10.4.0)
Requirement already satisfied: pyparsing>=2.3.1 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (3.1.2)
Requirement already satisfied: python-dateutil>=2.7 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from matplotlib) (2.9.0.post0)
Requirement already satisfied: six>=1.5 in /cosma/home/durham/dc-oman1/.virtualenv/koman/lib/python3.12/site-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)
This cell may be needed in some cases to display figures below:
```python
%matplotlib inline
```
Let's check that we can `import martini`:
```python
import martini
```
If this produces errors, you may need to restart the Python kernel of this notebook so that it sees the recently installed packages (Kernel -> Restart in the menubar).
We can run MARTINI's built-in demo to check that all of the basic functionality works:
```python
from martini import demo
demo()
```
Source module contained 500 particles with total HI mass of 5.00e+09 solMass.
Pruned particles that will not contribute to data cube, 500 particles remaining with total HI mass of 5.00e+09 solMass.
100%|██████████| 23716/23716 [00:27<00:00, 869.18it/s]
Source inserted.
Flux density in cube: 2.35e+02 Jy
Mass in cube (assuming distance 3.00 Mpc and a spatially resolved source): 5.00e+09 solMass
[100% of initial source mass]
Maximum pixel: 1.23e-03 Jy / arcsec2
Median non-zero pixel: 1.07e-09 Jy / arcsec2
Noise added.
Noise cube RMS: 1.46e-07 Jy / arcsec2 (before beam convolution).
Data cube RMS after noise addition (before beam convolution): 2.43e-05 Jy / arcsec2
Beam convolved.
Data cube RMS after beam convolution: 2.77e-02 Jy / beam
Maximum pixel: 6.61e-01 Jy / beam
Median non-zero pixel: 3.32e-05 Jy / beam
Wrote demo fits output to testcube.fits, and beam image to testbeam.fits.
Wrote demo hdf5 output to testcube.hdf5.
When run successfully, this will make a mock observation of a very simple analytic disc model and write some output to the working directory. Rather than inspect this toy model, let's look at a "real" simulation...
## Simba Data
This example uses data from the Simba simulations. These data are hosted at [simba.roe.ac.uk](http://simba.roe.ac.uk).
In this example, we will use a snapshot file [`snap_m25n512_151.hdf5`](http://simba.roe.ac.uk/simdata/m25n512/s50/snapshots/snap_m25n512_151.hdf5) (31 GB) and the corresponding galaxy catalogue file [`m25n512_151.hdf5`](http://simba.roe.ac.uk/simdata/m25n512/s50/catalogs/m25n512_151.hdf5) (325 MB). The following cell will download these directly:
```python
import os
import re
import requests
from tqdm import tqdm
snapurl = "http://simba.roe.ac.uk/simdata/m25n512/s50/snapshots/snap_m25n512_151.hdf5"
groupurl = "http://simba.roe.ac.uk/simdata/m25n512/s50/catalogs/m25n512_151.hdf5"
def chunked_download(url):
fname = url.split("/")[-1]
if os.path.isfile(fname):
print(f"File {fname} found locally, skipping download.")
else:
with requests.get(url, stream=True) as r:
r.raise_for_status()
total_size_in_bytes = int(r.headers.get("content-length", 0))
chunk_size = 8192
progress_bar = tqdm(total=total_size_in_bytes, unit="iB", unit_scale=True)
with open(fname, "wb") as f:
for chunk in r.iter_content(chunk_size=chunk_size):
progress_bar.update(len(chunk))
f.write(chunk)
return fname
groupfile = chunked_download(groupurl)
snapfile = chunked_download(snapurl)
```
File m25n512_151.hdf5 found locally, skipping download.
File snap_m25n512_151.hdf5 found locally, skipping download.
If you have downloaded the data manually, edit this cell to specify the file locations:
```python
snappath = "."
snapfile = snapfile
grouppath = "."
groupfile = groupfile
```
The `group_id` specifies the identifier of the galaxy to use as a source, and is matched against the dataset of the same name in the group catalogue. For this example, we will use group `15`, which is a galaxy with a massive neutral gas disc in snapshot `151` of simulation `m25n512`. If using another snapshot as input, you should use the group catalogue to identify an object of interest.
```python
group_id = 15
```
## Simba Example
First, import some modules from Martini, and the units module from astropy.
```python
import numpy as np
from martini.sources import SimbaSource
from martini import DataCube, Martini
from martini.beams import GaussianBeam
from martini.noise import GaussianNoise
from martini.spectral_models import GaussianSpectrum
from martini.sph_kernels import CubicSplineKernel
import astropy.units as U
```
The different martini sub-modules need to be initialized,
see the [full documentation](https://martini.readthedocs.io/en/latest/) for details of all configuration options. A few suggested best-practices specific to Simba are outlined below.
### SOURCE
The argument `aperture` controls the radial extent of a region
to load around the galaxy of interest, in physical (not comoving,
no little h) units. Using larger values will include more
foreground/background, which may be desirable, but will also slow
down execution and can impair the automatic routine used to find a
disc plane. Normally it is advisable to set this to approximately
the virial radius of the source object, or just large enough to
capture the region of interest around it (e.g. enough to encompass
the host of a satellite galaxy).
Running the following cell temporarily uses about 20 GB of system memory as entire particle arrays are read and then pruned to retain only the particles belonging to the galaxy of interest.
```python
source = SimbaSource(
snapPath=snappath,
snapName=snapfile,
groupPath=grouppath,
groupName=groupfile,
groupID=group_id,
aperture=100.0 * U.kpc,
distance=4.0 * U.Mpc,
vpeculiar=0 * U.km / U.s,
rotation={"rotmat": np.eye(3)},
ra=0.0 * U.deg,
dec=0.0 * U.deg,
)
```
The rotation argument above has been set to the identity matrix, so the source has the (random) orientation that it has within the simulation volume. The source class includes a function to make a quick plot to get an idea of the source's appearance:
```python
preview_fig_unrotated = source.preview(title="unrotated")
```

The preview function defaults to apertures in position and velocity that enclose all particles in the source, so this preview emphasizes the diffuse circumgalactic gas. The apertures can be set manually using the `lim` and `vlim` keywords to set the maximum absolute offsets in position and velocity relative to the source centre to be plotted. For example, restricting the aperture to 50kpc and 300km/s makes the disc more clearly visible.
```python
preview_fig_unrotated_zoom = source.preview(
title="unrotated, zoomed-in",
lim=50 * U.kpc,
vlim=300 * U.km / U.s,
)
```

This randomly-oriented viewing angle seems to be moderately inclined with respect to the disc. The source can be rotated to a different orientation. MARTINI's tool for quick/approximate manipulation of the orientation of the source aligns the source based on its angular momentum vector ("L"), for example:
```python
source.rotate(L_coords=(60 * U.deg, 90 * U.deg))
```
The rotation configuration takes an inclination (here 60deg) and rotation about the pole (here 90deg, relative to an arbitrary reference direction). The code attempts to
automatically align the galactic disk in the y-z plane by aligning
the angular momentum along the x-axis. The polar rotation is then
applied, and finally the disc inclined by a rotation around the
y-axis (the line of sight is along the x-axis). The automatic
alignment will work for typical reasonably isolated discs, but will
struggle when companions are present, when the angular momentum axis
is a poor tracer of the disc plane, and especially for satellites.
```python
preview_fig_rotated_zoomed_in = source.preview(
title="rotated, zoomed-in",
lim=50 * U.kpc,
vlim=300 * U.km / U.s,
)
```

If finer control of the orientation is needed, derive the transformation from the simulation box coordinates (see [the documentation](https://martini.readthedocs.io/en/latest/) for examples) to the desired coordinates for the 'observation', keeping in mind that the line of sight is along the x-axis. This rotation matrix can then be passed to the rotate function as `rotmat=np.eye(3)` (here the identity rotation matrix used as a place holder). The rotation can also be provided when the source is initialized by using the `rotation` keyword argument.
A common problem is deriving the inverse transform instead of the forward transform, if unexpected results are obtained, first try passing the transpose of the rotation matrix.
### DATACUBE
It is usually advisable to set the centre of the cube to track the
centre of the source, as illustrated below. Note that the source
systemic velocity is set according to the distance, peculiar velocity, and Hubble's law.
These values can instead be set explicitly, if desired. A datacube
with 128x128 pixels usually takes a few minutes, depending on the number of particles. 1024x1024 can take
several hours. The number of channels has less influence on the
runtime. Most of the runtime is spent when `M.insert_source_in_cube` is
called below.
```python
datacube = DataCube(
n_px_x=384,
n_px_y=384,
n_channels=50,
px_size=10.0 * U.arcsec,
channel_width=16.0 * U.km * U.s**-1,
velocity_centre=source.vsys,
ra=source.ra,
dec=source.dec,
)
```
### BEAM
It is usually advisable to set the beam size to be ~3x the pixel
size. Note that the data cube is padded according to the size of the
beam, this usually results in the number of pixel rows printed in the
progress messages to differ from the requested dimensions. The
padding is required for accurate convolution with the beam, but
contains incorrect values after convolution and is discarded to
produce the final data cube of the requested size.
```python
beam = GaussianBeam(
bmaj=30.0 * U.arcsec,
bmin=30.0 * U.arcsec,
bpa=0.0 * U.deg,
truncate=3.0,
)
```
### NOISE
The noise is normally added before convolution with the beam (as
below in this example). The rms value passed is that corresponding to the desired noise level in the final data cube, in Jy/beam or equivalent units. To obtain consistent random realisations each time the code is run, we provide a random seed (integer).
```python
noise = GaussianNoise(
rms=3.0e-8 * U.Jy * U.beam**-1,
seed=0,
)
```
### SPECTRAL MODEL
The `thermal` mode estimates the HI line width for each particle based on its properties (temperature, etc.). The 'subgrid' velocity dispersion can also be fixed to a constant value, e.g. `sigma=7 * U.km / U.s`.
```python
spectral_model = GaussianSpectrum(sigma="thermal")
```
The calculation of the spectra (that will happen when the `Martini` module is initialized below) can be done in parallel by providing a keyword argument `ncpu=N`, where `N` is the number of CPUs to use. However, the details of the implementation mean that for small numbers of particles running in parallel tends to slow down the calculation, so turning this on should be done with care. Significant speedups can be expected when the particle count is very large.
### SPH KERNEL
The Simba simulations are meshless finite mass (MFM), not smoothed particle hydrodynamics (SPH) simulations. MARTINI is strictly speaking designed for SPH simulations, but can still provide a good approximation of non-SPH simulations by representing mesh cells as SPH particles assuming their centroids and characteristic smoothing lengths. The MFM scheme in Simba uses a cubic spline kernel function (although this is not an SPH kernel), so we may as well use MARTINI's `CubicSplineKernel` module for the approximation.
```python
sph_kernel = CubicSplineKernel()
```
### MARTINI
Now set up the configuration:
```python
M = Martini(
source=source,
datacube=datacube,
beam=beam,
noise=noise,
spectral_model=spectral_model,
sph_kernel=sph_kernel,
)
```
Source module contained 9344 particles with total HI mass of 1.02e+10 solMass.
Pruned particles that will not contribute to data cube, 8661 particles remaining with total HI mass of 1.02e+10 solMass.
Similar to previewing the source, we can make a preview here. Now the extent of the datacube is overlaid with a red box.
```python
M.preview()
```


The main source insertion loop, that is the most computationally demanding step, can be run in parallel if the `multiprocess` package is installed (not to be confused with `multiprocessing`, which is normally included with python!). Edit the cell below to use more than 1 CPU core if this package is installed.
```python
ncpu = 1 # can be >1 if multiprocess package is installed
```
If we're happy with the preview, we're ready to call the functions to make the actual mock observation.
```python
M.insert_source_in_cube(ncpu=ncpu)
M.add_noise()
M.convolve_beam()
```
100%|██████████| 163216/163216 [05:48<00:00, 468.87it/s]
Source inserted.
Flux density in cube: 1.69e+02 Jy
Mass in cube (assuming distance 4.00 Mpc and a spatially resolved source): 1.02e+10 solMass
[100% of initial source mass]
Maximum pixel: 9.84e-05 Jy / arcsec2
Median non-zero pixel: 5.52e-15 Jy / arcsec2
Noise added.
Noise cube RMS: 1.46e-10 Jy / arcsec2 (before beam convolution).
Data cube RMS after noise addition (before beam convolution): 2.17e-06 Jy / arcsec2
Beam convolved.
Data cube RMS after beam convolution: 2.26e-03 Jy / beam
Maximum pixel: 9.14e-02 Jy / beam
Median non-zero pixel: 3.28e-08 Jy / beam
You may notice that the number of pixels in the progress counter differs from the number defined in the DataCube module. This is because convolution with the beam requires some padding, which is ultimately filled with nonsense and discarded.
To write the results: two output formats are available, depending on preference. Both
formats are self-documenting, via FITS header keywords and HDF5
attributes, respectively. For HDF5 output, the beam image is included
in the same file. (If you do not have h5py installed, comment out the call to `write_hdf5`.)
```python
M.write_fits("simba_martini_demo.fits")
M.write_beam_fits("simba_martini_demo_beam.fits")
M.write_hdf5("simba_martini_demo.hdf5")
```
### Inspect the results (FITS)
Let's load the FITS file that MARTINI produced and take a quick look.
```python
import numpy as np
from astropy.io import fits
from astropy.wcs import WCS
with fits.open("simba_martini_demo.fits") as f:
cube_wcs = WCS(f[0].header)
flux_cube = f[0].data * U.Unit(f[0].header["BUNIT"])
n_channels = cube_wcs.pixel_shape[cube_wcs.wcs.spec]
vch = np.array(cube_wcs.sub(axes=[3]).all_pix2world(np.arange(n_channels), 0))[
0
] * U.Unit(cube_wcs.world_axis_units[cube_wcs.wcs.spec])
vch = vch - source.vsys
```
```python
flux_cube.shape
```
(50, 384, 384)
Let's examine one of the velocity channels:
```python
import matplotlib.pyplot as plt
```
```python
fig = plt.figure(figsize=(5, 5))
plt.clf()
ax = fig.add_subplot(1, 1, 1, aspect="equal", projection=cube_wcs.celestial)
# choose units for plotting, not necessarily the units data are stored in:
flux_unit = U.Jy / U.beam
plt.imshow(flux_cube[27, ...].to_value(flux_unit), cmap="Greys")
plt.xlabel("RA")
plt.ylabel("Dec")
plt.colorbar(label=f"Flux [{flux_unit}]");
```

And do a quick plot of the first three moments:
```python
import numpy as np
# choose plotting units
mom0_unit = U.Jy / U.beam
mom1_unit = U.km / U.s
mom2_unit = U.km / U.s
rms = np.std(
flux_cube[:, :20, :20]
) # noise in a corner patch where there is little signal
clip = np.where(flux_cube > 5 * rms, 1, 0)
np.seterr(all="ignore")
fig = plt.figure(figsize=(16, 5))
plt.clf()
sp1 = fig.add_subplot(1, 3, 1, aspect="equal", projection=cube_wcs.celestial)
sp2 = fig.add_subplot(1, 3, 2, aspect="equal", projection=cube_wcs.celestial)
sp3 = fig.add_subplot(1, 3, 3, aspect="equal", projection=cube_wcs.celestial)
mom0 = np.sum(flux_cube, axis=0)
mask = np.where(mom0 > 0.002 * U.Jy / U.beam, 1, np.nan)
mom1 = np.sum(flux_cube * clip * vch[:, np.newaxis, np.newaxis], axis=0) / mom0
mom2 = np.sqrt(
np.sum(
flux_cube
* clip
* np.power(vch[:, np.newaxis, np.newaxis] - mom1[np.newaxis], 2),
axis=0,
)
/ mom0
)
im1 = sp1.imshow(mom0.to_value(mom0_unit), cmap="Greys")
plt.colorbar(im1, ax=sp1, label=f"mom0 [{mom0_unit}]")
im2 = sp2.imshow(
(mom1 * mask).to_value(mom1_unit),
cmap="RdBu_r",
vmin=-np.nanmax(np.abs(mom1 * mask)).to_value(mom1_unit),
vmax=np.nanmax(np.abs(mom1 * mask)).to_value(mom1_unit),
)
plt.colorbar(im2, ax=sp2, label=f"mom1 [{mom1_unit}]")
im3 = sp3.imshow(
(mom2 * mask).to_value(mom2_unit),
cmap="magma",
)
plt.colorbar(im3, ax=sp3, label=f"mom2 [{mom2_unit}]")
for sp in sp1, sp2, sp3:
sp.set_xlabel("RA")
sp.set_ylabel("Dec")
plt.subplots_adjust(wspace=0.3)
```

This galaxy clearly has a very nice spiral morphology in HI, a central hole, and a nice rotation-dominated velocity field. The alignment of the disc looks as expected - the inclination looks to be about 60 degrees, and the position angle is horizontal in the figure - in this case the automated orientation function has performed well, though it should never be assumed that this will always be the case!
For complete documentation, more usage examples, and further information, please take a look at the [MARTINI webpage](https://martini.readthedocs.io/en/latest/).
### Inspect the results (HDF5)
Let's load the HDF5 that MARTINI produced and take a quick look.
```python
import h5py
f = h5py.File("simba_martini_demo.hdf5", "r")
```
```python
list(f.keys())
```
['Beam',
'Dec',
'Dec_vertices',
'FluxCube',
'RA',
'RA_vertices',
'channel_mids',
'channel_vertices',
'frequency_channel_edges',
'frequency_channel_mids',
'velocity_channel_edges',
'velocity_channel_mids']
In addition to the mock cube itself (`f["FluxCube"]`), the HDF5 output contains arrays of the same size with the coordinates of the centre of each cell in the cube in RA, Dec and spectral space (`f["RA"]`,`f["Dec"]` and `f["channel_mids"]`). There are also arrays longer by one in each dimension that contain the coordinates of the corners of each cell in the cube (`f["RA_vertices"]`,`f["Dec_vertices"]` and `f["channel_vertices"]`). The latter are convenient for use with the `pcolormesh` plotting function from matplotlib, so we'll read these.
```python
flux_cube = f["FluxCube"][()] * U.Unit(f["FluxCube"].attrs["FluxCubeUnit"])
ra_vertices = f["RA_vertices"][()] * U.Unit(f["RA_vertices"].attrs["Unit"])
dec_vertices = f["Dec_vertices"][()] * U.Unit(f["RA_vertices"].attrs["Unit"])
spec_vertices = f["channel_vertices"][()] * U.Unit(f["channel_vertices"].attrs["Unit"])
vch = (
f["velocity_channel_mids"][()] * U.Unit(f["velocity_channel_mids"].attrs["Unit"])
- source.vsys
)
f.close()
```
The RA range of our cube straddles the RA=0 boundary, let's shift the `ra_vertices` to a (-180, 180] range for plotting:
```python
ra_vertices = np.where(
ra_vertices > 180 * U.deg, ra_vertices - 360 * U.deg, ra_vertices
)
```
```python
flux_cube.shape
```
(384, 384, 50)
Let's examine one of the velocity channels:
```python
import matplotlib.pyplot as plt
```
```python
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1, aspect="equal")
channel_slice = np.s_[:, :, 15] # a slice of the data cube containing one channel
# choose units for plotting, not necessarily the units data are stored in:
ra_unit = U.deg
dec_unit = U.deg
flux_unit = U.Jy / U.beam
plt.pcolormesh(
ra_vertices[channel_slice].to_value(ra_unit),
dec_vertices[channel_slice].to_value(dec_unit),
flux_cube[channel_slice].to_value(flux_unit),
cmap="Greys",
)
ax.set_xlabel(f"RA [{ra_unit}]")
ax.set_ylabel(f"Dec [{dec_unit}]")
ax.set_xlim(ax.get_xlim()[::-1])
plt.colorbar(label=f"Flux [{flux_unit}]");
```

And do a quick plot of the first three moments:
```python
import numpy as np
# choose units for plotting, not necessarily the units data are stored in:
ra_unit = U.deg
dec_unit = U.deg
mom0_unit = U.Jy / U.beam
mom1_unit = U.km / U.s
mom2_unit = U.km / U.s
np.seterr(all="ignore")
fig = plt.figure(figsize=(16, 5))
sp1 = fig.add_subplot(1, 3, 1, aspect="equal")
sp2 = fig.add_subplot(1, 3, 2, aspect="equal")
sp3 = fig.add_subplot(1, 3, 3, aspect="equal")
rms = np.std(
flux_cube[:16, :16]
) # noise in a corner patch where there is little signal
clip = np.where(flux_cube > 5 * rms, 1, 0)
mom0 = np.sum(flux_cube, axis=-1)
mask = np.where(mom0 > 0.002 * U.Jy / U.beam, 1, np.nan)
mom1 = np.sum(flux_cube * clip * vch, axis=-1) / mom0
mom2 = np.sqrt(
np.sum(flux_cube * clip * np.power(vch - mom1[..., np.newaxis], 2), axis=-1) / mom0
)
im1 = sp1.pcolormesh(
ra_vertices[..., 0].to_value(
ra_unit
), # pick one channel, coordinates are the same in all of them
dec_vertices[..., 0].to_value(
dec_unit
), # pick one channel, coordinates are the same in all of them
mom0.to_value(mom0_unit),
cmap="Greys",
)
plt.colorbar(im1, ax=sp1, label=f"mom0 [{mom0_unit}]")
im2 = sp2.pcolormesh(
ra_vertices[..., 0].to_value(
ra_unit
), # pick one channel, coordinates are the same in all of them
dec_vertices[..., 0].to_value(
dec_unit
), # pick one channel, coordinates are the same in all of them
(mom1 * mask).to_value(mom1_unit),
cmap="RdBu_r",
vmin=-np.nanmax(np.abs(mom1 * mask)).to_value(mom1_unit),
vmax=np.nanmax(np.abs(mom1 * mask)).to_value(mom1_unit),
)
plt.colorbar(im2, ax=sp2, label=f"mom1 [{mom1_unit}]")
im3 = sp3.pcolormesh(
ra_vertices[..., 0].to_value(
ra_unit
), # pick one channel, coordinates are the same in all of them
dec_vertices[..., 0].to_value(
dec_unit
), # pick one channel, coordinates are the same in all of them
(mom2 * mask).to_value(mom2_unit),
cmap="magma",
vmin=0,
)
plt.colorbar(im3, ax=sp3, label=f"mom2 [{mom2_unit}]")
for sp in sp1, sp2, sp3:
sp.set_xlabel(f"RA [{ra_unit}]")
sp.set_ylabel(f"Dec [{dec_unit}]")
sp.set_xlim(sp.get_xlim()[::-1])
plt.subplots_adjust(wspace=0.3)
```

This galaxy clearly has a very nice spiral morphology in HI, a central hole, and a nice rotation-dominated velocity field. The alignment of the disc looks as expected - the inclination looks to be about 60 degrees, and the position angle is horizontal in the figure - in this case the automated orientation function has performed well, though it should never be assumed that this will always be the case!
For complete documentation, more usage examples, and further information, please take a look at the [MARTINI webpage](https://martini.readthedocs.io/en/latest/).
|
kyleaomanREPO_NAMEmartiniPATH_START.@martini_extracted@martini-main@examples@martini_simba.ipynb@.PATH_END.py
|
{
"filename": "optical_depth.py",
"repo_name": "annehutter/grid-model",
"repo_path": "grid-model_extracted/grid-model-master/analysis_tools/optical_depth.py",
"type": "Python"
}
|
import sys
import os
import numpy as np
from numpy.random import random
import math as m
import matplotlib as m
m.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.ticker as mt
from matplotlib import gridspec
from matplotlib import rc
from grid import *
import read_parameterfile as rp
import read_fields as rf
import observations as ob
def compute_meanIon(infile, isPadded, inputIsDouble, gridsize):
    """Return the volume-averaged ionization fraction of the grid in *infile*."""
    # Average in double precision to limit accumulation error on large grids.
    ionization_grid = rf.read_ion(infile, isPadded, inputIsDouble, gridsize)
    return np.mean(ionization_grid, dtype=np.float64)
def compute_meanMassIon(infile, densfile, double_precision, isPadded, inputIsDouble, gridsize):
    """Return the density-weighted (mass-averaged) ionization fraction.

    The ionization grid is weighted by the density grid read from *densfile*.
    """
    ionization_grid = rf.read_ion(infile, isPadded, inputIsDouble, gridsize)
    density_grid = rf.read_dens(densfile, isPadded, double_precision, gridsize)
    weighted_mean = np.mean(ionization_grid * density_grid, dtype=np.float64)
    return weighted_mean / np.mean(density_grid, dtype=np.float64)
def get_X(X, redshift, z):
    """Linearly interpolate the tabulated history X(redshift) at *z*.

    *redshift* is expected in decreasing order.  For z at or above the
    first tabulated redshift the result is 0; for z below every tabulated
    redshift it is 1; otherwise the two bracketing table entries are
    linearly interpolated.
    """
    if z >= redshift[0]:
        return 0.
    # First table index whose redshift z has dropped below (0 if none found).
    index = next((i for i in range(len(X)) if z > redshift[i]), 0)
    if index == 0:
        # z lies below every tabulated redshift: history is complete.
        return 1.
    slope = (X[index] - X[index-1]) / (redshift[index] - redshift[index-1])
    return X[index-1] + slope * (z - redshift[index-1])
# --- Command-line arguments -------------------------------------------------
# argv[1]: parameter (ini) file, argv[2]: double-precision input flag,
# argv[3]: basename for the outputs (.dat and .png are appended below).
inifile = sys.argv[1]
inputIsDouble = np.int32(sys.argv[2])
outputfile = sys.argv[3]
# Parse the ini file once; rp.identify_* then extract individual entries.
lines = rp.read_inifile(inifile)
redshiftfile = rp.identify_string(lines, rp.redshiftfile_str, rp.splitting_str) #sys.argv[4]
simulationtype = rp.identify_string(lines, rp.simulationtype_str, rp.splitting_str)
if(simulationtype == "EVOLVE_BY_SNAPSHOT"):
    snapshotstart = rp.identify_int(lines, rp.snapshotstart_str, rp.splitting_str)
else:
    snapshotstart = 0
ionfile = rp.identify_string(lines, rp.ionfile_str, rp.splitting_str) #sys.argv[1]
densfile = rp.identify_string(lines, rp.densfile_str, rp.splitting_str)
double_precision = rp.identify_int(lines, rp.double_precision_str, rp.splitting_str)
isPadded = rp.identify_int(lines, rp.padded_str, rp.splitting_str) #np.int32(sys.argv[3])
# Padded grids carry isPadded sub-cells per cell; the per-dimension scaling
# factor is the cube root.
isPadded_factor = isPadded**(1./3.)
if(isPadded != 0):
    gridsize = np.int32(rp.identify_int(lines, rp.gridsize_str, rp.splitting_str)/isPadded_factor)
    boxsize = rp.identify_float(lines, rp.boxsize_str, rp.splitting_str)/isPadded_factor
else:
    gridsize = rp.identify_int(lines, rp.gridsize_str, rp.splitting_str)
    boxsize = rp.identify_float(lines, rp.boxsize_str, rp.splitting_str)
solve_he = rp.identify_int(lines, rp.solve_he_str, rp.splitting_str)
# NOTE(review): the next two reads are redundant -- they are repeated
# verbatim inside the solve_he == 1 branch that follows.
HeIIionfile = rp.identify_string(lines, rp.HeIIionfile_str, rp.splitting_str)
HeIIIionfile = rp.identify_string(lines, rp.HeIIIionfile_str, rp.splitting_str)
if(solve_he == 1):
    HeIIionfile = rp.identify_string(lines, rp.HeIIionfile_str, rp.splitting_str)
    HeIIIionfile = rp.identify_string(lines, rp.HeIIIionfile_str, rp.splitting_str)
# NOTE(review): unpack='True' passes a string, not the boolean True; it
# happens to work because any non-empty string is truthy.
redshift, snap = np.loadtxt(redshiftfile, unpack='True', skiprows=0, usecols=(0,1))
# Cosmological parameters from the ini file.
hubble_h = rp.identify_float(lines, rp.h_str, rp.splitting_str)
omega_b = rp.identify_float(lines, rp.omega_b_str, rp.splitting_str)
omega_l = rp.identify_float(lines, rp.omega_l_str, rp.splitting_str)
omega_m = rp.identify_float(lines, rp.omega_m_str, rp.splitting_str)
Y = rp.identify_float(lines, rp.Y_str, rp.splitting_str)
# Physical constants in cgs units.
clight = 3.0e10      # speed of light [cm/s]
G = 6.67408e-8       # gravitational constant [cm^3 g^-1 s^-2]
mp = 1.673e-24       # proton mass [g]
sigmaT = 6.65e-25    # Thomson cross section [cm^2]
Mpc_cm = 3.0857e24   # one Mpc in cm
H0 = hubble_h * 1.e7 / Mpc_cm   # Hubble constant [1/s] (h * 100 km/s/Mpc in cgs)
# Global ionization history produced by a previous analysis step; it is
# expected next to the requested output file.
histionfile = os.path.dirname(os.path.abspath(outputfile)) + '/hist_ion.dat'
print "\n--------------------------------"
print "Computing electron optical depth"
print "--------------------------------"
# hist_ion.dat columns: redshift, volume-averaged fractions (XHII[, XHeII,
# XHeIII]) and mass-averaged fractions (XmHII[, XmHeII, XmHeIII]).
if(solve_he == 1):
    redshift, XHII, XHeII, XHeIII, XmHII, XmHeII, XmHeIII = np.loadtxt(histionfile, unpack=True, usecols=(0,1,2,3,4,5,6))
else:
    redshift, XHII, XmHII = np.loadtxt(histionfile, unpack=True, usecols=(0,1,2))
    # Without a helium solution, let He II track H II and assume no He III.
    XHeII = XHII
    XHeIII = np.zeros(len(XHII))
# Constant prefactor of the optical-depth integrand (cgs units): combines
# the Thomson cross section sigmaT with the mean baryon density expressed
# through H0, omega_b, G, mp and omega_m.
prefactor = clight * sigmaT * H0 * omega_b / (4. * np.pi * G * mp * omega_m)
# Regular redshift grid from z = 0 up to one unit above the last snapshot.
dz = 0.2
zhigh = np.max(redshift) + 1
nbins = np.int32(zhigh / dz + 1)
z = dz * np.arange(nbins)
tau = np.zeros(nbins)
xHII = np.zeros(nbins)
xHeII = np.zeros(nbins)
xHeIII = np.zeros(nbins)
# Interpolate the history onto the regular grid.  The mass-averaged
# history (Xm*) is preferred; if its first entry is non-positive, fall
# back to the volume-averaged one (X*).
if(XmHII[0] <= 0.):
    for i in range(nbins):
        xHII[i] = get_X(XHII, redshift, z[i])
        if(solve_he == 1):
            xHeII[i] = get_X(XHeII, redshift, z[i])
        else:
            xHeII[i] = xHII[i]
        # Helium is taken to be doubly ionized at z <= 3.
        if(z[i] <=3.):
            xHeIII[i] = 1.
        else:
            xHeIII[i] = 0.
else:
    for i in range(nbins):
        xHII[i] = get_X(XmHII, redshift, z[i])
        if(solve_he == 1):
            xHeII[i] = get_X(XmHeII, redshift, z[i])
        else:
            xHeII[i] = xHII[i]
        # Helium is taken to be doubly ionized at z <= 3.
        if(z[i] <=3.):
            xHeIII[i] = 1.
        else:
            xHeIII[i] = 0.
# Cumulative sum over redshift bins.  The electron count per baryon is
# xHII*(1-Y) for hydrogen plus (xHeII + 2*xHeIII)*Y/4 for helium, and the
# line-of-sight measure enters through the difference of the
# E(z) = sqrt(omega_m (1+z)^3 + omega_l) terms.  Note tau[nbins-1] is
# never assigned and stays 0.
for i in range(nbins-1):
    termi = (omega_m * (1. + z[i])**3 + omega_l)**0.5
    termf = (omega_m * (1. + z[i+1])**3 + omega_l)**0.5
    if(i > 0):
        tau[i] = tau[i-1] + prefactor * (xHII[i] * (1.-Y) + (xHeII[i] + 2. * xHeIII[i]) * 0.25 * Y) * (termf - termi)
    else:
        tau[i] = prefactor * (xHII[i] * (1.-Y) + (xHeII[i] + 2. * xHeIII[i]) * 0.25 * Y) * (termf - termi)
# Write the tabulated tau(z) next to the requested output basename.
np.savetxt(outputfile + '.dat',np.c_[z, tau])
#----------------------------------------------
#----------------------------------------------
# Plot tau(z) against the observed value (ob.tau) and its limits.
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams.update({'figure.subplot.hspace':0.0, 'figure.subplot.bottom':0.1, 'figure.subplot.top':0.95, 'figure.subplot.left':0.1, 'figure.subplot.right':0.97})
plt.rcParams["figure.figsize"] = (6,4)
fig = plt.figure()
plt.subplots_adjust(wspace=0.0, hspace=0.1)
#----------------------------------------------
ax1 = plt.subplot()
# Choose x-axis major/minor tick spacings from the plotted redshift range.
redshift_range = z[len(z)-1] - z[0]
print redshift_range
if(redshift_range <= 1.0):
    xmaxL = 0.1
    xminL = 0.02
elif(redshift_range <= 5.0):
    xmaxL = 0.5
    xminL = 0.1
elif(redshift_range <= 10.0):
    xmaxL = 1.
    xminL = 0.5
else:
    xmaxL = 2.
    xminL = 0.5
print tau
print tau[0]
print tau[len(tau)-1]
# Choose y-axis tick spacings and tick-label format from the tau range.
# The last tau entry is never filled in (the accumulation loop stops at
# nbins-1), hence the len(tau)-2 index here.
tau_range = tau[len(tau)-2] - tau[0]
print tau_range
if(tau_range <= 0.02):
    ymaxL = 0.005
    yminL = 0.001
    ystring = '%0.3f'
elif(tau_range <= 0.05):
    ymaxL = 0.01
    yminL = 0.002
    ystring = '%0.3f'
elif(tau_range <= 0.1):
    ymaxL = 0.01
    yminL = 0.005
    ystring = '%0.3f'
else:
    ymaxL = 0.1
    yminL = 0.05
    ystring = '%0.2f'
print ymaxL
print yminL
majorLocator = mt.MultipleLocator(xmaxL)
majorFormatter = mt.FormatStrFormatter('%0.1f')
minorLocator = mt.MultipleLocator(xminL)
ymajorLocator = mt.MultipleLocator(ymaxL)
ymajorFormatter = mt.FormatStrFormatter(ystring)
yminorLocator = mt.MultipleLocator(yminL)
# Model curve plus the observed tau (dashed) and its upper/lower limits
# (shaded band), both taken from the observations module.
ax1.plot(z, tau, linestyle='-', color='black', label=None)
ax1.plot(z, ob.tau*np.ones(len(z)), color='gray', linestyle='--', label='Planck 2018')
ax1.fill_between(z, ob.tau_lowlim*np.ones(len(z)), ob.tau_uplim*np.ones(len(z)), facecolor='gray', alpha=0.5)
ax1.yaxis.set_major_locator(ymajorLocator)
ax1.yaxis.set_major_formatter(ymajorFormatter)
ax1.yaxis.set_minor_locator(yminorLocator)
ax1.xaxis.set_major_locator(majorLocator)
ax1.xaxis.set_major_formatter(majorFormatter)
ax1.xaxis.set_minor_locator(minorLocator)
ax1.set_xlabel('Redshift z')
ax1.set_ylabel('Optical depth $\\tau$')
# Drop the (never computed) last bin from the plotted x-range.
ax1.set_xlim((0., z[len(z)-2]))
plt.legend(bbox_to_anchor=(0.68, 0.2), loc=2, borderaxespad=0., frameon=True, labelspacing=0.4, handlelength=3, prop={'size':11})
#----------------------------------------------
fig.savefig(outputfile + '.png', format='png', dpi=512)#, transparent=True)
|
annehutterREPO_NAMEgrid-modelPATH_START.@grid-model_extracted@grid-model-master@analysis_tools@optical_depth.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "Vital-Fernandez/lime",
"repo_path": "lime_extracted/lime-master/docs/source/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# Adding library path to the compilation for the autodoc documentation
import sys
import os
import shutil
from pathlib import Path
def all_but_ipynb(dir, contents):
    """Ignore-callback (shutil.copytree style): return the entries of
    *contents* that are plain files in *dir* NOT ending in ".py".

    NOTE(review): despite the name, this filters on the ".py" suffix,
    not ".ipynb" -- confirm the name reflects the intent.
    """
    return [
        entry
        for entry in contents
        if os.path.isfile(os.path.join(dir, entry)) and not entry.endswith(".py")
    ]
def create_rst_from_changelog(input_file, output_file):
    """Convert the plain-text changelog *input_file* into reST at *output_file*.

    Lines beginning with "LiMe" become version headings, "-" bullets
    become reST bullets, other non-empty lines are emphasised, and blank
    lines are kept blank.
    """
    with open(input_file, 'r') as src:
        raw_lines = src.readlines()

    # The rst document always starts with a fixed top-level title.
    output = ["Changelog\n", "=========\n\n"]
    for raw in raw_lines:
        stripped = raw.strip()
        if stripped.startswith("LiMe"):
            # "<name> - <version> - <date>"  ->  "<version> <name> (<date>)"
            name_part, number_part, date_part = raw.split('-')
            heading = f'{number_part.strip()} {name_part.strip()} ({date_part.strip()})\n'
            output.append(heading)
            # Underline length matches the heading (including its newline).
            output.append(f"{'-' * len(heading)}\n\n")
        elif stripped.startswith("-"):
            output.append(f"* {stripped[1:].strip()}\n")
        elif stripped:
            output.append(f"**{stripped}**\n")
        else:
            output.append("\n")

    with open(output_file, 'w') as dst:
        dst.writelines(output)
# Resolve repository-relative paths: package sources, this documentation
# source folder, and the examples tree that gets copied into it below.
_lib_path = Path(__file__).parents[2]/'src'
_doc_folder = Path(__file__).parents[2]/'docs/source'
_examples_path = Path(__file__).parents[2]/'examples'
# Make the package and the example scripts importable for autodoc/nbsphinx.
sys.path.append(_lib_path.as_posix())
sys.path.append(_examples_path.as_posix())
# -- Project information -----------------------------------------------------
project = 'lime'
copyright = '2021, Vital-Fernandez'
author = 'Vital-Fernandez'
# The full version, including alpha/beta/rc tags
release = '1.3.0'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.duration',
    'sphinx.ext.doctest',
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.imgmath',  # fix: was listed twice; duplicate entry removed
    'matplotlib.sphinxext.plot_directive',
    'nbsphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates', '_build']
# Document members in the order they appear in the source file.
autodoc_member_order = 'bysource'
autodoc_default_options = {"imported-members": True}
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
imgmath_latex_preamble = r'\usepackage[active]{preview}' # + other custom stuff for inline math, such as non-default math fonts etc.
imgmath_use_preview = True
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Refresh the example material shipped with the docs: drop any stale copies,
# then re-copy the examples tree into the documentation source folder.
shutil.rmtree(_doc_folder/'images', ignore_errors=True)
shutil.rmtree(_doc_folder/'inputs', ignore_errors=True)
shutil.rmtree(_doc_folder/'outputs', ignore_errors=True)
shutil.rmtree(_doc_folder/'sample_data', ignore_errors=True)
shutil.rmtree(_doc_folder/'tutorials', ignore_errors=True)
shutil.copytree(_examples_path, _doc_folder, dirs_exist_ok=True)
# Compile the changelog page
input_txt_changelog = _lib_path/'lime/changelog.txt'  # Path to the changelog file
output_rst_changelog = _doc_folder/'introduction/changelog.rst'  # Output rst file
create_rst_from_changelog(input_txt_changelog, output_rst_changelog)
|
Vital-FernandezREPO_NAMElimePATH_START.@lime_extracted@lime-master@docs@source@conf.py@.PATH_END.py
|
{
"filename": "full_W_complex.py",
"repo_name": "ratt-ru/CubiCal",
"repo_path": "CubiCal_extracted/CubiCal-master/cubical/kernels/full_W_complex.py",
"type": "Python"
}
|
# CubiCal: a radio interferometric calibration suite
# (c) 2017 Rhodes University & Jonathan S. Kenyon
# http://github.com/ratt-ru/CubiCal
# This code is distributed under the terms of GPLv2, see LICENSE.md for details
"""
Cython kernels for the robust 2x2 complex gain machine. Functions require output
arrays to be provided. Common dimensions of arrays are:
+----------------+------+
| Dimension | Size |
+================+======+
| Direction | d |
+----------------+------+
| Model | m |
+----------------+------+
| Time | t |
+----------------+------+
| Time Intervals | ti |
+----------------+------+
| Frequency | f |
+----------------+------+
| Freq Intervals | fi |
+----------------+------+
| Antenna | a |
+----------------+------+
| Correlation | c |
+----------------+------+
"""
from builtins import range
import numpy as np
from numba import jit, prange
import cubical.kernels
from cubical.kernels import generics
from cubical.kernels import full_complex
# Run kernels with numba's parallel loop support only when more than one OMP
# thread is available (passed to @jit(parallel=...) below).
use_parallel = True if cubical.kernels.num_omp_threads > 1 else False
# Whether numba should cache compiled kernels (passed to @jit(cache=...) below).
use_cache = cubical.kernels.use_cache

# defines memory layout of model-like arrays (axis layout is NDxNMxNTxNFxNAxNAxNCxNC)
_model_axis_layout = [4,5,1,2,3,0,6,7]  # layout is AAMTFD

# defines memory layout of gain-like arrays (axis layout is NDxNTxNFxNAxNCxNC)
_gain_axis_layout = [3,1,2,0,4,5]  # layout is ATFD

# defines memory layout of flag-like arrays (axis layout is NTxNFxNAxNA)
_flag_axis_layout = [2,3,0,1]  # layout is AATF
def allocate_vis_array(shape, dtype, zeros=False):
    """Return a visibility array of the given shape and dtype, laid out in the
    preferred memory order for model-like arrays."""
    return cubical.kernels.allocate_reordered_array(
        shape, dtype, _model_axis_layout, zeros=zeros)
def allocate_gain_array(shape, dtype, zeros=False):
    """Return a gain array of the given shape and dtype, laid out in the
    preferred memory order for gain-like arrays."""
    layout = _gain_axis_layout
    return cubical.kernels.allocate_reordered_array(shape, dtype, layout, zeros=zeros)
def allocate_flag_array(shape, dtype, zeros=False):
    """Return a flag array of the given shape and dtype, laid out in the
    preferred memory order for flag-like arrays."""
    return cubical.kernels.allocate_reordered_array(
        shape, dtype, _flag_axis_layout, zeros=zeros)
# Parameter arrays share the gain-array layout.
allocate_param_array = allocate_gain_array

# The operations below are identical to the unweighted full-complex machine,
# so they are re-exported here under this module's name.

# compute_residual is identical to the general full complex case.
compute_residual = full_complex.compute_residual

# compute_jh is identical to the general full complex case.
compute_jh = full_complex.compute_jh

# compute_update is identical to the general full complex case.
compute_update = full_complex.compute_update

# compute_corrected is identical to the general full complex case.
compute_corrected = full_complex.compute_corrected

# apply_gains is identical to the general full complex case.
apply_gains = full_complex.apply_gains

# right_multiply_gains is identical to the general full complex case.
right_multiply_gains = full_complex.right_multiply_gains

# Map the J^H.J inversion method to a generic 2x2 inversion.
compute_jhjinv = generics.compute_2x2_inverse

# Map gain inversion to the same generic 2x2 inverse.
invert_gains = generics.compute_2x2_inverse
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def compute_jhwr(jh, r, w, jhwr, t_int, f_int):
    r"""
    Given J\ :sup:`H`, the residual (or observed data, in special cases) and the
    robust weights, computes the weighted product J\ :sup:`H`\WR. The result is
    accumulated (via ``+=``) into ``jhwr`` over solution intervals, so the
    caller presumably zeroes ``jhwr`` beforehand — confirm at call sites.

    Args:
        jh (np.complex64 or np.complex128):
            Typed memoryview of J\ :sup:`H` array with dimensions (d, m, t, f, a, a, c, c).
        r (np.complex64 or np.complex128):
            Typed memoryview of residual array with dimensions (m, t, f, a, a, c, c).
        w (np.complex64 or np.complex128):
            Typed memoryview of weight array with dimensions (m, t, f, a, a, 1).
        jhwr (np.complex64 or np.complex128):
            Typed memoryview of output J\ :sup:`H`\WR array with dimensions
            (d, ti, fi, a, c, c). Updated in place.
        t_int (int):
            Number of time slots per solution interval.
        f_int (int):
            Number of frequencies per solution interval.
    """
    n_dir = jh.shape[0]
    n_mod = jh.shape[1]
    n_tim = jh.shape[2]
    n_fre = jh.shape[3]
    n_ant = jh.shape[4]
    # All ordered antenna pairs (aa, ab) with aa != ab; parallelised over below.
    all_bls = np.array([[i,j] for i in range(n_ant) for j in range(n_ant) if i!=j], dtype=np.int32)
    n_bl = all_bls.shape[0]
    # Lookup tables mapping each time/frequency slot to its solution-interval index.
    broadcast_times = np.array([t//t_int for t in range(n_tim)])
    broadcast_freqs = np.array([f//f_int for f in range(n_fre)])
    for ibl in prange(n_bl):
        aa, ab = all_bls[ibl][0], all_bls[ibl][1]
        for i in range(n_mod):
            for t in range(n_tim):
                bt = broadcast_times[t]
                for f in range(n_fre):
                    bf = broadcast_freqs[f]
                    for d in range(n_dir):
                        # 2x2 residual block for baseline (aa, ab).
                        r00 = r[i,t,f,aa,ab,0,0]
                        r01 = r[i,t,f,aa,ab,0,1]
                        r10 = r[i,t,f,aa,ab,1,0]
                        r11 = r[i,t,f,aa,ab,1,1]
                        # 2x2 J^H block, indexed with the antennas swapped.
                        jhh00 = jh[d,i,t,f,ab,aa,0,0]
                        jhh01 = jh[d,i,t,f,ab,aa,0,1]
                        jhh10 = jh[d,i,t,f,ab,aa,1,0]
                        jhh11 = jh[d,i,t,f,ab,aa,1,1]
                        # Scalar robust weight for this visibility.
                        w0 = w[i,t,f,aa,ab,0]
                        # Accumulate w * (R . J^H) into interval (bt, bf).
                        jhwr[d,bt,bf,aa,0,0] += (r00*jhh00 + r01*jhh10)*w0
                        jhwr[d,bt,bf,aa,0,1] += (r00*jhh01 + r01*jhh11)*w0
                        jhwr[d,bt,bf,aa,1,0] += (r10*jhh00 + r11*jhh10)*w0
                        jhwr[d,bt,bf,aa,1,1] += (r10*jhh01 + r11*jhh11)*w0
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def compute_jhwj(jh, w, jhwj, t_int, f_int):
    r"""
    Given J\ :sup:`H`, computes the weighted J\ :sup:`H`\WJ blocks, accumulated
    (via ``+=``) over solution intervals. This is an approximation of the
    Hessian. The caller presumably zeroes ``jhwj`` beforehand — confirm at
    call sites.

    Args:
        jh (np.complex64 or np.complex128):
            Typed memoryview of J\ :sup:`H` array with dimensions (d, m, t, f, a, a, c, c).
        w (np.complex64 or np.complex128):
            Typed memoryview of weight array with dimensions (m, t, f, a, a, 1).
        jhwj (np.complex64 or np.complex128):
            Typed memoryview of output J\ :sup:`H`\WJ array with dimensions
            (d, ti, fi, a, c, c). Updated in place.
        t_int (int):
            Number of time slots per solution interval.
        f_int (int):
            Number of frequencies per solution interval.
    """
    n_dir = jh.shape[0]
    n_mod = jh.shape[1]
    n_tim = jh.shape[2]
    n_fre = jh.shape[3]
    n_ant = jh.shape[4]
    # All ordered antenna pairs (aa, ab) with aa != ab; parallelised over below.
    all_bls = np.array([[i,j] for i in range(n_ant) for j in range(n_ant) if i!=j], dtype=np.int32)
    n_bl = all_bls.shape[0]
    # Lookup tables mapping each time/frequency slot to its solution-interval index.
    broadcast_times = np.array([t//t_int for t in range(n_tim)])
    broadcast_freqs = np.array([f//f_int for f in range(n_fre)])
    for ibl in prange(n_bl):
        aa, ab = all_bls[ibl][0], all_bls[ibl][1]
        for i in range(n_mod):
            for t in range(n_tim):
                bt = broadcast_times[t]
                for f in range(n_fre):
                    bf = broadcast_freqs[f]
                    for d in range(n_dir):
                        # 2x2 J^H block at (ab, aa).
                        j00 = jh[d,i,t,f,ab,aa,0,0]
                        j01 = jh[d,i,t,f,ab,aa,0,1]
                        j10 = jh[d,i,t,f,ab,aa,1,0]
                        j11 = jh[d,i,t,f,ab,aa,1,1]
                        # jh## hold its conjugate transpose (note the swapped
                        # 01/10 indices plus conjugation).
                        jh00 = jh[d,i,t,f,ab,aa,0,0].conjugate()
                        jh01 = jh[d,i,t,f,ab,aa,1,0].conjugate()
                        jh10 = jh[d,i,t,f,ab,aa,0,1].conjugate()
                        jh11 = jh[d,i,t,f,ab,aa,1,1].conjugate()
                        # Scalar robust weight for this visibility.
                        w0 = w[i,t,f,aa,ab,0]
                        # Accumulate w * (block^H . block) into interval (bt, bf).
                        jhwj[d,bt,bf,aa,0,0] += (jh00*j00 + jh01*j10)*w0
                        jhwj[d,bt,bf,aa,0,1] += (jh00*j01 + jh01*j11)*w0
                        jhwj[d,bt,bf,aa,1,0] += (jh10*j00 + jh11*j10)*w0
                        jhwj[d,bt,bf,aa,1,1] += (jh10*j01 + jh11*j11)*w0
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def compute_weights(r, ic, w, v, npol):
    r"""
    Updates the robust weights in place, using the expression:

        w[i] = (v + npol) / (v + r[i]^H . ic . r[i])

    where r[i] is the 2x2 residual flattened to a 4-vector and ic is the 4x4
    inverse covariance. NOTE(review): an earlier docstring quoted
    (v + 2*npol)/(v + 2*r^H.cov.r); the code implements the form above, which
    the inline comment attributes to the "LB derivation" — confirm against the
    robust-solver write-up.

    Args:
        r (np.complex64 or np.complex128):
            Typed memoryview of residual array with dimensions (m, t, f, a, a, c, c).
        ic (np.complex64 or np.complex128):
            Typed memoryview of inverse covariance array with dimensions (4,4).
        w (np.complex64 or np.complex128):
            Typed memoryview of weight array with dimensions (m, t, f, a, a, 1).
            Updated in place.
        v (float):
            Degrees of freedom of the t-distribution.
        npol (float):
            Number of polarizations (correlations) in use.
    """
    n_mod = r.shape[0]
    n_tim = r.shape[1]
    n_fre = r.shape[2]
    n_ant = r.shape[3]
    # All ordered antenna pairs (aa, ab) with aa != ab; parallelised over below.
    all_bls = np.array([[i,j] for i in range(n_ant) for j in range(n_ant) if i!=j], dtype=np.int32)
    n_bl = all_bls.shape[0]
    for ibl in prange(n_bl):
        aa, ab = all_bls[ibl][0], all_bls[ibl][1]
        for i in range(n_mod):
            for t in range(n_tim):
                for f in range(n_fre):
                    # 2x2 residual block and its conjugate for this visibility.
                    r00 = r[i,t,f,aa,ab,0,0]
                    r01 = r[i,t,f,aa,ab,0,1]
                    r10 = r[i,t,f,aa,ab,1,0]
                    r11 = r[i,t,f,aa,ab,1,1]
                    rc00 = r[i,t,f,aa,ab,0,0].conjugate()
                    rc01 = r[i,t,f,aa,ab,0,1].conjugate()
                    rc10 = r[i,t,f,aa,ab,1,0].conjugate()
                    rc11 = r[i,t,f,aa,ab,1,1].conjugate()
                    # Quadratic form r^H . ic . r with the residual flattened
                    # to the 4-vector (r00, r01, r10, r11).
                    denom = (rc00*ic[0,0] + rc01*ic[1,0] + rc10*ic[2,0] + rc11*ic[3,0])*r00 + \
                            (rc00*ic[0,1] + rc01*ic[1,1] + rc10*ic[2,1] + rc11*ic[3,1])*r01 + \
                            (rc00*ic[0,2] + rc01*ic[1,2] + rc10*ic[2,2] + rc11*ic[3,2])*r10 + \
                            (rc00*ic[0,3] + rc01*ic[1,3] + rc10*ic[2,3] + rc11*ic[3,3])*r11
                    w[i,t,f,aa,ab,0] = (v + npol)/(v + denom.real) # using LB derivation
@jit(nopython=True, fastmath=True, parallel=use_parallel, cache=use_cache, nogil=True)
def compute_cov(r, ic, w):
    r"""
    Computes the un-normalised weighted covariance matrix of the visibilities,
    using the expression:

        cov = r.conj() * w . r

    with the 2x2 residual flattened to a 4-vector. The result is accumulated
    (via ``+=``) into ``ic``, which is presumably zeroed (and later inverted)
    by the caller — confirm at call sites. Only baselines with ab > aa are
    visited (upper triangle).

    Args:
        r (np.complex64 or np.complex128):
            Typed memoryview of residual array with dimensions (m, t, f, a, a, c, c).
        ic (np.complex64 or np.complex128):
            Typed memoryview of the (4,4) output covariance accumulator.
            Updated in place.
        w (np.complex64 or np.complex128):
            Typed memoryview of weight array with dimensions (m, t, f, a, a, 1).
    """
    n_mod = r.shape[0]
    n_tim = r.shape[1]
    n_fre = r.shape[2]
    n_ant = r.shape[3]
    # Upper-triangle antenna pairs only (ab > aa).
    bls = np.array([[i,j] for i in range(n_ant) for j in range(i+1, n_ant)], dtype=np.int32)
    n_bl = bls.shape[0]
    # NOTE(review): all iterations accumulate into the shared (4,4) array ic;
    # under parallel=True this relies on numba's handling of the reduction.
    for ibl in prange(n_bl):
        aa, ab = bls[ibl][0], bls[ibl][1]
        for i in range(n_mod):
            for t in range(n_tim):
                for f in range(n_fre):
                    # 2x2 residual block and its conjugate for this visibility.
                    r00 = r[i,t,f,aa,ab,0,0]
                    r01 = r[i,t,f,aa,ab,0,1]
                    r10 = r[i,t,f,aa,ab,1,0]
                    r11 = r[i,t,f,aa,ab,1,1]
                    rc00 = r[i,t,f,aa,ab,0,0].conjugate()
                    rc01 = r[i,t,f,aa,ab,0,1].conjugate()
                    rc10 = r[i,t,f,aa,ab,1,0].conjugate()
                    rc11 = r[i,t,f,aa,ab,1,1].conjugate()
                    # Only the real part of the weight scales the outer product.
                    w0 = w[i,t,f,aa,ab,0].real
                    w0r00 = w0*r00
                    w0r01 = w0*r01
                    w0r10 = w0*r10
                    w0r11 = w0*r11
                    # Accumulate the weighted 4x4 outer product conj(r) . w . r^T.
                    ic[0,0] += rc00*w0r00
                    ic[0,1] += rc00*w0r01
                    ic[0,2] += rc00*w0r10
                    ic[0,3] += rc00*w0r11
                    ic[1,0] += rc01*w0r00
                    ic[1,1] += rc01*w0r01
                    ic[1,2] += rc01*w0r10
                    ic[1,3] += rc01*w0r11
                    ic[2,0] += rc10*w0r00
                    ic[2,1] += rc10*w0r01
                    ic[2,2] += rc10*w0r10
                    ic[2,3] += rc10*w0r11
                    ic[3,0] += rc11*w0r00
                    ic[3,1] += rc11*w0r01
                    ic[3,2] += rc11*w0r10
                    ic[3,3] += rc11*w0r11
|
ratt-ruREPO_NAMECubiCalPATH_START.@CubiCal_extracted@CubiCal-master@cubical@kernels@full_W_complex.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.