metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "compare_ion.py",
"repo_name": "sirocco-rt/sirocco",
"repo_path": "sirocco_extracted/sirocco-main/py_progs/compare_ion.py",
"type": "Python"
}
|
#!/usr/bin/env python
'''
This routine is used for diagnosing issues with spectra produced during ionization cycles
Command line usage (if any):
usage: Compare.py root1 root2
Description:
This routine compares two spectral files created in
ionization cycles, specifically the log_spec_tot files
Primary routines:
compare_ion
Notes:
As run from the command line, two WCreated spectra
are compared.
One has somewhat more flexibility if run from a
Jupyter notebook
History:
211003 ksl Coding begun
'''
import sys
from astropy.io import ascii
import os
from astropy.io import ascii
import matplotlib.pyplot as plt
import numpy as np
from scipy.signal.windows import boxcar
from scipy.signal import convolve
def xsmooth(flux, smooth=21):
    '''
    Boxcar-smooth a flux array.

    Parameters
    ----------
    flux : array-like
        The values to smooth.
    smooth : int
        Width of the boxcar window; widths <= 1 leave the input untouched.

    Returns
    -------
    The smoothed array (same length as the input, 'same'-mode convolution),
    or the original array when no smoothing is requested.
    '''
    if smooth <= 1:
        return flux
    kernel = boxcar(smooth) / float(smooth)
    return convolve(flux, kernel, mode='same')
def edge(ymin=1e45, ymax=2e45):
    '''
    Draw a labeled vertical line at the Lyman edge (912 Angstroms) on the
    current plot.

    Parameters
    ----------
    ymin, ymax : float
        Vertical extent of the marker line; the label sits just above ymax.
    '''
    speed_of_light = 3e18  # Angstrom/s
    # Fix: the constant was defined but a literal 3e18 was used instead.
    nu = speed_of_light / 912  # frequency (Hz) of the 912 A Lyman edge
    plt.plot([nu, nu], [ymin, ymax], 'k')
    plt.text(nu, ymax, r'Ly', ha='center', va='bottom')
def compare_ion(one='one/xmatrix_one', two='two/yjump', col='WCreated', outfile=''):
    '''
    Compare one column of two log_spec_tot spectra and plot their difference.

    Parameters
    ----------
    one, two : str
        Root names (without the '.log_spec_tot' extension) of the two runs.
    col : str
        Column to compare, e.g. 'WCreated'.
    outfile : str
        Name of the PNG to write; derived from the two root names when ''.

    Returns None; the plot is saved to `outfile`. Prints a message and
    returns early if either input file cannot be read.
    '''
    label1 = one
    label2 = two
    if outfile == '':
        # Build the output name from the final path component of each root.
        # (Was word[len(word)-1]; negative indexing is the idiomatic form.)
        outfile = 'diff_%s_%s.png' % (one.split('/')[-1], two.split('/')[-1])
    # Catch Exception rather than a bare except so Ctrl-C / SystemExit
    # still propagate.
    try:
        xone = ascii.read(one + '.log_spec_tot')
    except Exception:
        print('Could not find ', one)
        return
    try:
        xtwo = ascii.read(two + '.log_spec_tot')
    except Exception:
        print('Could not find ', two)
        return
    # Plot smoothed nu*F_nu for each run, plus their difference.
    plt.semilogx(xone['Freq.'], xsmooth(xone['Freq.']*xone[col]), label=label1)
    plt.semilogx(xtwo['Freq.'], xsmooth(xtwo['Freq.']*xtwo[col]), label=label2)
    plt.semilogx(xone['Freq.'], (xsmooth(xone['Freq.']*xone[col])-xsmooth(xtwo['Freq.']*xtwo[col])), label='%s-%s' % (label1, label2))
    # edge()
    plt.xlim(1e15, 1e17)
    # plt.ylim(1e39,1e46)
    plt.legend()
    plt.savefig(outfile)
# Next lines permit one to run the routine from the command line
if __name__ == "__main__":
    # Fix: removed the redundant `import sys` — it is already imported at
    # the top of the file.
    if len(sys.argv) > 2:
        compare_ion(sys.argv[1], sys.argv[2])
    else:
        print('usage: Compare.py root1 root2')
|
sirocco-rtREPO_NAMEsiroccoPATH_START.@sirocco_extracted@sirocco-main@py_progs@compare_ion.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/doc/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "Glue"
copyright = "2012-2023, Chris Beaumont, Thomas Robitaille, Michelle Borkin"
author = "Chris Beaumont, Thomas Robitaille, Michelle Borkin"
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
# autodoc + numpydoc + automodapi together generate the API reference pages.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.viewcode",
    "sphinx.ext.intersphinx",
    "numpydoc",
    "sphinx_automodapi.automodapi",
    "sphinx_automodapi.smart_resolver",
]
templates_path = ["_templates"]
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# Concatenate the class docstring and __init__ docstring in autodoc output.
autoclass_content = "both"
# Cross-reference targets that nitpicky mode should not warn about.
nitpick_ignore = [
    ("py:class", "glue.viewers.histogram.layer_artist.HistogramLayerBase"),
    ("py:class", "glue.viewers.scatter.layer_artist.ScatterLayerBase"),
    ("py:class", "glue.viewers.image.layer_artist.ImageLayerBase"),
    ("py:class", "glue.viewers.image.layer_artist.RGBImageLayerBase"),
    ("py:class", "glue.viewers.image.state.BaseImageLayerState"),
    ("py:class", "glue.viewers.common.stretch_state_mixin.StretchStateMixin")
]
viewcode_follow_imported_members = False
numpydoc_show_class_members = False
autosummary_generate = True
# Directory (relative to the docs root) where automodapi writes generated pages.
automodapi_toctreedirnm = "api"
# linkcheck builder settings: skip S3 links and tolerate slow hosts.
linkcheck_ignore = [r"https://s3.amazonaws.com"]
linkcheck_retries = 5
linkcheck_timeout = 10
# Inventories for cross-project references via intersphinx.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3.7", None),
    "matplotlib": ("https://matplotlib.org", None),
    "numpy": ("https://numpy.org/doc/stable/", None),
    "astropy": ("https://docs.astropy.org/en/stable/", None),
    "echo": ("https://echo.readthedocs.io/en/latest/", None),
    "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
    "shapely": ("https://shapely.readthedocs.io/en/stable/", None),
}
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = "sphinx_book_theme"
html_static_path = ["_static"]
html_logo = "_static/logo.png"
html_theme_options = {'navigation_with_keys': False}
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@doc@conf.py@.PATH_END.py
|
{
"filename": "x64_context_test.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/tests/x64_context_test.py",
"type": "Python"
}
|
# Copyright 2021 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
from functools import partial
import time
import unittest
from absl.testing import absltest
import numpy as np
import jax
from jax import lax
from jax import random
from jax.experimental import enable_x64, disable_x64
import jax.numpy as jnp
import jax._src.test_util as jtu
jax.config.parse_flags_with_absl()
class X64ContextTests(jtu.JaxTestCase):
  """Tests for the `enable_x64` / `disable_x64` experimental context managers."""

  @jtu.sample_product(jit=jtu.JIT_IMPLEMENTATION)
  def test_make_array(self, jit):
    """Entering/leaving the contexts switches the default float width and back."""
    func = jit(lambda: jnp.array(np.float64(0)))
    dtype_start = func().dtype
    with enable_x64():
      self.assertEqual(func().dtype, "float64")
    with disable_x64():
      self.assertEqual(func().dtype, "float32")
    # Outside both contexts the original default must be restored.
    self.assertEqual(func().dtype, dtype_start)

  @jtu.sample_product(
    jit=jtu.JIT_IMPLEMENTATION,
    enable_or_disable=[enable_x64, disable_x64],
  )
  def test_correctly_capture_default(self, jit, enable_or_disable):
    # The fact we defined a jitted function with a block with a different value
    # of `jax.config.enable_x64` has no impact on the output.
    with enable_or_disable():
      func = jit(lambda: jnp.array(np.float64(0)))
      func()
    # The dtype observed outside the context follows the ambient flag,
    # not the flag active when the function was first traced.
    expected_dtype = "float64" if jax.config._read("jax_enable_x64") else "float32"
    self.assertEqual(func().dtype, expected_dtype)
    with enable_x64():
      self.assertEqual(func().dtype, "float64")
    with disable_x64():
      self.assertEqual(func().dtype, "float32")

  @jtu.sample_product(jit=jtu.JIT_IMPLEMENTATION)
  @jtu.run_on_devices("cpu")  # Test presumes CPU precision
  def test_near_singular_inverse(self, jit):
    """Inverting a near-singular matrix stays finite only in 64-bit mode."""
    rng = jtu.rand_default(self.rng())

    @partial(jit, static_argnums=1)
    def near_singular_inverse(N=5, eps=1E-40):
      X = rng((N, N), dtype='float64')
      X = jnp.asarray(X)
      # eps underflows to zero at float32, making the matrix singular there.
      X = X.at[-1].mul(eps)
      return jnp.linalg.inv(X)

    with enable_x64():
      result_64 = near_singular_inverse()
      self.assertTrue(jnp.all(jnp.isfinite(result_64)))

    with disable_x64():
      result_32 = near_singular_inverse()
      self.assertTrue(jnp.all(~jnp.isfinite(result_32)))

  @jtu.sample_product(jit=jtu.JIT_IMPLEMENTATION)
  def test_while_loop(self, jit):
    """A while_loop traced inside each context carries that context's dtype."""
    @jit
    def count_to(N):
      return lax.while_loop(lambda x: x < N, lambda x: x + 1.0, 0.0)

    with enable_x64():
      self.assertArraysEqual(count_to(10), jnp.float64(10), check_dtypes=True)

    with disable_x64():
      self.assertArraysEqual(count_to(10), jnp.float32(10), check_dtypes=True)

  def test_thread_safety(self):
    """The x64 contexts are per-thread: concurrent threads keep their own mode."""
    def func_x32():
      with disable_x64():
        # Sleep so both threads are inside their contexts at the same time.
        time.sleep(0.1)
        return jnp.array(np.int64(0)).dtype

    def func_x64():
      with enable_x64():
        time.sleep(0.1)
        return jnp.array(np.int64(0)).dtype

    with concurrent.futures.ThreadPoolExecutor() as executor:
      x32 = executor.submit(func_x32)
      x64 = executor.submit(func_x64)
      self.assertEqual(x64.result(), jnp.int64)
      self.assertEqual(x32.result(), jnp.int32)

  @jax.legacy_prng_key('allow')
  @jax.debug_key_reuse(False)
  @jtu.ignore_warning(category=UserWarning,
                      message="Explicitly requested dtype float64 is not available")
  def test_jit_cache(self):
    """Run the same jitted sampler under both modes; exercises the jit cache
    interaction with the x64 state (each call must not crash)."""
    if jtu.test_device_matches(["tpu"]):
      self.skipTest("64-bit random not available on TPU")
    f = partial(random.uniform, random.PRNGKey(0), (1,), 'float64', -1, 1)
    with disable_x64():
      for _ in range(2):
        f()
    with enable_x64():
      for _ in range(2):
        f()

  @unittest.skip("test fails, see #8552")
  def test_convert_element_type(self):
    # Regression test for part of https://github.com/jax-ml/jax/issues/5982
    with enable_x64():
      x = jnp.int64(1)
      self.assertEqual(x.dtype, jnp.int64)
      y = x.astype(jnp.int32)
      self.assertEqual(y.dtype, jnp.int32)
      z = jax.jit(lambda x: x.astype(jnp.int32))(x)
      self.assertEqual(z.dtype, jnp.int32)
if __name__ == "__main__":
  # Run under absl so JAX's test flags (parsed above) are honored.
  absltest.main(testLoader=jtu.JaxTestLoader())
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@x64_context_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "icrar/daliuge",
"repo_path": "daliuge_extracted/daliuge-master/daliuge-engine/dlg/lifecycle/__init__.py",
"type": "Python"
}
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2015
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
|
icrarREPO_NAMEdaliugePATH_START.@daliuge_extracted@daliuge-master@daliuge-engine@dlg@lifecycle@__init__.py@.PATH_END.py
|
{
"filename": "tool_main_step2_model_plot_get.py",
"repo_name": "shihyuntang/igrins_rv",
"repo_path": "igrins_rv_extracted/igrins_rv-master/Tool/tool_main_step2_model_plot_get.py",
"type": "Python"
}
|
import sys
sys.path.append("..") # Adds higher directory to python modules path.
from Engine.importmodule import *
from Engine.importmodule import read_prepdata
from Engine.IO_AB import setup_templates, init_fitsread, stellarmodel_setup, setup_outdir
from Engine.clips import basicclip_above
from Engine.contfit import a0cont
from Engine.classes import FitObjs, InParams
from Engine.macbro import macbro
from Engine.rebin_jv import rebin_jv
from Engine.rotint import rotint
from Engine.opt import optimizer, fmod
from Engine.outplotter import outplotter_23
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
def ini_MPinst(args, inparam, orders, order_use, trk, step2or3, i):
    '''
    Fit one night's spectra in a single order to get a coarse RV and vsini.

    Main function for RV fitting that will be threaded over by multiprocessing
    (called via functools.partial + pool.map with `i` as the mapped argument).

    Parameters:
        args      : parsed command-line arguments
        inparam   : InParams object with input spectra info and fit settings
        orders    : list of available echelle orders (fallbacks if Telfit failed)
        order_use : the single order to analyze
        trk       : run number (not used inside this function)
        step2or3  : step label (not used inside this function)
        i         : index of the night to process within inparam.nights

    Returns:
        (night, bestguess, vsinimini) — mean RV and vsini over all usable
        A/B exposures; (night, nan, nan) when the night cannot be fit.
    '''
    nights = inparam.nights
    night = nights[i]  # current looped night

    order = order_use
    xbounds = inparam.xbounddict[order]
    print('Working on order {:02d}, night {:03d}/{:03d} ({}) PID:{}...'.format(int(order),
                                                                               i+1,
                                                                               len(inparam.nights),
                                                                               night,
                                                                               mp.current_process().pid) )

    #-------------------------------------------------------------------------------
    # Collect initial RV guesses: either one guess per night (dict) or a
    # single guess shared by all nights (float).
    if type(inparam.initguesses) == dict:
        initguesses = inparam.initguesses[night]
    elif type(inparam.initguesses) == float:
        initguesses = inparam.initguesses
    else:
        sys.exit('ERROR! EXPECING SINGAL NUMBER OR FILE FOR INITGUESSES! QUITTING!')

    # Collect relevant beam and filenum info
    tagsnight = []; beamsnight = [];
    for tag in inparam.tagsA[night]:
        tagsnight.append(tag)
        beamsnight.append('A')
    for tag in inparam.tagsB[night]:
        tagsnight.append(tag)
        beamsnight.append('B')

    # Load synthetic telluric template generated during Step 1
    # [:8] here is to ensure program works under Night_Split mode
    A0loc = f'../Output/{args.targname}_{args.band}_tool/A0Fits/{night[:8]}A0_treated_{args.band}.fits'
    try:
        hdulist = fits.open(A0loc)
    except IOError:
        logger.warning(f' --> No A0-fitted template for night {night}, skipping...')
        return night, np.nan, np.nan

    # Find corresponding table in fits file, given the tables do not go
    # sequentially by order number due to multiprocessing in Step 1.
    # NOTE(review): this loop reuses the function argument name `i`; harmless
    # because `i` is not read again below, but renaming it would be safer.
    num_orders = 0
    for i in range(25):
        try:
            hdulist[i].columns[0].name[9:]
            num_orders += 1
        except:
            continue

    # Check whether Telfit hit critical error in Step 1 for the chosen order
    # with this night. If so, try another order. If all hit the error, skip
    # the night.
    nexto = 0
    ordertry = order
    while 1 == 1:
        fits_layer = [ i for i in np.arange(num_orders)+1 if int(hdulist[i].columns[0].name[9:]) == ordertry ][0]
        tbdata = hdulist[ fits_layer ].data
        flag = np.array(tbdata[f'ERRORFLAG{ordertry}'])[0]

        if flag == 1:  # Telfit hit unknown critical error in Step 1; this order can't be used for this night. Try another.
            orderbad = ordertry
            ordertry = orders[nexto]
            logger.warning(f' --> TELFIT ENCOUNTERED CRITICAL ERROR IN ORDER: {orderbad} NIGHT: {night}, TRYING ORDER {ordertry} INSTEAD...')
        else:  # All good, continue
            break

        nexto += 1
        if nexto == len(orders):
            logger.warning(f' --> TELFIT ENCOUNTERED CRITICAL ERROR IN ALL ORDERS FOR NIGHT: {night}, skipping...')
            return night, np.nan, np.nan

    # Telluric template, pixel grid, and blaze for the chosen order.
    watm = tbdata['WATM'+str(order)]
    satm = tbdata['SATM'+str(order)]
    a0contx = tbdata['X'+str(order)]
    continuum = tbdata['BLAZE'+str(order)]

    # Remove extra rows leftover from having columns of unequal length
    satm = satm[(watm != 0)]
    watm = watm[(watm != 0)]
    satm[(satm < 1e-4)] = 0. # set very low points to zero so that they don't go to NaN when taken to an exponent by template power in fmodel_chi
    a0contx = a0contx[(continuum != 0)]
    continuum = continuum[(continuum != 0)]

    # Use instrumental profile FWHM dictionary corresponding to whether IGRINS
    # mounting was loose or not (loose between 2018-04-01 and 2019-05-31).
    if int(night[:8]) < 20180401 or int(night[:8]) > 20190531:
        IPpars = inparam.ips_tightmount_pars[args.band][order]
    else:
        IPpars = inparam.ips_loosemount_pars[args.band][order]

    #-------------------------------------------------------------------------------
    ### Initialize parameter array for optimization as well as half-range values for each parameter during the various steps of the optimization.
    ### Many of the parameters initialized here will be changed throughout the code before optimization and in between optimization steps.
    pars0 = np.array([np.nan,             # 0: The shift of the stellar template (km/s)
                      0.3,                # 1: The scale factor for the stellar template
                      0.0,                # 2: The shift of the telluric template (km/s)
                      0.6,                # 3: The scale factor for the telluric template
                      inparam.initvsini,  # 4: vsini (km/s)
                      IPpars[2],          # 5: The instrumental resolution (FWHM) in pixels
                      np.nan,             # 6: Wavelength 0-pt
                      np.nan,             # 7: Wavelength linear component
                      np.nan,             # 8: Wavelength quadratic component
                      np.nan,             # 9: Wavelength cubic component
                      1.0,                #10: Continuum zero point
                      0.,                 #11: Continuum linear component
                      0.,                 #12: Continuum quadratic component
                      IPpars[1],          #13: Instrumental resolution linear component
                      IPpars[0]])         #14: Instrumental resolution quadratic component

    rvsmini = []; vsinismini = [];

    # Iterate over all A/B exposures
    for t in np.arange(len(tagsnight)):
        tag = tagsnight[t]
        beam = beamsnight[t]

        # Retrieve pixel bounds for where within each other significant telluric absorption is present.
        # If these bounds were not applied, analyzing some orders would give garbage fits.
        if args.band=='K':
            if int(order) in [11, 12, 13, 14]:
                bound_cut = inparam.bound_cut_dic[args.band][order]
            else:
                bound_cut = [150, 150]
        elif args.band=='H':
            if int(order) in [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]:
                bound_cut = inparam.bound_cut_dic[args.band][order]
            else:
                bound_cut = [150, 150]

        # Load target spectrum
        x,wave,s,u = init_fitsread(f'{inparam.inpath}{night}/{beam}/',
                                   'target',
                                   'separate',
                                   night,
                                   order,
                                   tag,
                                   args.band,
                                   bound_cut)

        #-------------------------------------------------------------------------------
        # Execute S/N cut
        s2n = s/u
        if np.nanmedian(s2n) < float(args.SN_cut):
            logger.warning(' --> Bad S/N {:1.3f} < {} for {}{} {}, SKIP'.format( np.nanmedian(s2n), args.SN_cut, night, beam, tag))
            continue

        # Trim obvious outliers above the blaze (i.e. cosmic rays);
        # two passes of the same clip.
        nzones = 5
        x = basicclip_above(x,s,nzones); wave = basicclip_above(wave,s,nzones);
        u = basicclip_above(u,s,nzones); s = basicclip_above(s,s,nzones);
        x = basicclip_above(x,s,nzones); wave = basicclip_above(wave,s,nzones);
        u = basicclip_above(u,s,nzones); s = basicclip_above(s,s,nzones);

        # Cut spectrum to within wavelength regions defined in input list
        s_piece = s[ (x > xbounds[0]) & (x < xbounds[-1]) ]
        u_piece = u[ (x > xbounds[0]) & (x < xbounds[-1]) ]
        wave_piece = wave[ (x > xbounds[0]) & (x < xbounds[-1]) ]
        x_piece = x[ (x > xbounds[0]) & (x < xbounds[-1]) ]

        # Trim stellar template to relevant wavelength range
        mwave_in,mflux_in = stellarmodel_setup(wave_piece, inparam.mwave0, inparam.mflux0)

        # Trim telluric template to relevant wavelength range
        # (wave_piece appears to be in microns, watm in Angstroms — hence the
        # 1e4 factor; TODO confirm against init_fitsread/Telfit outputs)
        satm_in = satm[(watm > min(wave_piece)*1e4 - 11) & (watm < max(wave_piece)*1e4 + 11)]
        watm_in = watm[(watm > min(wave_piece)*1e4 - 11) & (watm < max(wave_piece)*1e4 + 11)]

        # Make sure data is within telluric template range (shouldn't do anything)
        s_piece = s_piece[ (wave_piece*1e4 > min(watm_in)+5) & (wave_piece*1e4 < max(watm_in)-5)]
        u_piece = u_piece[ (wave_piece*1e4 > min(watm_in)+5) & (wave_piece*1e4 < max(watm_in)-5)]
        x_piece = x_piece[ (wave_piece*1e4 > min(watm_in)+5) & (wave_piece*1e4 < max(watm_in)-5)]
        wave_piece = wave_piece[(wave_piece*1e4 > min(watm_in)+5) & (wave_piece*1e4 < max(watm_in)-5)]

        #-------------------------------------------------------------------------------
        par = pars0.copy()

        # Get initial guess for cubic wavelength solution from reduction pipeline
        f = np.polyfit(x_piece,wave_piece,3)
        par9in = f[0]*1e4; par8in = f[1]*1e4; par7in = f[2]*1e4; par6in = f[3]*1e4;
        par[9] = par9in ; par[8] = par8in ; par[7] = par7in ; par[6] = par6in

        par[0] = initguesses-inparam.bvcs[night+tag] # Initial RV with barycentric correction

        # Arrays defining parameter variations during optimization steps.
        # Optimization will cycle twice. In the first cycle, the RVs can vary more than in the second.
        dpars1 = {'cont' : np.array([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0., 1e7, 1, 1, 0, 0]),
                  'wave' : np.array([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 5.00000e-5, 0., 0, 0, 0, 0, 0]),
                  't'    : np.array([ 0.0, 0.0, 5.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0]),
                  'ip'   : np.array([ 0.0, 0.0, 0.0, 0.0, 0, 0.5, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0]),
                  's'    : np.array([20.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0]),
                  'v'    : np.array([ 0.0, 0.0, 0.0, 0.0, inparam.vsinivary, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0])}
        dpars2 = {'cont' : np.array([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0., 1e7, 1, 1, 0, 0]),
                  'wave' : np.array([ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 10.0, 10.0, 5.00000e-5, 0., 0, 0, 0, 0, 0]),
                  't'    : np.array([ 0.0, 0.0, 5.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0]),
                  'ip'   : np.array([ 0.0, 0.0, 0.0, 0.0, 0, 0.5, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0]),
                  's'    : np.array([ 5.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0]),
                  'v'    : np.array([ 0.0, 0.0, 0.0, 0.0, inparam.vsinivary, 0.0, 0.0, 0.0, 0.0, 0, 0, 0, 0, 0, 0])}

        # Rebin the blaze onto the data's pixel grid and normalize the data.
        continuum_in = rebin_jv(a0contx,continuum,x_piece,False)
        s_piece /= np.median(s_piece)
        fitobj = FitObjs(s_piece, x_piece, u_piece, continuum_in, watm_in,satm_in,mflux_in,mwave_in,ast.literal_eval(inparam.maskdict[order]))

        #-------------------------------------------------------------------------------
        # Initialize an array that puts hard bounds on vsini and the instrumental resolution to make sure they do not diverge to unphysical values
        optimize = True
        par_in = par.copy()
        hardbounds = [par_in[4]-dpars1['v'][4], par_in[4]+dpars1['v'][4],
                      par_in[5]-dpars1['ip'][5], par_in[5]+dpars1['ip'][5]]
        if hardbounds[0] < 0:
            hardbounds[0] = 0
        # NOTE(review): index 3 is the vsini/IP *upper* bound yet it is reset
        # to 1 when negative; the IP lower bound is index 2 — confirm this is
        # intentional against the main-pipeline version of this code.
        if hardbounds[3] < 0:
            hardbounds[3] = 1

        # Begin optimization. Fit the blaze, the wavelength solution, the telluric template power and RV, the stellar template power and RV, the
        # zero point for the instrumental resolution, and the vsini of the star separately, iterating and cycling between each set of parameter fits.
        cycles = 2

        optgroup = ['cont', 'wave', 't', 'cont', 's',
                    'cont', 'wave', 't', 's', 'cont',
                    'wave',
                    'ip', 'v',
                    'ip', 'v',
                    't', 's',
                    't', 's']

        for nc, cycle in enumerate(np.arange(cycles), start=1):
            if cycle == 0:
                parstart = par_in.copy()
                dpars = dpars1
            else:
                # Second cycle: reuse the previous cycle's best fit as the
                # starting point but with the tighter variation ranges.
                dpars = dpars2

            for optkind in optgroup:
                parfit_1 = optimizer( parstart, dpars[optkind], hardbounds, fitobj, optimize)
                parstart = parfit_1.copy()
                if args.debug:
                    logger.debug(f'{order}_{tag}_{nc}_{optkind}:\n {parfit_1}')

        parfit = parfit_1.copy()

        #-------------------------------------------------------------------------------
        # if best fit stellar template power is very low, throw out result
        if parfit[1] < 0.1:
            logger.warning(f' --> parfit[1] < 0.1, {night} parfit={parfit}')
            continue

        # if best fit stellar or telluric template powers are exactly equal to their starting values, optimization failed, throw out result
        if parfit[1] == par_in[1] or parfit[3] == par_in[3]:
            logger.warning(f' --> parfit[1] == par_in[1] or parfit[3] == par_in[3], {night}')
            continue

        # if best fit model dips below zero at any point, we're too close to edge of blaze, fit may be compromised, throw out result
        smod,chisq = fmod(parfit,fitobj)
        if len(smod[(smod < 0)]) > 0:
            logger.warning(f' --> len(smod[(smod < 0)]) > 0, {night}')
            continue

        rv0 = parfit[0] - parfit[2]  # Correct for RV of the atmosphere, since we're using that as the basis for the wavelength scale

        rvsmini.append(rv0 + inparam.bvcs[night+tag] + rv0*inparam.bvcs[night+tag]/(3e5**2)) # Barycentric correction
        vsinismini.append(parfit[4])

    # Average over all exposures that survived the quality cuts.
    bestguess = round(np.nanmean(rvsmini),5)
    vsinimini = round(np.nanmean(vsinismini),5)
    return night, bestguess, vsinimini
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
if __name__ == '__main__':
    # Parse command-line arguments for the abbreviated Step 2 RV analysis.
    parser = argparse.ArgumentParser(
        prog = 'IGRINS Spectra Radial Velocity Pipeline - Step 2',
        description = '''
        Only required if the average RV of the target star is unknown to $>$ 5 \kms precision. \n
        Performs an abbreviated analysis of the target star observations in order to converge to coarsely accurate RVs,
        which will be used as starting points for the more precise analysis in the next step;
        simultaneously does the same for target star's \vsini. \n
        Only a single order is used - by default, the first one in the list of wavelength regions, but the user can specify otherwise. \n
        All separate exposures for a given observation are combined into one higher S/N spectrum before fitting occurs.
        ''',
        epilog = "Contact authors: asa.stahl@rice.edu; sytang@lowell.edu")
    parser.add_argument("targname", action="store",
                        help="Enter your *target name, no space", type=str)
    parser.add_argument("-HorK", dest="band", action="store",
                        help="Which band to process? H or K?. Default = K",
                        type=str, default='K')
    parser.add_argument("-Wr", dest="WRegion", action="store",
                        help="Which list of wavelength regions file (./Input/UseWv/WaveRegions_X) to use? Defaults to those chosen by IGRINS RV team, -Wr 1",
                        type=int, default=int(1))
    parser.add_argument("-l_use", dest="label_use", action="store",
                        help="Specify order used. Default is the first in WRegion list",
                        type=int, default=int(0))
    parser.add_argument("-SN", dest="SN_cut", action="store",
                        help="Spectrum S/N quality cut. Spectra with median S/N below this will not be analyzed. Default = 50 ",
                        type=str, default='50')
    parser.add_argument('-i', dest="initvsini", action="store",
                        help="Initial vsini (float, km/s)",
                        type=str, default='' )
    parser.add_argument('-v', dest="vsinivary", action="store",
                        help="Range of allowed vsini variation during optimization (float, if set to zero vsini will be held constant), default = 5.0 km/s",
                        type=str, default='5' )
    parser.add_argument('-g', dest="guesses", action="store",
                        help="Initial guess for RV (int or float, km/s) for all nights. Use -gX instead if you want to reference an Initguesser_results file from a previous run of this step, which will have a different initial guess for each night",
                        type=str, default='')
    parser.add_argument('-gX', dest="guessesX", action="store",
                        help="The number, X, that refers to the ./*targname/Initguesser_results_X file you wish to use for initial RV guesses",
                        type=str, default='')
    parser.add_argument('-t', dest="template", action="store",
                        help="Stellar template. Pick from 'synthetic', 'livingston', or 'user_defined'. Default = 'synthetic'",
                        type=str, default='synthetic' )
    parser.add_argument('-sp', dest="sptype", action="store",
                        help="The spectral type of the *target. (Letter only)",
                        type=str, default='' )
    parser.add_argument('-c', dest="Nthreads", action="store",
                        help="Number of cpu (threads) to use, default is 1/2 of avalible ones (you have %i cpus (threads) avaliable)"%(mp.cpu_count()),
                        type=int, default=int(mp.cpu_count()//2) )
    parser.add_argument('-plot', dest="plotfigs", action="store_true",
                        help="If set, will generate plots of the basic fitting results under ./Output/*targname_*band/figs/main_step2_*band_*runnumber")
    parser.add_argument('-n_use', dest="nights_use", action="store",
                        help="If you don't want to process all nights under the ./Input/*target/ folder, specify an array of night you wish to process here. e.g., [20181111,20181112]",
                        type=str, default='')
    parser.add_argument('-DeBug', dest="debug", action="store_true",
                        help="If set, DeBug logging will be output, as well as (lots of) extra plots under ./Temp/Debug/*target_*band/main_step2")
    parser.add_argument('-sk_check', dest="skip", action="store_true",
                        help="If set, will skip the input parameters check. Handy when running mutiple targets line by line")
    parser.add_argument('--version', action='version', version='%(prog)s 0.85')
    args = parser.parse_args()
    inpath = '../Input/{}/'.format(args.targname)
    cdbs_loc = '~/cdbs/'

    #-------------------------------------------------------------------------------
    #### Check user inputs
    initvsini = float(args.initvsini)
    vsinivary = float(args.vsinivary)
    #------------------------------
    if args.template.lower() not in ['synthetic', 'livingston', 'user_defined']:
        sys.exit('ERROR: UNEXPECTED STELLAR TEMPLATE FOR "-t" INPUT!')
    #------------------------------
    if args.template.lower() in ['synthetic', 'livingston']:
        if args.sptype not in ['F','G','K','M']:
            sys.exit('ERROR: DEFAULT TEMPLATES ONLY COVER F G K M STARS!')
    #------------------------------
    # -g and -gX are mutually exclusive ways to supply initial RV guesses.
    if (args.guesses != '') & (args.guessesX != ''):
        sys.exit('ERROR: YOU CAN ONLY CHOOSE EITHER -g OR -gX')
    #------------------------------
    # Specify initial RV guesses as a single value applied to all nights
    if args.guesses != '':
        try:
            initguesses = float(args.guesses)
            initguesses_show = initguesses
        except:
            sys.exit('ERROR: -g ONLY TAKES A NUMBER AS INPUT!')
    #------------------------------
    # Load initial RV guesses from file (one guess per night)
    if args.guessesX != '':
        try:
            guessdata = Table.read(f'../Output/{args.targname}_{args.band}_tool/Initguesser_results_{args.guessesX}.csv', format='csv')
        except:
            sys.exit(f'ERROR: "../Output/{args.targname}_{args.band}_tool/Initguesser_results_{args.guessesX}.csv" NOT FOUND!')
        initnights = np.array(guessdata['night'])
        initrvs = np.array(guessdata['bestguess'])
        initguesses = {}
        initguesses_show = f'Initguesser_results_{args.guessesX}.csv'
        for hrt in range(len(initnights)):
            initguesses[str(initnights[hrt])] = float(initrvs[hrt])

    #-------------------------------------------------------------------------------
    # Echo the run configuration back to the user before starting.
    start_time = datetime.now()
    print('####################################################################################\n')
    print('---------------------------------------------------------------')
    print(u'''
Input Parameters:
    Tartget = {}
    Filter = \33[41m {} band \033[0m
    WaveLength file = \33[41m WaveRegions_{} \033[0m
    S/N cut > \33[41m {} \033[0m
    Order Use = \33[41m Order {} \033[0m
    Initial vsini = \33[41m {} km/s \033[0m
    vsini vary range \u00B1 \33[41m {} km/s \033[0m
    RV initial guess = \33[41m {} \033[0m
    Stellar template use= \33[41m {} \033[0m
    Target Spectral Type= \33[41m {} \033[0m <------- [late K, M] recommended 'synthetic', [F, G, early K] SpTy recommended 'livingston'
'''.format(args.targname, args.band, args.WRegion, args.SN_cut, args.label_use,
           initvsini, vsinivary, initguesses_show, args.template, args.sptype))
    # Interactive confirmation unless -sk_check was given.
    if not args.skip:
        while True:
            inpp = input("Press [Y]es to continue, [N]o to quit...\n --> ")
            if 'n' in inpp.lower():
                sys.exit('QUIT, PLEASE RE-ENTER YOUR PARAMETERS')
            elif 'y' in inpp.lower():
                break
            else:
                print('I cannot understand what you are saying... TRY AGAIN')
                continue
    print('---------------------------------------------------------------')
    print('Running Step 2 for {}...'.format(args.targname))
    print('This Will Take a While..........')

    #-------------------------------------------------------------------------------
    # Make output directories as needed
    if not os.path.isdir('../Output'):
        os.mkdir('../Output')
    if not os.path.isdir(f'../Output/{args.targname}_{args.band}_tool'):
        os.mkdir(f'../Output/{args.targname}_{args.band}_tool')
    filesndirs = os.listdir(f'../Output/{args.targname}_{args.band}_tool')
    # Find the first unused run number for the output csv.
    trk = 1; go = True;
    while go == True:
        iniguess_dir = 'Initguesser_results_{}.csv'.format(trk)
        if iniguess_dir not in filesndirs:
            break
        trk += 1
    outpath = f'../Output/{args.targname}_{args.band}_tool'

    #-------------------------------------------------------------------------------
    # Set up logger (file + console output)
    logger = logging.getLogger(__name__)
    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s: %(module)s.py: %(levelname)s--> %(message)s')
    file_hander = logging.FileHandler(f'{outpath}/{args.targname}_{args.band}_tool.log')
    stream_hander = logging.StreamHandler()
    # file_hander.setLevel()
    file_hander.setFormatter(formatter)
    logger.addHandler(file_hander)
    logger.addHandler(stream_hander)

    #-------------------------------------------------------------------------------
    # Create output file to write to
    logger.info(f'Writing output to ./Output/{args.targname}_{args.band}_tool/{iniguess_dir}')
    filew = open(f'../Output/{args.targname}_{args.band}_tool/{iniguess_dir}','w')
    filew.write('night, bestguess, vsini')
    filew.write('\n')

    #-------------------------------------------------------------------------------
    # Read in the Prepdata under ./Input/Prpedata/
    xbounddict, maskdict, tagsA, tagsB, mjds, bvcs, nightsFinal, orders = read_prepdata(args)

    # Use subset of nights if specified
    if args.nights_use != '':
        nightstemp = np.array(ast.literal_eval(args.nights_use), dtype=str)
        if len(nightstemp[0]) !=8:
            nightstemp = [ f'{nnight[:8]}_{nnight[-4:]}' for nnight in nightstemp ]
        for nnn in nightstemp:
            if nnn not in nightsFinal:
                sys.exit('NIGHT {} NOT FOUND UNDER ./Input/{}'.format(nnn, args.targname))
        nightsFinal = nightstemp
        print('Only processing nights: {}'.format(nightsFinal))
    logger.info('Analyze with {} nights'.format(len(nightsFinal)))

    #-------------------------------------------------------------------------------
    # Retrieve stellar and telluric templates
    watm,satm, mwave0, mflux0 = setup_templates(logger, args.template, args.band, args.sptype)

    # Save pars in class for future use
    inparam = InParams(inpath,outpath,initvsini,vsinivary,args.plotfigs,
                       initguesses,bvcs,tagsA,tagsB,nightsFinal,mwave0,mflux0,None,xbounddict,maskdict)

    #-------------------------------------------------------------------------------
    # Run order by order, multiprocessing over nights within an order
    pool = mp.Pool(processes = args.Nthreads)
    func = partial(ini_MPinst, args, inparam, orders, int(args.label_use), trk, None )
    outs = pool.map(func, np.arange(len(nightsFinal)))
    pool.close()
    pool.join()

    # Write outputs to file: one (night, RV guess, vsini) row per night.
    vsinis = []; finalrvs = [];
    for n in range(len(nightsFinal)):
        nightout = outs[n]
        filew.write('{}, {}, {}'.format(nightout[0], nightout[1], nightout[2]))
        filew.write('\n')
        vsinis.append(nightout[2])
        finalrvs.append(nightout[1])
    filew.close()

    # Summarize results and total run time.
    print('--------!Initial Guess!--------')
    logger.info('RV results: mean= {:1.4f} km/s, median= {:1.4f} km/s, std= {:1.4f} km/s'.format(np.nanmean(finalrvs),
                                                                                                 np.nanmedian(finalrvs),
                                                                                                 np.nanstd(finalrvs) ))
    logger.info('vsini results: mean= {:1.4f} km/s, median= {:1.4f} km/s, std= {:1.4f} km/s'.format(np.nanmean(vsinis),
                                                                                                    np.nanmedian(vsinis),
                                                                                                    np.nanstd(vsinis) ))
    end_time = datetime.now()
    logger.info('RV Initial Guess DONE... Duration: {}'.format(end_time - start_time))
    logger.info(f'Output saved under ./Output/{args.targname}_{args.band}/{iniguess_dir}')
    print('---------------------------------------------------------------')
    print('You can now try to get a better RV initial guess with by rerunning Step 2 with -gX set to the run number you just completed.')
    print('OR, you can go on to the full RV analysis in Step 3.')
    print('####################################################################################')
|
shihyuntangREPO_NAMEigrins_rvPATH_START.@igrins_rv_extracted@igrins_rv-master@Tool@tool_main_step2_model_plot_get.py@.PATH_END.py
|
{
"filename": "test_interpreter.py",
"repo_name": "xraypy/xraylarch",
"repo_path": "xraylarch_extracted/xraylarch-master/tests/test_interpreter.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
Base TestCase for asteval
"""
import ast
import os
import textwrap
import time
import unittest
from sys import version_info
from tempfile import NamedTemporaryFile
from io import StringIO
from larch import Interpreter
# Optional numpy dependency: the test helpers degrade gracefully without it.
try:
    # noinspection PyUnresolvedReferences
    import numpy as np
except ImportError:
    HAS_NUMPY = False
    print("Warning: numpy not available... functionality will be limited.")
else:
    HAS_NUMPY = True
class TestCase(unittest.TestCase):
    """Base TestCase for driving a larch Interpreter.

    Provides helpers to evaluate larch expressions and assert on symbol
    values (`isvalue`, `isnear`), truthiness (`istrue`, `isfalse`),
    captured printed output (`check_output`) and recorded interpreter
    errors (`check_error`).
    """

    def setUp(self):
        self.interp = Interpreter()
        self.symtable = self.interp.symtable
        self.interp.eval('x = 1')
        self.outfile = '_stdout_'
        # Capture interpreter output in a real file so tests can read it back;
        # delete=False because we reopen it by name in read_stdout().
        self.interp.writer = NamedTemporaryFile('w', delete=False,
                                                prefix='larchtest')
        if not HAS_NUMPY:
            self.interp("arange = range")

    def read_stdout(self):
        """Return everything the interpreter has written so far."""
        time.sleep(0.1)
        self.interp.writer.flush()
        out = None
        with open(self.interp.writer.name) as inp:
            out = inp.read()
        return out

    def tearDown(self):
        # Clean up the NamedTemporaryFile created with delete=False in setUp.
        try:
            self.interp.writer.close()
            os.unlink(self.interp.writer.name)
        except (AttributeError, OSError):
            pass

    # noinspection PyUnresolvedReferences
    def isvalue(self, sym, val):
        """assert that a symboltable symbol has a particular value"""
        tval = self.interp.symtable.get_symbol(sym)
        # Guard on HAS_NUMPY (as the other helpers do) so this does not
        # reference an undefined `np` when numpy is unavailable.
        if HAS_NUMPY and isinstance(val, np.ndarray):
            return self.assertTrue(np.all(tval == val))
        else:
            return self.assertTrue(tval == val)

    def isnear(self, expr, val, places=7):
        """assert that an expression evaluates near a particular value"""
        oval = self.interp(expr)
        if HAS_NUMPY and isinstance(val, np.ndarray):
            for x, y in zip(oval, val):
                self.assertAlmostEqual(x, y, places=places)
        else:
            return self.assertAlmostEqual(oval, val, places=places)

    # noinspection PyUnresolvedReferences
    def istrue(self, expr):
        """assert that an expression evaluates to True"""
        val = self.interp(expr)
        if HAS_NUMPY and isinstance(val, np.ndarray):
            val = np.all(val)
        return self.assertTrue(val)

    # noinspection PyUnresolvedReferences
    def isfalse(self, expr):
        """assert that an expression evaluates to False"""
        val = self.interp(expr)
        if HAS_NUMPY and isinstance(val, np.ndarray):
            val = np.all(val)
        return self.assertFalse(val)

    def check_output(self, chk_str, exact=False):
        """Return whether the first line of captured output equals
        (exact=True) or contains (exact=False) chk_str."""
        self.interp.writer.flush()
        out = self.read_stdout().split('\n')
        if out:
            if exact:
                return chk_str == out[0]
            return chk_str in out[0]
        return False

    def check_error(self, chk_type, chk_msg=''):
        """Assert the first recorded interpreter error has type chk_type
        (and, optionally, that its message contains chk_msg); chk_type=None
        asserts that no error was recorded."""
        if chk_type is None:
            self.assertEqual(0, len(self.interp.error))
        else:
            errtype, errmsg = self.interp.error[0].get_error()
            self.assertEqual(errtype, chk_type)
            if chk_msg:
                self.assertTrue(chk_msg in errmsg)
class TestEval(TestCase):
    """Behavioral tests of larch/asteval evaluation: indexing, control
    flow, functions, error handling and expression results."""

    def test_dict_index(self):
        """dictionary indexing"""
        self.interp("a_dict = {'a': 1, 'b': 2, 'c': 3, 'd': 4}")
        self.istrue("a_dict['a'] == 1")
        self.istrue("a_dict['d'] == 4")

    def test_list_index(self):
        """list indexing"""
        self.interp("a_list = ['a', 'b', 'c', 'd', 'o']")
        self.istrue("a_list[0] == 'a'")
        self.istrue("a_list[1] == 'b'")
        self.istrue("a_list[2] == 'c'")

    def test_tuple_index(self):
        """tuple indexing"""
        self.interp("a_tuple = (5, 'a', 'x')")
        self.istrue("a_tuple[0] == 5")
        self.istrue("a_tuple[2] == 'x'")

    def test_string_index(self):
        """string indexing"""
        self.interp("a_string = 'hello world'")
        self.istrue("a_string[0] == 'h'")
        self.istrue("a_string[6] == 'w'")
        self.istrue("a_string[-1] == 'd'")
        self.istrue("a_string[-2] == 'l'")

    def test_ndarray_index(self):
        """nd array indexing"""
        self.interp("a_ndarray = 5*arange(20)")
        self.istrue("a_ndarray[2] == 10")
        self.istrue("a_ndarray[4] == 20")

    def test_ndarrayslice(self):
        """array slicing"""
        self.interp("a_ndarray = arange(200).reshape(10, 20)")
        self.istrue("a_ndarray[1:3,5:7] == array([[25,26], [45,46]])")
        self.interp("y = arange(20).reshape(4, 5)")
        self.istrue("y[:,3] == array([3, 8, 13, 18])")
        self.istrue("y[...,1] == array([1, 6, 11, 16])")
        self.interp("y[...,1] = array([2, 2, 2, 2])")
        self.istrue("y[1,:] == array([5, 2, 7, 8, 9])")

    def test_while(self):
        """while loops, including break/continue/else clauses"""
        self.interp(textwrap.dedent("""
        n=0
        while n < 8:
            n += 1
        #endwhile
        """))
        self.isvalue('n', 8)
        self.interp(textwrap.dedent("""
        n=0
        while n < 8:
            n += 1
            if n > 3:
                break
            #endif
        else:
            n = -1
        #endwhile
        """))
        self.isvalue('n', 4)
        self.interp(textwrap.dedent("""
        n=0
        while n < 8:
            n += 1
        else:
            n = -1
        #endwhile
        """))
        self.isvalue('n', -1)
        self.interp(textwrap.dedent("""
        n, i = 0, 0
        while n < 10:
            n += 1
            if n % 2:
                continue
            #endif
            i += 1
        #endwhile
        print( 'finish: n, i = ', n, i)
        """))
        self.isvalue('n', 10)
        self.isvalue('i', 5)
        self.interp(textwrap.dedent("""
        n=0
        while n < 10:
            n += 1
            print( ' n = ', n)
            if n > 5:
                break
            #endif
        #endwhile
        print( 'finish: n = ', n)
        """))
        self.isvalue('n', 6)

    def test_while_continue(self):
        self.interp(textwrap.dedent("""
        n, i = 0, 0
        while n < 10:
            n += 1
            if n % 2:
                continue
            #endif
            i += 1
        #endwhile
        print( 'finish: n, i = ', n, i)
        """))
        self.isvalue('n', 10)
        self.isvalue('i', 5)

    def test_while_break(self):
        self.interp(textwrap.dedent("""
        n = 0
        while n < 10:
            n += 1
            if n > 6:
                break
            #endif
        #endwhile
        print( 'finish: n = ', n)
        """))
        self.isvalue('n', 7)

    # noinspection PyTypeChecker
    def test_assert(self):
        """test assert statements"""
        self.interp.error = []
        self.interp('n=6')
        self.interp('assert n==6')
        self.check_error(None)
        self.interp('assert n==7')
        self.check_error('AssertionError')

    def test_for(self):
        """for loops, including the else clause"""
        self.interp(textwrap.dedent("""
        n=0
        for i in arange(10):
            n += i
        #endfor
        """))
        self.isvalue('n', 45)
        self.interp(textwrap.dedent("""
        n=0
        for i in arange(10):
            n += i
        else:
            n = -1
        #endfor
        """))
        self.isvalue('n', -1)

    def test_for_break(self):
        self.interp(textwrap.dedent("""
        n=0
        for i in arange(10):
            n += i
            if n > 2:
                break
            #endif
        else:
            n = -1
        #endfor
        """))
        self.isvalue('n', 3)

    def test_if(self):
        """if / if-else statements"""
        self.interp(textwrap.dedent("""
        zero = 0
        if zero == 0:
            x = 1
        #endif
        if zero != 100:
            x = x+1
        #endif
        if zero > 2:
            x = x + 1
        else:
            y = 33
        #endif
        """))
        self.isvalue('x', 2)
        self.isvalue('y', 33)

    def test_print(self):
        """print (ints, str, ....)"""
        self.interp("print(31)")
        self.check_output('31\n', True)
        self.interp("print('%s = %.3f' % ('a', 1.2012345))")
        self.check_output('a = 1.201\n', True)
        self.interp("print('{0:s} = {1:.2f}'.format('a', 1.2012345))")
        self.check_output('a = 1.20\n', True)

    def test_repr(self):
        """repr of dict, list"""
        self.interp("x = {'a': 1, 'b': 2, 'c': 3}")
        self.interp("y = ['a', 'b', 'c']")
        self.interp("rep_x = repr(x['a'])")
        self.interp("rep_y = repr(y)")
        self.interp("rep_y , rep_x")
        self.interp("repr(None)")
        self.isvalue("rep_x", "1")
        self.isvalue("rep_y", "['a', 'b', 'c']")

    def test_cmp(self):
        """numeric comparisons"""
        self.istrue("3 == 3")
        self.istrue("3.0 == 3")
        self.istrue("3.0 == 3.0")
        self.istrue("3 != 4")
        self.istrue("3.0 != 4")
        self.istrue("3 >= 1")
        self.istrue("3 >= 3")
        self.istrue("3 <= 3")
        self.istrue("3 <= 5")
        self.istrue("3 < 5")
        self.istrue("5 > 3")
        self.isfalse("3 == 4")
        self.isfalse("3 > 5")
        self.isfalse("5 < 3")

    def test_bool(self):
        """boolean logic"""
        self.interp(textwrap.dedent("""
        yes = True
        no = False
        nottrue = False
        a = arange(7)"""))
        self.istrue("yes")
        self.isfalse("no")
        self.isfalse("nottrue")
        self.isfalse("yes and no or nottrue")
        self.isfalse("yes and (no or nottrue)")
        self.isfalse("(yes and no) or nottrue")
        self.istrue("yes or no and nottrue")
        self.istrue("yes or (no and nottrue)")
        self.isfalse("(yes or no) and nottrue")
        self.istrue("yes or not no")
        self.istrue("(yes or no)")
        self.isfalse("not (yes or yes)")
        self.isfalse("not (yes or no)")
        self.isfalse("not (no or yes)")
        self.istrue("not no or yes")
        self.isfalse("not yes")
        self.istrue("not no")

    def test_bool_coerce(self):
        """coercion to boolean"""
        self.istrue("1")
        self.isfalse("0")
        self.istrue("'1'")
        self.isfalse("''")
        self.istrue("[1]")
        self.isfalse("[]")
        self.istrue("(1)")
        self.istrue("(0,)")
        self.isfalse("()")
        self.istrue("dict(y=1)")
        self.isfalse("{}")

    # noinspection PyUnresolvedReferences
    def test_assignment(self):
        """variables assignment"""
        self.interp('n = 5')
        self.isvalue("n", 5)
        self.interp('s1 = "a string"')
        self.isvalue("s1", "a string")
        self.interp('b = (1,2,3)')
        self.isvalue("b", (1, 2, 3))
        if HAS_NUMPY:
            self.interp('a = 1.*arange(10)')
            self.isvalue("a", np.arange(10))
            self.interp('a[1:5] = 1 + 0.5 * arange(4)')
            self.isnear("a", np.array([0., 1., 1.5, 2., 2.5, 5., 6., 7., 8., 9.]))

    def test_names(self):
        """names test"""
        self.interp('nx = 1')
        self.interp('nx1 = 1')

    def test_syntaxerrors_1(self):
        """assignment syntax errors test"""
        for expr in ('class = 1', 'for = 1', 'if = 1', 'raise = 1',
                     '1x = 1', '1.x = 1', '1_x = 1'):
            self.interp(expr)
            self.check_error('SyntaxError')

    def test_unsupportednodes(self):
        """unsupported nodes"""
        for expr in ('f = lambda x: x*x', 'yield 10'):
            self.interp.eval(expr)
            self.check_error('NotImplementedError')

    def test_syntaxerrors_2(self):
        """syntax errors test"""
        for expr in ('x = (1/*)', 'x = 1.A', 'x = A.2'):
            self.interp(expr)
            self.check_error('SyntaxError')

    def test_runtimeerrors_1(self):
        """runtime errors test"""
        self.interp("zero = 0")
        self.interp("astr ='a string'")
        self.interp("atup = ('a', 'b', 11021)")
        self.interp("arr = arange(20)")
        for expr, errname in (('x = 1/zero', 'ZeroDivisionError'),
                              ('x = zero + nonexistent', 'NameError'),
                              ('x = zero + astr', 'TypeError'),
                              ('x = zero()', 'TypeError'),
                              ('x = astr * atup', 'TypeError'),
                              ('x = arr.shapx', 'AttributeError'),
                              ('arr.shapx = 4', 'AttributeError'),
                              ('del arr.shapx', 'LookupError')):
            self.interp(expr)
            self.check_error(errname)

    # noinspection PyUnresolvedReferences
    def test_ndarrays(self):
        """simple ndarrays"""
        self.interp('n = array([11, 10, 9])')
        self.istrue("isinstance(n, ndarray)")
        self.istrue("len(n) == 3")
        self.isvalue("n", np.array([11, 10, 9]))
        self.interp('n = arange(20).reshape(5, 4)')
        self.istrue("isinstance(n, ndarray)")
        self.istrue("n.shape == (5, 4)")
        self.interp("myx = n.shape")
        self.interp("n.shape = (4, 5)")
        self.istrue("n.shape == (4, 5)")
        self.interp("a = arange(20)")
        self.interp("gg = a[1:13:3]")
        self.isvalue('gg', np.array([1, 4, 7, 10]))
        self.interp("gg[:2] = array([0,2])")
        self.isvalue('gg', np.array([0, 2, 7, 10]))
        self.interp('a, b, c, d = gg')
        self.isvalue('c', 7)
        self.istrue('(a, b, d) == (0, 2, 10)')

    def test_binop(self):
        """test binary ops"""
        self.interp('a = 10.0')
        self.interp('b = 6.0')
        self.istrue("a+b == 16.0")
        self.isnear("a-b", 4.0)
        self.istrue("a/(b-1) == 2.0")
        self.istrue("a*b == 60.0")

    def test_unaryop(self):
        """test unary ops"""
        self.interp('a = -10.0')
        self.interp('b = -6.0')
        self.isnear("a", -10.0)
        self.isnear("b", -6.0)

    # NOTE: intentionally disabled (renamed otest_* so unittest skips it).
    def otest_del(self):
        """test del function"""
        self.interp('a = -10.0')
        self.interp('b = -6.0')
        self.assertTrue('a' in self.symtable)
        self.assertTrue('b' in self.symtable)
        self.interp("del a")
        self.interp("del b")
        self.assertFalse('a' in self.symtable)
        self.assertFalse('b' in self.symtable)

    # noinspection PyUnresolvedReferences
    def test_math1(self):
        """builtin math functions"""
        self.interp('n = sqrt(4)')
        self.istrue('n == 2')
        self.isnear('sin(pi/2)', 1)
        self.isnear('cos(pi/2)', 0)
        self.istrue('exp(0) == 1')
        self.isnear('exp(1)', np.e)

    def test_list_comprehension(self):
        """test list comprehension"""
        self.interp('x = [i*i for i in range(4)]')
        self.isvalue('x', [0, 1, 4, 9])
        self.interp('x = [i*i for i in range(6) if i > 1]')
        self.isvalue('x', [4, 9, 16, 25])

    def test_ifexp(self):
        """test if expressions"""
        self.interp('x = 2')
        self.interp('y = 4 if x > 0 else -1')
        self.interp('z = 4 if x > 3 else -1')
        self.isvalue('y', 4)
        self.isvalue('z', -1)

    # noinspection PyUnresolvedReferences
    def test_index_assignment(self):
        """test indexing / subscripting on assignment"""
        self.interp('x = arange(10)')
        self.interp('l = [1,2,3,4,5]')
        self.interp('l[0] = 0')
        self.interp('l[3] = -1')
        self.isvalue('l', [0, 2, 3, -1, 5])
        self.interp('l[0:2] = [-1, -2]')
        self.isvalue('l', [-1, -2, 3, -1, 5])
        self.interp('x[1] = 99')
        self.isvalue('x', np.array([0, 99, 2, 3, 4, 5, 6, 7, 8, 9]))
        self.interp('x[0:2] = [9,-9]')
        self.isvalue('x', np.array([9, -9, 2, 3, 4, 5, 6, 7, 8, 9]))

    def test_reservedwords(self):
        """test reserved words"""
        for w in ('and', 'as', 'while', 'raise', 'else',
                  'class', 'del', 'def', 'import', 'None', 'True', 'False'):
            self.interp.error = []
            self.interp("%s=2" % w)
            self.check_error('SyntaxError')

    def test_raise(self):
        """test raise"""
        self.interp("raise NameError('bob')")
        self.check_error('NameError', 'bob')

    def test_tryexcept(self):
        """test try/except"""
        self.interp(textwrap.dedent("""
        x = 5
        try:
            x = x/0
        except ZeroDivisionError:
            print( 'Error Seen!')
            x = -999
        #endtry
        """))
        self.isvalue('x', -999)
        self.interp(textwrap.dedent("""
        x = -1
        try:
            x = x/0
        except ZeroDivisionError:
            pass
        #endtry
        """))
        self.isvalue('x', -1)

    def test_tryelsefinally(self):
        self.interp(textwrap.dedent("""
        def dotry(x, y):
            out, ok, clean = 0, False, False
            try:
                out = x/y
            except ZeroDivisionError:
                out = -1
            else:
                ok = True
            finally:
                clean = True
            #endtry
            print(' DO TRY ', out, ok, clean)
            return out, ok, clean
        #enddef
        """))
        self.interp("val, ok, clean = dotry(1.0, 2.0)")
        self.interp("print( val, ok, clean)")
        self.isnear("val", 0.5)
        self.isvalue("ok", True)
        self.isvalue("clean", True)
        self.interp("val, ok, clean = dotry(1, 0.0)")
        self.isvalue("val", -1)
        self.isvalue("ok", False)
        self.isvalue("clean", True)

    def test_function1(self):
        """test function definition and running"""
        self.interp(textwrap.dedent("""
        def fcn(x, scale=2):
            'test function'
            out = sqrt(x)
            if scale > 1:
                out = out * scale
            #endif
            return out
        #enddef
        """))
        self.interp("a = fcn(4, scale=9)")
        self.isvalue("a", 18)
        self.interp("a = fcn(9, scale=0)")
        self.isvalue("a", 3)
        self.interp("print(fcn)")
        self.check_output('<Procedure fcn(x, scale=')
        self.interp("a = fcn()")
        self.check_error('TypeError', 'expected at least')
        self.interp("a = fcn(77.0, other='what?')")
        self.check_error('TypeError', 'extra keyword arguments')

    def test_function_vararg(self):
        """test function with var args"""
        self.interp(textwrap.dedent("""
        def fcn(*args):
            'test varargs function'
            out = 0
            for i in args:
                out = out + i*i
            #endfor
            return out
        #enddef
        """))
        self.interp("o = fcn(1,2,3)")
        self.isvalue('o', 14)
        self.interp("print(fcn)")
        self.check_output('<Procedure fcn(')

    def test_function_kwargs(self):
        """test function with kw args, no **kws"""
        self.interp(textwrap.dedent("""
        def fcn(square=False, x=0, y=0, z=0, t=0):
            'test varargs function'
            out = 0
            for i in (x, y, z, t):
                if square:
                    out = out + i*i
                else:
                    out = out + i
                #endif
            #endfor
            return out
        #enddef
        """))
        self.interp("print(fcn)")
        self.check_output('<Procedure fcn(square')
        self.interp("o = fcn(x=1, y=2, z=3, square=False)")
        self.isvalue('o', 6)
        self.interp("o = fcn(x=1, y=2, z=3, square=True)")
        self.isvalue('o', 14)
        self.interp("o = fcn(x=1, y=2, z=3, t=-2)")
        self.isvalue('o', 4)
        self.interp("o = fcn(x=1, y=2, z=3, t=-12, s=1)")
        self.check_error('TypeError', 'extra keyword arg')

    def test_function_kwargs1(self):
        """test function with **kws arg"""
        self.interp(textwrap.dedent("""
        def fcn(square=False, **kws):
            'test varargs function'
            out = 0
            for i in kws.values():
                if square:
                    out = out + i*i
                else:
                    out = out + i
                #endif
            #endfor
            return out
        #enddef
        """))
        self.interp("print(fcn)")
        self.check_output('<Procedure fcn(square')
        self.interp("o = fcn(x=1, y=2, z=3, square=False)")
        self.isvalue('o', 6)
        self.interp("o = fcn(x=1, y=2, z=3, square=True)")
        self.isvalue('o', 14)

    def test_function_kwargs2(self):
        """test function call with positional and keyword args"""
        self.interp(textwrap.dedent("""
        def fcn(x, y):
            'test function'
            return x + y**2
        #enddef
        """))
        self.interp("print(fcn)")
        self.check_output('<Procedure fcn(x,')
        self.interp("o = -1")
        self.interp("o = fcn(2, 1)")
        self.isvalue('o', 3)
        self.interp("o = fcn(x=1, y=2)")
        self.isvalue('o', 5)
        self.interp("o = fcn(y=2, x=7)")
        self.isvalue('o', 11)
        self.interp("o = fcn(1, y=2)")
        self.isvalue('o', 5)
        self.interp("o = fcn(1, x=2)")
        self.check_error('TypeError')

    def test_nested_functions(self):
        setup="""
        def a(x=10):
            if x > 5:
                return 1
            #endif
            return -1
        #enddef
        def b():
            return 2.5
        #enddef
        def c(x=10):
            x = a(x=x)
            y = b()
            return x + y
        #enddef
        """
        self.interp(textwrap.dedent(setup))
        self.interp("o1 = c()")
        self.interp("o2 = c(x=0)")
        self.isvalue('o1', 3.5)
        self.isvalue('o2', 1.5)

    def test_astdump(self):
        """test ast parsing and dumping"""
        astnode = self.interp.parse('x = 1')
        self.assertTrue(isinstance(astnode, ast.Module))
        self.assertTrue(isinstance(astnode.body[0], ast.Assign))
        self.assertTrue(isinstance(astnode.body[0].targets[0], ast.Name))
        self.assertTrue(isinstance(astnode.body[0].value, ast.Constant))
        self.assertTrue(astnode.body[0].targets[0].id == 'x')
        self.assertTrue(astnode.body[0].value.value == 1)
        dumped = self.interp.dump(astnode.body[0])
        self.assertTrue(dumped.startswith('Assign'))

    def test_exit_value(self):
        """test expression eval - last exp. is returned by interpreter"""
        z = self.interp("True")
        self.assertTrue(z)
        z = self.interp("x = 1\ny = 2\ny == x + x\n")
        self.assertTrue(z)
        z = self.interp("x = 42\nx")
        self.assertEqual(z, 42)
        self.isvalue('x', 42)
        z = self.interp("""def foo(): return 42\nfoo()""")
        self.assertEqual(z, 42)
if __name__ == '__main__':
    # Run each test case class through an explicit loader/runner pair.
    loader = unittest.TestLoader()
    for case in (TestEval,):
        runner = unittest.TextTestRunner(verbosity=2)
        runner.run(loader.loadTestsFromTestCase(case))
|
xraypyREPO_NAMExraylarchPATH_START.@xraylarch_extracted@xraylarch-master@tests@test_interpreter.py@.PATH_END.py
|
{
"filename": "urls.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/webserver/lasair/apps/search/urls.py",
"type": "Python"
}
|
from . import views
from django.urls import path
from django.views.generic import TemplateView
# URL routes for the search app: a static simple-search page, plus the
# search view both with and without an inline <query> path parameter.
urlpatterns = [
    path('simple_search', TemplateView.as_view(template_name='simple_search.html')),
    path('search/', views.search, name='search'),
    path('search/<query>', views.search, name='search'),
]
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@webserver@lasair@apps@search@urls.py@.PATH_END.py
|
{
"filename": "11166_rval_100015.py",
"repo_name": "shreeyesh-biswal/Rvalue_3D",
"repo_path": "Rvalue_3D_extracted/Rvalue_3D-main/Codes/X-class/AR_11166/11166_rval_100015.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 10 18:49:10 2023
@author: shreeyeshbiswal
"""
import os
import numpy as np
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.pyplot import figure
# Active-region identifier and data locations.
AR = "11166"
core_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/"
base_dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR
dir_list = sorted(os.listdir(base_dir))
n = len(dir_list)   # number of time steps (one subdirectory per time tag)
m = 10 # values per file (one R-value per altitude slice)
d = '15'    # D_sep tag, used only in the plot label
th = '100'  # B_th tag, used only in the plot label
rval_matrix = np.zeros(shape=(n,m))
index = np.arange(0,n)
height = np.arange(0,m)*0.36   # slice altitudes in Mm, 0.36 Mm spacing
P4 = 'Log of R-value (Mx); AR ' + AR
colorbarticks = [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
cbar_min = 15
cbar_max = 23
flare_time = 119.2   # x-position (hours after first snapshot) of the flare marker
# Fill rval_matrix: one row of m R-values per time step, NaN when data missing.
for i in range(0,n):
    Time_tag = dir_list[i]
    Time = Time_tag[0:19]
    Hour = Time[11:13]
    print(Time)
    dir = "/home/shreeyeshbiswal/IDLWorkspace/Dataset_PF/AR_" + AR + "/" + Time_tag
    os.chdir(dir)
    # the if-else statement takes care of missing data
    if len(os.listdir(dir)) != 0:
        rval = np.loadtxt("PF_ext_rvals_100015_" + Time + ".dat")
        rval = rval + 15.1172 # LOG FACTOR FOR 1.3141 x 10^15
        print(rval)
        print(np.shape(rval))
        rval_matrix[i,:] = rval
        print(Hour)
    else:
        rval_matrix[i,:] = np.nan
        print("Empty directory")
os.chdir(core_dir)
x = np.arange(0,n)
# NOTE(review): dpi=100000 looks like a typo (it would make an enormous figure);
# this figure is superseded by the plt.subplots() call just below -- confirm.
figure(figsize=(10,10), dpi=100000)
# NOTE: this rebinds the name `figure`, shadowing the imported pyplot function.
figure, axs = plt.subplots(10)
figure.set_figheight(15)
figure.set_figwidth(9)
cm = plt.cm.get_cmap('afmhot')
mpl.rc('xtick', labelsize=13)
# Plot one scatter panel per altitude slice, highest altitude (column 9) on top.
sc = axs[0].scatter(x, rval_matrix[:,9], c = rval_matrix[:,9], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
for i in range(0,m):
    axs[i].scatter(x, rval_matrix[:,9-i], c = rval_matrix[:,9-i], vmin=cbar_min, vmax=cbar_max, s=10, cmap=cm)
for i in range(0,m):
    axs[i].set_ylim([cbar_min, cbar_max])
plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[]);
axs[9].tick_params(axis='x', labelsize=16)
axs[9].set_xticks(np.arange(0,n,24))
# Hide the ylims of individual boxes
for i in range(0,m):
    axs[i].set_yticks([])
# Show heights in the altitude
heightfont = 16
for i in range(0,m):
    max_alt = (m-1)*0.36
    altitude = max_alt-(i*0.36)
    alt_str = "{:.2f}".format(altitude)
    axs[i].set_ylabel(alt_str + ' ', fontsize = heightfont, rotation = 0)
# Show flare occurence in dotted lines
for i in range(0,m):
    axs[i].axvline(x = flare_time, ymin = 0, ymax = 1, linestyle = '--', color = 'k', alpha=0.40)
# Orient the text
st = dir_list[0]
start_time = st[0:4] + '/' + st[5:7] + '/' + st[8:10] + '/' + st[11:13] + ':' + st[14:16]
axs[0].text(8, (cbar_max + (0.35*(cbar_max - cbar_min))), P4, fontsize=23)
axs[5].text(-33, cbar_min + 0.5*(cbar_max - cbar_min), 'Height (Mm)', rotation = 90, fontsize=18)
axs[9].text(-15, (cbar_min - (0.65*(cbar_max - cbar_min))), 'Time after ' + start_time + ' (hrs)' + '; ($B_{th}$, $D_{sep}$) = ' + '(' + th + ',' + d + ')', rotation = 0, fontsize=18)
# Shared colorbar on the right-hand side of all panels.
figure.subplots_adjust(right=0.8)
cbar_ax = figure.add_axes([0.85, 0.15, 0.05, 0.7])
cbar_ax.tick_params(labelsize=16)
figure.colorbar(sc, cax=cbar_ax, ticks=range(cbar_min,cbar_max+1,1))
plt.subplots_adjust(wspace=0.5, hspace=0)
plt.show()
# Restore default rcParams changed by mpl.rc() above.
mpl.rcParams.update(mpl.rcParamsDefault)
|
shreeyesh-biswalREPO_NAMERvalue_3DPATH_START.@Rvalue_3D_extracted@Rvalue_3D-main@Codes@X-class@AR_11166@11166_rval_100015.py@.PATH_END.py
|
{
"filename": "AUTHORS.md",
"repo_name": "rpoleski/MulensModel",
"repo_path": "MulensModel_extracted/MulensModel-master/AUTHORS.md",
"type": "Markdown"
}
|
MulensModel is developed as an open-source code with contributions from many individuals. These contributions are of various kinds and we would like to thank and give credit to all those people.
Main contributors:
* [Radek Poleski](https://github.com/rpoleski)
* [Jennifer Yee](https://github.com/jenniferyee)
People who have written parts of the code:
* [Lawrence Peirson](https://github.com/alpv95) - point and binary lens models with shear and convergence
* [Keto Zhang](https://github.com/ketozhang) - PyPI installation, Extensions etc.
* [Mateusz Mroz](https://github.com/mjmroz) - YAML output in example 16
* [Raphael A. P. Oliveira](https://github.com/rapoliveira) - small updates in various places
* [Hariharan Srinivasulu](https://github.com/meethari) - for helping with development of FitData class
* [Justyna Wojtczak](https://github.com/justi) - github actions
The code would not be in the state it is without modules that are imported by MulensModel. Special thanks to:
* [Valerio Bozza](https://github.com/valboz) - for VBBL
* [Jan Skowron](https://github.com/jskowron) - for Skowron & Gould root solver
* Andrew Gould - for Skowron & Gould root solver
* Martin Dominik - for AdaptiveContouring
* [Dan Foreman-Mackey](https://github.com/dfm) - for EMCEE
* AstroPy Team - for AstroPy
We also thank those who have not directly contributed to the code, but their suggestions and comments helped in code development. We thus thank (random order):
* [Kasia Kruszynska](https://github.com/KKruszynska)
* David Specht
* [Demitri Muna](https://github.com/demitri) - for giving excellent suggestions just before the code was born
* Zoey Samples
* [Jan Skowron](https://github.com/jskowron) - for spotting bugs and letting us know about them
* [Jason Eastman](https://github.com/jdeast)
* Samson Johnson - for requesting improvements in FSPL calculations
* Adrian Hernandez
* [Przemek Mroz](https://github.com/przemekmroz)
* [Geoff Bryden](https://github.com/gryden) - for early discussions about the code
|
rpoleskiREPO_NAMEMulensModelPATH_START.@MulensModel_extracted@MulensModel-master@AUTHORS.md@.PATH_END.py
|
{
"filename": "210_DataLoading_Automatic.md",
"repo_name": "rometsch/fargocpt",
"repo_path": "fargocpt_extracted/fargocpt-master/docs_source/source/Examples/210_DataLoading_Automatic.md",
"type": "Markdown"
}
|
## Loading data from the simulation output - the automatic way
This notebook teaches you how to load data from the simulation output using a helper tool.
We will use the data from the simulation in the quickstart example, so make sure you ran this beforehand.
We'll first create a Loader object and ask what's available in the output.
```python
from fargocpt import Loader
l = Loader("example_dirs/100_quickstart/output/out")
l
```
Loader
====================
| output_dir: example_dirs/100_quickstart/output/out
| snapshots: 0 ... 10
| special_snapshots: ['reference']
| snapshot_time: 0.0 5.02257e+06 s ... 62.8 5.02257e+06 s
| monitor_number: 0 ... 200
| units: Units
| target_units = None
| gas: Hydro
| nbody: Nbody
| params: Params
| particles = None
====================
We can explore this data object further by looking at some of its member variables.
```python
l.nbody
```
[ Nbody
====================
| filepath: 100_quickstart/output/out/monitor/nbody0.dat
| varnames:
| time
| snapshot_number
| monitor_number
| x
| y
| vx
| vy
| mass
| physical_time
| omega_frame
| mdcp
| eccentricity
| angular_momentum
| semi_major_axis
| omega_kepler
| mean_anomaly
| eccentric_anomaly
| true_anomaly
| pericenter_angle
| torque
| accreted_mass
| accretion_rate
====================,
Nbody
====================
| filepath: 100_quickstart/output/out/monitor/nbody1.dat
| varnames:
| time
| snapshot_number
| monitor_number
| x
| y
| vx
| vy
| mass
| physical_time
| omega_frame
| mdcp
| eccentricity
| angular_momentum
| semi_major_axis
| omega_kepler
| mean_anomaly
| eccentric_anomaly
| true_anomaly
| pericenter_angle
| torque
| accreted_mass
| accretion_rate
====================]
```python
l.gas
```
Hydro
====================
| output_dir: 100_quickstart/output/out
| units: Units
| target_units= None
| grid: Grid
| timestepping: Scalar
| scalars: Scalar
| vars1D: Vars1D
| vars2D: Vars2D
====================
```python
l.units
```
Units
====================
| base:
| length: 1.49598e+13 cm
| time: 5.02257e+06 s
| mass: 1.98847e+33 g
| temperature: 106700 K
| derived:
| energy: 1.76408e+46 erg
| energy surface density: 7.88257e+19 erg / cm2
| density: 5.9394e-07 g / cm3
| mass surface density: 8.88522e+06 g / cm2
| opacity: 1.12546e-07 cm2 / g
| energy flux: 1.56943e+13 erg / (s cm2)
| velocity: 2.97851e+06 cm / s
| angular momentum: 8.86021e+52 cm2 g / s
| kinematic viscosity: 4.45579e+19 cm2 / s
| dynamic viscosity: 2.64648e+13 P
| acceleration: 0.593026 cm / s2
| stress: 7.88257e+19 g / s2
| pressure: 7.88257e+19 dyn / cm
| power: 3.51231e+39 erg / s
| potential: 8.87155e+12 erg / g
| torque: 1.76408e+46 erg
| force: 1.17921e+33 dyn
| mass accretion rate: 3.95907e+26 g / s
====================
To see all at once, run the following cell.
```python
l.print(recursive=True)
```
Loader
====================
| output_dir: 100_quickstart/output/out
| snapshots: 0 ... 10
| special_snapshots: ['reference']
| snapshot_time: 0.0 5.02257e+06 s ... 62.8 5.02257e+06 s
| monitor_number: 0 ... 200
| units: Units
| target_units = None
| gas: Hydro
| nbody: Nbody
| params: Params
| particles = None
====================
| Units
| ====================
| | base:
| | length: 1.49598e+13 cm
| | time: 5.02257e+06 s
| | mass: 1.98847e+33 g
| | temperature: 106700 K
| | derived:
| | energy: 1.76408e+46 erg
| | energy surface density: 7.88257e+19 erg / cm2
| | density: 5.9394e-07 g / cm3
| | mass surface density: 8.88522e+06 g / cm2
| | opacity: 1.12546e-07 cm2 / g
| | energy flux: 1.56943e+13 erg / (s cm2)
| | velocity: 2.97851e+06 cm / s
| | angular momentum: 8.86021e+52 cm2 g / s
| | kinematic viscosity: 4.45579e+19 cm2 / s
| | dynamic viscosity: 2.64648e+13 P
| | acceleration: 0.593026 cm / s2
| | stress: 7.88257e+19 g / s2
| | pressure: 7.88257e+19 dyn / cm
| | power: 3.51231e+39 erg / s
| | potential: 8.87155e+12 erg / g
| | torque: 1.76408e+46 erg
| | force: 1.17921e+33 dyn
| | mass accretion rate: 3.95907e+26 g / s
| ====================
|
| FargoCPT data Params
| ====================
| | filename: 100_quickstart/output/out/parameters/setup.yml
| | params:
| | Disk: True
| | DiskFeedback: True
| | SelfGravity: False
| | IntegrateParticles: False
| | l0: 1 au
| | m0: 1 solMass
| | mu: 2.35
| | HydroFrameCenter: primary
| | IndirectTermMode: 0
| | OmegaFrame: 1.0
| | Frame: F
| | MonitorTimestep: 0.314
| | Nmonitor: 20
| | Nsnapshots: 10
| | FirstDT: 0.1
| | nbody: [{'name': 'Star', 'semi-major axis': '0.0 au', 'mass': '1.0 solMass', 'eccentricity': 0.0, 'radius': '1.0 solRadius', 'temperature': '5778 K'}, {'name': 'Jupiter', 'semi-major axis': '1.0 au', 'mass': '1 jupiterMass', 'accretion efficiency': '2', 'accretion method': 'kley', 'eccentricity': 0, 'radius': '0.01 solRadius', 'ramp-up time': 0.0}]
| | NumberOfParticles: 2000
| | ParticleGasDragEnabled: True
| | ParticleDustDiffusion: True
| | ParticleDiskGravityEnabled: False
| | ParticleMinimumEscapeRadius: 0.4
| | ParticleMaximumEscapeRadius: 2.5
| | ParticleMinimumRadius: 0.4
| | ParticleMaximumRadius: 2.5
| | ParticleSurfaceDensitySlope: 0.5
| | ParticleSpeciesNumber: 7
| | ParticleRadius: 1 cm
| | ParticleRadiusIncreaseFactor: 1e-1
| | ParticleEccentricity: 0.03
| | ParticleDensity: 2.65 g/cm3
| | ParticleIntegrator: Midpoint
| | CartesianParticles: True
| | Transport: FARGO
| | Integrator: Euler
| | CFL: 0.5
| | CFLmaxVar: 1.1
| | cps: 2
| | Nrad: 128
| | Naz: 384
| | Rmin: 0.4
| | Rmax: 2.5
| | RadialSpacing: Logarithmic
| | ThicknessSmoothing: 0.6
| | ThicknessSmoothingSG: 0.6
| | KlahrSmoothingRadius: 0.3
| | MassAccretionRadius: 0.3
| | Sigma0: 200 g/cm2
| | SigmaSlope: 0.5
| | SigmaFloor: 1e-9
| | AspectRatio: 0.05
| | FlaringIndex: 0.0
| | AspectRatioMode: 0
| | RandomSeed: 1337
| | RandomSigma: False
| | RandomFactor: 0.1
| | FeatureSize: 0.05
| | ViscousAlpha: 0.001
| | ArtificialViscosity: TW
| | ArtificialViscosityDissipation: True
| | ArtificialViscosityFactor: 1.41
| | EquationOfState: isothermal
| | AdiabaticIndex: 1.4
| | HydrogenMassFraction: 0.75
| | SurfaceCooling: thermal
| | RadiativeDiffusion: False
| | CoolingBetaLocal: False
| | CoolingRadiativeFactor: 1.0
| | CoolingBeta: 10
| | CoolingBetaRampUp: 0.0
| | CoolingBetaReference: floor
| | ScurveType: Kimura
| | RadiativeDiffusionOmega: 1.5
| | RadiativeDiffusionAutoOmega: False
| | RadiativeDiffusionMaxIterations: 50000
| | RadiativeDiffusionTolerance: 1.5
| | RadiativeDiffusionInnerBoundary: zerogradient
| | RadiativeDiffusionOuterBoundary: zerogradient
| | Opacity: Lin
| | KappaConst: 2e-06
| | HeatingViscous: True
| | MinimumTemperature: 3 K
| | MaximumTemperature: 1e100 K
| | HeatingCoolingCFLlimit: 1.0
| | InnerBoundary: Reflecting
| | OuterBoundary: Reflecting
| | Damping: True
| | DampingInnerLimit: 1.1
| | DampingOuterLimit: 0.9
| | DampingTimeFactor: 0.1
| | DampingTimeRadiusOuter: 2.5
| | DampingEnergyInner: Initial
| | DampingVRadialInner: Initial
| | DampingVAzimuthalInner: Initial
| | DampingSurfaceDensityInner: Initial
| | DampingEnergyOuter: Initial
| | DampingVRadialOuter: Initial
| | DampingVAzimuthalOuter: Initial
| | DampingSurfaceDensityOuter: Initial
| | OutputDir: output/out
| | LogAfterRealSeconds: 10
| | LogAfterSteps: 0
| | WriteAtEveryTimestep: True
| | WriteDensity: True
| | WriteVelocity: True
| | WriteEnergy: True
| | WriteTemperature: False
| | WriteSoundspeed: False
| | WriteEccentricityChange: False
| | WriteEffectiveGamma: False
| | WriteFirstAdiabaticIndex: False
| | WriteMeanMolecularWeight: False
| | WriteToomre: False
| | WriteQMinus: False
| | WriteQPlus: False
| | WriteTauCool: False
| | WriteViscosity: False
| | WriteAlpha: False
| | WriteKappa: False
| | WriteAlphaGrav: False
| | WriteAlphaGravMean: False
| | WriteAlphaReynolds: False
| | WriteAlphaReynoldsMean: False
| | WriteEccentricity: False
| | WriteTReynolds: False
| | WriteTGravitational: False
| | WritepdV: False
| | WriteDiskQuantities: True
| | WriteRadialLuminosity: False
| | WriteRadialDissipation: False
| | WriteLightCurves: False
| | WriteLightcurvesRadii: 0.4,5.2
| | WriteMassFlow: False
| | WriteGasTorques: False
| | WritePressure: False
| | WriteScaleHeight: False
| | WriteAspectratio: False
| | WriteTorques: False
| | WriteVerticalOpticalDepth: False
| | WriteSGAccelRad: False
| | WriteSGAccelAzi: False
| ====================
|
| Nbody
| ====================
| | filepath: 100_quickstart/output/out/monitor/nbody0.dat
| | varnames:
| | time
| | snapshot_number
| | monitor_number
| | x
| | y
| | vx
| | vy
| | mass
| | physical_time
| | omega_frame
| | mdcp
| | eccentricity
| | angular_momentum
| | semi_major_axis
| | omega_kepler
| | mean_anomaly
| | eccentric_anomaly
| | true_anomaly
| | pericenter_angle
| | torque
| | accreted_mass
| | accretion_rate
| ====================
|
| Nbody
| ====================
| | filepath: 100_quickstart/output/out/monitor/nbody1.dat
| | varnames:
| | time
| | snapshot_number
| | monitor_number
| | x
| | y
| | vx
| | vy
| | mass
| | physical_time
| | omega_frame
| | mdcp
| | eccentricity
| | angular_momentum
| | semi_major_axis
| | omega_kepler
| | mean_anomaly
| | eccentric_anomaly
| | true_anomaly
| | pericenter_angle
| | torque
| | accreted_mass
| | accretion_rate
| ====================
|
| Hydro
| ====================
| | output_dir: 100_quickstart/output/out
| | units: Units
| | target_units= None
| | grid: Grid
| | timestepping: Scalar
| | scalars: Scalar
| | vars1D: Vars1D
| | vars2D: Vars2D
| ====================
|
| | Grid
| | ====================
| | | radi: 0.3899474657238236 1.49598e+13 cm ... 2.564448003640175 1.49598e+13 cm
| | | phii: 0.0 ... 6.283185307179586
| | | Nrad: 74
| | | Naz: 251
| | | Spacing: Logarithmic
| | ====================
| |
| | Scalars
| | ====================
| | | filepath: 100_quickstart/output/out/monitor/Quantities.dat
| | | varnames:
| | | snapshot_number
| | | monitor_number
| | | time
| | | mass
| | | radius
| | | angular_momentum
| | | total_energy
| | | internal_energy
| | | kinematic_energy
| | | potential_energy
| | | radial_kinetic_energy
| | | azimuthal_kinetic_energy
| | | eccentricity
| | | periastron
| | | viscous_dissipation
| | | luminosity
| | | pdivv
| | | inner_boundary_mass_inflow
| | | inner_boundary_mass_outflow
| | | outer_boundary_mass_inflow
| | | outer_boundary_mass_outflow
| | | wave_damping_inner_mass_creation
| | | wave_damping_inner_mass_removal
| | | wave_damping_outer_mass_creation
| | | wave_damping_outer_mass_removal
| | | density_floor_mass_creation
| | | aspect
| | | indirect_term_nbody_x
| | | indirect_term_nbody_y
| | | indirect_term_disk_x
| | | indirect_term_disk_y
| | | frame_angle
| | | advection_torque
| | | viscous_torque
| | | gravitational_torque
| | ====================
| |
| | TimeStepInfo
| | ====================
| | | filepath: 100_quickstart/output/out/monitor/timestepLogging.dat
| | | varnames:
| | | snapshot_number
| | | monitor_number
| | | hydrostep_number
| | | Number_of_Hydrosteps_in_last_monitor_timestep
| | | time
| | | walltime
| | | walltime_per_hydrostep
| | | mean_dt
| | | min_dt
| | | std_dev_dt
| | ====================
| |
| | Vars1D
| | ====================
| | | output_dir: 100_quickstart/output/out
| | | target_units= None
| | | grid: Grid
| | | var_names:
| | | Sigma
| | | vrad
| | | vazi
| | | energy
| | ====================
| |
| | Vars2D
| | ====================
| | | output_dir: 100_quickstart/output/out
| | | target_units= None
| | | grid: Grid
| | | var_names:
| | | Sigma
| | | vrad
| | | vazi
| | | energy
| | ====================
| |
Scalar quantities can be loaded directly by accessing a member:
```python
print(l.gas.scalars.mass[:10])
print(l.nbody[1].x[:10])
```
[0.00034884 0.00034884 0.00034884 0.00034884 0.00034884 0.00034884
0.00034884 0.00034885 0.00034885 0.00034886] 1.98847e+33 g
[1. 0.99999997 0.99999969 0.99999872 0.99999644 0.99999217
0.99998542 0.99997613 0.99996456 0.99995133] 1.49598e+13 cm
We can also use a getter function to do this. See its use below.
```python
print(l.gas.scalars.get("mass")[:10])
```
[0.00034884 0.00034884 0.00034884 0.00034884 0.00034884 0.00034884
0.00034884 0.00034885 0.00034885 0.00034886] 1.98847e+33 g
Let's plot some data for the second nbody object, the planet in this case.
```python
import matplotlib.pyplot as plt
for varname in ["mass", "eccentricity"]:
fig, ax = plt.subplots()
x = l.nbody[1].get(varname)
t = l.nbody[1].time.to("yr")
ax.plot(t, x)
ax.set_ylabel(f"{varname} [{x.unit}]")
    ax.set_xlabel("time [yr]")
```


## 2D data
To obtain 2D data for the gas, let's inspect
```python
l.gas
```
Hydro
====================
| output_dir: 100_quickstart/output/out
| units: Units
| target_units= None
| grid: Grid
| timestepping: Scalar
| scalars: Scalar
| vars1D: Vars1D
| vars2D: Vars2D
====================
There seems to be a `vars2D` member.
```python
l.gas.vars2D
```
Vars2D
====================
| output_dir: 100_quickstart/output/out
| target_units= None
| grid: Grid
| var_names:
| Sigma
| vrad
| vazi
| energy
====================
And some data is available.
Here, we can't just access the data via member variables, because we need to specify a snapshot number.
```python
R, PHI, vals = l.gas.vars2D.get("Sigma", 5)
print(R.shape, PHI.shape, vals.shape)
```
(74, 251) (74, 251) (74, 251)
This returns meshgrids for radius and azimuth coordinates and the values as 2D array.
If you want to use this for plotting with pcolormesh, add the following. This returns meshgrids which are 1 cell larger in each direction as needed by plt.pcolormesh.
```python
R, PHI, vals = l.gas.vars2D.get("Sigma", 5, grid_for_plot=True)
print(R.shape, PHI.shape, vals.shape)
```
(75, 252) (75, 252) (74, 251)
Now in action!
```python
import numpy as np
import matplotlib.colors as mplcolors
def plot_field(loader, name, N, ax=None, dataunit=None, vmin=None, vmax=None, cmap="viridis"):
R, PHI, vals = loader.gas.vars2D.get(name, N, grid_for_plot=True)
if dataunit is None:
dataunit = vals.unit
Z = vals.to_value(dataunit)
X = R*np.cos(PHI)
Y = R*np.sin(PHI)
if ax is None:
fig, ax = plt.subplots(dpi=150)
else:
fig = ax.get_figure()
norm = mplcolors.Normalize(vmin=vmin, vmax=vmax)
pcm = ax.pcolormesh(X,Y,Z, norm=norm, cmap=cmap)
ax.set_aspect("equal")
t = loader.snapshot_time[N].to_value("kyr")
ax.set_title(f" t={t:.2e}kyr, N={N}")
cbar = fig.colorbar(pcm, ax=ax)
cbar.set_label(f"{name} [{dataunit}]")
return fig
```
```python
plot_field(l, "Sigma", l.snapshots[-1], dataunit="g/cm2", cmap="magma", vmax=800);
```

We can also reduce the data to azimuthal averages, minimums or maximums as follows
```python
from matplotlib import colormaps
name = "Sigma"
dataunit = "g/cm2"
Nfirst = l.snapshots[0]
Nlast = l.snapshots[-1]
fig, ax = plt.subplots(dpi=150)
cmap = colormaps.get_cmap("viridis")
inds = np.linspace(Nfirst, Nlast, 10, dtype=int)
for k, n in enumerate(inds):
color = cmap(k/(len(inds)-1))
r, vals = l.gas.vars2D.avg(name, n) # here, we automatically get a grid, i.e. an array of radii
r = r.to_value("au")
y = vals.to_value(dataunit)
t = l.snapshot_time[n].to_value("yr")
# ax.plot(r, (profile-profile0)/profile0, label=f"t={t:.3f}yr")
line, = ax.plot(r, y, label=f"t={t:.3f}yr", color=color)
ax.legend()
ax.set_yscale("log")
ax.set_xlabel("r [au]")
ax.set_ylabel(fr"$\Sigma$ [{dataunit}]")
```
Text(0, 0.5, '$\\Sigma$ [g/cm2]')

```python
```
|
rometschREPO_NAMEfargocptPATH_START.@fargocpt_extracted@fargocpt-master@docs_source@source@Examples@210_DataLoading_Automatic.md@.PATH_END.py
|
{
"filename": "exc.py",
"repo_name": "simonsobs/apluggy",
"repo_path": "apluggy_extracted/apluggy-main/tests/stack/sync/exc.py",
"type": "Python"
}
|
class Thrown(Exception):
    '''Exception delivered into a context manager under test.

    Used as the argument of `contextmanager.gen.throw()`.
    '''
class Raised(Exception):
    '''Test helper exception.

    NOTE(review): presumably raised from inside the context manager body
    (counterpart to `Thrown`) — confirm against the tests that use it.
    '''
    pass
|
simonsobsREPO_NAMEapluggyPATH_START.@apluggy_extracted@apluggy-main@tests@stack@sync@exc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PlasmaPy/PlasmaPy",
"repo_path": "PlasmaPy_extracted/PlasmaPy-main/tests/utils/_pytest_helpers/__init__.py",
"type": "Python"
}
|
PlasmaPyREPO_NAMEPlasmaPyPATH_START.@PlasmaPy_extracted@PlasmaPy-main@tests@utils@_pytest_helpers@__init__.py@.PATH_END.py
|
|
{
"filename": "e2_envs.py",
"repo_name": "mit-ll/spacegym-kspdg",
"repo_path": "spacegym-kspdg_extracted/spacegym-kspdg-main/src/kspdg/pe1/e2_envs.py",
"type": "Python"
}
|
# Copyright (c) 2024, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
import time
from numpy.random import rand
from kspdg.pe1.pe1_base import PursuitEvadeGroup1Env
class PE1_E2_ParentEnv(PursuitEvadeGroup1Env):
    """Pursuit-evade environment whose evader bot performs randomized
    throttle and attitude maneuvers whenever the pursuer is in range.
    """

    def __init__(self, loadfile: str, **kwargs):
        super().__init__(loadfile=loadfile, **kwargs)

    def evasive_maneuvers(self):
        '''Randomized evasive maneuvers.

        Loops until ``self.stop_bot_thread`` is set. While the pursuer is
        closer than ``PARAMS.EVADER.CONTROL_RANGE``, applies random
        throttle/attitude commands for a random duration (up to 2 s each),
        then zeroes the translational controls.
        '''

        # variable to set sas on once
        is_control_set = False

        while not self.stop_bot_thread:

            # check for control range
            if self.get_pe_relative_distance() < self.PARAMS.EVADER.CONTROL_RANGE:

                # turn on evader sas if not yet active
                if not is_control_set:
                    self.logger.info("Activating Evader SAS and RCS...")
                    self.vesEvade.control.sas = True
                    self.vesEvade.control.sas_mode = self.vesEvade.control.sas_mode.prograde
                    self.vesEvade.control.rcs = True
                    is_control_set = True

                # randomly select duration
                dur = 2*rand()

                # randomize if throttle and attitude control will be executed
                thr_on = rand() < 0.5
                att_on = rand() < 0.5

                # randomly select throttle and attitude ctrl values
                # (each component uniform in [-1, 1]; zeroed when *_on is False)
                thr = thr_on * (2*rand(3) - 1)
                att = att_on * (2*rand(3) - 1)

                # actuate throttle and attitude ctrl
                self.vesEvade.control.forward = thr[0]
                self.vesEvade.control.right = thr[1]
                # NOTE(review): up is negated — presumably to account for
                # KSP's left-handed body frame; confirm against kRPC docs
                self.vesEvade.control.up = -thr[2]
                self.vesEvade.control.pitch = att[0]
                self.vesEvade.control.yaw = att[1]
                self.vesEvade.control.roll = att[2]

                # execute throttle for randomized amount of time
                time.sleep(dur)

                # terminate throttle
                if self.get_pe_relative_distance() < self.PARAMS.EVADER.CONTROL_RANGE:
                    self.vesEvade.control.forward = 0.0
                    self.vesEvade.control.right = 0.0
                    self.vesEvade.control.up = 0.0
class PE1_E2_I1_Env(PE1_E2_ParentEnv):
    """Randomized-evader scenario with initial-condition set I1."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=PursuitEvadeGroup1Env.LOADFILE_I1, **kwargs)
class PE1_E2_I2_Env(PE1_E2_ParentEnv):
    """Randomized-evader scenario with initial-condition set I2."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=PursuitEvadeGroup1Env.LOADFILE_I2, **kwargs)
class PE1_E2_I3_Env(PE1_E2_ParentEnv):
    """Randomized-evader scenario with initial-condition set I3."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=PursuitEvadeGroup1Env.LOADFILE_I3, **kwargs)
class PE1_E2_I4_Env(PE1_E2_ParentEnv):
    """Randomized-evader scenario with initial-condition set I4."""
    def __init__(self, **kwargs):
        super().__init__(loadfile=PursuitEvadeGroup1Env.LOADFILE_I4, **kwargs)
|
mit-llREPO_NAMEspacegym-kspdgPATH_START.@spacegym-kspdg_extracted@spacegym-kspdg-main@src@kspdg@pe1@e2_envs.py@.PATH_END.py
|
{
"filename": "tests.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/mica/tests/tests.py",
"type": "Python"
}
|
import os
import unittest
from pytplot import data_exists
import pyspedas
class LoadTestCases(unittest.TestCase):
    """Load tests for the MICA induction-coil data loader."""

    def test_load_none_data(self):
        # no site specified -> the loader is expected to return None
        induction_vars = pyspedas.projects.mica.induction()
        self.assertIsNone(induction_vars)

    def test_load_notplot(self):
        # notplot=True returns a dict of data tables instead of tplot variables
        induction_vars = pyspedas.projects.mica.induction(site='nal', notplot=True)
        self.assertIn('spectra_x_1Hz_NAL', induction_vars)

    def test_load_NAL_data(self):
        # default load path should register a tplot variable for site NAL
        induction_vars = pyspedas.projects.mica.induction(site='nal', time_clip=True)
        self.assertTrue(data_exists('spectra_x_1Hz_NAL'))

    def test_downloadonly(self):
        # downloadonly=True returns local file paths without loading data
        files = pyspedas.projects.mica.induction(site='nal', downloadonly=True, trange=['2014-2-15', '2014-2-16'])
        self.assertTrue(os.path.exists(files[0]))
# run the test suite when executed as a script
if __name__ == '__main__':
    unittest.main()
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@mica@tests@tests.py@.PATH_END.py
|
{
"filename": "beam.py",
"repo_name": "transientskp/tkp",
"repo_path": "tkp_extracted/tkp-master/tkp/telescope/lofar/beam.py",
"type": "Python"
}
|
"""
Beam characterization calculations.
For more information and the math behind this code go to the `LOFAR imaging
capabilities page
<http://www.astron.nl/radio-observatory/astronomers/lofar-imaging-capabilities-sensitivity/lofar-imaging-capabilities/lofa>`_.
"""
import math
def fwhm(lambda_, d, alpha1=1.3):
    """
    The nominal Full Width Half Maximum (FWHM) of a LOFAR station beam.

    :param lambda_: wavelength in meters
    :param d: station diameter.
    :param alpha1: tapering coefficient; depends on the tapering intrinsic
        to the station layout plus any additional beamforming taper. No
        electronic tapering is presently applied to LOFAR station
        beamforming. A uniformly illuminated circular aperture gives
        alpha1 = 1.02, and the value grows with tapering (Napier 1999).
    :returns: the nominal Full Width Half Maximum (FWHM)
    """
    scaled_wavelength = alpha1 * lambda_
    return scaled_wavelength / d
def fov(fwhm):
    """
    The Field of View (FoV) of a LOFAR station.

    :param fwhm: nominal Full Width Half Maximum, calculated with :func:`fwhm`.
    :returns: the area of a circle whose diameter is *fwhm*.
    """
    half_width = fwhm / 2
    return math.pi * half_width ** 2
|
transientskpREPO_NAMEtkpPATH_START.@tkp_extracted@tkp-master@tkp@telescope@lofar@beam.py@.PATH_END.py
|
{
"filename": "plot_learning_curve.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/examples/model_selection/plot_learning_curve.py",
"type": "Python"
}
|
"""
=========================================================
Plotting Learning Curves and Checking Models' Scalability
=========================================================
In this example, we show how to use the class
:class:`~sklearn.model_selection.LearningCurveDisplay` to easily plot learning
curves. In addition, we give an interpretation to the learning curves obtained
for a naive Bayes and SVM classifiers.
Then, we explore and draw some conclusions about the scalability of these predictive
models by looking at their computational cost and not only at their statistical
accuracy.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Learning Curve
# ==============
#
# Learning curves show the effect of adding more samples during the training
# process. The effect is depicted by checking the statistical performance of
# the model in terms of training score and testing score.
#
# Here, we compute the learning curve of a naive Bayes classifier and a SVM
# classifier with a RBF kernel using the digits dataset.
from sklearn.datasets import load_digits
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC

# Load the digits dataset and build the two classifiers being compared:
# a Gaussian naive Bayes model and an RBF-kernel SVM.
X, y = load_digits(return_X_y=True)
naive_bayes = GaussianNB()
svc = SVC(kernel="rbf", gamma=0.001)
# %%
# The :meth:`~sklearn.model_selection.LearningCurveDisplay.from_estimator`
# displays the learning curve given the dataset and the predictive model to
# analyze. To get an estimate of the scores uncertainty, this method uses
# a cross-validation procedure.
import matplotlib.pyplot as plt
import numpy as np

from sklearn.model_selection import LearningCurveDisplay, ShuffleSplit

fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 6), sharey=True)

# Shared display settings: 5 training sizes from 10% to 100% of the data,
# scored over 50 shuffle-split folds, plotting both train and test accuracy
# with a +/- std band.
common_params = {
    "X": X,
    "y": y,
    "train_sizes": np.linspace(0.1, 1.0, 5),
    "cv": ShuffleSplit(n_splits=50, test_size=0.2, random_state=0),
    "score_type": "both",
    "n_jobs": 4,
    "line_kw": {"marker": "o"},
    "std_display_style": "fill_between",
    "score_name": "Accuracy",
}

# one subplot per estimator
for ax_idx, estimator in enumerate([naive_bayes, svc]):
    LearningCurveDisplay.from_estimator(estimator, **common_params, ax=ax[ax_idx])
    # relabel the two legend entries (train and test curves)
    handles, label = ax[ax_idx].get_legend_handles_labels()
    ax[ax_idx].legend(handles[:2], ["Training Score", "Test Score"])
    ax[ax_idx].set_title(f"Learning Curve for {estimator.__class__.__name__}")
# %%
# We first analyze the learning curve of the naive Bayes classifier. Its shape
# can be found in more complex datasets very often: the training score is very
# high when using few samples for training and decreases when increasing the
# number of samples, whereas the test score is very low at the beginning and
# then increases when adding samples. The training and test scores become more
# realistic when all the samples are used for training.
#
# We see another typical learning curve for the SVM classifier with RBF kernel.
# The training score remains high regardless of the size of the training set.
# On the other hand, the test score increases with the size of the training
# dataset. Indeed, it increases up to a point where it reaches a plateau.
# Observing such a plateau is an indication that it might not be useful to
# acquire new data to train the model since the generalization performance of
# the model will not increase anymore.
#
# Complexity analysis
# ===================
#
# In addition to these learning curves, it is also possible to look at the
# scalability of the predictive models in terms of training and scoring times.
#
# The :class:`~sklearn.model_selection.LearningCurveDisplay` class does not
# provide such information. We need to resort to the
# :func:`~sklearn.model_selection.learning_curve` function instead and make
# the plot manually.
# %%
from sklearn.model_selection import learning_curve

# Same cross-validation setup as before, but now learning_curve is also
# asked to return fit/score wall-clock times (return_times=True).
common_params = {
    "X": X,
    "y": y,
    "train_sizes": np.linspace(0.1, 1.0, 5),
    "cv": ShuffleSplit(n_splits=50, test_size=0.2, random_state=0),
    "n_jobs": 4,
    "return_times": True,
}

# train_sizes is identical for both calls; train scores are discarded
train_sizes, _, test_scores_nb, fit_times_nb, score_times_nb = learning_curve(
    naive_bayes, **common_params
)
train_sizes, _, test_scores_svm, fit_times_svm, score_times_svm = learning_curve(
    svc, **common_params
)
# %%
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(16, 12), sharex=True)

# one column per estimator; row 0 shows fit time, row 1 shows score time
for ax_idx, (fit_times, score_times, estimator) in enumerate(
    zip(
        [fit_times_nb, fit_times_svm],
        [score_times_nb, score_times_svm],
        [naive_bayes, svc],
    )
):
    # scalability regarding the fit time
    ax[0, ax_idx].plot(train_sizes, fit_times.mean(axis=1), "o-")
    # shaded band: mean +/- one standard deviation across CV folds
    ax[0, ax_idx].fill_between(
        train_sizes,
        fit_times.mean(axis=1) - fit_times.std(axis=1),
        fit_times.mean(axis=1) + fit_times.std(axis=1),
        alpha=0.3,
    )
    ax[0, ax_idx].set_ylabel("Fit time (s)")
    ax[0, ax_idx].set_title(
        f"Scalability of the {estimator.__class__.__name__} classifier"
    )

    # scalability regarding the score time
    ax[1, ax_idx].plot(train_sizes, score_times.mean(axis=1), "o-")
    ax[1, ax_idx].fill_between(
        train_sizes,
        score_times.mean(axis=1) - score_times.std(axis=1),
        score_times.mean(axis=1) + score_times.std(axis=1),
        alpha=0.3,
    )
    ax[1, ax_idx].set_ylabel("Score time (s)")
    ax[1, ax_idx].set_xlabel("Number of training samples")
# %%
# We see that the scalability of the SVM and naive Bayes classifiers is very
# different. The SVM classifier complexity at fit and score time increases
# rapidly with the number of samples. Indeed, it is known that the fit time
# complexity of this classifier is more than quadratic with the number of
# samples which makes it hard to scale to dataset with more than a few
# 10,000 samples. In contrast, the naive Bayes classifier scales much better
# with a lower complexity at fit and score time.
#
# Subsequently, we can check the trade-off between increased training time and
# the cross-validation score.
# %%
fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(16, 6))

# accuracy-vs-fit-time trade-off, one subplot per estimator
for ax_idx, (fit_times, test_scores, estimator) in enumerate(
    zip(
        [fit_times_nb, fit_times_svm],
        [test_scores_nb, test_scores_svm],
        [naive_bayes, svc],
    )
):
    ax[ax_idx].plot(fit_times.mean(axis=1), test_scores.mean(axis=1), "o-")
    # shaded band: test-score mean +/- one standard deviation across folds
    ax[ax_idx].fill_between(
        fit_times.mean(axis=1),
        test_scores.mean(axis=1) - test_scores.std(axis=1),
        test_scores.mean(axis=1) + test_scores.std(axis=1),
        alpha=0.3,
    )
    ax[ax_idx].set_ylabel("Accuracy")
    ax[ax_idx].set_xlabel("Fit time (s)")
    ax[ax_idx].set_title(
        f"Performance of the {estimator.__class__.__name__} classifier"
    )

plt.show()
# %%
# In these plots, we can look for the inflection point for which the
# cross-validation score does not increase anymore and only the training time
# increases.
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@examples@model_selection@plot_learning_curve.py@.PATH_END.py
|
{
"filename": "test_Sk.py",
"repo_name": "sambit-giri/BCemu",
"repo_path": "BCemu_extracted/BCemu-master/tests/test_Sk.py",
"type": "Python"
}
|
import numpy as np
import pickle
from BCemu import *
### Cosmology
# Density parameters passed to BCM_7param — presumably Omega_b and
# Omega_m; confirm against the BCemu documentation.
Ob, Om = 0.0463, 0.2793

# Baryonic-correction-model parameters for the 7-parameter emulator
bcmdict = {'log10Mc': 13.32,
           'mu'     : 0.93,
           'thej'   : 4.235,
           'gamma'  : 2.25,
           'delta'  : 6.40,
           'eta'    : 0.15,
           'deta'   : 0.14,
           }

# 50 log-spaced wavenumbers at which the boost is evaluated
k_eval = 10**np.linspace(-1,1.08,50)
def test_BCM_7param():
    '''
    With this test, the 7 parameter baryonic power suppression is tested.

    The boost (suppression) factor is evaluated at five redshifts and the
    value at the smallest wavenumber is compared against stored reference
    values to within an absolute tolerance of 1e-5.
    '''
    bfcemu = BCM_7param(Ob=Ob, Om=Om)
    # reference boost at k_eval[0] for each redshift
    expected_first = {
        0.0: 0.999129,
        0.5: 0.998741,
        1.0: 0.998928,
        1.5: 0.999030,
        2.0: 0.999575,
    }
    for z, ref in expected_first.items():
        boost = bfcemu.get_boost(z, bcmdict, k_eval)
        assert np.abs(boost[0] - ref) < 0.00001, f"boost mismatch at z={z}"
|
sambit-giriREPO_NAMEBCemuPATH_START.@BCemu_extracted@BCemu-master@tests@test_Sk.py@.PATH_END.py
|
{
"filename": "sb1_base.py",
"repo_name": "ARCLab-MIT/kspdg",
"repo_path": "kspdg_extracted/kspdg-main/src/kspdg/sb1/sb1_base.py",
"type": "Python"
}
|
# Copyright (c) 2023, MASSACHUSETTS INSTITUTE OF TECHNOLOGY
# Subject to FAR 52.227-11 – Patent Rights – Ownership by the Contractor (May 2014).
# SPDX-License-Identifier: MIT
import gymnasium as gym
import numpy as np
from typing import List, Dict
from copy import deepcopy
import kspdg.utils.utils as U
from kspdg.pe1.pe1_base import PursuitEvadeGroup1Env
# Default construction parameters for SunBlockingGroup1Env.__init__
DEFAULT_EPISODE_TIMEOUT = 240.0 # [sec]
DEFAULT_TARGET_VIEWING_DISTANCE = 100.0 # [m]
DEFAULT_REWARD_DECAY_COEF = 1e-5 # [1/m^2]
class SunBlockingGroup1Env(PursuitEvadeGroup1Env):
    '''
    Base environment for 1v1 sun-blocking orbital scenarios, which
    are direct variants of the pursuit-evasion environments

    All inherited classes share the following
        - Agent controls the pursuer and evader has a scripted policy (although specific policy varies)
        - Pursuer and evader vehicles are identical and are meant to be the same through all
          inherited scenarios (although there is not a current enforcement of this)
        - Observation and Action spaces shared between all child envs
    '''

    # mission loadfile names for various initial conditions
    LOADFILE_I1 = "sb1_i1_init"
    LOADFILE_I2 = "sb1_i2_init"
    LOADFILE_I3 = "sb1_i3_init"
    LOADFILE_I4 = "sb1_i4_init"
    LOADFILE_I5 = "sb1_i5_init"

    PARAMS = deepcopy(PursuitEvadeGroup1Env.PARAMS)

    # info metric parameters
    # PARAMS.INFO.K_CUM_REWARD = "cumulative_reward"
    PARAMS.INFO.K_MAX_REWARD = "max_reward"
    PARAMS.INFO.K_MIN_REWARD = "min_reward"

    def __init__(self, loadfile: str,
        episode_timeout: float = DEFAULT_EPISODE_TIMEOUT,
        target_viewing_distance: float = DEFAULT_TARGET_VIEWING_DISTANCE,
        reward_decay_coef: float = DEFAULT_REWARD_DECAY_COEF,
        **kwargs):
        """
        Args:
            episode_timeout : float
                max length of episode [sec]
            target_viewing_distance : float
                pursuer-evader distance at which the sun-blocking reward
                is maximized [m]
            reward_decay_coef : float
                decay rate of reward as the pursuer-evader distance
                deviates from target_viewing_distance [1/m^2]
        """
        super().__init__(
            loadfile=loadfile,
            episode_timeout=episode_timeout,
            capture_dist=None,
            **kwargs
        )
        assert target_viewing_distance > 0.0
        assert reward_decay_coef > 0.0
        self.target_viewing_distance = target_viewing_distance
        self.reward_decay_coef = reward_decay_coef

        # overwrite the pe1 observation space
        # to include sun position information
        # (see get_observation for mapping)
        # first 3 elements (time, mass, propellant) are non-negative
        self.observation_space = gym.spaces.Box(
            low = np.concatenate((np.zeros(3), -np.inf*np.ones(15))),
            high = np.inf * np.ones(18)
        )

        # don't call reset. This allows instantiation and partial testing
        # without connecting to krpc server

    def _reset_episode_metrics(self) -> None:
        """ Reset attributes that track proximity, timing, and propellant use metrics
        """
        super()._reset_episode_metrics()

        # accumulators for the trapezoidal time-integral of reward
        self.cum_reward = 0.0
        self.prev_reward = 0.0
        self.prev_time = self.vesPursue.met
        self.min_reward = np.inf
        self.max_reward = -np.inf

    def get_reward(self, *args) -> float:
        """ Compute reward value

        Reward is a function of evader-pursuer-sun angle and pursuer-evader distance

        Returns:
            rew : float
                reward at current step

        args
            unused dummy var included to match signature of calls to get_reward in parent class
        """

        # get evader position, distance, and unit vector relative to pursuer
        p_vesE_vesP__lhpbody = self.vesEvade.position(self.vesPursue.reference_frame)
        d_vesE_vesP = np.linalg.norm(p_vesE_vesP__lhpbody)
        u_vesE_vesP__lhpbody = p_vesE_vesP__lhpbody/d_vesE_vesP

        # get sun unit vector relative to pursuer
        p_sun_vesP__lhpbody = self.conn.space_center.bodies['Sun'].position(
            self.vesPursue.reference_frame)
        d_sun_vesP = np.linalg.norm(p_sun_vesP__lhpbody)
        u_sun_vesP__lhpbody = p_sun_vesP__lhpbody/d_sun_vesP

        # compute reward. See sb_objective_plot.py for intuition
        # about reward surface shape
        # angle term: +1 when the evader is directly opposite the sun
        # (pursuer between evader and sun), -1 when aligned with it
        rew = -np.dot(u_vesE_vesP__lhpbody, u_sun_vesP__lhpbody)
        # Gaussian falloff as distance deviates from the target distance
        rew *= np.exp(-self.reward_decay_coef * (d_vesE_vesP - self.target_viewing_distance)**2)

        return rew

    def get_info(self, observation: List, done: bool) -> Dict:
        """compute performance metrics
        Args:
            observation : List
                current / most recent observation of environment
            done : bool
                True if last step of episode
        """

        # call the "grandparent" info method, skipping the
        # parent info in PursuitEvadeGroup1Env because
        # there is a lot of bloat in the parent method that is not needed
        info = super(PursuitEvadeGroup1Env, self).get_info(observation=observation, done=done)

        # sun-blocking reward metrics: track running min/max of reward
        cur_reward = self.get_reward()
        cur_time = observation[self.PARAMS.OBSERVATION.I_MET]
        if cur_reward < self.min_reward:
            self.min_reward = cur_reward
        if cur_reward > self.max_reward:
            self.max_reward = cur_reward
        info[self.PARAMS.INFO.K_MIN_REWARD] = self.min_reward
        info[self.PARAMS.INFO.K_MAX_REWARD] = self.max_reward

        # fuel usage
        info[self.PARAMS.INFO.K_PURSUER_FUEL_USAGE] = self.pursuer_init_mass - self.vesPursue.mass
        info[self.PARAMS.INFO.K_EVADER_FUEL_USAGE] = self.evader_init_mass - self.vesEvade.mass

        # weighted score is trapezoid approximation of integral of reward function
        self.cum_reward += 0.5*(cur_reward + self.prev_reward)*(cur_time - self.prev_time)
        info[self.PARAMS.INFO.K_WEIGHTED_SCORE] = self.cum_reward

        # store reward and time value for trapezoid approx in next step
        self.prev_reward = cur_reward
        self.prev_time = cur_time

        return info

    def get_observation(self):
        """ return observation of pursuit and evader vessels from referee ref frame

        Returns:
            obs : list
                [0] : mission elapsed time [s]
                [1] : current vehicle (pursuer) mass [kg]
                [2] : current vehicle (pursuer) propellant  (mono prop) [kg]
                [3:6] : pursuer position wrt CB in right-hand CBCI coords [m]
                [6:9] : pursuer velocity wrt CB in right-hand CBCI coords [m/s]
                [9:12] : evader position wrt CB in right-hand CBCI coords [m]
                [12:15] : evader velocity wrt CB in right-hand CBCI coords [m/s]
                [15:18] : pursuer's sun-pointing unit vector in right-hand CBCI coords [-]

        Ref:
            - CBCI stands for celestial-body-centered inertial which is a coralary to ECI coords
              (see notation: https://github.com/mit-ll/spacegym-kspdg#code-notation)
            - KSP's body-centered inertial reference frame is left-handed
              (see https://krpc.github.io/krpc/python/api/space-center/celestial-body.html#SpaceCenter.CelestialBody.non_rotating_reference_frame)
        """

        # first 15 elements come from the pe1 parent observation
        obs = super().get_observation()

        # get sun position relative to pursuer
        p_sun_vesP__lhcbci = np.array(self.conn.space_center.bodies['Sun'].position(
            self.vesPursue.orbit.body.non_rotating_reference_frame))

        # convert to right-handed CBCI coords and normalize to a unit vector
        p_sun_vesP__rhcbci = U.convert_lhcbci_to_rhcbci(p_sun_vesP__lhcbci)
        d_sun_vesP = np.linalg.norm(p_sun_vesP__rhcbci)
        u_sun_vesP__rhcbci = p_sun_vesP__rhcbci/d_sun_vesP

        # encode into observation
        obs.extend(u_sun_vesP__rhcbci)

        return obs

    @classmethod
    def observation_list_to_dict(cls, obs_list):
        """convert observation from list to dict (not implemented for sb1)"""
        raise NotImplementedError

    @classmethod
    def observation_dict_to_list(cls, obs_dict):
        """convert observation from dict to list (not implemented for sb1)"""
        raise NotImplementedError
|
ARCLab-MITREPO_NAMEkspdgPATH_START.@kspdg_extracted@kspdg-main@src@kspdg@sb1@sb1_base.py@.PATH_END.py
|
{
"filename": "test_st_stdout.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/alembic/models/rev_f9a742bb2297/strategies/tests/test_st_stdout.py",
"type": "Python"
}
|
from hypothesis import given
from hypothesis import strategies as st
from sqlalchemy import select
from sqlalchemy.orm import selectinload
from nextline_rdb.db import DB
from ... import Model, Stdout
from .. import st_model_stdout
@given(st.data())
async def test_st_model_stdout(data: st.DataObject) -> None:
    """Round-trip a generated `Stdout` model through the database.

    Draws a random `Stdout` instance from the strategy, persists it,
    reloads it with its `run` and `trace` relationships eagerly loaded,
    and checks the reloaded row reprs identically to the original.
    """
    stdout = data.draw(st_model_stdout())

    async with DB(use_migration=False, model_base_class=Model) as db:
        async with db.session.begin() as session:
            session.add(stdout)
        async with db.session() as session:
            select_prompt = select(Stdout)
            stdout_ = await session.scalar(
                select_prompt.options(
                    selectinload(Stdout.run), selectinload(Stdout.trace)
                )
            )
            assert stdout_
            # NOTE(review): detach before repr — presumably so repr() does
            # not trigger lazy loads after the session closes; confirm
            session.expunge_all()

    assert repr(stdout) == repr(stdout_)
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@alembic@models@rev_f9a742bb2297@strategies@tests@test_st_stdout.py@.PATH_END.py
|
{
"filename": "octree_raytracing.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/utilities/lib/octree_raytracing.py",
"type": "Python"
}
|
from itertools import product
import numpy as np
from yt.funcs import mylog
from yt.utilities.lib._octree_raytracing import _OctreeRayTracing
class OctreeRayTracing:
    """Helper that casts rays through an octree-based yt data source.

    Wraps the Cython ``_OctreeRayTracing`` backend: builds the octree from the
    data source's cell positions/levels, provides vertex-centered field data,
    and caches the ray/cell intersections computed by :meth:`cast_rays`.
    """

    # Cython octree backend instance (built in __init__).
    octree = None
    # The yt data source the octree was built from.
    data_source = None
    log_fields = None
    fields = None
    # Internal data
    # Cached results of cast_rays (per-ray cell ids and t-intervals).
    _cell_index = None
    _tvalues = None
    def __init__(self, data_source):
        self.data_source = data_source
        ds = data_source.ds
        # The octree spans the unit cube; positions are fed in "unitary" units.
        LE = np.array([0, 0, 0], dtype=np.float64)
        RE = np.array([1, 1, 1], dtype=np.float64)
        lvl_min = ds.min_level + 1
        # This is the max refinement so that the smallest cells have size
        # 1/2**depth
        depth = lvl_min + ds.max_level + 1
        self.octree = _OctreeRayTracing(LE, RE, depth)
        ds = data_source.ds
        # Cell centers as an (Ncell, 3) array in unitary units.
        xyz = np.stack([data_source[key].to_value("unitary") for key in "xyz"], axis=-1)
        # Shift grid levels so the coarsest level maps to lvl_min in the tree.
        lvl = data_source["grid_level"].value.astype("int64", copy=False) + lvl_min
        # Integer cell positions on the finest (2**depth) grid.
        ipos = np.floor(xyz * (1 << depth)).astype("int64")
        mylog.debug("Adding cells to volume")
        # Each cell is keyed by its index in the data source ordering.
        self.octree.add_nodes(
            ipos.astype(np.int32),
            lvl.astype(np.int32),
            np.arange(len(ipos), dtype=np.int32),
        )
    def vertex_centered_data(self, field):
        """Return vertex-centered values of *field* for every cell.

        The result has shape ``(2, 2, 2, Ncell)``: one value per cell corner,
        in the data source's cell ordering, carrying the field's units.
        """
        data_source = self.data_source
        # Chunk spatially with one ghost zone so corner values can be formed.
        chunks = data_source.index._chunk(data_source, "spatial", ngz=1)
        finfo = data_source.ds._get_field_info(field)
        units = finfo.units
        rv = data_source.ds.arr(
            np.zeros((2, 2, 2, data_source.ires.size), dtype="float64"), units
        )
        binary_3D_index_iter = product(*[range(2)] * 3)
        # Per-corner write offsets into rv, advanced chunk by chunk.
        ind = {(i, j, k): 0 for i, j, k in binary_3D_index_iter}
        for chunk in chunks:
            with data_source._chunked_read(chunk):
                gz = data_source._current_chunk.objs[0]
                gz.field_parameters = data_source.field_parameters
                wogz = gz._base_grid
                vertex_data = gz.get_vertex_centered_data([field])[field]
                for i, j, k in product(*[range(2)] * 3):
                    # select() copies the selected cells' corner values into rv
                    # starting at the running offset and returns the count.
                    ind[i, j, k] += wogz.select(
                        data_source.selector,
                        vertex_data[i : i + 2, j : j + 2, k : k + 2, ...],
                        rv[i, j, k, :],
                        ind[i, j, k],
                    )
        return rv
    def set_fields(self, fields, log_fields, no_ghost, force=False):
        """Prepare vertex-centered data for a single field.

        Stores the (optionally log10'd) corner values as ``self.data`` with
        shape ``(8, Ncell)``. Ghost zones are mandatory for octree data.
        """
        if no_ghost:
            raise NotImplementedError("Ghost zones are required with Octree datasets")
        if len(fields) != 1:
            raise ValueError(
                "Can only set one fields at a time. "
                "This is likely a bug, and should be reported."
            )
        field = self.data_source._determine_fields(fields)[0]
        take_log = log_fields[0]
        vertex_data = self.vertex_centered_data(field)
        if take_log:
            vertex_data = np.log10(vertex_data)
        # Vertex_data has shape (2, 2, 2, ...)
        # Note: here we have the wrong ordering within the oct (classical Fortran/C
        # ordering issue) so we need to swap axis 0 and 2.
        self.data = vertex_data.swapaxes(0, 2).reshape(8, -1)
    def cast_rays(self, vp_pos, vp_dir):
        """Cast the rays through the oct.
        Parameters
        ----------
        vp_pos : float arrays (Nrays, Ndim)
        vp_dir : float arrays (Nrays, Ndim)
            The position (unitary) and direction of each ray
        Returns
        -------
        cell_index : list of integer arrays of shape (Ncell)
            For each ray, contains an ordered array of cell ids
            that it intersects with
        tvalues : list of float arrays of shape (Ncell, 2)
            The t value at entry and exit for each cell.
        """
        # NOTE(review): results are cached via truthiness, so an empty
        # cell_index would trigger recomputation on each call -- presumably
        # harmless, but confirm if empty intersections are possible.
        if not self._cell_index:
            self._cell_index, self._tvalues = self.octree.cast_rays(vp_pos, vp_dir)
        return self._cell_index, self._tvalues
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@utilities@lib@octree_raytracing.py@.PATH_END.py
|
{
"filename": "Figure56.ipynb",
"repo_name": "birnstiel/dsharp_opac",
"repo_path": "dsharp_opac_extracted/dsharp_opac-master/notebooks/Figure56.ipynb",
"type": "Jupyter Notebook"
}
|
# Figure 5 & 6: Size Averaged $\lambda$ dependence, Size Distributions
```python
%run header.py
```
```python
# Load the precomputed DSHARP opacity table shipped with the package.
d = np.load(opacity.get_datafile('default_opacities_smooth.npz'))
a = d['a']          # particle sizes [cm] (axis labels below use cm)
lam = d['lam']      # wavelengths [cm]
k_abs = d['k_abs']  # absorption opacity [cm^2/g], shape (len(a), len(lam))
k_sca = d['k_sca']  # scattering opacity [cm^2/g]
```
```python
# Download one of the Weingartner & Draine 2001 dust models for comparison
filename = 'kext_albedo_WD_MW_3.1_60'
if not os.path.isfile(filename):
    from urllib.request import urlretrieve
    print('Downloading file \'{}\' ... '.format(filename), end='')
    # BUG FIX: the URL previously ended in the literal text '(unknown)'
    # instead of interpolating the file name, so the download always failed.
    urlretrieve(
        f'https://www.astro.princeton.edu/~draine/dust/extcurvs/{filename}',
        filename=filename)
    print('Done!')
# Skip the fixed-width header: data start after the dashed separator line.
with open(filename) as f:
    while not f.readline().startswith('--------'):
        pass
    lam_dr,_,_,_,k_dr = np.loadtxt(f).T
```
```python
# a 1g-normalized size distribution (bin-integrated) up to 1 mm
# (bin-integrated mass ∝ a^0.5; presumably an MRN-like n(a) ∝ a^-3.5 --
#  confirm against the paper)
s = a**0.5
# truncate above a_max = 1 mm (a is in cm)
s[a > 0.1] = 0
# normalize to unit total mass
s= s / s.sum()
# size average the absorption opacities
k_powerlaw = (k_abs * s[:,None]).sum(0)
```
Create a coagulation-fragmentation size distribution with similar upper size limit.
```python
# Physical setup for the coagulation-fragmentation size distribution.
r = c.au.cgs.value # position in the disk in cm
T = 200. # temperature in K
sigma_g = 200. # gas surface density [g/cm^2]
d2g = 0.01 # dust-to-gas ratio
rho_s = 1.6686 # material density [g/cm^3]
M_star = c.M_sun.cgs.value # stellar mass [g]
v_frag = 100. # fragmentation velocity [cm/s]
alpha = 4e-4 # turbulence parameter
m = 4 * np.pi / 3 * rho_s * a**3 # mass grid
# create the size distribution, FIRST: the fortran code
fit, a_01, a_12, a_l, a_p, a_r, a_sett = opacity.distribution(1.8, T, alpha, sigma_g, sigma_g * d2g, rho_s, m, a, M_star, r, v_frag)
# SECOND: a simplified version. Need to convert to bin-integrated values.
fit2, a_f = opacity.get_B11S_fit(T, a, r=r, sigma_g=sigma_g, d2g=d2g, rho_s=rho_s, M_star=M_star, v_frag=v_frag, alpha=alpha)
fit2 = fit2 * np.diff(np.hstack((a[0]**2 / a[1], a)))
fit2 = fit2 / fit2.sum() * sigma_g * d2g
# plot the size distribution
fig, ax = plt.subplots(figsize=(3.5, 3.5 * 0.66))
ax.loglog(a, s * sigma_g * d2g, label='power-law distribution')
ax.loglog(a, fit, label='B11')
ax.loglog(a, fit2, label='B11S')
ax.set_xlim(1e-5, 2e-1)
ax.set_ylim(1e-4, 2e-1)
ax.set_xlabel('particle size [cm]')
# FIX: use a raw string -- '\s' is an invalid escape sequence and raises a
# SyntaxWarning/DeprecationWarning on modern Python (rendered text unchanged).
ax.set_ylabel(r'$\sigma$ [g cm$^{-2}$]')
ax.legend(fontsize='small')
fig.subplots_adjust(
    bottom=0.17, top=0.95,
    left=0.14, right=0.97
)
fig.savefig('figures/fig5_size_distri.pdf')
```
```python
# size average the absorption opacities
# ... with the B11 (fortran) distribution, normalized to unit mass
s2 = fit / fit.sum()
k_fit = (k_abs * s2[:,None]).sum(0)
# ... and with the simplified B11S fit; multiply by a to convert the
# per-decade distribution to bin-integrated weights before normalizing
s3 = opacity.get_B11S_fit(T, a)[0]
s3 *= a
s3 = s3 / s3.sum()
k_fit2 = (k_abs * s3[:,None]).sum(0)
```
```python
# where to measure the reference value
lam_obs = 0.087
# load the D'Alessio opacity
# NOTE: d2g is redefined here (sum of per-species dust fractions) to convert
# the tabulated per-gas opacity into a per-dust opacity.
d2g = sum([0.0056, 0.0034, 0.000768, 0.0041])
data_d01 = np.loadtxt(opacity.get_datafile('kappa_D01_T100K_p3.5_amax1mm.csv'))
# table columns are log10(lambda [micron]) and log10(kappa); convert to cm
lam_d01 = 10.**data_d01[:,0] * 1e-4
kap_d01 = 10.**(data_d01[:,1]) / d2g
```
```python
# the Beckwith 1990 law
kb = 3.446 * (lam / 0.087)**(-1) # Beckwith 1990
# the opacities from Andrews et al. 2009
la, ka = np.loadtxt(opacity.get_datafile('andrews2009.dat')).T
# now the plot
f, ax = plt.subplots(figsize=(7, 7 * 0.64))
ax.loglog(1e-4*lam_dr, k_dr, 'k', zorder=-100, alpha=0.5, label='Weingartner & Draine 2001')
ax.loglog(lam, kb, 'k--', zorder=-100, alpha=0.5, label='Beckwith et al. 1990')
ax.loglog(1e-4*la, ka, 'k--', zorder=-100, alpha=1.0, label='Andrews et al. 2009')
ax.loglog(lam_d01, kap_d01, 'k:', zorder=-100, alpha=1.0, label='D\'Alessio et al. 2001')
l1, = ax.loglog(lam, k_powerlaw, zorder=-100, alpha=1.0, label='DSHARP, power-law')
l2, = ax.loglog(lam, k_fit, zorder=-100, alpha=1.0, label='DSHARP, B11', ls='--')
l2, = ax.loglog(lam, k_fit2, zorder=-100, alpha=1.0, label='DSHARP, B11S', ls='--')
ax.legend(loc=3, fontsize='small').get_frame().set_edgecolor('none')
ax.set_xlim(1e-5,1e0)
ax.set_ylim(1e-2,1e5)
# FIX: raw string -- '\l' is an invalid escape sequence (SyntaxWarning on
# modern Python); the rendered label is unchanged.
ax.set_xlabel(r'$\lambda$ [cm]')
ax.set_ylabel(r'$\kappa^\mathrm{abs,tot}_\nu$ [cm$^2$/g]')
#f.subplots_adjust(
# bottom=0.171, top=0.94,
# left=0.15, right=0.97)
f.subplots_adjust(
    bottom=0.09, top=0.94,
    left=0.09, right=0.97)
plt.savefig('figures/fig6_comparison.pdf')
```
|
birnstielREPO_NAMEdsharp_opacPATH_START.@dsharp_opac_extracted@dsharp_opac-master@notebooks@Figure56.ipynb@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "rhysrb/Pq_server",
"repo_path": "Pq_server_extracted/Pq_server-master/README.md",
"type": "Markdown"
}
|
# Pq_server
v1.3 28th August 2020
Fixes for flux values of exactly 0 Jy and more than 10 photometry measurements in a single filter.
Proper i-band quasar templates included - useful for z~6 calculations.
Slight improvements to plotting routines - filters presented as a list rather than a smushed line under the plot.
###########################################################
v1.2 4th August 2020
Full support for ugriz optical bands
Tweak to shelve procedure to prevent dbm errors on OSX
Tweak to pqrun.py to make code easier to integrate into other pipelines
###########################################################
v1.1 2nd July 2020
Luptitude support added for SDSS iz - see updated guide.pdf for more details
Some minor tweaks to plot routines
###########################################################
First upload 27th March 2020
Instructions for using the Pq code are in the document "guide.pdf"
|
rhysrbREPO_NAMEPq_serverPATH_START.@Pq_server_extracted@Pq_server-master@README.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "maxmahlke/ssos",
"repo_path": "ssos_extracted/ssos-master/ssos/__init__.py",
"type": "Python"
}
|
name = "ssos"
__version__ = "1.3.5"
GREETING = (
"""
* + ssos %s
+ x
' https://ssos.readthedocs.io
* + _.::' |
:_.' - o - ssos --default : Copy default settings to CWD
| | ssos --help : Print detailed help
O - * ' ssos [-PARAMETER] [-VALUE] path/to/images
| ssos --inspect path/to/output : Inspect SSO candidates
_..:'
+ (_.: max.mahlke(at)ias.u-psud.fr
"""
% __version__
)
|
maxmahlkeREPO_NAMEssosPATH_START.@ssos_extracted@ssos-master@ssos@__init__.py@.PATH_END.py
|
{
"filename": "reddening.py",
"repo_name": "IvS-KULeuven/IvSPythonRepository",
"repo_path": "IvSPythonRepository_extracted/IvSPythonRepository-master/sed/reddening.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Definitions of interstellar reddening curves
Section 1. General interface
============================
Use the general interface to get different curves:
>>> wave = np.r_[1e3:1e5:10]
>>> for name in ['chiar2006','fitzpatrick1999','fitzpatrick2004','cardelli1989','seaton1979']:
... wave_,mag_ = get_law(name,wave=wave)
... p = pl.plot(1e4/wave_,mag_,label=name)
>>> p = pl.xlim(0,10)
>>> p = pl.ylim(0,12)
Use the general interface to get the same curves but with different Rv:
>>> for Rv in [2.0,3.1,5.1]:
... wave_,mag_ = get_law('cardelli1989',wave=wave,Rv=Rv)
... p = pl.plot(1e4/wave_,mag_,'--',lw=2,label='cardelli1989 Rv=%.1f'%(Rv))
>>> p = pl.xlim(0,10)
>>> p = pl.ylim(0,12)
>>> p = pl.xlabel('1/$\lambda$ [1/$\mu$m]')
>>> p = pl.ylabel(r'Extinction E(B-$\lambda$) [mag]')
>>> p = pl.legend(prop=dict(size='small'),loc='lower right')
]include figure]]ivs_sed_reddening_curves.png]
Section 2. Individual curve definitions
=======================================
Get the curves seperately:
>>> wave1,mag1 = cardelli1989()
>>> wave2,mag2 = chiar2006()
>>> wave3,mag3 = seaton1979()
>>> wave4,mag4 = fitzpatrick1999()
>>> wave5,mag5 = fitzpatrick2004()
Section 3. Normalisations
=========================
>>> wave = np.logspace(3,6,1000)
>>> photbands = ['JOHNSON.V','JOHNSON.K']
Retrieve two interstellar reddening laws, normalise them to Av and see what
the ratio between Ak and Av is. Since the L{chiar2006} law is not defined
in the optical, this procedure actually doesn't make sense for that law. In the
case of L{cardelli1989}, compute the ratio C{Ak/Av}. Note that you cannot
ask for the L{chiar2006} to be normalised in the visual, since the curve is
not defined there! If you really want to do that anyway, you need to derive
a Ak/Av factor from another curve (e.g. the L{cardelli1989}).
>>> p = pl.figure()
>>> for name,norm in zip(['chiar2006','cardelli1989'],['Ak','Av']):
... wave_,mag_ = get_law(name,wave=wave,norm=norm)
... photwave,photflux = get_law(name,wave=wave,norm=norm,photbands=photbands)
... p = pl.plot(wave_/1e4,mag_,label=name)
... p = pl.plot(photwave/1e4,photflux,'s')
    ...     if name=='cardelli1989': print('Ak/Av = %.3g'%(photflux[1]/photflux[0]))
Ak/Av = 0.114
>>> p = pl.gca().set_xscale('log')
>>> p = pl.gca().set_yscale('log')
>>> p = pl.xlabel('Wavelength [micron]')
>>> p = pl.ylabel('Extinction A($\lambda$)/Av [mag]')
]include figure]]ivs_sed_reddening_02.png]
Compute the Cardelli law normalised to Ak and Av.
>>> p = pl.figure()
>>> wave_av1,mag_av1 = get_law('cardelli1989',wave=wave,norm='Av')
>>> wave_av2,mag_av2 = get_law('cardelli1989',wave=wave,norm='Av',photbands=['JOHNSON.V'])
>>> p = pl.plot(wave_av1,mag_av1,'k-',lw=2,label='Av')
>>> p = pl.plot(wave_av2,mag_av2,'ks')
>>> wave_ak1,mag_ak1 = get_law('cardelli1989',wave=wave,norm='Ak')
>>> wave_ak2,mag_ak2 = get_law('cardelli1989',wave=wave,norm='Ak',photbands=['JOHNSON.K'])
>>> p = pl.plot(wave_ak1,mag_ak1,'r-',lw=2,label='Ak')
>>> p = pl.plot(wave_ak2,mag_ak2,'rs')
>>> p = pl.gca().set_xscale('log')
>>> p = pl.gca().set_yscale('log')
>>> p = pl.xlabel('Wavelength [micron]')
>>> p = pl.ylabel('Extinction A($\lambda$)/A($\mu$) [mag]')
]include figure]]ivs_sed_reddening_03.png]
"""
import os
import numpy as np
import logging
from ivs.inout import ascii
from ivs.aux import loggers
from ivs.aux.decorators import memoized
from ivs.units import conversions
from ivs.sed import filters
from ivs.sed import model
# Module-level logger; the NullHandler keeps the library silent unless the
# application configures logging.
logger = logging.getLogger("SED.RED")
logger.addHandler(loggers.NullHandler())
# Directory containing the bundled reddening-law data files.
basename = os.path.join(os.path.dirname(__file__),'redlaws')
#{ Main interface
def get_law(name,norm='E(B-V)',wave_units='AA',photbands=None,**kwargs):
    """
    Retrieve an interstellar reddening law.
    Parameter C{name} must be the function name of one of the laws defined in
    this module.
    By default, the law will be interpolated on a grid from 100 angstrom to
    10 micron in steps of 10 angstrom. This can be adjusted with the parameter
    C{wave} (array), which B{must} be in angstrom. You can change the units
    ouf the returned wavelength array via C{wave_units}.
    By default, the curve is normalised with respect to E(B-V) (you get
    A(l)/E(B-V)). You can set the C{norm} keyword to Av if you want A(l)/Av.
    Remember that
    A(V) = Rv * E(B-V)
    The parameter C{Rv} is by default 3.1, other reasonable values lie between
    2.0 and 5.1
    Extra accepted keywords depend on the type of reddening law used.
    Example usage:
    >>> wave = np.r_[1e3:1e5:10]
    >>> wave,mag = get_law('cardelli1989',wave=wave,Rv=3.1)
    @param name: name of the interstellar law
    @type name: str, one of the functions defined here
    @param norm: type of normalisation of the curve
    @type norm: str (one of E(B-V), Av)
    @param wave_units: wavelength units
    @type wave_units: str (interpretable for units.conversions.convert)
    @param photbands: list of photometric passbands
    @type photbands: list of strings
    @keyword wave: wavelength array to interpolate the law on
    @type wave: ndarray
    @return: wavelength, reddening magnitude
    @rtype: (ndarray,ndarray)
    """
    #-- get the inputs
    wave_ = kwargs.pop('wave',None)
    Rv = kwargs.setdefault('Rv',3.1)
    #-- get the curve
    wave,mag = globals()[name.lower()](**kwargs)
    #-- keep pristine copies for the normalisation integral below
    wave_orig,mag_orig = wave.copy(),mag.copy()
    #-- BUGFIX: the curve functions are @memoized and can return cached
    #   arrays; the in-place ``mag *= Rv`` / ``mag /= ...`` below would then
    #   corrupt the cache for every later call. Work on a private copy.
    mag = mag.copy()
    #-- interpolate on user defined grid
    if wave_ is not None:
        if wave_units != 'AA':
            wave_ = conversions.convert(wave_units,'AA',wave_)
        #-- outside the curve's range we extrapolate with 0 on the red side
        mag = np.interp(wave_,wave,mag,right=0)
        wave = wave_
    #-- pick right normalisation: convert to A(lambda)/Av if needed
    if norm.lower()=='e(b-v)':
        mag *= Rv
    else:
        #-- we allow ak and av as shortcuts for normalisation in JOHNSON K and
        #   V bands
        if norm.lower()=='ak':
            norm = 'JOHNSON.K'
        elif norm.lower()=='av':
            norm = 'JOHNSON.V'
        norm_reddening = model.synthetic_flux(wave_orig,mag_orig,[norm])[0]
        logger.info('Normalisation via %s: Av/%s = %.6g'%(norm,norm,1./norm_reddening))
        mag /= norm_reddening
    #-- maybe we want the curve in photometric filters
    if photbands is not None:
        mag = model.synthetic_flux(wave,mag,photbands)
        wave = filters.get_info(photbands)['eff_wave']
    #-- set the units of the wavelengths
    #-- BUGFIX: previously this conversion only ran when photbands was given,
    #   so a plain-wavelength call with wave_units != 'AA' returned angstrom
    #   values despite the documented contract.
    if wave_units != 'AA':
        wave = conversions.convert('AA',wave_units,wave)
    return wave,mag
def redden(flux,wave=None,photbands=None,ebv=0.,rtype='flux',law='cardelli1989',**kwargs):
    """
    Redden flux or magnitudes
    The reddening parameters C{ebv} means E(B-V).
    If it is negative, we B{deredden}.
    If you give the keyword C{wave}, it is assumed that you want to (de)redden
    a B{model}, i.e. a spectral energy distribution.
    If you give the keyword C{photbands}, it is assumed that you want to (de)redden
    B{photometry}, i.e. integrated fluxes.
    @param flux: fluxes to (de)redden (magnitudes if C{rtype='mag'})
    @type flux: ndarray (floats)
    @param wave: wavelengths matching the fluxes (or give C{photbands})
    @type wave: ndarray (floats)
    @param photbands: photometry bands matching the fluxes (or give C{wave})
    @type photbands: ndarray of str
    @param ebv: reddening parameter E(B-V)
    @type ebv: float
    @param rtype: type of dereddening (magnitudes or fluxes)
    @type rtype: str ('flux' or 'mag')
    @raise ValueError: if C{rtype} is not 'flux' or 'mag'
    @return: (de)reddened flux/magnitude
    @rtype: ndarray (floats)
    """
    if photbands is not None:
        wave = filters.get_info(photbands)['eff_wave']
    #-- silence numpy warnings while evaluating the law, but ALWAYS restore
    #   the previous error state (the original code leaked the 'ignore'
    #   settings when rtype was unrecognised).
    old_settings = np.seterr(all='ignore')
    try:
        wave, reddeningMagnitude = get_law(law,wave=wave,**kwargs)
        if rtype=='flux':
            #-- In this case flux means really flux
            return flux / 10**(reddeningMagnitude*ebv/2.5)
        elif rtype=='mag':
            #-- In this case flux means actually a magnitude
            return flux + reddeningMagnitude*ebv
        else:
            #-- BUGFIX: previously an unknown rtype silently returned None
            raise ValueError("rtype must be 'flux' or 'mag', not %r"%(rtype,))
    finally:
        np.seterr(**old_settings)
def deredden(flux,wave=None,photbands=None,ebv=0.,rtype='flux',**kwargs):
    """
    Deredden flux or magnitudes.
    Convenience wrapper around L{redden} with the sign of C{ebv} flipped.
    @param flux: fluxes to (de)redden (NOT magnitudes)
    @type flux: ndarray (floats)
    @param wave: wavelengths matching the fluxes (or give C{photbands})
    @type wave: ndarray (floats)
    @param photbands: photometry bands matching the fluxes (or give C{wave})
    @type photbands: ndarray of str
    @param ebv: reddening parameter E(B-V)
    @type ebv: float
    @param rtype: type of dereddening (magnitudes or fluxes)
    @type rtype: str ('flux' or 'mag')
    @return: (de)reddened flux
    @rtype: ndarray (floats)
    """
    #-- dereddening is just reddening by the opposite colour excess
    negative_ebv = -ebv
    return redden(flux,wave=wave,photbands=photbands,ebv=negative_ebv,rtype=rtype,**kwargs)
#}
#{ Curve definitions
@memoized
def chiar2006(Rv=3.1,curve='ism',**kwargs):
    """
    Extinction curve at infrared wavelengths from Chiar and Tielens (2006)
    We return A(lambda)/E(B-V), by multiplying A(lambda)/Av with Rv.
    This is only defined for Rv=3.1. If it is different, this will raise an
    AssertionError
    Extra kwags are to catch unwanted keyword arguments.
    UNCERTAIN NORMALISATION
    @param Rv: Rv
    @type Rv: float
    @param curve: extinction curve
    @type curve: string (one of 'gc' or 'ism', galactic centre or local ISM)
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    source = os.path.join(basename,'Chiar2006.red')
    #-- check Rv: the tabulated curve is only valid for the canonical value
    assert(Rv==3.1)
    # Table columns: wavelength [micron], galactic-centre curve, local-ISM curve.
    wavelengths,gc,ism = ascii.read2array(source).T
    if curve=='gc':
        # NOTE(review): unlike the 'ism' branch, no positivity filter is
        # applied here -- confirm the gc column contains no fill values.
        alam_ak = gc
    elif curve=='ism':
        # Drop fill values (<=0) from the local-ISM column.
        keep = ism>0
        alam_ak = ism[keep]
        wavelengths = wavelengths[keep]
    else:
        raise ValueError('no curve %s'%(curve))
    # Convert A(lambda)/A(K) to A(lambda)/A(V) with A(K)/A(V)=0.09
    # (see 'UNCERTAIN NORMALISATION' above).
    alam_aV = alam_ak * 0.09
    #plot(1/wavelengths,alam_aV,'o-')
    # Wavelengths are tabulated in micron; return angstrom.
    return wavelengths*1e4,alam_aV
@memoized
def fitzpatrick1999(Rv=3.1,**kwargs):
    """
    Interstellar extinction curve from Fitzpatrick 1999 (ASAGIO database).
    The tabulated curve gives A(lambda)/E(B-V); it is divided by Rv so the
    return value is A(lambda)/A(V). Multiply by Rv to recover A(lambda)/E(B-V),
    since A(V) = Rv * E(B-V).
    Extra kwargs are accepted (and ignored) so unwanted keywords do not raise.
    @param Rv: Rv (2.1, 3.1 or 5.0)
    @type Rv: float
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    #-- data files are named e.g. 'Fitzpatrick1999_Rv_3_1.red'
    stem = ('Fitzpatrick1999_Rv_%.1f'%(Rv)).replace('.','_')
    myfile = os.path.join(basename, stem + '.red')
    wave,alam_ebv = ascii.read2array(myfile).T
    logger.info('Fitzpatrick1999 curve with Rv=%.2f'%(Rv))
    #-- convert the tabulated A(lambda)/E(B-V) to A(lambda)/A(V)
    return wave, alam_ebv/Rv
@memoized
def fitzpatrick2004(Rv=3.1,**kwargs):
    """
    Interstellar extinction curve from Fitzpatrick 2004 (FTP download).
    The table lists E(lambda-V)/E(B-V) versus inverse wavelength; this is
    converted to A(lambda)/A(V). Multiply by Rv to get A(lambda)/E(B-V),
    since A(V) = Rv * E(B-V).
    Extra kwargs are accepted (and ignored) so unwanted keywords do not raise.
    @param Rv: Rv (2.1, 3.1 or 5.0)
    @type Rv: float
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    myfile = os.path.join(basename, 'Fitzpatrick2004_Rv_%.1f.red'%(Rv))
    #-- the first 15 lines are a header; columns are 1/lambda [1/micron]
    #   and E(lambda-V)/E(B-V)
    wave_inv,elamv_ebv = ascii.read2array(myfile,skip_lines=15).T
    logger.info('Fitzpatrick2004 curve with Rv=%.2f'%(Rv))
    #-- E(lambda-V)/E(B-V) -> A(lambda)/A(V), and flip so wavelength increases
    alam_av = (elamv_ebv + Rv) / Rv
    return 1e4/wave_inv[::-1], alam_av[::-1]
@memoized
def donnell1994(**kwargs):
    """
    Small improvement on Cardelli 1989 by James E. O'Donnell (1994).
    Thin wrapper around L{cardelli1989} with C{curve='donnell'}; see that
    function for the actual polynomial definitions.
    Extra kwags are to catch unwanted keyword arguments.
    @keyword Rv: Rv
    @type Rv: float
    @keyword wave: wavelengths to compute the curve on
    @type wave: ndarray
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    # NOTE(review): this simply forwards **kwargs (which may include a 'wave'
    # ndarray) through @memoized -- confirm the memoizer handles unhashable
    # keyword values.
    return cardelli1989(curve='donnell',**kwargs)
@memoized
def cardelli1989(Rv=3.1,curve='cardelli',wave=None,**kwargs):
    """
    Construct extinction laws from Cardelli (1989).
    Improvement in optical by James E. O'Donnell (1994)
    wavelengths in Angstrom!
    This function returns A(lambda)/A(V).
    To get A(lambda)/E(B-V), multiply the return value with Rv (A(V)=Rv*E(B-V))
    Extra kwags are to catch unwanted keyword arguments.
    @param Rv: Rv
    @type Rv: float
    @param curve: extinction curve
    @type curve: string (one of 'cardelli' or 'donnell')
    @param wave: wavelengths to compute the curve on
    @type wave: ndarray
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    if wave is None:
        # Default grid: 100 AA to 10 micron in steps of 10 AA.
        wave = np.r_[100.:100000.:10]
    # x = 1/lambda in inverse micron, the variable of the published fits.
    all_x = 1./(wave/1.0e4)
    alam_aV = np.zeros_like(all_x)
    #-- infrared
    infrared = all_x<1.1
    x = all_x[infrared]
    ax = +0.574*x**1.61
    bx = -0.527*x**1.61
    # The law is always A(lambda)/A(V) = a(x) + b(x)/Rv.
    alam_aV[infrared] = ax + bx/Rv
    #-- optical
    optical = (1.1<=all_x) & (all_x<3.3)
    x = all_x[optical]
    y = x-1.82
    if curve=='cardelli':
        # Cardelli, Clayton & Mathis (1989) optical polynomials.
        ax = 1 + 0.17699*y - 0.50447*y**2 - 0.02427*y**3 + 0.72085*y**4 \
            + 0.01979*y**5 - 0.77530*y**6 + 0.32999*y**7
        bx = 1.41338*y + 2.28305*y**2 + 1.07233*y**3 - 5.38434*y**4 \
            - 0.62251*y**5 + 5.30260*y**6 - 2.09002*y**7
    elif curve=='donnell':
        # O'Donnell (1994) revised optical polynomials.
        ax = 1 + 0.104*y - 0.609*y**2 + 0.701*y**3 + 1.137*y**4 \
            - 1.718*y**5 - 0.827*y**6 + 1.647*y**7 - 0.505*y**8
        bx = 1.952*y + 2.908*y**2 - 3.989*y**3 - 7.985*y**4 \
            + 11.102*y**5 + 5.491*y**6 -10.805*y**7 + 3.347*y**8
    else:
        raise ValueError('curve %s not found'%(curve))
    alam_aV[optical] = ax + bx/Rv
    #-- ultraviolet
    ultraviolet = (3.3<=all_x) & (all_x<8.0)
    x = all_x[ultraviolet]
    # The F correction terms only apply for x >= 5.9 (zeroed below).
    Fax = -0.04473*(x-5.9)**2 - 0.009779*(x-5.9)**3
    Fbx = +0.21300*(x-5.9)**2 + 0.120700*(x-5.9)**3
    Fax[x<5.9] = 0
    Fbx[x<5.9] = 0
    ax = +1.752 - 0.316*x - 0.104 / ((x-4.67)**2 + 0.341) + Fax
    bx = -3.090 + 1.825*x + 1.206 / ((x-4.62)**2 + 0.263) + Fbx
    alam_aV[ultraviolet] = ax + bx/Rv
    #-- far UV
    fuv = 8.0<=all_x
    x = all_x[fuv]
    ax = -1.073 - 0.628*(x-8) + 0.137*(x-8)**2 - 0.070*(x-8)**3
    bx = 13.670 + 4.257*(x-8) - 0.420*(x-8)**2 + 0.374*(x-8)**3
    alam_aV[fuv] = ax + bx/Rv
    logger.info('%s curve with Rv=%.2f'%(curve.title(),Rv))
    return wave,alam_aV
@memoized
def seaton1979(Rv=3.1,wave=None,**kwargs):
    """
    Extinction curve from Seaton, 1979.
    This function returns A(lambda)/A(V).
    To get A(lambda)/E(B-V), multiply the return value with Rv (A(V)=Rv*E(B-V))
    Extra kwags are to catch unwanted keyword arguments.
    @param Rv: Rv
    @type Rv: float
    @param wave: wavelengths to compute the curve on
    @type wave: ndarray
    @return: wavelengths (A), A(lambda)/Av
    @rtype: (ndarray,ndarray)
    """
    if wave is None: wave = np.r_[1000.:10000.:10]
    # x = 1/lambda in inverse micron.
    all_x = 1e4/(wave)
    alam_aV = np.zeros_like(all_x)
    #-- far infrared
    # Tabulated values on a grid x = 1.0 .. 2.7 (18 points, matching X_).
    x_ = np.r_[1.0:2.8:0.1]
    X_ = np.array([1.36,1.44,1.84,2.04,2.24,2.44,2.66,2.88,3.14,3.36,3.56,3.77,3.96,4.15,4.26,4.40,4.52,4.64])
    fir = all_x<=2.7
    # all_x decreases with wave, so flip for np.interp (needs increasing xp).
    alam_aV[fir] = np.interp(all_x[fir][::-1],x_,X_,left=0)[::-1]
    #-- infrared
    infrared = (2.70<=all_x) & (all_x<3.65)
    x = all_x[infrared]
    alam_aV[infrared] = 1.56 + 1.048*x + 1.01 / ( (x-4.60)**2 + 0.280)
    #-- optical
    optical = (3.65<=all_x) & (all_x<7.14)
    x = all_x[optical]
    alam_aV[optical] = 2.29 + 0.848*x + 1.01 / ( (x-4.60)**2 + 0.280)
    #-- ultraviolet
    ultraviolet = (7.14<=all_x) & (all_x<=10)
    x = all_x[ultraviolet]
    alam_aV[ultraviolet] = 16.17 - 3.20*x + 0.2975*x**2
    logger.info('Seaton curve with Rv=%.2f'%(Rv))
    # The fits above give A(lambda)/E(B-V); divide by Rv for A(lambda)/A(V).
    return wave,alam_aV/Rv
#}
if __name__=="__main__":
    # Run the doctests in the module docstring (they build the example
    # figures) and display them.
    import doctest
    import pylab as pl
    doctest.testmod()
    pl.show()
|
IvS-KULeuvenREPO_NAMEIvSPythonRepositoryPATH_START.@IvSPythonRepository_extracted@IvSPythonRepository-master@sed@reddening.py@.PATH_END.py
|
{
"filename": "classification.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/tutorials/classification.ipynb",
"type": "Jupyter Notebook"
}
|
---
title: Tagging
sidebar_class_name: hidden
---
[](https://colab.research.google.com/github/langchain-ai/langchain/blob/master/docs/docs/use_cases/tagging.ipynb)
# Classify Text into Labels
Tagging means labeling a document with classes such as:
- Sentiment
- Language
- Style (formal, informal etc.)
- Covered topics
- Political tendency

## Overview
Tagging has a few components:
* `function`: Like [extraction](/docs/tutorials/extraction), tagging uses [functions](https://openai.com/blog/function-calling-and-other-api-updates) to specify how the model should tag a document
* `schema`: defines how we want to tag the document
## Quickstart
Let's see a very straightforward example of how we can use OpenAI tool calling for tagging in LangChain. We'll use the [`with_structured_output`](/docs/how_to/structured_output) method supported by OpenAI models.
```python
%pip install --upgrade --quiet langchain-core
```
We'll need to load a [chat model](/docs/integrations/chat/):
import ChatModelTabs from "@theme/ChatModelTabs";
<ChatModelTabs customVarName="llm" />
```python
# | output: false
# | echo: false
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-4o-mini")
```
Let's specify a Pydantic model with a few properties and their expected type in our schema.
```python
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, Field
tagging_prompt = ChatPromptTemplate.from_template(
"""
Extract the desired information from the following passage.
Only extract the properties mentioned in the 'Classification' function.
Passage:
{input}
"""
)
class Classification(BaseModel):
sentiment: str = Field(description="The sentiment of the text")
aggressiveness: int = Field(
description="How aggressive the text is on a scale from 1 to 10"
)
language: str = Field(description="The language the text is written in")
# LLM
llm = ChatOpenAI(temperature=0, model="gpt-4o-mini").with_structured_output(
Classification
)
```
```python
inp = "Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!"
prompt = tagging_prompt.invoke({"input": inp})
response = llm.invoke(prompt)
response
```
Classification(sentiment='positive', aggressiveness=1, language='Spanish')
If we want dictionary output, we can just call `.dict()`
```python
inp = "Estoy muy enojado con vos! Te voy a dar tu merecido!"
prompt = tagging_prompt.invoke({"input": inp})
response = llm.invoke(prompt)
response.dict()
```
{'sentiment': 'enojado', 'aggressiveness': 8, 'language': 'es'}
As we can see in the examples, it correctly interprets what we want.
The results vary so that we may get, for example, sentiments in different languages ('positive', 'enojado' etc.).
We will see how to control these results in the next section.
## Finer control
Careful schema definition gives us more control over the model's output.
Specifically, we can define:
- Possible values for each property
- Description to make sure that the model understands the property
- Required properties to be returned
Let's redeclare our Pydantic model to control for each of the previously mentioned aspects using enums:
```python
class Classification(BaseModel):
sentiment: str = Field(..., enum=["happy", "neutral", "sad"])
aggressiveness: int = Field(
...,
description="describes how aggressive the statement is, the higher the number the more aggressive",
enum=[1, 2, 3, 4, 5],
)
language: str = Field(
..., enum=["spanish", "english", "french", "german", "italian"]
)
```
```python
tagging_prompt = ChatPromptTemplate.from_template(
"""
Extract the desired information from the following passage.
Only extract the properties mentioned in the 'Classification' function.
Passage:
{input}
"""
)
llm = ChatOpenAI(temperature=0, model="gpt-4o-mini").with_structured_output(
Classification
)
```
Now the answers will be restricted in a way we expect!
```python
inp = "Estoy increiblemente contento de haberte conocido! Creo que seremos muy buenos amigos!"
prompt = tagging_prompt.invoke({"input": inp})
llm.invoke(prompt)
```
Classification(sentiment='positive', aggressiveness=1, language='Spanish')
```python
inp = "Estoy muy enojado con vos! Te voy a dar tu merecido!"
prompt = tagging_prompt.invoke({"input": inp})
llm.invoke(prompt)
```
Classification(sentiment='enojado', aggressiveness=8, language='es')
```python
inp = "Weather is ok here, I can go outside without much more than a coat"
prompt = tagging_prompt.invoke({"input": inp})
llm.invoke(prompt)
```
Classification(sentiment='neutral', aggressiveness=1, language='English')
The [LangSmith trace](https://smith.langchain.com/public/38294e04-33d8-4c5a-ae92-c2fe68be8332/r) lets us peek under the hood:

### Going deeper
* You can use the [metadata tagger](/docs/integrations/document_transformers/openai_metadata_tagger) document transformer to extract metadata from a LangChain `Document`.
* This covers the same basic functionality as the tagging chain, only applied to a LangChain `Document`.
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@tutorials@classification.ipynb@.PATH_END.py
|
{
"filename": "_size.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolargl/marker/colorbar/title/font/_size.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated plotly validator for the ``size`` property of
    ``scatterpolargl.marker.colorbar.title.font`` (a number >= 1).
    """
    def __init__(
        self,
        plotly_name="size",
        parent_name="scatterpolargl.marker.colorbar.title.font",
        **kwargs
    ):
        # pop() lets callers override the defaults while keeping the keys out
        # of the pass-through keyword set handed to the base validator.
        super(SizeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 1),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolargl@marker@colorbar@title@font@_size.py@.PATH_END.py
|
{
"filename": "gadget.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/io/gadget.py",
"type": "Python"
}
|
import struct
import numpy
from collections import namedtuple
from amuse.io import base
from amuse.units import units
from amuse.units import nbody_system
from amuse.support.core import late
from amuse import datamodel
class GadgetFileFormatProcessor(base.FortranFileFormatProcessor):
    """
    Process a Gadget binary data file
    """
    provided_formats = ['gadget']
    # Particle type indices into the header's Npart/Massarr/Nall arrays,
    # in standard gadget ordering.
    GAS = 0
    HALO = 1
    DISK = 2
    BULGE = 3
    STARS = 4
    BNDRY = 5
@late
def header_format(self):
collate = []
collate.append(self.endianness)
for name, times, formatchar in self.header_struct_format:
for t in range(times):
collate.append(formatchar)
return ''.join(collate)
    @base.format_option
    def header_struct_format(self):
        """The format of the header structure of the gadget file.

        Each entry is ``(field_name, element_count, struct_format_char)``;
        see ``header_format`` for how these are concatenated.
        """
        # NOTE(review): 'FlagAge' and 'FlagMetals' are read as doubles ('d')
        # here -- confirm against the gadget files this reader targets.
        return (
            ('Npart', 6, 'I'),
            ('Massarr', 6, 'd'),
            ('Time', 1, 'd'),
            ('Redshift', 1, 'd'),
            ('FlagSfr', 1, 'i'),
            ('FlagFeedback', 1, 'i'),
            ('Nall', 6, 'i'),
            ('FlagCooling', 1, 'i'),
            ('NumFiles', 1, 'i'),
            ('BoxSize', 1, 'd'),
            ('Omega0', 1, 'd'),
            ('OmegaLambda', 1, 'd'),
            ('HubbleParam', 1, 'd'),
            ('FlagAge', 1, 'd'),
            ('FlagMetals', 1, 'd'),
            ('NallHW', 6, 'i'),
            ('flag_entr_ics', 1, 'i'),
        )
    @base.format_option
    def has_potential_energy(self):
        """Set to true if the file has a potential energy block."""
        return False
    @base.format_option
    def has_acceleration(self):
        """Set to true if the file has an acceleration block."""
        return False
    @base.format_option
    def has_rate_of_entropy_production(self):
        """Set to true if the file has a block with the
        rate of change of the entropic function of each gas particle.
        """
        return False
    @base.format_option
    def has_timestep(self):
        """Set to true if the file has a block with the
        individual timestep for each particle.
        """
        return False
    @base.format_option
    def is_initial_conditions_format(self):
        """Set to true if the file contains
        initial conditions. An initial conditions
        gadget file contains less data.
        """
        return True
    @base.format_option
    def ids_are_keys(self):
        """Set to True if the file contains
        correct keys. Set to False to generate
        the keys in amuse and provide an id attribute for
        the id's in the gadget file."""
        return True
    @base.format_option
    def equal_mass_array(self):
        """If filled with an array with masses > 0.0
        assume equal mass for the corresponding set."""
        # One entry per gadget particle type, in nbody mass units.
        return ([0.0] * 6) | nbody_system.mass
    @base.format_option
    def ids_are_long(self):
        """Set to true and the ids will be written
        as longs in the gadget file."""
        return True
    @base.format_option
    def return_header(self):
        """Set to True to return both the particles and the header from the gadget file."""
        return False
    @base.format_option
    def write_header_from(self):
        """Pass a namedtuple to store non-default values in the header of the gadget file."""
        return None
@base.format_option
def convert_gadget_w_to_velocity(self):
    """Set to True to convert the w=sqrt(a)*dx/dt to (comoving) velocity in comoving integrations."""
    return False
@late
def header_size(self):
    # Byte size of the packed header, derived from the struct format string.
    return struct.calcsize(self.header_format)
@late
def struct_class_name(self):
    # Name of the generated namedtuple class holding the header fields.
    return 'GadgetHeader'
@late
def struct_class(self):
    """Named-tuple class whose fields mirror the header layout.

    Field order must follow ``header_struct_format`` so values unpacked
    from the binary header line up with their names.
    """
    field_names = ' '.join(
        name for name, times, formatchar in self.header_struct_format
    )
    return namedtuple(self.struct_class_name, field_names)
@late
def total_number_of_particles(self):
    # Sum of the per-type particle counts from the header.
    return sum(self.header_struct.Npart)
@late
def total_number_of_particles_with_variable_masses(self):
    """Number of particles whose mass is stored per particle.

    A gadget type stores individual masses exactly when its Massarr
    entry is 0.0; those types contribute their Npart count.
    """
    return sum(
        count
        for group_mass, count in zip(self.header_struct.Massarr,
                                     self.header_struct.Npart)
        if group_mass == 0.0
    )
@late
def number_of_gas_particles(self):
    # Gas particle count, read from the Npart slot for the GAS type.
    return self.header_struct.Npart[self.GAS]
def collect_values(self, values):
    """Group the flat tuple produced by ``struct.unpack`` into per-field values.

    Fields declared with multiplicity > 1 in ``header_struct_format``
    become slices of *values*; scalar fields are taken as-is.
    """
    grouped = []
    cursor = 0
    for name, times, formatchar in self.header_struct_format:
        if times > 1:
            grouped.append(values[cursor:cursor + times])
        else:
            grouped.append(values[cursor])
        cursor += times
    return grouped
def load_header(self, file):
    """Read the leading Fortran block of *file* and parse it into ``header_struct``."""
    raw_header = self.read_fortran_block(file)
    unpacked = struct.unpack(self.header_format, raw_header[:self.header_size])
    self.header_struct = self.struct_class(*self.collect_values(unpacked))
def load_body(self, file):
    """Read the particle data blocks that follow the header.

    Block order (matching the gadget layout written by store_body):
    positions, velocities, ids, optional masses, optional gas internal
    energy, then — only for full snapshots — density, hsml and the
    optional diagnostic blocks enabled by the format options.
    """
    self.positions = self.read_fortran_block_float_vectors(file)
    self.velocities = self.read_fortran_block_float_vectors(file)
    # Id width is inferred from the block size: 4 bytes/particle means
    # unsigned ints, otherwise unsigned longs.
    id_bytes = self.read_fortran_block(file)
    if len(id_bytes) == 4*self.total_number_of_particles:
        self.ids = numpy.frombuffer(id_bytes, dtype=self.uint_type)
    else:
        self.ids = numpy.frombuffer(id_bytes, dtype=self.ulong_type)
    # A mass block is present only when some type stores per-particle masses.
    if self.total_number_of_particles_with_variable_masses > 0:
        self.masses = self.read_fortran_block_floats(file)
    else:
        self.masses = None
    # Internal energy exists only for gas particles.
    if self.number_of_gas_particles > 0:
        self.u = self.read_fortran_block_floats(file)[:self.number_of_gas_particles]
        #print self.u, self.number_of_gas_particles, len(self.u)
    else:
        self.u = None
    if self.is_initial_conditions_format:
        # IC files end here; clear the snapshot-only attributes.
        self.density = None
        self.hsml = None
        self.pot = None
        self.acc = None
        self.da_dt = None
        self.dt = None
        return
    if self.number_of_gas_particles > 0:
        self.density = self.read_fortran_block_floats(file)
        self.hsml = self.read_fortran_block_floats(file)
    else:
        self.density = None
        self.hsml = None
    # Optional diagnostic blocks, gated by the corresponding format options.
    if self.has_potential_energy:
        self.pot = self.read_fortran_block_floats(file)
    else:
        self.pot = None
    if self.has_acceleration:
        self.acc = self.read_fortran_block_floats(file)
    else:
        self.acc = None
    if self.has_rate_of_entropy_production:
        self.da_dt = self.read_fortran_block_floats(file)
    else:
        self.da_dt = None
    if self.has_timestep:
        self.dt = self.read_fortran_block_floats(file)
    else:
        self.dt = None
def new_sets_from_arrays(self):
    """Convert the raw arrays read by load_body into one AMUSE particle set
    per gadget type, attaching all attributes that were present in the file.
    """
    # Split the flat id array into one chunk per particle type.
    offset = 0
    ids_per_set = []
    for x in self.header_struct.Npart:
        ids_per_set.append(self.ids[offset:offset+x])
        offset += x
    if self.ids_are_keys:
        # File ids become the AMUSE particle keys directly.
        sets = [datamodel.Particles(len(x), keys=x) for x in ids_per_set]
    else:
        # Fresh keys are generated; the file ids are kept as an `id` attribute.
        sets = [datamodel.Particles(len(x)) for x in ids_per_set]
        for set, x in zip(sets, ids_per_set):
            if len(set) > 0:
                set.id = x
    # Per-particle vectors are consumed from the flat arrays in type order.
    offset = 0
    for x in sets:
        length = len(x)
        if length == 0:
            continue
        x.position = nbody_system.length.new_quantity(self.positions[offset:offset+length])
        x.velocity = nbody_system.speed.new_quantity(self.velocities[offset:offset+length])
        if self.convert_gadget_w_to_velocity:
            # Comoving runs store w = sqrt(a)*dx/dt; rescale to velocity.
            x.velocity *= numpy.sqrt(1.0 + self.header_struct.Redshift)
        if not self.pot is None:
            x.potential_energy = nbody_system.energy.new_quantity(self.pot[offset:offset+length])
        if not self.acc is None:
            x.acceleration = nbody_system.acceleration.new_quantity(self.acc[offset:offset+length])
        if not self.dt is None:
            x.timestep = nbody_system.time.new_quantity(self.dt[offset:offset+length])
        offset += length
    # Masses: a type with Massarr == 0 consumes per-particle entries from
    # the mass block; otherwise all its particles share the Massarr value.
    offset = 0
    for x, mass in zip(sets, self.header_struct.Massarr):
        length = len(x)
        if length == 0:
            continue
        if mass == 0.0:
            x.mass = nbody_system.mass.new_quantity(self.masses[offset:offset+length])
            offset += length
        else:
            x.mass = nbody_system.mass.new_quantity(mass)
    # Gas-only attributes: internal energy, density, smoothing length.
    if self.number_of_gas_particles > 0:
        gas_set = sets[self.GAS]
        # Internal energy per unit mass: (length/time)^2.
        unit = (nbody_system.length / nbody_system.time) ** 2
        gas_set.u = unit.new_quantity(self.u)
        unit = nbody_system.mass / nbody_system.length ** 3
        if not self.density is None:
            gas_set.rho = unit.new_quantity(self.density)
        if not self.hsml is None:
            gas_set.h_smooth = nbody_system.length.new_quantity(self.hsml)
    return sets
def load_file(self, file):
    """Read a complete gadget file and return a 'GadgetData' namedtuple of the
    six particle sets, plus every header field when return_header is enabled.
    """
    self.load_header(file)
    self.load_body(file)
    attribute_names = ["gas","halo","disk","bulge","stars","bndry"]
    values = self.new_sets_from_arrays()
    if self.return_header:
        # Append the header fields after the particle sets, in header order.
        attribute_names += [name for name, times, formatchar in self.header_struct_format]
        values += list(self.header_struct)
    return namedtuple("GadgetData", attribute_names)(*values)
@late
def sets_to_save(self):
    """Normalize ``self.set`` into a sequence of 6 particle groups,
    padding with empty tuples so every gadget type is represented.
    """
    # Accept either a list/tuple of particle sets or a single particle set
    # (detected via its `key` attribute).
    if isinstance(self.set, (tuple, list)):
        sets_to_save = self.set
    elif hasattr(self.set, 'key'):
        sets_to_save = (self.set, )
    else:
        raise Exception("The Gadget binary file writer can write a particle set or a list of sets but nothing else")
    if len(sets_to_save) < 6:
        sets_to_save = list(sets_to_save)
        sets_to_save.extend([()] * (6 - len(sets_to_save)))
    return sets_to_save
def store_file(self, file):
    """Write a complete gadget file: the header block, then the body blocks."""
    self.store_header(file)
    self.store_body(file)
def header_field_value(self, name):
    """Value for header field *name*: taken from ``write_header_from``
    when that object defines it, otherwise the stored default.
    """
    source = self.write_header_from
    if hasattr(source, name):
        return getattr(source, name)
    return self.default_header_field_value(name)
def default_header_field_value(self, name):
    # Fallback header values; `default_header` is prepared by store_header.
    return self.default_header[name]
def store_header(self, file):
    """Build ``header_struct`` from defaults and ``write_header_from``,
    pack it into a fixed 256-byte buffer and write it as the first
    Fortran block of *file*.
    """
    # Defaults used for any field not supplied via write_header_from.
    self.default_header = dict(
        Massarr = list(self.equal_mass_array.value_in(nbody_system.mass)),
        Time = 0.0,
        Redshift = 0.0,
        FlagSfr = 0,
        FlagFeedback = 0,
        Nall = [len(x) for x in self.sets_to_save],
        FlagCooling = 0,
        NumFiles = 1,
        BoxSize = 0,
        Omega0 = 0,
        OmegaLambda = 0,
        HubbleParam = 0,
        FlagAge = 0.0,
        FlagMetals = 0.0,
        NallHW = [0]*6,
        flag_entr_ics = 0
    )
    if self.write_header_from is None:
        # No user-supplied header: short-circuit field lookup to the defaults.
        self.header_field_value = self.default_header_field_value
    self.header_struct = self.struct_class(
        Npart = [len(x) for x in self.sets_to_save],
        Massarr = list(self.header_field_value("Massarr")),
        Time = self.header_field_value("Time"),
        Redshift = self.header_field_value("Redshift"),
        FlagSfr = self.header_field_value("FlagSfr"),
        FlagFeedback = self.header_field_value("FlagFeedback"),
        Nall = list(self.header_field_value("Nall")),
        FlagCooling = self.header_field_value("FlagCooling"),
        NumFiles = self.header_field_value("NumFiles"),
        BoxSize = self.header_field_value("BoxSize"),
        Omega0 = self.header_field_value("Omega0"),
        OmegaLambda = self.header_field_value("OmegaLambda"),
        HubbleParam = self.header_field_value("HubbleParam"),
        FlagAge = self.header_field_value("FlagAge"),
        FlagMetals = self.header_field_value("FlagMetals"),
        NallHW = list(self.header_field_value("NallHW")),
        flag_entr_ics = self.header_field_value("flag_entr_ics")
    )
    # Flatten list-valued fields in order and pack into a 256-byte header.
    # (Renamed from `bytes`, which shadowed the builtin.)
    header_bytes = bytearray(256)
    parts = []
    for value in list(self.header_struct):
        if isinstance(value, list):
            parts.extend(value)
        else:
            parts.append(value)
    struct.pack_into(self.header_format, header_bytes, 0, *parts)
    self.write_fortran_block(file, header_bytes)
def _pack_sets(self, attributename, unit = None):
result = []
for x in self.sets_to_save:
if len(x) > 0:
if unit is None:
result.extend(getattr(x,attributename))
else:
result.extend(getattr(x,attributename).value_in(unit))
return result
def store_body(self, file):
    """Write the particle data blocks (positions, velocities, ids, masses,
    gas internal energy and the optional snapshot blocks) to *file*,
    honoring the configured format options.
    """
    self.positions = self._pack_sets('position', nbody_system.length)
    self.velocities = self._pack_sets('velocity', nbody_system.speed)
    if self.ids_are_keys:
        self.ids = self._pack_sets('key')
    else:
        self.ids = self._pack_sets('id')
    # Explicit per-particle masses are stored only for groups whose
    # equal-mass entry is not positive.
    self.masses = []
    for equal_mass, x in zip(self.equal_mass_array, self.sets_to_save):
        if len(x) > 0 and not equal_mass > (0.0 | nbody_system.mass):
            self.masses.extend(x.mass.value_in(nbody_system.mass))
    if len(self.masses) == 0:
        self.masses = None
    # NOTE(review): gas attributes below are read from sets_to_save[0] while
    # the count uses sets_to_save[self.GAS]; this assumes GAS == 0 — confirm.
    number_of_gas_particles = len(self.sets_to_save[self.GAS])
    if number_of_gas_particles > 0:
        self.u = self.sets_to_save[0].u.value_in(nbody_system.potential)
    else:
        self.u = None
    self.write_fortran_block_float_vectors(file, self.positions)
    self.write_fortran_block_float_vectors(file, self.velocities)
    if self.ids_are_long:
        self.write_fortran_block_ulongs(file, self.ids)
    else:
        self.write_fortran_block_uints(file, self.ids)
    if not self.masses is None:
        self.write_fortran_block_floats(file, self.masses)
    if not self.u is None:
        self.write_fortran_block_floats(file, self.u)
    if self.is_initial_conditions_format:
        # Initial-conditions files end after the internal-energy block.
        return
    if number_of_gas_particles > 0:
        self.density = self.sets_to_save[0].rho.value_in(nbody_system.density)
        self.hsml = self.sets_to_save[0].h_smooth.value_in(nbody_system.length)
    else:
        self.density = None
        self.hsml = None
    if self.has_potential_energy:
        self.pot = self._pack_sets('potential_energy', nbody_system.energy)
    else:
        self.pot = None
    if self.has_acceleration:
        self.acc = self._pack_sets('acceleration', nbody_system.acceleration)
    else:
        self.acc = None
    # NOTE(review): both branches assign None, so no rate-of-entropy block is
    # ever written even when has_rate_of_entropy_production is set — verify
    # whether this is intentional before changing it.
    if self.has_rate_of_entropy_production:
        self.da_dt = None
    else:
        self.da_dt = None
    if self.has_timestep:
        self.dt = self._pack_sets('timestep', nbody_system.time)
    else:
        self.dt = None
    if not self.density is None:
        self.write_fortran_block_floats(file, self.density)
    if not self.hsml is None:
        self.write_fortran_block_floats(file, self.hsml)
    if not self.pot is None:
        self.write_fortran_block_floats(file, self.pot)
    if not self.acc is None:
        self.write_fortran_block_floats(file, self.acc)
    if not self.da_dt is None:
        self.write_fortran_block_floats(file, self.da_dt)
    if not self.dt is None:
        self.write_fortran_block_floats(file, self.dt)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@io@gadget.py@.PATH_END.py
|
{
"filename": "test_cascade.py",
"repo_name": "ChrisBeaumont/brut",
"repo_path": "brut_extracted/brut-master/bubbly/test/test_cascade.py",
"type": "Python"
}
|
from sklearn.datasets import make_moons
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import recall_score
import numpy as np
import pytest
from ..cascade import CascadedBooster, _recall_bias, _fpos_bias
from ..util import false_pos
def test_recall_bias():
    """_recall_bias must choose a cutoff that keeps at least `thresh` of scores."""
    scores = np.arange(5)
    for threshold in (0, .1, .2, .21, .5, 1):
        cutoff = _recall_bias(scores, threshold)
        assert (scores >= cutoff).mean() >= threshold
def test_fpos_bias():
    """_fpos_bias must choose a cutoff that keeps at most `thresh` of scores."""
    scores = np.arange(5)
    for threshold in (0, .1, .2, .21, .5, 1):
        cutoff = _fpos_bias(scores, threshold, 5)
        assert (scores >= cutoff).mean() <= threshold
class TestCascade(object):
    """Integration tests for CascadedBooster on the two-moons dataset."""

    def setup_method(self, method):
        base = GradientBoostingClassifier(n_estimators=2)
        self.clf = CascadedBooster(base_clf=base)
        n_samples = 500
        np.random.seed(42)
        X, Y = make_moons(n_samples=n_samples, noise=.05)
        self.X = X
        self.Y = Y
        self.clf.fit(X, Y)

    def teardown_method(self, method):
        # Every test must leave the classifier satisfying its invariants.
        self.check_clf(self.clf, self.X, self.Y)

    def test_multi_layer(self):
        assert len(self.clf.estimators_) > 1

    def check_recall_constraints(self, clf, x, y):
        # recall is at least as good as required at every stage
        recall_i = clf.layer_recall
        for i, yp in enumerate(clf.staged_predict(x), 1):
            assert recall_score(y, yp) >= (recall_i ** i)
        if len(clf.estimators_) > 0:
            np.testing.assert_array_equal(yp, clf.predict(x))

    def check_fpos_constraints(self, clf, x, y):
        # if classifier converged, false pos rate on training data <= false_pos
        if clf.converged:
            assert false_pos(y, clf.predict(x)) <= clf.false_pos

    def check_staged_decision(self, clf, x, y):
        for i, yp in enumerate(clf.staged_decision_function(x)):
            good = yp > 0
            # df for positive examples == df for last stage
            expect = clf.estimators_[i].decision_function(x[good]) - clf.bias_[i]
            np.testing.assert_array_equal(yp[good].ravel(), expect.ravel())
        if len(clf.estimators_) > 0:
            np.testing.assert_array_equal(clf.decision_function(x), yp)

    def check_staged_predict(self, clf, x, y):
        # staged predict and decision function are consistent
        # (itertools.izip was removed in Python 3; the builtin zip is lazy there)
        for df, yp in zip(clf.staged_decision_function(x),
                          clf.staged_predict(x)):
            np.testing.assert_array_equal((df >= 0).ravel(), (yp > 0).ravel())

    def check_clf(self, clf, x, y):
        self.check_recall_constraints(clf, x, y)
        self.check_fpos_constraints(clf, x, y)
        self.check_staged_decision(clf, x, y)
        self.check_staged_predict(clf, x, y)

    def test_recall_stages(self):
        self.check_recall_constraints(self.clf, self.X, self.Y)
        self.clf.layer_recall = 1.0
        self.clf.fit(self.X, self.Y)
        self.check_recall_constraints(self.clf, self.X, self.Y)
        assert recall_score(self.clf.predict(self.X), self.Y) == 1

    def test_fit_converge(self):
        self.clf.false_pos = 0.5
        self.clf.fit(self.X, self.Y)
        assert self.clf.converged

    def test_fit_maxiter(self):
        self.clf.false_pos = 0
        self.clf.layer_recall = .6
        self.clf.max_layers = 4
        np.random.shuffle(self.Y)
        self.clf.fit(self.X, self.Y)
        assert not self.clf.converged
        assert len(self.clf.bias_) == 4

    def test_impossible_filter(self):
        self.clf.layer_recall = 1
        self.clf.false_pos = 0
        np.random.shuffle(self.Y)
        self.clf.fit(self.X, self.Y)
        assert not self.clf.converged
        assert len(self.clf.bias_) < 4

    def test_separable_fit(self):
        self.clf.layer_recall = .5
        self.clf.false_pos = .5
        self.X = np.arange(10).reshape(-1, 1)
        # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
        self.Y = (self.X.ravel() > 5).astype(int)
        self.clf.fit(self.X, self.Y)
        assert self.clf.converged
        assert len(self.clf.estimators_) == 1
        assert recall_score(self.Y, self.clf.predict(self.X)) == 1
        assert false_pos(self.Y, self.clf.predict(self.X)) == 0

    def test_add_layer(self):
        recall = recall_score(self.Y, self.clf.predict(self.X))
        fpos = false_pos(self.Y, self.clf.predict(self.X))
        n_layers = len(self.clf.estimators_)
        self.clf.add_cascade_layer(self.X, self.Y)
        assert len(self.clf.estimators_) == n_layers + 1
        yp = self.clf.predict(self.X)
        assert false_pos(self.Y, yp) <= fpos
        assert recall_score(self.Y, yp) >= recall * self.clf.layer_recall

    def test_predict_before_fit(self):
        clf = CascadedBooster()
        for func in [clf.staged_predict,
                     clf.staged_decision_function,
                     clf.predict,
                     clf.decision_function,
                     ]:
            # Pass real data so the only expected failure is the "not fitted"
            # ValueError; calling func() with no arguments raised a TypeError,
            # which pytest.raises(ValueError) would not accept.
            with pytest.raises(ValueError):
                func(self.X)

    def _assert_clones(self, clf, clf2):
        assert clf2.layer_recall == clf.layer_recall
        assert clf2.false_pos == clf.false_pos
        assert clf2.max_layers == clf.max_layers
        assert clf2.verbose == clf.verbose
        assert clf.base_clf is not clf2.base_clf
        assert isinstance(clf2.base_clf, type(clf.base_clf))

    def test_clone(self):
        from sklearn.base import clone
        clf = CascadedBooster(layer_recall=0.33, false_pos=0.12,
                              max_layers=99, verbose=5)
        clf2 = clone(clf)
        self._assert_clones(clf, clf2)
        assert not hasattr(clf2, 'estimators_')
        assert not hasattr(clf2, 'bias_')

    def test_pickle(self):
        # cPickle is Python 2 only; on Python 3 pickle uses the C
        # implementation automatically.
        from pickle import dumps, loads
        clf2 = loads(dumps(self.clf))
        np.testing.assert_array_equal(self.clf.decision_function(self.X),
                                      clf2.decision_function(self.X))
        self._assert_clones(clf2, self.clf)

    def test_pop_layer(self):
        l = len(self.clf.estimators_)
        self.clf.pop_cascade_layer()
        assert len(self.clf.estimators_) == l - 1
        assert len(self.clf.bias_) == l - 1

    def test_pop_empty(self):
        l = len(self.clf.estimators_)
        for i in range(l):
            self.clf.pop_cascade_layer()
        with pytest.raises(IndexError):
            self.clf.pop_cascade_layer()
        with pytest.raises(IndexError):
            clf = CascadedBooster()
            clf.pop_cascade_layer()
|
ChrisBeaumontREPO_NAMEbrutPATH_START.@brut_extracted@brut-master@bubbly@test@test_cascade.py@.PATH_END.py
|
{
"filename": "line_intensity.py",
"repo_name": "sambit-giri/tools21cm",
"repo_path": "tools21cm_extracted/tools21cm-master/src/tools21cm/line_intensity.py",
"type": "Python"
}
|
import numpy as np
from glob import glob
import astropy
from astropy import constants, units
from astropy.cosmology import Planck18
from .nbody_file import halo_list_to_grid
def M_HI_on_grid(z, mass, pos_xyz, box_dim, n_grid, cosmo=None,
            recipe='Padmanabhan2017', **kwargs):
    """
    Assign HI masses to dark matter halos and distribute them on a grid.

    Parameters
    ----------
    z : float
        Redshift.
    mass : ndarray
        Mass of halos.
    pos_xyz : ndarray
        Positions of the halos.
    box_dim : float
        Length of the simulation box in each direction.
    n_grid : int
        Number of grid cells along each direction.
    cosmo : astropy.cosmology.Cosmology, optional
        Cosmology object. If None, assumes Planck18 cosmology.
    recipe : str, optional
        The recipe to use for mapping the halo masses to the HI signal. Implemented recipes:
        - 'Padmanabhan2017' (1611.06235). Default is 'Padmanabhan2017'.
    **kwargs : dict, optional
        Additional parameters for the specific recipe.

    Returns
    ----------
    lim_map : ndarray
        The HI mass distributed on a grid of shape (n_grid, n_grid, n_grid),
        in Msun.
    """
    assert recipe.lower() in ['padmanabhan2017']
    if cosmo is None:
        print('Assuming Planck18 cosmology.')
        cosmo = Planck18
    Ob = cosmo.Ob0
    Om = cosmo.Om0
    hlittle = cosmo.h
    if isinstance(mass, units.quantity.Quantity):
        mhalo = mass.to('Msun')
    else:
        print('The provided halo mass is assumed to be in Msun units.')
        mhalo = mass*units.Msun
    # Bug fix: this branch previously tested `mass` instead of `pos_xyz`,
    # so positions were mis-handled whenever the masses carried units.
    if isinstance(pos_xyz, units.quantity.Quantity):
        srcpos = pos_xyz.to('Mpc')
    else:
        print('The provided positions are assumed to be in Mpc units.')
        srcpos = pos_xyz*units.Mpc
    if not isinstance(box_dim, units.quantity.Quantity):
        box_dim = box_dim*units.Mpc
    if recipe.lower() in ['padmanabhan2017']:
        # HI-halo relation of Padmanabhan & Refregier (arXiv:1611.06235).
        Y_p = kwargs.get('Y_p', 0.249)
        alpha = kwargs.get('alpha', 0.9)
        v_c0 = kwargs.get('v_c0', 10**1.56*units.km/units.s)
        beta = kwargs.get('beta', -0.58)
        fHc = (1-Y_p)*Ob/Om
        Delc = 178
        # Halo circular velocity as a function of mass and redshift.
        v_c = lambda Mh,z: 96.6*units.km/units.s*(Delc*Om/54.4)**(1/6)*((1+z)/3.3)**(1/2)*(Mh/(1e11*units.Msun))**(1/3)
        v_c_Mh = v_c(mhalo,z)
        M_hi = lambda Mh: alpha*fHc*Mh*(Mh.to('Msun').value/hlittle/1e11)**beta*np.exp(-(v_c0/v_c_Mh)**3)
        mhi_msun = M_hi(mhalo).to('Msun').value
        binned_mhi, bin_edges, bin_num = halo_list_to_grid(mhi_msun, srcpos, box_dim, n_grid)
        return binned_mhi*units.Msun
def L_CII_on_grid(z, mass, pos_xyz, box_dim, n_grid, SFR=None, cosmo=None,
            recipe='Silva2015m1', **kwargs):
    """
    Assign CII line luminosities to dark matter halos and distribute them on a grid.

    Parameters
    ----------
    z : float
        Redshift.
    mass : ndarray
        Mass of halos.
    pos_xyz : ndarray
        Positions of the halos.
    box_dim : float
        Length of the simulation box in each direction.
    n_grid : int
        Number of grid cells along each direction.
    SFR : function or dict
        A function taking halo mass and redshift to give the star formation rate
        or a dictionary containing parameters values for the provided recipe.
    cosmo : astropy.cosmology.Cosmology, optional
        Cosmology object. If None, assumes Planck18 cosmology.
    recipe : str, optional
        The recipe to use for mapping the halo masses to the CII signal. Implemented recipes:
        - 'Silva2015m1' (1410.4808). Default is 'Silva2015m1'.
    **kwargs : dict, optional
        Additional parameters for the specific recipe.

    Returns
    ----------
    lim_map : ndarray
        The CII line luminosity distributed on a grid of shape
        (n_grid, n_grid, n_grid), in Lsun.
    """
    assert recipe.lower() in ['silva2015m1', 'silva2015m2', 'silva2015m3', 'silva2015m4']
    if cosmo is None:
        print('Assuming Planck18 cosmology.')
        cosmo = Planck18
    Ob = cosmo.Ob0
    Om = cosmo.Om0
    hlittle = cosmo.h
    if isinstance(mass, units.quantity.Quantity):
        mhalo = mass.to('Msun')
    else:
        print('The provided halo mass is assumed to be in Msun units.')
        mhalo = mass*units.Msun
    if isinstance(pos_xyz, units.quantity.Quantity):
        srcpos = pos_xyz.to('Mpc')
    else:
        # Message fixed: this branch concerns the positions, not the masses.
        print('The provided positions are assumed to be in Mpc units.')
        srcpos = pos_xyz*units.Mpc
    if not isinstance(box_dim, units.quantity.Quantity):
        box_dim = box_dim*units.Mpc
    if SFR is None: SFR = 'silva2013'
    if isinstance(SFR, str):
        if SFR.lower() in ['silva2013', 'silva2015']:
            # Parametric SFR(M, z) relation with overridable coefficients.
            SFR0 = kwargs.get('SFR0', 2.25e-26*units.Msun/units.yr)
            a_SFR = kwargs.get('a_SFR', 2.59)
            b_SFR = kwargs.get('b_SFR', -0.62)
            d_SFR = kwargs.get('d_SFR', 0.40)
            e_SFR = kwargs.get('e_SFR', -2.25)
            c1_SFR = kwargs.get('c1_SFR', 8e8*units.Msun)
            c2_SFR = kwargs.get('c2_SFR', 7e9*units.Msun)
            c3_SFR = kwargs.get('c3_SFR', 1e11*units.Msun)
            SFR = lambda M,z: SFR0*(1+(z-7)*7.5e-2)*M.to('Msun').value**a_SFR*(1+M/c1_SFR)**b_SFR*(1+M/c2_SFR)**d_SFR*(1+M/c3_SFR)**e_SFR
    # log L_CII = a * log SFR + b, with coefficients per Silva+2015 model.
    if recipe.lower() in ['silva2015m1']:
        a_LCII = kwargs.get('a_LCII', 0.8475)
        b_LCII = kwargs.get('b_LCII', 7.2203)
    elif recipe.lower() in ['silva2015m2']:
        a_LCII = kwargs.get('a_LCII', 1.0000)
        b_LCII = kwargs.get('b_LCII', 6.9647)
    elif recipe.lower() in ['silva2015m3']:
        a_LCII = kwargs.get('a_LCII', 0.8727)
        b_LCII = kwargs.get('b_LCII', 6.7250)
    elif recipe.lower() in ['silva2015m4']:
        a_LCII = kwargs.get('a_LCII', 0.9231)
        b_LCII = kwargs.get('b_LCII', 6.5234)
    else:
        print(f'{recipe} is an unknown recipe for assigning L_CII signal.')
        return None
    if recipe.lower() in ['silva2015m1', 'silva2015m2', 'silva2015m3', 'silva2015m4']:
        LCII_lsun = 10**(a_LCII*np.log10(SFR(mhalo,z).to('Msun/yr').value)+b_LCII)
        binned_Lcii, bin_edges, bin_num = halo_list_to_grid(LCII_lsun, srcpos, box_dim, n_grid)
        return binned_Lcii*units.Lsun
|
sambit-giriREPO_NAMEtools21cmPATH_START.@tools21cm_extracted@tools21cm-master@src@tools21cm@line_intensity.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/multi/conftest.py",
"type": "Python"
}
|
import numpy as np
import pytest
from pandas import (
Index,
MultiIndex,
)
# Note: identical the "multi" entry in the top-level "index" fixture
@pytest.fixture
def idx():
    # a MultiIndex used to test the general functionality of this object
    major_axis = Index(["foo", "bar", "baz", "qux"])
    minor_axis = Index(["one", "two"])
    major_codes = np.array([0, 0, 1, 2, 3, 3])
    minor_codes = np.array([0, 1, 0, 1, 0, 1])
    index_names = ["first", "second"]
    mi = MultiIndex(
        levels=[major_axis, minor_axis],
        codes=[major_codes, minor_codes],
        names=index_names,
        verify_integrity=False,
    )
    return mi
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@indexes@multi@conftest.py@.PATH_END.py
|
{
"filename": "wbd.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/cluster/wbd.py",
"type": "Python"
}
|
from .load import load
from typing import List, Union, Optional
def wbd(trange:List[str]=['2003-11-01/14:00:00','2003-11-01/14:05:00'],
        probe:Union[str,List[str]]='1',
        datatype:str='waveform',
        prefix:str='',
        suffix:str='',
        get_support_data:bool=False,
        varformat:Optional[str]=None,
        varnames:Optional[List[str]]=None,
        downloadonly:bool=False,
        notplot:bool=False,
        no_update:bool=False,
        time_clip:bool=False,
        force_download:bool=False) -> List[str]:
    """
    Load data from the Cluster Wide Band Data receiver

    Parameters
    ----------
    trange : list of str
        time range of interest [starttime, endtime] with the format
        ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
        ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
        Default: ['2003-11-01/14:00:00','2003-11-01/14:05:00']
    probe: list of str
        List of probes to load. Valid options: '1','2','3','4'
        Default: '1'
    datatype: str
        Data type; Valid options:
        Default: 'waveform'
    prefix: str
        The tplot variable names will be given this prefix.
        Default: ''
    suffix: str
        The tplot variable names will be given this suffix.
        Default: ''
    get_support_data: bool
        If True, Data with an attribute "VAR_TYPE" with a value of "support_data"
        will be loaded into tplot.
    varformat: str
        The file variable formats to load into tplot. Wildcard character
        "*" is accepted. If empty or None, all variables will be loaded.
        Default: None (all variables loaded)
    varnames: list of str
        List of CDF variable names to load (if None or not specified,
        all data variables are loaded)
        Default: None (all variables loaded)
    downloadonly: bool
        Set this flag to download the CDF files, but not load them into
        tplot variables
        Default: False
    notplot: bool
        Return the data in hash tables instead of creating tplot variables
        Default: False
    no_update: bool
        If set, only load data from your local cache
        Default: False
    time_clip: bool
        Time clip the variables to exactly the range specified in the trange keyword
        Default: False
    force_download: bool
        Download file even if local version is more recent than server version
        Default: False

    Returns
    -------
    list of str
        List of tplot variables created.

    Examples
    --------
    >>> import pyspedas
    >>> from pytplot import tplot
    >>> wbd_vars = pyspedas.projects.cluster.wbd(trange=['2003-11-01/14:00:00','2003-11-01/14:05:00'],probe=['1'])
    >>> # Note lack of probe IDs in the variables loaded -- only load one probe at a time
    >>> tplot('WBD_Elec')
    """
    # Avoid a shared mutable default argument; None keeps the
    # "load all variables" behavior of the previous [] default.
    if varnames is None:
        varnames = []
    return load(instrument='wbd', trange=trange, probe=probe, datatype=datatype, prefix=prefix, suffix=suffix, get_support_data=get_support_data, varformat=varformat, varnames=varnames, downloadonly=downloadonly, notplot=notplot, no_update=no_update, time_clip=time_clip, force_download=force_download)
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@cluster@wbd.py@.PATH_END.py
|
{
"filename": "SUSHI Test Notebook (old version).ipynb",
"repo_name": "jmlascar/SUSHI",
"repo_path": "SUSHI_extracted/SUSHI-main/older_version/SUSHI Test Notebook (old version).ipynb",
"type": "Jupyter Notebook"
}
|
This notebook shows an example of SUSHI being used to unmix the components of the toymodel described in Lascar, Acero, Bobin 2023.
```python
# Imports
%matplotlib inline
#autorealod library when an external lib is changed.
import sys, os
%load_ext autoreload
%autoreload 2
import numpy as np
import matplotlib.pyplot as plt
import pickle
from ipywidgets import interact, fixed
import numba #not mandatory, but without numba, the wavelet transform is much slower.
from Sushi import SUSHI
import jax
if int(jax.__version__[2])<4:
import IAE_JAX_v2_devl_2022 as mld
else:
import IAE_JAX_v2_devl_2023 as mld
```
Numba imported
# Loading the toymodel
```python
with open("data/toymodel_data.p","rb") as f:
DATA=pickle.load(f)
with open("data/toymodel_ground_truth.p","rb") as f:
GT=pickle.load(f)
```
```python
def show_data_cube(i,j,param):
    #i,j: pixel position
    #param: parameter to show the ground truth of.
    # kT: temperature. z: velocity redshift.
    # pho: photon index (synchrotron component)
    # Left panel: the data spectrum at pixel (i, j) with the ground-truth
    # components (total/thermal/synchrotron, dashed).
    # Right panel: the ground-truth parameter map with the pixel marked.
    E=GT["channels"]
    fig,ax=plt.subplots(1,2,figsize=(11,4),gridspec_kw={'width_ratios': [3, 2]})
    ax[0].plot(E,DATA[:,i,j],alpha=0.5,label="data")
    ax[0].plot(E,GT["Total"][:,i,j],"k--")
    ax[0].plot(E,GT["Thermal"][:,i,j],"r--")
    ax[0].plot(E,GT["Synch"][:,i,j],"g--")
    pcm=ax[1].imshow(GT["Params"][param])
    ax[1].scatter(j,i,c="r",marker="+")
    fig.colorbar(pcm,ax=ax[1],shrink=0.8)
interact(show_data_cube,i=(0,93,1),j=(0,93,1),param=["kT","z","pho"])
```
interactive(children=(IntSlider(value=46, description='i', max=93), IntSlider(value=46, description='j', max=9…
<function __main__.show_data_cube(i, j, param)>
# Importing the trained IAE models
```python
if int(jax.__version__[2])<4:
#old version of Jax
model_name="IAE_models/IAE_thermal_abdapec_kT_z_3keV-8keV_4AP"
model = mld.load_model(model_name)
IAE_Thermal = mld.IAE(Model = model)
model_name="IAE_models/IAE_synchrotron_powerlaw_pho_3keV-8keV_2AP"
model= mld.load_model(model_name)
IAE_Synch= mld.IAE(Model = model)
else:
#From version 0.4 on
model_name="IAE_models/IAE_thermal_abdapec_kT_z_3keV-8keV_4AP_jax_numpy.npy"
model = np.load(model_name,allow_pickle=True).item()
IAE_Thermal = mld.IAE(Model = model)
model_name="IAE_models/IAE_synchrotron_powerlaw_pho_3keV-8keV_2AP_jax_numpy.npy"
model= np.load(model_name,allow_pickle=True).item()
IAE_Synch= mld.IAE(Model = model)
```
# Using SUSHI
```python
res_toymodel=SUSHI(DATA,self_T=IAE_Thermal,self_S=IAE_Synch,
Amplitude_S=None,Amp_fixed=False,Amplitude_T=None,
niter=10000,stop=1e-7,J=2,kmad=1,mask_amp=10,background=None,
Cost_function="Poisson",Chi_2_sigma=None,Custom_Cost=None)
#To save the results in a pickle file, uncomment these lines:
#with open("res_sushi_toymodel.p","wb") as f:
# pickle.dump(res_toymodel,f)
```
```python
plt.plot(res_toymodel['Likelihood'])
```
[<matplotlib.lines.Line2D at 0x290dea7d0>]

```python
def show_data_cube(i,j,param):
    #i,j: pixel position
    #param: parameter to show the ground truth of.
    # kT: temperature. z: velocity redshift.
    # pho: photon index (synchrotron component)
    E=GT["channels"]
    fig,ax=plt.subplots(1,2,figsize=(11,4),gridspec_kw={'width_ratios': [3, 2]})
    ax[0].plot(E,DATA[:,i,j],alpha=0.5,label="data")
    ax[0].plot(E,GT["Total"][:,i,j],"k--")
    ax[0].plot(E,GT["Thermal"][:,i,j],"r--")
    ax[0].plot(E,GT["Synch"][:,i,j],"g--")
    # Bug fix: the thermal and synchrotron reconstructions previously re-used
    # the "Total" component. Keys assumed to match the GT dict — TODO confirm
    # against the SUSHI result dictionary.
    XRec_total=np.reshape(res_toymodel["XRec"]["Total"].T,DATA.shape)
    ax[0].plot(E,XRec_total[:,i,j],"k")
    XRec_thermal=np.reshape(res_toymodel["XRec"]["Thermal"].T,DATA.shape)
    ax[0].plot(E,XRec_thermal[:,i,j],"r")
    XRec_synch=np.reshape(res_toymodel["XRec"]["Synch"].T,DATA.shape)
    ax[0].plot(E,XRec_synch[:,i,j],"g")
    pcm=ax[1].imshow(GT["Params"][param])
    ax[1].scatter(j,i,c="r",marker="+")
    fig.colorbar(pcm,ax=ax[1],shrink=0.8)
interact(show_data_cube,i=(0,93,1),j=(0,93,1),param=["kT","z","pho"])
```
interactive(children=(IntSlider(value=46, description='i', max=93), IntSlider(value=46, description='j', max=9…
<function __main__.show_data_cube(i, j, param)>
To obtain the physical parameters from this fit, the current approach is to perform a 1D fit on the unmixed pixels obtained by SUSHI. A method that integrates this into SUSHI directly is in the works.
```python
```
|
jmlascarREPO_NAMESUSHIPATH_START.@SUSHI_extracted@SUSHI-main@older_version@SUSHI Test Notebook (old version).ipynb@.PATH_END.py
|
{
"filename": "_fill.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter/_fill.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Auto-generated validator for the ``scatter.fill`` property."""

    def __init__(self, plotly_name="fill", parent_name="scatter", **kwargs):
        # Delegate to EnumeratedValidator with the allowed fill modes;
        # callers may override edit_type/role/values through kwargs.
        super(FillValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "style"),
            values=kwargs.pop(
                "values",
                [
                    "none",
                    "tozeroy",
                    "tozerox",
                    "tonexty",
                    "tonextx",
                    "toself",
                    "tonext",
                ],
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter@_fill.py@.PATH_END.py
|
{
"filename": "_base.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/joblib/joblib/externals/loky/_base.py",
"type": "Python"
}
|
###############################################################################
# Modification of concurrent.futures.Future
#
# author: Thomas Moreau and Olivier Grisel
#
# adapted from concurrent/futures/_base.py (17/02/2017)
# * Do not use yield from
# * Use old super syntax
#
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
from concurrent.futures import Future as _BaseFuture
from concurrent.futures._base import LOGGER
# To make loky._base.Future instances awaitable by concurrent.futures.wait,
# derive our custom Future class from _BaseFuture. _invoke_callback is the only
# modification made to this class in loky.
# TODO investigate why using `concurrent.futures.Future` directly does not
# always work in our test suite.
class Future(_BaseFuture):
    """loky Future; ``_invoke_callbacks`` is the only change from the base class."""

    def _invoke_callbacks(self):
        # Run every registered done-callback; a failing callback is logged
        # and must not prevent the remaining callbacks from running.
        for callback in self._done_callbacks:
            try:
                callback(self)
            except BaseException:
                LOGGER.exception(f"exception calling callback for {self!r}")
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@joblib@joblib@externals@loky@_base.py@.PATH_END.py
|
{
"filename": "miriad_test.py",
"repo_name": "HERA-Team/aipy",
"repo_path": "aipy_extracted/aipy-main/tests/miriad_test.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010 Aaron Parsons
# Licensed under the GPLv3
import pytest
import numpy as np
from aipy import miriad
from aipy import _miriad
@pytest.fixture(scope="function")
def test_file_r(tmp_path):
    """Write a two-record UV file with corrmode 'r' (real storage).

    Yields (path of written file, path for a second scratch file, data array).
    """
    filename1 = str(tmp_path / "test1.uv")
    filename2 = str(tmp_path / "test2.uv")
    uv = miriad.UV(filename1, status="new", corrmode="r")
    uv["history"] = "Made this file from scratch.\n"
    uv.add_var("nchan", "i")
    uv.add_var("pol", "i")
    uv["nchan"] = 4
    uv["pol"] = -5
    uvw = np.array([1, 2, 3], dtype=np.float64)
    preamble = (uvw, 12345.6789, (0, 1))
    # One masked channel (index 2) to exercise flag handling on read-back.
    data = np.ma.array([1j, 2, 3j, 4], mask=[0, 0, 1, 0], dtype=np.complex64)
    uv.write(preamble, data)
    uv["pol"] = -6
    uv.write(preamble, data)
    # Drop the UV object so the file is closed before tests re-open it.
    del uv
    yield filename1, filename2, data
    return
@pytest.fixture(scope="function")
def test_file_j(tmp_path):
    # Same two-record dataset as test_file_r, but written with
    # corrmode="j" (scaled 16-bit correlations), so read-back values are
    # only approximately equal to the originals.
    filename = str(tmp_path / "test1.uv")
    uv = miriad.UV(filename, status="new", corrmode="j")
    uv["history"] = "Made this file from scratch.\n"
    uv.add_var("nchan", "i")
    uv.add_var("pol", "i")
    uv["nchan"] = 4
    uv["pol"] = -5
    uvw = np.array([1, 2, 3], dtype=np.float64)
    preamble = (uvw, 12345.6789, (0, 1))
    data = np.ma.array([1j, 2, 3j, 4], mask=[0, 0, 1, 0], dtype=np.complex64)
    uv.write(preamble, data)
    uv["pol"] = -6
    uv.write(preamble, data)
    # Deleting the UV object closes the file so readers see the data.
    del uv
    yield filename, data
    return
def test_maxchan():
    """The MAXCHAN compile-time constant is exposed as a plain int."""
    maxchan = _miriad.MAXCHAN
    assert type(maxchan) == int
    return
def test_immediate_corr(test_file_r):
    """Test immediate corr of a Miriad UV file"""
    _, filename2, _ = test_file_r
    # A brand-new UV file should default to corrmode "r".
    new_uv = miriad.UV(filename2, status="new")
    assert new_uv.vartable["corr"] == "r"
    return
def test_vartable_r(test_file_r):
    """Test accessing vartable data in a Miriad UV file"""
    filename1, _, _ = test_file_r
    uv1 = miriad.UV(filename1)
    # Each variable must keep the type code it was declared with.
    for name, typecode in (("corr", "r"), ("nchan", "i"), ("pol", "i")):
        assert uv1.vartable[name] == typecode
    return
def test_data_r(test_file_r):
    """Test writing data from a Miriad UV file"""
    filename1, _, data = test_file_r
    uv = miriad.UV(filename1)
    assert uv["history"] == "Made this file from scratch.\n"
    expected_uvw = np.array([1, 2, 3], dtype=np.float64)
    # The fixture wrote two identical records that differ only in "pol".
    for expected_pol in (-5, -6):
        (uvw, t, bl), d = uv.read()
        assert uv["nchan"] == 4
        assert uv["pol"] == expected_pol
        assert bl == (0, 1)
        assert np.isclose(t, 12345.6789)
        assert np.allclose(uvw, expected_uvw)
        assert np.allclose(d, data)
    return
def test_vartable_j(test_file_j):
    """Test accessing vartable data in a Miriad UV file"""
    filename, _ = test_file_j
    uv = miriad.UV(filename, corrmode="j")
    # Each variable must keep the type code it was declared with.
    for name, typecode in (("corr", "j"), ("nchan", "i"), ("pol", "i")):
        assert uv.vartable[name] == typecode
    return
def test_data_j(test_file_j):
    """Test writing data from a Miriad UV file"""
    filename, data = test_file_j
    uv = miriad.UV(filename)
    assert uv["history"] == "Made this file from scratch.\n"
    expected_uvw = np.array([1, 2, 3], dtype=np.float64)
    # Two records, differing only in "pol".
    for expected_pol in (-5, -6):
        (uvw, t, bl), d = uv.read()
        assert uv["nchan"] == 4
        assert uv["pol"] == expected_pol
        assert bl == (0, 1)
        assert np.isclose(t, 12345.6789)
        assert np.allclose(uvw, expected_uvw)
        # corrmode "j" stores scaled integer correlations, so compare
        # with a small absolute tolerance rather than exactly.
        assert np.allclose(np.abs(d - data), 0.0, atol=1e-4)
    return
|
HERA-TeamREPO_NAMEaipyPATH_START.@aipy_extracted@aipy-main@tests@miriad_test.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "NASA-Planetary-Science/sbpy",
"repo_path": "sbpy_extracted/sbpy-main/sbpy/conftest.py",
"type": "Python"
}
|
"""Configure Test Suite.
This file is used to configure the behavior of pytest when using the Astropy
test infrastructure. It needs to live inside the package in order for it to
get picked up when running the tests inside an interpreter using
packagename.test
"""
import os
try:
from pytest_astropy_header.display import (
PYTEST_HEADER_MODULES, TESTED_VERSIONS
)
ASTROPY_HEADER = True
except ImportError:
ASTROPY_HEADER = False
def pytest_configure(config):
    """Configure Pytest with Astropy.

    Parameters
    ----------
    config : pytest configuration
    """
    # Without pytest-astropy-header installed there is nothing to set up.
    if not ASTROPY_HEADER:
        return
    config.option.astropy_header = True
    # Customize the list of packages whose version numbers appear in the
    # test header.
    PYTEST_HEADER_MODULES.pop('Pandas', None)
    PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
    from . import __version__
    packagename = os.path.basename(os.path.dirname(__file__))
    TESTED_VERSIONS[packagename] = __version__
|
NASA-Planetary-ScienceREPO_NAMEsbpyPATH_START.@sbpy_extracted@sbpy-main@sbpy@conftest.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "simonsobs/socs",
"repo_path": "socs_extracted/socs-main/socs/agents/ibootbar/__init__.py",
"type": "Python"
}
|
simonsobsREPO_NAMEsocsPATH_START.@socs_extracted@socs-main@socs@agents@ibootbar@__init__.py@.PATH_END.py
|
|
{
"filename": "test_util.py",
"repo_name": "adrn/gala",
"repo_path": "gala_extracted/gala-main/gala/tests/test_util.py",
"type": "Python"
}
|
# Third-party
import pytest
# Project
from ..util import ImmutableDict
def test_immutabledict():
    """An ImmutableDict must reject item assignment after construction."""
    source = dict(a=5, c=6)
    frozen = ImmutableDict(**source)
    with pytest.raises(TypeError):
        frozen['test'] = 5
|
adrnREPO_NAMEgalaPATH_START.@gala_extracted@gala-main@gala@tests@test_util.py@.PATH_END.py
|
{
"filename": "arbitrary_fn.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/docstore/arbitrary_fn.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.docstore.arbitrary_fn import DocstoreFn
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"DocstoreFn": "langchain_community.docstore.arbitrary_fn"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    # Delegate to the shared importer, which handles the deprecation
    # warning and the optional langchain_community import.
    attr = _import_attribute(name)
    return attr
__all__ = [
"DocstoreFn",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@docstore@arbitrary_fn.py@.PATH_END.py
|
{
"filename": "CustomPrior_trueback.py",
"repo_name": "ThomasEdwardRiley/xpsi-pre-transfer",
"repo_path": "xpsi-pre-transfer_extracted/xpsi-pre-transfer-master/examples/true_background/CustomPrior_trueback.py",
"type": "Python"
}
|
from __future__ import print_function, division
import numpy as np
import math
from scipy.stats import truncnorm
import xpsi
from xpsi.global_imports import _G, _csq, _km, _M_s, _2pi, gravradius
class CustomPrior(xpsi.Prior):
    """ A custom (joint) prior distribution.
    Currently tailored to the NICER light-curve SWG model specification.
    Source: Imaginary
    Model variant: ST-U
    Parameter vector:
    * p[0] = distance (kpc)
    * p[1] = (rotationally deformed) gravitational mass (solar masses)
    * p[2] = coordinate equatorial radius (km)
    * p[3] = inclination of Earth to rotational axis (radians)
    * p[4] = primary cap centre colatitude (radians)
    * p[5] = primary cap angular radius (radians)
    * p[6] = primary cap log10(comoving blackbody temperature [K])
    * p[7] = secondary cap centre colatitude (radians)
    * p[8] = secondary cap angular radius (radians)
    * p[9] = secondary cap log10(comoving blackbody temperature [K])
    * p[10] = powerlaw index
    * p[11] = powerlaw norm
    * p[12] = primary cap phase shift (cycles); (alias for initial azimuth, periodic)
    * p[13] = secondary cap phase shift (cycles)
    """
    def __init__(self, bounds, spacetime):
        # Execute abstract parent initialiser
        super(CustomPrior, self).__init__(bounds)
        # The ambient spacetime is retained so that __call__ can evaluate
        # compactness/causality constraints for each proposed sample.
        assert isinstance(spacetime, xpsi.Spacetime),\
                'Invalid type for ambient spacetime object.'
        self._spacetime = spacetime
    def __call__(self, p):
        """ Evaluate distribution at :obj:`p`.
        :param list p: Model parameters values.
        :return: Logarithm of the distribution evaluated at :obj:`p`.
                 ``-inf`` if any hard constraint below is violated,
                 else ``0.0`` (flat prior within the bounds).
        """
        # The leading parameters are the spacetime parameters.
        i = self._spacetime.num_params
        self._spacetime.update(*p[:i])
        # Hard upper bound on the coordinate equatorial radius.
        if not self._spacetime.R <= 16.0*_km:
            return -np.inf
        # Compactness limit: R must exceed 1.5 Schwarzschild radii.
        if not 1.5 < self._spacetime.R_r_s:
            return -np.inf
        epsilon = self._spacetime.epsilon
        zeta = self._spacetime.zeta
        mu = math.sqrt(-1.0 / (3.0 * epsilon * (-0.788 + 1.030 * zeta)))
        # 2-surface cross-section have a single maximum in |z|
        # i.e., an elliptical surface
        if mu < 1.0:
            return -np.inf
        # polar radius causality for ~static star (static ambient spacetime)
        R_p = 1.0 + epsilon * (-0.788 + 1.030 * zeta)
        if R_p < 1.5 / self._spacetime.R_r_s:
            return -np.inf
        # Break cap-exchange degeneracy: primary colatitude <= secondary.
        if p[4] > p[7]:
            return -np.inf
        # spots cannot overlap
        theta_p = p[4]
        # Relative azimuth between the caps, in radians.
        phi = (p[12] - 0.5 - p[13]) * _2pi
        rho_p = p[5]
        theta_s = p[7]
        rho_s = p[8]
        # Angular separation of cap centres on the unit sphere.
        ang_sep = xpsi.Spot._psi(theta_s, phi, theta_p)
        if ang_sep < rho_p + rho_s:
            return -np.inf
        return 0.0
    def inverse_sample(self, hypercube):
        """ Draw sample uniformly from the distribution via inverse sampling.
        :param hypercube: A pseudorandom point in an n-dimensional hypercube.
        :return: A parameter ``list``.
        """
        p = super(CustomPrior, self).inverse_sample(hypercube)
        # distance
        # Truncated normal: mean 0.3, sigma 0.1, truncated at [-2, +7] sigma.
        p[0] = truncnorm.ppf(hypercube[0], -2.0, 7.0, loc=0.3, scale=0.1)
        # Wrap phase shifts from [0, 1) to the periodic interval (-0.5, 0.5].
        if p[12] > 0.5:
            p[12] -= 1.0
        if p[13] > 0.5:
            p[13] -= 1.0
        return p
    def inverse_sample_and_transform(self, hypercube):
        """ A transformation for post-processing. """
        p = self.transform(self.inverse_sample(hypercube))
        return p
    @staticmethod
    def transform(p):
        """ A transformation for post-processing.
        Appends the compactness (gravitational radius over equatorial
        radius) and the phase separation of the two caps in cycles.
        """
        if not isinstance(p, list):
            p = list(p)
        # Compactness GM / (R c^2) expressed via the gravitational radius.
        p += [gravradius(p[1]) / p[2]]
        # Map the primary phase shift back onto [0, 1) cycles.
        if p[12] < 0.0:
            tempp = p[12] + 1.0
        else:
            tempp = p[12]
        temps = 0.5 + p[13]
        # Cap phase separation, modulo one cycle.
        if temps >= tempp:
            p += [temps - tempp]
        else:
            p += [1.0 - tempp + temps]
        return p
|
ThomasEdwardRileyREPO_NAMExpsi-pre-transferPATH_START.@xpsi-pre-transfer_extracted@xpsi-pre-transfer-master@examples@true_background@CustomPrior_trueback.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/pointcloud/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._ysrc import YsrcValidator
from ._yboundssrc import YboundssrcValidator
from ._ybounds import YboundsValidator
from ._yaxis import YaxisValidator
from ._y import YValidator
from ._xysrc import XysrcValidator
from ._xy import XyValidator
from ._xsrc import XsrcValidator
from ._xboundssrc import XboundssrcValidator
from ._xbounds import XboundsValidator
from ._xaxis import XaxisValidator
from ._x import XValidator
from ._visible import VisibleValidator
from ._uirevision import UirevisionValidator
from ._uid import UidValidator
from ._textsrc import TextsrcValidator
from ._text import TextValidator
from ._stream import StreamValidator
from ._showlegend import ShowlegendValidator
from ._opacity import OpacityValidator
from ._name import NameValidator
from ._metasrc import MetasrcValidator
from ._meta import MetaValidator
from ._marker import MarkerValidator
from ._legendgroup import LegendgroupValidator
from ._indicessrc import IndicessrcValidator
from ._indices import IndicesValidator
from ._idssrc import IdssrcValidator
from ._ids import IdsValidator
from ._hoverlabel import HoverlabelValidator
from ._hoverinfosrc import HoverinfosrcValidator
from ._hoverinfo import HoverinfoValidator
from ._customdatasrc import CustomdatasrcValidator
from ._customdata import CustomdataValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
[
"._ysrc.YsrcValidator",
"._yboundssrc.YboundssrcValidator",
"._ybounds.YboundsValidator",
"._yaxis.YaxisValidator",
"._y.YValidator",
"._xysrc.XysrcValidator",
"._xy.XyValidator",
"._xsrc.XsrcValidator",
"._xboundssrc.XboundssrcValidator",
"._xbounds.XboundsValidator",
"._xaxis.XaxisValidator",
"._x.XValidator",
"._visible.VisibleValidator",
"._uirevision.UirevisionValidator",
"._uid.UidValidator",
"._textsrc.TextsrcValidator",
"._text.TextValidator",
"._stream.StreamValidator",
"._showlegend.ShowlegendValidator",
"._opacity.OpacityValidator",
"._name.NameValidator",
"._metasrc.MetasrcValidator",
"._meta.MetaValidator",
"._marker.MarkerValidator",
"._legendgroup.LegendgroupValidator",
"._indicessrc.IndicessrcValidator",
"._indices.IndicesValidator",
"._idssrc.IdssrcValidator",
"._ids.IdsValidator",
"._hoverlabel.HoverlabelValidator",
"._hoverinfosrc.HoverinfosrcValidator",
"._hoverinfo.HoverinfoValidator",
"._customdatasrc.CustomdatasrcValidator",
"._customdata.CustomdataValidator",
],
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@pointcloud@__init__.py@.PATH_END.py
|
{
"filename": "limbdark.py",
"repo_name": "rodluger/starry",
"repo_path": "starry_extracted/starry-master/starry/_core/ops/limbdark/limbdark.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
__all__ = ["LimbDarkOp"]
from ....compat import Apply, theano, tt
from .base_op import LimbDarkBaseOp
class LimbDarkOp(LimbDarkBaseOp):
    """Theano op wrapping the compiled limb-darkening light-curve kernel
    in ``limbdark.cc``; outputs the flux and its partial derivatives with
    respect to the limb-darkening coefficients, impact parameter and
    occultor radius."""
    __props__ = ()
    func_file = "./limbdark.cc"
    func_name = "APPLY_SPECIFIC(limbdark)"
    def make_node(self, c, b, r, los):
        # Inputs: c = limb-darkening coefficients, b = impact parameter,
        # r = occultor radius, los = line-of-sight sign.  The output dtype
        # is upcast over every tensor-convertible input.
        in_args = []
        dtype = theano.config.floatX
        for a in [c, b, r, los]:
            try:
                a = tt.as_tensor_variable(a)
            except tt.AsTensorError:
                pass
            else:
                dtype = theano.scalar.upcast(dtype, a.dtype)
            in_args.append(a)
        # Outputs: flux (shaped like b), dflux/dc (extra leading axis over
        # the coefficients), dflux/db (like b), dflux/dr (like r).
        out_args = [
            in_args[1].type(),
            tt.TensorType(
                dtype=dtype, broadcastable=[False] * (in_args[1].ndim + 1)
            )(),
            in_args[1].type(),
            in_args[2].type(),
        ]
        return Apply(self, in_args, out_args)
    def infer_shape(self, *args):
        # Shapes follow the output layout documented in make_node.
        shapes = args[-1]
        return (
            shapes[1],
            list(shapes[0]) + list(shapes[1]),
            shapes[1],
            shapes[2],
        )
    def grad(self, inputs, gradients):
        c, b, r, los = inputs
        f, dfdcl, dfdb, dfdr = self(*inputs)
        bf = gradients[0]
        # Gradients may only flow through the flux output; the derivative
        # outputs themselves must be disconnected from the cost.
        for i, g in enumerate(gradients[1:]):
            if not isinstance(g.type, theano.gradient.DisconnectedType):
                raise ValueError(
                    "can't propagate gradients wrt parameter {0}".format(i + 1)
                )
        # Chain rule: contract the flux gradient with each stored partial.
        bc = tt.sum(
            tt.reshape(bf, (1, bf.size))
            * tt.reshape(dfdcl, (c.size, bf.size)),
            axis=-1,
        )
        bb = bf * dfdb
        br = bf * dfdr
        # los carries no gradient.
        return bc, bb, br, tt.zeros_like(los)
    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return eval_points
        return self.grad(inputs, eval_points)
|
rodlugerREPO_NAMEstarryPATH_START.@starry_extracted@starry-master@starry@_core@ops@limbdark@limbdark.py@.PATH_END.py
|
{
"filename": "CmdLine.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/cython/Cython/Compiler/CmdLine.py",
"type": "Python"
}
|
#
# Cython - Command Line Parsing
#
from __future__ import absolute_import
import os
import sys
from . import Options
usage = """\
Cython (http://cython.org) is a compiler for code written in the
Cython language. Cython is based on Pyrex by Greg Ewing.
Usage: cython [options] sourcefile.{pyx,py} ...
Options:
-V, --version Display version number of cython compiler
-l, --create-listing Write error messages to a listing file
-I, --include-dir <directory> Search for include files in named directory
(multiple include directories are allowed).
-o, --output-file <filename> Specify name of generated C file
-t, --timestamps Only compile newer source files
-f, --force Compile all source files (overrides implied -t)
-v, --verbose Be verbose, print file names on multiple compilation
-p, --embed-positions If specified, the positions in Cython files of each
function definition is embedded in its docstring.
--cleanup <level> Release interned objects on python exit, for memory debugging.
Level indicates aggressiveness, default 0 releases nothing.
-w, --working <directory> Sets the working directory for Cython (the directory modules
are searched from)
--gdb Output debug information for cygdb
--gdb-outdir <directory> Specify gdb debug information output directory. Implies --gdb.
-D, --no-docstrings Strip docstrings from the compiled module.
-a, --annotate Produce a colorized HTML version of the source.
--annotate-coverage <cov.xml> Annotate and include coverage information from cov.xml.
--line-directives Produce #line directives pointing to the .pyx source
--cplus Output a C++ rather than C file.
--embed[=<method_name>] Generate a main() function that embeds the Python interpreter.
-2 Compile based on Python-2 syntax and code semantics.
-3 Compile based on Python-3 syntax and code semantics.
--3str Compile based on Python-3 syntax and code semantics without
assuming unicode by default for string literals under Python 2.
--lenient Change some compile time errors to runtime errors to
improve Python compatibility
--capi-reexport-cincludes Add cincluded headers to any auto-generated header files.
--fast-fail Abort the compilation on the first error
--warning-errors, -Werror Make all warnings into errors
--warning-extra, -Wextra Enable extra warnings
-X, --directive <name>=<value>[,<name=value,...] Overrides a compiler directive
-E, --compile-time-env name=value[,<name=value,...] Provides compile time env like DEF would do.
--module-name Fully qualified module name. If not given, it is deduced from the
import path if source file is in a package, or equals the
filename otherwise.
-M, --depfile Produce depfiles for the sources
"""
# The following experimental options are supported only on MacOSX:
# -C, --compile Compile generated .c file to .o file
# --link Link .o file to produce extension module (implies -C)
# -+, --cplus Use C++ compiler for compiling and linking
# Additional .o files to link may be supplied when using -X."""
def bad_usage():
    """Print the usage text to stderr and abort with exit status 1."""
    sys.stderr.write(usage)
    sys.exit(1)
def parse_command_line(args):
    """Parse a cython command line into ``(CompilationOptions, sources)``.

    ``args`` is the argument vector *without* the program name.  Unknown
    options, missing option values and invalid option combinations print
    the usage text and/or an error message to stderr and exit the process.
    """
    from .Main import CompilationOptions, default_options

    pending_arg = []

    def pop_arg():
        # Return the next option token, splitting "--long-option=value"
        # and stashing the value for a subsequent pop_value().
        if not args or pending_arg:
            bad_usage()
        if '=' in args[0] and args[0].startswith('--'):  # allow "--long-option=xyz"
            name, value = args.pop(0).split('=', 1)
            pending_arg.append(value)
            return name
        return args.pop(0)

    def pop_value(default=None):
        # Return the option's value: the stashed "=value" part if any,
        # else the provided default, else the next argument.
        if pending_arg:
            return pending_arg.pop()
        elif default is not None:
            return default
        elif not args:
            bad_usage()
        return args.pop(0)

    def get_param(option):
        # Short options may carry their value attached, e.g. "-Ifoo".
        tail = option[2:]
        if tail:
            return tail
        else:
            return pop_arg()

    options = CompilationOptions(default_options)
    sources = []
    while args:
        if args[0].startswith("-"):
            option = pop_arg()
            if option in ("-V", "--version"):
                options.show_version = 1
            elif option in ("-l", "--create-listing"):
                options.use_listing_file = 1
            elif option in ("-+", "--cplus"):
                options.cplus = 1
            elif option == "--embed":
                Options.embed = pop_value("main")
            elif option.startswith("-I"):
                options.include_path.append(get_param(option))
            elif option == "--include-dir":
                options.include_path.append(pop_value())
            elif option in ("-w", "--working"):
                options.working_path = pop_value()
            elif option in ("-o", "--output-file"):
                options.output_file = pop_value()
            elif option in ("-t", "--timestamps"):
                options.timestamps = 1
            elif option in ("-f", "--force"):
                options.timestamps = 0
            elif option in ("-v", "--verbose"):
                options.verbose += 1
            elif option in ("-p", "--embed-positions"):
                Options.embed_pos_in_docstring = 1
            elif option in ("-z", "--pre-import"):
                Options.pre_import = pop_value()
            elif option == "--cleanup":
                Options.generate_cleanup_code = int(pop_value())
            elif option in ("-D", "--no-docstrings"):
                Options.docstrings = False
            elif option in ("-a", "--annotate"):
                Options.annotate = True
            elif option == "--annotate-coverage":
                Options.annotate = True
                Options.annotate_coverage_xml = pop_value()
            elif option == "--convert-range":
                Options.convert_range = True
            elif option == "--line-directives":
                options.emit_linenums = True
            elif option == "--no-c-in-traceback":
                options.c_line_in_traceback = False
            elif option == "--gdb":
                options.gdb_debug = True
                options.output_dir = os.curdir
            elif option == "--gdb-outdir":
                options.gdb_debug = True
                options.output_dir = pop_value()
            elif option == "--lenient":
                Options.error_on_unknown_names = False
                Options.error_on_uninitialized = False
            elif option == '--init-suffix':
                options.init_suffix = pop_arg()
            elif option == '--source-root':
                Options.source_root = pop_arg()
            elif option == '-2':
                options.language_level = 2
            elif option == '-3':
                options.language_level = 3
            elif option == '--3str':
                options.language_level = '3str'
            elif option == "--capi-reexport-cincludes":
                options.capi_reexport_cincludes = True
            elif option == "--fast-fail":
                Options.fast_fail = True
            elif option == "--cimport-from-pyx":
                Options.cimport_from_pyx = True
            elif option in ('-Werror', '--warning-errors'):
                Options.warning_errors = True
            elif option in ('-Wextra', '--warning-extra'):
                options.compiler_directives.update(Options.extra_warnings)
            elif option == "--old-style-globals":
                Options.old_style_globals = True
            elif option == "--directive" or option.startswith('-X'):
                if option.startswith('-X') and option[2:].strip():
                    x_args = option[2:]
                else:
                    x_args = pop_value()
                try:
                    options.compiler_directives = Options.parse_directive_list(
                        x_args, relaxed_bool=True,
                        current_settings=options.compiler_directives)
                except ValueError as e:
                    sys.stderr.write("Error in compiler directive: %s\n" % e.args[0])
                    sys.exit(1)
            elif option == "--compile-time-env" or option.startswith('-E'):
                if option.startswith('-E') and option[2:].strip():
                    x_args = option[2:]
                else:
                    x_args = pop_value()
                try:
                    options.compile_time_env = Options.parse_compile_time_env(
                        x_args, current_settings=options.compile_time_env)
                except ValueError as e:
                    sys.stderr.write("Error in compile-time-env: %s\n" % e.args[0])
                    sys.exit(1)
            elif option == "--module-name":
                options.module_name = pop_value()
            elif option in ('-M', '--depfile'):
                options.depfile = True
            elif option.startswith('--debug'):
                option = option[2:].replace('-', '_')
                from . import DebugFlags
                if option in dir(DebugFlags):
                    setattr(DebugFlags, option, True)
                else:
                    sys.stderr.write("Unknown debug flag: %s\n" % option)
                    bad_usage()
            elif option in ('-h', '--help'):
                sys.stdout.write(usage)
                sys.exit(0)
            else:
                sys.stderr.write(usage)
                sys.stderr.write("Unknown compiler flag: %s\n" % option)
                sys.exit(1)
        else:
            sources.append(pop_arg())

    # A dangling "--option=value" with no option that consumed the value.
    if pending_arg:
        bad_usage()

    # Post-parse validation of option combinations.
    if options.use_listing_file and len(sources) > 1:
        # BUG FIX: this branch guards the listing-file option, but the
        # message previously referred to "-o" (the output-file option).
        sys.stderr.write(
            "cython: Only one source file allowed when using -l\n")
        sys.exit(1)
    if len(sources) == 0 and not options.show_version:
        bad_usage()
    if Options.embed and len(sources) > 1:
        sys.stderr.write(
            "cython: Only one source file allowed when using --embed\n")
        sys.exit(1)
    if options.module_name:
        if options.timestamps:
            sys.stderr.write(
                "cython: Cannot use --module-name with --timestamps\n")
            sys.exit(1)
        if len(sources) > 1:
            sys.stderr.write(
                "cython: Only one source file allowed when using --module-name\n")
            sys.exit(1)
    return options, sources
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@cython@Cython@Compiler@CmdLine.py@.PATH_END.py
|
{
"filename": "test_spectrum.py",
"repo_name": "tardis-sn/tardis",
"repo_path": "tardis_extracted/tardis-main/tardis/montecarlo/tests/test_spectrum.py",
"type": "Python"
}
|
import pytest
import numpy as np
import pandas as pd
import os
from astropy import units as u, constants as c
import astropy.tests.helper as test_helper
from numpy.testing import assert_almost_equal
from tardis.montecarlo.spectrum import (
TARDISSpectrum,
)
BIN = 5
TESTDATA = [
{
"nu": u.Quantity(np.linspace(1, 50, BIN + 1), "Hz"),
"lum": u.Quantity(np.linspace(1e27, 1e28, BIN), "erg / s"),
"distance": None,
},
{
"nu": u.Quantity(np.linspace(1, 50, BIN + 1), "Hz"),
"lum": u.Quantity(np.linspace(1e27, 1e28, BIN), "erg / s"),
"distance": u.Quantity(1, "Mpc"),
},
{
"nu": u.Quantity([1, 2, 3, 4], "Hz"),
"lum": u.Quantity([1, 2, 3], "erg / s") * np.pi,
"distance": u.Quantity(0.5, "cm"),
},
]
@pytest.fixture(scope="module", params=TESTDATA)
def spectrum(request):
    """Build a TARDISSpectrum for each TESTDATA case, attaching the
    optional distance attribute when one is supplied."""
    param = request.param
    built = TARDISSpectrum(param["nu"], param["lum"])
    if param["distance"]:
        built.distance = param["distance"]
    return built
###
# Testing properties
###
def test_frequency(spectrum):
    """frequency exposes the bin centres, i.e. all edges but the last."""
    assert (spectrum.frequency == spectrum._frequency[:-1]).all()
def test_luminosity_density_nu(spectrum):
    """luminosity_density_nu is luminosity per frequency bin width."""
    bin_widths = np.diff(spectrum._frequency)
    test_helper.assert_quantity_allclose(
        spectrum.luminosity_density_nu, spectrum.luminosity / bin_widths
    )
def test_luminosity_density_lambda(spectrum):
    """luminosity_density_lambda converts the per-frequency density."""
    density_nu = spectrum.luminosity / np.diff(spectrum._frequency)
    expected = spectrum.f_nu_to_f_lambda(density_nu)
    test_helper.assert_quantity_allclose(
        spectrum.luminosity_density_lambda, expected
    )
def test_flux_nu(spectrum):
    """flux_nu equals the distance-scaled density (with a deprecation
    warning); without a distance the attribute raises AttributeError."""
    if not getattr(spectrum, "distance", None):
        with pytest.raises(AttributeError):
            spectrum.flux_nu
        return
    if pytest.__version__ < "2.8":
        pytest.xfail(
            reason="requires pytest.warns (introduced in pytest v2.8)",
        )
    with pytest.warns(DeprecationWarning):
        test_helper.assert_quantity_allclose(
            spectrum.flux_nu,
            spectrum.luminosity_to_flux(
                spectrum.luminosity_density_nu, spectrum.distance
            ),
        )
def test_flux_lambda(spectrum):
    """flux_lambda equals the distance-scaled per-wavelength density
    (with a deprecation warning); without a distance the attribute must
    raise AttributeError."""
    if getattr(spectrum, "distance", None):
        if pytest.__version__ < "2.8":
            pytest.xfail(
                reason="requires pytest.warns (introduced in pytest v2.8)",
            )
        with pytest.warns(DeprecationWarning):
            test_helper.assert_quantity_allclose(
                spectrum.flux_lambda,
                spectrum.luminosity_to_flux(
                    spectrum.luminosity_density_lambda, spectrum.distance
                ),
            )
    else:
        with pytest.raises(AttributeError):
            # BUG FIX: this previously accessed flux_nu (copy-paste from
            # test_flux_nu), so flux_lambda was never exercised here.
            spectrum.flux_lambda
###
# Testing methods
###
def test_luminosity_to_flux():
    """At d = 0.5 cm the spherical surface area is 4*pi*d**2 = pi cm**2,
    so the flux is luminosity divided by pi."""
    lum = u.Quantity(np.arange(1, 4, 1) * np.pi, "erg / s")
    distance = u.Quantity(0.5, "cm")
    flux = TARDISSpectrum.luminosity_to_flux(lum, distance)
    expected = u.Quantity(lum.value / np.pi, "erg s^-1 cm^-2")
    test_helper.assert_quantity_allclose(flux, expected)
def test_f_nu_to_f_lambda(spectrum):
    """luminosity_density_lambda must equal the Jacobian-converted
    per-frequency density: f_lambda = f_nu * nu**2 / c.

    Previously this test computed and asserted the identical expression
    twice and left a debugging ``print`` behind; the duplicate check and
    the print have been removed.
    """
    expected = (
        spectrum.luminosity_density_nu
        * spectrum.frequency ** 2
        / c.c.to("angstrom/s")
    ).to("erg / (s angstrom)")
    test_helper.assert_quantity_allclose(
        spectrum.luminosity_density_lambda, expected
    )
###
# Save and Load
###
def compare_spectra(actual, desired):
    """Assert two spectra agree in frequency, luminosity and — when the
    actual spectrum carries one — distance."""
    for attr in ("frequency", "luminosity"):
        test_helper.assert_quantity_allclose(
            getattr(actual, attr), getattr(desired, attr)
        )
    if getattr(actual, "distance", None):
        test_helper.assert_quantity_allclose(actual.distance, desired.distance)
@pytest.fixture(scope="module", autouse=True)
def to_hdf_buffer(hdf_file_path, spectrum):
    """Write the spectrum into the shared HDF file before the HDF
    round-trip tests run."""
    spectrum.to_hdf(hdf_file_path, name="spectrum", overwrite=True)
@pytest.mark.parametrize("attr", TARDISSpectrum.hdf_properties)
def test_hdf_spectrum(hdf_file_path, spectrum, attr):
    """Each hdf_property round-trips through the HDF file: scalars live
    under 'spectrum/scalars', arrays under 'spectrum/<attr>'."""
    actual = getattr(spectrum, attr)
    if hasattr(actual, "cgs"):
        actual = actual.cgs.value
    if np.isscalar(actual):
        scalars = pd.read_hdf(hdf_file_path, os.path.join("spectrum", "scalars"))
        assert_almost_equal(actual, getattr(scalars, attr))
    else:
        stored = pd.read_hdf(hdf_file_path, os.path.join("spectrum", attr))
        assert_almost_equal(actual, stored.values)
###
# Test creation from nonstandard units
###
def test_creat_from_wl(spectrum):
    """Constructing from wavelength bin edges yields the same spectrum."""
    wavelengths = spectrum._frequency.to("angstrom", u.spectral())
    rebuilt = TARDISSpectrum(wavelengths, spectrum.luminosity)
    compare_spectra(rebuilt, spectrum)
def test_creat_from_J(spectrum):
    """Constructing with luminosity in J/s normalises back to cgs."""
    lum_si = spectrum.luminosity.to("J / s")
    rebuilt = TARDISSpectrum(spectrum._frequency, lum_si)
    compare_spectra(rebuilt, spectrum)
|
tardis-snREPO_NAMEtardisPATH_START.@tardis_extracted@tardis-main@tardis@montecarlo@tests@test_spectrum.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "petigura/terra",
"repo_path": "terra_extracted/terra-master/terra/FFA/__init__.py",
"type": "Python"
}
|
petiguraREPO_NAMEterraPATH_START.@terra_extracted@terra-master@terra@FFA@__init__.py@.PATH_END.py
|
|
{
"filename": "test_regions.py",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/jdaviz/configs/cubeviz/plugins/tests/test_regions.py",
"type": "Python"
}
|
"""This module tests region handling specific to Cubeviz.
Generic handling logic already covered in
jdaviz/configs/imviz/tests/test_regions.py
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import SkyCoord
from regions import PixCoord, CirclePixelRegion, CircleSkyRegion, EllipsePixelRegion
from specutils import Spectrum1D, SpectralRegion
from jdaviz.configs.imviz.tests.test_regions import BaseRegionHandler
class TestLoadRegions(BaseRegionHandler):
    """Cubeviz-specific region-loading tests; generic region handling is
    covered by the Imviz test suite this class inherits from."""
    @pytest.fixture(autouse=True)
    def setup_class(self, cubeviz_helper, image_cube_hdu_obj_microns):
        # Load a micron-wavelength test cube once per test and expose the
        # viewers the assertions need.
        self.cubeviz = cubeviz_helper
        cubeviz_helper.load_data(image_cube_hdu_obj_microns, data_label='has_microns')
        self.viewer = cubeviz_helper.default_viewer._obj  # This is used in BaseRegionHandler
        self.spectrum_viewer = cubeviz_helper.app.get_viewer(
            cubeviz_helper._default_spectrum_viewer_reference_name
        )
    def teardown_method(self, method):
        """Clear all the subsets for the next test method."""
        self.cubeviz._delete_all_regions()
    def test_regions_mask(self):
        # Boolean-mask regions are currently unsupported in Cubeviz and
        # must be reported back as bad regions rather than raising.
        mask = np.zeros((9, 10), dtype=np.bool_)
        mask[0, 0] = True
        bad_regions = self.cubeviz.plugins['Subset Tools'].import_region(
            [mask], return_bad_regions=True)
        # TODO: Update expected results if we ever support masked Subset in Cubeviz.
        assert len(bad_regions) == 1 and bad_regions[0][1] == 'Mask creation failed'
    def test_regions_pixel(self):
        # A little out-of-bounds should still overlay the overlapped part.
        my_reg = CirclePixelRegion(center=PixCoord(x=6, y=2), radius=5)
        bad_regions = self.cubeviz.plugins['Subset Tools'].import_region(
            [my_reg], return_bad_regions=True)
        assert len(bad_regions) == 0
        self.verify_region_loaded('Subset 1', count=1)
        assert len(self.cubeviz.get_interactive_regions()) == 1
    def test_regions_sky_has_wcs(self):
        # The test cube has no valid celestial WCS, so sky regions are
        # expected to be rejected with an explanatory message.
        sky = SkyCoord(205.4397, 27.0035, unit='deg')
        my_reg_sky_1 = CircleSkyRegion(center=sky, radius=0.0004 * u.deg)
        bad_regions = self.cubeviz.plugins['Subset Tools'].import_region(
            my_reg_sky_1, return_bad_regions=True)
        # TODO: Update expected results when we support sky regions in Cubeviz.
        assert len(bad_regions) == 1 and bad_regions[0][1] == 'Sky region provided but data has no valid WCS'  # noqa
    def test_spatial_spectral_mix(self):
        # Manually draw ellipse.
        self.cubeviz._apply_interactive_region('bqplot:ellipse', (0, 0), (9, 8))
        # Manually draw wavelength range.
        unit = u.Unit(self.cubeviz.plugins['Unit Conversion'].spectral_unit.selected)
        self.cubeviz.plugins['Subset Tools'].import_region(SpectralRegion(4.892 * unit,
                                                                          4.896 * unit))
        self.cubeviz.app.session.edit_subset_mode.edit_subset = None
        # Get interactive spatial regions only.
        spatial_subsets = self.cubeviz.get_interactive_regions()
        assert list(spatial_subsets.keys()) == ['Subset 1'], spatial_subsets
        assert isinstance(spatial_subsets['Subset 1'], EllipsePixelRegion)
        # NOTE: This does not test that spectrum from Subset is actually scientifically accurate.
        # Get spectral regions only.
        # https://github.com/spacetelescope/jdaviz/issues/1584
        with pytest.warns(UserWarning, match='Applying the value from the redshift slider'):
            spectral_subsets = self.cubeviz.specviz.get_spectra()
        assert list(spectral_subsets.keys()) == ['Spectrum (sum)',
                                                 'Spectrum (Subset 1, sum)',
                                                 'Spectrum (sum) (Subset 2)',
                                                 'Spectrum (Subset 1, sum) (Subset 2)']
        for sp in spectral_subsets.values():
            assert isinstance(sp, Spectrum1D)
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@jdaviz@configs@cubeviz@plugins@tests@test_regions.py@.PATH_END.py
|
{
"filename": "minimizer.py",
"repo_name": "IvS-KULeuven/IvSPythonRepository",
"repo_path": "IvSPythonRepository_extracted/IvSPythonRepository-master/sigproc/lmfit/minimizer.py",
"type": "Python"
}
|
"""
Simple minimizer is a wrapper around scipy.leastsq, allowing a
user to build a fitting model as a function of general purpose
Fit Parameters that can be fixed or floated, bounded, and written
as a simple expression of other Fit Parameters.
The user sets up a model in terms of instance of Parameters, writes a
function-to-be-minimized (residual function) in terms of these Parameters.
Copyright (c) 2011 Matthew Newville, The University of Chicago
<newville@cars.uchicago.edu>
"""
from numpy import (dot, eye, ndarray, ones_like,
sqrt, take, transpose, triu)
from numpy.dual import inv
from numpy.linalg import LinAlgError
from scipy.optimize import leastsq as scipy_leastsq
from scipy.optimize import fmin as scipy_fmin
#from scipy.optimize import anneal as scipy_anneal #TODO remove scipy_anneal AND def anneal() as they are not used?
from scipy.optimize.lbfgsb import fmin_l_bfgs_b as scipy_lbfgsb
# check for scipy.optimize.minimize
HAS_SCALAR_MIN = False
try:
from scipy.optimize import minimize as scipy_minimize
HAS_SCALAR_MIN = True
except ImportError:
pass
from .asteval import Interpreter
from .astutils import NameFinder
from .parameter import Parameter, Parameters
# use locally modified version of uncertainties package
import uncertainties
def asteval_with_uncertainties(*vals, **kwargs):
    """Evaluate a constrained object's expression for the given variable values.

    Used by the uncertainties package to propagate uncertainty through an
    arbitrary constraint expression (``_obj.ast``).  The required context is
    passed via keyword arguments: ``_obj`` (the constrained Parameter),
    ``_pars`` (all Parameters), ``_names`` (variable names matching *vals*),
    and ``_asteval`` (the expression interpreter).  Returns 0 when any piece
    of context is missing or the object has no parsed expression.
    """
    obj = kwargs.get('_obj', None)
    pars = kwargs.get('_pars', None)
    names = kwargs.get('_names', None)
    interp = kwargs.get('_asteval', None)
    # Bail out (with a neutral value) if the evaluation context is incomplete:
    if (obj is None or pars is None or names is None or interp is None
            or obj.ast is None):
        return 0
    # Install the candidate variable values into the interpreter's symbol
    # table, then evaluate the constraint expression:
    for name, val in zip(names, vals):
        interp.symtable[name] = val
    return interp.eval(obj.ast)
# Wrapped evaluator that the uncertainties package can call with ufloat
# arguments, so constraint expressions propagate standard errors.
wrap_ueval = uncertainties.wrap(asteval_with_uncertainties)
def eval_stderr(obj, uvars, _names, _pars, _asteval):
    """evaluate uncertainty and set .stderr for a parameter `obj`
    given the uncertain values `uvars` (a list of uncertainties.ufloats),
    a list of parameter names that matches uvars, and a dict of param
    objects, keyed by name.

    This uses the uncertainties package wrapped function to evaluate
    the uncertainty for an arbitrary expression (in obj.ast) of parameters.
    Parameters without a parsed expression are left untouched.
    """
    # Only constrained Parameters (with a parsed expression) are handled:
    if not isinstance(obj, Parameter) or not hasattr(obj, 'ast'):
        return
    uval = wrap_ueval(*uvars, _obj=obj, _names=_names,
                      _pars=_pars, _asteval=_asteval)
    try:
        obj.stderr = uval.std_dev
    except Exception:
        # Narrowed from a bare 'except', which also swallowed
        # SystemExit/KeyboardInterrupt; keep the best-effort fallback.
        obj.stderr = 0
class MinimizerException(Exception):
    """General Purpose Exception raised by the minimizer machinery."""

    def __init__(self, msg):
        # Keep the message on the instance so callers can inspect it.
        super(MinimizerException, self).__init__()
        self.msg = msg

    def __str__(self):
        # Render as a newline followed by the stored message.
        return "\n%s" % self.msg
def check_ast_errors(error):
    """Return a formatted message for asteval errors, or None if there are none.

    Note: when several errors are present, only the message built from the
    final one in the list is returned (matching historical behavior).
    """
    if len(error) == 0:
        return None
    msg = None
    for err in error:
        msg = '%s: %s' % err.get_error()
    return msg
class Minimizer(object):
    """General-purpose minimizer.

    Wraps a user-supplied residual function together with a Parameters
    set (free variables and/or constrained expressions) and exposes
    several fitting backends: leastsq (Levenberg-Marquardt), fmin
    (Nelder-Mead), lbfgsb, and the generic scipy.optimize.minimize
    scalar methods via scalar_minimize().
    """
    # Error-message templates reused by several methods:
    err_nonparam = \
    "params must be a minimizer.Parameters() instance or list of Parameters()"
    err_maxfev = """Too many function calls (max set to %i)! Use:
    minimize(func, params, ...., maxfev=NNN)
    or set leastsq_kws['maxfev'] to increase this maximum."""
    def __init__(self, userfcn, params, fcn_args=None, fcn_kws=None,
                 iter_cb=None, scale_covar=True, **kws):
        # userfcn: objective function; called as
        #   userfcn(params, *fcn_args, **fcn_kws) and expected to return
        #   a residual array (or a scalar for the scalar minimizers).
        self.userfcn = userfcn
        self.userargs = fcn_args
        if self.userargs is None:
            self.userargs = []
        self.userkws = fcn_kws
        if self.userkws is None:
            self.userkws = {}
        # Extra keyword arguments forwarded to the chosen fit backend:
        self.kws = kws
        # Optional callback invoked at every function evaluation:
        self.iter_cb = iter_cb
        # Whether to scale the covariance matrix by reduced chi-square:
        self.scale_covar = scale_covar
        self.nfev = 0
        self.nfree = 0
        self.message = None
        # Names of the actually-varying parameters, in fit order:
        self.var_map = []
        self.jacfcn = None
        # Expression interpreter used for constrained parameters:
        self.asteval = Interpreter()
        self.namefinder = NameFinder()
        self.__prepared = False
        self.__set_params(params)
        self.prepare_fit()
    def __update_paramval(self, name):
        """
        update parameter value, including setting bounds.
        For a constrained parameter (one with an expr defined),
        this first updates (recursively) all parameters on which
        the parameter depends (using the 'deps' field).
        """
        # Has this param already been updated?
        # if this is called as an expression dependency,
        # it may have been!
        if self.updated[name]:
            return
        par = self.params[name]
        if par.expr is not None:
            # Evaluate dependencies first so the expression sees
            # up-to-date values:
            for dep in par.deps:
                self.__update_paramval(dep)
            par.value = self.asteval.run(par.ast)
            out = check_ast_errors(self.asteval.error)
            if out is not None:
                self.asteval.raise_exception(None)
        # Mirror the value into the symbol table so other expressions
        # can reference this parameter by name:
        self.asteval.symtable[name] = par.value
        self.updated[name] = True
    def update_constraints(self):
        """update all constrained parameters, checking that
        dependencies are evaluated as needed."""
        self.updated = dict([(name, False) for name in self.params])
        for name in self.params:
            self.__update_paramval(name)
    def __residual(self, fvars):
        """
        residual function used for least-squares fit.
        With the new, candidate values of fvars (the fitting variables),
        this evaluates all parameters, including setting bounds and
        evaluating constraints, and then passes those to the
        user-supplied function to calculate the residual.
        """
        # set parameter values (mapping internal, unbounded fit variables
        # back to bounded parameter values):
        for varname, val in zip(self.var_map, fvars):
            # self.params[varname].value = val
            par = self.params[varname]
            par.value = par.from_internal(val)
        self.nfev = self.nfev + 1
        self.update_constraints()
        out = self.userfcn(self.params, *self.userargs, **self.userkws)
        if hasattr(self.iter_cb, '__call__'):
            self.iter_cb(self.params, self.nfev, out,
                         *self.userargs, **self.userkws)
        return out
    def __jacobian(self, fvars):
        """
        analytical jacobian to be used with the Levenberg-Marquardt
        modified 02-01-2012 by Glenn Jones, Aberystwyth University
        """
        for varname, val in zip(self.var_map, fvars):
            # self.params[varname].value = val
            # NOTE(review): the result of from_internal() is discarded here,
            # unlike in __residual where it is assigned to par.value --
            # looks unintended; confirm against upstream lmfit.
            self.params[varname].from_internal(val)
        self.nfev = self.nfev + 1
        self.update_constraints()
        # computing the jacobian
        return self.jacfcn(self.params, *self.userargs, **self.userkws)
    def __set_params(self, params):
        """ set internal self.params from a Parameters object or
        a list/tuple of Parameters"""
        if params is None or isinstance(params, Parameters):
            self.params = params
        elif isinstance(params, (list, tuple)):
            # Build a Parameters dict from the individual Parameter objects:
            _params = Parameters()
            for _par in params:
                if not isinstance(_par, Parameter):
                    raise MinimizerException(self.err_nonparam)
                else:
                    _params[_par.name] = _par
            self.params = _params
        else:
            raise MinimizerException(self.err_nonparam)
    def penalty(self, params):
        """penalty function for scalar minimizers:
        evaluates user-supplied objective function,
        if result is an array, return array sum-of-squares.
        """
        r = self.__residual(params)
        if isinstance(r, ndarray):
            r = (r*r).sum()
        return r
    def prepare_fit(self, params=None):
        """prepare parameters for fit:
        classify each parameter as a free variable or a constrained
        expression, parse constraint expressions, and collect the initial
        internal values of the free variables in self.vars."""
        # determine which parameters are actually variables
        # and which are defined expressions.
        if params is None and self.params is not None and self.__prepared:
            return
        if params is not None and self.params is None:
            self.__set_params(params)
        self.nfev = 0
        self.var_map = []
        self.vars = []
        self.vmin, self.vmax = [], []
        for name, par in list(self.params.items()):
            if par.expr is not None:
                # Constrained parameter: parse its expression and record
                # which other parameters it depends on.
                par.ast = self.asteval.parse(par.expr)
                check_ast_errors(self.asteval.error)
                par.vary = False
                par.deps = []
                self.namefinder.names = []
                self.namefinder.generic_visit(par.ast)
                for symname in self.namefinder.names:
                    if (symname in self.params and
                        symname not in par.deps):
                        par.deps.append(symname)
            elif par.vary:
                # Free variable: store its internal (bound-transformed) value.
                self.var_map.append(name)
                self.vars.append(par.setup_bounds())
                # self.vars.append(par.set_internal_value())
                #self.vmin.append(par.min)
                #self.vmax.append(par.max)
            self.asteval.symtable[name] = par.value
            par.init_value = par.value
            if par.name is None:
                par.name = name
        self.nvarys = len(self.vars)
        # now evaluate make sure initial values
        # are used to set values of the defined expressions.
        # this also acts as a check of expression syntax.
        self.update_constraints()
        self.__prepared = True
    # The simulated-annealing backend is disabled (kept as a string literal
    # for reference): scipy's anneal was deemed unusable and later removed.
    '''def anneal(self, schedule='cauchy', **kws):
        """
        use simulated annealing
        """
        sched = 'fast'
        if schedule in ('cauchy', 'boltzmann'):
            sched = schedule
        self.prepare_fit()
        sakws = dict(full_output=1, schedule=sched,
                     maxiter = 2000 * (self.nvarys + 1))
        sakws.update(self.kws)
        sakws.update(kws)
        print("WARNING: scipy anneal appears unusable!")
        saout = scipy_anneal(self.penalty, self.vars, **sakws)
        self.sa_out = saout
        return'''
    def lbfgsb(self, **kws):
        """
        use l-bfgs-b minimization
        """
        self.prepare_fit()
        lb_kws = dict(factr=1000.0, approx_grad=True, m=20,
                      maxfun = 2000 * (self.nvarys + 1),
                      # bounds = zip(self.vmin, self.vmax),
                      )
        lb_kws.update(self.kws)
        lb_kws.update(kws)
        xout, fout, info = scipy_lbfgsb(self.penalty, self.vars, **lb_kws)
        self.nfev = info['funcalls']
        self.message = info['task']
        # NOTE(review): penalty() already returns the sum of squares, so
        # squaring it again here makes chisqr = (sum r^2)^2 -- looks
        # suspicious; confirm intended semantics before changing.
        self.chisqr = (self.penalty(xout)**2).sum()
    def fmin(self, **kws):
        """
        use nelder-mead (simplex) minimization
        """
        self.prepare_fit()
        fmin_kws = dict(full_output=True, disp=False, retall=True,
                        ftol=1.e-4, xtol=1.e-4,
                        maxfun = 5000 * (self.nvarys + 1))
        fmin_kws.update(kws)
        ret = scipy_fmin(self.penalty, self.vars, **fmin_kws)
        # (note: 'iter' shadows the builtin, kept for byte-compatibility)
        xout, fout, iter, funccalls, warnflag, allvecs = ret
        self.nfev = funccalls
        # NOTE(review): same double-squaring of penalty() as in lbfgsb().
        self.chisqr = (self.penalty(xout)**2).sum()
    def scalar_minimize(self, method='Nelder-Mead', hess=None, tol=None, **kws):
        """use one of the scaler minimization methods from scipy.
        Available methods include:
          Nelder-Mead
          Powell
          CG  (conjugate gradient)
          BFGS
          Newton-CG
          Anneal
          L-BFGS-B
          TNC
          COBYLA
          SLSQP

        If the objective function returns a numpy array instead
        of the expected scalar, the sum of squares of the array
        will be used.

        Note that bounds and constraints can be set on Parameters
        for any of these methods, so are not supported separately
        for those designed to use bounds.
        """
        if not HAS_SCALAR_MIN :
            raise NotImplementedError
        self.prepare_fit()
        maxfev = 1000*(self.nvarys + 1)
        opts = {'maxiter': maxfev}
        # These three methods do not accept a 'maxfev' option:
        if method not in ('L-BFGS-B', 'TNC', 'SLSQP'):
            opts['maxfev'] = maxfev
        fmin_kws = dict(method=method, tol=tol, hess=hess, options=opts)
        fmin_kws.update(self.kws)
        fmin_kws.update(kws)
        ret = scipy_minimize(self.penalty, self.vars, **fmin_kws)
        xout = ret.x
        self.message = ret.message
        self.nfev = ret.nfev
        # NOTE(review): same double-squaring of penalty() as in lbfgsb().
        self.chisqr = (self.penalty(xout)**2).sum()
    def leastsq(self, **kws):
        """
        use Levenberg-Marquardt minimization to perform fit.
        This assumes that ModelParameters have been stored,
        and a function to minimize has been properly set up.

        This wraps scipy.optimize.leastsq, and keyword arguments are passed
        directly as options to scipy.optimize.leastsq

        When possible, this calculates the estimated uncertainties and
        variable correlations from the covariance matrix.

        writes outputs to many internal attributes, and
        returns True if fit was successful, False if not.
        """
        self.prepare_fit()
        lskws = dict(full_output=1, xtol=1.e-7, ftol=1.e-7,
                     gtol=1.e-7, maxfev=2000*(self.nvarys+1), Dfun=None)
        lskws.update(self.kws)
        lskws.update(kws)
        if lskws['Dfun'] is not None:
            # Wrap the user's analytical jacobian so parameter bookkeeping
            # happens on every jacobian evaluation too:
            self.jacfcn = lskws['Dfun']
            lskws['Dfun'] = self.__jacobian
        lsout = scipy_leastsq(self.__residual, self.vars, **lskws)
        _best, _cov, infodict, errmsg, ier = lsout
        self.residual = resid = infodict['fvec']
        self.ier = ier
        self.lmdif_message = errmsg
        self.message = 'Fit succeeded.'
        self.success = ier in [1, 2, 3, 4]
        # NOTE(review): the else-branch below overwrites 'Fit succeeded.'
        # even for the successful ier codes 1-4; confirm against upstream
        # lmfit before relying on self.message.
        if ier == 0:
            self.message = 'Invalid Input Parameters.'
        elif ier == 5:
            self.message = self.err_maxfev % lskws['maxfev']
        else:
            self.message = 'Tolerance seems to be too small.'
        self.nfev = infodict['nfev']
        self.ndata = len(resid)
        sum_sqr = (resid**2).sum()
        self.chisqr = sum_sqr
        self.nfree = (self.ndata - self.nvarys)
        self.redchi = sum_sqr / self.nfree
        # need to map _best values to params, then calculate the
        # grad for the variable parameters
        grad = ones_like(_best)   # d(external value)/d(internal value)
        vbest = ones_like(_best)  # external (bounded) best-fit values
        for ivar, varname in enumerate(self.var_map):
            par = self.params[varname]
            grad[ivar] = par.scale_gradient(_best[ivar])
            vbest[ivar] = par.value
        # modified from JJ Helmus' leastsqbound.py
        # Rescale the jacobian columns from internal to external variables,
        # then build the covariance from the R factor of the QR decomposition:
        infodict['fjac'] = transpose(transpose(infodict['fjac']) /
                                     take(grad, infodict['ipvt'] - 1))
        rvec = dot(triu(transpose(infodict['fjac'])[:self.nvarys, :]),
                   take(eye(self.nvarys), infodict['ipvt'] - 1, 0))
        try:
            cov = inv(dot(transpose(rvec), rvec))
        except (LinAlgError, ValueError):
            cov = None
        # Reset per-parameter error estimates before (re)filling them:
        for par in list(self.params.values()):
            par.stderr, par.correl = 0, None
        self.covar = cov
        if cov is None:
            self.errorbars = False
            # NOTE(review): the '%s' placeholder is never filled in
            # (missing '% self.message'); confirm before fixing.
            self.message = '%s. Could not estimate error-bars'
        else:
            self.errorbars = True
            if self.scale_covar:
                # Scale covariance by reduced chi-square:
                self.covar = cov = cov * sum_sqr / self.nfree
            # uncertainties on constrained parameters:
            #  get values with uncertainties (including correlations),
            #  temporarily set Parameter values to these,
            #  re-evaluate constrained parameters to extract stderr
            #  and then set Parameters back to best-fit value
            uvars = uncertainties.correlated_values(vbest, self.covar)
            for ivar, varname in enumerate(self.var_map):
                par = self.params[varname]
                par.stderr = sqrt(cov[ivar, ivar])
                par.correl = {}
                for jvar, varn2 in enumerate(self.var_map):
                    if jvar != ivar:
                        par.correl[varn2] = (cov[ivar, jvar]/
                                        (par.stderr * sqrt(cov[jvar, jvar])))
            # Propagate uncertainties into constrained (expression) params:
            for pname, par in list(self.params.items()):
                eval_stderr(par, uvars, self.var_map,
                            self.params, self.asteval)
            # restore nominal values
            for v, nam in zip(uvars, self.var_map):
                self.asteval.symtable[nam] = v.nominal_value
        # Drop parsed expression trees (not picklable, no longer needed):
        for par in list(self.params.values()):
            if hasattr(par, 'ast'):
                delattr(par, 'ast')
        return self.success
def minimize(fcn, params, method='leastsq', args=None, kws=None,
             scale_covar=True, engine=None, iter_cb=None, **fit_kws):
    """simple minimization function,
    finding the values for the params which give the
    minimal sum-of-squares of the array return by fcn

    Parameters
    ----------
    fcn: callable
        Objective function, called as fcn(params, *args, **kws); should
        return a residual array (or a scalar for the scalar methods).
    params: Parameters or list of Parameter
        Starting parameters for the fit.
    method: str
        Fit method, matched case-insensitively by prefix: 'leastsq'
        (default), 'nelder', 'lbfgsb', or any scipy.optimize.minimize
        method ('powell', 'cg', 'bfgs', 'newton', 'tnc', 'cobyla',
        'slsqp', ...).
    args, kws:
        Extra positional / keyword arguments passed through to fcn.
    scale_covar: bool
        Whether to scale the covariance matrix by reduced chi-square.
    engine: str or None
        Legacy alias for method; takes precedence when given.
    iter_cb: callable or None
        Callback invoked at each function evaluation.
    fit_kws:
        Extra keyword arguments for the underlying fit routine.

    Returns
    -------
    fitter: Minimizer
        The Minimizer instance after the fit has run (or unchanged if no
        method matched).
    """
    fitter = Minimizer(fcn, params, fcn_args=args, fcn_kws=kws,
                       iter_cb=iter_cb, scale_covar=scale_covar, **fit_kws)
    # Map of user-facing prefixes to scipy.optimize.minimize method names.
    # Fixed: 'CG' previously had a stray trailing space ('CG '), which
    # scipy.optimize.minimize does not recognize as a valid method.
    _scalar_methods = {'nelder': 'Nelder-Mead', 'powell': 'Powell',
                       'cg': 'CG', 'bfgs': 'BFGS',
                       'newton': 'Newton-CG', 'anneal': 'Anneal',
                       'lbfgs': 'L-BFGS-B', 'l-bfgs': 'L-BFGS-B',
                       'tnc': 'TNC', 'cobyla': 'COBYLA',
                       'slsqp': 'SLSQP'}
    _fitmethods = {'anneal': 'anneal', 'nelder': 'fmin',
                   'lbfgsb': 'lbfgsb', 'leastsq': 'leastsq'}
    if engine is not None:
        method = engine
    meth = method.lower()
    fitfunction = None
    kwargs = {}
    # default and most common option: use leastsq method.
    if meth == 'leastsq':
        fitfunction = fitter.leastsq
    else:
        # if scalar_minimize() is supported and method is in list, use it.
        # (loop variables renamed so they no longer shadow 'method')
        if HAS_SCALAR_MIN:
            for prefix, scipy_name in list(_scalar_methods.items()):
                if meth.startswith(prefix):
                    fitfunction = fitter.scalar_minimize
                    kwargs = dict(method=scipy_name)
        # look for other built-in methods
        if fitfunction is None:
            for prefix, attr_name in list(_fitmethods.items()):
                if meth.startswith(prefix):
                    fitfunction = getattr(fitter, attr_name)
    if fitfunction is not None:
        fitfunction(**kwargs)
    return fitter
def make_paras_and_func(fcn, x0, used_kwargs=None):
    """
    A function which takes a function and makes a parameters-dict
    for it.

    Takes the function `fcn`. A starting guess `x0` for the
    non-kwargs parameters must also be given. If kwargs
    are used, `used_kwargs` is a dict where the keys are the
    used kwargs and the values are the starting values.

    Returns
    -------
    (p, func): tuple
        `p` is a Parameters instance with one entry per positional
        argument of `fcn` (plus one per used kwarg); `func` is a wrapper
        that evaluates `fcn` from such a Parameters instance.
    """
    import inspect
    # inspect.getargspec was removed in Python 3.11; getfullargspec has
    # the same information but must be accessed by attribute, because its
    # tuple layout differs (its last field is 'annotations', not 'defaults').
    argspec = inspect.getfullargspec(fcn)
    arg_names = argspec.args
    defaults = argspec.defaults
    len_def = len(defaults) if defaults is not None else 0
    args_without_defaults = len(arg_names) - len_def
    if len(x0) < args_without_defaults:
        raise ValueError('x0 too short')
    p = Parameters()
    for i, val in enumerate(x0):
        p.add(arg_names[i], val)
    if used_kwargs:
        for arg, val in list(used_kwargs.items()):
            p.add(arg, val)
    else:
        used_kwargs = {}

    def func(para):
        "wrapped func"
        kwdict = {}
        for arg in list(used_kwargs.keys()):
            kwdict[arg] = para[arg].value
        vals = [para[i].value for i in p]
        return fcn(*vals[:len(x0)], **kwdict)
    return p, func
|
IvS-KULeuvenREPO_NAMEIvSPythonRepositoryPATH_START.@IvSPythonRepository_extracted@IvSPythonRepository-master@sigproc@lmfit@minimizer.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "kennethcarrell/ATARRI",
"repo_path": "ATARRI_extracted/ATARRI-main/README.md",
"type": "Markdown"
}
|
# ATARRI
A TESS Archive RR Lyrae Classifier
## Look in the [doc](doc/) directory for information about:
- [Installing](doc/Installation.pdf) this package
- [Using](doc/UserManual.pdf) this package
- A [Science-Use Guidebook](doc/Guidebook.pdf) for this package.
###### Example usage:
```python
import tkinter
from ATARRI.RRLClassifier import RRLClassifier
root = tkinter.Tk()
theGui = RRLClassifier(root)
root.mainloop()
```
###### Todo:
- [ ] Add machine learning prediction
|
kennethcarrellREPO_NAMEATARRIPATH_START.@ATARRI_extracted@ATARRI-main@README.md@.PATH_END.py
|
{
"filename": "astropyconst13.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/constants/astropyconst13.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Astronomical and physics constants for Astropy v1.3 and earlier.
See :mod:`astropy.constants` for a complete listing of constants
defined in Astropy.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
from .constant import Constant
from . import codata2010, iau2012
# Re-export every Constant defined by the codata2010 and iau2012 modules
# under its abbreviation (sorted for a deterministic order), skipping any
# abbreviation that would shadow a name already defined in this module.
for _nm, _c in itertools.chain(sorted(vars(codata2010).items()),
                               sorted(vars(iau2012).items())):
    if (isinstance(_c, Constant) and _c.abbrev not in locals()):
        locals()[_c.abbrev] = _c
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@constants@astropyconst13.py@.PATH_END.py
|
{
"filename": "_ticks.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterternary/marker/colorbar/_ticks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``scatterternary.marker.colorbar.ticks``."""

    def __init__(
        self,
        plotly_name="ticks",
        parent_name="scatterternary.marker.colorbar",
        **kwargs,
    ):
        # Pull defaults out of kwargs first so callers may override them.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["outside", "inside", ""])
        super(TicksValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterternary@marker@colorbar@_ticks.py@.PATH_END.py
|
{
"filename": "_len.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/contour/colorbar/_len.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``contour.colorbar.len``."""

    def __init__(self, plotly_name="len", parent_name="contour.colorbar", **kwargs):
        # Pull defaults out of kwargs first so callers may override them.
        edit_type = kwargs.pop("edit_type", "colorbars")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(LenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@contour@colorbar@_len.py@.PATH_END.py
|
{
"filename": "tools.py",
"repo_name": "pcubillos/pyratbay",
"repo_path": "pyratbay_extracted/pyratbay-master/pyratbay/tools/tools.py",
"type": "Python"
}
|
# Copyright (c) 2021 Patricio Cubillos
# Pyrat Bay is open-source software under the GNU GPL-2.0 license (see LICENSE)
# Public API of pyratbay.tools (names exported via 'from ... import *'):
__all__ = [
    'log_error',
    'cd',
    'tmp_reset',
    'binsearch',
    'divisors',
    'unpack',
    'u',
    'get_param',
    'ifirst', 'ilast',
    'isfile',
    'file_exists',
    'path',
    'Formatted_Write',
    'Timer',
    'get_exomol_mol',
    'cia_hitran', 'cia_borysow',
    'radius_to_depth',
    'depth_to_radius',
    'ignore_system_exit',
    ]
import os
import re
import struct
import time
import numbers
import string
import textwrap
import itertools
import functools
from collections.abc import Iterable
from contextlib import contextmanager
import configparser
import numpy as np
import mc3.utils as mu
from .. import constants as pc
from .. import io as io
from ..lib import _indices
@contextmanager
def log_error(log=None, error=None):
    """Capture exceptions into a log.error() call.

    Any exception raised inside the with-block is reported through
    ``log.error()`` (a default mc3 Log is created when *log* is None);
    *error* overrides the exception's own message when given.
    """
    try:
        yield
    except Exception as exc:
        if log is None:
            log = mu.Log(logname=None, verb=1, width=80)
        message = error if error is not None else str(exc)
        log.error(message, tracklev=-4)
@contextmanager
def cd(newdir):
    """Context manager for changing the current working directory.

    Restores the previous working directory on exit, even on error.
    Taken from here: https://stackoverflow.com/questions/431684/
    """
    previous_dir = os.getcwd()
    os.chdir(os.path.expanduser(newdir))
    try:
        yield
    finally:
        os.chdir(previous_dir)
def recursive_setattr(obj, attr, val):
    """Recursive setattr, see https://stackoverflow.com/questions/31174295

    Supports dotted attribute paths, e.g. ``'a.b.c'`` sets ``obj.a.b.c``.
    """
    parent_path, _, leaf = attr.rpartition('.')
    target = recursive_getattr(obj, parent_path) if parent_path else obj
    return setattr(target, leaf, val)
def recursive_getattr(obj, attr):
    """Recursive getattr, see https://stackoverflow.com/questions/31174295

    Supports dotted attribute paths, e.g. ``'a.b.c'`` reads ``obj.a.b.c``.
    """
    return functools.reduce(getattr, attr.split('.'), obj)
@contextmanager
def tmp_reset(obj, *attrs, **tmp_attrs):
    """
    Temporarily remove attributes from an object.

    Positional *attrs* (dotted paths allowed) are set to None for the
    duration of the with-block; keyword *tmp_attrs* are set to the given
    values (no dotted paths).  All original values are restored on exit.

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> o = type('obj', (object,), {'x':1.0, 'y':2.0})
    >>> obj = type('obj', (object,), {'z':3.0, 'w':4.0, 'o':o})
    >>> with pt.tmp_reset(obj, 'o.x', 'z'):
    >>>     print(obj.o.x, obj.o.y, obj.z, obj.w)
    (None, 2.0, None, 4.0)
    """
    saved_values = {}
    for attr in attrs:
        saved_values[attr] = recursive_getattr(obj, attr)
        recursive_setattr(obj, attr, None)
    for attr, tmp_val in tmp_attrs.items():
        saved_values[attr] = recursive_getattr(obj, attr)
        recursive_setattr(obj, attr, tmp_val)
    yield
    # Restore every attribute to its pre-context value:
    for attr, original in saved_values.items():
        recursive_setattr(obj, attr, original)
def binsearch(tli, wnumber, rec0, nrec, upper=True):
    r"""
    Do a binary+linear search in TLI dbfile for record with wavenumber
    immediately less equal to wnumber (if upper is True), or greater
    equal to wnumber (if upper) is False (considering duplicate values
    in tli file).

    Parameters
    ----------
    tli: File object
        TLI file where to search.
    wnumber: Scalar
        Target wavenumber in cm-1.
    rec0: Integer
        File position of first wavenumber record.
    nrec: Integer
        Number of wavenumber records.
    upper: Boolean
        If True, consider wnumber as an upper boundary. If False,
        consider wnumber as a lower boundary.

    Returns
    -------
    irec: Integer
        Index of record nearest to target. Return -1 if out of bounds.

    Raises
    ------
    ValueError
        If nrec is not a positive number.

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> import struct
    >>> # Mock a TLI file:
    >>> wn = [0.0, 1.0, 1.0, 1.0, 2.0, 2.0]
    >>> with open('tli_demo.dat', 'wb') as tli:
    >>>     tli.write(struct.pack(str(len(wn))+"d", *wn))
    >>> # Now do bin searches for upper and lower boundaries:
    >>> with open('tli_demo.dat', 'rb') as tli:
    >>>     bs_lower = [pt.binsearch(tli, target, 0, len(wn), upper=False)
    >>>         for target in [-1.0, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5]]
    >>>     bs_upper = [pt.binsearch(tli, target, 0, len(wn), upper=True)
    >>>         for target in [-1.0, 0.0, 0.5, 1.0, 1.5, 2.0, 2.5]]
    >>> print(bs_lower, bs_upper, sep='\n')
    [0, 0, 1, 1, 4, 4, -1]
    [-1, 0, 0, 3, 3, 5, 5]
    """
    if nrec <= 0:
        # Fixed duplicated words in the message ('a zero a zero-sized'):
        raise ValueError('Requested binsearch over a zero-sized array.')
    # Initialize indices and current record:
    irec = ilo = 0
    ihi = nrec - 1
    tli.seek(rec0, 0)
    current = first = struct.unpack('d', tli.read(8))[0]
    tli.seek(rec0 + ihi*pc.dreclen, 0)
    last = struct.unpack('d', tli.read(8))[0]
    # Out of bounds:
    if wnumber < first and upper:
        return -1
    if last < wnumber and not upper:
        return -1
    # Binary search down to two candidate records:
    while ihi - ilo > 1:
        irec = (ihi + ilo) // 2
        tli.seek(rec0 + irec*pc.dreclen, 0)
        current = struct.unpack('d', tli.read(8))[0]
        if current > wnumber:
            ihi = irec
        else:
            ilo = irec
    # Linear search to walk across duplicate wavenumber values:
    if upper and current > wnumber:
        return irec - 1
    elif not upper and current < wnumber:
        return irec + 1
    elif upper:
        while current <= wnumber:
            irec += 1
            if irec > nrec-1:
                return nrec-1
            tli.seek(rec0 + irec*pc.dreclen, 0)
            current = struct.unpack('d', tli.read(8))[0]
        return irec - 1
    else:
        while current >= wnumber:
            irec -= 1
            if irec < 0:
                return 0
            tli.seek(rec0 + irec*pc.dreclen, 0)
            current = struct.unpack('d', tli.read(8))[0]
        return irec + 1
def divisors(number):
    """
    Find all the integer divisors of number.

    Parameters
    ----------
    number: Integer
        A positive integer.

    Returns
    -------
    divs: 1D integer ndarray
        The divisors of number, in increasing order (number itself last).

    Notes
    -----
    Previously iterated with np.arange(1, number/2+1), producing float
    candidates (fragile float modulo) and a duplicated entry for
    number == 1; now iterates over exact integers.
    """
    divs = [i for i in range(1, number//2 + 1) if number % i == 0]
    divs.append(number)
    return np.asarray(divs, int)
def unpack(file, n, dtype):
    r"""
    Wrapper for struct unpack.

    Parameters
    ----------
    file: File object
        File object to read from.
    n: Integer
        Number of elements to read from file.
    dtype: String
        Data type of the bytes read (a struct format character).

    Returns
    -------
    output: Scalar, tuple, or string
        If dtype is 's' return the string (decoded as UTF-8).
        If there is a single element to read, return the scalar value.
        Else, return a tuple with the elements read.
    """
    # Read exactly the number of bytes the format requires, then decode:
    fmt = f'{n}{dtype}'
    raw = file.read(struct.calcsize(fmt))
    values = struct.unpack(fmt, raw)
    if dtype == 's':
        return values[0].decode('utf-8')
    if n == 1:
        return values[0]
    return values
def u(units):
    """
    Get the conversion factor (to the CGS system) for units.

    Parameters
    ----------
    units: String
        Name of units (must be an attribute of pyratbay.constants).

    Returns
    -------
    value: Float
        Value of input units in CGS units.

    Raises
    ------
    ValueError
        If units is not defined in pyratbay.constants.
    """
    # Accept only valid units:
    if not hasattr(pc, units):
        raise ValueError(
            f"Units '{units}' does not exist in pyratbay.constants.")
    return getattr(pc, units)
def get_param(param, units='none', gt=None, ge=None):
    """
    Read a parameter that may or may not have units.
    If it doesn't, default to the 'units' input argument.

    Parameters
    ----------
    param: String, Float, integer, or ndarray
        The parameter value (which may contain the units).
    units: String
        The default units for the parameter.
    gt: Float
        If not None, check output is greater than gt.
    ge: Float
        If not None, check output is greater-equal than ge.

    Returns
    -------
    value: Float or integer
        The parameter value converted to CGS units.

    Raises
    ------
    ValueError
        If the value or units are invalid, or the gt/ge checks fail.

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> # One meter in cm:
    >>> pt.get_param('1.0 m')
    100.0
    >>> # Alternatively, specify units in second argument:
    >>> pt.get_param(1.0, 'm')
    100.0
    >>> # Units in 'param' take precedence over 'unit':
    >>> pt.get_param('1.0 m', 'km')
    100.0
    >>> # Request returned value to be positive:
    >>> pt.get_param('-30.0 kelvin', gt=0.0)
    ValueError: Value -30.0 must be > 0.0.
    """
    if param is None:
        return None
    # Split the parameter if it has a white-space:
    if isinstance(param, str):
        par = param.split()
        if len(par) > 2:
            raise ValueError(f"Invalid value '{param}'")
        if len(par) == 2:
            units = par[1]
            if not hasattr(pc, units):
                raise ValueError(f"Invalid units for value '{param}'")
        try:
            value = float(par[0])
        except ValueError:
            # Narrowed from a bare 'except': float(str) only raises
            # ValueError here, and a bare clause also caught
            # SystemExit/KeyboardInterrupt.
            raise ValueError(f"Invalid value '{param}'")
    else:
        value = param
    # Use given units:
    if isinstance(param, (numbers.Number, np.ndarray)) \
            or (isinstance(param, str) and len(par) == 1):
        if units is None or not hasattr(pc, units):
            raise ValueError(f"Invalid units '{units}'")
    # Apply the units:
    value *= u(units)
    if gt is not None and value <= gt:
        raise ValueError(f'Value {value} must be > {gt}')
    if ge is not None and value < ge:
        raise ValueError(f'Value {value} must be >= {ge}')
    return value
def ifirst(data, default_ret=-1):
    """
    Get the first index where data is True or 1.

    Parameters
    ----------
    data: 1D bool/integer iterable
        An array of bools or integers.
    default_ret: Integer
        Default returned value when no value in data is True or 1.

    Returns
    -------
    first: integer
        First index where data == True or 1. Return default_ret otherwise.

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> print(pt.ifirst([1,0,0]))
    0
    >>> print(pt.ifirst([False, True, True]))
    1
    >>> print(pt.ifirst([False, False, False]))
    -1
    """
    # Delegate to the compiled C-extension after casting to int:
    int_data = np.asarray(data, int)
    return _indices.ifirst(int_data, default_ret)
def ilast(data, default_ret=-1):
    """
    Get the last index where data is 1 or True.

    Parameters
    ----------
    data: 1D bool/integer iterable
        An array of bools or integers.
    default_ret: Integer
        Default returned value when no value in data is True or 1.

    Returns
    -------
    last: integer
        Last index where data == 1 or True. Return default_ret otherwise.

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> print(pt.ilast([1,0,0]))
    0
    >>> print(pt.ilast([False, True, True]))
    2
    >>> print(pt.ilast([False, False, False]))
    -1
    """
    # Delegate to the compiled C-extension after casting to int:
    int_data = np.asarray(data, int)
    return _indices.ilast(int_data, default_ret)
def isfile(path):
    """
    Check whether a path (or list of paths) is a regular file.

    Parameters
    ----------
    path: String or list
        Path(s) to check.

    Returns
    -------
    status: Integer
        If path is None, return -1.
        If any path is not a regular file, return 0.
        If all paths are a regular file, return 1.
    """
    # None exception:
    if path is None:
        return -1
    # Normalize to a list of paths:
    paths = [path] if isinstance(path, str) else path
    # Regular file or not:
    return int(all(os.path.isfile(p) for p in paths))
def file_exists(pname, desc, value):
    """
    Check that a file or list of files (value) exist.  If value is not
    None and any file does not exist, raise a ValueError.

    Parameters
    ----------
    pname: String
        Parameter name.
    desc: String
        Parameter description.
    value: String or list of strings
        File path(s) to check (None is silently accepted).

    Raises
    ------
    ValueError
        If any of the given paths is not an existing regular file.
    """
    if value is None:
        return
    # Normalize to a list of paths:
    targets = [value] if isinstance(value, str) else value
    for value in targets:
        if not os.path.isfile(value):
            raise ValueError(f"{desc} file ({pname}) does not exist: '{value}'")
def path(filename):
    """
    Ensure file names have non-null path.

    Parameters
    ----------
    filename: String
        A file name.

    Returns
    -------
    path: String
        The file name with an explicit (at least './') directory part,
        or None if filename is None.

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> print(pt.path('file.txt'))
    ./file.txt
    >>> print(pt.path('./file.txt'))
    ./file.txt
    >>> print(pt.path('/home/user/file.txt'))
    /home/user/file.txt
    """
    if filename is None:
        return None
    # Fixed: previously returned the literal string '(unknown)' in place
    # of the actual file name, contradicting the documented behavior.
    # (Locals renamed to stop shadowing this function's own name.)
    dirname, basename = os.path.split(filename)
    if dirname == '':
        dirname = '.'
    return f'{dirname}/{basename}'
class Formatted_Write(string.Formatter):
    """
    Write (and keep) formatted, wrapped text to string.

    Following PEP3101, this class subclasses Formatter to handle
    None when a specific format is set.

    Examples
    --------
    >>> import numpy as np
    >>> import pyratbay.tools as pt
    >>> fmt = pt.Formatted_Write()
    >>> rstar = np.pi/3.14
    >>> fmt.write('Stellar radius (rstar, rsun): {:.2f}', rstar)
    >>> fmt.write('Stellar radius (rstar, rsun): {:.2f}', None)
    >>> fmt.write('Stellar radius (rstar, rsun): {}', rstar)
    >>> fmt.write('Stellar radius (rstar, rsun): {}', None)
    >>> print(fmt.text)
    Stellar radius (rstar, rsun): 1.00
    Stellar radius (rstar, rsun): None
    Stellar radius (rstar, rsun): 1.0005072145190423
    Stellar radius (rstar, rsun): None
    """
    def __init__(self, indent=0, si=4, fmt=None, edge=None, lw=80, prec=None):
        """
        Parameters
        ----------
        indent: Integer
            Number of blanks for indentation in first line.
        si: Integer
            Number of blanks for indentation in subsequent lines.
        fmt: dict of callables.
            Default formatting for numpy arrays (as in formatting in
            np.printoptions).
        edge: Integer
            Default number of array items in summary at beginning/end
            (as in edgeitems in np.printoptions).
        lw: Integer
            Default number of characters per line (as in linewidth in
            np.printoptions).
        prec: Integer
            Default precision for floating point values (as in precision
            in np.printoptions).
        """
        # Accumulated output text (appended to by write()):
        self.text = ''
        self.indent = indent
        self.si = si
        # Numpy formatting:
        self.fmt = fmt
        self.edge = edge
        self.lw = lw
        self.prec = prec
    def format_field(self, value, spec):
        # Render None as the literal 'None' even when a numeric format
        # spec (e.g. ':.2f') was requested, instead of raising:
        if value is None:
            return 'None'
        return super(Formatted_Write, self).format_field(value, spec)
    def write(self, text, *format, **numpy_fmt):
        """
        Write formatted text.
        See __init__ arguments for available numpy_fmt items.
        """
        # Fall back to the instance-wide numpy formatting defaults:
        numpy_fmt.setdefault('fmt',  self.fmt)
        numpy_fmt.setdefault('edge', self.edge)
        numpy_fmt.setdefault('lw',   self.lw)
        numpy_fmt.setdefault('prec', self.prec)
        if numpy_fmt['edge'] is None:
            threshold = None
        else:
            threshold = 2*numpy_fmt['edge']
        fmt = {
            'formatter': numpy_fmt['fmt'],
            'edgeitems': numpy_fmt['edge'],
            'threshold': threshold,
            'linewidth': numpy_fmt['lw'],
            'precision': numpy_fmt['prec'],
        }
        # Apply numpy print options only while formatting this text:
        with np.printoptions(**fmt):
            text = super(Formatted_Write, self).format(text, *format)
        indspace = ' '*self.indent
        if self.si is None:
            sindspace = indspace
        else:
            sindspace = ' '*self.si
        # NOTE(review): the wrap width is hard-coded to 80 here; 'lw' only
        # affects numpy's linewidth, not the text wrapping -- confirm
        # whether width=self.lw was intended.
        for line in text.splitlines():
            self.text += textwrap.fill(
                line, break_long_words=False, initial_indent=indspace,
                subsequent_indent=sindspace, width=80)
            self.text += '\n'
class Timer(object):
    """
    Stopwatch that reports the seconds elapsed since the previous call.
    """
    def __init__(self):
        # Reference point for the first clock() call:
        self.t0 = time.time()

    def clock(self):
        """Return seconds since construction or the last clock() call."""
        now = time.time()
        elapsed = now - self.t0
        self.t0 = now
        return elapsed
def get_exomol_mol(dbfile):
    """
    Parse an ExoMol file name to extract the molecule and isotope names.

    Parameters
    ----------
    dbfile: String
        An exomol line-list file (must follow ExoMol naming convention).

    Returns
    -------
    molecule: String
        Name of the molecule.
    isotope: String
        Name of the isotope (See Tennyson et al. 2016, jmosp, 327).

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> filenames = [
    >>>     '1H2-16O__POKAZATEL__00400-00500.trans.bz2',
    >>>     '1H-2H-16O__VTT__00250-00500.trans.bz2',
    >>>     '12C-16O2__HITEMP.pf',
    >>>     '12C-16O-18O__Zak.par',
    >>>     '12C-1H4__YT10to10__01100-01200.trans.bz2',
    >>>     '12C-1H3-2H__MockName__01100-01200.trans.bz2'
    >>> ]
    >>> for db in filenames:
    >>>     print(pt.get_exomol_mol(db))
    ('H2O', '116')
    ('H2O', '126')
    ('CO2', '266')
    ('CO2', '268')
    ('CH4', '21111')
    ('CH4', '21112')
    """
    # The leading '__'-delimited token encodes the isotopologue,
    # with '-'-separated atoms like '1H2' or '16O':
    basename = os.path.split(dbfile)[1]
    atom_tags = basename.split('_')[0].split('-')
    pattern = re.compile(r"([0-9]+)([a-z]+)([0-9]*)", re.I)
    symbols = []
    isotope = ''
    for tag in atom_tags:
        mass, symbol, count = pattern.match(tag).groups()
        n = int(count) if count else 1
        symbols.extend([symbol]*n)
        # Last digit of the mass number labels the isotope:
        isotope += mass[-1:]*n
    # Collapse consecutive repeated symbols into 'X' or 'Xn':
    molecule = ''
    for symbol, group in itertools.groupby(symbols):
        size = len(list(group))
        molecule += symbol + (str(size) if size > 1 else '')
    return molecule, isotope
def cia_hitran(ciafile, tstep=1, wstep=1):
    """
    Re-write a HITRAN CIA file into Pyrat Bay format.

    See Richard et al. (2012) and https://www.cfa.harvard.edu/HITRAN/

    Parameters
    ----------
    ciafile: String
        A HITRAN CIA file.
    tstep: Integer
        Slicing step size along temperature dimension.
    wstep: Integer
        Slicing step size along wavenumber dimension.

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> # Before moving on, download a HITRAN CIA files from the link above.
    >>> ciafile = 'H2-H2_2011.cia'
    >>> pt.cia_hitran(ciafile, tstep=2, wstep=10)
    """
    # Extract CS data:
    with open(ciafile, 'r') as f:
        # First line is a header; its first token names the species pair
        # (e.g. 'H2-H2'):
        info = f.readline().strip().split()
        species = info[0].split('-')
        temps, data, wave = [], [], []
        wnmin, wnmax = -1, -1
        f.seek(0)
        for line in f:
            # Header lines start with the species pair and announce a new
            # temperature block: wn range, number of points, temperature.
            if line.strip().startswith('-'.join(species)):
                info = line.strip().split()
                # if wn ranges differ, trigger new set
                if float(info[1]) != wnmin or float(info[2]) != wnmax:
                    wnmin = float(info[1])
                    wnmax = float(info[2])
                temp = float(info[4])
                nwave = int (info[3])
                wn = np.zeros(nwave, np.double)
                cs = np.zeros(nwave, np.double)
                i = 0
                continue
            # else, read in opacities
            wn[i], cs[i] = line.split()[0:2]
            i += 1
            # Block complete: store (optionally thinned) arrays.
            if i == nwave:
                temps.append(temp)
                # Thin the arrays in wavenumber if requested:
                data.append(cs[::wstep])
                wave.append(wn[::wstep])
    # Identify sets: group consecutive temperature blocks that share the
    # exact same wavenumber grid, and write one output file per group.
    temps = np.array(temps)
    ntemps = len(temps)
    i = 0
    while i < ntemps:
        wn = wave[i]
        j = i
        # Advance j while the wavenumber grid is identical to block i's:
        while j < ntemps and len(wave[j])==len(wn) and np.all(wave[j]-wn==0):
            j += 1
        temp = temps[i:j:tstep]
        # Set cm-1 amagat-2 units:
        cs = np.array(data[i:j])[::tstep] * pc.amagat**2
        pair = '-'.join(species)
        # NOTE(review): assumes wn is in cm-1 so 1/(wn*pc.um) gives the
        # wavelength in microns — confirm against pc.um's definition.
        wl_min = 1.0/(wn[-1]*pc.um)
        wl_max = 1.0/(wn[0]*pc.um)
        csfile = (
            f'CIA_HITRAN_{pair}_{wl_min:.1f}-{wl_max:.1f}um_'
            f'{temp[0]:04.0f}-{temp[-1]:04.0f}K.dat')
        header = (
            f'# This file contains the reformated {pair} CIA data from\n'
            f'# HITRAN file: {ciafile}\n\n')
        io.write_cs(csfile, cs, species, temp, wn, header)
        i = j
def cia_borysow(ciafile, species1, species2):
    """
    Re-write a Borysow CIA file into Pyrat Bay format.

    See http://www.astro.ku.dk/~aborysow/programs/

    Parameters
    ----------
    ciafile: String
        A HITRAN CIA file.
    species1: String
        First CIA species.
    species2: String
        Second CIA species.

    Examples
    --------
    >>> import pyratbay.tools as pt
    >>> # Before moving on, download a HITRAN CIA files from the link above.
    >>> ciafile = 'ciah2he_dh_quantmech'
    >>> pt.cia_borysow(ciafile, 'H2', 'He')
    """
    # Numeric table: first column is wavenumber, the rest are cross
    # sections (one column per temperature):
    table = np.loadtxt(ciafile, skiprows=3)
    wn = table[:,0]
    cs = table[:,1:].T
    # Temperatures come from the second header line (e.g. '123.0K'):
    with open(ciafile) as f:
        f.readline()
        temp_labels = f.readline().split()[1:]
    temp = [float(label.replace('K','')) for label in temp_labels]
    species = [species1, species2]
    pair = '-'.join(species)
    wl_min = 1.0/(wn[-1]*pc.um)
    wl_max = 1.0/(wn[0]*pc.um)
    file_name = os.path.basename(ciafile)
    csfile = (
        f'CIA_Borysow_{pair}_{wl_min:.1f}-{wl_max:.1f}um_'
        f'{temp[0]:04.0f}-{temp[-1]:04.0f}K.dat')
    header = (
        f'# This file contains the reformated {pair} CIA data from:\n'
        f'# http://www.astro.ku.dk/~aborysow/programs/{file_name}\n\n')
    io.write_cs(csfile, cs, species, temp, wn, header)
def radius_to_depth(rprs, rprs_err):
    r"""
    Compute transit depth (and uncertainties) from input
    planet-to-star radius-ratio, with error propagation.

    Parameters
    ----------
    rprs: Float or float iterable
        Planet-to-star radius ratio.
    rprs_err: Float or float iterable
        Uncertainties of the radius ratios.

    Returns
    -------
    depth: Float or float ndarray
        Transit depth for given radius ratio.
    depth_err: Float or float ndarray
        Uncertainties of the transit depth.

    Examples
    --------
    >>> import numpy as np
    >>> import pyratbay.tools as pt
    >>> rprs = 1.2
    >>> rprs_err = 0.25
    >>> depth, depth_err = pt.radius_to_depth(rprs, rprs_err)
    >>> print(f'Depth = {depth} +/- {depth_err}')
    Depth = 1.44 +/- 0.6
    """
    # Promote non-array iterables to ndarrays (scalars pass through):
    if isinstance(rprs, Iterable) and not isinstance(rprs, np.ndarray):
        rprs = np.array(rprs)
        rprs_err = np.array(rprs_err)
    # depth = (Rp/Rs)^2, with first-order error propagation:
    depth = rprs**2.0
    depth_err = 2.0 * rprs * rprs_err
    return depth, depth_err
def depth_to_radius(depth, depth_err):
    r"""
    Compute planet-to-star radius ratio (and uncertainties) from
    input transit depth, with error propagation.

    Parameters
    ----------
    depth: Float or float iterable
        Transit depth.
    depth_err: Float or float iterable
        Uncertainties of the transit depth.

    Returns
    -------
    rprs: Float or float ndarray
        Planet-to-star radius ratio.
    rprs_err: Float or float ndarray
        Uncertainties of the radius ratio rprs.

    Examples
    --------
    >>> import numpy as np
    >>> import pyratbay.tools as pt
    >>> depth = 1.44
    >>> depth_err = 0.6
    >>> rprs, rprs_err = pt.depth_to_radius(depth, depth_err)
    >>> print(f'Rp/Rs = {rprs} +/- {rprs_err}')
    Rp/Rs = 1.2 +/- 0.25
    """
    # Promote non-array iterables to ndarrays (scalars pass through):
    if isinstance(depth, Iterable) and not isinstance(depth, np.ndarray):
        depth = np.array(depth)
        depth_err = np.array(depth_err)
    # Rp/Rs = sqrt(depth), with first-order error propagation:
    rprs = np.sqrt(depth)
    rprs_err = 0.5 * depth_err / rprs
    return rprs, rprs_err
def ignore_system_exit(func):
    """Decorator that turns a SystemExit raised by func into a None return."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except SystemExit:
            return None
        return result
    return wrapper
|
pcubillosREPO_NAMEpyratbayPATH_START.@pyratbay_extracted@pyratbay-master@pyratbay@tools@tools.py@.PATH_END.py
|
{
"filename": "LocalProxy.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/ObitTalk/python/LocalProxy.py",
"type": "Python"
}
|
# Copyright (C) 2007 Associated Universities, Inc
# Copyright (C) 2005 Joint Institute for VLBI in Europe
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
This module provides instances to dispatch function calls locally,
without doing any RPC.
"""
# The AIPSTask module should always be available.
from __future__ import absolute_import
import Proxy.AIPSTask
AIPSTask = Proxy.AIPSTask.AIPSTask()
# The same goes for the ObitTask module.
import Proxy.ObitTask
ObitTask = Proxy.ObitTask.ObitTask()
import Proxy.ObitScriptP
ObitScript = Proxy.ObitScriptP.ObitScript()
# Local AIPS data directories - this is imported in AIPS - recursive
import AIPS
# The AIPSData module depends on Obit.  Since Obit might not be
# available, leave out the AIPSUVData and AIPSImage instances if we
# fail to load the module.
try:
    import Proxy.AIPSData
except Exception:
    # Best-effort: proceed without the AIPS data proxies.  Catch
    # Exception rather than using a bare 'except:', which would also
    # swallow KeyboardInterrupt and SystemExit.
    pass
else:
    AIPSImage = Proxy.AIPSData.AIPSImage(None)
    AIPSUVData = Proxy.AIPSData.AIPSUVData(None)
    AIPSCat = Proxy.AIPSData.AIPSCat()
# FITS data proxies are likewise optional.
try:
    import Proxy.FITSData
except Exception:
    pass
else:
    FITSImage = Proxy.FITSData.FITSImage(None)
    FITSUVData = Proxy.FITSData.FITSUVData(None)
    FITSCat = Proxy.FITSData.FITSCat()
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@ObitTalk@python@LocalProxy.py@.PATH_END.py
|
{
"filename": "py_dataset_adapter_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/trainers/data_adapters/py_dataset_adapter_test.py",
"type": "Python"
}
|
import math
import time
import jax
import numpy as np
import pytest
import tensorflow as tf
import torch
from absl.testing import parameterized
from keras.src import backend
from keras.src import testing
from keras.src.testing.test_utils import named_product
from keras.src.trainers.data_adapters import py_dataset_adapter
from keras.src.utils.rng_utils import set_random_seed
class ExamplePyDataset(py_dataset_adapter.PyDataset):
    """PyDataset yielding (x, y[, sample_weight]) batches, optionally
    infinite and with an artificial per-batch delay."""

    def __init__(
        self,
        x_set,
        y_set,
        sample_weight=None,
        batch_size=32,
        delay=0,
        infinite=False,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.x, self.y = x_set, y_set
        self.batch_size = batch_size
        self.sample_weight = sample_weight
        self.delay = delay
        self.infinite = infinite

    @property
    def num_batches(self):
        # Infinite datasets report no batch count.
        if self.infinite:
            return None
        return math.ceil(len(self.x) / self.batch_size)

    def __getitem__(self, idx):
        # Create artificial delay to test multiprocessing
        time.sleep(self.delay)
        if self.infinite:
            # Wrap around so any index maps to a real batch.
            idx = idx % math.ceil(len(self.x) / self.batch_size)
        start = idx * self.batch_size
        # Cap at the array length; the last batch may be smaller when
        # the total item count is not a multiple of the batch size.
        stop = min(start + self.batch_size, len(self.x))
        if self.sample_weight is None:
            return self.x[start:stop], self.y[start:stop]
        return (
            self.x[start:stop],
            self.y[start:stop],
            self.sample_weight[start:stop],
        )
class DictPyDataset(py_dataset_adapter.PyDataset):
    """PyDataset yielding batches as {'x': ..., 'y': ...} dicts."""

    def __init__(self, inputs, batch_size=32, **kwargs):
        super().__init__(**kwargs)
        self.inputs = inputs
        self.batch_size = batch_size

    @property
    def num_batches(self):
        return math.ceil(len(self.inputs["x"]) / self.batch_size)

    def __getitem__(self, idx):
        start = idx * self.batch_size
        # Cap at the array length; the last batch may be smaller when
        # the total item count is not a multiple of the batch size.
        stop = min(start + self.batch_size, len(self.inputs["x"]))
        return {
            "x": self.inputs["x"][start:stop],
            "y": self.inputs["y"][start:stop],
        }
class ExceptionPyDataset(py_dataset_adapter.PyDataset):
    """PyDataset whose third batch raises, for error-propagation tests."""

    @property
    def num_batches(self):
        return 4

    def __getitem__(self, index):
        # Only the first two batches are valid.
        if index >= 2:
            raise ValueError("Expected exception")
        return (
            np.random.random((64, 4)).astype("float32"),
            np.random.random((64, 2)).astype("float32"),
        )
@pytest.mark.skipif(testing.tensorflow_uses_gpu(), reason="Flaky on GPU")
class PyDatasetAdapterTest(testing.TestCase):
    """Tests for PyDatasetAdapter across backends, dataset types,
    shuffling, infinite datasets, and worker configurations."""

    @parameterized.named_parameters(
        named_product(
            [
                {
                    "testcase_name": "multiprocessing",
                    "workers": 2,
                    "use_multiprocessing": True,
                    "max_queue_size": 10,
                    "dataset_type": "np",
                },
                {
                    "testcase_name": "multithreading",
                    "workers": 2,
                    "use_multiprocessing": False,
                    "max_queue_size": 10,
                    "dataset_type": "np",
                },
                {
                    "testcase_name": "single_np",
                    "dataset_type": "np",
                },
                {
                    "testcase_name": "single_tf",
                    "dataset_type": "tf",
                },
                {
                    "testcase_name": "single_jax",
                    "dataset_type": "jax",
                },
                {
                    "testcase_name": "single_torch",
                    "dataset_type": "torch",
                },
            ],
            infinite=[True, False],
            shuffle=[True, False],
        )
    )
    def test_basic_flow(
        self,
        shuffle,
        dataset_type,
        infinite,
        workers=0,
        use_multiprocessing=False,
        max_queue_size=0,
    ):
        # Verify batches have the right type/shape/dtype for the active
        # backend, and that sample order matches the shuffle/infinite
        # configuration.
        if use_multiprocessing and shuffle:
            pytest.skip("Starting processes is slow, test fewer variants")
        set_random_seed(1337)
        x = np.random.random((64, 4)).astype("float32")
        # y encodes each sample's index so ordering can be recovered.
        y = np.array([[i, i] for i in range(64)], dtype="float32")
        CPU_DEVICES = {
            "tensorflow": "CPU:0",
            "jax": "cpu:0",
            "torch": "cpu",
            "numpy": "cpu",
        }
        # Build backend-native tensors on CPU:
        with backend.device(CPU_DEVICES[backend.backend()]):
            if dataset_type == "tf":
                x, y = tf.constant(x), tf.constant(y)
            elif dataset_type == "jax":
                x, y = jax.numpy.array(x), jax.numpy.array(y)
            elif dataset_type == "torch":
                x, y = torch.as_tensor(x), torch.as_tensor(y)
        py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=16,
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            max_queue_size=max_queue_size,
            infinite=infinite,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            py_dataset, shuffle=shuffle
        )
        # Pick the iterator and expected tensor class per backend:
        if backend.backend() == "numpy":
            it = adapter.get_numpy_iterator()
            expected_class = np.ndarray
        elif backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            expected_class = tf.Tensor
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
            expected_class = jax.Array if dataset_type == "jax" else np.ndarray
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
            expected_class = torch.Tensor
        sample_order = []
        adapter.on_epoch_begin()
        for batch in it:
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertIsInstance(bx, expected_class)
            self.assertIsInstance(by, expected_class)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            self.assertEqual(bx.shape, (16, 4))
            self.assertEqual(by.shape, (16, 2))
            for i in range(by.shape[0]):
                sample_order.append(by[i, 0])
            if infinite:
                # Infinite iterators never stop on their own; run two
                # "epochs" of 64 samples, then break out manually.
                if len(sample_order) == 64:
                    adapter.on_epoch_end()
                    adapter.on_epoch_begin()
                elif len(sample_order) >= 128:
                    break
        adapter.on_epoch_end()
        expected_order = list(range(64))
        if infinite:
            # NOTE(review): presumably the infinite case disables
            # shuffling, hence the exact-order check — confirm against
            # PyDatasetAdapter's shuffle handling.
            self.assertAllClose(sample_order, expected_order + expected_order)
        elif shuffle:
            # Shuffled: different order, but same set of samples.
            self.assertNotAllClose(sample_order, expected_order)
            self.assertAllClose(sorted(sample_order), expected_order)
        else:
            self.assertAllClose(sample_order, expected_order)

    # TODO: test class_weight
    # TODO: test sample weights
    # TODO: test inference mode (single output)

    def test_speedup(self):
        # With per-batch delays, parallel workers should finish faster
        # than the serial configuration.
        x = np.random.random((40, 4))
        y = np.random.random((40, 2))

        no_speedup_py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=4,
            delay=0.5,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            no_speedup_py_dataset, shuffle=False
        )
        gen = adapter.get_numpy_iterator()
        t0 = time.time()
        for batch in gen:
            pass
        no_speedup_time = time.time() - t0

        speedup_py_dataset = ExamplePyDataset(
            x,
            y,
            batch_size=4,
            workers=4,
            # TODO: the github actions runner may have performance issue with
            # multiprocessing
            # use_multiprocessing=True,
            max_queue_size=8,
            delay=0.5,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(
            speedup_py_dataset, shuffle=False
        )
        gen = adapter.get_numpy_iterator()
        t0 = time.time()
        for batch in gen:
            pass
        speedup_time = time.time() - t0

        self.assertLess(speedup_time, no_speedup_time)

    def test_dict_inputs(self):
        # Dict-shaped batches must survive both the numpy iterator and
        # the tf.data pipeline.
        inputs = {
            "x": np.random.random((40, 4)),
            "y": np.random.random((40, 2)),
        }
        py_dataset = DictPyDataset(inputs, batch_size=4)
        adapter = py_dataset_adapter.PyDatasetAdapter(py_dataset, shuffle=False)
        gen = adapter.get_numpy_iterator()
        for batch in gen:
            self.assertEqual(len(batch), 2)
            bx, by = batch["x"], batch["y"]
            self.assertIsInstance(bx, np.ndarray)
            self.assertIsInstance(by, np.ndarray)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(bx.shape, (4, 4))
            self.assertEqual(by.shape, (4, 2))
        ds = adapter.get_tf_dataset()
        for batch in ds:
            self.assertEqual(len(batch), 2)
            bx, by = batch["x"], batch["y"]
            self.assertIsInstance(bx, tf.Tensor)
            self.assertIsInstance(by, tf.Tensor)
            self.assertEqual(bx.dtype, by.dtype)
            self.assertEqual(tuple(bx.shape), (4, 4))
            self.assertEqual(tuple(by.shape), (4, 2))

    def test_with_different_shapes(self):
        # Batches with varying shapes must pass through unmodified.
        class TestPyDataset(py_dataset_adapter.PyDataset):
            @property
            def num_batches(self):
                return 3

            def __getitem__(self, idx):
                if idx == 0:
                    return np.ones([16, 4], "float32"), np.ones(
                        [16, 2], "float32"
                    )
                if idx == 1:
                    return np.ones([16, 5], "float32"), np.ones(
                        [16, 2], "float32"
                    )
                else:
                    return np.ones([2, 6], "float32"), np.ones(
                        [2, 2], "float32"
                    )

        adapter = py_dataset_adapter.PyDatasetAdapter(
            TestPyDataset(), shuffle=False
        )
        if backend.backend() == "numpy":
            it = adapter.get_numpy_iterator()
        elif backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
        for i, batch in enumerate(it):
            self.assertEqual(len(batch), 2)
            bx, by = batch
            self.assertEqual(bx.dtype, by.dtype)
            self.assertContainsExactSubsequence(str(bx.dtype), "float32")
            if i == 0:
                self.assertEqual(bx.shape, (16, 4))
                self.assertEqual(by.shape, (16, 2))
            elif i == 1:
                self.assertEqual(bx.shape, (16, 5))
                self.assertEqual(by.shape, (16, 2))
            else:
                self.assertEqual(bx.shape, (2, 6))
                self.assertEqual(by.shape, (2, 2))

    @parameterized.named_parameters(
        [
            {
                "testcase_name": "multiprocessing",
                "workers": 2,
                "use_multiprocessing": True,
                "max_queue_size": 10,
            },
            {
                "testcase_name": "multithreading",
                "workers": 2,
                "max_queue_size": 10,
            },
            {
                "testcase_name": "single",
            },
        ]
    )
    def test_exception_reported(
        self,
        workers=0,
        use_multiprocessing=False,
        max_queue_size=0,
    ):
        # An exception raised inside __getitem__ must surface to the
        # consumer of the iterator, in every worker configuration.
        dataset = ExceptionPyDataset(
            workers=workers,
            use_multiprocessing=use_multiprocessing,
            max_queue_size=max_queue_size,
        )
        adapter = py_dataset_adapter.PyDatasetAdapter(dataset, shuffle=False)
        expected_exception_class = ValueError
        if backend.backend() == "numpy":
            it = adapter.get_numpy_iterator()
        elif backend.backend() == "tensorflow":
            it = adapter.get_tf_dataset()
            # tf.data wraps the exception
            expected_exception_class = tf.errors.InvalidArgumentError
        elif backend.backend() == "jax":
            it = adapter.get_jax_iterator()
        elif backend.backend() == "torch":
            it = adapter.get_torch_dataloader()
        it = iter(it)
        # First two batches are valid; the third raises.
        next(it)
        next(it)
        with self.assertRaisesRegex(
            expected_exception_class, "Expected exception"
        ):
            next(it)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@trainers@data_adapters@py_dataset_adapter_test.py@.PATH_END.py
|
{
"filename": "global_clock_corrections.py",
"repo_name": "nanograv/PINT",
"repo_path": "PINT_extracted/PINT-master/src/pint/observatory/global_clock_corrections.py",
"type": "Python"
}
|
"""Tools for working with clock corrections obtained from a global location.
The goal is for PINT (and other programs) to be able to download up-to-date
observatory clock corrections from a central location, which observatories
or third parties will update as new clock correction data becomes available.
The global repository is currently hosted on github. Available clock correction
files and their updating requirements are listed in a file there called index.txt.
This too is checked occasionally for updates.
The downloaded files are stored in the Astropy cache,
to clear out old files you will want to do
``astropy.utils.data.clear_download_cache()``.
"""
import collections
import time
from pathlib import Path
from warnings import warn
from astropy.utils.data import download_file
from loguru import logger as log
from pint.pulsar_mjd import Time
# Root URL of the global clock-correction repository (hosted on GitHub).
global_clock_correction_url_base = (
    "https://raw.githubusercontent.com/ipta/pulsar-clock-corrections/main/"
)
# These are mirrors that have (presumed) identical data but might be available when
# the base URL is not. If the base URL is not included it will not actually be
# checked.
global_clock_correction_url_mirrors = [global_clock_correction_url_base]
# Name of the repository's index file listing the available files.
index_name = "index.txt"
# PINT will check the index if it is more than this old
index_update_interval_days = 1
def get_file(
    name,
    update_interval_days=7,
    download_policy="if_expired",
    url_base=None,
    url_mirrors=None,
    invalid_if_older_than=None,
):
    """Obtain a local file pointing to a current version of name.

    The mtime of the returned file will record when the data was last obtained
    from the internet.

    Parameters
    ----------
    name : str
        The name of the file within the repository.
    update_interval_days : float
        How old the cached version can be before needing to be updated. Can be infinity.
    download_policy : str
        When to try downloading from the Net. Options are: "always", "never",
        "if_expired" (if the cached version is older than update_interval_days),
        or "if_missing" (only if nothing is currently available).
    url_base : str or None
        If provided, override the repository location stored in the source code.
        Useful mostly for testing.
    url_mirrors : list of str or None
        If provided, override the repository mirrors stored in the source code.
        Useful mostly for testing.
    invalid_if_older_than : astropy.time.Time or None
        Re-download the file if the cached version is older than this.

    Returns
    -------
    pathlib.Path
        The location of the file.

    Raises
    ------
    FileNotFoundError
        If download_policy is "never" and the file is not in the cache.
    """
    log.trace(f"File {name} requested")
    if url_base is None:
        url_base = global_clock_correction_url_base
        if url_mirrors is None:
            url_mirrors = global_clock_correction_url_mirrors
    elif url_mirrors is None:
        # A custom url_base with no explicit mirrors: use only that base.
        url_mirrors = [url_base]
    local_file = None
    remote_url = url_base + name
    mirror_urls = [u + name for u in url_mirrors]
    if download_policy != "always":
        try:
            # sources=[] means: consult the astropy cache only, no network.
            local_file = Path(download_file(remote_url, cache=True, sources=[]))
            log.trace(f"file {remote_url} found in cache at path: {local_file}")
        except KeyError as e:
            log.trace(f"file {remote_url} not found in cache")
            if download_policy == "never":
                raise FileNotFoundError(name) from e
    if download_policy == "if_missing" and local_file is not None:
        log.trace(
            f"File {name} found and returned due to download policy {download_policy}"
        )
        return local_file
    # Discard the cached copy if it predates the hard invalidation date.
    if local_file is not None:
        file_time = Path(local_file).stat().st_mtime
        if (
            invalid_if_older_than is not None
            and Time(file_time, format="unix") < invalid_if_older_than
        ):
            log.trace(
                f"File {name} found but re-downloaded because "
                f"it is older than {invalid_if_older_than}"
            )
            local_file = None
    if download_policy == "if_expired" and local_file is not None:
        # FIXME: will update_interval_days=np.inf work with unit conversion?
        file_time = Path(local_file).stat().st_mtime
        now = time.time()
        if now - file_time < update_interval_days * 86400:
            # Not expired
            log.trace(
                f"File {name} found and returned due to "
                f"download policy {download_policy} and recentness"
            )
            return local_file
    # By this point we know we need a new file but we want it to wind up in
    # the cache
    log.info(
        f"File {name} to be downloaded due to download policy "
        f"{download_policy}: {remote_url}"
    )
    try:
        return Path(download_file(remote_url, cache="update", sources=mirror_urls))
    except IOError as e:
        # Network failure: fall back to the stale cached copy if the
        # policy permits and one exists; otherwise propagate.
        if download_policy != "if_expired" or local_file is None:
            raise
        warn(
            f"File {name} should be downloaded but {local_file} is being used "
            f"because an error occurred: {e}"
        )
        return local_file
# One parsed line of the repository index: file path, expiry interval in
# days, hard invalidation date (astropy Time or None), and any trailing
# free-form text.
IndexEntry = collections.namedtuple(
    "IndexEntry", ["file", "update_interval_days", "invalid_if_older_than", "extra"]
)
class Index:
    """Index of files available from the global repository.

    The list is obtained by downloading (via the cache) the file ``index.txt``
    from the repository. The result is stored in a dictionary ``index.files`` that
    maps filenames (like ``gps2utc.clk``) to IndexEntry objects describing those
    files. These entries contain information about expiry and validity of the file.

    For parameter meanings see
    :func:`pint.observatory.global_clock_corrections.get_file`.
    """

    def __init__(self, download_policy="if_expired", url_base=None, url_mirrors=None):
        index_file = get_file(
            index_name,
            index_update_interval_days,
            download_policy=download_policy,
            url_base=url_base,
            url_mirrors=url_mirrors,
        )
        self.files = {}
        # Use a context manager so the index file handle is closed
        # deterministically (the previous version leaked it).
        with open(index_file) as f:
            for line in f:
                line = line.strip()
                # Skip comments and blank lines.
                if not line or line.startswith("#"):
                    continue
                # Columns: file, update_interval_days, invalid_if_older_than,
                # then optional free-form extra text.
                e = line.split(maxsplit=3)
                date = None if e[2] == "---" else Time(e[2], format="iso")
                t = IndexEntry(
                    file=e[0],
                    update_interval_days=float(e[1]),
                    invalid_if_older_than=date,
                    extra=e[3] if len(e) > 3 else "",
                )
                # Key on the bare file name, not the repository path.
                file = Path(t.file).name
                self.files[file] = t
def get_clock_correction_file(
    filename, download_policy="if_expired", url_base=None, url_mirrors=None
):
    """Obtain a current version of the clock correction file.

    The clock correction file is looked up in the index downloaded from the
    repository; unknown clock correction files trigger a KeyError. Known
    ones use the index's information about when they expire.

    Parameters
    ----------
    filename : str
        The name of the file within the repository.
    download_policy : str
        When to try downloading from the Net. Options are: "always", "never",
        "if_expired" (if the cached version is older than update_interval_days),
        or "if_missing" (only if nothing is currently available).
    url_base : str or None
        If provided, override the repository location stored in the source code.
        Useful mostly for testing.
    url_mirrors : list of str or None
        If provided, override the repository mirrors stored in the source code.
        Useful mostly for testing.
    """
    # FIXME: cache/share the index object?
    index = Index(
        download_policy=download_policy, url_base=url_base, url_mirrors=url_mirrors
    )
    entry = index.files[filename]
    return get_file(
        entry.file,
        update_interval_days=entry.update_interval_days,
        download_policy=download_policy,
        url_base=url_base,
        url_mirrors=url_mirrors,
        invalid_if_older_than=entry.invalid_if_older_than,
    )
def update_all(
    export_to=None, download_policy="if_expired", url_base=None, url_mirrors=None
):
    """Download and update all clock corrections in the index.

    You can also export them all to a directory.

    This includes all the files in the repository, regardless of what
    PINT knows about them. (For example, the repository probably
    includes the file `leap.sec` but PINT does not use it.) If you want to
    download only the clock correction files that PINT uses,
    see :func:`pint.observatory.update_clock_files`.

    Parameters
    ----------
    export_to : str or pathlib.Path, optional
        If provided, write all files to this directory.
    download_policy : str
        Under what conditions to download a new file.
    url_base : str, optional
        The location of the global repository. Useful for debugging.
    url_mirrors : list of str, optional
        A list of places to look for the content. Useful for debugging.
    """
    index = Index(
        download_policy=download_policy, url_base=url_base, url_mirrors=url_mirrors
    )
    for name, entry in index.files.items():
        local = get_file(
            entry.file,
            update_interval_days=entry.update_interval_days,
            download_policy=download_policy,
            url_base=url_base,
            url_mirrors=url_mirrors,
            invalid_if_older_than=entry.invalid_if_older_than,
        )
        if export_to is None:
            continue
        # Copy the cached file out under its bare name.
        (Path(export_to) / name).write_text(Path(local).read_text())
|
nanogravREPO_NAMEPINTPATH_START.@PINT_extracted@PINT-master@src@pint@observatory@global_clock_corrections.py@.PATH_END.py
|
{
"filename": "daily_test.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/examples/workflow/data_checker/daily_test.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Copyright (C) 2013 Ian W. Harry
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
Description of daily_test
"""
import pycbc
import pycbc.version
__author__ = "Ian Harry <ian.harry@astro.cf.ac.uk>"
__version__ = pycbc.version.git_verbose_msg
__date__ = pycbc.version.date
__program__ = "daily_test"
import os
import logging
import argparse
import pycbc.workflow as _workflow
logging.basicConfig(format='%(asctime)s:%(levelname)s : %(message)s', \
level=logging.INFO,datefmt='%I:%M:%S')
logger = logging.getLogger()
# command line options
_desc = __doc__[1:]
parser = argparse.ArgumentParser(description=_desc)
parser.add_argument('--version', action='version', version=__version__)
_workflow.add_workflow_command_line_group(parser)
args = parser.parse_args()
workflow = _workflow.Workflow(args, 'daily_check')
currDir = os.getcwd()
segDir = os.path.join(currDir,"segments")
dfDirSYR = os.path.join(currDir,"datafindSYR")
dfDirCIT = os.path.join(currDir,"datafindCIT")
dfDirLHO = os.path.join(currDir,"datafindLHO")
dfDirLLO = os.path.join(currDir,"datafindLLO")
dfDirUWM = os.path.join(currDir,"datafindUWM")
print("BEGIN BY GENERATING SCIENCE AND CAT_X VETOES")
def segment_report(sSegs):
    """
    Print a per-ifo summary of the science segments.

    For every interferometer in ``sSegs``, report the total science time and
    segment count, plus the time/count restricted to segments longer than
    500 s and longer than 2000 s.

    Parameters
    ----------
    sSegs : dict
        Mapping of ifo name to an iterable of segments; each segment must
        support ``abs()`` (its length in seconds).
    """
    for ifo in sSegs.keys():
        # BUG FIX: totals were previously accumulated across all ifos, so
        # every ifo after the first reported inflated (cumulative) numbers
        # while the printed message claims per-ifo figures. Counters are now
        # reset for each ifo.
        fullLen = 0
        fullNum = 0
        shortLen = 0
        shortNum = 0
        longLen = 0
        longNum = 0
        for seg in sSegs[ifo]:
            seg_len = abs(seg)
            fullLen += seg_len
            fullNum += 1
            if seg_len > 500:
                shortLen += seg_len
                shortNum += 1
            if seg_len > 2000:
                longLen += seg_len
                longNum += 1
        print("For ifo %s there is %d seconds of data in %d segments, %d seconds (%d unique segments) in segments longer than 500s and %d seconds (%d unique segments) longer than 2000s." %(ifo, fullLen, fullNum, shortLen, shortNum, longLen, longNum))
scienceSegs, segsList = _workflow.setup_segment_generation(workflow, segDir)
segment_report(scienceSegs)
print("STARTING DF")
print()


def fetch_science_segments():
    """Re-run segment generation quietly for an independent copy.

    FIXME: deepcopy of the original result used to be used here, but now
    seems to fail, so the segment query is repeated with logging disabled.
    This may be slow!
    """
    logger.disabled = True
    segs, _ = _workflow.setup_segment_generation(workflow, segDir)
    logger.disabled = False
    return segs


def compare_segments(segs_a, segs_b, name_a, name_b):
    """Print the science segments present at site ``name_a`` but not ``name_b``."""
    # BUG FIX: one copy of this message previously read "present a SYR".
    print("Frames present at %s and not at %s:" % (name_a, name_b))
    for ifo in segs_a.keys():
        print("For ifo", ifo)
        if ifo in segs_b.keys():
            print(segs_a[ifo] - segs_b[ifo])
        else:
            print("No science segments for ifo %s at %s" % (ifo, name_b))
    print()


def run_site_datafind(site, df_dir):
    """Run datafind for one site and return its science segments."""
    print("RUNNING DATAFIND FOR %s" % site)
    segs = fetch_science_segments()
    _, segs = _workflow.setup_datafind_workflow(
        workflow, segs, df_dir, segsList, tag=site)
    segment_report(segs)
    return segs


# Start with the SYR/CIT comparison; CIT is then the reference for all
# subsequent sites.
scienceSegsS = run_site_datafind("SYR", dfDirSYR)
print()
print()
scienceSegsC = run_site_datafind("CIT", dfDirCIT)
compare_segments(scienceSegsS, scienceSegsC, "SYR", "CIT")
compare_segments(scienceSegsC, scienceSegsS, "CIT", "SYR")
# Compare each remaining site against CIT in both directions.
for site, df_dir in (("LHO", dfDirLHO), ("LLO", dfDirLLO), ("UWM", dfDirUWM)):
    print()
    scienceSegsS = run_site_datafind(site, df_dir)
    compare_segments(scienceSegsS, scienceSegsC, site, "CIT")
    compare_segments(scienceSegsC, scienceSegsS, "CIT", site)
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@examples@workflow@data_checker@daily_test.py@.PATH_END.py
|
{
"filename": "test_log_exoctk.py",
"repo_name": "ExoCTK/exoctk",
"repo_path": "exoctk_extracted/exoctk-main/exoctk/tests/test_log_exoctk.py",
"type": "Python"
}
|
#! /usr/bin/env python
"""Tests for the ``log_exoctk`` module.
Authors
-------
Matthew Bourque
Use
---
These tests can be run via the command line (omit the ``-s`` to
suppress verbose output to stdout):
::
pytest -s test_log_exoctk.py
"""
import os
import astropy
import sqlite3
from exoctk import log_exoctk
def test_create_db():
    """Verify that ``create_db`` writes a database file to disk."""
    path = './test.db'
    log_exoctk.create_db(path)
    # The call above should have produced the file
    assert os.path.exists(path)
    # Clean up
    os.remove(path)
def test_load_db():
    """Verify that an existing database can be opened with ``load_db``."""
    path = './test.db'
    log_exoctk.create_db(path)
    # Loading should hand back a usable sqlite3 cursor
    cursor = log_exoctk.load_db(path)
    assert cursor is not None and isinstance(cursor, sqlite3.Cursor)
    # Clean up
    os.remove(path)
def test_log_form_input():
    """Exercise ``log_form_input`` against every logging table."""
    path = './test.db'
    log_exoctk.create_db(path)
    database = sqlite3.connect(path, isolation_level=None,
                               detect_types=sqlite3.PARSE_DECLTYPES,
                               check_same_thread=False)
    cursor = database.cursor()
    # An empty form should still produce one row per call
    for table in ['groups_integrations', 'limb_darkening', 'contam_visibility',
                  'phase_constraint', 'fortney', 'generic']:
        log_exoctk.log_form_input({}, table, database)
        cursor.execute(f'SELECT * FROM {table}')
        assert len(cursor.fetchall()) > 0
    # Clean up
    os.remove(path)
def test_scrub():
    """``scrub`` should strip everything but alphanumeric characters."""
    dirty = 'DROP TABLE groups_integrations'
    assert log_exoctk.scrub(dirty) == 'DROPTABLEgroupsintegrations'
def test_view_log():
    """``view_log`` should return an astropy ``Table`` for every log table."""
    path = './test.db'
    log_exoctk.create_db(path)
    conn = sqlite3.connect(path, isolation_level=None,
                           detect_types=sqlite3.PARSE_DECLTYPES,
                           check_same_thread=False)
    cursor = conn.cursor()
    for table in ['groups_integrations', 'limb_darkening', 'contam_visibility',
                  'phase_constraint', 'fortney', 'generic']:
        data = log_exoctk.view_log(cursor, table)
        assert data is not None and isinstance(data, astropy.table.table.Table)
    # Clean up
    os.remove(path)
|
ExoCTKREPO_NAMEexoctkPATH_START.@exoctk_extracted@exoctk-main@exoctk@tests@test_log_exoctk.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "EliseJ/astroABC",
"repo_path": "astroABC_extracted/astroABC-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
from pkg_resources import parse_version
from setuptools import setup, find_packages
numpy_min_version = '1.8'
def get_numpy_status():
    """
    Report whether NumPy is installed and recent enough.

    Returns a dictionary with keys ``'up_to_date'`` (bool, True when the
    installed NumPy is at least ``numpy_min_version``) and ``'version'``
    (the installed version string, or an empty string when NumPy is absent).
    """
    try:
        import numpy
    except ImportError:
        return {'up_to_date': False, 'version': ""}
    installed = numpy.__version__
    return {
        'up_to_date': parse_version(installed) >= parse_version(numpy_min_version),
        'version': installed,
    }
def setup_astroabc():
    """
    Check the NumPy requirement and register the astroabc package.

    Raises
    ------
    ImportError
        If NumPy is missing, or installed but older than
        ``numpy_min_version``.
    """
    numpy_status = get_numpy_status()
    numpy_req_str = "astroABC requires NumPy >= {0}.\n".format(numpy_min_version)
    if numpy_status['up_to_date'] is False:
        if numpy_status['version']:
            # BUG FIX: the original concatenated "NumPy""{0}" with no space,
            # producing messages like "of NumPy1.7 is out-of-date".
            raise ImportError("Your installation of NumPy {0} is out-of-date.\n{1}".format(numpy_status['version'],numpy_req_str))
        else:
            raise ImportError("NumPy is not installed.\n{0}".format(numpy_req_str))
    # NOTE: the unused `Configuration` import was removed.
    from numpy.distutils.core import setup
    setup( name='astroabc',
        version='1.3.2',
        author="Elise Jennings",
        author_email="elise.jennings@gmail.com ",
        url="https://github.com/EliseJ/astroABC",
        description='A Python implementation of an Approximate Bayesian Computation Sequential Monte Carlo (ABC SMC) sampler for parameter estimation.',
        license='MIT',
        classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Operating System :: OS Independent',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering',
        ],
        # BUG FIX: this previously read 'NumPy (>=2.7)' (apparently a Python
        # version mix-up); keep it in sync with numpy_min_version ('1.8').
        requires=['NumPy (>={0})'.format(numpy_min_version),],
        long_description="""
        Approximate Bayesian computation (ABC) and so
        called "likelihood free" Markov chain Monte Carlo
        techniques are popular methods for tackling parameter
        inference in scenarios where the likelihood is intractable or unknown.
        These methods are called likelihood free as they are free from
        the usual assumptions about the form of the likelihood e.g. Gaussian,
        as ABC aims to simulate samples from the parameter posterior distribution directly.
        ``astroABC`` is a python package that implements
        an Approximate Bayesian Computation Sequential Monte Carlo (ABC SMC) sampler
        as a python class. It is extremely flexible and applicable to a large suite of problems.
        ``astroABC`` requires ``NumPy``,``SciPy`` and ``sklearn``. ``mpi4py`` and ``multiprocessing`` are optional.
        """,
        #packages=find_packages(exclude=['contrib', 'docs', 'tests']),
        packages=["astroabc", "examples"],
        )
if __name__ == '__main__':
setup_astroabc()
|
EliseJREPO_NAMEastroABCPATH_START.@astroABC_extracted@astroABC-master@setup.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "adrn/schwimmbad",
"repo_path": "schwimmbad_extracted/schwimmbad-main/docs/conf.py",
"type": "Python"
}
|
import importlib.metadata

# -- Project information -----------------------------------------------------
project = "schwimmbad"
copyright = "2016-2024, Adrian Price-Whelan"
author = "Adrian Price-Whelan"
# Pull the version from the installed package so the docs never drift from it.
version = release = importlib.metadata.version("schwimmbad")

# -- General Sphinx configuration --------------------------------------------
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.intersphinx",
    "sphinx.ext.mathjax",
    "sphinx.ext.napoleon",
    "sphinx_autodoc_typehints",
    "sphinx_copybutton",
    "sphinx_automodapi.automodapi",
    "sphinx_automodapi.smart_resolver",
]
source_suffix = [".rst", ".md"]
exclude_patterns = [
    "_build",
    "**.ipynb_checkpoints",
    "Thumbs.db",
    ".DS_Store",
    ".env",
    ".venv",
]

# -- HTML output -------------------------------------------------------------
html_theme = "pydata_sphinx_theme"
html_static_path = ["_static"]
html_css_files = [
    "custom.css",
]

# -- Cross-project references (intersphinx) ----------------------------------
# The numpy/scipy/matplotlib entries supply a fallback inventory URL in case
# the primary one is unreachable.
intersphinx_mapping = {
    "python": ("https://docs.python.org/3", None),
    "emcee": ("http://emcee.readthedocs.io/en/latest/", None),
    "numpy": (
        "https://numpy.org/doc/stable/",
        (None, "http://data.astropy.org/intersphinx/numpy.inv"),
    ),
    "scipy": (
        "https://docs.scipy.org/doc/scipy/",
        (None, "http://data.astropy.org/intersphinx/scipy.inv"),
    ),
    "matplotlib": (
        "https://matplotlib.org/stable/",
        (None, "http://data.astropy.org/intersphinx/matplotlib.inv"),
    ),
    "astropy": ("https://docs.astropy.org/en/stable/", None),
}
# Private stdlib classes referenced in signatures that have no doc targets.
nitpick_ignore = [
    ("py:class", "_io.StringIO"),
    ("py:class", "_io.BytesIO"),
]
always_document_param_types = True
|
adrnREPO_NAMEschwimmbadPATH_START.@schwimmbad_extracted@schwimmbad-main@docs@conf.py@.PATH_END.py
|
{
"filename": "mcrotations.py",
"repo_name": "astropy/halotools",
"repo_path": "halotools_extracted/halotools-master/halotools/utils/mcrotations.py",
"type": "Python"
}
|
"""
A set of rotation utilites that apply monte carlo
roations to collections of 2- and 3-dimensional vectors
"""
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
from astropy.utils.misc import NumpyRNGContext
from .vector_utilities import elementwise_norm, elementwise_dot
from .vector_utilities import rotate_vector_collection
from .rotations2d import (
rotation_matrices_from_angles as rotation_matrices_from_angles_2d,
)
from .rotations3d import (
rotation_matrices_from_angles as rotation_matrices_from_angles_3d,
)
__all__ = [
"random_rotation_3d",
"random_rotation_2d",
"random_perpendicular_directions",
"random_unit_vectors_3d",
"random_unit_vectors_2d",
]
__author__ = ["Duncan Campbell"]
def random_rotation_3d(vectors, seed=None):
    r"""
    Apply one random rotation to an entire collection of 3d vectors.

    Parameters
    ----------
    vectors : ndarray
        Numpy array of shape (npts, 3) storing a collection of 3d vectors

    seed : int, optional
        Random number seed

    Returns
    -------
    rotated_vectors : ndarray
        Numpy array of shape (npts, 3) storing the rotated vectors

    Example
    -------
    Create a random set of 3D unit vectors and rotate them.

    >>> npts = 1000
    >>> x1 = random_unit_vectors_3d(npts)
    >>> x2 = random_rotation_3d(x1)
    """
    # Draw a random rotation axis, then a random angle in [0, pi).
    with NumpyRNGContext(seed):
        axis = random_unit_vectors_3d(1)[0]
        angle = np.random.random(size=1) * (np.pi)
    matrices = rotation_matrices_from_angles_3d(angle, axis)
    return rotate_vector_collection(matrices, vectors)
def random_rotation_2d(vectors, seed=None):
    r"""
    Apply one random rotation to an entire collection of 2d vectors.

    Parameters
    ----------
    vectors : ndarray
        Numpy array of shape (npts, 2) storing a collection of 2d vectors

    seed : int, optional
        Random number seed

    Returns
    -------
    rotated_vectors : ndarray
        Numpy array of shape (npts, 2) storing the rotated vectors

    Example
    -------
    Create a random set of 2D unit vectors and rotate them.

    >>> npts = 1000
    >>> x1 = random_unit_vectors_2d(npts)
    >>> x2 = random_rotation_2d(x1)
    """
    # Draw a random angle in [0, pi) and rotate all vectors by it.
    with NumpyRNGContext(seed):
        angle = np.random.random(size=1) * (np.pi)
    matrices = rotation_matrices_from_angles_2d(angle)
    return rotate_vector_collection(matrices, vectors)
def random_perpendicular_directions(v, seed=None):
    r"""
    For each 3d vector in ``v``, return a random unit vector orthogonal to it.

    Parameters
    ----------
    v : ndarray
        Numpy array of shape (npts, 3) storing a collection of 3d vectors

    seed : int, optional
        Random number seed used to choose a random orthogonal direction

    Returns
    -------
    result : ndarray
        Numpy array of shape (npts, 3)

    Example
    -------
    Create a random set of 3D unit vectors and perpendicular partners.

    >>> npts = 1000
    >>> x1 = random_unit_vectors_3d(npts)
    >>> x2 = random_perpendicular_directions(x1)
    """
    v = np.atleast_2d(v)
    npts = v.shape[0]
    # Random candidate directions, one per input vector.
    with NumpyRNGContext(seed):
        w = random_unit_vectors_3d(npts)
    e_v = v / elementwise_norm(v).reshape((npts, 1))
    e_w = w / elementwise_norm(w).reshape((npts, 1))
    # Gram-Schmidt step: remove the component of e_w parallel to e_v,
    # leaving a vector orthogonal to e_v, then normalize it.
    parallel_part = elementwise_dot(e_v, e_w).reshape((npts, 1))
    perp = e_w - parallel_part * e_v
    return perp / elementwise_norm(perp).reshape((npts, 1))
def random_unit_vectors_3d(npts):
    r"""
    Draw ``npts`` random unit vectors in 3 dimensions.

    Parameters
    ----------
    npts : int
        number of vectors

    Returns
    -------
    result : numpy.array
        Numpy array of shape (npts, 3) containing random unit vectors
    """
    # Normalizing i.i.d. Gaussian draws yields directions with no
    # preferred orientation.
    samples = np.random.normal(size=(npts, 3), scale=1.0)
    lengths = np.sqrt(np.sum(samples**2, axis=-1))
    return (1.0 / lengths[:, np.newaxis]) * samples
def random_unit_vectors_2d(npts):
    r"""
    Draw ``npts`` random unit vectors in 2 dimensions.

    Parameters
    ----------
    npts : int
        number of vectors

    Returns
    -------
    result : numpy.array
        Numpy array of shape (npts, 2) containing random unit vectors
    """
    # A uniform angle on [0, 2*pi) gives a uniform direction on the circle.
    phi = np.random.uniform(0.0, 2.0 * np.pi, size=(npts,))
    x = 1.0 * np.cos(phi)
    y = 1.0 * np.sin(phi)
    return np.column_stack((x, y))
|
astropyREPO_NAMEhalotoolsPATH_START.@halotools_extracted@halotools-master@halotools@utils@mcrotations.py@.PATH_END.py
|
{
"filename": "analysis.py",
"repo_name": "zblz/naima",
"repo_path": "naima_extracted/naima-main/src/naima/analysis.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from pathlib import Path
import astropy.units as u
import h5py
import numpy as np
from astropy import log
from astropy.io.misc.hdf5 import read_table_hdf5, write_table_hdf5
from astropy.table import Table
from .plot import find_ML
try:
import yaml # noqa
HAS_PYYAML = True
except ImportError:
HAS_PYYAML = False
__all__ = [
"save_diagnostic_plots",
"save_results_table",
"save_run",
"read_run",
]
def save_diagnostic_plots(
    outname,
    sampler,
    modelidxs=None,
    pdf=False,
    sed=True,
    blob_labels=None,
    last_step=False,
    dpi=100,
):
    """
    Generate diagnostic plots.

    - A plot for each of the chain parameters showing walker progression,
      final sample distribution and several statistical measures of this
      distribution: ``outname_chain_parN.png`` (see `naima.plot_chain`).
    - A corner plot of sample density in the two dimensional parameter space
      of all parameter pairs of the run, with the Maximum Likelihood parameter
      vector indicated in blue: ``outname_corner.png`` (see `corner.corner`).
    - A plot for each of the models returned as blobs by the model function.
      The maximum likelihood model is shown, as well as the 1 and 3 sigma
      confidence level contours. The first model will be compared with
      observational data and residuals shown. ``outname_fit_modelN.png`` (see
      `naima.plot_fit` and `naima.plot_blob`).

    Parameters
    ----------
    outname : str
        Name to be used to save diagnostic plot files.
    sampler : `emcee.EnsembleSampler` instance
        Sampler instance from which chains, blobs and data are read.
    modelidxs : iterable of integers, optional
        Model numbers to be plotted. Default: All returned in sampler.get_blobs
    blob_labels : list of strings, optional
        Label for each of the outputs of the model. They will be used as title
        for the corresponding plot.
    pdf : bool, optional
        Whether to save plots to multipage pdf.
    """
    from matplotlib import pyplot as plt
    from .plot import plot_blob, plot_chain, plot_corner
    # This function should never be interactive; restore the user's setting
    # at the end.
    old_interactive = plt.rcParams["interactive"]
    plt.rcParams["interactive"] = False
    if pdf:
        # fonttype 42 embeds TrueType fonts so pdf text stays editable
        plt.rc("pdf", fonttype=42)
        log.info(
            "Saving diagnostic plots in file " "{0}_plots.pdf".format(outname)
        )
        from matplotlib.backends.backend_pdf import PdfPages
        outpdf = PdfPages("{0}_plots.pdf".format(outname))
    # Chains: one figure per fitted parameter
    for par, label in zip(
        range(sampler.get_chain().shape[-1]), sampler.labels
    ):
        try:
            log.info("Plotting chain of parameter {0}...".format(label))
            f = plot_chain(sampler, par, last_step=last_step)
            if pdf:
                f.savefig(outpdf, format="pdf", dpi=dpi)
            else:
                # strip the "log10(...)"/"log(...)" wrapper so the filename
                # only carries the bare parameter name
                if "log(" in label or "log10(" in label:
                    label = label.split("(")[-1].split(")")[0]
                f.savefig("{0}_chain_{1}.png".format(outname, label), dpi=dpi)
            f.clf()
            plt.close(f)
        except Exception as e:
            # NOTE(review): "paramter" is a typo in the log message; fixing it
            # would change runtime output, so it is left untouched here.
            log.warning(
                "plot_chain failed for paramter"
                " {0} ({1}): {2}".format(label, par, e)
            )
    # Corner plot of all parameter pairs
    log.info("Plotting corner plot...")
    f = plot_corner(sampler)
    if f is not None:
        if pdf:
            f.savefig(outpdf, format="pdf", dpi=dpi)
        else:
            f.savefig("{0}_corner.png".format(outname), dpi=dpi)
        f.clf()
        plt.close(f)
    # Fit/blob plots: normalize modelidxs, sed, and blob_labels to
    # parallel lists of equal length before looping.
    if modelidxs is None:
        nmodels = len(sampler.get_blobs()[-1][0])
        modelidxs = list(range(nmodels))
    if isinstance(sed, bool):
        sed = [sed for idx in modelidxs]
    if blob_labels is None:
        blob_labels = ["Model output {0}".format(idx) for idx in modelidxs]
    elif len(modelidxs) == 1 and isinstance(blob_labels, str):
        blob_labels = [blob_labels]
    elif len(blob_labels) < len(modelidxs):
        # Add default labels for any models left unnamed
        n = len(blob_labels)
        blob_labels += [
            "Model output {0}".format(idx) for idx in modelidxs[n:]
        ]
    for modelidx, plot_sed, label in zip(modelidxs, sed, blob_labels):
        try:
            log.info("Plotting {0}...".format(label))
            f = plot_blob(
                sampler,
                blobidx=modelidx,
                label=label,
                sed=plot_sed,
                n_samples=100,
                last_step=last_step,
            )
            if pdf:
                f.savefig(outpdf, format="pdf", dpi=dpi)
            else:
                f.savefig(
                    "{0}_model{1}.png".format(outname, modelidx), dpi=dpi
                )
            f.clf()
            plt.close(f)
        except Exception as e:
            log.warning("plot_blob failed for {0}: {1}".format(label, e))
    if pdf:
        outpdf.close()
    # set interactive back to original
    plt.rcParams["interactive"] = old_interactive
def save_results_table(
    outname,
    sampler,
    format="ascii.ecsv",
    convert_log=True,
    last_step=False,
    include_blobs=True,
):
    """
    Save an ASCII table with the results stored in the
    `~emcee.EnsembleSampler`.

    The table contains the median, 16th and 84th percentile confidence region
    (~1sigma) for each parameter.

    Parameters
    ----------
    outname : str
        Root name to be used to save the table. ``_results.dat`` will be
        appended for the output filename.
    sampler : `emcee.EnsembleSampler` instance
        Sampler instance from which chains, blobs and data are read.
    format : str, optional
        Format of the saved table. Must be a format string accepted by
        `astropy.table.Table.write`. Only the ``ascii.ecsv`` and
        ``ascii.ipac`` formats are able to preserve all the information
        stored in the ``run_info`` dictionary of the sampler. Defaults to
        ``ascii.ecsv`` if PyYAML is available, else ``ascii.ipac``.
    convert_log : bool, optional
        Whether to convert natural or base-10 logarithms into original values
        in addition to saving the logarithm value.
    last_step : bool, optional
        Whether to only use the positions in the final step of the run (True,
        default) or the whole chain (False).
    include_blobs : bool, optional
        Whether to save the distribution properties of the scalar blobs in the
        sampler. Default is True.

    Returns
    -------
    table : `~astropy.table.Table`
        Table with the results.
    """
    # ECSV needs PyYAML for its metadata header; degrade gracefully.
    if not HAS_PYYAML and format == "ascii.ecsv":
        format = "ascii.ipac"
        log.warning(
            "PyYAML package is required for ECSV format,"
            " falling back to {0}...".format(format)
        )
    elif format not in ["ascii.ecsv", "ascii.ipac"]:
        log.warning(
            "The chosen table format does not have an astropy"
            " writer that suppports metadata writing, no run info"
            " will be saved to the file!"
        )
    file_extension = "dat"
    if format == "ascii.ecsv":
        file_extension = "ecsv"
    log.info(
        "Saving results table in {0}_results.{1}".format(
            outname, file_extension
        )
    )
    labels = sampler.labels
    # Either the final walker positions only, or the full flattened chain.
    if last_step:
        dists = sampler.get_chain()[:, -1, :]
    else:
        dists = sampler.get_chain(flat=True)
    # Percentiles bounding the ~1sigma interval around the median.
    quant = [16, 50, 84]
    # Do we need more info on the distributions?
    t = Table(
        names=["label", "median", "unc_lo", "unc_hi"],
        dtype=["S72", "f8", "f8", "f8"],
    )
    t["label"].description = "Name of the parameter"
    t["median"].description = "Median of the posterior distribution function"
    t["unc_lo"].description = (
        "Difference between the median and the"
        " {0}th percentile of the pdf, ~1sigma lower uncertainty".format(
            quant[0]
        )
    )
    t["unc_hi"].description = (
        "Difference between the {0}th percentile"
        " and the median of the pdf, ~1sigma upper uncertainty".format(
            quant[2]
        )
    )
    metadata = {}
    # Start with info from the distributions used for storing the results
    metadata["n_samples"] = dists.shape[0]
    # save ML parameter vector and best/median loglikelihood
    ML, MLp, MLerr, _ = find_ML(sampler, None)
    metadata["ML_pars"] = [float(p) for p in MLp]
    metadata["MaxLogLikelihood"] = float(ML)
    # compute and save BIC (k*ln(n) - 2*lnL)
    BIC = len(MLp) * np.log(len(sampler.data)) - 2 * ML
    metadata["BIC"] = BIC
    # And add all info stored in the sampler.run_info dict
    if hasattr(sampler, "run_info"):
        metadata.update(sampler.run_info)
    for p, label in enumerate(labels):
        dist = dists[:, p]
        xquant = np.percentile(dist, quant)
        quantiles = dict(zip(quant, xquant))
        med = quantiles[50]
        lo, hi = med - quantiles[16], quantiles[84] - med
        t.add_row((label, med, lo, hi))
        # Optionally add a second row with the log-parameter converted back
        # to linear units.
        if convert_log and ("log10(" in label or "log(" in label):
            nlabel = label.split("(")[-1].split(")")[0]
            ltype = label.split("(")[0]
            # NOTE(review): if a label merely contains "log(" with a prefix
            # other than "log"/"log10" (e.g. "xlog(y)"), neither branch below
            # runs and new_dist is unbound -> NameError. Confirm labels are
            # always exactly "log(...)" or "log10(...)".
            if ltype == "log10":
                new_dist = 10 ** dist
            elif ltype == "log":
                new_dist = np.exp(dist)
            quantiles = dict(zip(quant, np.percentile(new_dist, quant)))
            med = quantiles[50]
            lo, hi = med - quantiles[16], quantiles[84] - med
            t.add_row((nlabel, med, lo, hi))
    if include_blobs:
        blobs = sampler.get_blobs()
        nblobs = len(blobs[-1][0])
        for idx in range(nblobs):
            # Inspect the last-step blob of the first walker to decide
            # whether this blob is a scalar worth summarizing.
            blob0 = blobs[-1][0][idx]
            IS_SCALAR = False
            if isinstance(blob0, u.Quantity):
                if blob0.size == 1:
                    IS_SCALAR = True
                    unit = blob0.unit
            elif np.isscalar(blob0):
                IS_SCALAR = True
                unit = None
            if IS_SCALAR:
                if last_step:
                    blobl = [m[idx] for m in blobs[-1]]
                else:
                    blobl = []
                    for step in blobs:
                        for walkerblob in step:
                            blobl.append(walkerblob[idx])
                if unit:
                    dist = np.array([b.value for b in blobl])
                    metadata["blob{0}_unit".format(idx)] = unit.to_string()
                else:
                    dist = np.array(blobl)
                quantiles = dict(zip(quant, np.percentile(dist, quant)))
                med = quantiles[50]
                lo, hi = med - quantiles[16], quantiles[84] - med
                t.add_row(("blob{0}".format(idx), med, lo, hi))
    if format == "ascii.ipac":
        # Only keywords are written to IPAC tables
        t.meta["keywords"] = {}
        for di in metadata.items():
            t.meta["keywords"][di[0]] = {"value": di[1]}
    else:
        if format == "ascii.ecsv":
            # there can be no numpy arrays in the metadata (YAML doesn't like
            # them)
            for di in list(metadata.items()):
                if type(di[1]).__module__ == np.__name__:
                    try:
                        # convert arrays
                        metadata[di[0]] = [a.item() for a in di[1]]
                    except TypeError:
                        # convert scalars
                        metadata[di[0]] = di[1].item()
        # Save it directly in meta for readability in ECSV
        t.meta.update(metadata)
    t.write("{0}_results.{1}".format(outname, file_extension), format=format)
    return t
def save_run(filename, sampler, compression=True, clobber=False):
    """
    Save the sampler chain, data table, parameter labels, metadata blobs, and
    run information to a hdf5 file.

    The data table and parameter labels stored in the sampler will also be
    saved to the hdf5 file.

    Parameters
    ----------
    filename : str
        Filename for hdf5 file. Must end in a '.h5' or '.hdf5' suffix,
        otherwise a `ValueError` is raised. (The previous docstring claimed
        a '_chain.h5' suffix would be appended; the code never did that.)
    sampler : `emcee.EnsembleSampler` instance
        Sampler instance for which chain and run information is saved.
    compression : bool, optional
        Whether gzip compression is applied to the dataset on write. Default is
        True.
    clobber : bool, optional
        Whether to overwrite the output filename if it exists.
    """
    filename = Path(filename)
    if filename.suffix not in {".hdf5", ".h5"}:
        raise ValueError("Filename must end in .hdf5 or .h5 suffix")
    if os.path.exists(filename) and not clobber:
        log.warning(
            "Not writing file because file exists and clobber is False"
        )
        return
    with h5py.File(filename, "w") as f:
        group = f.create_group("mcmc")
        group.create_dataset(
            "chain", data=sampler.get_chain(), compression=compression
        )
        group.create_dataset(
            "log_prob",
            data=sampler.get_log_prob(),
            compression=compression,
        )
        # Use the last step of the first walker to discover the number and
        # type of the blobs returned by the model function.
        last_blob = sampler.get_blobs()[-1][0]
        for idx, item in enumerate(last_blob):
            if isinstance(item, u.Quantity):
                # scalar or array quantity
                units = [item.unit.to_string()]
            elif isinstance(item, float):
                units = [""]
            elif (
                isinstance(item, tuple) or isinstance(item, list)
            ) and np.all([isinstance(x, np.ndarray) for x in item]):
                units = []
                for x in item:
                    if isinstance(x, u.Quantity):
                        units.append(x.unit.to_string())
                    else:
                        units.append("")
            else:
                # BUG FIX: the original never called .format(idx), so the
                # warning was emitted with a literal "{0}" placeholder.
                log.warning(
                    "blob number {0} has unknown format and cannot be saved "
                    "in HDF5 file".format(idx)
                )
                continue
            # traverse blobs list. This will probably be slow and there should
            # be a better way. (Renamed from `blob`, which shadowed the
            # sequence being enumerated above.)
            blob_values = []
            for step in sampler.get_blobs():
                for walkerblob in step:
                    blob_values.append(walkerblob[idx])
            blob_values = u.Quantity(blob_values).value
            blobdataset = group.create_dataset(
                "blob{0}".format(idx),
                data=blob_values,
                compression=compression,
            )
            if len(units) > 1:
                for j, unit in enumerate(units):
                    blobdataset.attrs["unit{0}".format(j)] = unit
            else:
                blobdataset.attrs["unit"] = units[0]
        write_table_hdf5(
            sampler.data,
            group,
            path="data",
            serialize_meta=True,
            compression=compression,
        )
        # add all run info to group attributes
        if hasattr(sampler, "run_info"):
            for key in sampler.run_info.keys():
                val = sampler.run_info[key]
                try:
                    group.attrs[key] = val
                except TypeError:
                    # h5py cannot store arbitrary objects; fall back to str
                    group.attrs[key] = str(val)
        # add other sampler info to the attrs
        group.attrs["acceptance_fraction"] = np.mean(
            sampler.acceptance_fraction
        )
        # add labels as individual attrs (there might be a better way)
        for i, label in enumerate(sampler.labels):
            group.attrs["label{0}".format(i)] = label
class _result:
    """
    Minimal stand-in for an ``emcee.EnsembleSampler`` holding chain results.
    """

    def get_value(self, name, flat=False):
        data = getattr(self, name)
        if not flat:
            return data
        # Merge the leading (steps, walkers) axes into a single sample axis.
        flat_shape = list(data.shape[1:])
        flat_shape[0] = np.prod(data.shape[:2])
        return data.reshape(flat_shape)

    def get_chain(self, **kwargs):
        return self.get_value("chain", **kwargs)

    def get_log_prob(self, **kwargs):
        return self.get_value("log_prob", **kwargs)

    def get_blobs(self, **kwargs):
        return self.get_value("_blobs", **kwargs)
def read_run(filename, modelfn=None):
    """
    Read chain from a hdf5 file saved with `save_run`.

    This function will also read the labels, data table, and metadata blobs
    stored in the original sampler. If you want to use the result object with
    `plot_fit` and setting the ``e_range`` parameter, you must provide the
    model function with the `modelfn` argument given that functions cannot be
    serialized in hdf5 files.

    Parameters
    ----------
    filename : str
        Filename of the hdf5 containing the chain, lnprobability, and blob
        arrays in the group 'mcmc' (the previous docstring said 'sampler',
        which did not match the code).
    modelfn : function, optional
        Model function to be attached to the returned sampler

    Returns
    -------
    result : class
        Container object with same properties as an `emcee.EnsembleSampler`
        resulting from a sampling run. This object can be passed onto
        `~naima.plot_fit`, `~naima.plot_chain`, and `~naima.plot_corner` for
        analysis as you would do with a `emcee.EnsembleSampler` instance.
    """
    # initialize empty sampler-like container to return
    result = _result()
    result.modelfn = modelfn
    result.run_info = {}
    # BUG FIX: the file handle was previously never closed; the context
    # manager guarantees closure. All datasets are fully materialized inside
    # the block, so nothing needs the handle afterwards.
    with h5py.File(filename, "r") as f:
        result.chain = np.array(f["mcmc/chain"])
        result.log_prob = np.array(f["mcmc/log_prob"])
        # chain shape is (nsteps, nwalkers, nparams); the last axis counts
        # parameters, not blobs (it was previously misnamed "nblobs").
        nsteps, nwalkers, nparams = result.chain.shape
        blobs = []
        blobrank = []
        # Blobs were written as datasets blob0, blob1, ...; scan until the
        # first missing index (100 is an arbitrary upper bound).
        for i in range(100):
            try:
                ds = f["mcmc/blob{0}".format(i)]
            except KeyError:
                break
            rank = np.ndim(ds[0])
            blobrank.append(rank)
            if rank <= 1:
                blobs.append(u.Quantity(ds[()], unit=ds.attrs["unit"]))
            else:
                blob = []
                for j in range(rank):
                    blob.append(
                        u.Quantity(
                            ds[:, j, :], unit=ds.attrs["unit{0}".format(j)]
                        )
                    )
                blobs.append(blob)
        # run info and labels
        result.run_info = dict(f["mcmc"].attrs)
        result.acceptance_fraction = f["mcmc"].attrs["acceptance_fraction"]
        result.labels = []
        for i in range(nparams):
            result.labels.append(f["mcmc"].attrs["label{0}".format(i)])
        # data table
        result.data = read_table_hdf5(f["mcmc"], "data")
    # Re-organize the flat per-sample blob arrays into the nested
    # steps -> walkers -> blobs layout that emcee uses.
    result_blobs = []
    for step in range(nsteps):
        steplist = []
        for walker in range(nwalkers):
            n = step * nwalkers + walker
            walkerblob = []
            for j in range(len(blobs)):
                if blobrank[j] <= 1:
                    walkerblob.append(blobs[j][n])
                else:
                    blob = []
                    for k in range(blobrank[j]):
                        blob.append(blobs[j][k][n])
                    walkerblob.append(blob)
            steplist.append(walkerblob)
        result_blobs.append(steplist)
    result._blobs = np.array(result_blobs, dtype=np.dtype("object"))
    return result
|
zblzREPO_NAMEnaimaPATH_START.@naima_extracted@naima-main@src@naima@analysis.py@.PATH_END.py
|
{
"filename": "recipes_BUNDLE.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/ghost/recipes/sq/recipes_BUNDLE.py",
"type": "Python"
}
|
"""
Recipes available to data with tags ``['GHOST', `BUNDLE`]``.
Recipes imported from :any:`qa.recipes_BUNDLE`.
"""
# Tags this recipe module applies to: raw, unsplit GHOST bundle files.
# (Idiom fix: use a set literal instead of set([...]).)
recipe_tags = {'GHOST', 'BUNDLE', 'RAW', 'UNPREPARED'}
def processBundle(p):
"""
This recipe processes GHOST observation bundles.
Parameters
----------
p : Primitives object
A primitive set matching the recipe_tags.
"""
p.splitBundle()
return
_default = processBundle
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@ghost@recipes@sq@recipes_BUNDLE.py@.PATH_END.py
|
{
"filename": "_yanchor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_yanchor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``scatter3d.marker.colorbar.yanchor`` attribute."""

    def __init__(
        self, plotly_name="yanchor", parent_name="scatter3d.marker.colorbar", **kwargs
    ):
        # Pull overridable settings out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop("values", ["top", "middle", "bottom"])
        super(YanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@marker@colorbar@_yanchor.py@.PATH_END.py
|
{
"filename": "common.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/integrate/_ivp/common.py",
"type": "Python"
}
|
from itertools import groupby
from warnings import warn
import numpy as np
from scipy.sparse import find, coo_matrix
EPS = np.finfo(float).eps
def validate_first_step(first_step, t0, t_bound):
    """Assert that first_step is valid and return it.

    Valid means strictly positive and no larger than the integration span
    ``|t_bound - t0|``; raises ValueError otherwise.
    """
    span = np.abs(t_bound - t0)
    if first_step <= 0:
        raise ValueError("`first_step` must be positive.")
    if first_step > span:
        raise ValueError("`first_step` exceeds bounds.")
    return first_step
def validate_max_step(max_step):
    """Assert that max_step is strictly positive and return it."""
    if not max_step > 0:
        raise ValueError("`max_step` must be positive.")
    return max_step
def warn_extraneous(extraneous):
    """Display a warning for extraneous keyword arguments.

    Solver initializers collect keyword arguments they do not understand
    into a dict; this emits a single warning naming each such key.

    Parameters
    ----------
    extraneous : dict
        Extraneous keyword arguments
    """
    if not extraneous:
        return
    names = ', '.join(f'`{x}`' for x in extraneous)
    # stacklevel=3 points the warning at the user's solver call site.
    warn("The following arguments have no effect for a chosen solver: "
         f"{names}.",
         stacklevel=3)
def validate_tol(rtol, atol, n):
    """Validate relative/absolute tolerance values for an ODE solver.

    Parameters
    ----------
    rtol : float or array_like
        Relative tolerance; values below ``100 * EPS`` are clamped up
        with a warning.
    atol : float or array_like, shape (n,)
        Absolute tolerance; must be nonnegative and either scalar or of
        length ``n``.
    n : int
        Number of equations in the system.

    Returns
    -------
    rtol : float or ndarray
        Possibly-clamped relative tolerance.
    atol : ndarray
        Absolute tolerance as an array (0-d for scalar input).

    Raises
    ------
    ValueError
        If `atol` has the wrong shape or contains a negative entry.
    """
    tiny = 100 * np.finfo(float).eps  # == 100 * EPS, the smallest sane rtol
    if np.any(rtol < tiny):
        warn("At least one element of `rtol` is too small. "
             f"Setting `rtol = np.maximum(rtol, {tiny})`.",
             stacklevel=3)
        rtol = np.maximum(rtol, tiny)
    atol = np.asarray(atol)
    if atol.ndim > 0 and atol.shape != (n,):
        raise ValueError("`atol` has wrong shape.")
    if np.any(atol < 0):
        # Bug fix: the check rejects only negative values (zero is allowed),
        # so the message must say "nonnegative", not "positive".
        raise ValueError("`atol` must be nonnegative.")
    return rtol, atol
def norm(x):
    """Root-mean-square (RMS) norm of `x`."""
    return np.linalg.norm(x) / np.sqrt(x.size)
def select_initial_step(fun, t0, y0, t_bound,
                        max_step, f0, direction, order, rtol, atol):
    """Empirically select a good initial step.

    Implements the scheme of [1]_, based on the magnitudes of the state,
    its derivative, and a one-step estimate of the second derivative.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t0 : float
        Initial value of the independent variable.
    y0 : ndarray, shape (n,)
        Initial value of the dependent variable.
    t_bound : float
        End-point of the integration interval; the trial step is capped so
        `fun` is only evaluated inside ``[t0, t_bound]``.
    max_step : float
        Maximum allowable step size.
    f0 : ndarray, shape (n,)
        ``fun(t0, y0)``.
    direction : float
        Integration direction (+1 or -1).
    order : float
        Error estimator order: the controlled error scales like
        ``step_size ** (order + 1)``.
    rtol, atol : float
        Desired relative and absolute tolerances.

    Returns
    -------
    h_abs : float
        Absolute value of the suggested initial step.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations I: Nonstiff Problems", Sec. II.4.
    """
    if y0.size == 0:
        # Empty system: any step works.
        return np.inf
    span = abs(t_bound - t0)
    if span == 0.0:
        return 0.0
    tol_scale = atol + np.abs(y0) * rtol
    d0 = norm(y0 / tol_scale)
    d1 = norm(f0 / tol_scale)
    h0 = 1e-6 if (d0 < 1e-5 or d1 < 1e-5) else 0.01 * d0 / d1
    # Keep t0 + h0 * direction inside the integration interval.
    h0 = min(h0, span)
    trial_y = y0 + h0 * direction * f0
    trial_f = fun(t0 + h0 * direction, trial_y)
    d2 = norm((trial_f - f0) / tol_scale) / h0
    if d1 <= 1e-15 and d2 <= 1e-15:
        h1 = max(1e-6, h0 * 1e-3)
    else:
        h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))
    return min(100 * h0, h1, span, max_step)
class OdeSolution:
    """Continuous ODE solution.

    It is organized as a collection of `DenseOutput` objects which represent
    local interpolants. It provides an algorithm to select a right interpolant
    for each given point.

    The interpolants cover the range between `t_min` and `t_max` (see
    Attributes below). Evaluation outside this interval is not forbidden, but
    the accuracy is not guaranteed.

    When evaluating at a breakpoint (one of the values in `ts`) a segment with
    the lower index is selected.

    Parameters
    ----------
    ts : array_like, shape (n_segments + 1,)
        Time instants between which local interpolants are defined. Must
        be strictly increasing or decreasing (zero segment with two points is
        also allowed).
    interpolants : list of DenseOutput with n_segments elements
        Local interpolants. An i-th interpolant is assumed to be defined
        between ``ts[i]`` and ``ts[i + 1]``.
    alt_segment : boolean
        Requests the alternative interpolant segment selection scheme. At each
        solver integration point, two interpolant segments are available. The
        default (False) and alternative (True) behaviours select the segment
        for which the requested time corresponded to ``t`` and ``t_old``,
        respectively. This functionality is only relevant for testing the
        interpolants' accuracy: different integrators use different
        construction strategies.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, ts, interpolants, alt_segment=False):
        ts = np.asarray(ts)
        d = np.diff(ts)
        # The first case covers integration on zero segment.
        if not ((ts.size == 2 and ts[0] == ts[-1])
                or np.all(d > 0) or np.all(d < 0)):
            raise ValueError("`ts` must be strictly increasing or decreasing.")
        self.n_segments = len(interpolants)
        if ts.shape != (self.n_segments + 1,):
            raise ValueError("Numbers of time stamps and interpolants "
                             "don't match.")
        self.ts = ts
        self.interpolants = interpolants
        # Keep a sorted view of `ts` so np.searchsorted works for both
        # increasing and decreasing grids; `side` encodes the default vs
        # alternative segment-selection scheme at breakpoints.
        if ts[-1] >= ts[0]:
            self.t_min = ts[0]
            self.t_max = ts[-1]
            self.ascending = True
            self.side = "right" if alt_segment else "left"
            self.ts_sorted = ts
        else:
            self.t_min = ts[-1]
            self.t_max = ts[0]
            self.ascending = False
            self.side = "left" if alt_segment else "right"
            self.ts_sorted = ts[::-1]

    def _call_single(self, t):
        # Here we preserve a certain symmetry that when t is in self.ts,
        # if alt_segment=False, then we prioritize a segment with a lower
        # index.
        ind = np.searchsorted(self.ts_sorted, t, side=self.side)
        # Clip to a valid segment; queries outside [t_min, t_max] fall
        # through to the first/last interpolant (extrapolation).
        segment = min(max(ind - 1, 0), self.n_segments - 1)
        if not self.ascending:
            # Map back from the sorted view to the original segment order.
            segment = self.n_segments - 1 - segment
        return self.interpolants[segment](t)

    def __call__(self, t):
        """Evaluate the solution.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate at.

        Returns
        -------
        y : ndarray, shape (n_states,) or (n_states, n_points)
            Computed values. Shape depends on whether `t` is a scalar or a
            1-D array.
        """
        t = np.asarray(t)
        if t.ndim == 0:
            return self._call_single(t)
        # Sort the query points so each interpolant is called once per
        # contiguous run; `reverse` restores the caller's ordering at the end.
        order = np.argsort(t)
        reverse = np.empty_like(order)
        reverse[order] = np.arange(order.shape[0])
        t_sorted = t[order]
        # See comment in self._call_single.
        segments = np.searchsorted(self.ts_sorted, t_sorted, side=self.side)
        segments -= 1
        segments[segments < 0] = 0
        segments[segments > self.n_segments - 1] = self.n_segments - 1
        if not self.ascending:
            segments = self.n_segments - 1 - segments
        ys = []
        group_start = 0
        # groupby yields runs of identical segment indices in t_sorted order.
        for segment, group in groupby(segments):
            group_end = group_start + len(list(group))
            y = self.interpolants[segment](t_sorted[group_start:group_end])
            ys.append(y)
            group_start = group_end
        ys = np.hstack(ys)
        ys = ys[:, reverse]
        return ys
# Thresholds (relative to the function scale) steering the adaptive
# finite-difference step factors in num_jac:
NUM_JAC_DIFF_REJECT = EPS ** 0.875  # difference is pure round-off: recompute
NUM_JAC_DIFF_SMALL = EPS ** 0.75    # difference too small: grow the factor
NUM_JAC_DIFF_BIG = EPS ** 0.25      # difference too large: shrink the factor
NUM_JAC_MIN_FACTOR = 1e3 * EPS      # floor on the step factor
NUM_JAC_FACTOR_INCREASE = 10        # multiplicative growth of the factor
NUM_JAC_FACTOR_DECREASE = 0.1       # multiplicative shrink of the factor
def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
    """Finite-difference Jacobian approximation tailored for ODE solvers.

    Approximates ``J[i, j] = d f_i / d y_j`` by forward differences. The
    perturbation for component j is ``factor[j] * max(|y[j]|, threshold)``,
    signed to match the direction the solution is already moving. The
    returned `factor` adapts from call to call so the differences stay
    well separated from round-off noise (roughly ``EPS * |f|``).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system implemented in a vectorized fashion.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Value of the right hand side at (t, y).
    threshold : float
        Lower bound on |y| when forming the step; typically the solver's
        absolute tolerance (atol).
    factor : ndarray with shape (n,) or None
        Step-size factors from a previous call, or None for the very first
        evaluation; feed the returned value back in next time.
    sparsity : tuple (structure, groups) or None
        Sparsity structure of the Jacobian; `structure` must be csc_matrix.

    Returns
    -------
    J : ndarray or csc_matrix, shape (n, n)
        Jacobian matrix.
    factor : ndarray, shape (n,)
        Suggested `factor` for the next evaluation.
    """
    y = np.asarray(y)
    n = y.shape[0]
    if n == 0:
        # Trivial system: nothing to differentiate.
        return np.empty((0, 0)), factor
    factor = np.full(n, EPS ** 0.5) if factor is None else factor.copy()
    # Step along the direction the ODE dictates (sign of Re(f)), hoping a
    # step that way does not leave the problem's valid region. Real part is
    # used because steps are taken along the real axis.
    sign = np.where(np.real(f) >= 0, 1.0, -1.0)
    magnitude = sign * np.maximum(threshold, np.abs(y))
    # (y + h) - y rounds the step to an exactly representable value.
    step = (y + factor * magnitude) - y
    # Rarely, rounding collapses a step to zero; grow its factor until not.
    for idx in np.nonzero(step == 0)[0]:
        while step[idx] == 0:
            factor[idx] *= 10
            step[idx] = (y[idx] + factor[idx] * magnitude[idx]) - y[idx]
    if sparsity is None:
        return _dense_num_jac(fun, t, y, f, step, factor, magnitude)
    structure, groups = sparsity
    return _sparse_num_jac(fun, t, y, f, step, factor, magnitude,
                           structure, groups)
def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
    # Dense forward-difference Jacobian: perturb all n components at once
    # via a diagonal matrix of steps, then adapt per-column step factors.
    n = y.shape[0]
    h_vecs = np.diag(h)
    f_new = fun(t, y[:, None] + h_vecs)
    diff = f_new - f[:, None]
    # Per column, track the largest-magnitude difference and the scale of
    # the function values it came from — used to judge round-off dominance.
    max_ind = np.argmax(np.abs(diff), axis=0)
    r = np.arange(n)
    max_diff = np.abs(diff[max_ind, r])
    scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        # These columns' differences are indistinguishable from round-off:
        # retry them with a larger step and keep whichever estimate is
        # better separated from noise.
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        # (y + h) - y rounds the new step to an exactly representable value.
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_vecs[ind, ind] = h_new
        f_new = fun(t, y[:, None] + h_vecs[:, ind])
        diff_new = f_new - f[:, None]
        max_ind = np.argmax(np.abs(diff_new), axis=0)
        r = np.arange(ind.shape[0])
        max_diff_new = np.abs(diff_new[max_ind, r])
        scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))
        # Accept the retry only where its relative separation improved.
        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]
    # Turn differences into derivative estimates (column j divided by h[j]).
    diff /= h
    # Adapt factors: grow where round-off threatens, shrink where the
    # difference is so large that truncation error dominates.
    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
    return diff, factor
def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
    # Sparse counterpart of _dense_num_jac: structurally independent columns
    # share a group and are perturbed simultaneously, so only n_groups RHS
    # evaluations are needed instead of n.
    n = y.shape[0]
    n_groups = np.max(groups) + 1
    h_vecs = np.empty((n_groups, n))
    for group in range(n_groups):
        e = np.equal(group, groups)
        h_vecs[group] = h * e
    h_vecs = h_vecs.T
    f_new = fun(t, y[:, None] + h_vecs)
    df = f_new - f[:, None]
    # Scatter the grouped differences back onto the sparsity pattern.
    i, j, _ = find(structure)
    diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
    # Per column, largest-magnitude difference and the function scale it
    # came from — used to judge round-off dominance.
    max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
    r = np.arange(n)
    max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
    scale = np.maximum(np.abs(f[max_ind]),
                       np.abs(f_new[max_ind, groups[r]]))
    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        # Retry round-off-dominated columns with a larger step, evaluating
        # only the groups that contain an affected column.
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_new_all = np.zeros(n)
        h_new_all[ind] = h_new
        groups_unique = np.unique(groups[ind])
        groups_map = np.empty(n_groups, dtype=int)
        h_vecs = np.empty((groups_unique.shape[0], n))
        for k, group in enumerate(groups_unique):
            e = np.equal(group, groups)
            h_vecs[k] = h_new_all * e
            groups_map[group] = k
        h_vecs = h_vecs.T
        f_new = fun(t, y[:, None] + h_vecs)
        df = f_new - f[:, None]
        i, j, _ = find(structure[:, ind])
        diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
                               (i, j)), shape=(n, ind.shape[0])).tocsc()
        max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
        r = np.arange(ind.shape[0])
        max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
        scale_new = np.maximum(
            np.abs(f[max_ind_new]),
            np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))
        # Accept the retry only where its relative separation improved.
        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]
    # Divide each stored column of differences by its step (CSC layout).
    diff.data /= np.repeat(h, np.diff(diff.indptr))
    # Adapt factors exactly as in the dense path.
    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)
    return diff, factor
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@integrate@_ivp@common.py@.PATH_END.py
|
{
"filename": "scimath.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py2/numpy/lib/scimath.py",
"type": "Python"
}
|
"""
Wrapper functions to more user-friendly calling of certain math functions
whose output data-type is different than the input data-type in certain
domains of the input.
For example, for functions like `log` with branch cuts, the versions in this
module provide the mathematically valid answers in the complex plane::
>>> import math
>>> from numpy.lib import scimath
>>> scimath.log(-math.exp(1)) == (1+1j*math.pi)
True
Similarly, `sqrt`, other base logarithms, `power` and trig functions are
correctly handled. See their respective docstrings for specific examples.
"""
from __future__ import division, absolute_import, print_function
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
from numpy.core.overrides import array_function_dispatch
from numpy.lib.type_check import isreal
# Public API: complex-safe counterparts of these numpy functions.
__all__ = [
    'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
    'arctanh'
    ]
# Natural log of 2; not referenced by any function visible here —
# presumably kept for backward compatibility (TODO confirm).
_ln2 = nx.log(2.0)
def _tocomplex(arr):
"""Convert its input `arr` to a complex array.
The input is returned as a complex array of the smallest type that will fit
the original data: types like single, byte, short, etc. become csingle,
while others become cdouble.
A copy of the input is always made.
Parameters
----------
arr : array
Returns
-------
array
An array with the same input data as the input but in complex form.
Examples
--------
First, consider an input of type short:
>>> a = np.array([1,2,3],np.short)
>>> ac = np.lib.scimath._tocomplex(a); ac
array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
>>> ac.dtype
dtype('complex64')
If the input is of type double, the output is correspondingly of the
complex double type as well:
>>> b = np.array([1,2,3],np.double)
>>> bc = np.lib.scimath._tocomplex(b); bc
array([ 1.+0.j, 2.+0.j, 3.+0.j])
>>> bc.dtype
dtype('complex128')
Note that even if the input was complex to begin with, a copy is still
made, since the astype() method always copies:
>>> c = np.array([1,2,3],np.csingle)
>>> cc = np.lib.scimath._tocomplex(c); cc
array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
>>> c *= 2; c
array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
>>> cc
array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
"""
if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
nt.ushort, nt.csingle)):
return arr.astype(nt.csingle)
else:
return arr.astype(nt.cdouble)
def _fix_real_lt_zero(x):
"""Convert `x` to complex if it has real, negative components.
Otherwise, output is just the array version of the input (via asarray).
Parameters
----------
x : array_like
Returns
-------
array
Examples
--------
>>> np.lib.scimath._fix_real_lt_zero([1,2])
array([1, 2])
>>> np.lib.scimath._fix_real_lt_zero([-1,2])
array([-1.+0.j, 2.+0.j])
"""
x = asarray(x)
if any(isreal(x) & (x < 0)):
x = _tocomplex(x)
return x
def _fix_int_lt_zero(x):
"""Convert `x` to double if it has real, negative components.
Otherwise, output is just the array version of the input (via asarray).
Parameters
----------
x : array_like
Returns
-------
array
Examples
--------
>>> np.lib.scimath._fix_int_lt_zero([1,2])
array([1, 2])
>>> np.lib.scimath._fix_int_lt_zero([-1,2])
array([-1., 2.])
"""
x = asarray(x)
if any(isreal(x) & (x < 0)):
x = x * 1.0
return x
def _fix_real_abs_gt_1(x):
"""Convert `x` to complex if it has real components x_i with abs(x_i)>1.
Otherwise, output is just the array version of the input (via asarray).
Parameters
----------
x : array_like
Returns
-------
array
Examples
--------
>>> np.lib.scimath._fix_real_abs_gt_1([0,1])
array([0, 1])
>>> np.lib.scimath._fix_real_abs_gt_1([0,2])
array([ 0.+0.j, 2.+0.j])
"""
x = asarray(x)
if any(isreal(x) & (abs(x) > 1)):
x = _tocomplex(x)
return x
def _unary_dispatcher(x):
return (x,)
@array_function_dispatch(_unary_dispatcher)
def sqrt(x):
    """
    Compute the square root of `x`, returning a complex value for negative
    input elements (unlike `numpy.sqrt`, which returns NaN there).

    Parameters
    ----------
    x : array_like
        The input value(s).

    Returns
    -------
    out : ndarray or scalar
        The square root of `x`; scalar in, scalar out, otherwise an array.

    See Also
    --------
    numpy.sqrt
    """
    return nx.sqrt(_fix_real_lt_zero(x))
@array_function_dispatch(_unary_dispatcher)
def log(x):
    """
    Compute the natural logarithm of `x`.

    Returns the principal value of :math:`log_e(x)`: a real number for real
    `x > 0` (``log(0)`` is ``-inf``, ``log(np.inf)`` is ``inf``), and the
    complex principal value otherwise — unlike `numpy.log`, which returns
    NaN for real `x < 0`.

    Parameters
    ----------
    x : array_like
        The value(s) whose log is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log of `x`; scalar in, scalar out, otherwise an array.

    See Also
    --------
    numpy.log
    """
    return nx.log(_fix_real_lt_zero(x))
@array_function_dispatch(_unary_dispatcher)
def log10(x):
    """
    Compute the logarithm base 10 of `x`.

    Returns the principal value of :math:`log_{10}(x)`: a real number for
    real `x > 0` (``log10(0)`` is ``-inf``, ``log10(np.inf)`` is ``inf``),
    and the complex principal value otherwise — unlike `numpy.log10`,
    which returns NaN for real `x < 0`.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose log base 10 is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base 10 of `x`; scalar in, scalar out, otherwise an array.

    See Also
    --------
    numpy.log10
    """
    return nx.log10(_fix_real_lt_zero(x))
def _logn_dispatcher(n, x):
return (n, x,)
@array_function_dispatch(_logn_dispatcher)
def logn(n, x):
    """
    Take log base `n` of `x`.

    If either `x` or `n` contains negative real values, the computation is
    carried out (and returned) in the complex domain.

    Parameters
    ----------
    n : array_like
        The integer base(s) in which the log is taken.
    x : array_like
        The value(s) whose log base `n` is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base `n` of `x`; scalar in, scalar out, otherwise an array.
    """
    val = _fix_real_lt_zero(x)
    base = _fix_real_lt_zero(n)
    return nx.log(val) / nx.log(base)
@array_function_dispatch(_unary_dispatcher)
def log2(x):
    """
    Compute the logarithm base 2 of `x`.

    Returns the principal value of :math:`log_2(x)`: a real number for real
    `x > 0` (``log2(0)`` is ``-inf``, ``log2(np.inf)`` is ``inf``), and the
    complex principal value otherwise — unlike `numpy.log2`, which returns
    NaN for real `x < 0`.

    Parameters
    ----------
    x : array_like
        The value(s) whose log base 2 is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The log base 2 of `x`; scalar in, scalar out, otherwise an array.

    See Also
    --------
    numpy.log2
    """
    return nx.log2(_fix_real_lt_zero(x))
def _power_dispatcher(x, p):
return (x, p)
@array_function_dispatch(_power_dispatcher)
def power(x, p):
    """
    Return `x` to the power `p` (``x**p``).

    If `x` contains negative real values the computation moves to the
    complex domain; negative integer exponents promote the exponent array
    to double so the result is well defined.

    Parameters
    ----------
    x : array_like
        The input value(s).
    p : array_like of ints
        The power(s) to which `x` is raised. If `x` contains multiple
        values, `p` must be a scalar or contain the same number of values;
        in the latter case the result is ``x[0]**p[0], x[1]**p[1], ...``.

    Returns
    -------
    out : ndarray or scalar
        The result of ``x**p``; scalars in, scalar out, otherwise an array.

    See Also
    --------
    numpy.power
    """
    return nx.power(_fix_real_lt_zero(x), _fix_int_lt_zero(p))
@array_function_dispatch(_unary_dispatcher)
def arccos(x):
    """
    Compute the inverse cosine of `x`.

    Returns the principal value: a real number in :math:`[0, \\pi]` for
    real `x` with ``abs(x) <= 1``, and the complex principal value
    otherwise — unlike `numpy.arccos`, which returns NaN outside
    ``[-1, 1]``.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose arccos is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse cosine(s) of `x`; scalar in, scalar out, otherwise an
        array.

    See Also
    --------
    numpy.arccos
    """
    return nx.arccos(_fix_real_abs_gt_1(x))
@array_function_dispatch(_unary_dispatcher)
def arcsin(x):
    """
    Compute the inverse sine of `x`.

    Returns the principal value: a real number in
    :math:`[-\\pi/2, \\pi/2]` for real `x` with ``abs(x) <= 1``, and the
    complex principal value otherwise — unlike `numpy.arcsin`, which
    returns NaN outside ``[-1, 1]``.

    Parameters
    ----------
    x : array_like or scalar
        The value(s) whose arcsin is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse sine(s) of `x`; scalar in, scalar out, otherwise an
        array.

    See Also
    --------
    numpy.arcsin
    """
    return nx.arcsin(_fix_real_abs_gt_1(x))
@array_function_dispatch(_unary_dispatcher)
def arctanh(x):
    """
    Compute the inverse hyperbolic tangent of `x`.

    Returns the principal value: real for real `x` with ``abs(x) < 1``,
    complex for ``abs(x) > 1`` or complex `x`. ``x = 1`` gives ``inf`` and
    ``x = -1`` gives ``-inf`` — unlike `numpy.arctanh`, which returns NaN
    for real `x` outside ``(-1, 1)`` (though it also returns +/-inf at
    ``x = +/-1``).

    Parameters
    ----------
    x : array_like
        The value(s) whose arctanh is (are) required.

    Returns
    -------
    out : ndarray or scalar
        The inverse hyperbolic tangent(s) of `x`; scalar in, scalar out,
        otherwise an array.

    See Also
    --------
    numpy.arctanh
    """
    return nx.arctanh(_fix_real_abs_gt_1(x))
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py2@numpy@lib@scimath.py@.PATH_END.py
|
{
"filename": "_hovertemplatesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/splom/_hovertemplatesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``splom.hovertemplatesrc`` attribute."""

    def __init__(self, plotly_name="hovertemplatesrc", parent_name="splom", **kwargs):
        # Pull overridable settings out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(HovertemplatesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@splom@_hovertemplatesrc.py@.PATH_END.py
|
{
"filename": "main_sequence_kernels.py",
"repo_name": "ArgonneCPAC/diffstar",
"repo_path": "diffstar_extracted/diffstar-main/diffstar/kernels/main_sequence_kernels.py",
"type": "Python"
}
|
"""
"""
from collections import OrderedDict, namedtuple
import numpy as np
from diffmah.defaults import MAH_K
from diffmah.individual_halo_assembly import (
_calc_halo_history,
_calc_halo_history_scalar,
_rolling_plaw_vs_logt,
)
from jax import jit as jjit
from jax import lax
from jax import numpy as jnp
from jax import vmap
from ..utils import _inverse_sigmoid, _jax_get_dt_array, _sigmoid
from .gas_consumption import _gas_conversion_kern, _vmap_gas_conversion_kern
# Fiducial main-sequence parameter values (name -> default).
DEFAULT_MS_PDICT = OrderedDict(
    lgmcrit=12.0,
    lgy_at_mcrit=-1.0,
    indx_lo=1.0,
    indx_hi=-1.0,
    tau_dep=2.0,
)
# Named-tuple type mirroring DEFAULT_MS_PDICT, and an instance holding the
# default values above.
MSParams = namedtuple("MSParams", list(DEFAULT_MS_PDICT.keys()))
DEFAULT_MS_PARAMS = MSParams(*list(DEFAULT_MS_PDICT.values()))
# Hard (lo, hi) bounds allowed for each main-sequence parameter.
MS_PARAM_BOUNDS_PDICT = OrderedDict(
    lgmcrit=(9.0, 13.5),
    lgy_at_mcrit=(-3.0, 0.0),
    indx_lo=(0.0, 5.0),
    indx_hi=(-5.0, 0.0),
    tau_dep=(0.01, 20.0),
)
INDX_K = 9.0  # Main sequence efficiency transition speed.
def calculate_sigmoid_bounds(param_bounds):
    """Build sigmoid parameters from hard (lo, hi) bounds.

    For each entry, the output value is the 4-tuple
    ``(midpoint, steepness, lo, hi)`` where midpoint is the center of the
    interval and steepness is ``|4 / (hi - lo)|``.

    Parameters
    ----------
    param_bounds : mapping of str -> (lo, hi) tuple

    Returns
    -------
    OrderedDict of str -> 4-tuple
    """
    sig_bounds = OrderedDict()
    for name in param_bounds:
        pair = param_bounds[name]
        midpoint = float(np.mean(pair))
        steepness = abs(float(4.0 / np.diff(pair)[0]))
        sig_bounds[name] = (midpoint, steepness) + pair
    return sig_bounds
MS_BOUNDING_SIGMOID_PDICT = calculate_sigmoid_bounds(MS_PARAM_BOUNDS_PDICT)
@jjit
def _lax_ms_sfh_scalar_kern_scan(t_form, mah_params, ms_params, lgt0, fb, t_table):
    # lax.scan-based SFR kernel at a single time t_form: accumulates the
    # lagged gas-consumption contribution from every accretion time in
    # t_table, then multiplies by the main-sequence efficiency.
    logmp, logtc, early, late = mah_params
    all_mah_params = lgt0, logmp, logtc, MAH_K, early, late
    lgt_form = jnp.log10(t_form)
    log_mah_at_tform = _rolling_plaw_vs_logt(lgt_form, *all_mah_params)
    # First four ms_params feed the efficiency power law; the fifth is the
    # gas-depletion timescale.
    sfr_eff_params = ms_params[:4]
    sfr_eff = _sfr_eff_plaw(log_mah_at_tform, *sfr_eff_params)
    tau_dep = ms_params[4]
    # Upper bound of the tau_dep sigmoid range (index 3 = "hi").
    tau_dep_max = MS_BOUNDING_SIGMOID_PDICT["tau_dep"][3]
    dtarr = _jax_get_dt_array(t_table)
    @jjit
    def scan_func(carryover, el):
        # carryover: running dmgas/dt total; el: (accretion time, its dt).
        tacc, dt = el
        dmgas_dt = carryover
        lgtacc = jnp.log10(tacc)
        res = _calc_halo_history_scalar(lgtacc, *all_mah_params)
        dmhdt_at_tacc, log_mah_at_tacc = res
        # Instantaneous gas accretion = baryon fraction x halo accretion.
        dmgdt_inst = fb * dmhdt_at_tacc
        # Lag factor delays consumption of gas accreted at tacc until t_form.
        lag_factor = _gas_conversion_kern(t_form, tacc, dt, tau_dep, tau_dep_max)
        dmgas_dt_from_tacc = dmgdt_inst * lag_factor * dt
        dmgas_dt = dmgas_dt + dmgas_dt_from_tacc
        carryover = dmgas_dt
        accumulated = dmgas_dt
        return carryover, accumulated
    scan_init = 0.0
    scan_arr = jnp.array((t_table, dtarr)).T
    res = lax.scan(scan_func, scan_init, scan_arr)
    # res[0] is the final carry: total gas-consumption rate at t_form.
    dmgas_dt = res[0]
    sfr = dmgas_dt * sfr_eff
    return sfr
@jjit
def _lax_ms_sfh_scalar_kern_sum(t_form, mah_params, ms_params, lgt0, fb, t_table):
    # Vectorized-sum variant of the scan kernel above: evaluates the halo
    # history on the whole t_table at once and convolves with the
    # gas-conversion kernel. Assumes t_table is uniformly spaced (a single
    # dt is taken from the first two entries) — TODO confirm with callers.
    logmp, logtc, early, late = mah_params
    all_mah_params = lgt0, logmp, logtc, MAH_K, early, late
    lgt_form = jnp.log10(t_form)
    log_mah_at_tform = _rolling_plaw_vs_logt(lgt_form, *all_mah_params)
    sfr_eff_params = ms_params[:4]
    sfr_eff = _sfr_eff_plaw(log_mah_at_tform, *sfr_eff_params)
    tau_dep = ms_params[4]
    # Upper bound of the tau_dep sigmoid range (index 3 = "hi").
    tau_dep_max = MS_BOUNDING_SIGMOID_PDICT["tau_dep"][3]
    # compute inst. gas accretion
    lgtacc = jnp.log10(t_table)
    res = _calc_halo_history(lgtacc, *all_mah_params)
    dmhdt_at_tacc, log_mah_at_tacc = res
    dmgdt_inst = fb * dmhdt_at_tacc
    # compute the consumption kernel
    dt = t_table[1] - t_table[0]
    kern = _vmap_gas_conversion_kern(t_form, t_table, dt, tau_dep, tau_dep_max)
    # convolve
    dmgas_dt = jnp.sum(dmgdt_inst * kern * dt)
    sfr = dmgas_dt * sfr_eff
    return sfr
# Default scalar kernel: the sum-based implementation.
_lax_ms_sfh_scalar_kern = _lax_ms_sfh_scalar_kern_sum
@jjit
def _sfr_eff_plaw(lgm, lgmcrit, lgy_at_mcrit, indx_lo, indx_hi):
    """Instantaneous baryon conversion efficiency of main sequence galaxies.

    A broken power law in halo mass: the log-space slope rolls smoothly
    (sigmoid with speed INDX_K) from `indx_lo` below `lgmcrit` to
    `indx_hi` above it, pinned to `lgy_at_mcrit` at the critical mass.

    Parameters
    ----------
    lgm : ndarray of shape (n_times, )
        Base-10 log of the diffmah halo mass accretion history [Msun]
    lgmcrit : float
        Base-10 log of the critical mass
    lgy_at_mcrit : float
        Base-10 log of the efficiency at the critical mass
    indx_lo : float
        Low-mass asymptote of the log-space slope
    indx_hi : float
        High-mass asymptote of the log-space slope

    Returns
    -------
    efficiency : ndarray of shape (n_times, )
        Main sequence efficiency value at each snapshot
    """
    local_slope = _sigmoid(lgm, lgmcrit, INDX_K, indx_lo, indx_hi)
    lg_eff = lgy_at_mcrit + local_slope * (lgm - lgmcrit)
    return 10**lg_eff
@jjit
def _get_bounded_sfr_params(
    u_lgmcrit,
    u_lgy_at_mcrit,
    u_indx_lo,
    u_indx_hi,
    u_tau_dep,
):
    """Map unbounded main-sequence parameters to their bounded counterparts.

    Each unbounded parameter is passed through the bounding sigmoid whose
    settings are stored under the matching key of MS_BOUNDING_SIGMOID_PDICT.

    Returns
    -------
    bounded_params : tuple
        (lgmcrit, lgy_at_mcrit, indx_lo, indx_hi, tau_dep)
    """
    u_params = (u_lgmcrit, u_lgy_at_mcrit, u_indx_lo, u_indx_hi, u_tau_dep)
    keys = ("lgmcrit", "lgy_at_mcrit", "indx_lo", "indx_hi", "tau_dep")
    return tuple(
        _sigmoid(u_param, *MS_BOUNDING_SIGMOID_PDICT[key])
        for u_param, key in zip(u_params, keys)
    )
@jjit
def _get_unbounded_sfr_params(
    lgmcrit,
    lgy_at_mcrit,
    indx_lo,
    indx_hi,
    tau_dep,
):
    """Map bounded main-sequence parameters to their unbounded counterparts.

    Inverse of ``_get_bounded_sfr_params``: each bounded parameter is passed
    through the inverse of the bounding sigmoid stored under the matching key
    of MS_BOUNDING_SIGMOID_PDICT.

    Returns
    -------
    unbounded_params : tuple
        (u_lgmcrit, u_lgy_at_mcrit, u_indx_lo, u_indx_hi, u_tau_dep)
    """
    u_lgmcrit = _inverse_sigmoid(lgmcrit, *MS_BOUNDING_SIGMOID_PDICT["lgmcrit"])
    u_lgy_at_mcrit = _inverse_sigmoid(
        lgy_at_mcrit, *MS_BOUNDING_SIGMOID_PDICT["lgy_at_mcrit"]
    )
    u_indx_lo = _inverse_sigmoid(indx_lo, *MS_BOUNDING_SIGMOID_PDICT["indx_lo"])
    u_indx_hi = _inverse_sigmoid(indx_hi, *MS_BOUNDING_SIGMOID_PDICT["indx_hi"])
    u_tau_dep = _inverse_sigmoid(tau_dep, *MS_BOUNDING_SIGMOID_PDICT["tau_dep"])
    # FIX: result tuple was misleadingly named `bounded_params` even though it
    # holds the UNbounded parameters; renamed for consistency with the
    # companion function above.
    unbounded_params = (
        u_lgmcrit,
        u_lgy_at_mcrit,
        u_indx_lo,
        u_indx_hi,
        u_tau_dep,
    )
    return unbounded_params
@jjit
def _get_bounded_sfr_params_galpop_kern(ms_params):
    # Array-returning wrapper around _get_bounded_sfr_params; serves as the
    # per-galaxy kernel for the vmap'd batch version defined below.
    return jnp.array(_get_bounded_sfr_params(*ms_params))
@jjit
def _get_unbounded_sfr_params_galpop_kern(u_ms_params):
    # Array-returning wrapper around _get_unbounded_sfr_params; serves as the
    # per-galaxy kernel for the vmap'd batch version defined below.
    return jnp.array(_get_unbounded_sfr_params(*u_ms_params))
# Batched (galaxy-population) versions: map the scalar kernels over axis 0
# of an (n_gals, n_params) parameter array.
_get_bounded_sfr_params_vmap = jjit(
    vmap(_get_bounded_sfr_params_galpop_kern, in_axes=(0,))
)
_get_unbounded_sfr_params_vmap = jjit(
    vmap(_get_unbounded_sfr_params_galpop_kern, in_axes=(0,))
)
# Default unbounded parameters: field names are the bounded names with a
# "u_" prefix; values are derived from the default bounded parameters.
MSUParams = namedtuple("MSUParams", ["u_" + key for key in DEFAULT_MS_PDICT.keys()])
DEFAULT_U_MS_PARAMS = MSUParams(*_get_unbounded_sfr_params(*DEFAULT_MS_PARAMS))
DEFAULT_U_MS_PDICT = OrderedDict(
    [(key, val) for key, val in zip(DEFAULT_U_MS_PARAMS._fields, DEFAULT_U_MS_PARAMS)]
)
|
ArgonneCPACREPO_NAMEdiffstarPATH_START.@diffstar_extracted@diffstar-main@diffstar@kernels@main_sequence_kernels.py@.PATH_END.py
|
{
"filename": "strings_test.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/tests/strings_test.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import sys
import re
import vaex
import numpy as np
import pyarrow as pa
import pytest
from common import small_buffer
@pytest.mark.skipif(vaex.utils.osname == 'windows',
reason="windows' snprintf seems buggy")
def test_format():
num1 = np.array([1, 2, 3], dtype=int)
num2 = np.array([1.1, 2.2, 3.3], dtype=float)
text = ['Here', 'we', 'go']
df = vaex.from_arrays(num1=num1, num2=num2, text=text)
assert df.num1.format("%d").tolist() == ['1', '2', '3']
assert df.num1.format("%04d").tolist() == ['0001', '0002', '0003']
assert df.num2.format('%f').tolist() == ['1.100000', '2.200000', '3.300000']
assert df.num2.format('%05.2f').tolist() == ['01.10', '02.20', '03.30']
assert df.text.format('pre-%s-post').tolist() == ['pre-%s-post' % k for k in text]
def test_dtype_object_string(tmpdir):
# CHANGE: before vaex v4 we worked with dtype object, now we lazily cast to arrow
x = np.arange(8, 12)
s = np.array(list(map(str, x)), dtype='O')
df = vaex.from_arrays(x=x, s=s)
assert df.columns['s'].type == pa.string()
path = str(tmpdir.join('test.arrow'))
df.export(path)
df_read = vaex.open(path)
assert df_read.compare(df) == ([], [], [], [])
def test_dtype_unicode_string(tmpdir):
# CHANGE: before vaex v4 we worked with unicode, now we lazily cast to arrow
x = np.arange(8, 12)
s = np.array(list(map(str, x)), dtype='U')
df = vaex.from_arrays(x=x, s=s)
assert df.columns['s'].type == pa.string()
path = str(tmpdir.join('test.arrow'))
df.export(path)
df_read = vaex.open(path)
assert df_read.compare(df) == ([], [], [], [])
def test_export_arrow_strings_to_hdf5(tmpdir):
df = vaex.from_arrays(names=np.array(['hi', 'is', 'l2', np.nan], dtype='O'))
path = str(tmpdir.join('test.arrow'))
df.export(path)
df_read_arrow = vaex.open(path)
path = str(tmpdir.join('test.hdf5'))
df.export(path)
df_read_hdf5 = vaex.open(path)
assert df_read_hdf5.compare(df_read_arrow) == ([], [], [], [])
def test_arrow_strings_concat(tmpdir):
df = vaex.from_arrays(names=['hi', 'is', 'l2'])
path = str(tmpdir.join('test.arrow'))
df.export(path)
df_read_arrow = vaex.open(path)
path = str(tmpdir.join('test.hdf5'))
df_read_arrow.export(path)
df_read_hdf5 = vaex.open(path)
assert df_read_hdf5.compare(df_read_arrow) == ([], [], [], [])
def test_concat():
ds1 = vaex.from_arrays(names=['hi', 'is', 'l2'])
ds2 = vaex.from_arrays(names=['hello', 'this', 'is', 'long'])
ds = ds1.concat(ds2)
assert len(ds) == len(ds1) + len(ds2)
assert ds.data_type('names') == pa.string()
assert ds.data_type('names') != object
def test_string_count_stat():
ds = vaex.from_arrays(names=['hello', 'this', 'is', 'long'])
assert ds.count(ds.names) == 4
ds = vaex.from_arrays(names=np.ma.array(['hello', 'this', 'is', 'long'], mask=[0, 0, 1, 0]))
assert ds.count(ds.names) == 3
df = vaex.from_arrays(names=np.array(['hi', 'is', 'l2', np.nan], dtype='O'))
assert df.count(ds.names) == 3
names = vaex.string_column(['hello', 'this', None, 'long'])
x = np.arange(len(names))
df = vaex.from_arrays(names=names, x=x)
assert df.count(df.names, binby='x', limits=[0, 100], shape=1).tolist() == [3]
@pytest.mark.skip
def test_string_dtype_with_none():
ds = vaex.from_arrays(names=['hello', 'this', 'is', None])
assert ds.count(ds.names) == 3
def test_unicode():
ds = vaex.from_arrays(names=['bla\u1234'])
assert ds.names.dtype == pa.string()
ds = vaex.from_arrays(names=['bla'])
assert ds.names.dtype == pa.string()
@pytest.mark.skipif(sys.version_info < (3, 3), reason="requires python3.4 or higher")
def test_concat_mixed():
# this can happen when you want to concat multiple csv files
# and pandas makes one have nans, since they all have missing values
# and the other string
ds1 = vaex.from_arrays(names=['not', 'missing'])
ds2 = vaex.from_arrays(names=[np.nan, np.nan])
assert ds1.data_type(ds1.names) == pa.string()
assert ds2.data_type(ds2.names) == np.float64
ds = ds1.concat(ds2)
assert len(ds) == len(ds1) + len(ds2)
assert ds.data_type(ds.names) == ds1.names.dtype
def test_strip():
    """str.strip should remove leading and trailing whitespace."""
    ds = vaex.from_arrays(names=['this ', ' has', ' space'])
    ds['stripped'] = ds.names.str.strip()
    # BUG FIX: the comparison below was missing `assert`, so the test
    # evaluated the expression and discarded it — it could never fail.
    assert ds.stripped.tolist() == ['this', 'has', 'space']
@pytest.mark.skipif(sys.version_info < (3, 3), reason="requires python3.4 or higher")
def test_unicode2(tmpdir):
path = str(tmpdir.join('utf32.hdf5'))
ds = vaex.from_arrays(names=["vaex", "or", "væx!"])
assert ds.names.dtype == pa.string()
ds.export_hdf5(path)
ds = vaex.open(path)
assert ds.names.dtype == pa.string()
assert ds.names.tolist() == ["vaex", "or", "væx!"]
@pytest.fixture(params=['dfs_arrow', 'dfs_array'])
def dfs(request, dfs_arrow, dfs_array):
named = dict(dfs_arrow=dfs_arrow, dfs_array=dfs_array)
return named[request.param]
string_list = ["vaex", " \tor", "VæX! ", "vaex or VÆX!", "Æ and", "æ are weird", "12", "æ", "a1", "a1æ", "\t "]
unicode_compat = lambda x: x
try:
unicode
unicode_compat = lambda x: x.decode('utf8')
string_list = map(unicode_compat, string_list)
except NameError:
pass
string_list_reverse = string_list[::-1]
@pytest.fixture(scope='session')
def dfs_arrow(tmp_path_factory):
    """Session-scoped DataFrame fixture backed by an on-disk hdf5 file.

    Exporting and re-opening the DataFrame makes its string columns
    memory-mapped read-only, exercising the read-only string code paths.
    """
    tmpdir = tmp_path_factory.mktemp("vaex")
    path = str(tmpdir / 'strings.hdf5')
    df = vaex.from_arrays(s=vaex.string_column(string_list), sr=vaex.string_column(string_list_reverse))
    df.export(path)  # we write it out so that the memory is read only
    # BUG FIX: removed the unreachable `return df` that followed this return.
    return vaex.open(path)
def test_null_values():
df = vaex.from_arrays(s=vaex.string_column(['aap', None, 'mies']), x=[0, 1, 2])
assert df.count() == 3
assert df.count(df.s) == 2
assert df.count(df.s, selection=df.x > 0) == 1
@pytest.fixture()
def dfs_array():
return vaex.from_arrays(s=np.array(string_list, dtype='O'), sr=np.array(string_list_reverse, dtype='O'))
def test_byte_length(dfs):
assert dfs.s.str.byte_length().tolist() == [len(k.encode('utf8')) for k in string_list]
def test_string_capitalize(dfs):
assert dfs.s.str.capitalize().tolist() == dfs.s.str_pandas.capitalize().tolist()
def test_string_cat(dfs):
c = [s1+s2 for s1, s2 in zip(string_list, string_list_reverse)]
assert dfs.s.str.cat(dfs.sr).tolist() == c
@pytest.mark.xfail(reason='pandas does not like getting an arrow array as argument')
def test_string_cat_pandas(dfs):
    """str_pandas variant of cat.

    BUG FIX: renamed from `test_string_cat` — the duplicate name shadowed the
    str-based `test_string_cat` defined just above, so that test was never
    collected or run by pytest.
    """
    c = [s1+s2 for s1, s2 in zip(string_list, string_list_reverse)]
    assert dfs.s.str_pandas.cat(dfs.sr).tolist() == c
def test_string_contains(dfs):
assert dfs.s.str.contains('v', regex=False).tolist() == [True, False, False, True, False, False, False, False, False, False, False]
assert dfs.s.str.contains('æ', regex=False).tolist() == [False, False, True, False, False, True, False, True, False, True, False]
assert dfs.s.str.contains('Æ', regex=False).tolist() == [False, False, False, True, True, False, False, False, False, False, False]
@pytest.mark.parametrize("width", [2, 10])
def test_string_center(dfs, width):
assert dfs.s.str.center(width).tolist() == dfs.s.str_pandas.center(width).tolist()
def test_string_counts(dfs):
assert dfs.s.str.count("v", regex=False).tolist() == dfs.s.str_pandas.count("v").tolist()
assert dfs.s.str.count("[va]", regex=True).tolist() == dfs.s.str_pandas.count("[va]").tolist()
def test_string_endswith(dfs):
assert dfs.s.str.endswith("x").tolist() == dfs.s.str_pandas.endswith("x").tolist()
@pytest.mark.parametrize("sub", ["v", unicode_compat("æ")])
@pytest.mark.parametrize("start", [0, 3, 5])
@pytest.mark.parametrize("end", [-1, 3, 5, 10])
def test_string_find(dfs, sub, start, end):
assert dfs.s.str.find(sub, start, end).tolist() == dfs.s.str_pandas.find(sub, start, end).tolist()
@pytest.mark.parametrize("i", [-1, 3, 5, 10])
def test_string_get(dfs, i):
x = dfs.s.str_pandas.get(i).values.tolist()
assert dfs.s.str.get(i).tolist() == [k[i] if i < len(k) else '' for k in string_list]
@pytest.mark.parametrize("sub", ["v", "æ"])
@pytest.mark.parametrize("start", [0, 3, 5])
@pytest.mark.parametrize("end", [-1, 3, 5, 10])
def test_string_index(dfs, sub, start, end):
assert dfs.s.str.find(sub, start, end).tolist() == dfs.s.str.index(sub, start, end).tolist()
@pytest.mark.parametrize("pattern", [None, ' '])
def test_string_join(dfs, pattern):
assert dfs.s.str.split(pattern).str.join('-').tolist() == dfs.s.str.split(pattern).str.join('-').tolist()
@pytest.mark.parametrize("chunk_size", [3, 13])
def test_string_join_large_list_large_string(chunk_size):
x = np.arange(11)
s = pa.array([[f'{i:02}'] for i in range(11)], type=pa.large_list(pa.large_string()))
df = vaex.from_arrays(x=x, s=s)
with small_buffer(df, size=chunk_size):
df['joined_s'] = df.s.str.join(",")
# Do the joining outside of vaex
expected_result = [','.join(i) for i in df.s.tolist()]
result = df.joined_s.tolist()
assert result == expected_result
def test_string_len(dfs):
assert dfs.s.str.len().astype('i4').tolist() == [len(k) for k in string_list]
assert dfs.s.str_pandas.len().astype('i4').tolist() == [len(k) for k in string_list]
@pytest.mark.parametrize("width", [2, 10])
def test_string_ljust(dfs, width):
assert dfs.s.str.ljust(width).tolist() == dfs.s.str_pandas.ljust(width).tolist()
def test_string_lower(dfs):
assert dfs.s.str.lower().tolist() == dfs.s.str_pandas.lower().tolist()
def test_string_lstrip(dfs):
assert dfs.s.str.lstrip().tolist() == dfs.s.str_pandas.lstrip().tolist()
assert dfs.s.str.lstrip('vV ').tolist() == dfs.s.str_pandas.lstrip('vV ').tolist()
def test_string_match(dfs):
assert dfs.s.str.match('^v.*').tolist() == dfs.s.str_pandas.match('^v.*').tolist()
assert dfs.s.str.match('^v.*').tolist() == [k.startswith('v') for k in string_list]
# TODO: normalize
@pytest.mark.parametrize("width", [2, 10])
@pytest.mark.parametrize("side", ['left', 'right', 'both'])
def test_string_pad(dfs, width, side):
assert dfs.s.str.pad(width, side=side).tolist() == dfs.s.str_pandas.pad(width, side=side).tolist()
# TODO: partition
@pytest.mark.parametrize("repeats", [1, 3])
def test_string_repeat(dfs, repeats):
assert dfs.s.str.repeat(repeats).tolist() == dfs.s.str_pandas.repeat(repeats).tolist()
@pytest.mark.parametrize("pattern", ["v", " ", unicode_compat("VæX")])
@pytest.mark.parametrize("replacement", ["?", unicode_compat("VæX")])
@pytest.mark.parametrize("n", [-1, 1])
def test_string_replace(dfs, pattern, replacement, n):
assert dfs.s.str.replace(pattern, replacement, n).tolist() == dfs.s.str_pandas.replace(pattern, replacement, n).tolist()
def test_string_replace_empty():
x = ['', '', 'please']
df = vaex.from_arrays(x=x,)
x = df.x.str.replace(pat = r'^\s*$', repl = 'empty', regex = True)
assert x.tolist() == ['empty', 'empty', 'please']
@pytest.mark.parametrize("pattern", ["v", " "])
@pytest.mark.parametrize("replacement", ["?", unicode_compat("VæX")])
@pytest.mark.parametrize("flags", [0, int(re.IGNORECASE)])
def test_string_replace_regex(dfs, pattern, replacement, flags):
assert dfs.s.str.replace(pattern, replacement, flags=flags, regex=True).tolist() == \
dfs.s.str_pandas.replace(pattern, replacement, flags=flags, regex=True).tolist()
@pytest.mark.xfail(reason='unicode not supported fully in regex')
@pytest.mark.parametrize("pattern", [unicode_compat("VæX")])
@pytest.mark.parametrize("replacement", ["?", unicode_compat("VæX")])
@pytest.mark.parametrize("flags", [0, int(re.IGNORECASE)])
def test_string_replace_regex_unicode(dfs, pattern, replacement, flags):
assert dfs.s.str.replace(pattern, replacement, flags=flags, regex=True).tolist() == \
dfs.s.str_pandas.replace(pattern, replacement, flags=flags, regex=True).tolist()
def test_string_extract_regex():
    """extract_regex should return a struct of named groups, or None on no match."""
    ds = vaex.from_arrays(email=["foo@bar.org", "bar@foo.org", "open@source.org", "invalid@address.com"])
    # FIX: use a raw string — `\.` in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning today, SyntaxError in future Python).
    # The runtime value of the pattern is unchanged.
    pattern = r"(?P<name>.*)@(?P<address>.*)\.org"
    expr = ds.email.str.extract_regex(pattern=pattern)
    assert expr.tolist() == [{"name": "foo", "address": "bar"},
                             {"name": "bar", "address": "foo"},
                             {"name": "open", "address": "source"},
                             None]
@pytest.mark.parametrize("sub", ["v", unicode_compat("æ")])
@pytest.mark.parametrize("start", [0, 3, 5])
@pytest.mark.parametrize("end", [-1, 3, 5, 10])
def test_string_rfind(dfs, sub, start, end):
assert dfs.s.str.rfind(sub, start, end).tolist() == dfs.s.str_pandas.rfind(sub, start, end).tolist()
@pytest.mark.parametrize("sub", ["v", unicode_compat("æ")])
@pytest.mark.parametrize("start", [0, 3, 5])
@pytest.mark.parametrize("end", [-1, 3, 5, 10])
def test_string_rindex(dfs, sub, start, end):
assert dfs.s.str.rindex(sub, start, end).tolist() == dfs.s.str_pandas.rfind(sub, start, end).tolist()
@pytest.mark.parametrize("width", [2, 10])
def test_string_rjust(dfs, width):
assert dfs.s.str.rjust(width).tolist() == dfs.s.str_pandas.rjust(width).tolist()
def test_string_rstrip(dfs):
assert dfs.s.str.rstrip().tolist() == dfs.s.str_pandas.rstrip().tolist()
assert dfs.s.str.rstrip('x! ').tolist() == dfs.s.str_pandas.rstrip('x! ').tolist()
# @pytest.mark.parametrize("start", [0, 3, 5])
# @pytest.mark.parametrize("end", [-1, 3, 5, 10])
@pytest.mark.parametrize("start", [0, -1, -5, 10])
@pytest.mark.parametrize("end", [None, -1, 3, 1000])
def test_string_slice(dfs, start, end):
assert dfs.s.str.slice(start, end).tolist() == dfs.s.str_pandas.slice(start, end).tolist()
def test_string_startswith(dfs):
assert dfs.s.str.startswith("x").tolist() == dfs.s.str_pandas.startswith("x").tolist()
def test_string_strip(dfs):
assert dfs.s.str.rstrip().tolist() == dfs.s.str_pandas.rstrip().tolist()
assert dfs.s.str.rstrip('vx! ').tolist() == dfs.s.str_pandas.rstrip('vx! ').tolist()
def test_string_title(dfs):
assert dfs.s.str.title().tolist() == dfs.s.str_pandas.title().tolist()
def test_string_lower(dfs):
    # NOTE(review): duplicate definition — an identical test_string_lower
    # exists earlier in this file; this one shadows it at collection time.
    assert dfs.s.str.lower().tolist() == dfs.s.str_pandas.lower().tolist()
def test_string_upper(dfs):
assert dfs.s.str.upper().tolist() == dfs.s.str_pandas.upper().tolist()
def test_string_isalnum(dfs):
assert dfs.s.str.isalnum().tolist() == dfs.s.str_pandas.isalnum().tolist()
def test_string_isalpha(dfs):
assert dfs.s.str.isalpha().tolist() == dfs.s.str_pandas.isalpha().tolist()
def test_string_isdigit(dfs):
assert dfs.s.str.isdigit().tolist() == dfs.s.str_pandas.isdigit().tolist()
def test_string_isspace(dfs):
assert dfs.s.str.isspace().tolist() == dfs.s.str_pandas.isspace().tolist()
def test_string_islower(dfs):
assert dfs.s.str.islower().tolist() == dfs.s.str_pandas.islower().tolist()
assert dfs.s.str.lower().str.islower().tolist() == dfs.s.str_pandas.lower().str_pandas.islower().tolist()
@pytest.mark.parametrize("ascii", [True, False])
def test_string_istitle(ascii):
df = vaex.from_arrays(s=['Title Case', 'no title'])
assert df.s.str.istitle(ascii=ascii).tolist() == [True, False]
def test_string_isupper(dfs):
assert dfs.s.str.isupper().tolist() == dfs.s.str_pandas.isupper().tolist()
assert dfs.s.str.upper().str.isupper().tolist() == dfs.s.str_pandas.upper().str_pandas.isupper().tolist()
def test_string_isspace(dfs):
assert dfs.s.str.isspace().tolist() == dfs.s.str_pandas.isspace().tolist()
@pytest.mark.parametrize("width", [2, 10])
def test_string_zfill(dfs, width):
assert dfs.s.str.zfill(width).tolist() == dfs.s.str_pandas.zfill(width).tolist()
def test_to_string():
x = np.arange(1, 4, dtype='f4')
df = vaex.from_arrays(x=x)
df['s'] = df.x.to_string()
assert df.s.tolist() == ["%f" % k for k in x]
def test_string_strip_special_case():
strings = ["Explanation\nWhy the edits made under my username Hardcore Metallica Fan were reverted? "
"They weren't vandalisms, just closure on some GAs after I voted at New York Dolls FAC. "
"And please don't remove the template from the talk page since I'm retired now.89.205.38.27"]
df = vaex.from_arrays(s=vaex.string_column(strings))
df.s.str.strip(' ').values # .get(0)
def test_string_strip_special_case2():
strings = ['The eunuch in question left me no choice but to reinsert it. Take action as you see fit.·snunɐw·']
df = vaex.from_arrays(s=vaex.string_column(strings))
assert df.s.str.upper().tolist() == df.s.str_pandas.upper().tolist()
@pytest.mark.xfail(reason='we need to fix this, similar to upper and lower')
def test_string_strip_special_case3():
strings = ['ɐa', 'aap']
df = vaex.from_arrays(s=vaex.string_column(strings))
assert df.s.str.capitalize().tolist() == df.s.str_pandas.capitalize().tolist()
def test_string_slice_repr():
s = ['Here', 'is', 'a', 'simple', 'unit-test']
df = vaex.from_arrays(s=s)
df['sliced_s'] = df.s.str.slice(start=2, stop=5)
repr(df['sliced_s'])
@pytest.mark.skipif(sys.version_info[0] == 2, reason="no support for python2")
@pytest.mark.parametrize("match", ["vaex", "VæX! "])
def test_strings_operator_equals(dfs, match):
assert (dfs.s == match).tolist() == [k == match for k in string_list]
assert (match == dfs.s).tolist() == [k == match for k in string_list]
assert (dfs.s == dfs.s).tolist() == [k == k for k in string_list]
@pytest.mark.skipif(sys.version_info[0] == 2, reason="no support for python2")
@pytest.mark.parametrize("match", ["vaex", "VæX! "])
def test_strings_operator_notequals(dfs, match):
assert (dfs.s != match).tolist() == [k != match for k in string_list]
assert (match != dfs.s).tolist() == [k != match for k in string_list]
assert (dfs.s != dfs.s).tolist() == [k != k for k in string_list]
@pytest.mark.skipif(sys.version_info[0] == 2, reason="no support for python2")
@pytest.mark.parametrize("extra", ["vaex", "VæX! "])
def test_strings_operator_plus(dfs, extra):
assert (dfs.s + extra).tolist() == [k + extra for k in string_list]
assert (extra + dfs.s).tolist() == [extra + k for k in string_list]
assert (dfs.s + dfs.s).tolist() == [k + k for k in string_list]
assert (dfs.s + extra + dfs.s).tolist() == [k + extra + k for k in string_list]
def test_masked_string():
s = np.ma.MaskedArray(data=['dog', 'dog', 'cat', 'cat', 'mouse'], mask=[False, False, True, False, True])
df = vaex.from_arrays(s=s)
assert (df.s == 'cat').tolist() == [False, False, False, True, False]
def test_string_operations_from_mmap_file(tmpdir):
# if we write the file to disk and mmap it read only, we trigger invalid memory writes
# see https://github.com/vaexio/vaex/pull/459
x = np.arange(5)
y = np.array(['This', 'is', 'a', None, 'test'])
df = vaex.from_arrays(x=x, y=y)
filename = str(tmpdir / 'test.hdf5')
df.export_hdf5(filename)
df_from_file = vaex.open(filename)
assert df_from_file.y.str.slice(start=0, stop=2).tolist() == ['Th', 'is', 'a', None, 'te']
assert df_from_file.y.str.upper().tolist() == ['THIS', 'IS', 'A', None, 'TEST']
def test_string_operation_empty_df(df_factory):
df = df_factory(s=["aap", "noot"])
df[df.s == "MIES"].s.unique()
def test_string_split():
df = vaex.from_arrays(s=["aap noot mies", None, "kees", ""])
assert df.s.str.split().tolist() == [["aap", "noot", "mies"], None, ["kees"], [""]]
assert df.s.str.split(max_splits=1).tolist() == [["aap", "noot mies"], None, ["kees"], [""]]
def test_string_rsplit():
df = vaex.from_arrays(s=["aap noot mies", None, "kees", ""])
assert df.s.str.rsplit(pattern='noot').tolist() == [['aap ', ' mies'], None, ['kees'], ['']]
assert df.s.str.rsplit(pattern='noot', max_splits=1).tolist() == [['aap ', ' mies'], None, ['kees'], ['']]
def test_string_split_upper():
df = vaex.from_arrays(s=["aap noot mies", None, "kees", ""])
assert df.s.str.split().str.upper().tolist() == [["AAP", "NOOT", "MIES"], None, ["KEES"], [""]]
def test_string_split_contains():
df = vaex.from_arrays(s=["aap noot mies", None, "kees", ""])
assert df.s.str.split().str.contains('aap').tolist() == [[True, False, False], None, [False], [False]]
def test_string_join_split_nested():
df = vaex.from_arrays(s=['foo-bar a-b', '1-2 3-4-5'])
level1 = df.s.str.split(' ')
assert level1.tolist() == [['foo-bar', 'a-b'], ['1-2', '3-4-5']]
level2 = level1.str.split('-')
assert level2.tolist() == [[['foo', 'bar'], ['a', 'b']], [['1', '2'], ['3', '4', '5']]]
level1 = level2.str.join('**')
assert level1.tolist() == [['foo**bar', 'a**b'], ['1**2', '3**4**5']]
level0 = level1.str.join('--')
assert level0.tolist() == ['foo**bar--a**b', '1**2--3**4**5']
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@tests@strings_test.py@.PATH_END.py
|
{
"filename": "merge.py",
"repo_name": "dirac-institute/hybrid_sso_catalogue",
"repo_path": "hybrid_sso_catalogue_extracted/hybrid_sso_catalogue-main/hybridcat/merge.py",
"type": "Python"
}
|
from scipy.spatial import cKDTree
import numpy as np
import pandas as pd
import dask
from dask.distributed import Client, wait
@dask.delayed
def merge_magnitude_bin(sim, real, min_mag, max_mag, k=100, d_max=0.1, output_folder="output/"):
    """Merge the simulated solar system catalogue with the real MPCORB data for a certain magnitude bin.

    Parameters
    ----------
    sim : `pandas DataFrame`
        Dataframe with the simulated catalogue (must contain x,y,z,vx,vy,vz and H)
    real : `pandas DataFrame`
        Dataframe with the real catalogue (must contain x,y,z,vx,vy,vz and H)
    min_mag : `float`
        Minimum magnitude to consider for this merge
    max_mag : `float`
        Maximum magnitude to consider for this merge
    k : `int`, optional
        Maximum number of neighbours to find, by default 100
    d_max : `float`, optional
        Maximum distance within which to find neighbours in AU, by default 0.1
    output_folder : `str`, optional
        Path to the folder in which output will be stored, by default "output/"

    Returns
    -------
    taken_ids : `float/array`
        An array of the ids that have been replaced by the real objects in this magnitude bin
    """
    real_xyz = np.array([real["x"].values, real["y"].values, real["z"].values]).T
    sim_xyz = np.array([sim["x"].values, sim["y"].values, sim["z"].values]).T
    v_sim = np.array([sim.vx.values, sim.vy.values, sim.vz.values])
    v_real = np.array([real.vx.values, real.vy.values, real.vz.values]).T

    # get the matching simulated data and build a K-D Tree
    sim_mag_mask = np.asarray(np.logical_and(sim.H >= min_mag, sim.H < max_mag))
    sim_id = sim.id[sim_mag_mask].values

    # BUG FIX: the indices returned by the tree refer to the magnitude-masked
    # subset of the simulated catalogue, so the velocity array must be masked
    # the same way (as `real_velocities` already is). Previously `v_sim`
    # spanned the full catalogue, so the velocity comparison below was made
    # against the wrong objects.
    v_sim = v_sim[:, sim_mag_mask]

    tree = cKDTree(sim_xyz[sim_mag_mask])

    # get the matching real data from MPCORB
    real_mag_mask = np.logical_and(real.H >= min_mag, real.H < max_mag)
    real_objects = real_xyz[real_mag_mask]
    real_velocities = v_real[real_mag_mask]

    # keep track of objects already assigned
    taken = []
    all_inds = range(len(sim_id))

    # check if there are enough simulated objects to match all real objects
    if len(sim_id) <= len(real_objects):
        print("WARNING: Not enough simulated objects to match all real objects in magnitude bin {}-{}".format(min_mag, max_mag))
        np.save(output_folder + "matched_{}_{}.npy".format(min_mag, max_mag), np.array(sim_id))
        return np.array(sim_id)

    # iterate over every object in the real catalogue
    for obj, vel in zip(real_objects, real_velocities):
        # find the nearest k neighbours within d_max; cKDTree reports misses
        # with infinite distance (and index == n), so mask those out
        distances, inds = tree.query(obj, k=k, distance_upper_bound=d_max)
        finite = np.isfinite(distances)
        distances, inds = distances[finite], inds[finite]

        # get only the options which haven't yet been assigned
        unassigned_inds = np.setdiff1d(inds, taken, assume_unique=True)

        # if there are many matching objects
        if len(unassigned_inds) > 1:
            # find the closest velocity of the bunch and assign it
            best = np.sum((v_sim[:, unassigned_inds] - vel[:, np.newaxis])**2, axis=0).argmin()
            taken.append(unassigned_inds[best])
        # if only one then just immediately assign it
        elif len(unassigned_inds) == 1:
            taken.append(unassigned_inds[0])
        # if none are found nearby then pick a random object from the same magnitude bin
        else:
            taken.append(np.random.choice(np.setdiff1d(all_inds, taken, assume_unique=True)))

    np.save(output_folder + "matched_{}_{}.npy".format(min_mag, max_mag), np.array(sim_id[taken]))
    return np.array(sim_id[taken])
def merge_catalogues(mpcorb, s3m, output_folder="output/", H_bins=np.arange(-2, 28 + 0.5, 0.5), n_workers=48,
                     memory_limit="16GB", timeout=300, k=100, d_max=0.1):
    """Merge mpcorb and s3m!

    Parameters
    ----------
    mpcorb : `Pandas Dataframe`
        MPCORB catalogue
    s3m : `Pandas DataFrame`
        S3m catalogue
    output_folder : `str`, optional
        Path to the output folder, by default "output/"
    H_bins : `list`, optional
        Magnitude bins, by default np.arange(-2, 28 + 0.5, 0.5)
    n_workers : `int`, optional
        How many workers to use, by default 48
    memory_limit : `str`, optional
        Limit on the memory used by each worker, by default "16GB"
    timeout : `int`, optional
        Timeout for dask, by default 300
    k : `int`, optional
        Number of neighbours to consider in matching, by default 100
    d_max : `float`, optional
        Maximum distance a neighbour can occur at in AU, by default 0.1

    Returns
    -------
    matched_ids : `list`
        The IDs of S3m objects that can be replaced by MPCORB ones
    """
    # start the Dask client
    client = Client(n_workers=n_workers, threads_per_worker=1, memory_limit=memory_limit)
    # scatter the catalogues to avoid huge (serialized) task payloads;
    # broadcast=True places one copy on every worker, and the trailing comma
    # unpacks the single-element list that scatter returns
    s3m_handle, = client.scatter([s3m], broadcast=True)
    mpcorb_handle, = client.scatter([mpcorb], broadcast=True)
    # prepare the run each magnitude bin (merge_magnitude_bin is dask.delayed,
    # so these calls only build the task graph)
    output = []
    for left, right in zip(H_bins[:-1], H_bins[1:]):
        output.append(merge_magnitude_bin(sim=s3m_handle, real=mpcorb_handle, min_mag=left, max_mag=right,
                                          k=k, d_max=d_max, output_folder=output_folder))
    # set them all running
    results = client.compute(output, timeout=timeout)
    wait(results)
    # save the final result in
    # np.save(output_folder + "all.npy", results)
    # NOTE(review): returns a list of dask Futures, not materialized arrays —
    # confirm callers gather/compute them as needed
    return results
|
dirac-instituteREPO_NAMEhybrid_sso_cataloguePATH_START.@hybrid_sso_catalogue_extracted@hybrid_sso_catalogue-main@hybridcat@merge.py@.PATH_END.py
|
{
"filename": "imaging_plots.py",
"repo_name": "tvwenger/wisp",
"repo_path": "wisp_extracted/wisp-master/wisp/imaging_plots.py",
"type": "Python"
}
|
"""
imaging_plots.py - WISP plotting utilities for imaging
Copyright(C) 2018-2022 by
Trey V. Wenger; tvwenger@gmail.com
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Changelog:
Trey V. Wenger - April 2021 - v3.0
Improve code readability.
"""
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from astropy.io import fits
from astropy.wcs import WCS
from .utils import natural_sort
def contplot(img):
    """
    Generate PDF of MFS diagnostic plots

    Inputs:
        img :: Imaging object
            The imaging object

    Returns: Nothing
    """
    # Get center pixels
    center_x = int(img.cp["imsize"][0] / 2)
    center_y = int(img.cp["imsize"][1] / 2)
    # Loop over all plot filenames
    goodplots = []
    spws = ["cont"] + natural_sort(img.cont_spws + img.line_spws)
    for spw in spws:
        if spw != "cont":
            spw = "spw{0}".format(spw)
        # check that this spectral window exists
        fname = "{0}.{1}.{2}.mfs.clean.image.fits".format(img.imfield, spw, img.stokes)
        if img.uvtaper:
            fname = "{0}.{1}.{2}.mfs.uvtaper.clean.image.fits".format(
                img.imfield, spw, img.stokes
            )
        fname = os.path.join(img.outdir, fname)
        if not os.path.exists(fname):
            continue
        # Build the per-spw lists of images/masks/titles depending on whether
        # uv-tapered imaging was used; the four entries are dirty, clean,
        # residual, and primary-beam-corrected images.
        if img.uvtaper:
            fitsfiles = [
                "{0}.{1}.{2}.mfs.uvtaper.dirty.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.{1}.{2}.mfs.uvtaper.clean.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.{1}.{2}.mfs.uvtaper.clean.residual.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.{1}.{2}.mfs.uvtaper.clean.pbcor.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
            ]
            maskfiles = [
                "{0}.{1}.{2}.mfs.uvtaper.clean.mask.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.{1}.{2}.mfs.uvtaper.clean.mask.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.{1}.{2}.mfs.uvtaper.clean.mask.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.{1}.{2}.mfs.uvtaper.clean.mask.fits".format(
                    img.imfield, spw, img.stokes
                ),
            ]
            titles = [
                "{0} - {1} - Taper/Dirty".format(img.imfield, spw),
                "{0} - {1} - Taper/Clean".format(img.imfield, spw),
                "{0} - {1} - Taper/Residual".format(img.imfield, spw),
                "{0} - {1} - Taper/PBCorr".format(img.imfield, spw),
            ]
            labels = [
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
            ]
            vlims = [
                (None, None),
                (None, None),
                (None, None),
                (None, None),
            ]
        else:
            fitsfiles = [
                "{0}.{1}.{2}.mfs.dirty.image.fits".format(img.imfield, spw, img.stokes),
                "{0}.{1}.{2}.mfs.clean.image.fits".format(img.imfield, spw, img.stokes),
                "{0}.{1}.{2}.mfs.clean.residual.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.{1}.{2}.mfs.clean.pbcor.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
            ]
            maskfiles = [
                "{0}.{1}.{2}.mfs.clean.mask.fits".format(img.imfield, spw, img.stokes),
                "{0}.{1}.{2}.mfs.clean.mask.fits".format(img.imfield, spw, img.stokes),
                "{0}.{1}.{2}.mfs.clean.mask.fits".format(img.imfield, spw, img.stokes),
                "{0}.{1}.{2}.mfs.clean.mask.fits".format(img.imfield, spw, img.stokes),
            ]
            titles = [
                "{0} - {1} - Dirty".format(img.imfield, spw),
                "{0} - {1} - Clean".format(img.imfield, spw),
                "{0} - {1} - Residual".format(img.imfield, spw),
                "{0} - {1} - PBCorr".format(img.imfield, spw),
            ]
            labels = [
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
            ]
            vlims = [
                (None, None),
                (None, None),
                (None, None),
                (None, None),
            ]
        # Generate figures for each Stokes parameter
        # (img.stokes is iterated per-parameter and also used whole in the
        # filenames above)
        for ind, stokes in enumerate(img.stokes):
            # Loop over figures
            for fitsfile, maskfile, title, label, vlim in zip(
                fitsfiles, maskfiles, titles, labels, vlims
            ):
                # Open fits file, generate WCS
                hdulist = fits.open(os.path.join(img.outdir, fitsfile))
                hdu = hdulist[0]
                wcs = WCS(hdu.header)
                # Set-up figure
                plt.ioff()
                fig = plt.figure()
                ax = plt.subplot(projection=wcs.sub(["celestial"]))
                ax.set_title("{0} - {1}".format(title, stokes))
                # NOTE(review): indexing assumes data axes are ordered
                # (stokes, channel, y, x) — confirm against imaging output
                cax = ax.imshow(
                    hdu.data[ind, 0],
                    origin="lower",
                    interpolation="none",
                    cmap="binary",
                    vmin=vlim[0],
                    vmax=vlim[1],
                )
                # ax.grid(True,color='black',ls='solid')
                if img.cp["frame"] == "J2000":
                    ax.coords[0].set_major_formatter("hh:mm:ss")
                    ax.set_xlabel("RA (J2000)")
                    ax.set_ylabel("Declination (J2000)")
                elif img.cp["frame"] == "GALACTIC":
                    ax.set_xlabel("Galactic Longitude")
                    ax.set_ylabel("Galactic Latitude")
                # Plot clean mask as contour
                if maskfile != "":
                    maskhdu = fits.open(os.path.join(img.outdir, maskfile))[0]
                    _ = ax.contour(
                        maskhdu.data[ind, 0],
                        levels=[0.5],
                        origin="lower",
                        colors="r",
                        linewidths=2,
                    )
                # Plot beam, if it is defined (single beam in the primary
                # header, or a per-plane beam table in a second HDU)
                if "BMAJ" in hdu.header.keys():
                    cell = float(img.cp["cell"].replace("arcsec", ""))
                    beam_maj = hdu.header["BMAJ"] * 3600.0 / cell
                    beam_min = hdu.header["BMIN"] * 3600.0 / cell
                    beam_pa = hdu.header["BPA"]
                    ellipse = Ellipse(
                        (
                            center_x - int(3.0 * center_x / 4),
                            center_y - int(3.0 * center_y / 4),
                        ),
                        beam_min,
                        beam_maj,
                        angle=beam_pa,
                        fill=True,
                        zorder=10,
                        hatch="///",
                        edgecolor="black",
                        facecolor="white",
                    )
                    ax.add_patch(ellipse)
                elif len(hdulist) > 1:
                    hdu = hdulist[1]
                    cell = float(img.cp["cell"].replace("arcsec", ""))
                    beam_maj = hdu.data["BMAJ"][ind] / cell
                    beam_min = hdu.data["BMIN"][ind] / cell
                    beam_pa = hdu.data["BPA"][ind]
                    ellipse = Ellipse(
                        (
                            center_x - int(3.0 * center_x / 4),
                            center_y - int(3.0 * center_y / 4),
                        ),
                        beam_min,
                        beam_maj,
                        angle=beam_pa,
                        fill=True,
                        zorder=10,
                        hatch="///",
                        edgecolor="black",
                        facecolor="white",
                    )
                    ax.add_patch(ellipse)
                # Plot colorbar
                cbar = fig.colorbar(cax, fraction=0.046, pad=0.04)
                cbar.set_label(label)
                # Re-scale to fit, then save
                fname = fitsfile.replace(".fits", ".{0}.pdf".format(stokes))
                fig.savefig(os.path.join(img.outdir, fname), bbox_inches="tight")
                plt.close(fig)
                plt.ion()
                goodplots.append(fname)
    #
    # Generate PDF of plots
    #
    # need to fix filenames so LaTeX doesn't complain
    outplots = ["{" + fn.replace(".pdf", "") + "}.pdf" for fn in goodplots]
    img.logger.info("Generating PDF...")
    fname = "{0}.contplots.tex".format(img.imfield)
    if img.uvtaper:
        fname = "{0}.uvtaper.contplots.tex".format(img.imfield)
    with open(os.path.join(img.outdir, fname), "w") as f:
        f.write(r"\documentclass{article}" + "\n")
        f.write(r"\usepackage{graphicx}" + "\n")
        f.write(r"\usepackage[margin=0.1cm]{geometry}" + "\n")
        f.write(r"\begin{document}" + "\n")
        # NOTE(review): assumes len(outplots) is a multiple of 4 (four plots
        # per spw/stokes); an IndexError would occur otherwise
        for i in range(0, len(outplots), 4):
            f.write(r"\begin{figure}" + "\n")
            f.write(r"\centering" + "\n")
            f.write(
                r"\includegraphics[width=0.45\textwidth]{" + outplots[i] + r"}" + "\n"
            )
            f.write(
                r"\includegraphics[width=0.45\textwidth]{"
                + outplots[i + 1]
                + r"} \\"
                + "\n"
            )
            f.write(
                r"\includegraphics[width=0.45\textwidth]{"
                + outplots[i + 2]
                + r"}"
                + "\n"
            )
            f.write(
                r"\includegraphics[width=0.45\textwidth]{"
                + outplots[i + 3]
                + r"}"
                + "\n"
            )
            f.write(r"\end{figure}" + "\n")
            f.write(r"\clearpage" + "\n")
        f.write(r"\end{document}")
    os.chdir(img.outdir)
    os.system("pdflatex -interaction=batchmode {0}".format(fname))
    os.chdir("..")
    img.logger.info("Done.")
def lineplot(img):
    """
    Generate PDF of channel cube diagnostic plots.

    For every line spectral window that was actually imaged, render the
    dirty / clean / residual / primary-beam-corrected channel maps (the
    central channel only) for each Stokes parameter, overlay the clean
    mask and the synthesized beam, extract a spectrum at the image
    center, and finally compile all pages into one PDF with pdflatex.

    Inputs:
        img :: Imaging object
            The Imaging object
    Returns: Nothing
    """
    # Get center pixels
    center_x = int(img.cp["imsize"][0] / 2)
    center_y = int(img.cp["imsize"][1] / 2)
    # Loop over spws
    goodplots = []
    for spw in img.line_spws:
        # check that this spectral window exists (skip spws that were
        # never cleaned; the clean image is used as the existence probe)
        fname = "{0}.spw{1}.{2}.channel.clean.image.fits".format(
            img.imfield, spw, img.stokes
        )
        if img.uvtaper:
            fname = "{0}.spw{1}.{2}.channel.uvtaper.clean.image.fits".format(
                img.imfield, spw, img.stokes
            )
        fname = os.path.join(img.outdir, fname)
        if not os.path.exists(fname):
            continue
        # Loop over all plot filenames. The uvtaper and no-taper branches
        # are identical except for the ".uvtaper" filename component and
        # the "Taper/" title prefix.
        if img.uvtaper:
            fitsfiles = [
                "{0}.spw{1}.{2}.channel.uvtaper.dirty.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.spw{1}.{2}.channel.uvtaper.clean.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.spw{1}.{2}.channel.uvtaper.clean.residual.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.spw{1}.{2}.channel.uvtaper.clean.pbcor.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
            ]
            maskfile = "{0}.spw{1}.{2}.channel.uvtaper.clean.mask.fits".format(
                img.imfield, spw, img.stokes
            )
            titles = [
                "{0} - {1} - Taper/Dirty".format(img.imfield, spw),
                "{0} - {1} - Taper/Clean".format(img.imfield, spw),
                "{0} - {1} - Taper/Residual".format(img.imfield, spw),
                "{0} - {1} - Taper/PBCorr".format(img.imfield, spw),
            ]
            labels = [
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
            ]
            # (vmin, vmax) per panel; (None, None) lets matplotlib autoscale
            vlims = [
                (None, None),
                (None, None),
                (None, None),
                (None, None),
            ]
        else:
            fitsfiles = [
                "{0}.spw{1}.{2}.channel.dirty.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.spw{1}.{2}.channel.clean.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.spw{1}.{2}.channel.clean.residual.fits".format(
                    img.imfield, spw, img.stokes
                ),
                "{0}.spw{1}.{2}.channel.clean.pbcor.image.fits".format(
                    img.imfield, spw, img.stokes
                ),
            ]
            maskfile = "{0}.spw{1}.{2}.channel.clean.mask.fits".format(
                img.imfield, spw, img.stokes
            )
            titles = [
                "{0} - {1} - Dirty".format(img.imfield, spw),
                "{0} - {1} - Clean".format(img.imfield, spw),
                "{0} - {1} - Residual".format(img.imfield, spw),
                "{0} - {1} - PBCorr".format(img.imfield, spw),
            ]
            labels = [
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
                "Flux Density (Jy/beam)",
            ]
            vlims = [
                (None, None),
                (None, None),
                (None, None),
                (None, None),
            ]
        # Generate figures for each Stokes parameter
        for ind, stokes in enumerate(img.stokes):
            # Loop over figures
            for fitsfile, title, label, vlim in zip(fitsfiles, titles, labels, vlims):
                # Open fits file, generate WCS
                hdulist = fits.open(os.path.join(img.outdir, fitsfile))
                hdu = hdulist[0]
                wcs = WCS(hdu.header)
                # Generate figure (interactive mode off while rendering)
                plt.ioff()
                fig = plt.figure()
                ax = plt.subplot(projection=wcs.sub(["celestial"]))
                ax.set_title("{0} - {1}".format(title, stokes))
                # NOTE(review): assumes the data cube axes are ordered
                # (stokes, channel, y, x) — TODO confirm against the FITS
                # headers produced upstream.
                center_chan = int(hdu.data.shape[1] // 2)
                cax = ax.imshow(
                    hdu.data[ind, center_chan],
                    origin="lower",
                    interpolation="none",
                    cmap="binary",
                    vmin=vlim[0],
                    vmax=vlim[1],
                )
                # ax.grid(True,color='black',ls='solid')
                if img.cp["frame"] == "J2000":
                    ax.coords[0].set_major_formatter("hh:mm:ss")
                    ax.set_xlabel("RA (J2000)")
                    ax.set_ylabel("Declination (J2000)")
                elif img.cp["frame"] == "GALACTIC":
                    ax.set_xlabel("Galactic Longitude")
                    ax.set_ylabel("Galactic Latitude")
                # Plot clean mask as contour (0.5 level separates the
                # 0/1 mask values)
                maskhdu = fits.open(os.path.join(img.outdir, maskfile))[0]
                _ = ax.contour(
                    maskhdu.data[ind, center_chan],
                    levels=[0.5],
                    origin="lower",
                    colors="r",
                    linewidths=2,
                )
                # Plot beam, if it is defined. BMAJ/BMIN are degrees in the
                # header, converted here to pixels via the cell size.
                if "BMAJ" in hdu.header.keys():
                    cell = float(img.cp["cell"].replace("arcsec", ""))
                    beam_maj = hdu.header["BMAJ"] * 3600.0 / cell
                    beam_min = hdu.header["BMIN"] * 3600.0 / cell
                    beam_pa = hdu.header["BPA"]
                    ellipse = Ellipse(
                        (
                            center_x - int(3.0 * center_x / 4),
                            center_y - int(3.0 * center_y / 4),
                        ),
                        beam_min,
                        beam_maj,
                        angle=beam_pa,
                        fill=True,
                        zorder=10,
                        hatch="///",
                        edgecolor="black",
                        facecolor="white",
                    )
                    ax.add_patch(ellipse)
                elif len(hdulist) > 1:
                    # Per-channel beam table in the second HDU; use the
                    # central channel's beam. Assumes BMAJ/BMIN are already
                    # in arcsec here (no 3600 factor) — note the asymmetry
                    # with the header branch above.
                    hdu = hdulist[1]
                    cell = float(img.cp["cell"].replace("arcsec", ""))
                    beam_maj = hdu.data["BMAJ"][center_chan] / cell
                    beam_min = hdu.data["BMIN"][center_chan] / cell
                    beam_pa = hdu.data["BPA"][center_chan]
                    ellipse = Ellipse(
                        (
                            center_x - int(3.0 * center_x / 4),
                            center_y - int(3.0 * center_y / 4),
                        ),
                        beam_min,
                        beam_maj,
                        angle=beam_pa,
                        fill=True,
                        zorder=10,
                        hatch="///",
                        edgecolor="black",
                        facecolor="white",
                    )
                    ax.add_patch(ellipse)
                # Plot colorbar
                cbar = fig.colorbar(cax, fraction=0.046, pad=0.04)
                cbar.set_label(label)
                # Re-scale to fit, then save
                fname = fitsfile.replace(".fits", ".{0}.pdf".format(stokes))
                fig.savefig(os.path.join(img.outdir, fname), bbox_inches="tight")
                plt.close(fig)
                plt.ion()
                goodplots.append(fname)
            # Generate spectrum at the image center for this Stokes
            if img.uvtaper:
                fitsfile = "{0}.spw{1}.{2}.channel.uvtaper.clean.image.fits".format(
                    img.imfield, spw, img.stokes
                )
            else:
                fitsfile = "{0}.spw{1}.{2}.channel.clean.image.fits".format(
                    img.imfield, spw, img.stokes
                )
            hdu = fits.open(os.path.join(img.outdir, fitsfile))[0]
            # NOTE(review): numpy image axes are (y, x), so indexing
            # [ind, :, center_x, center_y] looks transposed; it is harmless
            # when the image is square (center_x == center_y) — confirm.
            spec = hdu.data[ind, :, center_x, center_y]
            # treat exactly-zero channels as blanked and mask them with NaN
            isnan = spec == 0.0
            spec[isnan] = np.nan
            # velocity axis in km/s from the FITS spectral axis (m/s assumed)
            velo = (
                np.arange(len(spec)) * hdu.header["CDELT3"] + hdu.header["CRVAL3"]
            ) / 1000.0
            # Generate figure
            plt.ioff()
            fig = plt.figure()
            ax = plt.subplot()
            ax.plot(velo, spec, "k-")
            ax.set_xlabel("Velocity (km/s)")
            ax.set_ylabel("Flux Density (Jy/beam)")
            ax.set_xlim(np.nanmin(velo), np.nanmax(velo))
            # fall back to a fixed y-range when every channel is blanked
            if np.all(np.isnan(spec)):
                ax.set_ylim(-1.0, 1.0)
            else:
                ybuff = 0.1 * (np.nanmax(spec) - np.nanmin(spec))
                ax.set_ylim(np.nanmin(spec) - ybuff, np.nanmax(spec) + ybuff)
            if img.uvtaper:
                ax.set_title(
                    "{0} - {1} - Taper/Center - {2}".format(img.imfield, spw, stokes)
                )
            else:
                ax.set_title(
                    "{0} - {1} - Center - {2}".format(img.imfield, spw, stokes)
                )
            ax.grid(False)
            fig.tight_layout()
            fname = fitsfile.replace(".fits", ".{0}.spec.pdf".format(stokes))
            fig.savefig(os.path.join(img.outdir, fname), bbox_inches="tight")
            plt.close(fig)
            plt.ion()
            goodplots.append(fname)
    # Generate PDF of plots
    # need to fix filenames so LaTeX doesn't complain (braces protect dots)
    goodplots = ["{" + fn.replace(".pdf", "") + "}.pdf" for fn in goodplots]
    img.logger.info("Generating PDF...")
    fname = "{0}.lineplots.tex".format(img.imfield)
    if img.uvtaper:
        fname = "{0}.uvtaper.lineplots.tex".format(img.imfield)
    with open(os.path.join(img.outdir, fname), "w") as f:
        f.write(r"\documentclass{article}" + "\n")
        f.write(r"\usepackage{graphicx}" + "\n")
        f.write(r"\usepackage[margin=0.1cm]{geometry}" + "\n")
        f.write(r"\begin{document}" + "\n")
        # 5 plots per figure page (4 maps + 1 spectrum per Stokes).
        # NOTE(review): indices i+1..i+4 assume len(goodplots) is a
        # multiple of 5, which holds because exactly 5 plots are appended
        # per (spw, stokes) — confirm if the plot set ever changes.
        for i in range(0, len(goodplots), 5):
            f.write(r"\begin{figure}" + "\n")
            f.write(r"\centering" + "\n")
            f.write(r"\includegraphics[width=0.45\textwidth]{" + goodplots[i] + "}\n")
            f.write(
                r"\includegraphics[width=0.45\textwidth]{"
                + goodplots[i + 1]
                + r"} \\"
                + "\n"
            )
            f.write(
                r"\includegraphics[width=0.45\textwidth]{" + goodplots[i + 2] + "}\n"
            )
            f.write(
                r"\includegraphics[width=0.45\textwidth]{"
                + goodplots[i + 3]
                + r"} \\"
                + "\n"
            )
            f.write(
                r"\includegraphics[width=0.45\textwidth]{" + goodplots[i + 4] + "}\n"
            )
            f.write(r"\end{figure}" + "\n")
            f.write(r"\clearpage" + "\n")
        f.write(r"\end{document}")
    os.chdir(img.outdir)
    os.system("pdflatex -interaction=batchmode {0}".format(fname))
    os.chdir("..")
    img.logger.info("Done.")
|
tvwengerREPO_NAMEwispPATH_START.@wisp_extracted@wisp-master@wisp@imaging_plots.py@.PATH_END.py
|
{
"filename": "plotting.py",
"repo_name": "epfl-radio-astro/scatternet",
"repo_path": "scatternet_extracted/scatternet-main/scatternet/utils/plotting.py",
"type": "Python"
}
|
import itertools

import matplotlib.pyplot as plt
import numpy as np
from sklearn import svm, metrics
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
def plot_features(x0, x_in, y_in, feature_matrix, indices, feature_labels,label_list):
    """Display a grid of images for the selected samples.

    Each row corresponds to one entry of *indices*: the raw input image,
    the cleaned input image, and one panel per feature plane from
    *feature_matrix*.  The first row carries rotated column titles, and
    each row is labelled with the sample's class name from *label_list*.
    """
    n_feature_panels = feature_matrix.shape[1]
    plt.clf()
    fig, axs = plt.subplots(len(indices), n_feature_panels + 2, figsize=(10, 6))
    plt.tick_params(left=False, bottom=False)
    fig.subplots_adjust(hspace=0, wspace=0, bottom=0.01, left=0.25, top=0.7, right=0.99)
    for row, sample in enumerate(indices):
        if row == 0:
            # Rotated column headers, only on the first row.
            headers = ["Original input image", "Cleaned input image"] + list(feature_labels)
            for col, text in enumerate(headers):
                title_handle = axs[row, col].set_title(text, fontsize=10)
                title_handle.set_rotation(70)
        # Column 0: raw image, labelled with the sample's class.
        axs[row, 0].imshow(x0[sample, :, :])
        axs[row, 0].set_yticklabels([])
        axs[row, 0].set_xticklabels([])
        row_label = axs[row, 0].set_ylabel(label_list[y_in[sample]], fontsize=10, loc='top')
        row_label.set_rotation(-10)
        # Column 1: cleaned image.
        axs[row, 1].imshow(x_in[sample, :, :])
        axs[row, 1].set_yticklabels([])
        axs[row, 1].set_xticklabels([])
        # Remaining columns: one panel per feature plane.
        for offset in range(n_feature_panels):
            panel = axs[row, offset + 2]
            panel.imshow(feature_matrix[sample, offset, :, :])
            panel.set_yticklabels([])
            panel.set_xticklabels([])
    plt.show()
def bench_k_means(estimator, data, labels):
    """Score a fitted clustering *estimator* against ground-truth *labels*.

    Computes five label-based clustering metrics plus the silhouette
    score (sub-sampled to 300 points) and returns them as one
    pre-formatted summary string.
    """
    # Metrics that compare the predicted cluster labels with the truth.
    label_metrics = (
        metrics.homogeneity_score,
        metrics.completeness_score,
        metrics.v_measure_score,
        metrics.adjusted_rand_score,
        metrics.adjusted_mutual_info_score,
    )
    scores = [score_fn(labels, estimator.labels_) for score_fn in label_metrics]
    # The silhouette score needs the data itself, not just the labels.
    scores.append(
        metrics.silhouette_score(
            data,
            estimator.labels_,
            metric="euclidean",
            sample_size=300,
        )
    )
    template = (
        "Homogeneity: {:.3f}; Completeness: {:.3f}; V Measure: {:.3f};\n Adjusted rand: {:.3f}; Adjusted mutual info: {:.3f}; Silhouette: {:.3f}"
    )
    return template.format(*scores)
def make_pca_plot(feature_matrix, y_test,titlestr, outsr ):
    """Project features to 2-D with PCA, cluster with K-means and plot.

    The feature matrix is standardized, reduced to its first two
    principal components, clustered with K-means (one cluster per
    distinct label in *y_test*), and rendered as a Voronoi-style
    decision map with the samples and cluster centroids overlaid.
    The figure is saved to *outsr*.

    Parameters:
        feature_matrix : array, shape (n_samples, ...)
            Features; flattened per sample before standardization.
        y_test : array of labels, one per sample.
        titlestr : str, plot title prefix.
        outsr : str, output filename for the saved figure.
    """
    print(feature_matrix.shape)
    # Standardize, then reduce to two principal components for plotting.
    feature_matrix = StandardScaler().fit_transform(feature_matrix.reshape(feature_matrix.shape[0],-1))
    reduced_data = PCA(2).fit_transform(feature_matrix)
    reduced_data = np.array(reduced_data, dtype=np.double)
    # Derive the cluster count and the label values from y_test
    # (previously these were undefined globals `n_classes` and `keys`,
    # which raised NameError at runtime).
    class_values = np.unique(y_test)
    n_classes = len(class_values)
    kmeans = KMeans(init="k-means++", n_clusters=n_classes, n_init=4)
    estimator = kmeans.fit(reduced_data)
    score_str = bench_k_means(estimator, reduced_data, y_test)
    print(score_str)
    # Step size of the mesh. Decrease to increase the quality of the VQ.
    h = 0.02  # point in the mesh [x_min, x_max]x[y_min, y_max].
    # Plot the decision boundary: assign a color to each mesh point.
    x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
    y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Obtain labels for each point in mesh. Use last trained model.
    Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure(1)
    plt.clf()
    plt.imshow(
        Z,
        interpolation="nearest",
        extent=(xx.min(), xx.max(), yy.min(), yy.max()),
        cmap=plt.cm.Paired,
        aspect="auto",
        origin="lower",
    )
    # One marker style per true class, cycling if there are many classes.
    marker = itertools.cycle((',', '+', '.', 'o', '*'))
    for k in class_values:
        k_reduced_data = reduced_data[np.where(y_test == k)]
        plt.plot(k_reduced_data[:, 0], k_reduced_data[:, 1], marker=next(marker), markersize=2, linestyle='')
    # Plot the centroids as a white X
    centroids = kmeans.cluster_centers_
    plt.scatter(
        centroids[:, 0],
        centroids[:, 1],
        marker="x",
        s=169,
        linewidths=3,
        color="w",
        zorder=10,
    )
    title = titlestr + "\nCentroids are marked with white cross\n" + score_str
    plt.title(title)
    plt.xlim(x_min, x_max)
    plt.ylim(y_min, y_max)
    plt.xticks(())
    plt.yticks(())
    plt.savefig(outsr)
    #plt.show()
def make_svm_plot(X, y, target_names, titlestr, outsr ):
    """Fit an SVM on PCA-reduced features and show its confusion matrix.

    The 4-D feature array *X* is flattened per sample, standardized,
    reduced with PCA, classified with an SVC, and the (training-set)
    classification report and confusion matrix are displayed.

    Parameters:
        X : array, shape (n_samples, d1, d2, d3)
        y : array of true labels, one per sample.
        target_names : class names for the report / matrix axes.
        titlestr, outsr : accepted for API symmetry with make_pca_plot;
            currently unused — TODO: set the title and save to outsr?
    """
    n_features = X.shape[1]*X.shape[2]*X.shape[3]
    print(n_features)
    feature_matrix = StandardScaler().fit_transform(X.reshape(X.shape[0],-1))
    # n_components must be a positive int; the original passed
    # min(n_features/2., 50), a float > 1, which sklearn's PCA rejects.
    n_components = int(min(n_features // 2, 50))
    X_pca = PCA(n_components).fit_transform(feature_matrix)
    print(X_pca.shape)
    clf = svm.SVC()
    clf.fit(X_pca, y)
    # Predictions on the training data itself (resubstitution accuracy).
    y_pred = clf.predict(X_pca)
    print(metrics.classification_report(y, y_pred, target_names=target_names))
    metrics.ConfusionMatrixDisplay.from_estimator(
        clf, X_pca, y, display_labels=target_names, xticks_rotation="vertical"
    )
    plt.tight_layout()
    plt.show()
|
epfl-radio-astroREPO_NAMEscatternetPATH_START.@scatternet_extracted@scatternet-main@scatternet@utils@plotting.py@.PATH_END.py
|
{
"filename": "google_jobs.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/utilities/google_jobs.py",
"type": "Python"
}
|
"""Util that calls Google Scholar Search."""
from typing import Any, Dict, Optional, cast
from langchain_core.utils import convert_to_secret_str, get_from_dict_or_env
from pydantic import BaseModel, ConfigDict, SecretStr, model_validator
class GoogleJobsAPIWrapper(BaseModel):
    """Wrapper for SerpApi's Google Jobs API.

    You can create SerpApi.com key by signing up at: https://serpapi.com/users/sign_up.
    The wrapper uses the SerpApi.com python package:
    https://serpapi.com/integrations/python
    To use, you should have the environment variable ``SERPAPI_API_KEY``
    set with your API key, or pass `serp_api_key` as a named parameter
    to the constructor.
    Example:
        .. code-block:: python
        from langchain_community.utilities import GoogleJobsAPIWrapper
        google_Jobs = GoogleJobsAPIWrapper()
        google_Jobs.run('langchain')
    """
    # SerpApi client class, bound lazily by validate_environment.
    serp_search_engine: Any = None
    # API key; resolved from the constructor arg or SERPAPI_API_KEY env var.
    serp_api_key: Optional[SecretStr] = None
    model_config = ConfigDict(
        extra="forbid",
    )
    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """Validate that api key and python package exists in environment."""
        values["serp_api_key"] = convert_to_secret_str(
            get_from_dict_or_env(values, "serp_api_key", "SERPAPI_API_KEY")
        )
        try:
            from serpapi import SerpApiClient
        except ImportError:
            raise ImportError(
                "google-search-results is not installed. "
                "Please install it with `pip install google-search-results"
                ">=2.4.2`"
            )
        serp_search_engine = SerpApiClient
        values["serp_search_engine"] = serp_search_engine
        return values
    def run(self, query: str) -> str:
        """Run *query* through Google Jobs via SerpApi and format the top hit.

        Raises KeyError if the response has no "jobs_results", and
        IndexError if the result list is empty.
        """
        # set up query
        serpapi_api_key = cast(SecretStr, self.serp_api_key)
        params = {
            "engine": "google_jobs",
            "api_key": serpapi_api_key.get_secret_value(),
            "q": query,
        }
        total_results = []
        client = self.serp_search_engine(params)
        total_results = client.get_dict()["jobs_results"]
        # extract 1 job info: only the first result is formatted
        res_str = ""
        for i in range(1):
            job = total_results[i]
            res_str += (
                "\n_______________________________________________"
                + f"\nJob Title: {job['title']}\n"
                + f"Company Name: {job['company_name']}\n"
                + f"Location: {job['location']}\n"
                + f"Description: {job['description']}"
                + "\n_______________________________________________\n"
            )
        return res_str + "\n"
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@utilities@google_jobs.py@.PATH_END.py
|
{
"filename": "evolve_triple_with_wind.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/examples/textbook/evolve_triple_with_wind.py",
"type": "Python"
}
|
"""
N-body integration of N particles with a Salpeter initial mass
function between Mmin and Mmax and with stellar evolution with
metallicity z.
"""
import sys
import numpy
from optparse import OptionParser
from prepare_figure import single_frame
from distinct_colours import get_distinct
from amuse.units import units, constants, nbody_system
from amuse.units.quantities import zero
from amuse.datamodel import Particle, Particles
from amuse.support.console import set_printing_strategy
from amuse.io import store
from amuse.ext.orbital_elements import new_binary_from_orbital_elements
from amuse.ext.orbital_elements import orbital_elements_from_binary
from amuse.community.huayno.interface import Huayno
from amuse.community.smalln.interface import SmallN
from amuse.community.hermite.interface import Hermite
from amuse.community.seba.interface import SeBa
from amuse.community.sse.interface import SSE
def orbital_period(a, Mtot):
    """Return the Keplerian orbital period for semimajor axis a and total mass Mtot."""
    ratio = a**3 / (constants.G * Mtot)
    return 2 * numpy.pi * ratio.sqrt()
def semimajor_axis(P, Mtot):
    """Invert Kepler's third law: semimajor axis for period P and total mass Mtot."""
    cube = constants.G * Mtot * P**2 / (4 * numpy.pi**2)
    return cube**(1./3)
def get_orbital_elements_of_triple(stars):
    """Return (a_in, e_in, a_out, e_out) for a hierarchical triple.

    stars[0] and stars[1] form the inner binary; the outer "binary" pairs
    the inner pair's center of mass (as a single effective particle)
    with stars[2].  Only semimajor axes and eccentricities are returned;
    the remaining orbital elements are computed but discarded.
    """
    inner_binary = stars[0]+stars[1]
    # Effective particle representing the inner binary's barycenter.
    outer_binary = Particles(1)
    outer_binary[0].mass = inner_binary.mass.sum()
    outer_binary[0].position = inner_binary.center_of_mass()
    outer_binary[0].velocity = inner_binary.center_of_mass_velocity()
    outer_binary.add_particle(stars[2])
    M1, M2, ain, ein, ta_in, inc_in, lan_in, aop_in \
        = orbital_elements_from_binary(inner_binary, G=constants.G)
    M12, M3, aout, eout, ta_out, outc_out, lan_out, aop_out \
        = orbital_elements_from_binary(outer_binary, G=constants.G)
    return ain, ein, aout, eout
def evolve_triple_with_wind(M1, M2, M3, Pora, Pin_0, ain_0, aout_0,
                            ein_0, eout_0, t_end, nsteps, scheme, dtse_fac):
    """Evolve a hierarchical triple with stellar mass loss (winds).

    The stars are first aged to 4 Myr with SeBa, then the triple is
    built and integrated with Hermite while stellar evolution continues,
    coupled according to *scheme* (1: stellar step first, 2: gravity
    step first, otherwise: gravity only).  Returns the time series
    (t, a_in/a_in0, e_in/e_in0, a_out/a_out0, e_out/e_out0) sampled at
    ~nsteps diagnostic times.
    """
    import random
    from amuse.ext.solarsystem import get_position
    print("Initial masses:", M1, M2, M3)
    # Pre-age the stars before assembling the dynamical system.
    t_stellar = 4.0|units.Myr
    triple = Particles(3)
    triple[0].mass = M1
    triple[1].mass = M2
    triple[2].mass = M3
    stellar = SeBa()
    stellar.particles.add_particles(triple)
    channel_from_stellar = stellar.particles.new_channel_to(triple)
    stellar.evolve_model(t_stellar)
    channel_from_stellar.copy_attributes(["mass"])
    # Masses after 4 Myr of evolution (winds have reduced them).
    M1 = triple[0].mass
    M2 = triple[1].mass
    M3 = triple[2].mass
    print("T=", stellar.model_time.in_(units.Myr))
    print("M=", stellar.particles.mass.in_(units.MSun))
    print("Masses at time T:", M1, M2, M3)
    # Inner binary: Pora selects whether the period (1) or the
    # semimajor axis (else) is the given quantity.
    tmp_stars = Particles(2)
    tmp_stars[0].mass = M1
    tmp_stars[1].mass = M2
    if Pora == 1:
        ain_0 = semimajor_axis(Pin_0, M1+M2)
    else:
        Pin_0 = orbital_period(ain_0, M1+M2)
    print('ain_0 =', ain_0)
    print('M1+M2 =', M1+M2)
    print('Pin_0 =', Pin_0.value_in(units.day), '[day]')
    #print 'semi:', semimajor_axis(Pin_0, M1+M2).value_in(units.AU), 'AU'
    #print 'period:', orbital_period(ain_0, M1+M2).value_in(units.day), '[day]'
    dt = 0.1*Pin_0
    # Fixed initial orbital angles (degrees) for the inner orbit.
    ma = 180
    inc = 30
    aop = 180
    lon = 0
    r, v = get_position(M1, M2, ein_0, ain_0, ma, inc, aop, lon, dt)
    tmp_stars[1].position = r
    tmp_stars[1].velocity = v
    tmp_stars.move_to_center()
    # Outer binary: tertiary around the inner pair's barycenter.
    r, v = get_position(M1+M2, M3, eout_0, aout_0, 0, 0, 0, 0, dt)
    tertiary = Particle()
    tertiary.mass = M3
    tertiary.position = r
    tertiary.velocity = v
    tmp_stars.add_particle(tertiary)
    tmp_stars.move_to_center()
    triple.position = tmp_stars.position
    triple.velocity = tmp_stars.velocity
    Mtriple = triple.mass.sum()
    Pout = orbital_period(aout_0, Mtriple)
    print("T=", stellar.model_time.in_(units.Myr))
    print("M=", stellar.particles.mass.in_(units.MSun))
    print("Pout=", Pout.in_(units.Myr))
    # N-body integrator, scaled to the outer orbit.
    converter = nbody_system.nbody_to_si(triple.mass.sum(), aout_0)
    gravity = Hermite(converter)
    gravity.particles.add_particles(triple)
    channel_from_framework_to_gd = triple.new_channel_to(gravity.particles)
    channel_from_gd_to_framework = gravity.particles.new_channel_to(triple)
    Etot_init = gravity.kinetic_energy + gravity.potential_energy
    Etot_prev = Etot_init
    gravity.particles.move_to_center()
    # Dynamical clock starts at 0; stellar clock continues from 4 Myr.
    time = 0.0 | t_end.unit
    ts = t_stellar + time
    ain, ein, aout, eout = get_orbital_elements_of_triple(triple)
    print("Triple elements t=", time, \
          "inner:", triple[0].mass, triple[1].mass, ain, ein, \
          "outer:", triple[2].mass, aout, eout)
    dt_diag = t_end/float(nsteps)
    t_diag = dt_diag
    # Time series, normalized to the initial elements.
    t = [time.value_in(units.Myr)]
    smai = [ain/ain_0]
    ecci = [ein/ein_0]
    smao = [aout/aout_0]
    ecco = [eout/eout_0]
    ain = ain_0
    def advance_stellar(ts, dt):
        # Advance stellar evolution by dt and return the new stellar
        # time and the energy change induced by the mass loss.
        E0 = gravity.kinetic_energy + gravity.potential_energy
        ts += dt
        stellar.evolve_model(ts)
        channel_from_stellar.copy_attributes(["mass"])
        channel_from_framework_to_gd.copy_attributes(["mass"])
        return ts, gravity.kinetic_energy + gravity.potential_energy - E0
    def advance_gravity(tg, dt):
        # Advance the N-body integration by dt and sync back positions.
        tg += dt
        gravity.evolve_model(tg)
        channel_from_gd_to_framework.copy()
        return tg
    while time < t_end:
        Pin = orbital_period(ain, triple[0].mass+triple[1].mass)
        dt = dtse_fac*Pin
        # NOTE(review): the step is multiplied by a uniform random number,
        # deliberately randomizing the coupling interval — confirm intent.
        dt *= random.random()
        if scheme == 1:
            # stellar evolution first, then gravity
            ts, dE_se = advance_stellar(ts, dt)
            time = advance_gravity(time, dt)
        elif scheme == 2:
            # gravity first, then stellar evolution
            time = advance_gravity(time, dt)
            ts, dE_se = advance_stellar(ts, dt)
        else:
            # gravity only; interleaved half-step coupling left disabled
            dE_se = zero
            #ts, dE_se = advance_stellar(ts, dt/2)
            time = advance_gravity(time, dt)
            #ts, dE = advance_stellar(ts, dt/2)
            #dE_se += dE
        if time >= t_diag:
            t_diag = time + dt_diag
            # Energy bookkeeping for the diagnostic printout.
            Ekin = gravity.kinetic_energy
            Epot = gravity.potential_energy
            Etot = Ekin + Epot
            dE = Etot_prev - Etot
            Mtot = triple.mass.sum()
            print("T=", time, end=' ')
            print("M=", Mtot, "(dM[SE]=", Mtot/Mtriple, ")", end=' ')
            print("E= ", Etot, "Q= ", Ekin/Epot, end=' ')
            print("dE=", (Etot_init-Etot)/Etot, "ddE=", (Etot_prev-Etot)/Etot, end=' ')
            print("(dE[SE]=", dE_se/Etot, ")")
            Etot_init -= dE
            Etot_prev = Etot
            ain, ein, aout, eout = get_orbital_elements_of_triple(triple)
            # (4|units.Myr) offset: report the stellar-age clock, not the
            # dynamical clock; hard-codes t_stellar.
            print("Triple elements t=", (4|units.Myr) + time, \
                  "inner:", triple[0].mass, triple[1].mass, ain, ein, \
                  "outer:", triple[2].mass, aout, eout)
            t.append(time.value_in(units.Myr))
            smai.append(ain/ain_0)
            ecci.append(ein/ein_0)
            smao.append(aout/aout_0)
            ecco.append(eout/eout_0)
            if eout > 1.0 or aout <= zero:
                print("Binary ionized or merged")
                break
    gravity.stop()
    stellar.stop()
    return t, smai, ecci, smao, ecco
def main(M1, M2, M3, Pora, Pin, ain, aout, ein, eout,
         t_end, nsteps, scheme, dtse_fac):
    """Run the triple-with-wind integration and plot e/e0 against a/a0.

    With scheme == 0 all three coupling schemes are run and overplotted;
    otherwise only the requested scheme is run.  The figure is saved to
    a file whose name encodes the scheme and the dtse factor.
    """
    from matplotlib import pyplot
    fig = single_frame("$a/a_{0}$", "$e/e_{0}$", logx=False, logy=False,
                       xsize=10, ysize=8)
    color = get_distinct(4)
    # scheme == 0 means "run every coupling scheme".
    schemes_to_run = [1, 2, 3] if scheme == 0 else [scheme]
    for count, sch in enumerate(schemes_to_run):
        time, ai, ei, ao, eo = evolve_triple_with_wind(M1, M2, M3,
                                                       Pora, Pin,
                                                       ain, aout,
                                                       ein, eout,
                                                       t_end, nsteps,
                                                       sch, dtse_fac)
        # Attach legend labels only to the first pair of curves.
        if count == 0:
            pyplot.plot(ai, ei, c=color[0], label='inner')
            pyplot.plot(ao, eo, c=color[1], label='outer')
        else:
            pyplot.plot(ai, ei, c=color[0])
            pyplot.plot(ao, eo, c=color[1])
    pyplot.legend(loc='best', ncol=1, shadow=False, fontsize=20)
    save_file = 'evolve_triple_with_wind' \
                +'_s={:d}_dtse={:.3f}'.format(scheme, dtse_fac) \
                +'.png'
    pyplot.savefig(save_file)
    print('\nSaved figure in file', save_file,'\n')
    pyplot.show()
def new_option_parser():
    """Build the command-line option parser for the triple simulation.

    Uses AMUSE's unit-aware OptionParser so mass/length/time options are
    parsed directly into quantities with the declared units.
    """
    from amuse.units.optparse import OptionParser
    result = OptionParser()
    result.add_option("-n",
                      dest="nsteps", type="int", default = 10000,
                      help="diagnostic time steps [%default]")
    result.add_option("--M1", unit=units.MSun,
                      dest="M1", type="float", default = 60 | units.MSun,
                      help="Primary mass [%default]")
    result.add_option("--M2", unit=units.MSun,
                      dest="M2", type="float", default = 30 | units.MSun,
                      help="secondary mass [%default]")
    # Fixed copy-paste error: this option previously claimed to be the
    # "secondary mass" although it sets the tertiary star's mass.
    result.add_option("--M3", unit=units.MSun,
                      dest="M3", type="float", default = 20 | units.MSun,
                      help="tertiary mass [%default]")
    result.add_option("--Pora",
                      dest="Pora", type="int", default = 1,
                      help="period (1) or semimajor axis (2) [%default]")
    result.add_option("--Pin", unit=units.day,
                      dest="Pin", type="float", default = 19|units.day,
                      help="orbital period [%default]")
    result.add_option("--ain", unit=units.AU,
                      dest="ain", type="float", default = 0.63|units.AU,
                      help="orbital separation [%default]")
    result.add_option("--aout", unit=units.AU,
                      dest="aout", type="float", default = 100|units.AU,
                      help="orbital separation [%default]")
    result.add_option("--ein",
                      dest="ein", type="float", default = 0.2,
                      help="orbital eccentricity [%default]")
    result.add_option("--eout",
                      dest="eout", type="float", default = 0.6,
                      help="orbital eccentricity [%default]")
    result.add_option("-t", unit=units.Myr,
                      dest="t_end", type="float", default = 0.55 | units.Myr,
                      help="end time of the simulation [%default]")
    result.add_option("-s",
                      dest="scheme", type="int", default = 3,
                      help="integration scheme [%default]")
    result.add_option("--dtse",
                      dest="dtse_fac", type="float", default = 0.1,
                      help="stellar mass-loss time step fraction [%default]")
    return result
if __name__ in ('__main__', '__plot__'):
    # Pretty-print AMUSE quantities in astronomer-friendly units.
    set_printing_strategy("custom",
                          preferred_units = [units.MSun, units.AU, units.Myr],
                          precision = 12, prefix = "",
                          separator = " [", suffix = "]")
    o, arguments = new_option_parser().parse_args()
    # Forward every parsed option to main() by keyword.
    main(**o.__dict__)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@examples@textbook@evolve_triple_with_wind.py@.PATH_END.py
|
{
"filename": "identity_test.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/L0_backend_identity/identity_test.py",
"type": "Python"
}
|
#!/usr/bin/python
# Copyright 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import argparse
import sys
from builtins import range
import numpy as np
import requests as httpreq
import tritonclient.grpc as grpcclient
import tritonclient.http as httpclient
from tritonclient.utils import np_to_triton_dtype
FLAGS = None
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"-v",
"--verbose",
action="store_true",
required=False,
default=False,
help="Enable verbose output",
)
parser.add_argument(
"-u", "--url", type=str, required=False, help="Inference server URL."
)
parser.add_argument(
"-i",
"--protocol",
type=str,
required=False,
default="http",
help='Protocol ("http"/"grpc") used to '
+ 'communicate with inference service. Default is "http".',
)
FLAGS = parser.parse_args()
if (FLAGS.protocol != "http") and (FLAGS.protocol != "grpc"):
print(
'unexpected protocol "{}", expects "http" or "grpc"'.format(FLAGS.protocol)
)
exit(1)
client_util = httpclient if FLAGS.protocol == "http" else grpcclient
if FLAGS.url is None:
FLAGS.url = "localhost:8000" if FLAGS.protocol == "http" else "localhost:8001"
# Run async requests to make sure backend handles request batches
# correctly. We use just HTTP for this since we are not testing the
# protocol anyway.
if FLAGS.protocol == "http":
model_name = "identity_uint32"
request_parallelism = 4
shape = [2, 2]
with client_util.InferenceServerClient(
FLAGS.url, concurrency=request_parallelism, verbose=FLAGS.verbose
) as client:
input_datas = []
requests = []
for i in range(request_parallelism):
input_data = (16384 * np.random.randn(*shape)).astype(np.uint32)
input_datas.append(input_data)
inputs = [
client_util.InferInput(
"INPUT0", input_data.shape, np_to_triton_dtype(input_data.dtype)
)
]
inputs[0].set_data_from_numpy(input_data)
requests.append(client.async_infer(model_name, inputs))
for i in range(request_parallelism):
# Get the result from the initiated asynchronous inference request.
# Note the call will block till the server responds.
results = requests[i].get_result()
print(results)
output_data = results.as_numpy("OUTPUT0")
if output_data is None:
print("error: expected 'OUTPUT0'")
sys.exit(1)
if not np.array_equal(output_data, input_datas[i]):
print(
"error: expected output {} to match input {}".format(
output_data, input_datas[i]
)
)
sys.exit(1)
# Make sure the requests ran in parallel.
stats = client.get_inference_statistics(model_name)
if (len(stats["model_stats"]) != 1) or (
stats["model_stats"][0]["name"] != model_name
):
print("error: expected statistics for {}".format(model_name))
sys.exit(1)
stat = stats["model_stats"][0]
if (stat["inference_count"] != 8) or (stat["execution_count"] != 1):
print(
"error: expected execution_count == 1 and inference_count == 8, got {} and {}".format(
stat["execution_count"], stat["inference_count"]
)
)
sys.exit(1)
# Check metrics to make sure they are reported correctly
metrics = httpreq.get("http://localhost:8002/metrics")
print(metrics.text)
success_str = (
'nv_inference_request_success{model="identity_uint32",version="1"}'
)
infer_count_str = 'nv_inference_count{model="identity_uint32",version="1"}'
infer_exec_str = (
'nv_inference_exec_count{model="identity_uint32",version="1"}'
)
custom_metric_str = (
'input_byte_size_counter{model="identity_uint32",version="1"}'
)
success_val = None
infer_count_val = None
infer_exec_val = None
custom_metric_val = None
for line in metrics.text.splitlines():
if line.startswith(success_str):
success_val = float(line[len(success_str) :])
if line.startswith(infer_count_str):
infer_count_val = float(line[len(infer_count_str) :])
if line.startswith(infer_exec_str):
infer_exec_val = float(line[len(infer_exec_str) :])
if line.startswith(custom_metric_str):
custom_metric_val = float(line[len(custom_metric_str) :])
if success_val != 4:
print(
"error: expected metric {} == 4, got {}".format(
success_str, success_val
)
)
sys.exit(1)
if infer_count_val != 8:
print(
"error: expected metric {} == 8, got {}".format(
infer_count_str, infer_count_val
)
)
sys.exit(1)
if infer_exec_val != 1:
print(
"error: expected metric {} == 1, got {}".format(
infer_exec_str, infer_exec_val
)
)
sys.exit(1)
if custom_metric_val != 64:
print(
"error: expected metric {} == 64, got {}".format(
custom_metric_str, custom_metric_val
)
)
sys.exit(1)
# Reuse a single client for all sync tests
with client_util.InferenceServerClient(FLAGS.url, verbose=FLAGS.verbose) as client:
for model_name, np_dtype, shape in (
# yapf: disable
("identity_fp32", np.float32, [1, 0]),
("identity_fp32", np.float32, [1, 5]),
("identity_uint32", np.uint32, [4, 0]),
("identity_uint32", np.uint32, [8, 5]),
("identity_nobatch_int8", np.int8, [0]),
("identity_nobatch_int8", np.int8, [7]),
("identity_bytes", object, [1, 1]),
("identity_bf16", np.float32, [1, 0]),
("identity_bf16", np.float32, [1, 5])
):
# yapf: enable
if np_dtype != object:
input_data = (16384 * np.random.randn(*shape)).astype(np_dtype)
else:
in0 = 16384 * np.ones(shape, dtype="int")
in0n = np.array([str(x) for x in in0.reshape(in0.size)], dtype=object)
input_data = in0n.reshape(in0.shape)
if model_name != "identity_bf16":
triton_type = np_to_triton_dtype(input_data.dtype)
else:
triton_type = "BF16"
inputs = [client_util.InferInput("INPUT0", input_data.shape, triton_type)]
inputs[0].set_data_from_numpy(input_data)
results = client.infer(model_name, inputs)
print(results)
# Make sure outputs are expected value
output_data = results.as_numpy("OUTPUT0")
if np_dtype == object:
output_data = np.array(
[str(x, encoding="utf-8") for x in output_data.flatten()],
dtype=object,
).reshape(output_data.shape)
if output_data is None:
print("error: expected 'OUTPUT0'")
sys.exit(1)
if model_name == "identity_bf16":
if input_data.shape != output_data.shape:
print(
"error: expected output shape {} to match input shape {}".format(
output_data.shape, input_data.shape
)
)
sys.exit(1)
for input, output in zip(
np.nditer(input_data, flags=["refs_ok", "zerosize_ok"], order="C"),
np.nditer(output_data, flags=["refs_ok", "zerosize_ok"], order="C"),
):
if input.tobytes()[2:4] != output.tobytes()[2:4]:
print(
"error: expected low-order bits of output {} to match low-order bits of input {}".format(
output, input
)
)
sys.exit(1)
if output.tobytes()[0:2] != b"\x00\x00":
print(
"error: expected output {} to have all-zero high-order bits, got {}".format(
output, output.tobytes()[0:2]
)
)
sys.exit(1)
else:
if not np.array_equal(output_data, input_data):
print(
"error: expected output {} to match input {}".format(
output_data, input_data
)
)
sys.exit(1)
# Make sure response parameters are correct
response = results.get_response()
if FLAGS.protocol == "http":
params = response["parameters"]
param0 = params["param0"]
param1 = params["param1"]
param2 = params["param2"]
param3 = params["param3"]
else:
params = response.parameters
param0 = params["param0"].string_param
param1 = params["param1"].int64_param
param2 = params["param2"].bool_param
param3 = params["param3"].double_param
if param0 != "an example string parameter":
print("error: expected 'param0' == 'an example string parameter'")
sys.exit(1)
if param1 != 42:
print("error: expected 'param1' == 42")
sys.exit(1)
if param2 != False:
print("error: expected 'param2' == False")
sys.exit(1)
if param3 != 123.123:
print("error: expected 'param3' == 123.123")
sys.exit(1)
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@L0_backend_identity@identity_test.py@.PATH_END.py
|
{
"filename": "test_event_list.py",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/threeML/test/test_event_list.py",
"type": "Python"
}
|
from __future__ import division
from past.utils import old_div
import os
import numpy as np
import pytest
from .conftest import get_test_datasets_directory
from threeML.io.file_utils import within_directory
from threeML.utils.time_interval import TimeIntervalSet
from threeML.utils.time_series.event_list import EventListWithDeadTime, EventList
__this_dir__ = os.path.join(os.path.abspath(os.path.dirname(__file__)))
datasets_dir = get_test_datasets_directory()
def is_within_tolerance(truth, value, relative_tolerance=0.01):
    """Return True if *value* agrees with *truth* to within a relative tolerance.

    :param truth: reference value; must be non-zero since the comparison
        is relative to it
    :param value: the value under test
    :param relative_tolerance: maximum allowed ``|truth - value| / |truth|``
    :return: bool
    """
    assert truth != 0
    # Plain true division replaces past.utils.old_div: the module already
    # imports division from __future__, so the semantics are unchanged.
    # Returning the comparison directly avoids the if/else True/False anti-idiom.
    return abs((truth - value) / truth) <= relative_tolerance
def test_event_list_constructor():
    """Construct a bare EventList and verify its pristine initial state."""
    n_events = 10
    arrival = np.linspace(0, 10, n_events)
    energies = np.zeros_like(arrival)

    event_list = EventList(
        arrival_times=arrival,
        measurement=energies,
        n_channels=1,
        start_time=0,
        stop_time=10,
    )

    # exactly the ten synthetic events should be registered
    assert event_list.n_events == n_events

    # accessing bins before any binning has been set up must fail
    with pytest.raises(RuntimeError):
        event_list.bins

    # attributes that simply do not exist raise AttributeError
    with pytest.raises(AttributeError):
        event_list.text_bins

    assert event_list.bkg_intervals is None

    with pytest.raises(AttributeError):
        event_list.tmax_list

    with pytest.raises(AttributeError):
        event_list.tmin_list

    # no background polynomials have been fitted yet
    assert event_list.polynomials is None

    # instrument and mission default to the UNKNOWN placeholder
    assert event_list._instrument == "UNKNOWN"
    assert event_list._mission == "UNKNOWN"
def test_unbinned_fit(event_time_series):
    """Fit the background with the unbinned (likelihood) method."""
    t_start, t_stop = 0, 50

    times = event_time_series

    event_list = EventListWithDeadTime(
        arrival_times=times,
        measurement=np.zeros_like(times),
        n_channels=1,
        start_time=times[0],
        stop_time=times[-1],
        dead_time=np.zeros_like(times),
    )

    # unbinned background polynomial fit over the interior of the series
    event_list.set_background_interval(
        "%f-%f" % (t_start + 1, t_stop - 1), unbinned=True
    )

    results = event_list.get_poly_info()["coefficients"]

    event_list.set_active_time_intervals("0-1")

    assert event_list.time_intervals == TimeIntervalSet.from_list_of_edges([0, 1])
    assert event_list._poly_counts.sum() > 0

    event_list.__repr__()
def test_binned_fit(event_time_series):
    """Fit the background with the binned method."""
    t_start, t_stop = 0, 50

    times = event_time_series

    event_list = EventListWithDeadTime(
        arrival_times=times,
        measurement=np.zeros_like(times),
        n_channels=1,
        start_time=times[0],
        stop_time=times[-1],
        dead_time=np.zeros_like(times),
    )

    # binned background fit over the interior of the time series
    event_list.set_background_interval(
        "%f-%f" % (t_start + 1, t_stop - 1), unbinned=False
    )

    event_list.set_active_time_intervals("0-1")

    results = event_list.get_poly_info()["coefficients"]

    assert event_list.time_intervals == TimeIntervalSet.from_list_of_edges([0, 1])
    assert event_list._poly_counts.sum() > 0

    event_list.__repr__()
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@threeML@test@test_event_list.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattermap/marker/colorbar/tickfont/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``variant`` property of
    ``scattermap.marker.colorbar.tickfont``."""

    def __init__(
        self,
        plotly_name="variant",
        parent_name="scattermap.marker.colorbar.tickfont",
        **kwargs,
    ):
        # Font variants accepted by the plotly.js tickfont schema; a fresh
        # list is built per call so callers may safely mutate it.
        allowed = [
            "normal",
            "small-caps",
            "all-small-caps",
            "all-petite-caps",
            "petite-caps",
            "unicase",
        ]
        super(VariantValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", allowed),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattermap@marker@colorbar@tickfont@_variant.py@.PATH_END.py
|
{
"filename": "point_source_cached.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/PointSource/point_source_cached.py",
"type": "Python"
}
|
__all__ = ["PointSourceCached"]
class PointSourceCached(object):
    """Thin wrapper around PointSource() that memoizes image and source positions.

    Repeated calls with the same lens and point-source configuration then skip
    the (expensive) lens-equation solving.

    Attention: the cache must be cleared (see :meth:`delete_lens_model_cache`)
    before calling any method with different lens or point-source parameters.
    """

    # names of every attribute that may hold a cached position
    _CACHE_ATTRS = (
        "_x_image",
        "_y_image",
        "_x_image_add",
        "_y_image_add",
        "_x_source",
        "_y_source",
    )

    def __init__(self, point_source_model, save_cache=False):
        self._model = point_source_model
        self._save_cache = save_cache

    def delete_lens_model_cache(self):
        """Drop every cached position so the next call recomputes them."""
        for attr in self._CACHE_ATTRS:
            if hasattr(self, attr):
                delattr(self, attr)

    def set_save_cache(self, save_bool):
        """Enable (True) or disable (False) caching of computed positions."""
        self._save_cache = save_bool

    def update_lens_model(self, lens_model_class):
        """Forward a new lens model to the wrapped point source model."""
        self._model.update_lens_model(lens_model_class)

    def image_position(
        self,
        kwargs_ps,
        kwargs_lens=None,
        magnification_limit=None,
        kwargs_lens_eqn_solver=None,
        additional_images=False,
    ):
        """On-sky image positions.

        :param kwargs_ps: keyword arguments of the point source model
        :param kwargs_lens: keyword argument list of the lens model(s), only used
            when requiring the lens equation solver
        :param magnification_limit: float >0 or None; if set and additional
            images are computed, only images exceeding this absolute lensing
            magnification are returned
        :param kwargs_lens_eqn_solver: keyword arguments with the numerical
            settings for the lens equation solver (see LensEquationSolver())
        :param additional_images: if True, solves the lens equation for
            additional images
        :type additional_images: bool
        :return: image positions in x, y as arrays
        """

        def _solve():
            # delegate to the wrapped model with the caller's settings
            return self._model.image_position(
                kwargs_ps,
                kwargs_lens=kwargs_lens,
                magnification_limit=magnification_limit,
                kwargs_lens_eqn_solver=kwargs_lens_eqn_solver,
                additional_images=additional_images,
            )

        if additional_images and not self._model.additional_images:
            # additional-image requests use their own "_add" cache slots so
            # they never collide with the standard image cache
            if (
                not self._save_cache
                or not hasattr(self, "_x_image_add")
                or not hasattr(self, "_y_image_add")
            ):
                self._x_image_add, self._y_image_add = _solve()
            return self._x_image_add, self._y_image_add

        if (
            not self._save_cache
            or not hasattr(self, "_x_image")
            or not hasattr(self, "_y_image")
        ):
            self._x_image, self._y_image = _solve()
        return self._x_image, self._y_image

    def source_position(self, kwargs_ps, kwargs_lens=None):
        """Original source position (prior to lensing).

        :param kwargs_ps: point source keyword arguments
        :param kwargs_lens: lens model keyword argument list (only used when required)
        :return: x, y position
        """
        cache_is_valid = (
            self._save_cache
            and hasattr(self, "_x_source")
            and hasattr(self, "_y_source")
        )
        if not cache_is_valid:
            self._x_source, self._y_source = self._model.source_position(
                kwargs_ps, kwargs_lens=kwargs_lens
            )
        return self._x_source, self._y_source

    def image_amplitude(
        self,
        kwargs_ps,
        kwargs_lens=None,
        magnification_limit=None,
        kwargs_lens_eqn_solver=None,
    ):
        """Image brightness amplitudes.

        :param kwargs_ps: keyword arguments of the point source model
        :param kwargs_lens: keyword argument list of the lens model(s), only used
            when requiring the lens equation solver
        :param magnification_limit: float >0 or None; see :meth:`image_position`
        :param kwargs_lens_eqn_solver: keyword arguments with the numerical
            settings for the lens equation solver (see LensEquationSolver())
        :return: array of image amplitudes
        """
        # reuse the (possibly cached) image positions
        x_pos, y_pos = self.image_position(
            kwargs_ps,
            kwargs_lens,
            magnification_limit=magnification_limit,
            kwargs_lens_eqn_solver=kwargs_lens_eqn_solver,
        )
        return self._model.image_amplitude(
            kwargs_ps, kwargs_lens=kwargs_lens, x_pos=x_pos, y_pos=y_pos
        )

    def source_amplitude(self, kwargs_ps, kwargs_lens=None):
        """Intrinsic brightness amplitude of the point source.

        :param kwargs_ps: keyword arguments of the point source model
        :param kwargs_lens: lens model keyword argument list (only used when
            positions are defined in the image plane and have to be ray-traced back)
        :return: brightness amplitude (as numpy array)
        """
        return self._model.source_amplitude(kwargs_ps, kwargs_lens=kwargs_lens)
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@PointSource@point_source_cached.py@.PATH_END.py
|
{
"filename": "developer.md",
"repo_name": "fritz-marshal/fritz",
"repo_path": "fritz_extracted/fritz-main/doc/developer.md",
"type": "Markdown"
}
|
# Developer Guidelines
## How to contribute
Contributions to Fritz are made through [GitHub Pull Requests](https://help.github.com/en/github/collaborating-with-issues-and-pull-requests/about-pull-requests), a set of proposed commits (or patches).
To prepare, you should:
- Create your own fork of the [fritz repository](https://github.com/fritz-marshal/fritz) by clicking the "fork" button.
- [Set up SSH authentication with GitHub](https://help.github.com/en/github/authenticating-to-github/connecting-to-github-with-ssh).
- Clone (download) your copy of the repository, and set up a remote called `upstream` that points to the main Fritz repository.
```sh
git clone git@github.com:yourname/fritz
git remote add upstream git@github.com:fritz-marshal/fritz
```
Then, for each feature you wish to contribute, create a pull request:
1. Download the latest version of Fritz, and create a new branch for your work.
Here, let's say we want to contribute some documentation fixes; we'll call our branch `rewrite-contributor-guide`.
```sh
git checkout master
git pull upstream master
git checkout -b rewrite-contributor-guide
```
1. Make modifications to Fritz and commit your changes using `git add` and `git commit`. Each commit message should consist of a summary line and a longer description, e.g.:
```text
Rewrite the contributor guide
While reading through the contributor guide, I noticed several places
in which instructions were out of order. I therefore reorganized all
sections to follow logically, and fixed several grammar mistakes along
the way.
```
1. When ready, push your branch to GitHub:
```sh
git push origin rewrite-contributor-guide
```
Once the branch is uploaded, GitHub should print a URL for turning your branch into a pull request. Open that URL in your browser, write an informative title and description for your pull request, and submit it.
1. The Fritz team will now review your contribution and suggest changes. *To simplify review, please limit pull requests to one logical set of changes.* To incorporate changes recommended by the reviewers, commit edits to your branch, and push to the branch again (there is no need to re-create the pull request, it will automatically track modifications to your branch).
1. Once the pull request has been reviewed and approved by at least two team members, it will be merged into Fritz.
## Setting up your environment
We use flake8 to verify that code complies with [PEP8](https://www.python.org/dev/peps/pep-0008/). Please install the git pre-commit hook using:
```sh
./fritz developer
```
The pre-commit hook will lint *changes* made to the source. To lint
the entire repository, use:
```sh
./fritz lint
```
|
fritz-marshalREPO_NAMEfritzPATH_START.@fritz_extracted@fritz-main@doc@developer.md@.PATH_END.py
|
{
"filename": "provisional.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/hypothesis/py3/hypothesis/provisional.py",
"type": "Python"
}
|
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""This module contains various provisional APIs and strategies.
It is intended for internal use, to ease code reuse, and is not stable.
Point releases may move or break the contents at any time!
Internet strategies should conform to :rfc:`3986` or the authoritative
definitions it links to. If not, report the bug!
"""
# https://tools.ietf.org/html/rfc3696
import string
from importlib import resources
from typing import Optional
from hypothesis import strategies as st
from hypothesis.errors import InvalidArgument
from hypothesis.internal.conjecture import utils as cu
from hypothesis.strategies._internal.utils import defines_strategy
URL_SAFE_CHARACTERS = frozenset(string.ascii_letters + string.digits + "$-_.+!*'(),~")
FRAGMENT_SAFE_CHARACTERS = URL_SAFE_CHARACTERS | {"?", "/"}
# This file is sourced from http://data.iana.org/TLD/tlds-alpha-by-domain.txt
# The file contains additional information about the date that it was last updated.
try: # pragma: no cover
traversable = resources.files("hypothesis.vendor") / "tlds-alpha-by-domain.txt"
_comment, *_tlds = traversable.read_text(encoding="utf-8").splitlines()
except (AttributeError, ValueError): # pragma: no cover # .files() was added in 3.9
_comment, *_tlds = resources.read_text(
"hypothesis.vendor", "tlds-alpha-by-domain.txt", encoding="utf-8"
).splitlines()
assert _comment.startswith("#")
# Remove special-use domain names from the list. For more discussion
# see https://github.com/HypothesisWorks/hypothesis/pull/3572
TOP_LEVEL_DOMAINS = ["COM", *sorted((d for d in _tlds if d != "ARPA"), key=len)]
class DomainNameStrategy(st.SearchStrategy):
    """A strategy for :rfc:`1035` fully qualified domain names.

    Draws a top-level domain from the vendored IANA list, then prepends a
    random number of RFC-1035-valid subdomain labels while respecting the
    configured length limits.
    """

    @staticmethod
    def clean_inputs(
        minimum: int, maximum: int, value: Optional[int], variable_name: str
    ) -> int:
        """Validate a user-supplied length bound.

        ``None`` defaults to *maximum*; otherwise *value* must be an int in
        the inclusive range [*minimum*, *maximum*].

        :raises InvalidArgument: on a non-int or out-of-range value.
        """
        if value is None:
            value = maximum
        elif not isinstance(value, int):
            raise InvalidArgument(
                f"Expected integer but {variable_name} is a {type(value).__name__}"
            )
        elif not minimum <= value <= maximum:
            # Bug fix: the bounds are inclusive, so the message now reports
            # <= rather than the misleading strict < it used to print.
            raise InvalidArgument(
                f"Invalid value {minimum!r} <= {variable_name}={value!r} <= {maximum!r}"
            )
        return value

    def __init__(
        self, max_length: Optional[int] = None, max_element_length: Optional[int] = None
    ) -> None:
        """
        A strategy for :rfc:`1035` fully qualified domain names.

        The upper limit for max_length is 255 in accordance with :rfc:`1035#section-2.3.4`
        The lower limit for max_length is 4, corresponding to a two letter domain
        with a single letter subdomain.

        The upper limit for max_element_length is 63 in accordance with :rfc:`1035#section-2.3.4`
        The lower limit for max_element_length is 1 in accordance with :rfc:`1035#section-2.3.4`
        """
        # https://tools.ietf.org/html/rfc1035#section-2.3.4
        max_length = self.clean_inputs(4, 255, max_length, "max_length")
        max_element_length = self.clean_inputs(
            1, 63, max_element_length, "max_element_length"
        )

        super().__init__()
        self.max_length = max_length
        self.max_element_length = max_element_length

        # These regular expressions are constructed to match the documented
        # information in https://tools.ietf.org/html/rfc1035#section-2.3.1
        # which defines the allowed syntax of a subdomain string.
        if self.max_element_length == 1:
            self.label_regex = r"[a-zA-Z]"
        elif self.max_element_length == 2:
            self.label_regex = r"[a-zA-Z][a-zA-Z0-9]?"
        else:
            # label = letter, optional middle of letters/digits/hyphens,
            # then a trailing letter or digit (no trailing hyphen)
            maximum_center_character_pattern_repetitions = self.max_element_length - 2
            self.label_regex = r"[a-zA-Z]([a-zA-Z0-9\-]{0,%d}[a-zA-Z0-9])?" % (
                maximum_center_character_pattern_repetitions,
            )

    def do_draw(self, data):
        # 1 - Select a valid top-level domain (TLD) name
        # 2 - Check that the number of characters in our selected TLD won't
        #     prevent us from generating at least a 1 character subdomain.
        # 3 - Randomize the TLD between upper and lower case characters.
        domain = data.draw(
            st.sampled_from(TOP_LEVEL_DOMAINS)
            .filter(lambda tld: len(tld) + 2 <= self.max_length)
            .flatmap(
                lambda tld: st.tuples(
                    *(st.sampled_from([c.lower(), c.upper()]) for c in tld)
                ).map("".join)
            )
        )
        # RFC-5890 s2.3.1 says such labels are reserved, and since we don't
        # want to bother with xn-- punycode labels we'll exclude them all.
        elem_st = st.from_regex(self.label_regex, fullmatch=True).filter(
            lambda label: len(label) < 4 or label[2:4] != "--"
        )
        # The maximum possible number of subdomains is 126,
        # 1 character subdomain + 1 '.' character, * 126 = 252,
        # with a max of 255, that leaves 3 characters for a TLD.
        # Allowing any more subdomains would not leave enough
        # characters for even the shortest possible TLDs.
        elements = cu.many(data, min_size=1, average_size=3, max_size=126)
        while elements.more():
            # Generate a new valid subdomain using the regex strategy.
            sub_domain = data.draw(elem_st)
            if len(domain) + len(sub_domain) >= self.max_length:
                # adding this label (plus its '.') would overflow max_length:
                # discard the draw and stop extending the domain
                data.stop_example(discard=True)
                break
            domain = sub_domain + "." + domain
        return domain
@defines_strategy(force_reusable_values=True)
def domains(
    *, max_length: int = 255, max_element_length: int = 63
) -> st.SearchStrategy[str]:
    """Generate :rfc:`1035` compliant fully qualified domain names."""
    strategy = DomainNameStrategy(
        max_length=max_length, max_element_length=max_element_length
    )
    return strategy
# The `urls()` strategy uses this to generate URL fragments (e.g. "#foo").
# It has been extracted to top-level so that we can test it independently
# of `urls()`, which helps with getting non-flaky coverage of the lambda.
_url_fragments_strategy = (
    st.lists(
        st.builds(
            # Either percent-encode the drawn character, or emit it verbatim
            # when it is fragment-safe and encoding was not forced.
            lambda char, encode: (
                f"%{ord(char):02X}"
                if (encode or char not in FRAGMENT_SAFE_CHARACTERS)
                else char
            ),
            st.characters(min_codepoint=0, max_codepoint=255),
            st.booleans(),  # force-encode flag: safe chars may be encoded too
        ),
        min_size=1,
    )
    .map("".join)
    .map("#{}".format)  # prefix with '#' to form the fragment
)
@defines_strategy(force_reusable_values=True)
def urls() -> st.SearchStrategy[str]:
    """A strategy for :rfc:`3986`, generating http/https URLs."""

    def url_encode(s: str) -> str:
        # percent-encode every character outside the URL-safe set
        encoded = [c if c in URL_SAFE_CHARACTERS else "%%%02X" % ord(c) for c in s]
        return "".join(encoded)

    scheme_st = st.sampled_from(["http", "https"])
    port_st = st.integers(min_value=0, max_value=2**16 - 1).map(":{}".format)
    path_st = st.lists(st.text(string.printable).map(url_encode)).map("/".join)

    return st.builds(
        "{}://{}{}/{}{}".format,
        scheme_st,
        domains(),
        st.just("") | port_st,
        path_st,
        st.just("") | _url_fragments_strategy,
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@hypothesis@py3@hypothesis@provisional.py@.PATH_END.py
|
{
"filename": "test_materials.py",
"repo_name": "Jashcraf/poke",
"repo_path": "poke_extracted/poke-main/tests/test_materials.py",
"type": "Python"
}
|
import numpy as np
import poke.materials as mat
def test_create_index_model():
    """Check create_index_model against tabulated (n, k) reference values."""
    # TODO: The file import system doesn't work on the GH Actions test

    expected = []
    computed = []

    # one absorbing metal and one transparent dielectric, each with the
    # wavelength (um) and tabulated refractive index n + i*k to reproduce
    cases = [
        ("Al", 0.51660, 0.51427, 4.95111),
        ("HfO2", 0.5060, 1.9083754098692, 0),
    ]

    for material, wavelength, n_real, k_imag in cases:
        index_model = mat.create_index_model(material)
        expected.append(n_real + 1j * k_imag)
        computed.append(index_model(wavelength))

    return np.testing.assert_allclose(expected, computed)
|
JashcrafREPO_NAMEpokePATH_START.@poke_extracted@poke-main@tests@test_materials.py@.PATH_END.py
|
{
"filename": "earth_signal.py",
"repo_name": "cosmo-ethz/hide",
"repo_path": "hide_extracted/hide-master/hide/plugins/earth_signal.py",
"type": "Python"
}
|
# HIDE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HIDE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HIDE. If not, see <http://www.gnu.org/licenses/>.
'''
Created on Dec 8, 2014
author: jakeret
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import importlib
from ivy.plugin.base_plugin import BasePlugin
earth_signal = None
class Plugin(BasePlugin):
    """Loads the earth signal (cached at module level) and attaches it to the context."""

    def __call__(self):
        global earth_signal

        if earth_signal is None:
            # lazily import the configured provider module and delegate the
            # signal loading to it; cache the result for later plugin runs
            provider = importlib.import_module(
                self.ctx.params.earth_signal_provider
            )
            earth_signal = provider.load_signal(self.ctx)

        self.ctx.earth_signal = earth_signal

    def __str__(self):
        return "Applying earth signals"
|
cosmo-ethzREPO_NAMEhidePATH_START.@hide_extracted@hide-master@hide@plugins@earth_signal.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "stephane-caron/qpsolvers",
"repo_path": "qpsolvers_extracted/qpsolvers-main/qpsolvers/__init__.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: LGPL-3.0-or-later
# Copyright 2016-2022 Stéphane Caron and the qpsolvers contributors
"""Quadratic programming solvers in Python with a unified API."""
from ._internals import available_solvers
from .active_set import ActiveSet
from .exceptions import (
NoSolverSelected,
ParamError,
ProblemError,
QPError,
SolverError,
SolverNotFound,
)
from .problem import Problem
from .solution import Solution
from .solve_ls import solve_ls
from .solve_qp import solve_problem, solve_qp
from .solve_unconstrained import solve_unconstrained
from .solvers import (
cvxopt_solve_qp,
daqp_solve_qp,
dense_solvers,
ecos_solve_qp,
gurobi_solve_qp,
highs_solve_qp,
hpipm_solve_qp,
mosek_solve_qp,
osqp_solve_qp,
piqp_solve_qp,
proxqp_solve_qp,
qpalm_solve_qp,
qpoases_solve_qp,
qpswift_solve_qp,
quadprog_solve_qp,
scs_solve_qp,
sparse_solvers,
)
from .unsupported import nppro_solve_qp
from .utils import print_matrix_vector
__version__ = "4.4.0"
__all__ = [
"ActiveSet",
"NoSolverSelected",
"ParamError",
"Problem",
"ProblemError",
"QPError",
"Solution",
"SolverError",
"SolverNotFound",
"__version__",
"available_solvers",
"cvxopt_solve_qp",
"daqp_solve_qp",
"dense_solvers",
"ecos_solve_qp",
"gurobi_solve_qp",
"highs_solve_qp",
"hpipm_solve_qp",
"mosek_solve_qp",
"nppro_solve_qp",
"osqp_solve_qp",
"print_matrix_vector",
"piqp_solve_qp",
"proxqp_solve_qp",
"qpalm_solve_qp",
"qpoases_solve_qp",
"qpswift_solve_qp",
"quadprog_solve_qp",
"scs_solve_qp",
"solve_ls",
"solve_problem",
"solve_qp",
"solve_unconstrained",
"sparse_solvers",
]
|
stephane-caronREPO_NAMEqpsolversPATH_START.@qpsolvers_extracted@qpsolvers-main@qpsolvers@__init__.py@.PATH_END.py
|
{
"filename": "_legendwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contourcarpet/_legendwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``legendwidth`` property of ``contourcarpet`` traces."""

    def __init__(
        self, plotly_name="legendwidth", parent_name="contourcarpet", **kwargs
    ):
        # pull defaults out of kwargs first so callers may still override them
        edit_type = kwargs.pop("edit_type", "style")
        minimum = kwargs.pop("min", 0)
        super(LegendwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contourcarpet@_legendwidth.py@.PATH_END.py
|
{
"filename": "python__staged_predict__output-type__intro.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/_includes/work_src/reusage/python__staged_predict__output-type__intro.md",
"type": "Markdown"
}
|
Generator that produces predictions with a sequentially growing subset of trees from the model. The type of generated values depends on the number of input objects:
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@_includes@work_src@reusage@python__staged_predict__output-type__intro.md@.PATH_END.py
|
{
"filename": "imaging.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/imaging/imaging.py",
"type": "Python"
}
|
import numpy
import astropy
import h5py
from ..constants.physics import c
class Image:
    """Container for image data with optional axes and spectral information.

    Stores the image array plus optional coordinate axes (x, y), FITS-style
    headers, frequency/wavelength information, uncertainties, and velocities.
    Can round-trip itself through HDF5 (``read``/``write``) or export to
    FITS (``asFITS``).
    """

    def __init__(self, image=None, x=None, y=None, header=None, wave=None,
            freq=None, unc=None, velocity=None):
        """Set whichever attributes were supplied; derive wave/freq from
        each other via the speed of light when only one is given."""
        if image is not None:
            self.image = image

        # x and y are treated as a pair keyed on x
        if x is not None:
            self.x = x
            self.y = y

        if header is not None:
            self.header = header

        if unc is not None:
            self.unc = unc

        if velocity is not None:
            self.velocity = velocity

        # wave [um] and freq [Hz] are related by c; fill in the missing one
        if (wave is None) and (freq is not None):
            self.freq = freq
            self.wave = c / freq
        elif (wave is not None) and (freq is None):
            self.wave = wave
            self.freq = c / wave
        elif (wave is not None) and (freq is not None):
            self.wave = wave
            self.freq = freq

    def asFITS(self):
        """Return the image as an astropy HDUList, one PrimaryHDU per slice.

        NOTE(review): assumes self.image is 3D with the spectral dimension
        on the last axis -- confirm against callers.
        """
        hdulist = astropy.io.fits.HDUList([])

        for i in range(self.image[0, 0, :].size):
            hdu = astropy.io.fits.PrimaryHDU(self.image[:, :, i])
            if hasattr(self, "header"):
                hdu.header = self.header[i]
            hdulist.append(hdu)

        return hdulist

    def read(self, filename=None, usefile=None):
        """Load the image from an HDF5 file, or from an already-open
        h5py file object passed via *usefile*."""
        if usefile is None:
            f = h5py.File(filename, "r")
        else:
            f = usefile

        if 'x' in f:
            x = f['x'][...]
            y = f['y'][...]
        else:
            x = None
            y = None

        if 'freq' in f:
            freq = f['freq'][...]
        else:
            freq = None

        image = f['image'][...]

        # Accept both "unc" (current name) and "uncertainty" (the name an
        # older write() used, which read() previously failed to pick up).
        if 'unc' in f:
            unc = f['unc'][...]
        elif 'uncertainty' in f:
            unc = f['uncertainty'][...]
        else:
            unc = None

        self.__init__(image, x=x, y=y, unc=unc, freq=freq)

        # only close handles we opened ourselves
        if usefile is None:
            f.close()

    def write(self, filename=None, usefile=None):
        """Write the image to an HDF5 file, or to an already-open
        h5py file object passed via *usefile*.

        Bug fix: the uncertainty dataset is now written as "unc", matching
        what read() expects. It was previously written as "uncertainty",
        so uncertainties silently failed to round-trip.
        """
        if usefile is None:
            f = h5py.File(filename, "w")
        else:
            f = usefile

        if hasattr(self, "x"):
            x_dset = f.create_dataset("x", (self.x.size,), dtype='f')
            x_dset[...] = self.x

            y_dset = f.create_dataset("y", (self.y.size,), dtype='f')
            y_dset[...] = self.y

        if hasattr(self, "freq"):
            freq_dset = f.create_dataset("freq", (self.freq.size,), dtype='f')
            freq_dset[...] = self.freq

        image_dset = f.create_dataset("image", self.image.shape, dtype='f')
        image_dset[...] = self.image

        if hasattr(self, "unc"):
            unc_dset = f.create_dataset("unc", self.unc.shape, dtype='f')
            unc_dset[...] = self.unc

        # only close handles we opened ourselves
        if usefile is None:
            f.close()
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@imaging@imaging.py@.PATH_END.py
|
{
"filename": "plate.py",
"repo_name": "sdss/marvin",
"repo_path": "marvin_extracted/marvin-main/python/marvin/tools/plate.py",
"type": "Python"
}
|
#!/usr/bin/env python
# encoding: utf-8
# Created by Brian Cherinka on 2016-05-17 10:17:35
# Licensed under a 3-clause BSD license.
# Revision History:
# Initial Version: 2016-05-17 10:17:35 by Brian Cherinka
# Last Modified On: 2016-05-17 10:17:35 by Brian
from __future__ import division, print_function
import inspect
from astropy.io import fits
from marvin import config
from marvin.core.exceptions import MarvinError
from marvin.tools.cube import Cube
from marvin.utils.general.structs import FuzzyList
from .core import MarvinToolsClass
try:
from sdss_access.path import Path
except ImportError:
Path = None
class Plate(MarvinToolsClass, FuzzyList):
'''A class to interface with MaNGA Plate.
This class represents a Plate, initialised either
from a file, a database, or remotely via the Marvin API. The class
inherits from Python's list class, and is defined as a list of
Cube objects. As it inherits from list, it can do all the standard Python
list operations.
When instanstantiated, Marvin Plate will attempt to discover and load all the Cubes
associated with this plate.
Parameters:
plate (str):
The plate id of the Plate to load.
plateifu (str):
The plate-ifu of the Plate to load
filename (str):
The path of the file containing the data cube to load.
mode ({'local', 'remote', 'auto'}):
The load mode to use. See
:doc:`Mode secision tree</mode_decision>`..
release (str):
The MPL/DR version of the data to use.
nocubes (bool):
Set this to turn off the Cube loading
Attributes:
cubeXXXX (object):
The Marvin Cube object for the given ifu, e.g. cube1901 refers to the Cube for plateifu 8485-1901
plate/plateid (int):
The plate id for this plate
cartid (str):
The cart id for this plate
designid (int):
The design id for this plate
ra (float):
The RA of the plate center
dec (float):
The declination of the plate center
dateobs (str):
The date of observation for this plate
surveymode (str):
The survey mode for this plate
isbright (bool):
True if this is a bright time plate
Return:
plate:
An object representing the Plate entity. The object is a list of
Cube objects, one for each IFU cube in the Plate entity.
Example:
>>> from marvin.tools.plate import Plate
>>> plate = Plate(plate=8485)
>>> print(plate)
>>> <Marvin Plate (plate=8485, n_cubes=17, mode='local', data_origin='db')>
>>>
>>> print('Cubes found in this plate: {0}'.format(len(plate)))
>>> Cubes found in this plate: 4
>>>
>>> # access the plate via index to access the individual cubes
>>> plate[0]
>>> <Marvin Cube (plateifu='8485-12701', mode='local', data_origin='db')>
>>>
>>> # or by name
>>> plate['12702']
>>> <Marvin Cube (plateifu='8485-12702', mode='local', data_origin='db')>
>>>
'''
def __init__(self, input=None, filename=None, mangaid=None, plateifu=None,
             mode=None, data=None, release=None, plate=None,
             download=None, nocubes=None):
    """Initialise the Plate, resolve its data origin, and load its cubes.

    See the class docstring for parameter descriptions. Either ``plate``
    or ``plateifu`` must be supplied.
    """
    self._cubes = None
    self._plate = None
    self._pdict = None
    self.platedir = None
    self.nocubes = nocubes

    # If plateid specified, force a temp plateifu
    if plate:
        self.plateid = plate
        plateifu = '{0}-XXXX'.format(self.plateid)

    self.plateifu = plateifu

    args = [plate, plateifu]
    assert any(args), 'Enter plate or plateifu!'

    # base-class init resolves mode/data_origin (file, db, or api)
    MarvinToolsClass.__init__(self, input=input, filename=filename, mangaid=mangaid,
                              plateifu=plateifu, mode=mode, data=data, release=release,
                              download=download)

    # sort out any plateid, plate-ifu, mangaid name snafus
    self._sortOutNames()

    # grab the plate info from the resolved data origin
    if self.data_origin == 'file':
        self._getPlateFromFile()
    elif self.data_origin == 'db':
        self._getPlateFromDB()
    elif self.data_origin == 'api':
        self._getPlateFromAPI()

    # load the plate params and init the Marvin Cubes
    self._setParams()
    if not self.nocubes:
        self._initCubes()
def __repr__(self):
    '''Representation for Plate.'''
    template = ('<Marvin Plate (plate={0!r}, n_cubes={1}, mode={2!r}, '
                'data_origin={3!r})>')
    return template.format(self.plateid, len(self), self.mode, self.data_origin)
def __dir__(self):
    ''' Overriding dir for Plate '''

    # attributes defined on the class plus those set on this instance
    class_members = list(list(zip(*inspect.getmembers(self.__class__)))[0])
    instance_attr = list(self.__dict__.keys())

    # one synthetic "cubeXXXX" attribute per cube contained in this plate
    cube_names = sorted('cube{0}'.format(c.plateifu.split('-')[1]) for c in self)

    return cube_names + sorted(class_members + instance_attr)
def __getattr__(self, value):
    # Dynamically resolve "cubeXXXX" attribute access to the matching Cube,
    # e.g. plate.cube1901 -> plate['<plate>-1901'].
    # NOTE(review): the substring test intercepts any missing attribute whose
    # name contains "cube" -- confirm no other attribute names can match.
    if 'cube' in value:
        ifu = value.split('cube')[-1]
        plateifu = '{0}-{1}'.format(self.plate, ifu)
        return self[plateifu]
    return super(Plate, self).__getattribute__(value)
def _getFullPath(self, **kwargs):
    """Returns the full path of the file in the tree."""
    # resolve the 'mangaplate' path template for this plate and DRP version
    self.filename = super(Plate, self)._getFullPath('mangaplate', drpver=self._drpver,
                                                    plate=self.plateid, **kwargs)
    # for a Plate the resolved path also serves as the plate directory
    self.platedir = self.filename
    self._checkFilename()
    return self.filename
def _getPlateFromFile(self):
    ''' Initialize a Plate from a Cube/RSS File'''

    # Load file
    try:
        # the plate id is read from extension 1 of the cube/RSS file
        self._hdr = fits.getheader(self.filename, 1)
        self.plateid = int(self._hdr['PLATEID'])
    except Exception as e:
        raise MarvinError('Could not initialize via filename: {0}'
                          .format(e))
    else:
        # success: record the origin and build the parameter dictionary
        self.data_origin = 'file'
        self._makePdict()
def _getPlateFromDB(self):
    ''' Initialize a Plate from the DB '''

    import sqlalchemy
    from marvin import marvindb as mdb

    if not mdb.isdbconnected:
        raise MarvinError('No db connected')

    # Grab any cube for this plate; the plate row itself is reached through
    # the cube's plateclass attribute below.
    cube = None
    try:
        cube = mdb.session.query(mdb.datadb.Cube).join(
            mdb.datadb.PipelineInfo, mdb.datadb.PipelineVersion).\
            filter(mdb.datadb.Cube.plate == self.plateid,
                   mdb.datadb.PipelineVersion.version == self._drpver).first()
    except sqlalchemy.orm.exc.NoResultFound as ee:
        raise MarvinError('Could not retrieve Cube for plate {0}: '
                          'No Results Found: {1}'
                          .format(self.plateid, ee))
    except Exception as ee:
        raise MarvinError('Could not retrieve Cube for plate {0}: '
                          'Unknown exception: {1}'
                          .format(self.plateid, ee))
    else:
        # no cube
        if not cube:
            raise MarvinError('No cube found in db for plate {0}, drpver {1}'
                              .format(self.plateid, self._drpver))

        # cube but no plateclass
        try:
            self._plate = cube.plateclass
        except AttributeError as ee:
            raise MarvinError('AttributeError: cube has no plateclass for plate {0}: {1}'
                              .format(self.plateid, ee))
        else:
            # success: copy header and parameter dict from the plate row
            self._hdr = self._plate._hdr
            self._pdict = self._plate.__dict__
            self.data_origin = 'db'

    if not self._plate:
        raise MarvinError('Could not retrieve Plate for id {0}'.format(self.plateid))
def _getPlateFromAPI(self):
''' Initialize a Plate using the API '''
# Checks that the Plate exists.
routeparams = {'plateid': self.plateid}
url = config.urlmap['api']['getPlate']['url'].format(**routeparams)
# Make the API call
response = self._toolInteraction(url)
data = response.getData()
self._hdr = data['header']
self.data_origin = 'api'
self._makePdict()
    def _initCubes(self):
        ''' Initialize a list of Marvin Cube objects

        Collects all cubes on this plate — from disk, the db, or the API,
        depending on ``data_origin`` — and initializes this object as a
        FuzzyList of those cubes keyed on plate-ifu.
        '''
        _cubes = [None]
        if self.data_origin == 'file':
            # glob the plate directory for cube (stack) or mastar files
            sdss_path = Path(release=self.release)
            if self.dir3d == 'stack':
                cubes = sdss_path.expand('mangacube', drpver=self._drpver,
                                         plate=self.plateid, ifu='*', wave='LOG')
            else:
                cubes = sdss_path.expand('mangamastar', drpver=self._drpver,
                                         plate=self.plateid, ifu='*', wave='LOG')
            _cubes = [Cube(filename=cube, mode=self.mode, release=self.release) for cube in cubes]
        elif self.data_origin == 'db':
            # the db plate object already knows its cubes
            _cubes = [Cube(plateifu=cube.plateifu, mode=self.mode, release=self.release)
                      for cube in self._plate.cubes]
        elif self.data_origin == 'api':
            routeparams = {'plateid': self.plateid}
            url = config.urlmap['api']['getPlateCubes']['url'].format(**routeparams)
            # Make the API call
            response = self._toolInteraction(url)
            data = response.getData()
            plateifus = data['plateifus']
            _cubes = [Cube(plateifu=pifu, mode=self.mode, release=self.release) for pifu in plateifus]
        # become a fuzzy list of the cubes, matched on their plate-ifu
        FuzzyList.__init__(self, _cubes)
        self.mapper = (lambda e: e.plateifu)
def _setParams(self):
''' Set the plate parameters '''
self.ra = self._pdict.get('ra', None)
self.dec = self._pdict.get('dec', None)
self.designid = self._pdict.get('designid', None)
self.cartid = self._pdict.get('cartid', None)
self.dateobs = self._pdict.get('dateobs', None)
self.platetype = self._pdict.get('platetype', None)
self.surveymode = self._pdict.get('surveymode', None)
self.isbright = self._pdict.get('isbright', None)
self.dir3d = self._pdict.get('dir3d', None)
self.plateid = int(self.plateid)
def _makePdict(self):
''' Make the necessary plate dictionary '''
self._pdict = {}
self._pdict['ra'] = self._hdr.get('CENRA', None)
self._pdict['dec'] = self._hdr.get('CENDEC', None)
self._pdict['designid'] = self._hdr.get('DESIGNID', None)
self._pdict['cartid'] = self._hdr.get('CARTID', None)
self._pdict['dateobs'] = self._hdr.get('DATE-OBS', None)
self._pdict['platetype'] = self._hdr.get('PLATETYP', None)
self._pdict['surveymode'] = self._hdr.get('SRVYMODE', None)
self._pdict['isbright'] = 'APOGEE' in self._pdict['surveymode']
self._pdict['dir3d'] = 'mastar' if self._pdict['isbright'] else 'stack'
self._pdict['ra'] = float(self._pdict['ra'])
self._pdict['dec'] = float(self._pdict['dec'])
self._pdict['designid'] = float(self._pdict['designid'])
    def _sortOutNames(self):
        ''' Sort out any name issues with plateid, plateifu, mangaid inputs

        If a real plate-IFU designation was supplied (placeholder ids contain
        'XXX'), derive the integer plate id from its plate half.
        '''
        if self.plateifu and 'XXX' not in self.plateifu:
            plate, ifu = self.plateifu.split('-')
            self.plateid = int(plate)
def _checkFilename(self):
''' Checks the filename for a proper FITS file '''
# if filename is not FITS, then try to load one
if 'fits' not in self.filename.lower():
if not Path:
raise MarvinError('sdss_access is not installed')
else:
# is_public = 'DR' in self._release
# path_release = self._release.lower() if is_public else None
sdss_path = Path(release=self._release)
# try a cube
full = sdss_path.full('mangacube', drpver=self._drpver, plate=self.plateid, ifu='*', wave='LOG')
cubeexists = sdss_path.any('', full=full)
if cubeexists:
file = sdss_path.one('', full=full)
else:
# try an rss
full = sdss_path.full('mangarss', drpver=self._drpver, plate=self.plateid, ifu='*', wave='LOG')
rssexists = sdss_path.any('', full=full)
if rssexists:
file = sdss_path.one('', full=full)
else:
file = None
# load the file
if file:
self.filename = file
else:
self.filename = None
|
sdssREPO_NAMEmarvinPATH_START.@marvin_extracted@marvin-main@python@marvin@tools@plate.py@.PATH_END.py
|
{
"filename": "transformer.py",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/ultralytics/nn/modules/transformer.py",
"type": "Python"
}
|
# Ultralytics YOLO 🚀, AGPL-3.0 license
"""Transformer modules."""
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.init import constant_, xavier_uniform_
from .conv import Conv
from .utils import _get_clones, inverse_sigmoid, multi_scale_deformable_attn_pytorch
__all__ = (
"TransformerEncoderLayer",
"TransformerLayer",
"TransformerBlock",
"MLPBlock",
"LayerNorm2d",
"AIFI",
"DeformableTransformerDecoder",
"DeformableTransformerDecoderLayer",
"MSDeformAttn",
"MLP",
)
class TransformerEncoderLayer(nn.Module):
    """Defines a single layer of the transformer encoder.

    Supports both post-norm (default) and pre-norm variants, selected by
    ``normalize_before``; the two differ only in where LayerNorm is applied
    relative to the residual connections.
    """

    def __init__(self, c1, cm=2048, num_heads=8, dropout=0.0, act=nn.GELU(), normalize_before=False):
        """Initialize the TransformerEncoderLayer with specified parameters.

        Args:
            c1 (int): embedding dimension of the input tokens.
            cm (int): hidden width of the feed-forward sublayer.
            num_heads (int): number of attention heads.
            dropout (float): dropout probability used throughout the layer.
            act (nn.Module): activation instance for the feed-forward sublayer.
            normalize_before (bool): pre-norm (True) vs post-norm (False).
        """
        super().__init__()
        from ...utils.torch_utils import TORCH_1_9

        # batch_first attention only exists from torch 1.9 onwards
        if not TORCH_1_9:
            raise ModuleNotFoundError(
                "TransformerEncoderLayer() requires torch>=1.9 to use nn.MultiheadAttention(batch_first=True)."
            )
        self.ma = nn.MultiheadAttention(c1, num_heads, dropout=dropout, batch_first=True)
        # Implementation of Feedforward model
        self.fc1 = nn.Linear(c1, cm)
        self.fc2 = nn.Linear(cm, c1)
        self.norm1 = nn.LayerNorm(c1)
        self.norm2 = nn.LayerNorm(c1)
        self.dropout = nn.Dropout(dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.act = act
        self.normalize_before = normalize_before

    @staticmethod
    def with_pos_embed(tensor, pos=None):
        """Add position embeddings to the tensor if provided."""
        return tensor if pos is None else tensor + pos

    def forward_post(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Performs forward pass with post-normalization (attn -> add -> norm -> ffn -> add -> norm)."""
        # position embedding is added to queries/keys only, not values
        q = k = self.with_pos_embed(src, pos)
        src2 = self.ma(q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)  # residual around attention
        src = self.norm1(src)
        src2 = self.fc2(self.dropout(self.act(self.fc1(src))))
        src = src + self.dropout2(src2)  # residual around the FFN
        return self.norm2(src)

    def forward_pre(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Performs forward pass with pre-normalization (norm before each sublayer)."""
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.ma(q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask)[0]
        src = src + self.dropout1(src2)  # residual around attention
        src2 = self.norm2(src)
        src2 = self.fc2(self.dropout(self.act(self.fc1(src2))))
        return src + self.dropout2(src2)  # residual around the FFN; no final norm

    def forward(self, src, src_mask=None, src_key_padding_mask=None, pos=None):
        """Forward propagates the input through the encoder module."""
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class AIFI(TransformerEncoderLayer):
    """AIFI transformer layer: a TransformerEncoderLayer run over a flattened
    2D feature map with a fixed 2D sine-cosine positional embedding."""

    def __init__(self, c1, cm=2048, num_heads=8, dropout=0, act=nn.GELU(), normalize_before=False):
        """Initialize the AIFI instance with specified parameters."""
        super().__init__(c1, cm, num_heads, dropout, act, normalize_before)

    def forward(self, x):
        """Run the encoder layer over a [B, C, H, W] feature map and restore its shape."""
        c, h, w = x.shape[1:]
        pos = self.build_2d_sincos_position_embedding(w, h, c).to(device=x.device, dtype=x.dtype)
        # Flatten [B, C, H, W] to [B, HxW, C] for attention, then back
        tokens = x.flatten(2).permute(0, 2, 1)
        out = super().forward(tokens, pos=pos)
        return out.permute(0, 2, 1).view([-1, c, h, w]).contiguous()

    @staticmethod
    def build_2d_sincos_position_embedding(w, h, embed_dim=256, temperature=10000.0):
        """Build a [1, HxW, embed_dim] 2D sine-cosine position embedding."""
        assert embed_dim % 4 == 0, "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
        xs = torch.arange(w, dtype=torch.float32)
        ys = torch.arange(h, dtype=torch.float32)
        xs, ys = torch.meshgrid(xs, ys, indexing="ij")
        # a quarter of the channels each for sin/cos of x and y
        quarter = embed_dim // 4
        freqs = torch.arange(quarter, dtype=torch.float32) / quarter
        freqs = 1.0 / (temperature**freqs)
        args_w = xs.flatten()[..., None] @ freqs[None]
        args_h = ys.flatten()[..., None] @ freqs[None]
        return torch.cat([torch.sin(args_w), torch.cos(args_w), torch.sin(args_h), torch.cos(args_h)], 1)[None]
class TransformerLayer(nn.Module):
    """Transformer layer https://arxiv.org/abs/2010.11929 (LayerNorm layers removed for better performance)."""

    def __init__(self, c, num_heads):
        """Set up linear Q/K/V projections, multi-head attention, and a two-layer feed-forward path."""
        super().__init__()
        self.q = nn.Linear(c, c, bias=False)
        self.k = nn.Linear(c, c, bias=False)
        self.v = nn.Linear(c, c, bias=False)
        self.ma = nn.MultiheadAttention(embed_dim=c, num_heads=num_heads)
        self.fc1 = nn.Linear(c, c, bias=False)
        self.fc2 = nn.Linear(c, c, bias=False)

    def forward(self, x):
        """Apply self-attention and the feed-forward path, each with a residual connection."""
        attn_out, _ = self.ma(self.q(x), self.k(x), self.v(x))
        x = attn_out + x
        return self.fc2(self.fc1(x)) + x
class TransformerBlock(nn.Module):
    """Vision Transformer https://arxiv.org/abs/2010.11929."""

    def __init__(self, c1, c2, num_heads, num_layers):
        """Build an optional channel-matching conv, a learnable position embedding, and stacked transformer layers."""
        super().__init__()
        # only insert a conv when the channel count must change
        self.conv = Conv(c1, c2) if c1 != c2 else None
        self.linear = nn.Linear(c2, c2)  # learnable position embedding
        self.tr = nn.Sequential(*(TransformerLayer(c2, num_heads) for _ in range(num_layers)))
        self.c2 = c2

    def forward(self, x):
        """Run the transformer stack over a flattened [B, C, H, W] feature map and restore its shape."""
        if self.conv is not None:
            x = self.conv(x)
        b = x.shape[0]
        spatial = x.shape[2:]
        tokens = x.flatten(2).permute(2, 0, 1)  # [HxW, B, C]
        out = self.tr(tokens + self.linear(tokens))
        return out.permute(1, 2, 0).reshape(b, self.c2, *spatial)
class MLPBlock(nn.Module):
    """A single MLP block: linear projection up, activation, linear projection back."""

    def __init__(self, embedding_dim, mlp_dim, act=nn.GELU):
        """Create the two linear projections and instantiate the activation class."""
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Project up to mlp_dim, activate, and project back to embedding_dim."""
        hidden = self.act(self.lin1(x))
        return self.lin2(hidden)
class MLP(nn.Module):
    """Implements a simple multi-layer perceptron (also called FFN)."""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers, act=nn.ReLU, sigmoid=False):
        """Build `num_layers` stacked linear layers: input -> hidden ... hidden -> output."""
        super().__init__()
        self.num_layers = num_layers
        dims = [input_dim] + [hidden_dim] * (num_layers - 1) + [output_dim]
        self.layers = nn.ModuleList(nn.Linear(d_in, d_out) for d_in, d_out in zip(dims[:-1], dims[1:]))
        self.sigmoid = sigmoid
        self.act = act()

    def forward(self, x):
        """Apply each layer, activating all but the last; optionally squash the output with sigmoid."""
        last = self.num_layers - 1
        for idx, layer in enumerate(self.layers):
            x = layer(x)
            if idx < last:
                # getattr guards keep old checkpoints (without `act`) loadable
                x = getattr(self, "act", nn.ReLU())(x)
        # same guard for checkpoints saved before `sigmoid` existed
        return x.sigmoid() if getattr(self, "sigmoid", False) else x
class LayerNorm2d(nn.Module):
    """
    2D Layer Normalization module inspired by Detectron2 and ConvNeXt implementations.

    Original implementations in
    https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py
    and
    https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py.
    """

    def __init__(self, num_channels, eps=1e-6):
        """Create per-channel affine parameters and store the numerical-stability epsilon."""
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x):
        """Normalize over the channel dimension of a [B, C, H, W] tensor, then apply the affine transform."""
        mean = x.mean(1, keepdim=True)
        var = (x - mean).pow(2).mean(1, keepdim=True)
        normed = (x - mean) / torch.sqrt(var + self.eps)
        # broadcast the per-channel affine over H and W
        return self.weight[:, None, None] * normed + self.bias[:, None, None]
class MSDeformAttn(nn.Module):
    """
    Multiscale Deformable Attention Module based on Deformable-DETR and PaddleDetection implementations.

    https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/ops/modules/ms_deform_attn.py
    """

    def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):
        """Initialize MSDeformAttn with the given parameters.

        Args:
            d_model (int): total embedding dimension (split across heads).
            n_levels (int): number of feature-map levels attended over.
            n_heads (int): number of attention heads; must divide d_model.
            n_points (int): sampling points per head per level.
        """
        super().__init__()
        if d_model % n_heads != 0:
            raise ValueError(f"d_model must be divisible by n_heads, but got {d_model} and {n_heads}")
        _d_per_head = d_model // n_heads
        # Better to set _d_per_head to a power of 2 which is more efficient in a CUDA implementation
        assert _d_per_head * n_heads == d_model, "`d_model` must be divisible by `n_heads`"
        self.im2col_step = 64
        self.d_model = d_model
        self.n_levels = n_levels
        self.n_heads = n_heads
        self.n_points = n_points
        # predicts 2D offsets and scalar weights for every (head, level, point)
        self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)
        self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)
        self.value_proj = nn.Linear(d_model, d_model)
        self.output_proj = nn.Linear(d_model, d_model)
        self._reset_parameters()

    def _reset_parameters(self):
        """Reset module parameters.

        Offset biases are initialized so each head starts by sampling along a
        distinct compass direction, at increasing radii per point.
        """
        constant_(self.sampling_offsets.weight.data, 0.0)
        # one unit direction per head, evenly spaced around the circle
        thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads)
        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)
        grid_init = (
            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])
            .view(self.n_heads, 1, 1, 2)
            .repeat(1, self.n_levels, self.n_points, 1)
        )
        # scale the i-th point's initial offset by (i + 1)
        for i in range(self.n_points):
            grid_init[:, :, i, :] *= i + 1
        with torch.no_grad():
            self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1))
        constant_(self.attention_weights.weight.data, 0.0)
        constant_(self.attention_weights.bias.data, 0.0)
        xavier_uniform_(self.value_proj.weight.data)
        constant_(self.value_proj.bias.data, 0.0)
        xavier_uniform_(self.output_proj.weight.data)
        constant_(self.output_proj.bias.data, 0.0)

    def forward(self, query, refer_bbox, value, value_shapes, value_mask=None):
        """
        Perform forward pass for multiscale deformable attention.

        https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py

        Args:
            query (torch.Tensor): [bs, query_length, C]
            refer_bbox (torch.Tensor): [bs, query_length, n_levels, 2], range in [0, 1], top-left (0,0),
                bottom-right (1, 1), including padding area
            value (torch.Tensor): [bs, value_length, C]
            value_shapes (List): [n_levels, 2], [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]
            value_mask (Tensor): [bs, value_length], True for non-padding elements, False for padding elements

        Returns:
            output (Tensor): [bs, Length_{query}, C]
        """
        bs, len_q = query.shape[:2]
        len_v = value.shape[1]
        # flattened value length must equal the sum of all level areas
        assert sum(s[0] * s[1] for s in value_shapes) == len_v
        value = self.value_proj(value)
        if value_mask is not None:
            # zero out padded positions so they contribute nothing
            value = value.masked_fill(value_mask[..., None], float(0))
        value = value.view(bs, len_v, self.n_heads, self.d_model // self.n_heads)
        sampling_offsets = self.sampling_offsets(query).view(bs, len_q, self.n_heads, self.n_levels, self.n_points, 2)
        attention_weights = self.attention_weights(query).view(bs, len_q, self.n_heads, self.n_levels * self.n_points)
        # softmax jointly over all (level, point) pairs, then split back out
        attention_weights = F.softmax(attention_weights, -1).view(bs, len_q, self.n_heads, self.n_levels, self.n_points)
        # N, Len_q, n_heads, n_levels, n_points, 2
        num_points = refer_bbox.shape[-1]
        if num_points == 2:
            # reference is a point: offsets are normalized by feature-map size (W, H)
            offset_normalizer = torch.as_tensor(value_shapes, dtype=query.dtype, device=query.device).flip(-1)
            add = sampling_offsets / offset_normalizer[None, None, None, :, None, :]
            sampling_locations = refer_bbox[:, :, None, :, None, :] + add
        elif num_points == 4:
            # reference is a box (cx, cy, w, h): offsets are scaled by half its size
            add = sampling_offsets / self.n_points * refer_bbox[:, :, None, :, None, 2:] * 0.5
            sampling_locations = refer_bbox[:, :, None, :, None, :2] + add
        else:
            raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {num_points}.")
        output = multi_scale_deformable_attn_pytorch(value, value_shapes, sampling_locations, attention_weights)
        return self.output_proj(output)
class DeformableTransformerDecoderLayer(nn.Module):
    """
    Deformable Transformer Decoder Layer inspired by PaddleDetection and Deformable-DETR implementations.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
    https://github.com/fundamentalvision/Deformable-DETR/blob/main/models/deformable_transformer.py
    """

    def __init__(self, d_model=256, n_heads=8, d_ffn=1024, dropout=0.0, act=nn.ReLU(), n_levels=4, n_points=4):
        """Initialize the DeformableTransformerDecoderLayer with the given parameters.

        Args:
            d_model (int): embedding dimension.
            n_heads (int): attention heads for both self- and cross-attention.
            d_ffn (int): hidden width of the feed-forward sublayer.
            dropout (float): dropout probability for all sublayers.
            act (nn.Module): activation instance for the FFN.
            n_levels (int): feature levels for deformable cross-attention.
            n_points (int): sampling points per head per level.
        """
        super().__init__()
        # Self attention
        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)
        self.dropout1 = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(d_model)
        # Cross attention
        self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)
        self.dropout2 = nn.Dropout(dropout)
        self.norm2 = nn.LayerNorm(d_model)
        # FFN
        self.linear1 = nn.Linear(d_model, d_ffn)
        self.act = act
        self.dropout3 = nn.Dropout(dropout)
        self.linear2 = nn.Linear(d_ffn, d_model)
        self.dropout4 = nn.Dropout(dropout)
        self.norm3 = nn.LayerNorm(d_model)

    @staticmethod
    def with_pos_embed(tensor, pos):
        """Add positional embeddings to the input tensor, if provided."""
        return tensor if pos is None else tensor + pos

    def forward_ffn(self, tgt):
        """Perform forward pass through the Feed-Forward Network part of the layer."""
        tgt2 = self.linear2(self.dropout3(self.act(self.linear1(tgt))))
        tgt = tgt + self.dropout4(tgt2)  # residual around the FFN
        return self.norm3(tgt)

    def forward(self, embed, refer_bbox, feats, shapes, padding_mask=None, attn_mask=None, query_pos=None):
        """Perform the forward pass through the entire decoder layer (post-norm: sublayer -> add -> norm)."""
        # Self attention
        q = k = self.with_pos_embed(embed, query_pos)
        # nn.MultiheadAttention here expects [seq, batch, dim], hence the transposes
        tgt = self.self_attn(q.transpose(0, 1), k.transpose(0, 1), embed.transpose(0, 1), attn_mask=attn_mask)[
            0
        ].transpose(0, 1)
        embed = embed + self.dropout1(tgt)
        embed = self.norm1(embed)
        # Cross attention
        tgt = self.cross_attn(
            self.with_pos_embed(embed, query_pos), refer_bbox.unsqueeze(2), feats, shapes, padding_mask
        )
        embed = embed + self.dropout2(tgt)
        embed = self.norm2(embed)
        # FFN
        return self.forward_ffn(embed)
class DeformableTransformerDecoder(nn.Module):
    """
    Implementation of Deformable Transformer Decoder based on PaddleDetection.

    https://github.com/PaddlePaddle/PaddleDetection/blob/develop/ppdet/modeling/transformers/deformable_transformer.py
    """

    def __init__(self, hidden_dim, decoder_layer, num_layers, eval_idx=-1):
        """Initialize the DeformableTransformerDecoder with the given parameters.

        Args:
            hidden_dim (int): embedding dimension.
            decoder_layer (nn.Module): layer instance cloned num_layers times.
            num_layers (int): number of stacked decoder layers.
            eval_idx (int): layer whose predictions are returned at eval time;
                negative values index from the end (default: last layer).
        """
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.hidden_dim = hidden_dim
        # normalize negative indices to an absolute layer index
        self.eval_idx = eval_idx if eval_idx >= 0 else num_layers + eval_idx

    def forward(
        self,
        embed,  # decoder embeddings
        refer_bbox,  # anchor
        feats,  # image features
        shapes,  # feature shapes
        bbox_head,
        score_head,
        pos_mlp,
        attn_mask=None,
        padding_mask=None,
    ):
        """Perform the forward pass through the entire decoder.

        During training every layer's class scores and refined boxes are
        collected (deep supervision); at eval time only the layer at
        ``self.eval_idx`` contributes and the loop exits early.
        """
        output = embed
        dec_bboxes = []
        dec_cls = []
        last_refined_bbox = None
        refer_bbox = refer_bbox.sigmoid()
        for i, layer in enumerate(self.layers):
            output = layer(output, refer_bbox, feats, shapes, padding_mask, attn_mask, pos_mlp(refer_bbox))
            bbox = bbox_head[i](output)
            # iterative refinement in logit space: predicted delta plus the
            # inverse-sigmoid of the current reference, squashed back to [0, 1]
            refined_bbox = torch.sigmoid(bbox + inverse_sigmoid(refer_bbox))
            if self.training:
                dec_cls.append(score_head[i](output))
                if i == 0:
                    dec_bboxes.append(refined_bbox)
                else:
                    # later layers are supervised relative to the previous
                    # layer's refinement
                    dec_bboxes.append(torch.sigmoid(bbox + inverse_sigmoid(last_refined_bbox)))
            elif i == self.eval_idx:
                dec_cls.append(score_head[i](output))
                dec_bboxes.append(refined_bbox)
                break
            last_refined_bbox = refined_bbox
            # detach during training so gradients do not flow through the
            # reference boxes across layers
            refer_bbox = refined_bbox.detach() if self.training else refined_bbox
        return torch.stack(dec_bboxes), torch.stack(dec_cls)
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@ultralytics@nn@modules@transformer.py@.PATH_END.py
|
{
"filename": "bug_report.md",
"repo_name": "AndrewAnnex/SpiceyPy",
"repo_path": "SpiceyPy_extracted/SpiceyPy-main/.github/ISSUE_TEMPLATE/bug_report.md",
"type": "Markdown"
}
|
---
name: Bug report
about: Create a report to help us improve SpiceyPy!
title: ''
labels: ''
assignees: ''
---
**Describe the bug**
A clear and concise description of what the bug is. If this is an installation issue please first consult the common installation issues and solutions hosted in the documentation at: https://spiceypy.readthedocs.io/en/latest/installation.html#common-issues .
**Expected behavior**
A clear and concise description of what you expected to happen. Optional if the issue is a crash, can be short if it is an exception. More detail is needed if unexpected output occurs.
**To Reproduce**
Steps to reproduce the behavior, preferably complete code snippets and meta kernels with URLs for complete reproduction. If it is an installation issue please re-run the install command with verbose output and copy/paste the output into a code snippet or attach a text file.
**Desktop (please complete the following information):**
- SpiceyPy Version [1.0.0, 2.0.0, etc]
- OS: [e.g. MacOS, Windows, Linux]
- OS Architecture [64bit, 32bit]
- Python Version [2.7.12, 3.6.0, etc]
- Python Architecture [64bit, 32bit]
**Additional context**
Add any other context about the problem here, such as screenshots.
|
AndrewAnnexREPO_NAMESpiceyPyPATH_START.@SpiceyPy_extracted@SpiceyPy-main@.github@ISSUE_TEMPLATE@bug_report.md@.PATH_END.py
|
{
"filename": "activation_benchmark.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/benchmarks/layer_benchmark/activation_benchmark.py",
"type": "Python"
}
|
"""Benchmark activation layers.
To run benchmarks, see the following command for an example, please change the
flag to your custom value:
```
python3 -m benchmarks.layer_benchmark.activation_benchmark \
--benchmark_name=benchmark_elu \
--num_samples=2048 \
--batch_size=256 \
--jit_compile=True
```
"""
from absl import app
from absl import flags
from benchmarks.layer_benchmark.base_benchmark import LayerBenchmark
FLAGS = flags.FLAGS
def _run_activation_benchmark(layer_name, num_samples, batch_size, jit_compile):
    """Shared driver: benchmark predict and train for one activation layer.

    All activation benchmarks use the same configuration (no init args and a
    [256, 256] input shape), so the public per-layer functions below only
    supply the layer name. Previously this body was duplicated five times.
    """
    benchmark = LayerBenchmark(
        layer_name,
        {},
        input_shape=[256, 256],
        jit_compile=jit_compile,
    )
    benchmark.benchmark_predict(
        num_samples=num_samples,
        batch_size=batch_size,
    )
    benchmark.benchmark_train(
        num_samples=num_samples,
        batch_size=batch_size,
    )


def benchmark_elu(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmark the ELU activation layer."""
    _run_activation_benchmark("ELU", num_samples, batch_size, jit_compile)


def benchmark_prelu(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmark the PReLU activation layer."""
    _run_activation_benchmark("PReLU", num_samples, batch_size, jit_compile)


def benchmark_relu(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmark the ReLU activation layer."""
    _run_activation_benchmark("ReLU", num_samples, batch_size, jit_compile)


def benchmark_leaky_relu(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmark the LeakyReLU activation layer."""
    _run_activation_benchmark("LeakyReLU", num_samples, batch_size, jit_compile)


def benchmark_softmax(
    num_samples,
    batch_size,
    jit_compile=True,
):
    """Benchmark the Softmax activation layer."""
    _run_activation_benchmark("Softmax", num_samples, batch_size, jit_compile)


# registry used by main() to dispatch --benchmark_name
BENCHMARK_NAMES = {
    "benchmark_elu": benchmark_elu,
    "benchmark_relu": benchmark_relu,
    "benchmark_leaky_relu": benchmark_leaky_relu,
    "benchmark_prelu": benchmark_prelu,
    "benchmark_softmax": benchmark_softmax,
}
def main(_):
    """Entry point: run one named benchmark, or the full suite when no name is given."""
    num_samples = FLAGS.num_samples
    batch_size = FLAGS.batch_size
    jit_compile = FLAGS.jit_compile
    name = FLAGS.benchmark_name
    if name is None:
        # no name supplied: run every registered benchmark
        for fn in BENCHMARK_NAMES.values():
            fn(num_samples, batch_size, jit_compile)
        return
    if name not in BENCHMARK_NAMES:
        raise ValueError(
            f"Invalid benchmark name: {name}, `benchmark_name` must "
            f"be one of {BENCHMARK_NAMES.keys()}"
        )
    BENCHMARK_NAMES[name](num_samples, batch_size, jit_compile)
if __name__ == "__main__":
app.run(main)
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@benchmarks@layer_benchmark@activation_benchmark.py@.PATH_END.py
|
{
"filename": "example_parallel_M.py",
"repo_name": "CaymanUnterborn/ExoPlex",
"repo_path": "ExoPlex_extracted/ExoPlex-master/Examples/example_parallel_M.py",
"type": "Python"
}
|
# This file is part of ExoPlex - a self consistent planet builder
# Copyright (C) 2017 - by the ExoPlex team, released under the GNU
# GPL v2 or later.
"""
This example uses parallel processing to quickly calculate the best fit radius and pressure, temperature and density profiles
for a planet of a given mass, its uncertainty and a chosen composition.
The code begins by initializing the composition of the planet and retrieving the grids. In the main text code (at bottom)
one can set the number of samplings and the mass, radius, and uncertainties.
"""
import os
import sys
import multiprocessing as mp
import numpy as np
import matplotlib.pyplot as plt
# hack to allow scripts to be placed in subdirectories next to exoplex:
if not os.path.exists('ExoPlex') and os.path.exists('../ExoPlex'):
sys.path.insert(1, os.path.abspath('..'))
from ExoPlex import functions
from ExoPlex import run_perplex as perp
from ExoPlex import make_grids
Pressure_range_mantle_UM = '1 1400000'
Temperature_range_mantle_UM = '1600 3500'
Pressure_range_mantle_LM = '1250000 40000000'
Temperature_range_mantle_LM = '1700 7000'
water_potential_temp = 300.
comp_keys = ['wt_frac_water','FeMg','SiMg','CaMg','AlMg','wt_frac_FeO_wanted','wt_frac_Si_core',
'wt_frac_O_core','wt_frac_S_core', 'combine_phases','use_grids','conserve_oxy']
struct_keys = ['Pressure_range_mantle_UM','Temperature_range_mantle_UM','resolution_UM',
'Pressure_range_mantle_LM', 'Temperature_range_mantle_LM', 'resolution_LM',
'Mantle_potential_temp','water_potential_temp']
combine_phases = True
use_grids = True
# To have ExoPlex to give you compositional info and status of calculation set Verbose to TRUE.
# Note: setting this to True will slightly slow down the program
verbose = False
# create filename to store values
Output_filename = 'Filename'
# Next user must input the ratios by mole (Earth is Ca/Mg = .07, Si.Mg = 0.90, Al/Mg = 0.09, Fe/Mg = 0.9)
CaMg = 0.07
SiMg = 0.9
AlMg = 0.09
FeMg = 0.9
# How much water do you want in your planet? By mass fraction.
wt_frac_water = 0.0
# Don't forget that if you have water you need to add water layers
number_h2o_layers = 0
# The potential Temperature of Water, if present
water_potential_temp = 300.
# What fraction of the mantle would you like to be made of FeO? This Fe will be pulled from the core.
wt_frac_FeO_wanted = 0. # by mass
conserve_oxy = False
# Now we can mix various elements into the core or mantle
wt_frac_Si_core = 0. # by mass <1, note if you conserve oxygen this is calculated for you
wt_frac_O_core = 0. # by mass
wt_frac_S_core = 0. # by mass
# What potential temperature (in K) do you want to start your mantle adiabat?
Mantle_potential_temp = 1600.
# Input the resolution of your upper mantle and lower mantle composition, density grids
# These are input as number of T, P points. 50 50 = 2500 grid points, which takes about
# 5 minutes to calculate. Lower mantle resolution does not need to be higher since it's
# mostly ppv.
resolution_UM = '25 75'
resolution_LM = '75 75'
# lastly we need to decide how many layers to put in the planet. This is the resolution of
# the mass-radius sampling.
num_mantle_layers = 300
num_core_layers = 300
Output_radii = []
Output_mass = []
######### Initialize and run ExoPlex
compositional_params = dict(zip(comp_keys, [wt_frac_water, FeMg, SiMg, CaMg, AlMg, wt_frac_FeO_wanted, wt_frac_Si_core, \
wt_frac_O_core, wt_frac_S_core, combine_phases, use_grids, conserve_oxy]))
if use_grids == True:
filename = functions.find_filename(compositional_params, verbose)
else:
filename = ''
structure_params = dict(zip(struct_keys, [Pressure_range_mantle_UM, Temperature_range_mantle_UM, resolution_UM,
Pressure_range_mantle_LM, Temperature_range_mantle_LM, resolution_LM,
Mantle_potential_temp, water_potential_temp]))
layers = [num_mantle_layers, num_core_layers, number_h2o_layers]
Core_wt_per, Mantle_wt_per, Core_mol_per, core_mass_frac = functions.get_percents(compositional_params, verbose)
Mantle_filename = perp.run_perplex(*[Mantle_wt_per,compositional_params, structure_params,filename,verbose,combine_phases])
grids_low, names = make_grids.make_mantle_grid(Mantle_filename,Mantle_wt_per, True,use_grids)
names.append('Fe')
if layers[-1] > 0:
water_grid, water_phases = make_grids.make_water_grid()
for i in water_phases:
names.append(i)
else:
water_grid = []
grids_high = make_grids.make_mantle_grid(Mantle_filename,Mantle_wt_per, False,use_grids)[0]
core_grid = make_grids.make_core_grid()
grids = [grids_low,grids_high,core_grid,water_grid]
def calc_planet(x):
    """Build a planet of mass `x` (Earth masses) and summarize its structure.

    Returns a dict with radius, mass, core mass/radius fractions, CMB and
    central P/T, plus water-mantle-boundary P/T when water layers exist.
    Relies on the module-level grids/composition set up above.
    """
    Planet = functions.find_Planet_mass(x, core_mass_frac, structure_params, compositional_params, grids, Core_wt_per, layers, verbose)

    Planet['phase_names'] = names
    Planet['phases'], Planet['phase_names'] = functions.get_phases(Planet, grids, layers, combine_phases)
    functions.check(Planet)

    # unit conversions: m -> Earth radii, kg -> Earth masses, bar <-> GPa
    rad = Planet['radius'][-1] / 6371e3
    mass = Planet['mass'][-1] / 5.97e24
    CMF = Planet['mass'][num_core_layers - 1] / Planet['mass'][-1]
    CRF = Planet['radius'][num_core_layers - 1] / Planet['radius'][-1]
    CMB_P = Planet['pressure'][num_core_layers] / 1e4
    CMB_T = Planet['temperature'][num_core_layers]
    P_cen = Planet['pressure'][0] / 1e7
    T_cen = Planet['temperature'][0]

    keys = ['radius', 'mass', 'CMF', 'CRF', 'CMB_P', 'CMB_T', 'P_cen', 'T_cen']
    vals = [rad, mass, CMF, CRF, CMB_P, CMB_T, P_cen, T_cen]

    # water-mantle boundary values only exist when water layers were requested
    # (this block was previously computed twice; it is now computed once)
    if number_h2o_layers > 0:
        keys += ['WMB_P', 'WMB_T']
        vals += [Planet['pressure'][num_core_layers + num_mantle_layers] / 1e4,
                 Planet['temperature'][num_core_layers + num_mantle_layers]]

    return dict(zip(keys, vals))
if __name__ == "__main__":
    # Monte Carlo over planet mass: draw num_pts masses from a normal
    # distribution and solve each planet in parallel across all cores.
    num_pts = 100   # number of mass samples
    M = 1           # mean mass (Earth masses)
    M_err = .1      # 1-sigma mass uncertainty (Earth masses)
    Mass_planet = np.random.normal(M, M_err, num_pts)
    pool = mp.Pool(processes=mp.cpu_count())
    # map_async(...).get() blocks until every planet is solved
    Planets = pool.map_async(calc_planet,Mass_planet).get()
    pool.close()
    # plot the sampled mass-radius relation
    plt.scatter(Mass_planet, [Planets[i].get('radius') for i in range(num_pts)])
    plt.ylabel('Radius (Earth Radii)', size=20)
    plt.xlabel('Mass (Earth Masses)', size=20)
    plt.show()
|
CaymanUnterbornREPO_NAMEExoPlexPATH_START.@ExoPlex_extracted@ExoPlex-master@Examples@example_parallel_M.py@.PATH_END.py
|
{
"filename": "_textfont.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatterpolargl/_textfont.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextfontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``textfont`` compound property of
    ``scatterpolargl`` traces (auto-generated plotly schema code)."""

    def __init__(self, plotly_name="textfont", parent_name="scatterpolargl", **kwargs):
        super(TextfontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the graph-object class this validator coerces into.
            data_class_str=kwargs.pop("data_class_str", "Textfont"),
            # Human-readable per-subproperty docs used in error messages.
            data_docs=kwargs.pop(
                "data_docs",
                """
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
style
Sets whether a font should be styled with a
normal or italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud
for `style`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud
for `variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud
for `weight`.
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatterpolargl@_textfont.py@.PATH_END.py
|
{
"filename": "_domain.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sankey/_domain.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DomainValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``domain`` compound property of ``sankey`` traces
    (auto-generated plotly schema code)."""

    def __init__(self, plotly_name="domain", parent_name="sankey", **kwargs):
        # Pop the overridable defaults out of kwargs first so that any
        # explicitly supplied values take precedence.
        data_class_str = kwargs.pop("data_class_str", "Domain")
        data_docs = kwargs.pop(
            "data_docs",
            """
column
If there is a layout grid, use the domain for
this column in the grid for this sankey trace .
row
If there is a layout grid, use the domain for
this row in the grid for this sankey trace .
x
Sets the horizontal domain of this sankey trace
(in plot fraction).
y
Sets the vertical domain of this sankey trace
(in plot fraction).
""",
        )
        super(DomainValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sankey@_domain.py@.PATH_END.py
|
{
"filename": "mcmc.py",
"repo_name": "jcforbes/gidget",
"repo_path": "gidget_extracted/gidget-master/mcmc.py",
"type": "Python"
}
|
import sys
import copy
import emcee
import triangle
import pickle
from mpi4py import MPI
from emcee.utils import MPIPool
import numpy as np
import exper
import readoutput
import os, glob
import matplotlib.pyplot as plt
# MPI context: every rank imports this module; rank 0 drives the sampler
# while the other ranks serve likelihood evaluations.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Name of this chain; also the prefix for run names and output files.
chainDirRel = 'mcmcChain06h'
analysisDir = os.environ['GIDGETDIR']+'/analysis/'
chainDir = analysisDir+chainDirRel

# Per-process counters used to build unique gidget run names.
procCounter=0
runNumber = 0
def lnBetaDensity(theta, a, b):
    """Log of the (unnormalized) Beta(a, b) density at theta.

    Returns -inf outside the open support (0, 1).
    """
    if not (0 < theta < 1):
        return -np.inf
    return (a - 1.0) * np.log(theta) + (b - 1.0) * np.log(1.0 - theta)
def lnGammaDensity(theta, a, b):
    """Log of the (unnormalized) Gamma(a, b) density at theta, with b the
    rate parameter.  Returns -inf for theta <= 0 (outside the support)."""
    return -np.inf if theta <= 0 else (a - 1.0) * np.log(theta) - b * theta
def lnLogNormalDensity(theta, mean, var):
    """Log of the (unnormalized) log-normal density at theta; ``mean`` and
    ``var`` parameterize the underlying normal in log(theta).  Returns -inf
    for theta <= 0 (outside the support)."""
    if theta <= 0:
        return -np.inf
    log_theta = np.log(theta)
    return -log_theta - 0.5 * (log_theta - mean) ** 2 / var
def lnNormalDensity(theta, mean, var):
    """Log of the (unnormalized) normal density at theta; ``var`` is a
    variance, not a standard deviation."""
    resid = theta - mean
    return -0.5 * resid * resid / var
def sampleFromBetaDensity(a, b):
    """Draw one sample from Beta(a, b).

    Raises
    ------
    ValueError
        If a or b is not strictly positive.  (An explicit check replaces the
        previous ``assert``, which silently disappears under ``python -O``.)
    """
    if a <= 0 or b <= 0:
        raise ValueError("Beta parameters must be positive, got a=%r, b=%r" % (a, b))
    return np.random.beta(a, b)
def sampleFromGammaDensity(a, b):
    """Draw one sample from Gamma(shape=a, rate=b).

    numpy's gamma uses a *scale* parameterization (e^{-x/scale}/scale^a),
    so the rate b is passed as scale = 1/b.

    Raises
    ------
    ValueError
        If a or b is not strictly positive.  (An explicit check replaces the
        previous ``assert``, which silently disappears under ``python -O``.)
    """
    if a <= 0 or b <= 0:
        raise ValueError("Gamma parameters must be positive, got a=%r, b=%r" % (a, b))
    return np.random.gamma(a, 1.0/b)
def sampleFromLogNormalDensity(mean,var):
    # Exponentiate a normal draw; ``mean`` and ``var`` parameterize the
    # underlying normal of log(x), not x itself.
    return np.exp(sampleFromNormalDensity(mean,var))
def sampleFromNormalDensity(mean, var):
    """Draw one sample from N(mean, var); note ``var`` is a variance."""
    sigma = np.sqrt(var)
    return np.random.normal(mean, sigma)
# Define the base experiment we want.
def emceeParameterSpaceToGidgetExperiment(emceeParams):
    """Translate one emcee parameter vector into a configured gidget
    experiment plus a unique run name.

    Side effect: increments the module-global ``procCounter`` so successive
    calls on the same MPI rank produce distinct run names.
    """
    global procCounter
    global runNumber

    # unpack emceeParams
    #eta, epsff, fg0, muNorm, muScaling, fixedQ, accScaleLength, xiREC, accNorm, accAlphaZ, accAlphaMh, accCeiling, fcool, kappaMetals, ZIGM = emceeParams
    fg0, muNorm, muScaling, accScaleLength, accNorm, accAlphaZ, accAlphaMh, accCeiling, fcool = emceeParams

    # if any of the following assertions fail, you should probably adjust / check the prior
    assert fg0>=0
    assert fg0<=1.0
    assert muNorm>=0
    assert accScaleLength>=0
    assert accCeiling<=1 and accCeiling>=0
    assert 0<=fcool and fcool<=1

    # Create experiment
    basename = chainDirRel+'_'
    # Run name encodes MCMC run number, MPI rank, and per-rank call counter.
    name = basename+str(runNumber).zfill(3)+'_'+str(rank).zfill(5)+'_'+str(procCounter).zfill(5)
    thisExper = exper.experiment(copy.copy(name))
    procCounter+=1

    # Set up a range of masses and computational domains
    thisExper.vary('Mh0', 1.0e10, 5.0e11, 3, 1, 4)
    # Computational domain radius scales with the accretion scale length.
    thisExper.vary('R', 10*accScaleLength/.05, 50*accScaleLength/.05, 3, 1, 4)

    # Set up some common parameters.
    thisExper.irregularVary('dbg', 2**4+2**1+2**0)
    thisExper.irregularVary('alphaMRI', 0)
    thisExper.irregularVary('zstart',4.99)
    thisExper.irregularVary('zrelax',5.0)
    thisExper.irregularVary('Noutputs',20)

    # Physics parameters held fixed in this chain (not sampled).
    thisExper.irregularVary('eta',1.5)
    thisExper.irregularVary('epsff',0.01)
    thisExper.irregularVary('fixedQ',2.0)
    thisExper.irregularVary('xiREC',0.0)
    thisExper.irregularVary('kappaMetals',1.0)
    thisExper.irregularVary('ZIGM',.02*.01)

    #thisExper.irregularVary('eta',eta)
    #thisExper.irregularVary('epsff',epsff)
    # Parameters sampled by the MCMC.
    thisExper.irregularVary('fg0',fg0)
    thisExper.irregularVary('muNorm',muNorm)
    thisExper.irregularVary('muScaling',muScaling)
    #thisExper.irregularVary('fixedQ',fixedQ)
    thisExper.irregularVary('accScaleLength',accScaleLength)
    #thisExper.irregularVary('xiREC',xiREC)
    thisExper.irregularVary('accNorm',accNorm)
    thisExper.irregularVary('accAlphaZ',accAlphaZ)
    thisExper.irregularVary('accAlphaMh',accAlphaMh)
    thisExper.irregularVary('accCeiling',accCeiling)
    thisExper.irregularVary('fcool',fcool)
    #thisExper.irregularVary('kappaMetals',kappaMetals)
    #thisExper.irregularVary('ZIGM',ZIGM)

    return thisExper, name
def lnprior(emceeParams):
    """Log prior over the 9 sampled parameters (sum of independent
    one-dimensional log densities); -inf off the support."""
    (fg0, muNorm, muScaling, accScaleLength,
     accNorm, accAlphaZ, accAlphaMh, accCeiling, fcool) = emceeParams
    terms = [
        lnBetaDensity(fg0, 1.0, 0.1),
        lnGammaDensity(muNorm, 1, 1),
        lnNormalDensity(muScaling, -.5, 3.0),
        lnLogNormalDensity(accScaleLength, np.log(.05), np.log(2)**2.0),
        lnGammaDensity(accNorm, 0.3, 1),
        lnNormalDensity(accAlphaZ, 0.38, 0.5),
        lnNormalDensity(accAlphaMh, -.25, 0.5),
        lnBetaDensity(accCeiling, 1.0, 1.0),
        lnBetaDensity(fcool, 1.0, 1.0),
    ]
    total = sum(terms)
    if not np.isfinite(total):
        return -np.inf
    return total
def sampleFromPrior():
    """Draw one parameter vector from the prior.

    The call order below matters: it fixes both the element order of the
    returned list (must match the unpacking in lnprior) and the order in
    which the shared numpy RNG stream is consumed.
    """
    draws = []
    draws.append(sampleFromBetaDensity(1.0, 0.1))                           # fg0
    draws.append(sampleFromGammaDensity(1.0, 1.0))                          # muNorm
    draws.append(sampleFromNormalDensity(-.5, 3.0))                         # muScaling
    draws.append(sampleFromLogNormalDensity(np.log(.05), np.log(2)**2.0))   # accScaleLength
    draws.append(sampleFromGammaDensity(0.3, 1))                            # accNorm
    draws.append(sampleFromNormalDensity(0.38, 0.5))                        # accAlphaZ
    draws.append(sampleFromNormalDensity(-0.25, 0.5))                       # accAlphaMh
    draws.append(sampleFromBetaDensity(1.0, 1.0))                           # accCeiling
    draws.append(sampleFromBetaDensity(1.0, 1.0))                           # fcool
    return draws
def lnlikelihood(emceeParams):
    """Log likelihood: run gidget for this parameter vector and score each
    model's stellar-mass--halo-mass efficiency against the Moster relation.

    Returns -inf when fewer than 3 of the models produce usable output.
    """
    # Set up the experiment
    experToRun, name = emceeParameterSpaceToGidgetExperiment(emceeParams)

    # Run the experiment.
    print "Evaluating likelihood for params ",emceeParams
    experToRun.localRun(1,0,maxTime = 3000)
    output = readoutput.Experiment(name)
    output.read()

    if(len(output.models) < 3):
        print "WARNING: setting likelihood to zero because ",len(output.models)," of the 3 models produced sensible results"
        return -np.inf

    model0 = output.models[0]
    zs = model0.var['z'].sensible()
    accum = 0

    for model in output.models:
        for ti in range(len(zs)):
            Mh = model.var['Mh'].sensible(timeIndex=ti)
            lo, hi, mid = efficiency(Mh, zs[ti])
            eff = model.var['mstar'].sensible(timeIndex=ti)/Mh
            # Gaussian in log efficiency, with the width set by the spread
            # of the perturbed Moster curves (log(hi/mid)).
            logdist = np.abs(np.log(eff/mid)/np.log(hi/mid))
            accum += -0.5 * logdist*logdist - np.log(np.log(hi/mid))

    return accum
def lnProb(emceeParams):
    """Log posterior = log prior + log likelihood; the (expensive)
    likelihood is skipped entirely when the prior is -inf."""
    pr = lnprior(emceeParams)
    if not np.isfinite(pr):
        return pr
    return pr + lnlikelihood(emceeParams)
# Moster et al. stellar-mass--halo-mass efficiency, assuming no correlation
# between the fit parameters.
def efficiency(thisMh, z):
    """Return (lower, upper, central) SMHM efficiency at halo mass thisMh
    and redshift z, each as a length-1 array.  The envelope comes from
    perturbing each Moster parameter by +/- its quoted uncertainty."""
    def Moster(Mh, mparams):
        M10, M11, N10, N11, beta10, beta11, gamma10, gamma11 = mparams
        zfac = z / (z + 1.0)
        M1 = np.power(10.0, M10 + M11 * zfac)
        Nz = N10 + N11 * zfac
        betaz = beta10 + beta11 * zfac
        gammaz = gamma10 + gamma11 * zfac
        return 2.0 * Nz / (np.power(Mh / M1, -betaz) + np.power(Mh / M1, gammaz))

    central = np.array([11.590, 1.195, 0.0351, -0.0247, 1.376, -0.826, 0.608, 0.329])
    unc = np.array([0.236, 0.353, 0.0058, 0.0069, 0.153, 0.225, 0.059, 0.173])
    Mhs = np.array([thisMh])

    # Central curve plus one curve per one-at-a-time +/- perturbation.
    curves = [Moster(Mhs, central)]
    for i in range(len(unc)):
        for sign in (1.0, -1.0):
            perturbed = copy.copy(central)
            perturbed[i] = perturbed[i] + sign * unc[i]
            curves.append(Moster(Mhs, perturbed))
    stacked = np.vstack(curves)
    return np.min(stacked, axis=0), np.max(stacked, axis=0), Moster(Mhs, central)
def run(N):
    """Advance the MCMC chain by N iterations, checkpointing after each.

    Resumes from ``<chainDirRel>.pickle`` when present.  Non-master MPI
    ranks block in ``pool.wait()`` serving likelihood evaluations and exit
    when the master closes the pool.
    """
    fn = chainDirRel+'.pickle'
    nwalkers = 500
    ndim = 9 # 15

    #eta, epsff, fg0, muNorm, muScaling, fixedQ, accScaleLength, xiREC, accNorm, accAlphaZ, accAlphaMh, accCeiling, fcool, kappaMetals, ZIGM = emceeParams
    #p00 = np.array([ .9, .1, -1., .08, .50959, .38, -.25, .7, .01 ])
    #p0 = [p00*(1.0+0.2*np.random.randn( ndim )) for i in range(nwalkers)]
    # Initialize every walker with an independent draw from the prior.
    p0 = [sampleFromPrior() for i in range(nwalkers)]

    restart = {}
    restart['currentPosition'] = p0
    restart['chain'] = None
    restart['state'] = None
    restart['prob'] = None
    restart['iterationCounter'] = 0
    restart['mcmcRunCounter'] = 0

    # Overwrite the defaults above with any saved state found on disk.
    updateRestart(fn,restart)

    global runNumber
    runNumber = restart['mcmcRunCounter']

    restart['iterationCounter'] += N
    restart['mcmcRunCounter'] += 1

    pool = MPIPool(comm=comm, loadbalance=True)
    if not pool.is_master():
        # Worker ranks only evaluate lnProb; they never return from wait().
        pool.wait()
        sys.exit(0)

    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnProb, pool=pool)
    #pos, prob, state = sampler.run_mcmc(restart['currentPosition'], N, rstate0=restart['state'], lnprob0=restart['prob'])

    counter = 0
    for result in sampler.sample(restart['currentPosition'], iterations=N, lnprob0=restart['prob'], rstate0=restart['state']):
        print "Beginning iteration number ",counter," of ",N
        pos, prob, state = result

        restart['acor'] = sampler.acor[:] # autocorr length for each param (ndim)
        restart['accept'] = sampler.acceptance_fraction[:] # acceptance frac for each walker.
        restart['currentPosition'] = pos # same shape as p0: nwalkers x ndim
        restart['state'] = state # random number generator state
        restart['prob'] = prob # nwalkers x dim
        if restart['chain'] is None:
            restart['chain'] = sampler.chain # nwalkers x niterations x ndim
        else:
            print np.shape(restart['chain']), np.shape(sampler.chain[:,-1,:]), np.shape(sampler.chain)
            print restart['mcmcRunCounter'], restart['iterationCounter']
            #restart['chain'] = np.concatenate((restart['chain'], sampler.chain[:,-1,:]), axis=1)
            print "dbg1: ",np.shape(restart['chain']), np.shape(np.zeros((nwalkers, 1, ndim))), np.shape(np.expand_dims(pos,1))
            # Append only the newest positions so a resumed chain keeps
            # growing along the iteration axis.
            restart['chain'] = np.concatenate((restart['chain'], np.expand_dims(pos, 1)),axis=1)

        # Checkpoint every iteration so a crash loses at most one step.
        saveRestart(fn,restart)
        counter+=1

    pool.close()
def tracePlots(chain, fn):
    """Plot per-parameter walker traces from an MCMC chain.

    Parameters
    ----------
    chain : array of shape (nwalkers, niterations, ndim)
    fn : str
        Output filename stem; '.png' is appended.
    """
    ndim = np.shape(chain)[2]
    # Arrange the ndim panels on a near-square grid.
    sq = np.sqrt(float(ndim))
    nr = int(np.ceil(sq))
    fig, ax = plt.subplots(nrows=nr, ncols=nr)

    for dim in range(ndim):
        i = np.mod(dim, nr)
        # Floor division: '/' is true division under Python 3 and would
        # produce a float subplot index, breaking ax[i, j].
        j = (dim - i) // nr
        for walker in range(np.shape(chain)[0]):
            ax[i, j].plot(chain[walker, :, dim], alpha=.3, ls='--')

    plt.savefig(fn + '.png')
def printRestart(restart):
    ''' Print quick summary info about the current state of the sampler. '''
    # Purely informational; reads the keys stored by run().
    print "restart info: "
    print " current shape of chain: (nwalkers x niterations x ndim) ",np.shape(restart['chain'])
    print " autocorrelation lengths for each parameter: ",restart['acor']
    print " acceptance rate for each walker: ",restart['accept']
def trianglePlot(restart,fn,burnIn=0):
    """Corner plot of the posterior samples with prior samples overlaid in
    red for comparison.

    Parameters
    ----------
    restart : dict with key 'chain' of shape (nwalkers, niterations, ndim)
    fn : output image filename
    burnIn : number of leading iterations to discard
    """
    shp = np.shape(restart['chain'])
    # NOTE(review): prs also multiplies by ndim (shp[2]), so the prior cloud
    # has ndim times more samples than the posterior cloud -- verify this is
    # intentional (e.g. for smoothness) rather than an oversight.
    prs = shp[0]*(shp[1]-burnIn)*shp[2]
    prior = [sampleFromPrior() for i in range(prs)]
    shape = np.shape(restart['chain'])
    ndim = shape[2]
    # Flatten (walker, iteration) into a single sample axis for corner().
    trifig = triangle.corner(restart['chain'][:,burnIn:,:].reshape((-1,ndim)), \
        labels=[r'$f_{g,0}$',r'$\mu_0$',r'$\alpha_\mu$', r'$r_\mathrm{acc}/r_\mathrm{vir}$', \
        r'$\epsilon_0$',r'$\alpha_z$',r'$\alpha_{M_h}$',r'$\epsilon_\mathrm{max}$',r'$f_\mathrm{cool}$'])
    trifigPrior = triangle.corner(prior, color='red', fig=trifig,plot_datapoints=False)
    trifig.savefig(fn)
def saveRestart(fn,restart):
    """Checkpoint the restart dict to fn with pickle protocol 2."""
    with open(fn,'wb') as f:
        print "checkpoint to "+fn
        pickle.dump(restart,f,2)
def updateRestart(fn, restart):
    """Merge a previously pickled restart dict stored at ``fn`` into
    ``restart`` in place; a no-op when the file does not exist."""
    if not os.path.isfile(fn):
        return
    with open(fn, 'rb') as f:
        restart.update(pickle.load(f))
if __name__=="__main__":
    # Advance the chain by 40 iterations (resuming any existing checkpoint),
    # then reload the checkpoint and produce summary diagnostics and plots.
    run(40)
    #run(10)
    restart={}
    updateRestart(chainDirRel+'.pickle', restart)
    printRestart(restart)
    trianglePlot(restart,chainDirRel+'_triangle.png',burnIn=30)
    tracePlots(restart['chain'], chainDirRel+'_trace')
|
jcforbesREPO_NAMEgidgetPATH_START.@gidget_extracted@gidget-master@mcmc.py@.PATH_END.py
|
{
"filename": "_yref.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/splom/marker/colorbar/_yref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YrefValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``yref`` enumerated property of
    ``splom.marker.colorbar`` (auto-generated plotly schema code)."""

    def __init__(
        self, plotly_name="yref", parent_name="splom.marker.colorbar", **kwargs
    ):
        super(YrefValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            # Allowed values for this enumerated property.
            values=kwargs.pop("values", ["container", "paper"]),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@splom@marker@colorbar@_yref.py@.PATH_END.py
|
{
"filename": "adapting_population_parameters.py",
"repo_name": "TRASAL/frbpoppy",
"repo_path": "frbpoppy_extracted/frbpoppy-master/examples/adapting_population_parameters.py",
"type": "Python"
}
|
"""Example of changing parameters."""
from frbpoppy import CosmicPopulation, Survey
import frbpoppy.gen_dists as gd
# Set up a population
cosmic_pop = CosmicPopulation(1e4, generate=False)
# ... or adapt the population per parameter, e.g.
cosmic_pop.set_dist(z_max=2.5)
# Or to adapt the luminosity
cosmic_pop.set_lum(model='powerlaw', low=1e44, high=1e45)
# Generate the population
cosmic_pop.generate()
# Setup a survey
survey = Survey('wsrt-apertif')
# and adapt the survey with
survey.set_beam('airy', n_sidelobes=2)
survey.snr_limit = 2
# For a full list of available arguments or parameters check the classes as
# defined in /frbpoppy/*.py
# Some more difficult examples
# Use a convolution of two distributions
cosmic_pop = CosmicPopulation.simple(n_srcs=1e4, repeaters=True)
# Draw the mean value per source from a normal distribution
mean_dist = gd.trunc_norm(mean=1, std=2, shape=int(1e4))
# And use those means as a _means_ to generate from a new Gaussian
# distribution per source
cosmic_pop.set_w(model='gauss', per_source='different',
mean=mean_dist, std=0.01*mean_dist)
# And generate afterwards
cosmic_pop.generate()
|
TRASALREPO_NAMEfrbpoppyPATH_START.@frbpoppy_extracted@frbpoppy-master@examples@adapting_population_parameters.py@.PATH_END.py
|
{
"filename": "ext_lomb_scargle_orig.py",
"repo_name": "quatrope/feets",
"repo_path": "feets_extracted/feets-master/res/paper/reports/fats_vs_feets/ext_lomb_scargle_orig.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
""""""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
import lomb
from feets import Extractor
# =============================================================================
# EXTRACTOR CLASS
# =============================================================================
class LombScargle(Extractor):
    """Lomb-Scargle periodogram features: best period (PeriodLS), its false
    alarm probability (Period_fit), and the Psi_CS / Psi_eta statistics of
    the light curve folded on twice the best period."""

    data = ['magnitude', 'time']
    features = ["PeriodLS", "Period_fit", "Psi_CS", "Psi_eta"]
    params = {"ofac": 6.}

    def _compute_ls(self, magnitude, time, ofac):
        # Peak of the Lomb-Scargle periodogram via the fasper routine.
        fx, fy, nout, jmax, prob = lomb.fasper(time, magnitude, ofac, 100.)
        peak_freq = fx[jmax]
        T = 1.0 / peak_freq
        # Phase-fold on twice the best period, normalized to [0, 1).
        folded_phase = np.mod(time, 2 * T) / (2 * T)
        return T, folded_phase, prob, peak_freq

    def _compute_cs(self, folded_data, N):
        # Range of the normalized cumulative sum of the mean-subtracted
        # folded light curve.
        sigma = np.std(folded_data)
        cum = np.cumsum(folded_data - np.mean(folded_data)) / (N * sigma)
        return np.max(cum) - np.min(cum)

    def _compute_eta(self, folded_data, N):
        # von Neumann eta statistic on the folded light curve.
        diffs = folded_data[1:] - folded_data[:-1]
        return np.sum(diffs ** 2) / ((N - 1) * np.var(folded_data))

    def fit(self, magnitude, time, ofac):
        T, folded_phase, prob, _ = self._compute_ls(magnitude, time, ofac)
        folded_data = magnitude[np.argsort(folded_phase)]
        N = len(folded_data)
        return {
            "PeriodLS": T,
            "Period_fit": prob,
            "Psi_CS": self._compute_cs(folded_data, N),
            "Psi_eta": self._compute_eta(folded_data, N),
        }
|
quatropeREPO_NAMEfeetsPATH_START.@feets_extracted@feets-master@res@paper@reports@fats_vs_feets@ext_lomb_scargle_orig.py@.PATH_END.py
|
{
"filename": "equation_losses.py",
"repo_name": "neuraloperator/neuraloperator",
"repo_path": "neuraloperator_extracted/neuraloperator-main/neuralop/losses/equation_losses.py",
"type": "Python"
}
|
import torch
import torch.nn.functional as F
from .data_losses import central_diff_2d
class BurgersEqnLoss(object):
    """
    Computes loss for Burgers' equation.

    Parameters
    ----------
    visc : viscosity coefficient in the PDE residual.
    method : residual discretization; only "fdm" is implemented.
    loss : pointwise criterion comparing du/dt with the PDE right-hand side.
    domain_length : physical extent; a scalar or a (time, space) pair.
    """

    def __init__(self, visc=0.01, method="fdm", loss=F.mse_loss, domain_length=1.0):
        super().__init__()
        self.visc = visc
        self.method = method
        self.loss = loss
        self.domain_length = domain_length
        # Promote a scalar domain length to one value per (time, space) axis.
        if not isinstance(self.domain_length, (tuple, list)):
            self.domain_length = [self.domain_length] * 2

    def fdm(self, u):
        """Finite-difference residual loss; u has shape
        (batch, 1, n_time, n_space)."""
        # remove extra channel dimensions
        u = u.squeeze(1)

        # shapes
        _, nt, nx = u.shape

        # we assume that the input is given on a regular grid
        # NOTE(review): dt uses nt-1 intervals while dx uses nx, implying a
        # periodic spatial grid -- confirm against the data loader.
        dt = self.domain_length[0] / (nt - 1)
        dx = self.domain_length[1] / nx

        # du/dt and du/dx
        dudt, dudx = central_diff_2d(u, [dt, dx], fix_x_bnd=True, fix_y_bnd=True)

        # d^2u/dxx via a roll-based (wrap-around) central stencil
        dudxx = (
            torch.roll(u, -1, dims=-1) - 2 * u + torch.roll(u, 1, dims=-1)
        ) / dx**2
        # fix boundary: replace the wrapped values with stencils built from
        # the nearest interior points at each spatial edge
        dudxx[..., 0] = (u[..., 2] - 2 * u[..., 1] + u[..., 0]) / dx**2
        dudxx[..., -1] = (u[..., -1] - 2 * u[..., -2] + u[..., -3]) / dx**2

        # right hand side of u_t = -u u_x + visc * u_xx
        right_hand_side = -dudx * u + self.visc * dudxx

        # compute the loss of the left and right hand sides of Burgers' equation
        return self.loss(dudt, right_hand_side)

    def __call__(self, y_pred, **kwargs):
        if self.method == "fdm":
            return self.fdm(u=y_pred)
        raise NotImplementedError()
class ICLoss(object):
    """
    Initial-condition loss for initial value problems: penalizes the
    mismatch between the predicted and true fields at the first time index.
    """

    def __init__(self, loss=F.mse_loss):
        super().__init__()
        # Pointwise comparison criterion.
        self.loss = loss

    def initial_condition_loss(self, y_pred, x):
        # Slice out the t = 0 plane: tensors are indexed [:, 0, 0, :].
        # assumes a (batch, channel, time, space) layout -- TODO confirm
        true_ic = x[:, 0, 0, :]
        pred_ic = y_pred[:, 0, 0, :]
        return self.loss(pred_ic, true_ic)

    def __call__(self, y_pred, x, **kwargs):
        return self.initial_condition_loss(y_pred, x)
|
neuraloperatorREPO_NAMEneuraloperatorPATH_START.@neuraloperator_extracted@neuraloperator-main@neuralop@losses@equation_losses.py@.PATH_END.py
|
{
"filename": "transmissionCurve.py",
"repo_name": "lsst/ip_isr",
"repo_path": "ip_isr_extracted/ip_isr-main/python/lsst/ip/isr/transmissionCurve.py",
"type": "Python"
}
|
# This file is part of ip_isr.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# Public API: the intermediate transmission-curve calib classes, one per
# throughput component of the system.
__all__ = ["IntermediateTransmissionCurve",
           "IntermediateOpticsTransmissionCurve",
           "IntermediateFilterTransmissionCurve",
           "IntermediateSensorTransmissionCurve",
           "IntermediateAtmosphereTransmissionCurve",
           "IntermediateSystemTransmissionCurve", ]
import numpy as np
from astropy import units as u
from astropy.units import cds
from lsst.afw.image import TransmissionCurve
from .calibType import IsrCalib
class IntermediateTransmissionCurve(IsrCalib):
    """Definition for the TransmissionCurve format used as inputs.

    Parameters
    ----------
    filename : `str`
        Filename of a transmission curve dataset.
    """

    _OBSTYPE = "transmission_curve"
    _SCHEMA = ""
    _VERSION = 1.0

    def __init__(self, filename=None):
        # Astropy table holding the raw curve data; set by ``fromTable``.
        self.data = None
        # Cached ``lsst.afw.image.TransmissionCurve``; built lazily.
        self.transmissionCurve = None
        self.isSpatiallyConstant = True

        super().__init__()

        # Because we are not persisting this calib as itself, we
        # should skip adding any other attributes.
        self.requiredAttributes.update(['isSpatiallyConstant'])

    def setMetadata(self, metadata):
        # Inherits from lsst.ip.isr.IsrCalib.setMetadata.
        super().setMetadata(metadata)

    @classmethod
    def fromTable(cls, tableList):
        """Construct intermediate transmission curve from a list of input
        tables.  Only the first table is used.

        Parameters
        ----------
        tableList : `list` [`astropy.table.Table`]
            List containing input tables.

        Returns
        -------
        calib : `lsst.ip.isr.IntermediateTransmissionCurve`
            The final calibration.
        """
        calib = cls()

        metadata = tableList[0].meta
        calib.setMetadata(metadata)
        calib.updateMetadata()

        calib.data = tableList[0]
        # Build the afw representation eagerly so malformed tables fail here.
        calib.setTransmissionCurveRepresentation()
        return calib

    def setTransmissionCurveRepresentation(self):
        """Construct transmission curve representation from the data that was
        read.

        Raises
        ------
        RuntimeError
            This is raised if no table data exists in the calibration,
            if there are array length mismatches, or if the wavelength
            sampling for multi-amp tables differ.
        """
        if self.data is None:
            raise RuntimeError("No table data was found to convert to a transmission curve!")

        # Either column name is accepted for the throughput values.
        throughputKey = None
        if 'wavelength' not in self.data.columns:
            raise RuntimeError("Expected column [wavelength] not found.")
        if 'efficiency' in self.data.columns:
            throughputKey = 'efficiency'
        elif 'throughput' in self.data.columns:
            throughputKey = 'throughput'
        else:
            raise RuntimeError("Expected columns [throughput|efficiency] not found.")

        # Tables with per-amplifier rows are averaged into a single curve.
        doAverageCurves = False
        if 'amp_name' in self.data.columns:
            doAverageCurves = True

        # These will be used to construct the
        # ``lsst.afw.image.TransmissionCurve`` object.
        wavelengths = None
        throughput = None

        if doAverageCurves:
            curveStack = None
            amplifierNames = set(self.data['amp_name'])
            # Every amplifier must share the wavelength sampling of the
            # (arbitrary) first amplifier.
            comparisonIndices = np.where(self.data['amp_name'] == next(iter(amplifierNames)))
            wavelengths = self.data[comparisonIndices]['wavelength']

            for amplifier in amplifierNames:
                indices = np.where(self.data['amp_name'] == amplifier)
                if len(self.data[indices]) != len(self.data[comparisonIndices]):
                    raise RuntimeError("Incompatible lengths in average.")
                if not np.array_equal(self.data[indices]['wavelength'], wavelengths):
                    raise RuntimeError("Mismatch in wavelength samples.")
                # Accumulate one column per amplifier, then average.
                if curveStack is not None:
                    curveStack = np.column_stack((curveStack, self.data[indices][throughputKey]))
                else:
                    curveStack = self.data[indices][throughputKey]
            throughput = np.mean(curveStack, 1)
            # This averaging operation has stripped units.
            throughput = throughput * self.data[throughputKey].unit
        else:
            wavelengths = self.data['wavelength']
            throughput = self.data[throughputKey]

        # Convert units:
        with cds.enable():
            # These need to be in Angstroms, for consistency.
            wavelengths = wavelengths.to(u.Angstrom).to_value()

            if throughput.unit != u.dimensionless_unscaled and throughput.unit != u.UnrecognizedUnit('-'):
                # These need to be fractions, not percent.
                throughput = throughput.to(u.dimensionless_unscaled).to_value()

        # Out-of-range wavelengths are assigned zero throughput.
        self.transmissionCurve = TransmissionCurve.makeSpatiallyConstant(
            throughput.astype(np.float64),
            wavelengths.astype(np.float64),
            throughputAtMin=0.0,
            throughputAtMax=0.0
        )

    def getTransmissionCurve(self):
        """Return the ``lsst.afw.image.TransmissionCurve``, building it from
        the table data on first use.

        Raises
        ------
        RuntimeError
            Raised if no transmission curve data is available.
        """
        if self.transmissionCurve is None and self.data is None:
            raise RuntimeError("No transmission curve data found.")
        if self.transmissionCurve is None:
            self.setTransmissionCurveRepresentation()
        return self.transmissionCurve

    def writeFits(self, outputFilename):
        """Write the transmission curve data to a file.

        Parameters
        ----------
        outputFilename : `str`
            Destination filename.

        Returns
        -------
        outputFilename : `str`
            The output filename actually used.

        Raises
        ------
        RuntimeError
            Raised if no transmission curve can be created.
        """
        if self.transmissionCurve is None and self.data is None:
            raise RuntimeError("No transmission curve data found.")
        if self.transmissionCurve is None:
            self.setTransmissionCurveRepresentation()

        return self.transmissionCurve.writeFits(outputFilename)
# Concrete intermediate curve types: identical behavior, distinguished only
# by the observation-type tag used for persistence.
class IntermediateSensorTransmissionCurve(IntermediateTransmissionCurve):
    _OBSTYPE = 'transmission_sensor'


class IntermediateFilterTransmissionCurve(IntermediateTransmissionCurve):
    _OBSTYPE = 'transmission_filter'


class IntermediateOpticsTransmissionCurve(IntermediateTransmissionCurve):
    _OBSTYPE = 'transmission_optics'


class IntermediateAtmosphereTransmissionCurve(IntermediateTransmissionCurve):
    _OBSTYPE = 'transmission_atmosphere'


class IntermediateSystemTransmissionCurve(IntermediateTransmissionCurve):
    _OBSTYPE = 'transmission_system'
|
lsstREPO_NAMEip_isrPATH_START.@ip_isr_extracted@ip_isr-main@python@lsst@ip@isr@transmissionCurve.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "RafiKueng/SpaghettiLens",
"repo_path": "SpaghettiLens_extracted/SpaghettiLens-master/_backup2/apps/lenses/__init__.py",
"type": "Python"
}
|
RafiKuengREPO_NAMESpaghettiLensPATH_START.@SpaghettiLens_extracted@SpaghettiLens-master@_backup2@apps@lenses@__init__.py@.PATH_END.py
|
|
{
"filename": "set_paths.py",
"repo_name": "ivastar/clear",
"repo_path": "clear_extracted/clear-master/set_paths.py",
"type": "Python"
}
|
#! /usr/bin/env python

"""Sets the paths for the CLEAR project, replacing all those that are
hardcoded in the following repos:

    clear
    mywfc3
    threedhst
    unicorn

Example:

    To get a path, import the dictionary and get key.

    from set_paths import paths
    path_to_outputs = paths['path_to_outputs']

Authors:

    C.M. Gosmeyer, Feb. 2016
"""

# All CLEAR data products live under a single root directory.
_CLEAR_ROOT = '/astro/clear/cgosmeyer/'

# 'path_to_ref_files' contains REF, CONF, Synphot, iref, jref, and templates.
# ref_files used to be Work, and REF used to be its own directory rather
# than a sub-directory.
paths = {
    'path_to_outputs': _CLEAR_ROOT + 'outputs/',
    'path_to_grizli_outputs': _CLEAR_ROOT + 'GRIZLI/outputs/',
    'path_to_RAW': _CLEAR_ROOT + 'RAW/',
    'path_to_RAW_3DHST': _CLEAR_ROOT + 'RAW_3DHST/',
    'path_to_ref_files': _CLEAR_ROOT + 'ref_files/',
    'path_to_PREPARE': _CLEAR_ROOT + 'PREPARE/',
    'path_to_software': _CLEAR_ROOT + 'software/',
    'path_to_PERSIST': _CLEAR_ROOT + 'PERSIST/',
    'path_to_Extractions': _CLEAR_ROOT + 'Extractions/',
}
|
ivastarREPO_NAMEclearPATH_START.@clear_extracted@clear-master@set_paths.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.