metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "CatwomanModel.py",
"repo_name": "kevin218/Eureka",
"repo_path": "Eureka_extracted/Eureka-main/src/eureka/S5_lightcurve_fitting/models/CatwomanModel.py",
"type": "Python"
}
|
from functools import partial
try:
import catwoman
except ImportError:
print("Could not import catwoman. Functionality may be limited.")
from .BatmanModels import BatmanTransitModel
class CatwomanTransitModel(BatmanTransitModel):
    """Transit model computed with the catwoman package."""

    def __init__(self, **kwargs):
        """Set up the catwoman transit model.

        Parameters
        ----------
        **kwargs : dict
            Additional parameters to pass to
            eureka.S5_lightcurve_fitting.models.Model.__init__().
            Can pass in the parameters, longparamlist, nchan, and
            paramtitles arguments here.
        """
        # Reuse all of the batman transit machinery from the parent class.
        super().__init__(**kwargs)
        self.name = 'catwoman transit'
        # Swap in catwoman's TransitModel, pre-binding its accuracy settings.
        self.transit_model = partial(catwoman.TransitModel,
                                     max_err=kwargs['max_err'],
                                     fac=kwargs['fac'])
        # catwoman requires a second radius-ratio parameter in the EPF.
        has_rp2 = 'rp2' in self.longparamlist[0]
        has_rprs2 = 'rprs2' in self.longparamlist[0]
        if not (has_rp2 or has_rprs2):
            raise AssertionError('You must include an rp2 parameter in your '
                                 'EPF when using catwoman.')
|
kevin218REPO_NAMEEurekaPATH_START.@Eureka_extracted@Eureka-main@src@eureka@S5_lightcurve_fitting@models@CatwomanModel.py@.PATH_END.py
|
{
"filename": "polynomials.py",
"repo_name": "igrins/plp",
"repo_path": "plp_extracted/plp-master/igrins/libs/polynomials.py",
"type": "Python"
}
|
from __future__ import print_function
import numpy.polynomial as P
import numpy.polynomial.chebyshev as cheb
def check_string(s):
    """Return True if *s* is a str instance, False otherwise."""
    # isinstance already yields a bool; the previous if/else was redundant.
    return isinstance(s, str)
def convert_to_poly(p):
    """Build a numpy polynomial object from a serialized spec.

    The spec is a sequence whose first element names the kind:
    "poly*" -> Polynomial(p[1]); "cheb*" -> Chebyshev(p[1], domain=p[2],
    window=p[3]); anything else -> None.
    """
    kind = p[0].lower()
    if kind.startswith("poly"):
        return P.Polynomial(p[1])
    if kind.startswith("cheb"):
        return cheb.Chebyshev(p[1], domain=p[2], window=p[3])
    return None
def nested_convert_to_poly(l, level=0):
    """Recursively walk nested lists, replacing serialized polynomial specs
    (as understood by convert_to_poly) with live polynomial objects.

    Strings and non-sequence leaves are returned unchanged.
    """
    if check_string(l):
        return l
    try:
        length = len(l)
    except TypeError:
        # Scalar leaf: nothing to convert.
        return l
    # A sequence whose head is a string may be a polynomial spec itself.
    if length > 0 and check_string(l[0]):
        poly = convert_to_poly(l)
        if poly is not None:
            return poly
    # Otherwise recurse into the children.
    return [nested_convert_to_poly(item, level=level + 1) for item in l]
# for b, d in bottom_up_solutions_:
# import numpy.polynomial as P
# assert b[0] == "poly"
# assert d[0] == "poly"
# bp = P.Polynomial(b[1])
# dp = P.Polynomial(d[1])
# bottom_up_solutions.append((bp, dp))
def test():
    """Smoke-test nested_convert_to_poly on a few serialized inputs."""
    print(nested_convert_to_poly([["poly", [1, 2, 3]]]))
    print(nested_convert_to_poly([["poly", [1, 2, 3]],
                                  ["poly", [1, 2, 3]]]))
    # A realistic pair of boundary solutions serialized as "poly" specs.
    specs = [[u'poly',
              [-36.70831668840952,
               0.15547914347378863,
               -0.0001331686992484067,
               3.062811926225611e-08,
               -6.9614038682757935e-12]],
             [u'poly',
              [32.497849958400614,
               0.12293651610678769,
               -8.773062254619747e-05,
               5.241888065536226e-09,
               -1.8583550163003756e-12]]]
    print(nested_convert_to_poly(specs))


if __name__ == "__main__":
    test()
|
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@igrins@libs@polynomials.py@.PATH_END.py
|
{
"filename": "VoigtFit_example.py",
"repo_name": "jkrogager/VoigtFit",
"repo_path": "VoigtFit_extracted/VoigtFit-master/test_data/VoigtFit_example.py",
"type": "Python"
}
|
import numpy as np
import VoigtFit

### Fit DLA towards quasar Q1313+1441
### Observed in X-shooter P089.A-0068

z_DLA = 1.7941
logNHI = 21.3, 0.1 # value, uncertainty
# If log(NHI) is not known use:
#logNHI = None

#### Load UVB and VIS data:
UVB_fname = 'data/test_UVB_1d.spec'
res_UVB = 8000
VIS_fname = 'data/test_VIS_1d.spec'
res_VIS = 11800

wl_uvb, spec_uvb, err_uvb = np.loadtxt(UVB_fname, unpack=True)
wl_vis, spec_vis, err_vis = np.loadtxt(VIS_fname, unpack=True)

# Alternatively, load a FITS spectrum (either a FITS table or array):
# wl, flux, err, mask, header = VoigtFit.io.load_fits_spectrum(fname)

dataset = VoigtFit.DataSet(z_DLA)
# 299792./R converts the resolving power into a velocity resolution
# (presumably c in km/s; confirm against the VoigtFit add_data docs).
dataset.add_data(wl_uvb, spec_uvb, 299792./res_UVB, err=err_uvb, normalized=False)
dataset.add_data(wl_vis, spec_vis, 299792./res_VIS, err=err_vis, normalized=False)

### Define absorption lines:
dataset.add_line('FeII_2374')
dataset.add_line('FeII_2260')
dataset.add_line('CrII_2056')
dataset.add_line('CrII_2066')
dataset.add_line('CrII_2026')
dataset.add_line('ZnII_2026')
dataset.add_line('MgI_2026')
dataset.add_line('MgI_2852')

### If a line has been defined, and you don't want to fit it
### it can either be removed from the dataset completely:
#dataset.remove_line('CrII_2056')
### or deactivated:
#dataset.deactivate_line('FeII_2374')
### A deactivated line is still present in the dataset,
### but not included in the fit. The line may still show up in the final figure.

### Define components to fit:
# dataset.reset_components()

### Add velocity components for each ion:
#                      ion     z         b   logN
dataset.add_component('FeII', 1.793532, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794060, 20, 15.0, var_z=1)
dataset.add_component('FeII', 1.794282, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.794722, 20, 14.3, var_z=1)
dataset.add_component('FeII', 1.795121, 15, 14.5, var_z=1, var_b=1)
#
# Options for the components:
# var_z=1/0 vary redshift for this component
# var_b=1/0 vary b-parameter for this component
# var_N=1/0 vary column density for this component
#
# Redshift and b-parameters can be tied.
# passing the option 'tie_z=z0_FeII' ties the redshift to the first component of FeII
# passing the option 'tie_b=b2_SiII' ties the b-parameter to the third component of SiII
#
# NOTE - the ion must be defined and the component index starts with 0
#
# The entire velocity structure can be copied from one ion to another:
dataset.copy_components(from_ion='FeII', to_ion='ZnII', logN=12.9, ref_comp=1)
# This copies the five components defined for FeII to ZnII and keeps
# the same pattern of initial guesses for column density.
# By giving ref_comp and logN, this initial guess pattern is scaled such
# that the second component has logN=12.9
#
# Individual components which are not observed for weaker lines can be removed:
#dataset.delete_component('ZnII', 4) # the index '4' refers to the fifth component
#dataset.delete_component('ZnII', 3)
#dataset.delete_component('ZnII', 2)
#dataset.delete_component('ZnII', 1)
#dataset.delete_component('ZnII', 0)
# NOTE - components should be deleted from last component to first component
# not the other way around as that messes up the component numbering.
dataset.copy_components(to_ion='CrII', from_ion='FeII', logN=13.6, ref_comp=1)
dataset.copy_components(to_ion='MgI', from_ion='FeII', logN=12.4, ref_comp=1)

# Crucial step:
dataset.prepare_dataset()

# Run the fit:
popt, chi2 = dataset.fit()

# Output best-fit parameters, total column densities and make plot:
dataset.plot_fit()
# logNHI is a (value, uncertainty) tuple, so it is truthy whenever set above.
if logNHI:
    dataset.print_metallicity(*logNHI)
dataset.print_total()

### The best-fit parameters can be accessed from the .best_fit attribute:
#logN0 = dataset.best_fit['logN0_FeII'].value
#logN0_err = dataset.best_fit['logN0_FeII'].stderr
#b1 = dataset.best_fit['b1_FeII'].value
#b1_err = dataset.best_fit['b1_FeII'].stderr
# Or you can create a list of all values:
#logN_FeII = [dataset.best_fit['logN%i_FeII' % num].value for num in range(len(dataset.components['FeII']))]
#logN_err_FeII = [dataset.best_fit['logN%i_FeII' % num].stderr for num in range(len(dataset.components['FeII']))]

dataset.save('example_fit.hdf5')

### The dataset which was defined above can be loaded like this:
# dataset = VoigtFit.load_dataset('example_fit.hdf5')
|
jkrogagerREPO_NAMEVoigtFitPATH_START.@VoigtFit_extracted@VoigtFit-master@test_data@VoigtFit_example.py@.PATH_END.py
|
{
"filename": "nemotsf.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/io/nemotsf.py",
"type": "Python"
}
|
import re
import numpy
from amuse.io import base
from amuse.units import units
from amuse.units import nbody_system
from amuse import datamodel
# Template for a NEMO ascii 'tsf' snapshot.  Rendered via TEMPLATE.format(obj)
# where obj is a Particles2Tsf instance: the {0.attr} fields pull the header
# counts and the pre-rendered mass/phase-space paragraphs from it.
TEMPLATE = \
"""set SnapShot
set Parameters
int Nobj {0.number_of_particles:d}
double Time {0.time:e}
tes
set Particles
int CoordSystem {0.coordinate_system_id:d}
double Mass[{0.number_of_particles:d}] {0.mass_paragraph:s}
double PhaseSpace[{0.number_of_particles:d}][{0.number_of_phases:d}][{0.number_of_dimensions:d}] {0.phase_paragraph:s}
tes
tes
"""
class Particles2Tsf(object):
    """Serialize an AMUSE particle set into NEMO's ascii 'tsf' snapshot text."""

    def __init__(self):
        # Counters and text fragments filled in by convert_to_string().
        self.number_of_particles = 0
        self.mass_paragraph = ""
        self.phase_paragraph = ""
        # A phase-space entry is a (position, velocity) pair of 3-vectors.
        self.number_of_phases = 2
        self.number_of_dimensions = 3
        # NEMO CoordSystem code written into the snapshot header.
        # NOTE(review): 66306 is used as-is; confirm its meaning against NEMO docs.
        self.coordinate_system_id = 66306

    def convert_to_string(self, particles, converter = None):
        """Render *particles* as a tsf snapshot string.

        If *converter* is given, the particle quantities are first wrapped
        with a generic<->SI unit converter before being read out in n-body
        units.
        """
        if not converter is None:
            particles=datamodel.ParticlesWithUnitsConverted(
                particles,
                converter.as_converter_from_generic_to_si()
            )
        # Fall back to t=0 when the set carries no timestamp.
        self.time = (particles.get_timestamp() or (0.0|nbody_system.time)).value_in(nbody_system.time)
        self.particles = particles
        self.number_of_particles = len(particles)
        masses = particles.mass.value_in(nbody_system.mass)
        velocities = particles.velocity.value_in(nbody_system.speed)
        positions = particles.position.value_in(nbody_system.length)
        for i, mass in enumerate(masses):
            self.mass_paragraph += ' '+str(mass)
            # (i % 5) == 0 inserts a line break after entries 0, 5, 10, ...
            if (i % 5) == 0:
                self.mass_paragraph += '\n '
        for i, phase in enumerate(zip(positions,velocities)):
            # Position components then velocity components for one particle;
            # (i % 1) == 0 is always true, so every particle gets its own line.
            self.phase_paragraph += ' '.join([str(j) for j in phase[0]]) + ' ' + ' '.join([str(k) for k in phase[1]])
            if (i % 1) == 0:
                self.phase_paragraph += '\n '
        return TEMPLATE.format(self)
class Tsf2Particles(object):
    """Parse a NEMO ascii 'tsf' snapshot string into an AMUSE particle set."""

    def __init__(self):
        self.number_of_particles = 0
        self.masses = []
        self.phases = []
        self.timestamp = None

    def return_numbers_in_brackets(self, line):
        """Return the integers found inside [...] brackets on *line*.

        E.g. 'double PhaseSpace[10][2][3]' -> [10, 2, 3].
        """
        # i[1:-1] strips the surrounding brackets (cleaner than the previous
        # C-style (int)(...) double-strip).
        return [int(i[1:-1]) for i in re.findall(r'\[[0-9]*\]', line)]

    def return_numbers_from_paragraph(self, lines, start, end):
        """Collect every whitespace-separated float in lines[start:end].

        Non-numeric tokens (keywords such as 'tes', empty strings) are
        skipped.
        """
        numlist = []
        for i in range(start, end):
            for chunk in lines[i].split(' '):
                try:
                    numlist.append(float(chunk))
                except ValueError:
                    # FIX: was a bare 'except:', which also swallowed
                    # KeyboardInterrupt and genuine bugs; only non-numeric
                    # tokens should be ignored here.
                    pass
        return numlist

    def read_to_ram(self, string):
        """Parse the snapshot text, filling timestamp, masses and phases."""
        lines = string.splitlines()
        timestamp_index = [i for i, oneline in enumerate(lines) if 'double Time' in oneline][0]
        self.timestamp = float(lines[timestamp_index].strip().split(' ')[2]) | nbody_system.time
        start_masses = [i for i, oneline in enumerate(lines) if 'double Mass' in oneline][0]
        start_phasespace = [i for i, oneline in enumerate(lines) if 'double PhaseSpace' in oneline][0]
        # The PhaseSpace declaration carries the array shape:
        # [no_particles][no_phases][no_dimensions].
        phaseline_numbers = self.return_numbers_in_brackets(lines[start_phasespace])
        no_particles = phaseline_numbers[0]
        no_phases = phaseline_numbers[1]
        no_dimensions = phaseline_numbers[2]
        self.number_of_particles = no_particles
        self.masses = self.return_numbers_from_paragraph(lines, start_masses, start_phasespace)
        self.phases = numpy.reshape(
            self.return_numbers_from_paragraph(lines, start_phasespace, len(lines)),
            [no_particles, no_phases, no_dimensions])

    def convert_to_particles(self, string, converter = None):
        """Return a datamodel.Particles set built from the snapshot text.

        If *converter* is given the result is wrapped so quantities convert
        from generic n-body units to SI.
        """
        self.read_to_ram(string)
        result = datamodel.Particles(self.number_of_particles)
        result.mass = self.masses | nbody_system.mass
        # phases[i] is (position, velocity) for particle i.
        result.position = [i[0] for i in self.phases] | nbody_system.length
        result.velocity = [i[1] for i in self.phases] | nbody_system.speed
        if not self.timestamp is None:
            result.savepoint(self.timestamp)
        if not converter is None:
            result = datamodel.ParticlesWithUnitsConverted(
                result,
                converter.as_converter_from_si_to_generic()
            )
        return result
class NemoFileFormatProcessor(base.FullTextFileFormatProcessor):
    """
    Process a NEMO binary structured file
    """
    provided_formats = ['nemo', 'tsf']

    def __init__(self, filename = None, stream = None, set = None, format = None):
        # 'stream' is accepted for interface compatibility but not forwarded;
        # 'set' and 'format' shadow builtins, but the names are part of the
        # established base-class constructor signature.
        base.FileFormatProcessor.__init__(self, filename, set, format)

    def load_string(self, string):
        """Parse a tsf snapshot string into an AMUSE particle set."""
        x = Tsf2Particles()
        return x.convert_to_particles(string, self.nbody_to_si_converter)

    def store_string(self):
        """Serialize self.set into a tsf snapshot string."""
        x = Particles2Tsf()
        return x.convert_to_string(self.set, self.nbody_to_si_converter)

    @base.format_option
    def nbody_to_si_converter(self):
        "NEMO datafiles store nbody data, provide a converter to store si data (None means no converter)"
        return None
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@io@nemotsf.py@.PATH_END.py
|
{
"filename": "ompi_cluster.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/clusters/ompi_cluster.py",
"type": "Python"
}
|
# Copyright 2023 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import re
from jax._src import clusters
# OMPI_MCA_orte_hnp_uri exists only when processes are launched via mpirun or mpiexec
_ORTE_URI = 'OMPI_MCA_orte_hnp_uri'
# Total number of processes and this process's global rank, as exported by
# Open MPI into every launched process's environment.
_PROCESS_COUNT = 'OMPI_COMM_WORLD_SIZE'
_PROCESS_ID = 'OMPI_COMM_WORLD_RANK'
# Rank of this process among the processes on the same node.
_LOCAL_PROCESS_ID = 'OMPI_COMM_WORLD_LOCAL_RANK'
class OmpiCluster(clusters.ClusterEnv):
    """Cluster environment for processes launched through Open MPI's
    mpirun/mpiexec, detected via the ORTE HNP URI variable."""

    name: str = "ompi"

    @classmethod
    def is_env_present(cls) -> bool:
        return _ORTE_URI in os.environ

    @classmethod
    def get_coordinator_address(cls, timeout_secs: int | None) -> str:
        # Examples of orte_uri:
        # 1531576320.0;tcp://10.96.0.1,10.148.0.1,10.108.0.1:34911
        # 1314521088.0;tcp6://[fe80::b9b:ac5d:9cf0:b858,2620:10d:c083:150e::3000:2]:43370
        orte_uri = os.environ[_ORTE_URI]

        # The jobid is always a multiple of 2^12; divide it out to reduce the
        # likelihood of port conflicts between jobs, then map the remainder
        # into the ephemeral range [(65535 - 2^12 + 1), 65535].
        job_id = int(orte_uri.split('.', maxsplit=1)[0]) // 2**12
        port = job_id % 2**12 + (65535 - 2**12 + 1)

        ip_match = re.search(r"tcp://(.+?)[,:]|tcp6://\[(.+?)[,\]]", orte_uri)
        if ip_match is None:
            raise RuntimeError('Could not parse coordinator IP address from Open MPI environment.')
        # Exactly one of the two alternation groups matched; take it.
        coordinator_ip = next(group for group in ip_match.groups() if group is not None)
        return f'{coordinator_ip}:{port}'

    @classmethod
    def get_process_count(cls) -> int:
        return int(os.environ[_PROCESS_COUNT])

    @classmethod
    def get_process_id(cls) -> int:
        return int(os.environ[_PROCESS_ID])

    @classmethod
    def get_local_process_id(cls) -> int | None:
        return int(os.environ[_LOCAL_PROCESS_ID])
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@clusters@ompi_cluster.py@.PATH_END.py
|
{
"filename": "zwarning.py",
"repo_name": "desihub/redrock",
"repo_path": "redrock_extracted/redrock-main/py/redrock/zwarning.py",
"type": "Python"
}
|
"""
redrock.zwarning
================
Mask bit definitions for zwarning.
WARNING on the warnings: not all of these are implemented yet.
"""
#- TODO: Consider using something like desispec.maskbits to provide a more
#- convenient wrapper class (probably copy it here; don't make a dependency)
#- That class as-is would bring in a yaml dependency.
class ZWarningMask(object):
    """Bit mask definitions for redshift warnings (ZWARN).

    Every attribute is a distinct power of two, so individual warnings can
    be OR'd together into a single integer mask.
    """
    SKY = 2**0 #- sky fiber
    LITTLE_COVERAGE = 2**1 #- too little wavelength coverage
    SMALL_DELTA_CHI2 = 2**2 #- chi-squared of best fit is too close to that of second best
    NEGATIVE_MODEL = 2**3 #- synthetic spectrum is negative
    MANY_OUTLIERS = 2**4 #- fraction of points more than 5 sigma away from best model is too large (>0.05)
    Z_FITLIMIT = 2**5 #- chi-squared minimum at edge of the redshift fitting range
    NEGATIVE_EMISSION = 2**6 #- a QSO line exhibits negative emission, triggered only in QSO spectra, if C_IV, C_III, Mg_II, H_beta, or H_alpha has LINEAREA + 3 * LINEAREA_ERR < 0
    UNPLUGGED = 2**7 #- the fiber was unplugged/broken, so no spectrum obtained
    BAD_TARGET = 2**8 #- catastrophically bad targeting data
    NODATA = 2**9 #- No data for this fiber, e.g. because spectrograph was broken during this exposure (ivar=0 for all pixels)
    BAD_MINFIT = 2**10 #- Bad parabola fit to the chi2 minimum
    POORDATA = 2**11 #- Poor input data quality but try fitting anyway

    #- The following bits are reserved for experiment-specific post-redrock
    #- afterburner updates to ZWARN; redrock commits to *not* setting these bits
    RESERVED16 = 2**16
    RESERVED17 = 2**17
    RESERVED18 = 2**18
    RESERVED19 = 2**19
    RESERVED20 = 2**20
    RESERVED21 = 2**21
    RESERVED22 = 2**22
    RESERVED23 = 2**23

    @classmethod
    def flags(cls):
        """Return [(name, value), ...] for all mask bits, sorted by value.

        Only upper-case names that don't start with '_' count as mask bits,
        which excludes dunders and this classmethod itself.
        """
        # FIX: the previous implementation imported numpy inside the method
        # just to argsort this short list; a plain stdlib sort on the bit
        # value produces the same ordering (all values are distinct).
        flagmask = [(key, value) for key, value in cls.__dict__.items()
                    if not key.startswith('_') and key.isupper()]
        flagmask.sort(key=lambda item: item[1])
        return flagmask
#- mask of zwarn values that indicate bad individual template fits
badfit_mask = (ZWarningMask.NEGATIVE_MODEL
               | ZWarningMask.MANY_OUTLIERS
               | ZWarningMask.Z_FITLIMIT
               | ZWarningMask.NEGATIVE_EMISSION
               | ZWarningMask.BAD_MINFIT)
|
desihubREPO_NAMEredrockPATH_START.@redrock_extracted@redrock-main@py@redrock@zwarning.py@.PATH_END.py
|
{
"filename": "_hovertemplatesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/_hovertemplatesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the scatter3d `hovertemplatesrc` source-column property."""

    def __init__(
        self, plotly_name="hovertemplatesrc", parent_name="scatter3d", **kwargs
    ):
        # Default to edit_type "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@_hovertemplatesrc.py@.PATH_END.py
|
{
"filename": "observations.py",
"repo_name": "dannyjacobs/ECHO",
"repo_path": "ECHO_extracted/ECHO-master/ECHO/observations.py",
"type": "Python"
}
|
from . import read_utils
from . import plot_utils
from . import position_utils
from . import time_utils
from . import beams
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdate
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import h5py
from astropy.time import Time
import healpy as hp
class Observation:
'''
The class object for making observations.
Args:
lat (float):, latitude of receiving antenna (degrees)
lon (float), longitude of receiving antenna (degrees)
frequency (int): the reference frequency of the transmitter (MHz)
channel (int), The reference channel of the transmitter
description (str):, text string with information about observation
'''
def __init__(self, lat, lon, frequency=None, description=None):
'''Create an observation for a particular target antenna.
'''
self.sortie_list = []
self.num_sorties = 0
self.isFlagged = False
self.lat = float(lat)
self.lon = float(lon)
if frequency: self.ref_frequency = float(frequency)
if description: self.description = description
return
def addSortie(self, tlog, ulog, data, sortie_name=None, sortie_title=None):
'''Add a sortie to the current observation class.
Args:
tlog (file): txt file for the tlog data
ulog (file): txt file for the ulog data
data (file): txt file for the receiver data
sortie_name (str): unique name for this sortie
sortie_title (str): display title for this sortie
'''
self.num_sorties+=1
self.sortie_list.append(self.Sortie(sortie_tlog=tlog, sortie_ulog=ulog, sortie_data=data, sortie_name=sortie_name, sortie_title=sortie_title, sortie_num=self.num_sorties, ref_f=self.ref_frequency ))
return
def read_sorties(self):
'''Reads in the data files for a given sortie.
'''
for sortie in self.sortie_list:
sortie.read()
sortie.get_freq_chans()
return
def flagSorties(self):
'''Flag the sortie for start and endpoints, as well as waypoints.
'''
for sortie in self.sortie_list:
print(sortie["name"])
#flag start/stop
sortie.flag_endpoints()
#flag waypoints
sortie.flag_waypoints()
#flag yaws
#sortie.flag_yaws()
return
def sort_sorties(self):
'''Sort our current list of sorties by time, using the first entry in each.
At any point we may need to sort the list of sorties by time.
It's preferable to do this rather than sort the data arrays after combining.
Returns:
s: Sortie object
'''
#get list of sorties
sorties = self.sortie_list
#check first time in each sortie
#order sorties by first time
s = sorted(sorties, key = lambda sortie:sortie.t_dict['global_t'][0,0])
return s
def combine_sorties(self):
'''Combine our current list of sorties to create a data object for position.
Sorts currently added sorties by timestamp, then aggregates into a single array
Returns:
dataproduct (array): 'Epoch Time(s), Lat(deg), Lon(deg), Alt(m from ground), Yaw(deg)' for every sortie
'''
if 'mission_data' in dir(self.sortie_list[0]):
combined_arr = self.sortie_list[0].mission_data
for sortie in self.sortie_list[1:]:
if 'mission_data' not in dir(self.sortie_list[0]):
print("Unable to combine: " +sortie.name + " mission data not flagged")
break
combined_arr = np.vstack((combined_arr, sortie.mission_data))
self.dataproduct = np.sort(combined_arr, axis=0) #remove after rewrite
else:
print("Unable to combine: " +self.sortie_list[0].name + " mission data not flagged")
return
    def interpolate_rx(self, obsNum, tuning, polarization):
        '''Interpolate the receiver data onto the drone position timestamps.

        Takes position-times of the drone and interpolates the receiver data
        to the same dimensions as position data.

        Args:
            obsNum (int): the number of the observation to use
            tuning (int): the number of the tuning to use, 1 or 2
            polarization (str): which polarization to use
                ('XX', 'YY', 'YX', 'XY')

        Returns:
            None: sets self.refined_array with columns 'Epoch Time(s),
            Lat(deg), Lon(deg), Alt(m from ground), Yaw(deg), Radio Spectra'
            (NaN rows dropped), plus self.rx_full and self.t_rx_full
        '''
        obs = 'Observation' + str(obsNum)
        tun = 'Tuning' + str(tuning)
        pol = polarization
        sorties = self.sort_sorties()
        rx_data = []
        t_rx = []
        pos_times = []
        for i, sortie in enumerate(sorties):
            # get frequency channel of sortie
            # target_data = h5py.File(sortie.data,'r')
            target_data = sortie.data_dict
            freqchan = sortie.freq_chan
            # Mission time span for this sortie, from the flagged data.
            start_time, end_time = sortie.mission_data[0, 0], sortie.mission_data[-1, 0]
            pos_times.append(list(sortie.mission_data[:, 0]))
            rx_times = target_data[obs]['time'][()]
            # Keep only receiver samples taken during this sortie's mission.
            indices = np.nonzero(np.logical_and(rx_times >= start_time, rx_times <= end_time))
            times = target_data[obs]['time'][list(indices[0])]
            t_rx.append(Time(times, scale='utc', format='unix'))
            rx_data.append(
                read_utils.dB(
                    target_data[obs][tun][pol][indices[0], freqchan]  # freqchan, 512]
                )
            )
        rx = np.concatenate(rx_data)
        t_rx = np.concatenate(t_rx)
        pos_times = np.concatenate(pos_times)
        postimes = Time(pos_times, format='unix')
        time_info = Time(t_rx, scale='utc')
        # Interpolate the receiver power onto the drone position timestamps.
        interp_rx = read_utils.interp_rx(postimes, time_info, rx)
        for i, sortie in enumerate(sorties):
            if i == 0:
                sortie_full_mission = sortie.mission_data
            else:
                sortie_full_mission = np.vstack((sortie_full_mission, sortie.mission_data))
        # Reshape the interpolated power into a column for hstack below.
        interp_arr = np.zeros((interp_rx.shape[0], 1))
        for i, interp in enumerate(interp_rx):
            interp_arr[i, 0] = interp
        refined_array = np.hstack((sortie_full_mission, interp_arr))
        # Drop any rows containing NaNs introduced by the interpolation.
        self.refined_array = refined_array[~np.isnan(refined_array).any(axis=1)]
        self.rx_full = rx
        self.t_rx_full = time_info
        return
def make_beam(self, lat=None, lon=None):
'''Read in the refined array and create a beam.
Args:
lat (): latitude of the receiver instrument
lon (): longitude of the receiver instrument
Returns:
'''
if not lat:
targetLat=self.lat
else:
targetLat=lat
if not lon:
targetLon=self.lon
else:
targetLon=lon
newBeam = beams.Beam(beam_type='healpy')
hpx_beam,hpx_rms,hpx_counts = newBeam.make_hpx_beam(self.refined_array, targetLat, targetLon)
self.hpx_beam = hpx_beam
self.hpx_rms = hpx_rms
self.hpx_counts = hpx_counts
return newBeam
def write_beam(self,prefix):
'''Write the beam file out to .fits.
Args:
prefix (str): A string used to name and identify the output files.
Returns:
'''
hp.write_map(prefix+'_beam.fits',self.hpx_beam, overwrite=True)
hp.write_map(prefix+'_rms.fits',self.hpx_rms, overwrite=True)
hp.write_map(prefix+'_counts.fits',self.hpx_counts, overwrite=True)
return
def plot_mollview(self, *args, **kwargs):
'''Plot a mollview of the beam using
Args:
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
'''
beam=self.hpx_beam
plot_utils.mollview(beam, 'Target Beam', *args, **kwargs)
return
def plot_grid(self, *args, **kwargs):
'''Plot a grid view of the beam.
Args:
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
'''
M = np.ma.array(self.hpx_beam,fill_value=hp.UNSEEN)
M = np.ma.masked_where(hp.UNSEEN==M,M)
M.fill_value = hp.UNSEEN
M -= M.max()
beams=[M]
plot_utils.healpix_grid(beams, 'Target Directivity', '1', 1, 1,*args, **kwargs)
return
    def plot_beam(self, fits=False, beamfile=None, countsfile=None):
        '''Plot the healpix beam from our observation object.

        Optionally plot beams read in from beam files.

        Args:
            fits (bool): if True, read the beam and counts maps from files
            beamfile (str): path to a beam .fits map (used when fits=True)
            countsfile (str): path to a counts .fits map (used when fits=True)
        '''
        if fits == True:
            counts = read_utils.read_map(countsfile)
            beam = read_utils.read_map(beamfile)
        else:
            # Mask unseen pixels in the cached beam before plotting.
            M = np.ma.array(self.hpx_beam, fill_value=hp.UNSEEN)
            M = np.ma.masked_where(hp.UNSEEN == M, M)
            M.fill_value = hp.UNSEEN
            counts = self.hpx_counts
            beam = M
        # Normalize so the beam peak sits at 0 dB.
        beam -= beam.max()
        THETA, PHI, IM = plot_utils.project_healpix(beam)
        X, Y = np.meshgrid(
            np.linspace(-1, 1, num=THETA.shape[0]),
            np.linspace(-1, 1, num=THETA.shape[1])
        )
        hp.mollview(beam)
        plt.figure()
        ax1 = plt.subplot(111)
        ax1.axis('equal')
        beamcoll = plot_utils.make_polycoll(beam, cmap=matplotlib.cm.jet)
        beamcoll.set_clim(-2.3, 0)
        ax1.add_collection(beamcoll)
        # Overlay zenith-angle contours at 20/40/60 degrees.
        CS = ax1.contour(X, Y, THETA*180/np.pi, [20, 40, 60], colors='k')
        CS.levels = [plot_utils.nf(val) for val in CS.levels]
        plt.clabel(CS, inline=1, fontsize=10, fmt=plot_utils.fmt)
        ax1.autoscale_view()
        ax1.set_yticklabels([])
        ax1.set_xticklabels([])
        ax1.set_title('Gridded power')
        cb = plt.colorbar(beamcoll, ax=ax1, orientation='horizontal')
        tick_locator = matplotlib.ticker.MaxNLocator(nbins=5)
        cb.locator = tick_locator
        cb.update_ticks()
        cb.set_label('dB')
        return
def plot_slices(self, figsize=None, *args, **kwargs):
'''Plot E and H plane slices of the beam
Args:
figsize (tuple): figure size for plot
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
'''
radTheta=np.pi/2
alt=np.linspace(-radTheta, radTheta)
az=np.zeros_like(alt)
M = np.ma.array(self.hpx_beam,fill_value=hp.UNSEEN)
M = np.ma.masked_where(hp.UNSEEN==M,M)
M.fill_value = hp.UNSEEN
beam_map = M
beam_map -= beam_map.max()
slice_E = plot_utils.get_interp_val(beam_map,alt,az)
slice_H = plot_utils.get_interp_val(beam_map,alt,az+np.pi/2)
plt.figure(figsize=figsize)
plt.plot(alt*180/np.pi,slice_E,'-k',lw=2, *args, **kwargs)
plt.grid()
plt.xlabel('$\\theta$ (deg)')
plt.ylabel('E plane\n [dB V/m]')
plt.figure(figsize=figsize)
plt.plot(alt*180/np.pi,slice_H,'-k',lw=2, *args, **kwargs)
plt.grid()
plt.xlabel('$\\theta$ (deg)')
plt.ylabel('H plane\n [dB V/m]')
return
def plot_polar(self,altitude, *args, **kwargs):
'''Plot polar diagrams of the received beam.
Args:
altitude: angle from zenith in degrees
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
'''
radPhi=np.pi
az=np.linspace(0, 2*radPhi,360)
alt=np.zeros_like(az)
alt[:]=altitude*np.pi/180
M = np.ma.array(self.hpx_beam,fill_value=hp.UNSEEN)
M = np.ma.masked_where(hp.UNSEEN==M,M)
M.fill_value = hp.UNSEEN
beam_map = M
beam_map -= beam_map.max()
pol_slice = plot_utils.get_interp_val(beam_map,alt,az)
plt.figure(figsize=(8,8))
ax=plt.subplot(111, projection='polar', *args, **kwargs)
ax.plot(az, pol_slice)
ax.set_theta_zero_location('N')
ax.set_theta_direction('clockwise')
ax.set_rmax(5)
ax.set_rmin(-10)
ax.grid(True)
plt.show()
plt.close()
return
def plot_isometric(self, figsize=(5,5), *args, **kwargs):
'''Plot polar diagrams of the received beam.
Args:
figsize (tuple): figure size for plot
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
'''
xs=self.refined_array[:,1]
ys=self.refined_array[:,2]
zs=self.refined_array[:,3]
fig = plot_utils.plot_position_3d(xs, ys, zs, figsize, *args, **kwargs)
#fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
#ax.plot(xs, ys, zs=0, zdir='z')
fig.axes[0].set_xlabel('Latitude')
fig.axes[0].set_ylabel('Longitude')
fig.axes[0].set_zlabel('Altitude')
return
class Sortie:
'''
A sortie is created by three files: a ulog, a tlog, and an LWA data file.
The data from these files is read and compiled into arrays.
'''
def __init__(self, sortie_tlog, sortie_ulog, sortie_data, sortie_num, ref_f, sortie_name=None, sortie_title=None):
self.ulog = sortie_ulog
self.tlog = sortie_tlog
self.data = sortie_data
self.sortie_num=sortie_num
self.title=sortie_title
self.ref_frequency=ref_f
if not sortie_name:
#self.name = "sortie"+f"{sortie_num:02d}"
self.name = "sortie%(sortienum)02d"%{'sortienum':sortie_num}
flag_mask = []
return
def get_bootstart(self):
'''Uses the GPS time to calculate the time at drone boot.
'''
bootstart = self.u_dict["gps_position_u"][0][1] - self.u_dict["gps_position_u"][0][0]
return bootstart
        def apply_bootstart(self):
            '''Put drone data on an absolute GPS-based time scale.

            Uses GPS messages in the on-board ulog to calibrate times of
            positions logged on the ground station: boot-relative time
            columns are shifted by the boot epoch and the redundant extra
            time column is dropped.
            '''
            # Epoch time at boot = (first GPS UTC time) - (first time since boot).
            bootstart = self.u_dict["gps_position_u"][0][1] - self.u_dict["gps_position_u"][0][0]
            # NOTE(review): indentation was lost in extraction; this nesting
            # (shift+delete only for non-waypoint tlog tables, store-back for
            # every non-log_type key) is the reading consistent with the data
            # layout — confirm against the original repository.
            for key, data in self.t_dict.items():
                if key != "log_type":
                    if key != 'waypoint_t':
                        # Column 1 holds boot-relative time; rebase onto epoch.
                        data[:, 0] = data[:, 1] + bootstart
                        data = np.delete(data, 1, 1)
                    self.t_dict[key] = data
            for key, data in self.u_dict.items():
                if key != "log_type":
                    data[:, 0] = data[:, 0] + bootstart
                    if key != "global_position_u":
                        data = np.delete(data, 1, 1)
                    self.u_dict[key] = data
            return
def get_freq_chans(self):
'''Find the channel for our reference frequency.
Args:
Returns:
'''
frequency=self.ref_frequency
obs='Observation1'
tun='Tuning1'
target_data = self.data_dict
center_freq = frequency*1e6 #into Hz
freq_arr = target_data[obs][tun]['freq']
get_ind = np.where(freq_arr<=center_freq)[0][-1]
return get_ind
def read(self):
'''Read in the sortie from associated data files.
The stored tlog, ulog, and receiver datafiles are opened and copied into dictionaries.
Returns:
t_dict (dict): A dictionary containing info from the sortie tlog
u_dict (dict): A dictionary containing info from the sortie ulog
data_dict (dict): A dictionary containing info from the sortie receiver datafile
'''
sortie_tlog = read_utils.read_tlog_txt(self.tlog)
sortie_ulog = read_utils.read_ulog(
self.ulog,
messages="vehicle_global_position,vehicle_local_position,vehicle_gps_position"
)
self.t_dict={
"log_type":"t",
"waypoint_t":sortie_tlog[0],
"global_t":sortie_tlog[1],
"local_t":sortie_tlog[2],
"gps_t":sortie_tlog[3]
}
self.u_dict={
"log_type":"u",
'global_position_u':sortie_ulog[0],
'local_position_u':sortie_ulog[1],
'gps_position_u':sortie_ulog[2]
}
#TODO: Add data read instead of reading in interpolate_rx
self.data_dict = read_utils.read_h5(self.data)
self.freq_chan = self.get_freq_chans()
return
#function to adjust gain?
        def flag_waypoints(self):
            '''Flag data arrays based on waypoint data.

            Placeholder: mission-waypoint flagging is not yet implemented.
            '''
            # flag based on mission waypoints
            pass
def flag_endpoints(self):
'''Flag data arrays based on mission start/end.
Reads in "global_t" and "waypoint_t" from the tlog data dictionary.
"global_t" contains continuous position data from drone telemetry during the entire sortie.
"waypoint_t" contains the position data for each navigational waypoint used to maneuver the drone.
Updates flagged_data and mission_data properties for a sortie
'''
self.flagged_data, self.mission_data = read_utils.mission_endpoint_flagging(
self.t_dict["global_t"],
self.t_dict["waypoint_t"]
)
return
        def flag_yaws(self):
            '''Flag data arrays based on yaw position (not yet implemented).'''
            # flag based on yaw position
            pass
### Plotting Functions
def plot(self):
    '''Create diagnostic position plots for the sortie.

    Figure 1: latitude/longitude ground track, comparing the tlog (blue)
    and ulog (red) records.
    Figure 2: three stacked panels of global X, Y, and Z position versus
    time, again overlaying tlog and ulog tracks.

    Figures are created on the current pyplot state; the caller is
    responsible for showing/saving them. Returns None.
    '''
    # Figure 1: lat/lon ground track from both log types
    fig1 = plt.figure()
    plt.plot(self.t_dict['global_t'][:,1],self.t_dict['global_t'][:,2],'b.')
    plt.plot(self.u_dict['global_position_u'][:,1],self.u_dict['global_position_u'][:,2],'r.', alpha=0.25)
    plt.xlabel('Latitude (deg)')
    plt.xticks(rotation=45)
    plt.ylabel('Longitude (deg)')
    plt.title(self.title + 'Global Position')
    plt.axis('square')
    plt.grid()
    plt.legend(['Tlogs','Ulogs'],bbox_to_anchor=(1.35,1))
    # Lat/lon values are large and nearly constant; keep full tick labels.
    plt.ticklabel_format(useOffset=False)
    # Figure 2: per-axis position vs. time
    fig2 = plt.figure(figsize=(5,5))
    # NOTE(review): the date formatter is built but unused -- the
    # set_major_formatter calls below are commented out. Kept for when
    # time-axis labels are re-enabled.
    date_fmt = '%m-%d %H:%M:%S'
    date_formatter = mdate.DateFormatter(date_fmt)
    ax1 = fig2.add_subplot(311)
    ax1.plot(self.t_dict['global_t'][:,0],self.t_dict['global_t'][:,1],'b-')
    ax1.plot(self.u_dict['global_position_u'][:,0],self.u_dict['global_position_u'][:,1],'r-',alpha=0.5)
    ax1.title.set_text('Global X')
    #plt.xticks(rotation=15)
    ax1.axes.get_yaxis().set_visible(False)
    #ax1.xaxis.set_major_formatter(date_formatter)
    ax2 = fig2.add_subplot(312)
    ax2.plot(self.t_dict['global_t'][:,0],self.t_dict['global_t'][:,2],'b-')
    ax2.plot(self.u_dict['global_position_u'][:,0],self.u_dict['global_position_u'][:,2],'r-',alpha=0.5)
    ax2.title.set_text('Global Y')
    #plt.xticks(rotation=15)
    ax2.axes.get_yaxis().set_visible(False)
    #ax2.xaxis.set_major_formatter(date_formatter)
    ax3 = fig2.add_subplot(313)
    ax3.plot(self.t_dict['global_t'][:,0],self.t_dict['global_t'][:,3],'b-')
    ax3.plot(self.u_dict['global_position_u'][:,0],self.u_dict['global_position_u'][:,3],'r-',alpha=0.5)
    ax3.title.set_text('Global Z')
    #plt.xticks(rotation=15)
    ax3.axes.get_yaxis().set_visible(False)
    #ax3.xaxis.set_major_formatter(date_formatter)
    #plt.legend(['Tlogs','Ulogs'],bbox_to_anchor=(1.25,7.5))
    #fig.tight_layout()
    #alt
    #position
    return
def plot_flags(self):
    '''Create a plot showing flagged positions for the sortie.

    Placeholder -- not yet implemented.

    BUGFIX: the original signature omitted `self`, so calling
    `sortie.plot_flags()` on an instance raised TypeError.
    '''
    pass
|
dannyjacobsREPO_NAMEECHOPATH_START.@ECHO_extracted@ECHO-master@ECHO@observations.py@.PATH_END.py
|
{
"filename": "rfast_noise.py",
"repo_name": "hablabx/rfast",
"repo_path": "rfast_extracted/rfast-main/rfast_noise.py",
"type": "Python"
}
|
"""Attach a noise model to a raw rfast spectrum and write a faux observation.

Reads the previously-computed raw spectrum (<fns>.raw), builds a per-point
uncertainty from either a constant or bandpass-dependent SNR, optionally
randomizes the data points, then writes <fnn>.dat, copies the input script
to <fnn>.log, and saves a quick-look plot <fnn>.png.
"""
# import statements
import time
import shutil
import sys
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, Column, MaskedColumn
from astropy.io import ascii
from rfast_routines import noise
from rfast_routines import inputs

# get input script filename
if len(sys.argv) >= 2:
    filename_scr = sys.argv[1]  # if script name provided at command line
else:
    filename_scr = input("rfast inputs script filename: ")  # otherwise ask for filename

# obtain input parameters from script (single large tuple-unpack of the
# full rfast configuration; only a subset is used below)
fnr,fnn,fns,dirout,Nlev,pmin,pmax,bg,\
species_r,f0,rdgas,fnatm,skpatm,colr,colpr,psclr,imix,\
t0,rdtmp,fntmp,skptmp,colt,colpt,psclt,\
species_l,species_c,\
lams,laml,res,regrid,smpl,opdir,\
Rp,Mp,gp,a,As,em,\
grey,phfc,w,g1,g2,g3,pt,dpc,tauc0,lamc0,fc,\
ray,cld,ref,sct,fixp,pf,fixt,tf,p10,fp10,\
src,\
alpha,ntg,\
Ts,Rs,\
ntype,snr0,lam0,rnd,\
clr,fmin,mmr,nwalkers,nstep,nburn,thin,restart,progress = inputs(filename_scr)

# input data filename
fn_dat = fns + '.raw'

# read input data (pipe-delimited: col2=wavelength, col3=bin width,
# col4/col5 = the two source-dependent spectral quantities)
data = ascii.read(dirout+fn_dat,data_start=1,delimiter='|')
lam = data['col2'][:]
dlam = data['col3'][:]
F1 = data['col4'][:]
F2 = data['col5'][:]

# snr0 constant w/wavelength case
if( len(snr0) == 1 ):
    if (ntype != 'cppm'):
        err = noise(lam0,snr0,lam,dlam,F2,Ts,ntype)
    else:
        # constant-precision case: uniform fractional uncertainty
        err = np.zeros(F2.shape[0])
        err[:] = 1/snr0
else: # otherwise snr0 is bandpass dependent
    err = np.zeros(len(lam))
    for i in range(0,len(snr0)):
        # indices of points falling in this bandpass
        ilam = np.where(np.logical_and(lam >= lams[i], lam <= laml[i]))
        if (len(lam0) == 1): # lam0 may be bandpass dependent
            lam0i = lam0
        else:
            lam0i = lam0[i]
        if (ntype != 'cppm'):
            erri = noise(lam0i,snr0[i],lam,dlam,F2,Ts,ntype)
            err[ilam] = erri[ilam]
        else:
            err[ilam] = 1/snr0[i]

# generate faux spectrum, with random noise if requested
data = np.copy(F2)
if rnd:
    for k in range(0,len(lam)):
        data[k] = np.random.normal(F2[k], err[k], 1)
        # clip unphysical negative fluxes
        if data[k] < 0:
            data[k] = 0.

# write data file (column names depend on the source/geometry mode)
if (src == 'diff' or src == 'cmbn'):
    names = ['wavelength (um)','d wavelength (um)','albedo','flux ratio','data','uncertainty']
if (src == 'thrm'):
    names = ['wavelength (um)','d wavelength (um)','Tb (K)','flux (W/m**2/um)','data','uncertainty']
if (src == 'scnd'):
    names = ['wavelength (um)','d wavelength (um)','Tb (K)','flux ratio','data','uncertainty']
if (src == 'trns'):
    names = ['wavelength (um)','d wavelength (um)','zeff (m)','transit depth','data','uncertainty']
if (src == 'phas'):
    names = ['wavelength (um)','d wavelength (um)','reflect','flux ratio','data','uncertainty']
data_out = Table([lam,dlam,F1,F2,data,err], names=names)
ascii.write(data_out,dirout+fnn+'.dat',format='fixed_width',overwrite=True)

# document parameters to file (copy of the input script for provenance)
shutil.copy(filename_scr,dirout+fnn+'.log')

# plot faux data
if (src == 'diff' or src == 'scnd' or src == 'cmbn' or src == 'phas'):
    ylab = 'Planet-to-Star Flux Ratio'
if (src == 'thrm'):
    ylab = r'Specific flux (W/m$^2$/${\rm \mu}$m)'
if (src == 'trns'):
    ylab = r'Transit depth'
plt.errorbar(lam, data, yerr=err, fmt=".k")
plt.ylabel(ylab)
plt.xlabel(r'Wavelength (' + u'\u03bc' + 'm)')
plt.savefig(dirout+fnn+'.png',format='png',bbox_inches='tight')
plt.close()
|
hablabxREPO_NAMErfastPATH_START.@rfast_extracted@rfast-main@rfast_noise.py@.PATH_END.py
|
{
"filename": "readme.md",
"repo_name": "cameronliang/BayesVP",
"repo_path": "BayesVP_extracted/BayesVP-master/readme.md",
"type": "Markdown"
}
|
bayesvp
========
``bayesvp`` is a Bayesian MCMC parallel Voigt profile fitting routine. ``bayesvp`` provides a number of helpful executable scripts that work with command line arguments (saved in your environment ``PATH``). The main functionality is the MCMC Voigt profile fitting (``bvpfit``) where the user supplies a config file that specifies parameters for the fitting. These include parameter priors, number of walkers, parallel threads, line spread function, continuum model, Bayesian model comparisons, and etc. There are utility functions that allow users to quickly create an example config file, process and plot the chains, process and plot the best fit models and more. You can find more details on the code paper, [Liang & Kravtsov 2017](http://adsabs.harvard.edu/abs/2017arXiv171009852L) or a related paper [Liang et al. 2017](http://adsabs.harvard.edu/abs/2017arXiv171000411L)
Installation
------------
I recommend installing bayesvp with pip with the ``--user`` flag:
pip install bayesvp --user
This usually puts the executable scripts in ``~/.local/bin``. For macOS users, this is ``/Users/username/.local/bin``, where ``username`` is your macOS username. Make sure that this directory is in your ``PATH``.
You can also install it system-wide and might need to add ``sudo`` in the beginning.
After installing ``bayesvp``, you should run its unit tests to ensure the package works as expected. The simplest way to do this is inside a python shell:
from bayesvp.tests import run_tests
The output should look something like this:
test_config_file_exists (bayesvp.tests.test_config.TCConfigFile) ... ok
test_default_no_continuum_params (bayesvp.tests.test_config.TCConfigFile) ... ok
test_example_mcmc_params (bayesvp.tests.test_config.TCConfigFile) ... ok
...
test_prior (bayesvp.tests.test_likelihood.TCPosterior) ... ok
test_general_intensity (bayesvp.tests.test_model.TCSingleVP) ... ok
test_simple_spec (bayesvp.tests.test_model.TCSingleVP) ... ok
----------------------------------------------------------------------
Ran 13 tests in 3.654s
OK
If you encounter any error, please send output to the author.
Usage and Tests:
----------------
You can run a full test example by executing:
bvpfit --test -pc
If the optional ``-pc`` flag is supplied, the default config file and a log are written to the current directory at which the command is run.
This will run an MCMC fit with the default config file and test spectrum (./data/example).
After the fit is finished, to process the MCMC chain, you can type:
bvp_process_model --test
You can create your own default config file and modify it to suit the needs of your particular absorption line system. Use -a for the automatic flag.
bvp_write_config -a
These executables accept command line arguments. For example, to get more info on the
usage of bvpfit, simply type:
bvpfit -h
You may want to use the newly generated default config file after the test to set up absorption line systems of your own. Instead of ``--test``, you can supply your own config
file.
bvpfit full_path_to_my_own_config_file.dat
It should just be this easy if ``bayesvp`` is installed correctly and your environment ``PATH`` knows the location of these executables.
Required libraries:
-------------------
1) numpy, scipy, matplotlib and pyfits.
2) MCMC Samplers ([kombine](http://home.uchicago.edu/~farr/kombine/kombine.html) and/or [emcee](http://dan.iel.fm/emcee/current/))
Notes/Tips/Cautions:
--------------------
1. For placing constraints on non-detections (i.e., upper limits), one should not initialize walkers too far away from 'reasonable' parameters(e.g., column density or redshift if you know it from somewhere else). For example, if one knows logN= 15 is clearly too large given the data, then walkers should be initialized such that they do not waste time to get back to smaller logN and/or get stuck at larger logN.
2. For upper limits, it is better to fix the redshift of the desired system in order to place constraints.
3. In some cases, the data are contaminated by some other lines, one can skip this contaminated region.
e.g., say, from (1215 1219) is the ideal region, but region from 1216 - 1217 is contaminated. Then just select regions in the config file, by breaking the wanted region into two regions (and so forth).
1215 1216
1217 1219
4. One can add a continuum model (polynomial of degree n) by adding a new line: “continuum 1”, which will add a linear continuum with two extra parameters (an offset and a slope). We do not recommend going higher than degree 2. The continuum model is restricted to fitting only one segment of the spectrum. Simultaneous fitting with multiple lines is not currently supported.
License & Citing
----------------
Author: Cameron Liang (cameron.liang@gmail.com)
Contributors: Andrey Kravtsov
License: MIT. Copyright (c) 2017-2018
If you use ``bayesvp``, please cite the paper:
@ARTICLE{Liang2018,
author = {{Liang}, Cameron J. and {Kravtsov}, Andrey V. and {Agertz}, Oscar},
title = "{Observing the circumgalactic medium of simulated galaxies through synthetic absorption spectra}",
journal = {\mnras},
keywords = {galaxies: haloes, quasars: absorption lines, Astrophysics - Astrophysics of Galaxies},
year = "2018",
month = "Sep",
volume = {479},
number = {2},
pages = {1822-1835},
doi = {10.1093/mnras/sty1668},
archivePrefix = {arXiv},
eprint = {1710.00411},
primaryClass = {astro-ph.GA},
adsurl = {https://ui.adsabs.harvard.edu/abs/2018MNRAS.479.1822L},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
or you can cite this one:
@ARTICLE{LiangKravtsov2017,
author = {{Liang}, C. and {Kravtsov}, A.},
title = "{BayesVP: a Bayesian Voigt profile fitting package}",
journal = {ArXiv e-prints},
archivePrefix = "arXiv",
eprint = {1710.09852},
keywords = {Astrophysics - Astrophysics of Galaxies, Astrophysics - Instrumentation and Methods for Astrophysics},
year = 2017,
month = oct,
adsurl = {http://adsabs.harvard.edu/abs/2017arXiv171009852L},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
### Release Notes
[0.2.4] - 2019-06-04: Added options to change to Agg backend from tkAgg.
[0.2.5] - 2019-12-19: Added option in config file for user defined output path
|
cameronliangREPO_NAMEBayesVPPATH_START.@BayesVP_extracted@BayesVP-master@readme.md@.PATH_END.py
|
{
"filename": "test_besancon.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/besancon/tests/test_besancon.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from contextlib import contextmanager
import pytest
from ... import besancon
from ...utils import commons
from astroquery.utils.mocks import MockResponse
# SKIP - don't run tests because Besancon folks don't want them (based on
# the fact that your@email.net is now rejected)
# def test_besancon_reader():
# assert os.path.exists('besancon_test.txt')
# B = asciitable.read('t/besancon_test.txt',
# Reader=besancon.BesanconFixed, guess=False)
# assert len(B) == 12
#
# def test_basic():
# besancon_model = besancon.request_besancon(
# 'astropy.astroquery@gmail.com',10.5,0.0,soli=0.0001)
# B = asciitable.read(besancon_model,
# Reader=besancon.BesanconFixed, guess=False)
# B.pprint()
# Shorten the network timeouts so the suite never hangs waiting on the
# (deliberately unreachable) Besancon service.
besancon.Besancon.TIMEOUT = 1
besancon.Besancon.ping_delay = 1

# Fixture files under tests/data, exercised by test_reader below.
DATA_FILES = ('besancon_test.txt', 'besancon_test2.txt')
def data_path(filename):
    """Return the path of *filename* inside this test module's data directory."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
@pytest.mark.parametrize(('filename', 'length', 'ncols', 'd1', 'mv1'),
                         zip(DATA_FILES, (13, 6), (18, 24), (0.091, 0.111),
                             (10.20, 9.70)))
def test_reader(filename, length, ncols, d1, mv1):
    # Parse a canned Besancon model output and spot-check table shape and
    # the first row's distance (Dist) and absolute magnitude (Mv).
    besancon_model = data_path(filename)
    with open(besancon_model, 'r') as f:
        data = f.read()
    B = besancon.core.parse_besancon_model_string(data)
    B.pprint()
    assert len(B) == length
    assert len(B.columns) == ncols
    assert B['Dist'][0] == d1
    assert B['Mv'][0] == mv1
@pytest.fixture
def patch_post(request):
    # Monkeypatch Besancon._request with the canned response below so no
    # real HTTP request is ever issued from the tests.
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(besancon.Besancon, '_request', post_mockreturn)
    return mp
@pytest.fixture
def patch_get_readable_fileobj(request):
    # Redirect commons.get_readable_fileobj so the known result URL
    # (containing '1376235131.430670') is served from a local fixture file
    # instead of the network; other filenames pass through unchanged.
    @contextmanager
    def get_readable_fileobj_mockreturn(filename, **kwargs):
        if isinstance(filename, str):
            if '1376235131.430670' in filename:
                # Match the requested mode: binary vs. text.
                is_binary = kwargs.get('encoding', None) == 'binary'
                with open(data_path('1376235131.430670.resu'), "r" + ('b' if is_binary else '')) as file_obj:
                    yield file_obj
            else:
                yield filename
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(commons, 'get_readable_fileobj',
               get_readable_fileobj_mockreturn)
    return mp
def post_mockreturn(method, url, data, timeout=10, stream=True, **kwargs):
    """Stand-in for Besancon._request: return a canned query-result page."""
    fixture = data_path('query_return.iframe.html')
    with open(fixture, 'rb') as fh:
        payload = fh.read()
    return MockResponseBesancon(payload, fixture, **kwargs)
def test_query(patch_post, patch_get_readable_fileobj):
    # Smoke test: with networking fully mocked, a query should complete
    # and yield a parsed (non-None) result.
    result = besancon.Besancon.query(0, 0, 'a@b.com')
    assert result is not None
def test_default_params():
    """ Ensure that the default parameters of the query match the default
    parameters on the web form (excepting coordinates and e-mail address) """
    data = besancon.Besancon.query_async(0, 0, 'a@b.com',
                                         get_query_payload=True)
    with open(data_path('default_params.txt')) as f:
        # NOTE(review): eval() on a repo-controlled fixture file -- trusted
        # input here, but ast.literal_eval would be the safer choice.
        dp = eval(f.read())
    for k in dp:
        assert dp[k] == data[k]
class MockResponseBesancon(MockResponse):
    """MockResponse variant that also records the source URL and headers.

    BUGFIX: the original used a mutable default argument (``headers={}``),
    which would be shared across every instance constructed without an
    explicit headers dict.
    """

    def __init__(self, content=None, url=None, headers=None, **kwargs):
        super().__init__(content)
        self.raw = url  # StringIO.StringIO(url)
        # Give each instance its own dict instead of a shared default.
        self.headers = {} if headers is None else headers
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@besancon@tests@test_besancon.py@.PATH_END.py
|
{
"filename": "user.py",
"repo_name": "rodluger/everest",
"repo_path": "everest_extracted/everest-master/everest/user.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`user.py` - User Python routines
----------------------------------------
This is the gateway to the :py:obj:`everest` catalog, containing
all of the user-facing code.
- :py:class:`Everest` is the main user-facing class for
interfacing with the catalog
- :py:func:`DVS` downloads and plots the data validation
summary for a given target
Instantiating an :py:class:`Everest` class automatically downloads
the light curve from the online MAST catalog. So, to get started,
all you need to do is run
.. code-block :: python
import everest
star = everest.Everest(201367065)
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from . import __version__ as EVEREST_VERSION
from . import missions
from .basecamp import Basecamp
from .detrender import pPLD
from .gp import GetCovariance, GP
from .config import QUALITY_BAD, QUALITY_NAN, QUALITY_OUT, QUALITY_REC, \
QUALITY_TRN, EVEREST_DEV, EVEREST_FITS, EVEREST_MAJOR_MINOR
from .utils import InitLog, Formatter
import george
import os
import sys
import platform
import numpy as np
import matplotlib.pyplot as pl
try:
import pyfits
except ImportError:
try:
import astropy.io.fits as pyfits
except ImportError:
raise Exception('Please install the `pyfits` package.')
import subprocess
import six
from six.moves import urllib
from tempfile import NamedTemporaryFile
import shutil
from distutils.version import LooseVersion
import k2plr
k2plr_client = k2plr.API()
import logging
log = logging.getLogger(__name__)
def Search(ID, mission='k2'):
    """Explain why a given target is not in the EVEREST database.

    Walks through the common failure modes (unsupported campaign, not a
    star, missing pixel data, missing K2SFF aperture, extreme saturation)
    and prints a diagnostic message for the first one that applies.

    :param int ID: The target (EPIC) ID.
    :param str mission: The mission name; only `k2` is supported for now.
    """
    # Only K2 supported for now
    assert mission == 'k2', "Only the K2 mission is supported for now."
    print("Searching for target %d..." % ID)
    # First check if it is in the database
    season = missions.k2.Season(ID)
    if season in [91, 92, [91, 92]]:
        print("Campaign 9 is currently not part of the EVEREST catalog.")
        return
    elif season == 101:
        print("The first half of campaign 10 is not currently part of " +
              "the EVEREST catalog.")
        return
    elif season is not None:
        print("Target is in campaign %d of the EVEREST catalog." % season)
        return
    # Get the kplr object
    star = k2plr_client.k2_star(ID)
    # First check if this is a star
    if star.objtype.lower() != "star":
        print("Target is of type %s, not STAR, " % star.objtype +
              "and is therefore not included in the EVEREST catalog.")
        return
    # Let's try to download the pixel data and see what happens
    try:
        tpf = star.get_target_pixel_files()
    except Exception:  # narrowed from a bare `except:` (keeps Ctrl-C working)
        print("Unable to download the raw pixel files for this target.")
        return
    if len(tpf) == 0:
        print("Raw pixel files are not available for this target. Looks like " +
              "data may not have been collected for it.")
        return
    # Perhaps it's in a campaign we haven't gotten to yet
    if tpf[0].sci_campaign not in missions.k2.SEASONS:
        print("Targets for campaign %d are not yet available."
              % tpf[0].sci_campaign)
        return
    # Let's try to download the K2SFF data
    try:
        k2sff = k2plr.K2SFF(ID)
    except Exception:  # narrowed from a bare `except:`
        print("Error downloading the K2SFF light curve for this target. " +
              "Currently, EVEREST uses the K2SFF apertures to perform " +
              "photometry. This is likely to change in the next version.")
        return
    # Let's try to get the aperture
    try:
        assert np.count_nonzero(k2sff.apertures[15]), "Invalid aperture."
    except Exception:  # narrowed from a bare `except:`
        print("Unable to retrieve the K2SFF aperture for this target. " +
              "Currently, EVEREST uses the K2SFF apertures to perform " +
              "photometry. This is likely to change in the next version.")
        return
    # Perhaps the star is *super* saturated and we didn't bother
    # de-trending it?
    if star.kp < 8:
        # BUGFIX: the original never applied `% star.kp`, so the literal
        # "%.1f" placeholder was printed.
        print("Target has Kp = %.1f and is too saturated "
              "for proper de-trending with EVEREST." % star.kp)
        return
    # I'm out of ideas
    # BUGFIX: added the missing space between sentences.
    print("I'm not sure why this target isn't in the EVEREST catalog. "
          "You can try de-trending it yourself:")
    print("http://faculty.washington.edu/rodluger/everest/pipeline.html")
    return
def DownloadFile(ID, season=None, mission='k2', cadence='lc',
                 filename=None, clobber=False):
    '''
    Download a given :py:mod:`everest` file from MAST.

    :param int ID: The target ID
    :param season: The observing season (campaign); looked up from the \
                   ID if :py:obj:`None`
    :param str mission: The mission name. Default `k2`
    :param str cadence: The light curve cadence. Default `lc`
    :param str filename: The name of the file to download. Default \
                         :py:obj:`None`, in which case the default \
                         FITS file is retrieved.
    :param bool clobber: If :py:obj:`True`, download and overwrite \
                         existing files. Default :py:obj:`False`

    :returns: The local path of the downloaded (or cached) file.
    '''
    # Get season
    if season is None:
        season = getattr(missions, mission).Season(ID)
        if hasattr(season, '__len__'):
            raise AttributeError(
                "Please choose a `season` for this target: %s." % season)
    if season is None:
        if getattr(missions, mission).ISTARGET(ID):
            raise ValueError('Target not found in local database. ' +
                             'Run `everest.Search(%d)` for more information.'
                             % ID)
        else:
            raise ValueError('Invalid target ID.')
    path = getattr(missions, mission).TargetDirectory(ID, season)
    relpath = getattr(missions, mission).TargetDirectory(
        ID, season, relative=True)
    if filename is None:
        filename = getattr(missions, mission).FITSFile(ID, season, cadence)
    # Check if file exists; a cached copy short-circuits the download
    if not os.path.exists(path):
        os.makedirs(path)
    elif os.path.exists(os.path.join(path, filename)) and not clobber:
        log.info('Found cached file.')
        return os.path.join(path, filename)
    # Get file URL
    log.info('Downloading the file...')
    fitsurl = getattr(missions, mission).FITSUrl(ID, season)
    if not fitsurl.endswith('/'):
        fitsurl += '/'
    # Download the data
    r = urllib.request.Request(fitsurl + filename)
    try:
        handler = urllib.request.urlopen(r)
        code = handler.getcode()
    except (urllib.error.HTTPError, urllib.error.URLError):
        code = 0
    if int(code) == 200:
        # Read the data
        data = handler.read()
        # Atomically save to disk: write a temp file, then move into place
        # so readers never see a half-written FITS file.
        f = NamedTemporaryFile("wb", delete=False)
        f.write(data)
        f.flush()
        os.fsync(f.fileno())
        f.close()
        shutil.move(f.name, os.path.join(path, filename))
    else:
        # Something went wrong!
        log.error("Error code {0} for URL '{1}'".format(
            code, fitsurl + filename))
        # If the files can be accessed by `ssh`, let's try that
        # (development version only!)
        if EVEREST_FITS is None:
            raise Exception("Unable to locate the file.")
        # Get the url
        inpath = os.path.join(EVEREST_FITS, relpath, filename)
        outpath = os.path.join(path, filename)
        # Download the data
        log.info("Accessing file via `scp`...")
        subprocess.call(['scp', inpath, outpath])
    # Success?
    if os.path.exists(os.path.join(path, filename)):
        return os.path.join(path, filename)
    else:
        # BUGFIX: added the missing space between sentences (the original
        # concatenation produced "...the file.Run `everest.Search...`").
        raise Exception("Unable to download the file. " +
                        "Run `everest.Search(%d)` to troubleshoot." % ID)
def DVS(ID, season=None, mission='k2', clobber=False,
        cadence='lc', model='nPLD'):
    '''
    Show the data validation summary (DVS) for a given target.

    :param int ID: The target ID
    :param season: The observing season (campaign); looked up from the \
                   ID if :py:obj:`None`
    :param str mission: The mission name. Default `k2`
    :param bool clobber: If :py:obj:`True`, download and overwrite \
                         existing files. Default :py:obj:`False`
    :param str cadence: The light curve cadence. Default `lc`
    :param str model: The de-trending model whose DVS to fetch. \
                      Default `nPLD`
    '''
    # Get season
    if season is None:
        season = getattr(missions, mission).Season(ID)
        if hasattr(season, '__len__'):
            raise AttributeError(
                "Please choose a `season` for this target: %s." % season)
    # Get file name
    if model == 'nPLD':
        filename = getattr(missions, mission).DVSFile(ID, season, cadence)
    else:
        if cadence == 'sc':
            filename = model + '.sc.pdf'
        else:
            filename = model + '.pdf'
    file = DownloadFile(ID, season=season,
                        mission=mission,
                        filename=filename,
                        clobber=clobber)
    # Open the PDF with the platform's default viewer. This is best-effort:
    # any failure falls through to printing the file path instead.
    try:
        if platform.system().lower().startswith('darwin'):
            subprocess.call(['open', file])
        elif os.name == 'nt':
            os.startfile(file)
        elif os.name == 'posix':
            subprocess.call(['xdg-open', file])
        else:
            raise Exception("Unsupported platform.")
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt still works.
        log.info("Unable to open the pdf. Try opening it manually:")
        log.info(file)
class Everest(Basecamp):
'''
The main user-accessible :py:mod:`everest` class for interfacing with the
light curves stored on MAST. Instantiating this class downloads the current
:py:mod:`everest` FITS file for the requested target and populates the
class instance with the light curve data and attributes. Many of the
methods are inherited from :py:class:`everest.Basecamp`.
:param int ID: The target ID. For `k2`, this is the `EPIC` \
number of the star.
:param str mission: The mission name. Default `k2`
:param bool quiet: Suppress :py:obj:`stdout` messages? \
Default :py:obj:`False`
:param str cadence: The light curve cadence. Default `lc`
:param bool clobber: If :py:obj:`True`, download and overwrite existing \
files. Default :py:obj:`False`
'''
def __init__(self, ID, season=None, mission='k2', quiet=False,
             clobber=False, cadence='lc', **kwargs):
    '''
    Download the target's FITS file (if needed) and populate the
    instance with its light curve data.

    :param int ID: The target ID
    :param season: The observing season (campaign); looked up from \
                   the ID if :py:obj:`None`
    :param str mission: The mission name. Default `k2`
    :param bool quiet: Suppress :py:obj:`stdout` messages? \
                       Default :py:obj:`False`
    :param bool clobber: Re-download and overwrite existing files? \
                         Default :py:obj:`False`
    :param str cadence: `lc` (long) or `sc` (short) cadence. Default `lc`
    '''
    # Read kwargs
    self.ID = ID
    self.mission = mission
    self.clobber = clobber
    if season is not None:
        self._season = season
    # Initialize preliminary logging
    if not quiet:
        screen_level = logging.DEBUG
    else:
        screen_level = logging.CRITICAL
    InitLog(None, logging.DEBUG, screen_level, False)
    # Check the cadence
    if cadence not in ['lc', 'sc']:
        raise ValueError("Invalid cadence selected.")
    self.cadence = cadence
    # Download the FITS file if necessary
    self.fitsfile = DownloadFile(
        ID, season=season, mission=mission, clobber=clobber,
        cadence=cadence)
    self.model_name = pyfits.getheader(self.fitsfile, 1)['MODEL']
    self._weights = None
    # Check the pipeline version. Do we need to upgrade?
    subversion = pyfits.getheader(self.fitsfile, 1).get('SUBVER', None)
    if subversion is not None:
        if LooseVersion(subversion) > LooseVersion(EVEREST_VERSION):
            # BUGFIX: in the original, `%` bound only to the "is %s.\n"
            # fragment (one placeholder, two arguments), raising TypeError
            # instead of this message. Use a single format string.
            raise Exception(
                "Desired light curve was generated with EVEREST "
                "version %s, but current version is %s.\n"
                "Please upgrade EVEREST by running "
                "`pip install everest-pipeline --upgrade`."
                % (subversion, EVEREST_VERSION))
    # Load the FITS file
    self.load_fits()
def __repr__(self):
    '''Short, unambiguous identifier for this target.'''
    return "<everest.Everest({:d})>".format(self.ID)
@property
def name(self):
    '''
    Returns the name of the :py:mod:`everest` model used
    to generate this light curve (read from the FITS header
    when the file was loaded).
    '''
    return self.model_name
def reset(self):
    '''
    Re-loads the FITS file from disk and discards any cached PLD
    weights so they are recomputed on the next :py:meth:`compute`.
    '''
    self.load_fits()
    self._weights = None
def compute(self):
    '''
    Re-compute the :py:mod:`everest` model for the given
    value of :py:obj:`lambda`.

    For long cadence `k2` light curves, this should take several
    seconds. For short cadence `k2` light curves, it may take a
    few minutes. Note that this is a simple wrapper around
    :py:func:`everest.Basecamp.compute`.
    '''
    # If we're doing iterative PLD, get the normalization first
    if self.model_name == 'iPLD':
        self._get_norm()
    # Compute as usual
    super(Everest, self).compute()
    # Restore NaNs at the cadences flagged as NaN in the quality array
    self.flux[self.nanmask] = np.nan
def _get_norm(self):
    '''
    Computes the PLD flux normalization array and stores it in
    ``self._norm`` (the raw flux minus a per-chunk regression model).

    ..note :: `iPLD` model **only**.
    '''
    log.info('Computing the PLD normalization...')
    # Loop over all chunks (one model per light-curve segment)
    mod = [None for b in self.breakpoints]
    for b, brkpt in enumerate(self.breakpoints):
        # Unmasked chunk
        c = self.get_chunk(b)
        # Masked chunk (original mask plus user transit mask)
        inds = np.array(
            list(set(np.concatenate([self.transitmask,
                                     self.recmask]))), dtype=int)
        M = np.delete(np.arange(len(self.time)), inds, axis=0)
        # Chunk boundaries are padded by `bpad` cadences for later blending
        if b > 0:
            m = M[(M > self.breakpoints[b - 1] - self.bpad)
                  & (M <= self.breakpoints[b] + self.bpad)]
        else:
            m = M[M <= self.breakpoints[b] + self.bpad]
        # This block of the masked covariance matrix
        mK = GetCovariance(self.kernel, self.kernel_params,
                           self.time[m], self.fraw_err[m])
        # Get median
        med = np.nanmedian(self.fraw[m])
        # Normalize the flux
        f = self.fraw[m] - med
        # The X^2 matrices (regularized design-matrix products)
        A = np.zeros((len(m), len(m)))
        B = np.zeros((len(c), len(m)))
        # Loop over all PLD orders, accumulating each order's contribution
        for n in range(self.pld_order):
            XM = self.X(n, m)
            XC = self.X(n, c)
            A += self.reclam[b][n] * np.dot(XM, XM.T)
            B += self.reclam[b][n] * np.dot(XC, XM.T)
            del XM, XC
        # Solve the GLS system and predict the model on the full chunk
        W = np.linalg.solve(mK + A, f)
        mod[b] = np.dot(B, W)
        del A, B, W
    # Join the chunks after applying the correct offset so segments
    # line up at their shared boundary cadence
    if len(mod) > 1:
        # First chunk
        model = mod[0][:-self.bpad]
        # Center chunks
        for m in mod[1:-1]:
            offset = model[-1] - m[self.bpad - 1]
            model = np.concatenate(
                [model, m[self.bpad:-self.bpad] + offset])
        # Last chunk
        offset = model[-1] - mod[-1][self.bpad - 1]
        model = np.concatenate([model, mod[-1][self.bpad:] + offset])
    else:
        model = mod[0]
    # Subtract the global median
    model -= np.nanmedian(model)
    # Save the norm
    self._norm = self.fraw - model
def load_fits(self):
    '''
    Load the FITS file from disk and populate the class instance
    with its data: light curve arrays, de-trending parameters,
    quality masks, and CBV columns.
    '''
    log.info("Loading FITS file for %d." % (self.ID))
    with pyfits.open(self.fitsfile) as f:
        # Params and long cadence data (HDU layout: 0=primary header,
        # 1=light curve table, 2=pixel data, 3=aperture, 4=postage
        # stamps, 5=hi-res image if present)
        self.loaded = True
        self.is_parent = False
        try:
            self.X1N = f[2].data['X1N']
        except KeyError:
            self.X1N = None
        self.aperture = f[3].data
        self.aperture_name = f[1].header['APNAME']
        try:
            self.bkg = f[1].data['BKG']
        except KeyError:
            self.bkg = 0.
        self.bpad = f[1].header['BPAD']
        self.cbv_minstars = []
        self.cbv_num = f[1].header.get('CBVNUM', 1)
        self.cbv_niter = f[1].header['CBVNITER']
        self.cbv_win = f[1].header['CBVWIN']
        self.cbv_order = f[1].header['CBVORD']
        self.cadn = f[1].data['CADN']
        self.cdivs = f[1].header['CDIVS']
        self.cdpp = f[1].header['CDPP']
        self.cdppr = f[1].header['CDPPR']
        self.cdppv = f[1].header['CDPPV']
        self.cdppg = f[1].header['CDPPG']
        self.cv_min = f[1].header['CVMIN']
        self.fpix = f[2].data['FPIX']
        self.pixel_images = [f[4].data['STAMP1'],
                             f[4].data['STAMP2'], f[4].data['STAMP3']]
        self.fraw = f[1].data['FRAW']
        self.fraw_err = f[1].data['FRAW_ERR']
        self.giter = f[1].header['GITER']
        self.gmaxf = f[1].header.get('GMAXF', 200)
        self.gp_factor = f[1].header['GPFACTOR']
        # Hi-res image HDU may be absent entirely.
        # NOTE(review): bare `except:` kept as-is in this doc-only pass.
        try:
            self.hires = f[5].data
        except:
            self.hires = None
        self.kernel_params = np.array([f[1].header['GPWHITE'],
                                       f[1].header['GPRED'],
                                       f[1].header['GPTAU']])
        # Extended kernels carry two extra parameters; fall back to Basic
        try:
            self.kernel = f[1].header['KERNEL']
            self.kernel_params = np.append(
                self.kernel_params,
                [f[1].header['GPGAMMA'],
                 f[1].header['GPPER']])
        except KeyError:
            self.kernel = 'Basic'
        self.pld_order = f[1].header['PLDORDER']
        self.lam_idx = self.pld_order
        self.leps = f[1].header['LEPS']
        self.mag = f[0].header['KEPMAG']
        self.max_pixels = f[1].header['MAXPIX']
        self.model = self.fraw - f[1].data['FLUX']
        # Nearby sources: numbered header cards until the first gap
        self.nearby = []
        for i in range(99):
            try:
                ID = f[1].header['NRBY%02dID' % (i + 1)]
                x = f[1].header['NRBY%02dX' % (i + 1)]
                y = f[1].header['NRBY%02dY' % (i + 1)]
                mag = f[1].header['NRBY%02dM' % (i + 1)]
                x0 = f[1].header['NRBY%02dX0' % (i + 1)]
                y0 = f[1].header['NRBY%02dY0' % (i + 1)]
                self.nearby.append(
                    {'ID': ID, 'x': x, 'y': y,
                     'mag': mag, 'x0': x0, 'y0': y0})
            except KeyError:
                break
        self.neighbors = []
        for c in range(99):
            try:
                self.neighbors.append(f[1].header['NEIGH%02d' % (c + 1)])
            except KeyError:
                break
        self.oiter = f[1].header['OITER']
        self.optimize_gp = f[1].header['OPTGP']
        self.osigma = f[1].header['OSIGMA']
        # Known planets: (t0, period, duration) tuples
        self.planets = []
        for i in range(99):
            try:
                t0 = f[1].header['P%02dT0' % (i + 1)]
                per = f[1].header['P%02dPER' % (i + 1)]
                dur = f[1].header['P%02dDUR' % (i + 1)]
                self.planets.append((t0, per, dur))
            except KeyError:
                break
        self.quality = f[1].data['QUALITY']
        self.saturated = f[1].header['SATUR']
        self.saturation_tolerance = f[1].header['SATTOL']
        self.time = f[1].data['TIME']
        self._norm = np.array(self.fraw)
        # Chunk arrays (one entry per light-curve segment)
        self.breakpoints = []
        self.cdpp_arr = []
        self.cdppv_arr = []
        self.cdppr_arr = []
        for c in range(99):
            try:
                self.breakpoints.append(f[1].header['BRKPT%02d' % (c + 1)])
                self.cdpp_arr.append(f[1].header['CDPP%02d' % (c + 1)])
                self.cdppr_arr.append(f[1].header['CDPPR%02d' % (c + 1)])
                self.cdppv_arr.append(f[1].header['CDPPV%02d' % (c + 1)])
            except KeyError:
                break
        # Regularization (lambda) per chunk per PLD order
        self.lam = [[f[1].header['LAMB%02d%02d' % (c + 1, o + 1)]
                     for o in range(self.pld_order)]
                    for c in range(len(self.breakpoints))]
        if self.model_name == 'iPLD':
            self.reclam = [[f[1].header['RECL%02d%02d' % (c + 1, o + 1)]
                            for o in range(self.pld_order)]
                           for c in range(len(self.breakpoints))]
        # Masks: cadence indices extracted from the QUALITY bit flags
        self.badmask = np.where(self.quality & 2 ** (QUALITY_BAD - 1))[0]
        self.nanmask = np.where(self.quality & 2 ** (QUALITY_NAN - 1))[0]
        self.outmask = np.where(self.quality & 2 ** (QUALITY_OUT - 1))[0]
        self.recmask = np.where(self.quality & 2 ** (QUALITY_REC - 1))[0]
        self.transitmask = np.where(
            self.quality & 2 ** (QUALITY_TRN - 1))[0]
        # CBVs: stack however many CBVxx columns exist into one matrix
        self.XCBV = np.empty((len(self.time), 0))
        for i in range(99):
            try:
                self.XCBV = np.hstack(
                    [self.XCBV,
                     f[1].data['CBV%02d' % (i + 1)].reshape(-1, 1)])
            except KeyError:
                break
    # These are not stored in the fits file; we don't need them
    self.saturated_aperture_name = None
    self.apertures = None
    self.Xpos = None
    self.Ypos = None
    self.fpix_err = None
    self.parent_model = None
    self.lambda_arr = None
    self.meta = None
    self._transit_model = None
    self.transit_depth = None
def plot_aperture(self, show=True):
'''
Plot sample postage stamps for the target with the aperture
outline marked, as well as a high-res target image (if available).
:param bool show: Show the plot or return the `(fig, ax)` instance? \
Default :py:obj:`True`
'''
# Set up the axes
fig, ax = pl.subplots(2, 2, figsize=(6, 8))
fig.subplots_adjust(top=0.975, bottom=0.025, left=0.05,
right=0.95, hspace=0.05, wspace=0.05)
ax = ax.flatten()
fig.canvas.set_window_title(
'%s %d' % (self._mission.IDSTRING, self.ID))
super(Everest, self).plot_aperture(ax, labelsize=12)
if show:
pl.show()
pl.close()
else:
return fig, ax
    def plot(self, show=True, plot_raw=True, plot_gp=True,
             plot_bad=True, plot_out=True, plot_cbv=True,
             simple=False):
        '''
        Plots the final de-trended light curve.

        :param bool show: Show the plot or return the `(fig, ax)` instance? \
               Default :py:obj:`True`
        :param bool plot_raw: Show the raw light curve? Default :py:obj:`True`
        :param bool plot_gp: Show the GP model prediction? \
               Default :py:obj:`True`
        :param bool plot_bad: Show and indicate the bad data points? \
               Default :py:obj:`True`
        :param bool plot_out: Show and indicate the outliers? \
               Default :py:obj:`True`
        :param bool plot_cbv: Plot the CBV-corrected light curve? \
               Default :py:obj:`True`. If :py:obj:`False`, plots the \
               de-trended but uncorrected light curve.
        :param bool simple: Not referenced anywhere in this method body.
        '''
        log.info('Plotting the light curve...')
        # Set up axes: two stacked panels (de-trended below, raw above),
        # or a single de-trended panel when plot_raw is False
        if plot_raw:
            fig, axes = pl.subplots(2, figsize=(13, 9), sharex=True)
            fig.subplots_adjust(hspace=0.1)
            # Reorder so axes[0] is the de-trended (bottom) panel
            axes = [axes[1], axes[0]]
            if plot_cbv:
                fluxes = [self.fcor, self.fraw]
            else:
                fluxes = [self.flux, self.fraw]
            labels = ['EVEREST Flux', 'Raw Flux']
        else:
            fig, axes = pl.subplots(1, figsize=(13, 6))
            axes = [axes]
            if plot_cbv:
                fluxes = [self.fcor]
            else:
                fluxes = [self.flux]
            labels = ['EVEREST Flux']
        fig.canvas.set_window_title('EVEREST Light curve')
        # Local aliases for attributes used repeatedly below
        time = self.time
        badmask = self.badmask
        nanmask = self.nanmask
        outmask = self.outmask
        transitmask = self.transitmask
        fraw_err = self.fraw_err
        breakpoints = self.breakpoints
        # Smaller markers for short-cadence data
        if self.cadence == 'sc':
            ms = 2
        else:
            ms = 4
        # Get the cdpps for the de-trended and raw light curves
        cdpps = [[self.get_cdpp(self.flux), self.get_cdpp_arr(self.flux)],
                 [self.get_cdpp(self.fraw), self.get_cdpp_arr(self.fraw)]]
        self.cdpp = cdpps[0][0]
        self.cdpp_arr = cdpps[0][1]
        for n, ax, flux, label, c in zip([0, 1], axes, fluxes, labels, cdpps):
            # Initialize CDPP
            cdpp = c[0]
            cdpp_arr = c[1]
            # Plot the good data points
            ax.plot(self.apply_mask(time), self.apply_mask(flux),
                    ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
            # Plot the outliers; `bnmask` is bad+NaN indices, `bmask` bad-only
            bnmask = np.array(
                list(set(np.concatenate([badmask, nanmask]))), dtype=int)
            bmask = [i for i in self.badmask if i not in self.nanmask]
            def O1(x): return x[outmask]
            def O2(x): return x[bmask]
            def O3(x): return x[transitmask]
            if plot_out:
                ax.plot(O1(time), O1(flux), ls='none', color="#777777",
                        marker='.', markersize=ms, alpha=0.5)
            if plot_bad:
                ax.plot(O2(time), O2(flux), 'r.', markersize=ms, alpha=0.25)
            ax.plot(O3(time), O3(flux), 'b.', markersize=ms, alpha=0.25)
            # Plot the GP prediction (de-trended panel, long cadence only)
            if n == 0 and plot_gp and self.cadence != 'sc':
                gp = GP(self.kernel, self.kernel_params)
                gp.compute(self.apply_mask(time), self.apply_mask(fraw_err))
                med = np.nanmedian(self.apply_mask(flux))
                y, _ = gp.predict(self.apply_mask(flux) - med, time)
                y += med
                ax.plot(self.apply_mask(time), self.apply_mask(
                    y), 'r-', lw=0.5, alpha=0.5)
            # Appearance
            if n == 0:
                ax.set_xlabel('Time (%s)' %
                              self._mission.TIMEUNITS, fontsize=18)
            ax.set_ylabel(label, fontsize=18)
            # Mark the segment boundaries
            for brkpt in breakpoints[:-1]:
                ax.axvline(time[brkpt], color='r', ls='--', alpha=0.25)
            # Annotate per-segment CDPP values
            # NOTE(review): the inner loop below reuses `n`, shadowing the
            # outer loop variable; harmless since `n` is not read afterwards.
            if len(cdpp_arr) == 2:
                ax.annotate('%.2f ppm' % cdpp_arr[0], xy=(0.02, 0.975),
                            xycoords='axes fraction',
                            ha='left', va='top', fontsize=12, color='r',
                            zorder=99)
                ax.annotate('%.2f ppm' % cdpp_arr[1], xy=(0.98, 0.975),
                            xycoords='axes fraction',
                            ha='right', va='top', fontsize=12,
                            color='r', zorder=99)
            elif len(cdpp_arr) < 6:
                for n in range(len(cdpp_arr)):
                    if n > 0:
                        x = (self.time[self.breakpoints[n - 1]] - self.time[0]
                             ) / (self.time[-1] - self.time[0]) + 0.02
                    else:
                        x = 0.02
                    ax.annotate('%.2f ppm' % cdpp_arr[n], xy=(x, 0.975),
                                xycoords='axes fraction',
                                ha='left', va='top', fontsize=10,
                                zorder=99, color='r')
            else:
                ax.annotate('%.2f ppm' % cdpp, xy=(0.02, 0.975),
                            xycoords='axes fraction',
                            ha='left', va='top', fontsize=12,
                            color='r', zorder=99)
            ax.margins(0.01, 0.1)
            # Get y lims that bound 99% of the flux
            f = np.concatenate([np.delete(f, bnmask) for f in fluxes])
            N = int(0.995 * len(f))
            hi, lo = f[np.argsort(f)][[N, -N]]
            pad = (hi - lo) * 0.1
            ylim = (lo - pad, hi + pad)
            ax.set_ylim(ylim)
            ax.get_yaxis().set_major_formatter(Formatter.Flux)
            # Indicate off-axis outliers with arrows at the panel edges
            for i in np.where(flux < ylim[0])[0]:
                if i in bmask:
                    color = "#ffcccc"
                    if not plot_bad:
                        continue
                elif i in outmask:
                    color = "#cccccc"
                    if not plot_out:
                        continue
                elif i in nanmask:
                    continue
                else:
                    color = "#ccccff"
                ax.annotate('', xy=(time[i], ylim[0]), xycoords='data',
                            xytext=(0, 15), textcoords='offset points',
                            arrowprops=dict(arrowstyle="-|>", color=color))
            for i in np.where(flux > ylim[1])[0]:
                if i in bmask:
                    color = "#ffcccc"
                    if not plot_bad:
                        continue
                elif i in outmask:
                    color = "#cccccc"
                    if not plot_out:
                        continue
                elif i in nanmask:
                    continue
                else:
                    color = "#ccccff"
                ax.annotate('', xy=(time[i], ylim[1]), xycoords='data',
                            xytext=(0, -15), textcoords='offset points',
                            arrowprops=dict(arrowstyle="-|>", color=color))
        # Show total CDPP improvement in the figure header
        pl.figtext(0.5, 0.94, '%s %d' % (self._mission.IDSTRING, self.ID),
                   fontsize=18, ha='center', va='bottom')
        pl.figtext(0.5, 0.905,
                   r'$%.2f\ \mathrm{ppm} \rightarrow %.2f\ \mathrm{ppm}$' %
                   (self.cdppr, self.cdpp), fontsize=14,
                   ha='center', va='bottom')
        if show:
            pl.show()
            pl.close()
        else:
            if plot_raw:
                return fig, axes
            else:
                return fig, axes[0]
def dvs(self):
'''
Shows the data validation summary (DVS) for the target.
'''
DVS(self.ID, season=self.season, mission=self.mission,
model=self.model_name, clobber=self.clobber)
    def plot_pipeline(self, pipeline, *args, **kwargs):
        '''
        Plots the light curve for the target de-trended with a given pipeline.

        :param str pipeline: The name of the pipeline (lowercase). Options \
               are 'everest2', 'everest1', and other mission-specific \
               pipelines. For `K2`, the available pipelines are 'k2sff' \
               and 'k2sc'.

        Additional :py:obj:`args` and :py:obj:`kwargs` are passed directly to
        the :py:func:`pipelines.plot` function of the mission.
        '''
        # Delegate to the mission's own plotter for third-party pipelines
        if pipeline != 'everest2':
            return getattr(missions, self.mission).pipelines.plot(self.ID,
                                                                  pipeline,
                                                                  *args,
                                                                  **kwargs)
        else:
            # We're going to plot the everest 2 light curve like we plot
            # the other pipelines for easy comparison
            plot_raw = kwargs.get('plot_raw', False)
            plot_cbv = kwargs.get('plot_cbv', True)
            show = kwargs.get('show', True)
            if plot_raw:
                y = self.fraw
                ylabel = 'Raw Flux'
            elif plot_cbv:
                y = self.fcor
                ylabel = "EVEREST2 Flux"
            else:
                y = self.flux
                ylabel = "EVEREST2 Flux"
            # Remove nans
            bnmask = np.concatenate([self.nanmask, self.badmask])
            time = np.delete(self.time, bnmask)
            flux = np.delete(y, bnmask)
            # Plot it
            fig, ax = pl.subplots(1, figsize=(10, 4))
            fig.subplots_adjust(bottom=0.15)
            ax.plot(time, flux, "k.", markersize=3, alpha=0.5)
            # Axis limits bounding ~99% of the flux
            N = int(0.995 * len(flux))
            hi, lo = flux[np.argsort(flux)][[N, -N]]
            pad = (hi - lo) * 0.1
            ylim = (lo - pad, hi + pad)
            ax.set_ylim(ylim)
            # Plot bad data points
            ax.plot(self.time[self.badmask], y[self.badmask],
                    "r.", markersize=3, alpha=0.2)
            # Show the CDPP
            ax.annotate('%.2f ppm' % self._mission.CDPP(flux),
                        xy=(0.98, 0.975), xycoords='axes fraction',
                        ha='right', va='top', fontsize=12, color='r',
                        zorder=99)
            # Appearance
            ax.margins(0, None)
            ax.set_xlabel("Time (%s)" % self._mission.TIMEUNITS, fontsize=16)
            ax.set_ylabel(ylabel, fontsize=16)
            fig.canvas.set_window_title("EVEREST2: EPIC %d" % (self.ID))
            if show:
                pl.show()
                pl.close()
            else:
                return fig, ax
def get_pipeline(self, *args, **kwargs):
'''
Returns the `time` and `flux` arrays for the target obtained by a given
pipeline.
Options :py:obj:`args` and :py:obj:`kwargs` are passed directly to
the :py:func:`pipelines.get` function of the mission.
'''
return getattr(missions, self.mission).pipelines.get(self.ID, *args,
**kwargs)
def mask_planet(self, t0, period, dur=0.2):
'''
Mask all of the transits/eclipses of a given planet/EB. After calling
this method, you must re-compute the model by calling
:py:meth:`compute` in order for the mask to take effect.
:param float t0: The time of first transit (same units as light curve)
:param float period: The period of the planet in days
:param foat dur: The transit duration in days. Default 0.2
'''
mask = []
t0 += np.ceil((self.time[0] - dur - t0) / period) * period
for t in np.arange(t0, self.time[-1] + dur, period):
mask.extend(np.where(np.abs(self.time - t) < dur / 2.)[0])
self.transitmask = np.array(
list(set(np.concatenate([self.transitmask, mask]))))
    def _plot_weights(self, show=True):
        '''
        Plot the raw and scaled PLD pixel weights, one row pair (raw,
        scaled) per light-curve segment and one column per PLD order term.

        .. warning:: Untested!

        '''
        # Set up the axes grid plus a colorbar column on the right
        fig = pl.figure(figsize=(12, 12))
        fig.subplots_adjust(top=0.95, bottom=0.025, left=0.1, right=0.92)
        fig.canvas.set_window_title(
            '%s %d' % (self._mission.IDSTRING, self.ID))
        ax = [pl.subplot2grid((80, 130), (20 * j, 25 * i), colspan=23,
                              rowspan=18)
              for j in range(len(self.breakpoints) * 2)
              for i in range(1 + 2 * (self.pld_order - 1))]
        cax = [pl.subplot2grid((80, 130),
                               (20 * j, 25 * (1 + 2 * (self.pld_order - 1))),
                               colspan=4, rowspan=18)
               for j in range(len(self.breakpoints) * 2)]
        ax = np.array(ax).reshape(2 * len(self.breakpoints), -1)
        cax = np.array(cax)
        # Check number of segments
        if len(self.breakpoints) > 3:
            log.error('Cannot currently plot weights for light ' +
                      'curves with more than 3 segments.')
            return
        # Loop over all PLD orders and over all chunks
        npix = len(self.fpix[1])
        ap = self.aperture.flatten()
        ncol = 1 + 2 * (len(self.weights[0]) - 1)
        raw_weights = np.zeros(
            (len(self.breakpoints), ncol, self.aperture.shape[0],
             self.aperture.shape[1]), dtype=float)
        scaled_weights = np.zeros(
            (len(self.breakpoints), ncol, self.aperture.shape[0],
             self.aperture.shape[1]), dtype=float)
        # Loop over orders
        for o in range(len(self.weights[0])):
            # Column index for this order: order 0 gets one column; higher
            # orders get separate (i == j) and (i != j) columns
            if o == 0:
                oi = 0
            else:
                oi = 1 + 2 * (o - 1)
            # Loop over chunks
            for b in range(len(self.weights)):
                c = self.get_chunk(b)
                rw_ii = np.zeros(npix)
                rw_ij = np.zeros(npix)
                sw_ii = np.zeros(npix)
                sw_ij = np.zeros(npix)
                X = np.nanmedian(self.X(o, c), axis=0)
                # Compute all sets of pixels at this PLD order, then
                # loop over them and assign the weights to the correct pixels
                sets = np.array(list(multichoose(np.arange(npix).T, o + 1)))
                for i, s in enumerate(sets):
                    if (o == 0) or (s[0] == s[1]):
                        # Not the cross-terms
                        j = s[0]
                        rw_ii[j] += self.weights[b][o][i]
                        sw_ii[j] += X[i] * self.weights[b][o][i]
                    else:
                        # Cross-terms
                        for j in s:
                            rw_ij[j] += self.weights[b][o][i]
                            sw_ij[j] += X[i] * self.weights[b][o][i]
                # Make the array 2D and plot it
                rw = np.zeros_like(ap, dtype=float)
                sw = np.zeros_like(ap, dtype=float)
                n = 0
                for i, a in enumerate(ap):
                    if (a & 1):
                        rw[i] = rw_ii[n]
                        sw[i] = sw_ii[n]
                        n += 1
                raw_weights[b][oi] = rw.reshape(*self.aperture.shape)
                scaled_weights[b][oi] = sw.reshape(*self.aperture.shape)
                if o > 0:
                    # Make the array 2D and plot it
                    rw = np.zeros_like(ap, dtype=float)
                    sw = np.zeros_like(ap, dtype=float)
                    n = 0
                    for i, a in enumerate(ap):
                        if (a & 1):
                            rw[i] = rw_ij[n]
                            sw[i] = sw_ij[n]
                            n += 1
                    raw_weights[b][oi + 1] = rw.reshape(*self.aperture.shape)
                    scaled_weights[b][oi +
                                      1] = sw.reshape(*self.aperture.shape)
        # Plot the images
        log.info('Plotting the PLD weights...')
        rdbu = pl.get_cmap('RdBu_r')
        rdbu.set_bad('k')
        for b in range(len(self.weights)):
            # Symmetric color limits per segment
            rmax = max([-raw_weights[b][o].min() for o in range(ncol)] +
                       [raw_weights[b][o].max() for o in range(ncol)])
            smax = max([-scaled_weights[b][o].min() for o in range(ncol)] +
                       [scaled_weights[b][o].max() for o in range(ncol)])
            for o in range(ncol):
                imr = ax[2 * b, o].imshow(raw_weights[b][o], aspect='auto',
                                          interpolation='nearest', cmap=rdbu,
                                          origin='lower', vmin=-rmax,
                                          vmax=rmax)
                ims = ax[2 * b + 1, o].imshow(scaled_weights[b][o],
                                              aspect='auto',
                                              interpolation='nearest',
                                              cmap=rdbu, origin='lower',
                                              vmin=-smax, vmax=smax)
            # Colorbars with scientific-notation tick labels (blank at zero)
            def fmt(x, pos):
                a, b = '{:.0e}'.format(x).split('e')
                b = int(b)
                if float(a) > 0:
                    a = r'+' + a
                elif float(a) == 0:
                    return ''
                return r'${} \times 10^{{{}}}$'.format(a, b)
            cbr = pl.colorbar(imr, cax=cax[2 * b], format=FuncFormatter(fmt))
            cbr.ax.tick_params(labelsize=8)
            cbs = pl.colorbar(
                ims, cax=cax[2 * b + 1], format=FuncFormatter(fmt))
            cbs.ax.tick_params(labelsize=8)
        # Plot aperture contours
        def PadWithZeros(vector, pad_width, iaxis, kwargs):
            vector[:pad_width[0]] = 0
            vector[-pad_width[1]:] = 0
            return vector
        ny, nx = self.aperture.shape
        contour = np.zeros((ny, nx))
        contour[np.where(self.aperture)] = 1
        contour = np.lib.pad(contour, 1, PadWithZeros)
        highres = zoom(contour, 100, order=0, mode='nearest')
        extent = np.array([-1, nx, -1, ny])
        for axis in ax.flatten():
            axis.contour(highres, levels=[
                         0.5], extent=extent, origin='lower', colors='r',
                         linewidths=1)
            # Check for saturated columns
            for x in range(self.aperture.shape[0]):
                for y in range(self.aperture.shape[1]):
                    if self.aperture[x][y] == AP_SATURATED_PIXEL:
                        axis.fill([y - 0.5, y + 0.5, y + 0.5, y - 0.5],
                                  [x - 0.5, x - 0.5, x + 0.5, x + 0.5],
                                  fill=False, hatch='xxxxx', color='r', lw=0)
            axis.set_xlim(-0.5, nx - 0.5)
            axis.set_ylim(-0.5, ny - 0.5)
            axis.set_xticks([])
            axis.set_yticks([])
        # Labels
        titles = [r'$1^{\mathrm{st}}$',
                  r'$2^{\mathrm{nd}}\ (i = j)$',
                  r'$2^{\mathrm{nd}}\ (i \neq j)$',
                  r'$3^{\mathrm{rd}}\ (i = j)$',
                  r'$3^{\mathrm{rd}}\ (i \neq j)$'] + ['' for i in range(10)]
        for i, axis in enumerate(ax[0]):
            axis.set_title(titles[i], fontsize=12)
        for j in range(len(self.weights)):
            ax[2 * j, 0].text(-0.55, -0.15, r'$%d$' % (j + 1),
                              fontsize=16, transform=ax[2 * j, 0].transAxes)
            ax[2 * j, 0].set_ylabel(r'$w_{ij}$', fontsize=18)
            ax[2 * j + 1,
               0].set_ylabel(r'$\bar{X}_{ij} \cdot w_{ij}$', fontsize=18)
        if show:
            pl.show()
            pl.close()
        else:
            return fig, ax, cax
def _plot_chunks(self, show=True, plot_bad=True, plot_out=True):
'''
'''
log.info('Plotting the light curve...')
# Set up axes
fig, ax = pl.subplots(len(self.breakpoints), figsize=(10, 8))
fig.canvas.set_window_title('EVEREST Light curve')
if self.cadence == 'sc':
ms = 2
else:
ms = 4
# Calculate the fluxes and cdpps
fluxes = [None for i in range(len(self.breakpoints))]
cdpps = [None for i in range(len(self.breakpoints))]
for b in range(len(self.breakpoints)):
m = self.get_masked_chunk(b)
c = np.arange(len(self.time))
mK = GetCovariance(self.kernel, self.kernel_params,
self.time[m], self.fraw_err[m])
med = np.nanmedian(self.fraw[m])
f = self.fraw[m] - med
A = np.zeros((len(m), len(m)))
B = np.zeros((len(c), len(m)))
for n in range(self.pld_order):
if (self.lam_idx >= n) and (self.lam[b][n] is not None):
XM = self.X(n, m)
XC = self.X(n, c)
A += self.lam[b][n] * np.dot(XM, XM.T)
B += self.lam[b][n] * np.dot(XC, XM.T)
del XM, XC
W = np.linalg.solve(mK + A, f)
model = np.dot(B, W)
del A, B, W
fluxes[b] = self.fraw - model + np.nanmedian(model)
cdpps[b] = self.get_cdpp_arr(fluxes[b])
# Loop over all chunks
for i in range(len(self.breakpoints)):
# Get current flux/cdpp
flux = fluxes[i]
cdpp_arr = cdpps[i]
# Plot the good data points
ax[i].plot(self.apply_mask(self.time), self.apply_mask(
flux), ls='none', marker='.', color='k', markersize=ms,
alpha=0.5)
# Plot the outliers
bnmask = np.array(
list(set(np.concatenate([self.badmask, self.nanmask]))),
dtype=int)
def O1(x): return x[self.outmask]
def O2(x): return x[bnmask]
def O3(x): return x[self.transitmask]
if plot_out:
ax[i].plot(O1(self.time), O1(flux), ls='none',
color="#777777", marker='.', markersize=ms,
alpha=0.5)
if plot_bad:
ax[i].plot(O2(self.time), O2(flux), 'r.',
markersize=ms, alpha=0.25)
ax[i].plot(O3(self.time), O3(flux), 'b.',
markersize=ms, alpha=0.25)
# Appearance
if i == len(self.breakpoints) - 1:
ax[i].set_xlabel('Time (%s)' %
self._mission.TIMEUNITS, fontsize=18)
ax[i].set_ylabel('Flux %d' % (i + 1), fontsize=18)
for brkpt in self.breakpoints[:-1]:
ax[i].axvline(self.time[brkpt], color='r', ls='--', alpha=0.25)
if len(self.breakpoints) == 2:
ax[i].annotate('%.2f ppm' % cdpp_arr[0], xy=(0.02, 0.975),
xycoords='axes fraction',
ha='left', va='top', fontsize=12, color='r',
zorder=99)
ax[i].annotate('%.2f ppm' % cdpp_arr[1], xy=(0.98, 0.975),
xycoords='axes fraction',
ha='right', va='top', fontsize=12,
color='r', zorder=99)
elif len(self.breakpoints) < 6:
for n in range(len(self.breakpoints)):
if n > 0:
x = (self.time[self.breakpoints[n - 1]] - self.time[0]
) / (self.time[-1] - self.time[0]) + 0.02
else:
x = 0.02
ax[i].annotate('%.2f ppm' % cdpp_arr[n], xy=(x, 0.975),
xycoords='axes fraction',
ha='left', va='top', fontsize=10,
zorder=99, color='r')
else:
ax[i].annotate('%.2f ppm' % cdpp_arr[0], xy=(0.02, 0.975),
xycoords='axes fraction',
ha='left', va='top', fontsize=12,
color='r', zorder=99)
ax[i].margins(0.01, 0.1)
if i == 0:
a = self.time[0]
else:
a = self.time[self.breakpoints[i - 1]]
b = self.time[self.breakpoints[i]]
ax[i].axvspan(a, b, color='b', alpha=0.1, zorder=-99)
# Get y lims that bound 99% of the flux
f = np.concatenate([np.delete(f, bnmask) for f in fluxes])
N = int(0.995 * len(f))
hi, lo = f[np.argsort(f)][[N, -N]]
pad = (hi - lo) * 0.1
ylim = (lo - pad, hi + pad)
ax[i].set_ylim(ylim)
ax[i].get_yaxis().set_major_formatter(Formatter.Flux)
# Indicate off-axis outliers
for j in np.where(flux < ylim[0])[0]:
if j in bnmask:
color = "#ffcccc"
if not plot_bad:
continue
elif j in self.outmask:
color = "#cccccc"
if not plot_out:
continue
else:
color = "#ccccff"
ax[i].annotate('', xy=(self.time[j], ylim[0]), xycoords='data',
xytext=(0, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="-|>", color=color))
for j in np.where(flux > ylim[1])[0]:
if j in bnmask:
color = "#ffcccc"
if not plot_bad:
continue
elif j in self.outmask:
color = "#cccccc"
if not plot_out:
continue
else:
color = "#ccccff"
ax[i].annotate('', xy=(self.time[j], ylim[1]), xycoords='data',
xytext=(0, -15), textcoords='offset points',
arrowprops=dict(arrowstyle="-|>", color=color))
if show:
pl.show()
pl.close()
else:
return fig, axes
def _save_npz(self):
'''
Saves all of the de-trending information to disk in an `npz` file
'''
# Save the data
d = dict(self.__dict__)
d.pop('_weights', None)
d.pop('_A', None)
d.pop('_B', None)
d.pop('_f', None)
d.pop('_mK', None)
d.pop('K', None)
d.pop('dvs', None)
d.pop('clobber', None)
d.pop('clobber_tpf', None)
d.pop('_mission', None)
d.pop('debug', None)
np.savez(os.path.join(self.dir, self.name + '.npz'), **d)
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
'''
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the
values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
'''
self._save_npz()
optimized = pPLD(self.ID, piter=piter, pmaxf=pmaxf,
ppert=ppert, debug=True, clobber=True)
optimized.publish()
self.reset()
    def plot_folded(self, t0, period, dur=0.2):
        '''
        Plot the light curve folded on a given `period` and centered at `t0`.
        When plotting folded transits, please mask them using
        :py:meth:`mask_planet` and re-compute the model using
        :py:meth:`compute`.

        :param float t0: The time at which to center the plot \
               (same units as light curve)
        :param float period: The period of the folding operation
        :param float dur: The transit duration in days. Default 0.2
        '''
        # Mask the planet
        self.mask_planet(t0, period, dur)
        # Whiten: subtract the GP prediction from the de-trended flux
        gp = GP(self.kernel, self.kernel_params, white=False)
        gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err))
        med = np.nanmedian(self.apply_mask(self.flux))
        y, _ = gp.predict(self.apply_mask(self.flux) - med, self.time)
        fwhite = (self.flux - y)
        fwhite /= np.nanmedian(fwhite)
        # Fold
        tfold = (self.time - t0 - period / 2.) % period - period / 2.
        # Crop to within two transit durations of the center
        inds = np.where(np.abs(tfold) < 2 * dur)[0]
        x = tfold[inds]
        y = fwhite[inds]
        # Plot
        fig, ax = pl.subplots(1, figsize=(9, 5))
        fig.subplots_adjust(bottom=0.125)
        ax.plot(x, y, 'k.', alpha=0.5)
        # Get ylims from the 4th-smallest/largest finite flux values
        yfin = np.delete(y, np.where(np.isnan(y)))
        lo, hi = yfin[np.argsort(yfin)][[3, -3]]
        pad = (hi - lo) * 0.1
        ylim = (lo - pad, hi + pad)
        ax.set_ylim(*ylim)
        # Appearance
        ax.set_xlabel(r'Time (days)', fontsize=18)
        ax.set_ylabel(r'Normalized Flux', fontsize=18)
        fig.canvas.set_window_title(
            '%s %d' % (self._mission.IDSTRING, self.ID))
        pl.show()
def plot_transit_model(self, show=True, fold=None, ax=None):
'''
Plot the light curve de-trended with a join instrumental + transit
model with the best fit transit model overlaid. The transit model
should be specified using the :py:obj:`transit_model` attribute
and should be an instance or list of instances of
:py:class:`everest.transit.TransitModel`.
:param bool show: Show the plot, or return the `fig, ax` instances? \
Default `True`
:param str fold: The name of the planet/transit model on which to \
fold. If only one model is present, can be set to \
:py:obj:`True`. Default :py:obj:`False` \
(does not fold the data).
:param ax: A `matplotlib` axis instance to use for plotting. \
Default :py:obj:`None`
'''
if self.transit_model is None:
raise ValueError("No transit model provided!")
if self.transit_depth is None:
self.compute()
if fold is not None:
if (fold is True and len(self.transit_model) > 1) or \
(type(fold) is not str):
raise Exception(
"Kwarg `fold` should be the name of the transit " +
"model on which to fold the data.")
if fold is True:
# We are folding on the first index of `self.transit_model`
fold = 0
elif type(fold) is str:
# Figure out the index of the transit model on which to fold
fold = np.argmax(
[fold == tm.name for tm in self.transit_model])
log.info('Plotting the transit model folded ' +
'on transit model index %d...' % fold)
else:
log.info('Plotting the transit model...')
# Set up axes
if ax is None:
if fold is not None:
fig, ax = pl.subplots(1, figsize=(8, 5))
else:
fig, ax = pl.subplots(1, figsize=(13, 6))
fig.canvas.set_window_title('EVEREST Light curve')
else:
fig = pl.gcf()
# Set up some stuff
if self.cadence == 'sc':
ms = 2
else:
ms = 4
# Fold?
if fold is not None:
times = self.transit_model[fold].params.get('times', None)
if times is not None:
time = self.time - \
[times[np.argmin(np.abs(ti - times))] for ti in self.time]
t0 = times[0]
else:
t0 = self.transit_model[fold].params.get('t0', 0.)
period = self.transit_model[fold].params.get('per', 10.)
time = (self.time - t0 - period / 2.) % period - period / 2.
dur = 0.01 * \
len(np.where(self.transit_model[fold](
np.linspace(t0 - 0.5, t0 + 0.5, 100)) < 0)[0])
else:
time = self.time
ax.plot(self.apply_mask(time), self.apply_mask(self.flux),
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
ax.plot(time[self.outmask], self.flux[self.outmask],
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
ax.plot(time[self.transitmask], self.flux[self.transitmask],
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
# Plot the transit + GP model
med = np.nanmedian(self.apply_mask(self.flux))
transit_model = \
med * np.sum([depth * tm(self.time)
for tm, depth in zip(self.transit_model,
self.transit_depth)], axis=0)
gp = GP(self.kernel, self.kernel_params, white=False)
gp.compute(self.apply_mask(self.time), self.apply_mask(self.fraw_err))
y, _ = gp.predict(self.apply_mask(
self.flux - transit_model) - med, self.time)
if fold is not None:
flux = (self.flux - y) / med
ax.plot(self.apply_mask(time), self.apply_mask(flux),
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
ax.plot(time[self.outmask], flux[self.outmask], ls='none',
marker='.', color='k', markersize=ms, alpha=0.5)
ax.plot(time[self.transitmask], flux[self.transitmask],
ls='none', marker='.', color='k', markersize=ms, alpha=0.5)
hires_time = np.linspace(-5 * dur, 5 * dur, 1000)
hires_transit_model = 1 + \
self.transit_depth[fold] * \
self.transit_model[fold](hires_time + t0)
ax.plot(hires_time, hires_transit_model, 'r-', lw=1, alpha=1)
else:
flux = self.flux
y += med
y += transit_model
ax.plot(time, y, 'r-', lw=1, alpha=1)
# Plot the bad data points
bnmask = np.array(
list(set(np.concatenate([self.badmask, self.nanmask]))), dtype=int)
bmask = [i for i in self.badmask if i not in self.nanmask]
ax.plot(time[bmask], flux[bmask], 'r.', markersize=ms, alpha=0.25)
# Appearance
ax.set_ylabel('EVEREST Flux', fontsize=18)
ax.margins(0.01, 0.1)
if fold is not None:
ax.set_xlabel('Time From Transit Center (days)', fontsize=18)
ax.set_xlim(-3 * dur, 3 * dur)
else:
ax.set_xlabel('Time (%s)' % self._mission.TIMEUNITS, fontsize=18)
for brkpt in self.breakpoints[:-1]:
ax.axvline(time[brkpt], color='r', ls='--', alpha=0.25)
ax.get_yaxis().set_major_formatter(Formatter.Flux)
# Get y lims that bound most of the flux
if fold is not None:
lo = np.min(hires_transit_model)
pad = 1.5 * (1 - lo)
ylim = (lo - pad, 1 + pad)
else:
f = np.delete(flux, bnmask)
N = int(0.995 * len(f))
hi, lo = f[np.argsort(f)][[N, -N]]
pad = (hi - lo) * 0.1
ylim = (lo - pad, hi + pad)
ax.set_ylim(ylim)
# Indicate off-axis outliers
for i in np.where(flux < ylim[0])[0]:
if i in bmask:
color = "#ffcccc"
else:
color = "#ccccff"
ax.annotate('', xy=(time[i], ylim[0]), xycoords='data',
xytext=(0, 15), textcoords='offset points',
arrowprops=dict(arrowstyle="-|>", color=color,
alpha=0.5))
for i in np.where(flux > ylim[1])[0]:
if i in bmask:
color = "#ffcccc"
else:
color = "#ccccff"
ax.annotate('', xy=(time[i], ylim[1]), xycoords='data',
xytext=(0, -15), textcoords='offset points',
arrowprops=dict(arrowstyle="-|>", color=color,
alpha=0.5))
if show:
pl.show()
pl.close()
else:
return fig, ax
|
rodlugerREPO_NAMEeverestPATH_START.@everest_extracted@everest-master@everest@user.py@.PATH_END.py
|
{
"filename": "_textcase.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymapbox/hoverlabel/font/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``textcase`` property of
    ``densitymapbox.hoverlabel.font``."""

    def __init__(
        self,
        plotly_name="textcase",
        parent_name="densitymapbox.hoverlabel.font",
        **kwargs,
    ):
        # Pull validator options out of kwargs, falling back to the
        # schema defaults for this property.
        options = {
            "array_ok": kwargs.pop("array_ok", True),
            "edit_type": kwargs.pop("edit_type", "none"),
            "values": kwargs.pop(
                "values", ["normal", "word caps", "upper", "lower"]
            ),
        }
        super(TextcaseValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **options,
            **kwargs,
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymapbox@hoverlabel@font@_textcase.py@.PATH_END.py
|
{
"filename": "shallow_water_init_conds.py",
"repo_name": "ExeClim/Isca",
"repo_path": "Isca_extracted/Isca-master/src/extra/python/scripts/shallow_water_init_conds.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-s
from typing import NoReturn
import numpy as np
import pdb
import create_timeseries as cts
import xarray as xar
import gauss_grid as gg
import matplotlib.pyplot as plt
import windspharm as wsp
import pdb
def convert_to_vor_div(u_in, v_in, lat_arr, planet_radius):
    """Convert spherical-polar velocity components on a gaussian grid to
    vorticity and divergence using windspharm."""
    # Reshape the 2D (lat, lon) fields into the layout windspharm expects
    uwnd, uwnd_info = wsp.tools.prep_data(u_in, 'yx')
    vwnd, vwnd_info = wsp.tools.prep_data(v_in, 'yx')
    # windspharm requires the latitude dimension ordered north-to-south
    _, uwnd, vwnd = wsp.tools.order_latdim(lat_arr[:, 0], uwnd, vwnd)
    # VectorWind handles the spherical-harmonic computations
    wind = wsp.standard.VectorWind(uwnd, vwnd, rsphere=planet_radius,
                                   gridtype='gaussian')
    vor = wsp.tools.recover_data(wind.vorticity(), uwnd_info)
    div = wsp.tools.recover_data(wind.divergence(), uwnd_info)
    # Undo the north-to-south latitude reordering before returning
    return vor[::-1, :], div[::-1, :]
def set_u_v_height_field(lon_in, lat_in, lonb_in, latb_in, epsilon, alpha,
                         beta, m, r_0, planet_radius,
                         northern_hemisphere=True):
    """Configure an initial condition for u, v and h given some
    balance condition. Uses Saturn parameters and gradient-wind balance
    from 10.1016/j.icarus.2017.06.006 (Rostami et al. 2017).

    Returns ``(u, v, geopotential, h_0, grad_geopot)`` with u, v and the
    geopotential in SI units and h_0 the squared deformation velocity."""
    deformation_scale = 3200e3  # p62 of Rostami et al 2017
    f_0 = 3.2e-4
    timescale = (f_0)**-1
    velocity_scale = deformation_scale/timescale
    lat_rad = np.deg2rad(lat_in)
    lon_rad = np.deg2rad(lon_in)  # computed but not used below
    # Non-dimensional radial coordinate, measured from the relevant pole
    if northern_hemisphere:
        r_nd = (planet_radius * (np.pi/2. - lat_rad))/deformation_scale
    else:
        r_nd = (planet_radius * (np.pi/2. + lat_rad))/deformation_scale
    # Prescribed azimuthal wind profile (non-dimensional); v is zero
    v_nd = np.zeros_like(lat_in)
    u_nd = epsilon * ((r_nd - r_0)**alpha)* np.exp(-m*((r_nd-r_0)**beta))
    v_si = v_nd * velocity_scale
    u_si = u_nd * velocity_scale
    # Gradient-wind balance for the radial geopotential gradient
    if northern_hemisphere:
        grad_geopot = ((u_si**2)/(r_nd* deformation_scale)) + (f_0*np.sin(lat_rad)*u_si)
    else:
        # The sign of the Coriolis term flips because the sign of u flips in
        # this radial coordinate system for the opposite hemisphere.
        grad_geopot = ((u_si**2)/(r_nd* deformation_scale)) - (f_0*np.sin(lat_rad)*u_si)
    geopotential = np.zeros_like(grad_geopot)
    # Integrate the gradient with the trapezoid rule, always outward from
    # the pole: for the southern hemisphere flip the arrays, integrate, and
    # flip the result back.
    if northern_hemisphere:
        r_int = r_nd
        grad_int = grad_geopot
    else:
        r_int = r_nd[::-1, :]
        grad_int = grad_geopot[::-1, :]
    for j in range(1, len(lat_rad[:, 0])):
        geopotential[j, :] = geopotential[j-1, :] + 0.5*(grad_int[j-1, :]+grad_int[j, :])*(r_int[j]-r_int[j-1])
    if not northern_hemisphere:
        geopotential = geopotential[::-1, :]
    # Remove the area-weighted mean so the model's own h_0 parameter sets
    # the mean geopotential.
    delta_lat = np.diff(latb_in, axis=0)[:, 0:-1]
    area = np.cos(np.deg2rad(lat_in))*np.deg2rad(delta_lat)
    area_av_geopot = np.sum(geopotential*area)/np.sum(area)
    geopot_demeaned = geopotential-area_av_geopot
    area_av_final = np.sum(geopot_demeaned*area)/np.sum(area)
    print(f'old mean = {area_av_geopot}, final area_av geopot = {area_av_final}')
    geopot_si = geopot_demeaned * deformation_scale
    h_0 = (deformation_scale*f_0)**2.
    return u_si, v_si, geopot_si, h_0, grad_geopot
# Grid setup: gaussian latitudes plus evenly spaced longitudes
nlat=128
nlon=256
latitudes, latitude_bounds_2 = gg.gaussian_latitudes(int(nlat/2))
latitude_bounds = [latitude_bound[0] for latitude_bound in latitude_bounds_2] + [latitude_bounds_2[-1][1]]
longitudes = np.linspace(0., 360., nlon, endpoint=False)
delta_lon = longitudes[1]-longitudes[0]
longitude_bounds = [lon_val-(0.5*delta_lon) for lon_val in longitudes] + [np.max(longitudes)+(0.5*delta_lon)]
time_arr_adj=None
lon_array_2d, lat_array_2d = np.meshgrid(longitudes, latitudes)
lonb_array_2d, latb_array_2d = np.meshgrid(longitude_bounds, latitude_bounds)
#Note that in the following we're making the initial condition symmetric about the equator. This is because if you only set the initial conditions in the northern hemisphere then you end up needing a very large set of latitudinal functions to get that level of asymmetry, and the code gets very upset when translating that to a finite spectral representation. Making it symmetric gets rid of this problem, at least to some extent.
# Polar-vortex component, evaluated in both hemispheres
epsilon = 0.15*2.
alpha = 0.42
beta = 1.3
r_0 = 0.
m_param = 1.
planet_radius = 55000e3
u_array_vortex, v_array_vortex, height_array_vortex, h_0, grad_geopot_vortex = set_u_v_height_field(lon_array_2d, lat_array_2d,lonb_array_2d, latb_array_2d, epsilon, alpha, beta, m_param, r_0, planet_radius)
u_array_vortex_sp, v_array_vortex_sp, height_array_vortex_sp, h_0_sp, grad_geopot_vortex_sp = set_u_v_height_field(lon_array_2d, lat_array_2d,lonb_array_2d, latb_array_2d, epsilon, alpha, beta, m_param, r_0, planet_radius, northern_hemisphere=False)
# High-latitude jet component, evaluated in both hemispheres
epsilon = 0.08
alpha = 0.
beta = 2.
r_0 = 3.37
m_param = 3.
planet_radius = 55000e3
u_array_jet, v_array_jet, height_array_jet, h_0, grad_geopot_jet = set_u_v_height_field(lon_array_2d, lat_array_2d,lonb_array_2d, latb_array_2d, epsilon, alpha, beta, m_param, r_0, planet_radius)
u_array_jet_sp, v_array_jet_sp, height_array_jet_sp, h_0_sp, grad_geopot_jet_sp = set_u_v_height_field(lon_array_2d, lat_array_2d,lonb_array_2d, latb_array_2d, epsilon, alpha, beta, m_param, r_0, planet_radius, northern_hemisphere=False)
# Superpose the vortex and jet components from both hemispheres
u_array_total = u_array_vortex+u_array_vortex_sp + u_array_jet+u_array_jet_sp
v_array_total = v_array_vortex+v_array_vortex_sp + v_array_jet+v_array_jet_sp
height_array_total = height_array_vortex+height_array_vortex_sp + height_array_jet+height_array_jet_sp
grad_geopot_total = grad_geopot_vortex + grad_geopot_vortex_sp + grad_geopot_jet + grad_geopot_jet_sp
# NOTE(review): convert_to_vor_div's third parameter is `lat_arr` (it reads
# lat_arr[:, 0] as the latitude coordinate), but the total *height* field is
# passed here; lat_array_2d may have been intended -- confirm before use.
vor_array, div_array = convert_to_vor_div(u_array_total, v_array_total, height_array_total, planet_radius)
p_full=None
p_half=None
npfull=None
nphalf=None
#Output it to a netcdf file.
file_name='rostami_t85_jet_and_vortex_mk7_gg.nc'
number_dict={}
number_dict['nlat']=nlat
number_dict['nlon']=nlon
number_dict['nlatb']=nlat+1
number_dict['nlonb']=nlon+1
number_dict['npfull']=npfull
number_dict['nphalf']=nphalf
number_dict['ntime']=None
data_dict = {
    'vor': vor_array,
    'height': height_array_total,
    'div': div_array,
    'ucomp': u_array_total,
    'vcomp': v_array_total,
    'grad_geopot': grad_geopot_total
}
time_units=None
cts.output_multiple_variables_to_file(data_dict,latitudes,longitudes,latitude_bounds,longitude_bounds,p_full,p_half,time_arr_adj,time_units,file_name,number_dict)
print(f'Must set h_0 parameter in code to be {h_0}')
|
ExeClimREPO_NAMEIscaPATH_START.@Isca_extracted@Isca-master@src@extra@python@scripts@shallow_water_init_conds.py@.PATH_END.py
|
{
"filename": "test_cdms.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/linelists/cdms/tests/test_cdms.py",
"type": "Python"
}
|
import numpy as np
import pytest
import os
from astropy import units as u
from astropy.table import Table
from astroquery.linelists.cdms.core import CDMS, parse_letternumber
from astroquery.utils.mocks import MockResponse
# Full set of column names every parsed CDMS result table is expected to carry;
# shared by the assertions in the tests below.
colname_set = set(['FREQ', 'ERR', 'LGINT', 'DR', 'ELO', 'GUP', 'TAG', 'QNFMT',
                   'Ju', 'Jl', "vu", "F1u", "F2u", "F3u", "vl", "Ku", "Kl",
                   "F1l", "F2l", "F3l", "name", "MOLWT", "Lab"])
def data_path(filename):
    """Return the absolute path of *filename* inside this module's ``data`` directory."""
    here = os.path.dirname(__file__)
    return os.path.join(here, 'data', filename)
def mockreturn(*args, method='GET', data=None, url='', **kwargs):
    """Stand-in for ``CDMS._request`` returning canned responses from disk.

    GET requests are answered with the ``<molecule>.data`` fixture named in
    *url*; POST requests return ``post_response.html`` with the requested
    molecule substituted in.

    Fix: the original signature used a mutable default (``data={}``); the
    default is now ``None`` and normalized at use, which is equivalent for
    callers but avoids the shared-mutable-default pitfall.
    """
    if method == 'GET':
        # URL looks like ".../cdmstab<molecule>.<ext>"; recover the molecule name.
        molecule = url.split('cdmstab')[1].split('.')[0]
        with open(data_path(molecule+".data"), 'rb') as fh:
            content = fh.read()
        return MockResponse(content=content)
    elif method == 'POST':
        # The POST payload carries the molecule tag under 'Molecules'.
        molecule = dict(data or {})['Molecules']
        with open(data_path("post_response.html"), 'r') as fh:
            content = fh.read().format(replace=molecule).encode()
        return MockResponse(content=content)
@pytest.fixture
def patch_post(request):
    """Fixture that monkeypatches ``CDMS._request`` with :func:`mockreturn`
    so the tests below run against canned on-disk responses."""
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(CDMS, '_request', mockreturn)
    return mp
def test_input_async():
    """The async query builder should echo molecule and frequency bounds in its payload."""
    payload = dict(CDMS.query_lines_async(min_frequency=100 * u.GHz,
                                          max_frequency=1000 * u.GHz,
                                          min_strength=-500,
                                          molecule="028503 CO, v=0",
                                          get_query_payload=True))
    assert payload['Molecules'] == "028503 CO, v=0"
    np.testing.assert_almost_equal(payload['MinNu'], 100.)
    np.testing.assert_almost_equal(payload['MaxNu'], 1000.)
def test_input_multi():
    """A regex molecule name with ``parse_name_locally=True`` should resolve
    to a concrete CDMS tag in the outgoing payload."""
    response = CDMS.query_lines_async(min_frequency=500 * u.GHz,
                                      max_frequency=1000 * u.GHz,
                                      min_strength=-500,
                                      molecule=r"^H[2D]O(-\d\d|)\+$",
                                      parse_name_locally=True,
                                      get_query_payload=True)
    response = dict(response)
    # The regex matches H2O+ / HDO+ variants; the resolved tag is H2O+.
    assert response['Molecules'] == '018505 H2O+'
    np.testing.assert_almost_equal(response['MinNu'], 500.)
    np.testing.assert_almost_equal(response['MaxNu'], 1000.)
def test_query(patch_post):
    """End-to-end parse of the canned CO response: table shape, column set,
    and a few spot-checked values."""
    tbl = CDMS.query_lines(min_frequency=100 * u.GHz,
                           max_frequency=1000 * u.GHz,
                           min_strength=-500,
                           molecule="CO")
    assert isinstance(tbl, Table)
    assert len(tbl) == 8
    assert set(tbl.keys()) == colname_set
    # Spot checks against known values from the CO fixture file.
    assert tbl['FREQ'][0] == 115271.2018
    assert tbl['ERR'][0] == .0005
    assert tbl['LGINT'][0] == -7.1425
    assert tbl['GUP'][0] == 3
    assert tbl['GUP'][7] == 17
def test_parseletternumber():
    """
    Very Important:
    Exactly two characters are available for each quantum number. Therefore, half
    integer quanta are rounded up ! In addition, capital letters are used to
    indicate quantum numbers larger than 99. E. g. A0 is 100, Z9 is 359. Small
    types are used to signal corresponding negative quantum numbers.
    """
    # examples from the docs
    assert parse_letternumber("A0") == 100
    assert parse_letternumber("Z9") == 359
    # inferred?
    # (lower case => negative; two letter codes concatenate digit-wise,
    #  so Z=35 twice gives 3535 — pinned by the assertions below)
    assert parse_letternumber("z9") == -359
    assert parse_letternumber("ZZ") == 3535
def test_hc7s(patch_post):
    """
    Test for a very complicated molecule
    CDMS.query_lines_async(100*u.GHz, 100.755608*u.GHz, molecule='HC7S', parse_name_locally=True)
    """
    tbl = CDMS.query_lines(100*u.GHz, 100.755608*u.GHz, molecule='HC7S',)
    assert isinstance(tbl, Table)
    assert len(tbl) == 5
    assert set(tbl.keys()) == colname_set
    # Spot checks against the HC7S fixture, including the letter-coded
    # quantum numbers (values > 99 round-trip through parse_letternumber).
    assert tbl['FREQ'][0] == 100694.065
    assert tbl['ERR'][0] == 0.4909
    assert tbl['LGINT'][0] == -3.9202
    assert tbl['MOLWT'][0] == 117
    assert tbl['GUP'][0] == 255
    assert tbl['Ju'][0] == 126
    assert tbl['Jl'][0] == 125
    assert tbl['vu'][0] == 127
    assert tbl['vl'][0] == 126
    assert tbl['Ku'][0] == -1
    assert tbl['Kl'][0] == 1
    assert tbl['F1u'][0] == 127
    assert tbl['F1l'][0] == 126
def test_hc7n(patch_post):
    """
    Regression test for 2409, specifically that GUP>1000 was not being
    processed correctly b/c the first digit of GUP was being included in the
    previous column (frequency)
    CDMS.query_lines(200*u.GHz, 230.755608*u.GHz, molecule='HC7N',parse_name_locally=True)
    """
    tbl = CDMS.query_lines(200*u.GHz, 230.755608*u.GHz, molecule='HC7N')
    assert isinstance(tbl, Table)
    assert len(tbl) == 27
    assert set(tbl.keys()) == colname_set
    assert tbl['FREQ'][0] == 200693.406
    assert tbl['ERR'][0] == 0.01
    assert tbl['LGINT'][0] == -2.241
    assert tbl['MOLWT'][0] == 99
    # The regression target: a 4-digit GUP must not bleed into FREQ.
    assert tbl['GUP'][0] == 1071
    assert tbl['Ju'][0] == 178
    assert tbl['Jl'][0] == 177
    # Columns absent for this molecule come back masked, not garbled.
    assert tbl['vu'][0].mask
    assert tbl['vl'][0].mask
    assert tbl['Ku'][0].mask
    assert tbl['Kl'][0].mask
    assert tbl['F1u'][0].mask
    assert tbl['F1l'][0].mask
    assert tbl['Lab'][0]
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@linelists@cdms@tests@test_cdms.py@.PATH_END.py
|
{
"filename": "brick.py",
"repo_name": "astroweaver/the_farmer",
"repo_path": "the_farmer_extracted/the_farmer-master/farmer/brick.py",
"type": "Python"
}
|
from collections import OrderedDict
import config as conf
from .image import BaseImage
from .utils import load_brick_position, dilate_and_group, clean_catalog, build_regions, run_group
from .group import Group
import logging
import os, time
from functools import partial
from astropy.nddata import Cutout2D
import astropy.units as u
import numpy as np
from pathos.pools import ProcessPool
from copy import copy
from astropy.wcs.utils import proj_plane_pixel_scales
class Brick(BaseImage):
    """A rectangular sub-region ("brick") of the survey mosaic.

    A brick bundles, per band: image cutouts (science/weight/mask/...),
    WCS and pixel scales, detection catalogs, and the model bookkeeping
    (``model_catalog`` / ``model_tracker``).  Bricks spawn :class:`Group`
    objects for per-group model fitting and absorb their results back.
    State is persisted to HDF5 via the machinery inherited from
    :class:`BaseImage` (``read_hdf5``/``write_hdf5``).
    """
    def __init__(self, brick_id=None, position=None, size=None, load=True, silent=False, tag=None) -> None:
        """Create a brick either by loading ``B<id>[_tag].h5`` (``load=True``)
        or freshly from a ``brick_id`` *or* an explicit ``position``+``size``
        (mutually exclusive with ``brick_id``)."""
        # Accept a length-1 sequence for brick_id (e.g. from a CLI parser).
        if not np.isscalar(brick_id):
            if len(brick_id) == 1:
                brick_id = brick_id[0]
        stag = ''
        if tag is not None:
            stag = f'_{tag}'
        self.filename = f'B{brick_id}{stag}.h5'
        self.logger = logging.getLogger(f'farmer.brick_{brick_id}')
        # if silent:
        #     self.logger.setLevel(logging.ERROR)
        if load:
            # Restore a previously-written brick: every stored attribute is
            # copied straight onto this instance.
            self.logger.info(f'Trying to load brick from {self.filename}...')
            attributes = self.read_hdf5()
            for key in attributes:
                self.__dict__[key] = attributes[key]
                self.logger.debug(f'  ... {key}')
            # TODO cross-check with config
        else:
            # Housekeeping
            self.brick_id = brick_id
            self.bands = []
            self.wcs = {}
            self.pixel_scales = {}
            self.data = {}
            self.headers = {}
            self.properties = {}
            self.catalogs = {}
            self.type = 'brick'
            self.n_sources = {}
            self.group_ids = {}
            # self.group_pops = {}
            self.model_catalog = OrderedDict()
            self.model_tracker = OrderedDict()
            self.model_tracker_groups = OrderedDict()
            self.catalog_band='detection'
            self.catalog_imgtype='science'
            self.phot_priors = conf.PHOT_PRIORS
            self.model_priors = conf.MODEL_PRIORS
            # Snapshot the run configuration so it is persisted with the brick.
            self.config = {}
            for key in conf.__dict__:
                if not key.startswith('_'):
                    self.config[key] = conf.__dict__[key]
            # Position
            if (brick_id is not None) & ((position is not None) | (size is not None)):
                raise RuntimeError('Cannot create brick from BOTH brick_id AND position/size!')
            if brick_id is not None:
                self.position, self.size, self.buffsize = load_brick_position(brick_id)
            else:
                self.position, self.size = position, size
                # Buffered size pads each side by BRICK_BUFFER to catch edge sources.
                self.buffsize = (self.size[0]+2*conf.BRICK_BUFFER, self.size[1]+2*conf.BRICK_BUFFER)
            self.logger.info(f'Spawned brick #{self.brick_id} at ({self.position.ra:2.1f}, {self.position.dec:2.1f}) with size {self.size[0].to(u.arcmin):2.3f} X {self.size[1].to(u.arcmin):2.3f}')
    def get_figprefix(self, imgtype, band):
        """Return the filename prefix used for this brick's diagnostic figures."""
        return f'B{self.brick_id}_{band}_{imgtype}'
    def get_bands(self):
        """Return the bands attached to this brick as a numpy array."""
        return np.array(self.bands)
    def summary(self):
        """Print a human-readable overview of the brick's data and properties."""
        print(f'Summary of brick {self.brick_id}')
        print(f'Located at ({self.position.ra:2.2f}, {self.position.dec:2.2f}) with size {self.size[0]:2.2f} x {self.size[1]:2.2f}')
        print(f'   (w/ buffer: {self.buffsize[0]:2.2f} x {self.buffsize[1]:2.2f})')
        print(f'Has {len(self.bands)} bands: {self.bands}')
        for band in self.bands:
            print(f' --- Data {band} ---')
            for imgtype in self.data[band].keys():
                if imgtype.startswith('psf'): continue
                if isinstance(self.data[band][imgtype], dict): continue
                img = self.data[band][imgtype].data
                tsum, mean, med, std = np.nansum(img), np.nanmean(img), np.nanmedian(img), np.nanstd(img)
                print(f'  {imgtype} ... {np.shape(img)} ( {tsum:2.2f} / {mean:2.2f} / {med:2.2f} / {std:2.2f})')
            # print(f'--- Properties {band} ---')
            for attr in self.properties[band].keys():
                print(f'  {attr} ... {self.properties[band][attr]}')
    def add_band(self, mosaic, overwrite=False):
        """Cut this brick's (buffered) footprint out of *mosaic* and attach it.

        Copies science/weight/mask/etc. cutouts, headers, WCS, PSF samples and
        mosaic properties; fabricates zero-filled weight/mask/background/rms
        layers when the mosaic lacks them.  A mosaic with no overlap is skipped
        with a warning.
        """
        if (~overwrite) & (mosaic.band in self.bands):
            raise RuntimeError(f'{mosaic.band} already exists in brick #{self.brick_id}!')
        # Loop over provided data
        for imgtype in mosaic.data.keys():
            if imgtype in ('science', 'weight', 'mask', 'background', 'rms', 'model', 'residual', 'chi'):
                fill_value = np.nan
                if imgtype == 'mask':
                    fill_value = True
                try:
                    cutout = Cutout2D(mosaic.data[imgtype], self.position, self.buffsize, wcs=mosaic.wcs,
                                    copy=True, mode='partial', fill_value = fill_value)
                    if imgtype == 'science':
                        # Add band information
                        self.data[mosaic.band] = {}
                        self.properties[mosaic.band] = {}
                        self.headers[mosaic.band] = {}
                        self.n_sources[mosaic.band] = {}
                        self.catalogs[mosaic.band] = {}
                        self.group_ids[mosaic.band] = {}
                        # self.group_pops[mosaic.band] = {}
                        self.bands.append(mosaic.band)
                except:
                    # NOTE(review): any Cutout2D failure is treated as "no
                    # overlap"; TODO narrow this bare except.
                    self.logger.warning(f'{mosaic.band} mosaic has no overlap with detection footprint! Skipping band.')
                    return
                self.logger.debug(f'... data \"{imgtype}\" subimage cut from {mosaic.band} at {cutout.input_position_original}')
                self.data[mosaic.band][imgtype] = cutout
                if imgtype in ('science', 'weight', 'mask'):
                    self.headers[mosaic.band][imgtype] = mosaic.headers[imgtype] #TODO update WCS!
                if imgtype == 'science':
                    self.wcs[mosaic.band] = cutout.wcs
                    self.pixel_scales[mosaic.band] = proj_plane_pixel_scales(cutout.wcs) * u.deg
                    self.estimate_properties(band=mosaic.band, imgtype=imgtype)
            elif imgtype in ('segmap', 'groupmap'):
                self.transfer_maps()
            elif imgtype in ('psfcoords', 'psflist'):
                if imgtype == 'psflist': continue # do these together!
                if mosaic.data['psfcoords'] != 'none':
                    # within_brick = np.array([coord.contained_by(self.wcs[mosaic.band]) for coord in mosaic.data['psfcoords']])
                    within_brick = mosaic.data['psfcoords'].contained_by(self.wcs[mosaic.band])
                    if np.sum(within_brick) == 0:
                        # No PSF sample falls inside the brick: fall back to the
                        # nearest one on the sky.
                        # separation = np.array([coord.separation(self.position).to(u.arcsec).value for coord in mosaic.data['psfcoords']])
                        separation = mosaic.data['psfcoords'].separation(self.position).to(u.arcsec).value
                        nearest = np.argmin(separation)
                        self.logger.warning(f'No PSF coords within brick for {mosaic.band}! Adopting nearest at {mosaic.data["psfcoords"][nearest]}')
                        self.data[mosaic.band]['psfcoords'] = mosaic.data['psfcoords'][nearest]
                        self.data[mosaic.band]['psflist'] = [mosaic.data['psflist'][nearest]]
                    else:
                        self.data[mosaic.band]['psfcoords'] = mosaic.data['psfcoords'][within_brick]
                        self.data[mosaic.band]['psflist'] = mosaic.data['psflist'][within_brick]
                else:
                    # Single (constant) PSF for the whole mosaic.
                    self.data[mosaic.band]['psfcoords'] = mosaic.data['psfcoords']
                    self.data[mosaic.band]['psflist'] = mosaic.data['psflist']
                for imgtype in ('psfcoords', 'psflist'):
                    self.logger.debug(f'... data \"{imgtype}\" adopted from mosaic')
            else:
                self.data[mosaic.band][imgtype] = mosaic.data[imgtype]
                self.logger.debug(f'... data \"{imgtype}\" adopted from mosaic')
        # Loop over properties
        for attr in mosaic.properties.keys():
            self.properties[mosaic.band][attr] = mosaic.properties[attr]
            self.logger.debug(f'... property \"{attr}\" adopted from mosaic')
        # make a big filler
        filler = np.zeros_like(mosaic.data['science']) # big, but OK...
        subheader = self.headers[mosaic.band]['science'].copy()
        subheader.update(cutout.wcs.to_header())
        # if weights or masks dont exist, make them as dummy arrays
        if 'weight' not in self.data[mosaic.band]:
            self.logger.debug(f'... data \"weight\" subimage generated as ones at {cutout.input_position_original}')
            cutout = Cutout2D(filler, self.position, self.buffsize, wcs=mosaic.wcs, mode='partial', fill_value = np.nan, copy=True)
            self.data[mosaic.band]['weight'] = cutout
            self.headers[mosaic.band]['weight'] = subheader
        if 'mask' not in self.data[mosaic.band]:
            self.logger.debug(f'... data \"mask\" subimage generated as ones at {cutout.input_position_original}')
            cutout = Cutout2D(filler, self.position, self.buffsize, wcs=mosaic.wcs, mode='partial', fill_value = np.nan, copy=True)
            self.data[mosaic.band]['mask'] = cutout
            self.headers[mosaic.band]['mask'] = subheader
        if 'background' not in self.data[mosaic.band]:
            self.logger.debug(f'... data \"background\" subimage generated as ones at {cutout.input_position_original}')
            cutout = Cutout2D(filler, self.position, self.buffsize, wcs=mosaic.wcs, mode='partial', fill_value = np.nan, copy=True)
            self.data[mosaic.band]['background'] = cutout
            self.headers[mosaic.band]['background'] = subheader
        if 'rms' not in self.data[mosaic.band]:
            self.logger.debug(f'... data \"rms\" subimage generated as ones at {cutout.input_position_original}')
            cutout = Cutout2D(filler, self.position, self.buffsize, wcs=mosaic.wcs, mode='partial', fill_value = np.nan, copy=True)
            self.data[mosaic.band]['rms'] = cutout
            self.headers[mosaic.band]['rms'] = subheader
        del filler
        # get background info if backregion is 'brick' -- WILL overwrite inhereted info if it exists...
        if 'backregion' in self.properties[mosaic.band]:
            if self.properties[mosaic.band]['backregion'] == 'brick':
                self.estimate_background(band=mosaic.band, imgtype='science')
        self.logger.info(f'Added {mosaic.band} to brick #{self.brick_id}')
        # TODO -- should be able to INHERET catalogs from the parent mosaic, if they exist!
    def extract(self, band='detection', imgtype='science', background=None):
        """Run source extraction on *band*, store the catalog and segmap, and
        drop detections that fall in the brick's buffer region."""
        if self.properties[band]['subtract_background']:
            background = self.get_background(band)
        catalog, segmap = self._extract(band, imgtype='science', background=background)
        # clean out buffer -- these are bricks!
        self.logger.info('Removing sources detected in brick buffer...')
        cutout = Cutout2D(self.data[band][imgtype].data, self.position, self.size, wcs=self.data[band][imgtype].wcs)
        # `mask` is True exactly in the buffer annulus around the unbuffered brick.
        mask = Cutout2D(np.zeros(cutout.data.shape), self.position, self.buffsize, wcs=cutout.wcs, fill_value=1, mode='partial').data.astype(bool)
        segmap = Cutout2D(segmap, self.position, self.buffsize, self.wcs[band], fill_value=0, mode='partial')
        # do I actually need to do this?
        if np.any(mask):
            catalog, segmap.data = clean_catalog(catalog, mask, segmap=segmap.data)
            mask[mask & (segmap.data>0)] = False
        # save stuff
        self.catalogs[band][imgtype] = catalog
        self.data[band]['segmap'] = segmap
        self.headers[band]['segmap'] = self.headers[band]['science']
        self.data[band]['weight'].data[mask] = 0 #removes buffer but keeps segment pixels
        self.data[band]['mask'].data[mask] = True # adds buffer to existing mask
        self.n_sources[band][imgtype] = len(catalog)
        # add ids
        self.catalogs[band][imgtype].add_column(self.brick_id * np.ones(self.n_sources[band][imgtype], dtype=np.int32), name='brick_id', index=0)
        self.catalogs[band][imgtype].add_column(1+np.arange(self.n_sources[band][imgtype]), name='id', index=0)
        # add world positions
        skycoords = self.data[band][imgtype].wcs.all_pix2world(catalog['x'], catalog['y'], 0)
        self.catalogs[band][imgtype].add_column(skycoords[0]*u.deg, name=f'ra', index=1, )
        self.catalogs[band][imgtype].add_column(skycoords[1]*u.deg, name=f'dec', index=2)
        # generate regions file
        build_regions(self.catalogs[band][imgtype], self.pixel_scales[band][0], # you better have square pixels!
                    outpath = os.path.join(conf.PATH_ANCILLARY, f'B{self.brick_id}_{band}_{imgtype}_objects.reg'))
    def identify_groups(self, band='detection', imgtype='science', radius=conf.DILATION_RADIUS, overwrite=False):
        """Takes the catalog and segmap
        Dilates segments by *radius* (on-sky, converted to pixels), merges
        overlapping ones into groups, and records group ids/populations in
        the catalog plus a `groupmap` image layer.
        """
        catalog = self.catalogs[band][imgtype]
        segmap = self.data[band]['segmap'].data
        radius = radius.to(u.arcsec)
        radius_px = radius / (self.wcs['detection'].pixel_scale_matrix[-1,-1] * u.deg).to(u.arcsec) # this won't be so great for non-aligned images...
        radius_rpx = round(radius_px.value)
        self.logger.debug(f'Dilation radius of {radius} or {radius_px:2.2f} px rounded to {radius_rpx} px')
        group_ids, group_pops, groupmap = dilate_and_group(catalog, segmap, radius=radius_rpx, fill_holes=True)
        if overwrite:
            self.catalogs[band][imgtype]['group_id'] = group_ids
            self.catalogs[band][imgtype]['group_pop'] = group_pops
        else:
            self.catalogs[band][imgtype].add_column(group_ids, name='group_id', index=3)
            self.catalogs[band][imgtype].add_column(group_pops, name='group_pop', index=3)
        self.data[band]['groupmap'] = Cutout2D(groupmap, self.position, self.buffsize[::-1], self.wcs[band], mode='partial', fill_value = 0)
        self.group_ids[band][imgtype] = np.unique(group_ids)
        # self.group_pops[band][imgtype] = dict(zip(group_ids, group_pops))
        self.headers[band]['groupmap'] = self.headers[band]['science']
    def spawn_group(self, group_id=None, imgtype='science', bands=None, silent=False):
        """Build and return a :class:`Group` for *group_id*, copying in the
        relevant band cutouts, model catalog entries and trackers.  Groups
        larger than ``conf.GROUP_SIZE_LIMIT`` come back flagged ``rejected``."""
        # Instantiate brick
        if not silent:
            self.logger.info(f'Spawning Group #{group_id} from Brick #{self.brick_id}...')
        group = Group(group_id, self, imgtype=imgtype, silent=silent)
        if group.rejected:
            self.logger.warning(f'Group #{group_id} cannot be created!')
            return group
        # Cut up science, weight, and mask, if available
        group.add_bands(self, bands=bands)
        nsrcs = group.n_sources[group.catalog_band][imgtype]
        source_ids = np.array(group.get_catalog()['id'])
        group.source_ids = source_ids
        if not silent:
            self.logger.debug(f'Group #{group_id} has {nsrcs} sources: {source_ids}')
        if nsrcs > conf.GROUP_SIZE_LIMIT:
            if not silent:
                self.logger.warning(f'Group #{group_id} has {nsrcs} sources, but the limit is set to {conf.GROUP_SIZE_LIMIT}! Skipping...')
            group.rejected = True
            return group
        group.model_priors = self.model_priors
        group.phot_priors = self.phot_priors
        # Loop over model catalog
        group.model_catalog = {}
        for source_id, model in self.model_catalog.items():
            if source_id not in source_ids: continue
            group.model_catalog[source_id] = model
            group.model_tracker[source_id] = {}
            for stage, stats in self.model_tracker[source_id].items():
                group.model_tracker[source_id][stage] = stats
        group.model_tracker['group'] = {}
        if group_id in self.model_tracker_groups:
            for stage, stats in self.model_tracker_groups[group_id].items():
                group.model_tracker['group'][stage] = stats
        # # transfer maps
        # group.transfer_maps(group_id=group_id)
        # Return it
        return group
    def detect_sources(self, band='detection', imgtype='science'):
        """Convenience pipeline: extract sources, identify groups, transfer
        seg/group maps to all bands; returns the detection catalog."""
        # detection
        self.extract(band=band, imgtype=imgtype)
        # # grouping
        self.identify_groups(band=band, imgtype=imgtype)
        # transfer maps
        self.transfer_maps()
        if conf.PLOT > 1:
            self.plot_image(imgtype='science')
        return self.catalogs[band][imgtype]
    def process_groups(self, group_ids=None, imgtype='science', bands=None, mode='all'):
        """Spawn and run every group in *group_ids* (default: all), serially
        or via a process pool when ``conf.NCPUS > 0``, absorbing each result."""
        self.logger.info(f'Processing groups for brick {self.brick_id}...')
        tstart = time.time()
        if group_ids is None:
            group_ids = self.group_ids['detection'][imgtype]
        elif np.isscalar(group_ids):
            group_ids = [group_ids,]
        if mode == 'pass':
            bands = ['detection',]
        # Lazy generator: groups are built one-by-one as they are consumed.
        groups = (self.spawn_group(group_id, bands=bands, silent=(conf.NCPUS > 0)) for group_id in group_ids)
        # loop or parallel groups
        if (conf.NCPUS == 0) | (len(group_ids) == 1):
            for group in groups:
                group = run_group(group, mode=mode)
                # cleanup and hand back to brick
                self.absorb(group)
        else:
            with ProcessPool(ncpus=conf.NCPUS) as pool:
                pool.restart()
                import tqdm
                result = list(tqdm.tqdm(pool.imap(partial(run_group, mode=mode), groups), total=len(group_ids)))
                # result = list(pool.imap(partial(run_group, mode=mode), groups))
                [self.absorb(group) for group in result]
        # self.logger.setLevel(level)
        self.logger.info(f'Brick {self.brick_id} has processed {len(group_ids)} groups ({time.time() - tstart:2.2f}s)')
    # def run_group(self, group, mode='all'):
    #     if not group.rejected:
    #         if mode == 'all':
    #             status = group.determine_models()
    #             if status:
    #                 group.force_models()
    #         elif mode == 'model':
    #             group.determine_models()
    #         elif mode == 'photometry':
    #             group.force_models()
    #         elif mode == 'pass':
    #             pass
    #     else:
    #         self.logger.warning(f'Group {group.group_id} has been rejected!')
    #     return group
    def absorb(self, group): # eventually allow mosaic to do this too! absorb bricks + make a huge model catalog!
        """Merge a processed group's results back into this brick.

        *group* is the ``(group_id, model_catalog, model_tracker)`` tuple
        returned by ``run_group`` (see the unpacking below), not a Group
        instance.
        """
        group_id, model_catalog, model_tracker = group
        # check ownership
        # assert self.brick_id == brick_id, 'Group does not belong to this brick!'
        # # rebuild maps NOTE don't do this with cutouts. Realize it for the brick itself.
        # for band in self.data:
        #     if band == 'detection': # doesn't need new seg/group, and has no models
        #         continue
        #     for imgtype in group.data[band]:
        #         if imgtype in ('model', 'residual', 'chi'):
        #             (ymin, ymax), (xmin, xmax) = group.data[band][imgtype].bbox_original
        #             (yminc, ymaxc), (xminc, xmaxc) = group.data[band][imgtype].bbox_cutout
        #             if imgtype not in self.data[band].keys():
        #                 self.data[band][imgtype] = copy(self.data[band]['science'])
        #             self.data[band][imgtype].data[ymin:ymax, xmin:xmax] \
        #                 += group.data[band][imgtype].data[yminc:ymaxc, xminc:xmaxc]
        # model catalog
        for source in list(model_catalog.keys()):
            self.model_catalog[source] = model_catalog[source]
        # model tracker
        for source in list(model_tracker.keys()):
            if source == 'group':
                self.model_tracker_groups[group_id] = model_tracker[source]
            else:
                self.model_tracker[source] = model_tracker[source]
        self.logger.debug(f'Group {group_id} has been absorbed')
        del group
|
astroweaverREPO_NAMEthe_farmerPATH_START.@the_farmer_extracted@the_farmer-master@farmer@brick.py@.PATH_END.py
|
{
"filename": "codeutil.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipykernel/py2/ipykernel/codeutil.py",
"type": "Python"
}
|
# encoding: utf-8
"""Utilities to enable code objects to be pickled.
Any process that import this module will be able to pickle code objects. This
includes the func_code attribute of any function. Once unpickled, new
functions can be built using new.function(code, globals()). Eventually
we need to automate all of this so that functions themselves can be pickled.
Reference: A. Tremols, P Cogolo, "Python Cookbook," p 302-305
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import warnings
warnings.warn("ipykernel.codeutil is deprecated since IPykernel 4.3.1. It has moved to ipyparallel.serialize", DeprecationWarning)
import sys
import types
try:
import copyreg # Py 3
except ImportError:
import copy_reg as copyreg # Py 2
def code_ctor(*args):
    """Rebuild a code object from the positional args made by :func:`reduce_code`.

    Referenced by name from existing pickles, so its name and calling
    convention must stay stable.
    """
    return types.CodeType(*args)

def reduce_code(co):
    """Return a pickle ``__reduce__`` tuple ``(code_ctor, args)`` for *co*.

    Fix: the positional signature of ``types.CodeType`` has changed across
    interpreter versions, and the original argument list was only valid up to
    Python 3.7 (it omitted ``co_posonlyargcount``, added in 3.8, and the
    3.10/3.11 constructor changes), so reconstruction raised ``TypeError`` on
    any modern interpreter.  The argument list is now assembled to match the
    *running* interpreter:

    * <=3.7:  legacy layout (``co_kwonlyargcount`` inserted on Python 3)
    * 3.8/3.9: ``co_posonlyargcount`` added as the second argument
    * 3.10:   ``co_linetable`` replaces ``co_lnotab``
    * 3.11+:  ``co_qualname`` and ``co_exceptiontable`` added
    """
    if sys.version_info >= (3, 11):
        args = [co.co_argcount, co.co_posonlyargcount, co.co_kwonlyargcount,
                co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code,
                co.co_consts, co.co_names, co.co_varnames, co.co_filename,
                co.co_name, co.co_qualname, co.co_firstlineno,
                co.co_linetable, co.co_exceptiontable,
                co.co_freevars, co.co_cellvars]
    elif sys.version_info >= (3, 10):
        args = [co.co_argcount, co.co_posonlyargcount, co.co_kwonlyargcount,
                co.co_nlocals, co.co_stacksize, co.co_flags, co.co_code,
                co.co_consts, co.co_names, co.co_varnames, co.co_filename,
                co.co_name, co.co_firstlineno, co.co_linetable,
                co.co_freevars, co.co_cellvars]
    else:
        args = [co.co_argcount, co.co_nlocals, co.co_stacksize,
                co.co_flags, co.co_code, co.co_consts, co.co_names,
                co.co_varnames, co.co_filename, co.co_name, co.co_firstlineno,
                co.co_lnotab, co.co_freevars, co.co_cellvars]
        if sys.version_info[0] >= 3:
            args.insert(1, co.co_kwonlyargcount)
            if sys.version_info >= (3, 8):
                args.insert(1, co.co_posonlyargcount)
    return code_ctor, tuple(args)
copyreg.pickle(types.CodeType, reduce_code)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipykernel@py2@ipykernel@codeutil.py@.PATH_END.py
|
{
"filename": "variable.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/cli/variable.py",
"type": "Python"
}
|
import json
from typing import Any, Dict, List, Optional, Union
import pendulum
import typer
from rich.pretty import Pretty
from rich.table import Table
from typing_extensions import Annotated
from prefect.cli._types import PrefectTyper
from prefect.cli._utilities import exit_with_error, exit_with_success
from prefect.cli.root import app, is_interactive
from prefect.client.orchestration import get_client
from prefect.client.schemas.actions import VariableCreate, VariableUpdate
from prefect.exceptions import ObjectNotFound
# Typer sub-application backing the `prefect variable ...` command group,
# mounted on the root CLI app.
variable_app = PrefectTyper(name="variable", help="Manage variables.")
app.add_typer(variable_app)
@variable_app.command("ls")
async def list_variables(
    limit: int = typer.Option(
        100,
        "--limit",
        help="The maximum number of variables to return.",
    ),
):
    """
    List variables.
    """
    async with get_client() as client:
        variables = await client.read_variables(
            limit=limit,
        )
    table = Table(
        title="Variables",
        caption="List Variables using `prefect variable ls`",
        show_header=True,
    )
    table.add_column("Name", style="blue", no_wrap=True)
    # values can be up 5000 characters so truncate early
    table.add_column("Value", style="blue", no_wrap=True, max_width=50)
    table.add_column("Created", style="blue", no_wrap=True)
    table.add_column("Updated", style="blue", no_wrap=True)
    # Render rows sorted by name; timestamps shown as human deltas.
    for variable in sorted(variables, key=lambda x: f"{x.name}"):
        table.add_row(
            variable.name,
            json.dumps(variable.value),
            pendulum.instance(variable.created).diff_for_humans(),
            pendulum.instance(variable.updated).diff_for_humans(),
        )
    app.console.print(table)
@variable_app.command("inspect")
async def inspect(
    name: str,
):
    """
    View details about a variable.
    Arguments:
        name: the name of the variable to inspect
    """
    async with get_client() as client:
        variable = await client.read_variable_by_name(
            name=name,
        )
        if not variable:
            exit_with_error(f"Variable {name!r} not found.")
        # Pretty-print the full variable model (name, value, tags, timestamps).
        app.console.print(Pretty(variable))
@variable_app.command("get")
async def get(
    name: str,
):
    """
    Get a variable's value.
    Arguments:
        name: the name of the variable to get
    """
    async with get_client() as client:
        variable = await client.read_variable_by_name(name=name)
        # Missing variables exit with an error; otherwise print the JSON value.
        if not variable:
            exit_with_error(f"Variable {name!r} not found.")
        else:
            app.console.print(json.dumps(variable.value))
def parse_value(
    value: str,
) -> Union[str, int, float, bool, None, Dict[str, Any], List[str]]:
    """Interpret *value* as JSON when possible; otherwise return the raw string."""
    try:
        return json.loads(value)
    except json.JSONDecodeError:
        return value
@variable_app.command("set")
async def _set(
    name: str,
    value: str,
    overwrite: bool = typer.Option(
        False,
        "--overwrite",
        help="Overwrite the variable if it already exists.",
    ),
    tag: Annotated[
        Optional[List[str]], typer.Option(help="Tag to associate with the variable.")
    ] = None,
):
    """
    Set a variable.
    If the variable already exists, use `--overwrite` to update it.
    Arguments:
        name: the name of the variable to set
        value: the value of the variable to set
        --overwrite: overwrite the variable if it already exists
        --tag: tag to associate with the variable (you may pass multiple)
    """
    async with get_client() as client:
        variable = await client.read_variable_by_name(name)
        # The value string is stored as JSON when parseable, else verbatim.
        var_dict = {"name": name, "value": parse_value(value), "tags": tag or []}
        if variable:
            if not overwrite:
                exit_with_error(
                    f"Variable {name!r} already exists. Use `--overwrite` to update it."
                )
            await client.update_variable(VariableUpdate(**var_dict))
        else:
            await client.create_variable(VariableCreate(**var_dict))
        exit_with_success(f"Set variable {name!r}.")
@variable_app.command("unset", aliases=["delete"])
async def unset(
    name: str,
):
    """
    Unset a variable.
    Arguments:
        name: the name of the variable to unset
    """
    async with get_client() as client:
        try:
            # In interactive sessions, require confirmation before deleting.
            if is_interactive() and not typer.confirm(
                f"Are you sure you want to unset variable {name!r}?"
            ):
                exit_with_error("Unset aborted.")
            await client.delete_variable_by_name(
                name=name,
            )
        except ObjectNotFound:
            exit_with_error(f"Variable {name!r} not found.")
        exit_with_success(f"Unset variable {name!r}.")
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@cli@variable.py@.PATH_END.py
|
{
"filename": "config_example.py",
"repo_name": "jw-lin/lightbeam",
"repo_path": "lightbeam_extracted/lightbeam-master/config_example.py",
"type": "Python"
}
|
''' example configuration file for run_bpm_example.py '''
################################
## free space wavelength (um) ##
################################
wl0 = 1.55
########################
## lantern parameters ##
########################
zex = 30000 # length of lantern, in um
scale = 1/4 # how much smaller the input end is wrt the output end
rcore = 4.5 * scale # how large the lantern cores are, at the input (um)
rclad = 16.5 # how large the lantern cladding is, at the input (um)
ncore = 1.4504 + 0.0088 # lantern core refractive index
nclad = 1.4504 # cladding index
njack = 1.4504 - 5.5e-3 # jacket index
###################################
## sampling grid parameters (um) ##
###################################
xw0 = 128 # simulation zone x width (um)
yw0 = 128 # simulation zone y width (um)
zw = zex
ds = 1 # base grid resolution (um)
dz = 1 # z stepping resolution (um)
#############################
## mesh refinement options ##
#############################
ref_val = 1e-4 # controls adaptive meshing. lower -> more careful
remesh_every = 50 # how many z-steps to go before recomputing the adaptive mesh
max_remesh_iters = 6 # maximum amount of subdivisions when computing the adaptive mesh
xw_func = None # optional functions which allow the simulation zone to "grow" with z, which may save on computation time
yw_func = None
##################
## PML settings ##
##################
num_PML = 12
sig_max = 3. + 0.j
######################
## set launch field ##
######################
import numpy as np
import matplotlib.pyplot as plt
import LPmodes
from misc import normalize
xa = np.linspace(-xw0/2,xw0/2,int(xw0/ds)+1)
ya = np.linspace(-yw0/2,yw0/2,int(yw0/ds)+1)
xg,yg = np.meshgrid(xa,ya)
# Launch field: normalized LP(2,1)-like cladding mode (l=2, m=1 per the
# lpfield arguments below) evaluated on the base grid.
u0 = normalize(LPmodes.lpfield(xg,yg,2,1,rclad,wl0,nclad,njack,'cos'))
fplanewidth = 0 # manually reset the width of the input field. set to 0 to match field extend with grid extent.
#####################
## reference index ##
#####################
n0 = 1.4504
dynamic_n0 = False
###################
## monitor field ##
###################
u1_func = None
#############################
## write out field dist to ##
#############################
writeto = None
# generate optical element
import optics
# 19-port photonic lantern; core pitch rclad/3 and output end scaled up by
# 1/scale -- TODO confirm geometry against optics.lant19's signature.
optic = optics.lant19(rcore,rclad,ncore,nclad,njack,rclad/3,zex,final_scale=1/scale)
#######################
## initial core locs ##
#######################
xpos_i = optic.core_locs[:,0]
ypos_i = optic.core_locs[:,1]
#####################
## final core locs ##
#####################
xpos = xpos_i / scale
ypos = ypos_i / scale
|
jw-linREPO_NAMElightbeamPATH_START.@lightbeam_extracted@lightbeam-master@config_example.py@.PATH_END.py
|
{
"filename": "test_survival.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/stats/tests/test_survival.py",
"type": "Python"
}
|
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from scipy import stats
from scipy.stats import _survival
def _kaplan_meier_reference(times, censored):
# This is a very straightforward implementation of the Kaplan-Meier
# estimator that does almost everything differently from the implementation
# in stats.ecdf.
# Begin by sorting the raw data. Note that the order of death and loss
# at a given time matters: death happens first. See [2] page 461:
# "These conventions may be paraphrased by saying that deaths recorded as
# of an age t are treated as if they occurred slightly before t, and losses
# recorded as of an age t are treated as occurring slightly after t."
# We implement this by sorting the data first by time, then by `censored`,
# (which is 0 when there is a death and 1 when there is only a loss).
dtype = [('time', float), ('censored', int)]
data = np.array([(t, d) for t, d in zip(times, censored)], dtype=dtype)
data = np.sort(data, order=('time', 'censored'))
times = data['time']
died = np.logical_not(data['censored'])
m = times.size
n = np.arange(m, 0, -1) # number at risk
sf = np.cumprod((n - died) / n)
# Find the indices of the *last* occurrence of unique times. The
# corresponding entries of `times` and `sf` are what we want.
_, indices = np.unique(times[::-1], return_index=True)
ref_times = times[-indices - 1]
ref_sf = sf[-indices - 1]
return ref_times, ref_sf
class TestSurvival:
    """Tests for `stats.ecdf` on (right-censored) data, i.e. Kaplan-Meier.

    Reference values come from published worked examples (URLs inline),
    Matlab's `ecdf`, Mathematica's `SurvivalModelFit`, and R's `survival`
    package; the exact reproduction commands are kept as comments.
    """

    @staticmethod
    def get_random_sample(rng, n_unique):
        """Build a random right-censored sample containing tied times."""
        # generate random sample
        unique_times = rng.random(n_unique)
        # convert to `np.int32` to resolve `np.repeat` failure in 32-bit CI
        repeats = rng.integers(1, 4, n_unique).astype(np.int32)
        times = rng.permuted(np.repeat(unique_times, repeats))
        censored = rng.random(size=times.size) > rng.random()
        sample = stats.CensoredData.right_censored(times, censored)
        return sample, times, censored

    def test_input_validation(self):
        """Invalid samples/CI arguments raise with informative messages."""
        message = '`sample` must be a one-dimensional sequence.'
        with pytest.raises(ValueError, match=message):
            stats.ecdf([[1]])
        with pytest.raises(ValueError, match=message):
            stats.ecdf(1)

        message = '`sample` must not contain nan'
        with pytest.raises(ValueError, match=message):
            stats.ecdf([np.nan])

        message = 'Currently, only uncensored and right-censored data...'
        with pytest.raises(NotImplementedError, match=message):
            stats.ecdf(stats.CensoredData.left_censored([1], censored=[True]))

        message = 'method` must be one of...'
        res = stats.ecdf([1, 2, 3])
        with pytest.raises(ValueError, match=message):
            res.cdf.confidence_interval(method='ekki-ekki')
        with pytest.raises(ValueError, match=message):
            res.sf.confidence_interval(method='shrubbery')

        message = 'confidence_level` must be a scalar between 0 and 1'
        with pytest.raises(ValueError, match=message):
            res.cdf.confidence_interval(-1)
        with pytest.raises(ValueError, match=message):
            res.sf.confidence_interval([0.5, 0.6])

        message = 'The confidence interval is undefined at some observations.'
        with pytest.warns(RuntimeWarning, match=message):
            ci = res.cdf.confidence_interval()

        message = 'Confidence interval bounds do not implement...'
        with pytest.raises(NotImplementedError, match=message):
            ci.low.confidence_interval()
        with pytest.raises(NotImplementedError, match=message):
            ci.high.confidence_interval()

    def test_edge_cases(self):
        """Empty and single-observation samples behave sensibly."""
        res = stats.ecdf([])
        assert_equal(res.cdf.quantiles, [])
        assert_equal(res.cdf.probabilities, [])

        res = stats.ecdf([1])
        assert_equal(res.cdf.quantiles, [1])
        assert_equal(res.cdf.probabilities, [1])

    def test_unique(self):
        # Example with unique observations; `stats.ecdf` ref. [1] page 80
        sample = [6.23, 5.58, 7.06, 6.42, 5.20]
        res = stats.ecdf(sample)
        ref_x = np.sort(np.unique(sample))
        ref_cdf = np.arange(1, 6) / 5
        ref_sf = 1 - ref_cdf
        assert_equal(res.cdf.quantiles, ref_x)
        assert_equal(res.cdf.probabilities, ref_cdf)
        assert_equal(res.sf.quantiles, ref_x)
        assert_equal(res.sf.probabilities, ref_sf)

    def test_nonunique(self):
        # Example with non-unique observations; `stats.ecdf` ref. [1] page 82
        sample = [0, 2, 1, 2, 3, 4]
        res = stats.ecdf(sample)
        ref_x = np.sort(np.unique(sample))
        ref_cdf = np.array([1/6, 2/6, 4/6, 5/6, 1])
        ref_sf = 1 - ref_cdf
        assert_equal(res.cdf.quantiles, ref_x)
        assert_equal(res.cdf.probabilities, ref_cdf)
        assert_equal(res.sf.quantiles, ref_x)
        assert_equal(res.sf.probabilities, ref_sf)

    def test_evaluate_methods(self):
        # Test CDF and SF `evaluate` methods
        rng = np.random.default_rng(1162729143302572461)
        sample, _, _ = self.get_random_sample(rng, 15)
        res = stats.ecdf(sample)
        x = res.cdf.quantiles
        xr = x + np.diff(x, append=x[-1]+1)/2  # right shifted points

        assert_equal(res.cdf.evaluate(x), res.cdf.probabilities)
        assert_equal(res.cdf.evaluate(xr), res.cdf.probabilities)
        assert_equal(res.cdf.evaluate(x[0]-1), 0)  # CDF starts at 0
        assert_equal(res.cdf.evaluate([-np.inf, np.inf]), [0, 1])

        assert_equal(res.sf.evaluate(x), res.sf.probabilities)
        assert_equal(res.sf.evaluate(xr), res.sf.probabilities)
        assert_equal(res.sf.evaluate(x[0]-1), 1)  # SF starts at 1
        assert_equal(res.sf.evaluate([-np.inf, np.inf]), [1, 0])

    # Class-level reference cases (t = times, d = death flags, r = expected SF)
    # ref. [1] page 91
    t1 = [37, 43, 47, 56, 60, 62, 71, 77, 80, 81]  # times
    d1 = [0, 0, 1, 1, 0, 0, 0, 1, 1, 1]  # 1 means deaths (not censored)
    r1 = [1, 1, 0.875, 0.75, 0.75, 0.75, 0.75, 0.5, 0.25, 0]  # reference SF

    # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
    t2 = [8, 12, 26, 14, 21, 27, 8, 32, 20, 40]
    d2 = [1, 1, 1, 1, 1, 1, 0, 0, 0, 0]
    r2 = [0.9, 0.788, 0.675, 0.675, 0.54, 0.405, 0.27, 0.27, 0.27]
    t3 = [33, 28, 41, 48, 48, 25, 37, 48, 25, 43]
    d3 = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0]
    r3 = [1, 0.875, 0.75, 0.75, 0.6, 0.6, 0.6]

    # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/bs704_survival4.html
    t4 = [24, 3, 11, 19, 24, 13, 14, 2, 18, 17,
          24, 21, 12, 1, 10, 23, 6, 5, 9, 17]
    d4 = [0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1]
    r4 = [0.95, 0.95, 0.897, 0.844, 0.844, 0.844, 0.844, 0.844, 0.844,
          0.844, 0.76, 0.676, 0.676, 0.676, 0.676, 0.507, 0.507]

    # https://www.real-statistics.com/survival-analysis/kaplan-meier-procedure/confidence-interval-for-the-survival-function/
    t5 = [3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11]
    d5 = [1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1]
    r5 = [0.944, 0.889, 0.722, 0.542, 0.542, 0.542, 0.361, 0.181, 0.181, 0.181]

    @pytest.mark.parametrize("case", [(t1, d1, r1), (t2, d2, r2), (t3, d3, r3),
                                      (t4, d4, r4), (t5, d5, r5)])
    def test_right_censored_against_examples(self, case):
        # test `ecdf` against other implementations on example problems
        times, died, ref = case
        sample = stats.CensoredData.right_censored(times, np.logical_not(died))
        res = stats.ecdf(sample)
        assert_allclose(res.sf.probabilities, ref, atol=1e-3)
        assert_equal(res.sf.quantiles, np.sort(np.unique(times)))

        # test reference implementation against other implementations
        res = _kaplan_meier_reference(times, np.logical_not(died))
        assert_equal(res[0], np.sort(np.unique(times)))
        assert_allclose(res[1], ref, atol=1e-3)

    @pytest.mark.parametrize('seed', [182746786639392128, 737379171436494115,
                                      576033618403180168, 308115465002673650])
    def test_right_censored_against_reference_implementation(self, seed):
        # test `ecdf` against reference implementation on random problems
        rng = np.random.default_rng(seed)
        n_unique = rng.integers(10, 100)
        sample, times, censored = self.get_random_sample(rng, n_unique)
        res = stats.ecdf(sample)
        ref = _kaplan_meier_reference(times, censored)
        assert_allclose(res.sf.quantiles, ref[0])
        assert_allclose(res.sf.probabilities, ref[1])

        # If all observations are uncensored, the KM estimate should match
        # the usual estimate for uncensored data
        sample = stats.CensoredData(uncensored=times)
        res = _survival._ecdf_right_censored(sample)  # force Kaplan-Meier
        ref = stats.ecdf(times)
        assert_equal(res[0], ref.sf.quantiles)
        assert_allclose(res[1], ref.cdf.probabilities, rtol=1e-14)
        assert_allclose(res[2], ref.sf.probabilities, rtol=1e-14)

    def test_right_censored_ci(self):
        # test "greenwood" confidence interval against example 4 (URL above).
        times, died = self.t4, self.d4
        sample = stats.CensoredData.right_censored(times, np.logical_not(died))
        res = stats.ecdf(sample)
        ref_allowance = [0.096, 0.096, 0.135, 0.162, 0.162, 0.162, 0.162,
                         0.162, 0.162, 0.162, 0.214, 0.246, 0.246, 0.246,
                         0.246, 0.341, 0.341]

        sf_ci = res.sf.confidence_interval()
        cdf_ci = res.cdf.confidence_interval()
        allowance = res.sf.probabilities - sf_ci.low.probabilities
        assert_allclose(allowance, ref_allowance, atol=1e-3)
        # Greenwood intervals are symmetric about the estimate, clipped to [0, 1].
        assert_allclose(sf_ci.low.probabilities,
                        np.clip(res.sf.probabilities - allowance, 0, 1))
        assert_allclose(sf_ci.high.probabilities,
                        np.clip(res.sf.probabilities + allowance, 0, 1))
        assert_allclose(cdf_ci.low.probabilities,
                        np.clip(res.cdf.probabilities - allowance, 0, 1))
        assert_allclose(cdf_ci.high.probabilities,
                        np.clip(res.cdf.probabilities + allowance, 0, 1))

        # test "log-log" confidence interval against Mathematica
        # e = {24, 3, 11, 19, 24, 13, 14, 2, 18, 17, 24, 21, 12, 1, 10, 23, 6, 5,
        #      9, 17}
        # ci = {1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0}
        # R = EventData[e, ci]
        # S = SurvivalModelFit[R]
        # S["PointwiseIntervals", ConfidenceLevel->0.95,
        #   ConfidenceTransform->"LogLog"]
        ref_low = [0.694743, 0.694743, 0.647529, 0.591142, 0.591142, 0.591142,
                   0.591142, 0.591142, 0.591142, 0.591142, 0.464605, 0.370359,
                   0.370359, 0.370359, 0.370359, 0.160489, 0.160489]
        ref_high = [0.992802, 0.992802, 0.973299, 0.947073, 0.947073, 0.947073,
                    0.947073, 0.947073, 0.947073, 0.947073, 0.906422, 0.856521,
                    0.856521, 0.856521, 0.856521, 0.776724, 0.776724]
        sf_ci = res.sf.confidence_interval(method='log-log')
        assert_allclose(sf_ci.low.probabilities, ref_low, atol=1e-6)
        assert_allclose(sf_ci.high.probabilities, ref_high, atol=1e-6)

    def test_right_censored_ci_example_5(self):
        # test "exponential greenwood" confidence interval against example 5
        times, died = self.t5, self.d5
        sample = stats.CensoredData.right_censored(times, np.logical_not(died))
        res = stats.ecdf(sample)
        lower = np.array([0.66639, 0.624174, 0.456179, 0.287822, 0.287822,
                          0.287822, 0.128489, 0.030957, 0.030957, 0.030957])
        upper = np.array([0.991983, 0.970995, 0.87378, 0.739467, 0.739467,
                          0.739467, 0.603133, 0.430365, 0.430365, 0.430365])
        sf_ci = res.sf.confidence_interval(method='log-log')
        cdf_ci = res.cdf.confidence_interval(method='log-log')

        assert_allclose(sf_ci.low.probabilities, lower, atol=1e-5)
        assert_allclose(sf_ci.high.probabilities, upper, atol=1e-5)
        # CDF interval is the reflected SF interval: bounds swap roles.
        assert_allclose(cdf_ci.low.probabilities, 1-upper, atol=1e-5)
        assert_allclose(cdf_ci.high.probabilities, 1-lower, atol=1e-5)

        # Test against R's `survival` library `survfit` function, 90%CI
        # library(survival)
        # options(digits=16)
        # time = c(3, 5, 8, 10, 5, 5, 8, 12, 15, 14, 2, 11, 10, 9, 12, 5, 8, 11)
        # status = c(1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1)
        # res = survfit(Surv(time, status)
        #               ~1, conf.type = "log-log", conf.int = 0.90)
        # res$time; res$lower; res$upper
        low = [0.74366748406861172, 0.68582332289196246, 0.50596835651480121,
               0.32913131413336727, 0.32913131413336727, 0.32913131413336727,
               0.15986912028781664, 0.04499539918147757, 0.04499539918147757,
               0.04499539918147757]
        high = [0.9890291867238429, 0.9638835422144144, 0.8560366823086629,
                0.7130167643978450, 0.7130167643978450, 0.7130167643978450,
                0.5678602982997164, 0.3887616766886558, 0.3887616766886558,
                0.3887616766886558]
        sf_ci = res.sf.confidence_interval(method='log-log',
                                           confidence_level=0.9)
        assert_allclose(sf_ci.low.probabilities, low)
        assert_allclose(sf_ci.high.probabilities, high)

        # And with conf.type = "plain"
        low = [0.8556383113628162, 0.7670478794850761, 0.5485720663578469,
               0.3441515412527123, 0.3441515412527123, 0.3441515412527123,
               0.1449184105424544, 0., 0., 0.]
        high = [1., 1., 0.8958723780865975, 0.7391817920806210,
                0.7391817920806210, 0.7391817920806210, 0.5773038116797676,
                0.3642270254596720, 0.3642270254596720, 0.3642270254596720]
        sf_ci = res.sf.confidence_interval(confidence_level=0.9)
        assert_allclose(sf_ci.low.probabilities, low)
        assert_allclose(sf_ci.high.probabilities, high)

    def test_right_censored_ci_nans(self):
        # test `ecdf` confidence interval on a problem that results in NaNs
        times, died = self.t1, self.d1
        sample = stats.CensoredData.right_censored(times, np.logical_not(died))
        res = stats.ecdf(sample)

        # Reference values generated with Matlab
        # format long
        # t = [37 43 47 56 60 62 71 77 80 81];
        # d = [0 0 1 1 0 0 0 1 1 1];
        # censored = ~d1;
        # [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Alpha', 0.05);
        x = [37, 47, 56, 77, 80, 81]
        flo = [np.nan, 0, 0, 0.052701464070711, 0.337611126231790, np.nan]
        fup = [np.nan, 0.35417230377, 0.5500569798, 0.9472985359, 1.0, np.nan]
        i = np.searchsorted(res.cdf.quantiles, x)
        message = "The confidence interval is undefined at some observations"
        with pytest.warns(RuntimeWarning, match=message):
            ci = res.cdf.confidence_interval()
        # Matlab gives NaN as the first element of the CIs. Mathematica agrees,
        # but R's survfit does not. It makes some sense, but it's not what the
        # formula gives, so skip that element.
        assert_allclose(ci.low.probabilities[i][1:], flo[1:])
        assert_allclose(ci.high.probabilities[i][1:], fup[1:])

        # [f, x, flo, fup] = ecdf(t, 'Censoring', censored, 'Function',
        #                         'survivor', 'Alpha', 0.05);
        flo = [np.nan, 0.64582769623, 0.449943020228, 0.05270146407, 0, np.nan]
        fup = [np.nan, 1.0, 1.0, 0.947298535929289, 0.662388873768210, np.nan]
        i = np.searchsorted(res.cdf.quantiles, x)
        with pytest.warns(RuntimeWarning, match=message):
            ci = res.sf.confidence_interval()
        assert_allclose(ci.low.probabilities[i][1:], flo[1:])
        assert_allclose(ci.high.probabilities[i][1:], fup[1:])

        # With the same data, R's `survival` library `survfit` function
        # doesn't produce the leading NaN
        # library(survival)
        # options(digits=16)
        # time = c(37, 43, 47, 56, 60, 62, 71, 77, 80, 81)
        # status = c(0, 0, 1, 1, 0, 0, 0, 1, 1, 1)
        # res = survfit(Surv(time, status)
        #               ~1, conf.type = "plain", conf.int = 0.95)
        # res$time
        # res$lower
        # res$upper
        low = [1., 1., 0.64582769623233816, 0.44994302022779326,
               0.44994302022779326, 0.44994302022779326, 0.44994302022779326,
               0.05270146407071086, 0., np.nan]
        high = [1., 1., 1., 1., 1., 1., 1., 0.9472985359292891,
                0.6623888737682101, np.nan]
        assert_allclose(ci.low.probabilities, low)
        assert_allclose(ci.high.probabilities, high)

        # It does with conf.type="log-log", as do we
        with pytest.warns(RuntimeWarning, match=message):
            ci = res.sf.confidence_interval(method='log-log')
        low = [np.nan, np.nan, 0.38700001403202522, 0.31480711370551911,
               0.31480711370551911, 0.31480711370551911, 0.31480711370551911,
               0.08048821148507734, 0.01049958986680601, np.nan]
        high = [np.nan, np.nan, 0.9813929658789660, 0.9308983170906275,
                0.9308983170906275, 0.9308983170906275, 0.9308983170906275,
                0.8263946341076415, 0.6558775085110887, np.nan]
        assert_allclose(ci.low.probabilities, low)
        assert_allclose(ci.high.probabilities, high)

    def test_right_censored_against_uncensored(self):
        """Censoring only the largest observation leaves the SF unchanged
        everywhere except at the final point."""
        rng = np.random.default_rng(7463952748044886637)
        sample = rng.integers(10, 100, size=1000)
        censored = np.zeros_like(sample)
        censored[np.argmax(sample)] = True
        res = stats.ecdf(sample)
        ref = stats.ecdf(stats.CensoredData.right_censored(sample, censored))
        assert_equal(res.sf.quantiles, ref.sf.quantiles)
        assert_equal(res.sf._n, ref.sf._n)
        assert_equal(res.sf._d[:-1], ref.sf._d[:-1])  # difference @ [-1]
        assert_allclose(res.sf._sf[:-1], ref.sf._sf[:-1], rtol=1e-14)

    def test_plot_iv(self):
        """`plot` works when matplotlib is available and raises otherwise."""
        rng = np.random.default_rng(1769658657308472721)
        n_unique = rng.integers(10, 100)
        sample, _, _ = self.get_random_sample(rng, n_unique)
        res = stats.ecdf(sample)

        try:
            import matplotlib.pyplot as plt  # noqa: F401
            res.sf.plot()  # no other errors occur
        except (ModuleNotFoundError, ImportError):
            message = r"matplotlib must be installed to use method `plot`."
            with pytest.raises(ModuleNotFoundError, match=message):
                res.sf.plot()
class TestLogRank:
    """Tests for `stats.logrank`, validated against R's `survival` (survdiff)
    and `nph` (logrank.test) packages; reproduction commands kept inline."""

    @pytest.mark.parametrize(
        "x, y, statistic, pvalue",
        # Results validate with R
        # library(survival)
        # options(digits=16)
        #
        # futime_1 <- c(8, 12, 26, 14, 21, 27, 8, 32, 20, 40)
        # fustat_1 <- c(1, 1, 1, 1, 1, 1, 0, 0, 0, 0)
        # rx_1 <- c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        #
        # futime_2 <- c(33, 28, 41, 48, 48, 25, 37, 48, 25, 43)
        # fustat_2 <- c(1, 1, 1, 0, 0, 0, 0, 0, 0, 0)
        # rx_2 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
        #
        # futime <- c(futime_1, futime_2)
        # fustat <- c(fustat_1, fustat_2)
        # rx <- c(rx_1, rx_2)
        #
        # survdiff(formula = Surv(futime, fustat) ~ rx)
        #
        # Also check against another library which handle alternatives
        # library(nph)
        # logrank.test(futime, fustat, rx, alternative = "two.sided")
        # res["test"]
        [(
            # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
            # uncensored, censored
            [[8, 12, 26, 14, 21, 27], [8, 32, 20, 40]],
            [[33, 28, 41], [48, 48, 25, 37, 48, 25, 43]],
            # chi2, ["two-sided", "less", "greater"]
            6.91598157449,
            [0.008542873404, 0.9957285632979385, 0.004271436702061537]
        ),
        (
            # https://sphweb.bumc.bu.edu/otlt/mph-modules/bs/bs704_survival/BS704_Survival5.html
            [[19, 6, 5, 4], [20, 19, 17, 14]],
            [[16, 21, 7], [21, 15, 18, 18, 5]],
            0.835004855038,
            [0.3608293039, 0.8195853480676912, 0.1804146519323088]
        ),
        (
            # Bland, Altman, "The logrank test", BMJ, 2004
            # https://www.bmj.com/content/328/7447/1073.short
            [[6, 13, 21, 30, 37, 38, 49, 50, 63, 79, 86, 98, 202, 219],
             [31, 47, 80, 82, 82, 149]],
            [[10, 10, 12, 13, 14, 15, 16, 17, 18, 20, 24, 24, 25, 28, 30,
              33, 35, 37, 40, 40, 46, 48, 76, 81, 82, 91, 112, 181],
             [34, 40, 70]],
            7.49659416854,
            [0.006181578637, 0.003090789318730882, 0.9969092106812691]
        )]
    )
    def test_log_rank(self, x, y, statistic, pvalue):
        """Check statistic**2 and p-values against R for every alternative."""
        x = stats.CensoredData(uncensored=x[0], right=x[1])
        y = stats.CensoredData(uncensored=y[0], right=y[1])

        for i, alternative in enumerate(["two-sided", "less", "greater"]):
            res = stats.logrank(x=x, y=y, alternative=alternative)

            # we return z and use the normal distribution while other framework
            # return z**2. The p-value are directly comparable, but we have to
            # square the statistic
            assert_allclose(res.statistic**2, statistic, atol=1e-10)
            assert_allclose(res.pvalue, pvalue[i], atol=1e-10)

    def test_raises(self):
        """Non-`CensoredData` inputs for x/y raise informative ValueErrors."""
        sample = stats.CensoredData([1, 2])

        msg = r"`y` must be"
        with pytest.raises(ValueError, match=msg):
            stats.logrank(x=sample, y=[[1, 2]])

        msg = r"`x` must be"
        with pytest.raises(ValueError, match=msg):
            stats.logrank(x=[[1, 2]], y=sample)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@stats@tests@test_survival.py@.PATH_END.py
|
{
"filename": "warnings.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/traitlets/py3/traitlets/utils/warnings.py",
"type": "Python"
}
|
from __future__ import annotations
import inspect
import os
import typing as t
import warnings
def warn(msg: str, category: t.Any, *, stacklevel: int, source: t.Any = None) -> None:
    """Emit *msg* via :func:`warnings.warn`, with mandatory category/stacklevel.

    The default ``stacklevel`` of 1 is almost never what you want, so this
    wrapper forces callers to pick one explicitly.
    """
    warnings.warn(msg, category, stacklevel=stacklevel, source=source)
def deprecated_method(method: t.Any, cls: t.Any, method_name: str, msg: str) -> None:
    """Show deprecation warning about a magic method definition.

    Uses warn_explicit to bind warning to method definition instead of triggering code,
    which isn't relevant.
    """
    # Build the message with the *original* class name before `cls` is
    # rebound to the defining parent in the loop below.
    warn_msg = f"{cls.__name__}.{method_name} is deprecated in traitlets 4.1: {msg}"

    # Walk the MRO to find the class that actually defines the method, so
    # the warning (and the dedup key) is attributed to the defining package.
    for parent in inspect.getmro(cls):
        if method_name in parent.__dict__:
            cls = parent
            break
    # limit deprecation messages to once per package
    package_name = cls.__module__.split(".", 1)[0]
    key = (package_name, msg)
    if not should_warn(key):
        return
    try:
        # Locate the method's source so the warning points at its definition.
        fname = inspect.getsourcefile(method) or "<unknown>"
        lineno = inspect.getsourcelines(method)[1] or 0
    except (OSError, TypeError) as e:
        # Failed to inspect for some reason
        warn(
            warn_msg + ("\n(inspection failed) %s" % e),
            DeprecationWarning,
            stacklevel=2,
        )
    else:
        # Bind the warning to the method's definition site, not the caller.
        warnings.warn_explicit(warn_msg, DeprecationWarning, fname, lineno)
# Keys `(package_name, msg)` for which a deprecation warning has already been
# emitted; consulted by `should_warn` to limit warnings to once per package.
_deprecations_shown = set()
def should_warn(key: t.Any) -> bool:
    """Decide whether the deprecation warning identified by *key* should fire.

    Returns ``True`` only the first time a given key is seen, unless the
    ``TRAITLETS_ALL_DEPRECATIONS`` environment variable is set to a
    non-empty value other than ``"0"``, which disables deduplication.
    """
    # Escape hatch: show every deprecation regardless of deduplication.
    if os.environ.get("TRAITLETS_ALL_DEPRECATIONS", "") not in ("", "0"):
        return True

    if key in _deprecations_shown:
        return False
    _deprecations_shown.add(key)
    return True
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@traitlets@py3@traitlets@utils@warnings.py@.PATH_END.py
|
{
"filename": "winatt.py",
"repo_name": "shkarupa-alex/tfswin",
"repo_path": "tfswin_extracted/tfswin-master/tfswin/winatt.py",
"type": "Python"
}
|
import numpy as np
import tensorflow as tf
from keras.src import initializers, layers, ops
from keras.src.layers.input_spec import InputSpec
from keras.src.saving import register_keras_serializable
from tfswin.window import window_partition_fused, window_reverse_fused
@register_keras_serializable(package='TFSwin')
class WindowAttention(layers.Layer):
    """Window-based multi-head self-attention for Swin Transformer.

    Supports both Swin V1 (learned relative-position bias table, scalar QK
    scale) and Swin V2 (cosine attention with a learned per-head logit scale
    and a continuous position-bias MLP), selected by ``swin_v2``.

    Per ``input_spec``, `call` receives four inputs: a 4-D feature map, a
    scalar int32 window size, a 1-D int32 relative-position index, and a
    5-D attention mask.
    """

    def __init__(self, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.,
                 window_pretrain=0, swin_v2=False, **kwargs):
        super().__init__(**kwargs)
        self.input_spec = [
            InputSpec(ndim=4), InputSpec(ndim=0, dtype='int32'), InputSpec(ndim=1, dtype='int32'),
            InputSpec(ndim=5)]

        self.num_heads = num_heads
        self.qkv_bias = qkv_bias
        self.qk_scale = qk_scale          # V1 only; defaults to head_dim ** -0.5 when falsy
        self.attn_drop = attn_drop        # dropout rate on attention weights
        self.proj_drop = proj_drop        # dropout rate after output projection
        self.window_pretrain = window_pretrain  # pretraining window size (V2 bias rescaling; 0 = unused)
        self.swin_v2 = swin_v2

    def build(self, input_shape):
        """Create weights; the V1 and V2 branches differ (see class docs)."""
        # noinspection PyAttributeOutsideInit
        self.channels = input_shape[0][-1]
        if self.channels is None:
            raise ValueError('Channel dimensions of the inputs should be defined. Found `None`.')

        # In V2 the Q/V biases are separate weights (K bias fixed at zero in
        # `call`), so the fused QKV dense layer itself is bias-free.
        qkv_bias = not self.swin_v2 and self.qkv_bias
        # noinspection PyAttributeOutsideInit
        self.qkv = layers.Dense(self.channels * 3, use_bias=qkv_bias, name='qkv', dtype=self.dtype_policy)
        self.qkv.build(input_shape[0])

        if self.swin_v2:
            # Learnable per-head logit scale for cosine attention.
            # noinspection PyAttributeOutsideInit
            self.scale = self.add_weight(
                name='logit_scale',
                shape=[self.num_heads, 1, 1],
                initializer=initializers.Constant(np.log(10.)),
                trainable=True,
                dtype=self.dtype)

            # Continuous position-bias MLP: 2 -> 512 -> num_heads.
            # noinspection PyAttributeOutsideInit
            self.cpb0 = layers.Dense(512, activation='relu', name='cpb_mlp.0', dtype=self.dtype_policy)
            self.cpb0.build([1, None, None, 2])
            self.cpb1 = layers.Dense(self.num_heads, activation='sigmoid', use_bias=False, name=f'cpb_mlp.2', dtype=self.dtype_policy)
            self.cpb1.build([1, None, None, 512])

            # noinspection PyAttributeOutsideInit
            self.q_bias = None
            # noinspection PyAttributeOutsideInit
            self.v_bias = None
            if self.qkv_bias:
                self.q_bias = self.add_weight(
                    name='q_bias',
                    shape=[self.channels],
                    initializer='zeros',
                    trainable=True,
                    dtype=self.dtype)
                self.v_bias = self.add_weight(
                    name='v_bias',
                    shape=[self.channels],
                    initializer='zeros',
                    trainable=True,
                    dtype=self.dtype)
        else:
            # V1: fixed scalar scale plus a learned relative-position bias table.
            # noinspection PyAttributeOutsideInit
            self.scale = self.qk_scale or (self.channels // self.num_heads) ** -0.5

            # noinspection PyAttributeOutsideInit
            self.relative_bias = self.add_weight(
                name='relative_position_bias_table',
                shape=[(2 * self.window_pretrain - 1) ** 2, self.num_heads],
                initializer=initializers.TruncatedNormal(stddev=0.02),
                trainable=True,
                dtype=self.dtype)

        # noinspection PyAttributeOutsideInit
        self.drop_attn = layers.Dropout(self.attn_drop, dtype=self.dtype_policy)

        # noinspection PyAttributeOutsideInit
        self.proj = layers.Dense(self.channels, name='proj', dtype=self.dtype_policy)
        self.proj.build(input_shape[0])
        # noinspection PyAttributeOutsideInit
        self.drop_proj = layers.Dropout(self.proj_drop, dtype=self.dtype_policy)

        super().build(input_shape)

    def relative_table(self, window_size):
        """Log-scaled relative-coordinate grid for the V2 position-bias MLP.

        Builds all (dy, dx) offsets in ``[1 - window_size, window_size)``,
        normalizes them by the pretraining window when one is set (else the
        current window), and applies the sign-preserving log scaling from
        the Swin V2 recipe.
        """
        offset = ops.arange(1 - window_size, window_size)
        offset = ops.cast(offset, self.compute_dtype)
        offset = ops.stack(ops.meshgrid(offset, offset, indexing='ij'))
        offset = ops.transpose(offset, [1, 2, 0])[None]

        window = self.window_pretrain if self.window_pretrain > 0 else window_size
        offset *= 8. / (ops.cast(window, self.compute_dtype) - 1.)
        offset = ops.sign(offset) * ops.log1p(ops.abs(offset)) / np.log(8)

        return offset

    def with_mask(self, attn, mask, length):
        """Add the (shifted-)window attention mask to the logits.

        Temporarily exposes the windows axis so the mask broadcasts over
        batch and heads, then restores the flat layout.
        """
        mask_windows = ops.shape(mask)[1]

        attn = ops.reshape(attn, [-1, mask_windows, self.num_heads, length, length])
        attn += mask
        attn = ops.reshape(attn, [-1, self.num_heads, length, length])

        return attn

    def call(self, inputs, **kwargs):
        """Apply windowed multi-head self-attention.

        ``inputs`` is the 4-tuple (features, window_size, relative_index,
        attention_mask) described on the class.
        """
        inputs, window_size, relative_index, attention_mask = inputs
        height, width = ops.shape(inputs)[1:3]
        length = window_size ** 2  # tokens per window

        qkv = self.qkv(inputs)
        if self.swin_v2 and self.qkv_bias:
            # V2: K bias is fixed at zero; only Q and V biases are learned.
            k_bias = tf.zeros_like(self.v_bias, self.compute_dtype)
            qkv_bias = tf.concat([self.q_bias, k_bias, self.v_bias], axis=0)
            qkv = tf.nn.bias_add(qkv, qkv_bias)

        # QKV heads partition - fused with windows partitioning
        # qkv = ops.reshape(qkv, [-1, length, 3, self.num_heads, self.channels // self.num_heads])
        # qkv = ops.transpose(qkv, [2, 0, 3, 1, 4])
        qkv = window_partition_fused(qkv, height, width, window_size, self.num_heads)
        q, k, v = tf.unstack(qkv, 3)

        if self.swin_v2:
            # Cosine attention: L2-normalized Q/K with an upper-clamped,
            # exponentiated learned logit scale.
            scale = ops.minimum(self.scale, np.log(1. / .01))
            scale = ops.cast(ops.exp(scale), self.compute_dtype)
            q = tf.math.l2_normalize(q, axis=-1, epsilon=1.55e-5)
            k = tf.math.l2_normalize(k, axis=-1, epsilon=1.55e-5)
        else:
            scale = self.scale

        q *= scale
        k = ops.swapaxes(k, -2, -1)
        attn = ops.matmul(q, k)

        if self.swin_v2:
            # Continuous position bias from the CPB MLP, scaled by 16 as in
            # the reference implementation.
            relative_bias = self.cpb0(self.relative_table(window_size))
            relative_bias = self.cpb1(relative_bias)
            relative_bias = ops.reshape(relative_bias, [-1, self.num_heads])
            bias = ops.take(relative_bias, relative_index, axis=0) * 16.
        else:
            bias = ops.take(self.relative_bias, relative_index, axis=0)
        bias = ops.reshape(bias, [length, length, -1])
        bias = ops.transpose(bias, [2, 0, 1])
        attn = attn + bias[None]

        attn = self.with_mask(attn, attention_mask, length)
        attn = tf.nn.softmax(attn)
        attn = self.drop_attn(attn)

        outputs = tf.matmul(attn, v)

        # V heads merge - fused with windows merging
        # outputs = ops.transpose(outputs, [0, 2, 1, 3])
        # outputs = ops.reshape(outputs, [-1, length, self.channels])
        outputs = window_reverse_fused(outputs, height, width, window_size, self.num_heads)

        outputs = self.proj(outputs)
        outputs = self.drop_proj(outputs)

        return outputs

    def compute_output_shape(self, input_shape):
        # Output has the same shape as the feature-map input.
        return input_shape[0]

    def get_config(self):
        """Return the layer configuration for serialization."""
        config = super().get_config()
        config.update({
            'num_heads': self.num_heads,
            'qkv_bias': self.qkv_bias,
            'qk_scale': self.qk_scale,
            'attn_drop': self.attn_drop,
            'proj_drop': self.proj_drop,
            'window_pretrain': self.window_pretrain,
            'swin_v2': self.swin_v2
        })

        return config
|
shkarupa-alexREPO_NAMEtfswinPATH_START.@tfswin_extracted@tfswin-master@tfswin@winatt.py@.PATH_END.py
|
{
"filename": "plot_b_aperture.py",
"repo_name": "jpierel14/space_phot",
"repo_path": "space_phot_extracted/space_phot-main/Docs/source/_examples/plot_b_aperture.py",
"type": "Python"
}
|
"""
===================
Aperture Photometry
===================
Measuring PSF Photometry with space_phot.
"""
###############################################################
# An example HST Dataset is downloaded, and then we measure
# aperture photometry. This is public HST data for the
# gravitationally lensed SN 2022riv
import sys,os,glob
from astropy.io import fits
from astropy.table import Table
from astropy.nddata import extract_array
from astropy.coordinates import SkyCoord
from astropy import wcs
from astropy.wcs.utils import skycoord_to_pixel
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from astroquery.mast import Observations
from astropy.visualization import (simple_norm,LinearStretch)
import space_phot
####################################################################
#
# ----------
# HST Images
# ----------
#
# **Download some Data**
#
# For this example we download HST FLT images from MAST.
obs_table = Observations.query_criteria(obs_id='hst_16264_12_wfc3_ir_f110w_iebc12')
obs_table1 = obs_table[obs_table['filters']=='F110W']
data_products_by_obs = Observations.get_product_list(obs_table1)
data_products_by_obs = data_products_by_obs[data_products_by_obs['calib_level']==2]
data_products_by_obs = data_products_by_obs[data_products_by_obs['productSubGroupDescription']=='FLT'][:3]
Observations.download_products(data_products_by_obs,extension='fits')
####################################################################
# **Examine the first Image**
#
files = glob.glob('mastDownload/HST/*/*flt.fits')
ref_image = files[0]
ref_fits = fits.open(ref_image)
ref_data = fits.open(ref_image)['SCI',1].data
norm1 = simple_norm(ref_data,stretch='linear',min_cut=-1,max_cut=10)
plt.imshow(ref_data, origin='lower',
norm=norm1,cmap='gray')
plt.gca().tick_params(labelcolor='none',axis='both',color='none')
plt.show()
####################################################################
# **Zoom in to see the Supernova**
#
sn_location = SkyCoord('21:29:40.2110','+0:05:24.154',unit=(u.hourangle,u.deg))
ref_y,ref_x = skycoord_to_pixel(sn_location,wcs.WCS(ref_fits['SCI',1],ref_fits))
ref_cutout = extract_array(ref_data,(11,11),(ref_x,ref_y))
norm1 = simple_norm(ref_cutout,stretch='linear',min_cut=-1,max_cut=10)
plt.imshow(ref_cutout, origin='lower',
norm=norm1,cmap='gray')
plt.title('SN2022riv')
plt.gca().tick_params(labelcolor='none',axis='both',color='none')
plt.show()
####################################################################
# **Measure the aperture photometry**
#
hst_obs = space_phot.observation2(files)
hst_obs.aperture_photometry(sn_location,radius=3,
skyan_in=5,skyan_out=7)
print(hst_obs.aperture_result.phot_cal_table)
####################################################################
#
# -----------
# JWST Images
# -----------
#
# **Download some Data**
#
# For this example we download JWST cal images from MAST. We just use
# 4 of the 8 dithered exposures for speed here, but in principle
# space_phot can handle as many as are needed (given time).
obs_table = Observations.query_criteria(obs_id='jw02767-o002_t001_nircam_clear-f150w')
data_products_by_obs = Observations.get_product_list(obs_table)
data_products_by_obs = data_products_by_obs[data_products_by_obs['calib_level']==2]
data_products_by_obs = data_products_by_obs[data_products_by_obs['productSubGroupDescription']=='CAL']
# Just take the nrcb3 cals (where the SN is located)
to_remove = []
for i in range(len(data_products_by_obs)):
if not data_products_by_obs[i]['obs_id'].endswith('nrcb3'):
to_remove.append(i)
data_products_by_obs.remove_rows(to_remove)
Observations.download_products(data_products_by_obs[:4],extension='fits')
####################################################################
# **Examine the first Image**
#
files = glob.glob('mastDownload/JWST/*/*cal.fits')
ref_image = files[0]
ref_fits = fits.open(ref_image)
ref_data = fits.open(ref_image)['SCI',1].data
norm1 = simple_norm(ref_data,stretch='linear',min_cut=-1,max_cut=10)
plt.imshow(ref_data, origin='lower',
norm=norm1,cmap='gray')
plt.gca().tick_params(labelcolor='none',axis='both',color='none')
plt.show()
####################################################################
# **Zoom in to see the Supernova**
#
sn_location = SkyCoord('21:29:40.2103','+0:05:24.158',unit=(u.hourangle,u.deg))
ref_y,ref_x = skycoord_to_pixel(sn_location,wcs.WCS(ref_fits['SCI',1],ref_fits))
ref_cutout = extract_array(ref_data,(11,11),(ref_x,ref_y))
norm1 = simple_norm(ref_cutout,stretch='linear',min_cut=-1,max_cut=10)
plt.imshow(ref_cutout, origin='lower',
norm=norm1,cmap='gray')
plt.title('SN2022riv')
plt.gca().tick_params(labelcolor='none',axis='both',color='none')
plt.show()
####################################################################
# **Measure the aperture photometry**
#
jwst_obs = space_phot.observation2(files)
jwst_obs.aperture_photometry(sn_location,encircled_energy='70')
print(jwst_obs.aperture_result.phot_cal_table)
|
jpierel14REPO_NAMEspace_photPATH_START.@space_phot_extracted@space_phot-main@Docs@source@_examples@plot_b_aperture.py@.PATH_END.py
|
{
"filename": "mod.md",
"repo_name": "wlxu/RelicClass",
"repo_path": "RelicClass_extracted/RelicClass-master/doc/input/mod.md",
"type": "Markdown"
}
|
Updating the manual
===================
Author: D. C. Hooper (hooper@physik.rwth-aachen.de)
This pdf manual and accompanying web version have been generated using the `doxygen` software (http://www.doxygen.org). This software directly reads the code and extracts the necessary comments to form the manual, meaning it is very easy to generate newer versions of the manual as desired.
### For CLASS developers: ###
To maintain the usefulness of the manual, a new version should be generated after any major upgrade to `CLASS`. To keep track of how up-to-date the manual is, the title page also displays the last modification date. The manual is generated automatically from the code, except for a few chapters written manually in the files
README.md
doc/input/chap2.md
doc/input/chap3.md
doc/input/mod.md
external_Pk/README.md
You can update these files, or add new ones that should be declared in the `INPUT=` field of `doc/input/doxyconf`.
Generating a new version of this manual is straightforward. First, you need to install the `doxygen` software, which can be done by following the instructions on the software's webpage. The location where you install this software is irrelevant; it doesn't need to be in the same folder as `CLASS`. For Mac OSX, homebrew users can install the software with `brew install doxygen --with-graphviz`.
Once installed, navigate to the class/doc/input directory and run the first script
` . make1.sh`
This will generate a new version of the html manual and the necessary files to make the pdf version. Unfortunately, `doxygen` does not yet offer the option to automatically order the output chapters in the pdf version of the manual. Hence, before compiling the pdf, this must be done manually. To do this you need to find the `refman.tex` file in class/doc/manual/latex. With this file you can modify the title page, headers, footers, and chapter ordering for the final pdf. Usually we just do two things: manually add the line
\vspace*{1cm}
{\large Last updated \today}\\
after
{\Large C\+L\+A\+SS M\+A\+N\+U\+AL }\\
and manually move the chapters `"The external Pk mode"` and `"Updating the manual"` to the end, after the automatically generated part. Once you have this file with your desired configuration, navigate back to the class/doc/input directory, and run the second script
` . make2.sh`
You should now be able to find the finished pdf in `class/doc/manual/CLASS_MANUAL.pdf`. Finally you can commit the changes to git, but not all the content of `doc/` is necessary: only `doc/README`, `doc/input/`, `doc/manual/CLASS_MANUAL.pdf`, `doc/manual/html/`. This means that before committing you will have to do a: `git add doc/manual/html/`, but NOT a: `git add doc/manual/latex/`!
As a final comment, doxygen uses two main configuration files: `doxyconf` and `doxygen.sty`, both located in class/doc/input. Changes to these files can dramatically impact the outcome, so any modifications to these files should be done with great care.
|
wlxuREPO_NAMERelicClassPATH_START.@RelicClass_extracted@RelicClass-master@doc@input@mod.md@.PATH_END.py
|
{
"filename": "index.md",
"repo_name": "janosch314/GWFish",
"repo_path": "GWFish_extracted/GWFish-main/docs/source/index.md",
"type": "Markdown"
}
|
# GWFish
GWFish is a Fisher matrix code geared towards future gravitational-wave detectors.
It is able to provide estimates of the signal-to-noise ratio and of the errors on
our estimates of the parameters, for a signal as it would be seen by one or more
future detectors, among:
- [LIGO](https://ligo.caltech.edu) / [Virgo](https://www.virgo-gw.eu/) in their fifth observing run;
- [Kagra](https://gwcenter.icrr.u-tokyo.ac.jp/en/);
- [Einstein Telescope](http://www.et-gw.eu);
- [Lunar Gravitational Wave Antenna](http://socrate.cs.unicam.it/);
- [Cosmic Explorer](https://cosmicexplorer.org);
- [LISA](https://lisamission.org);
- Voyager.
It is able to account for a time-varying antenna pattern, since we expect these
detectors to be sensitive in the low-frequency regime, for which the motion of the
Earth / Moon / satellites is significant across the signal duration.
For more information about the theory than what is discussed here, refer to
the __[`GWFish` paper](https://www.sciencedirect.com/science/article/abs/pii/S2213133722000853?via%3Dihub)__.
This software is developed by the [gravitation group](https://wikiet.gssi.it/index.php/Main_Page)
at the [Gran Sasso Science Institute](https://www.gssi.it/).
```{seealso}
This documentation is written according to the [diátaxis framework](https://diataxis.fr).
```
```{toctree}
:glob:
:maxdepth: 1
:titlesonly:
:caption: Introduction
installation.md
glossary.md
```
```{toctree}
:glob:
:maxdepth: 1
:titlesonly:
:caption: Tutorials
tutorials/*
```
```{toctree}
:glob:
:maxdepth: 1
:titlesonly:
:caption: How-to guides
how-to/*
```
```{toctree}
:glob:
:maxdepth: 1
:titlesonly:
:caption: Explanation
explanation/*
```
```{toctree}
:glob:
:maxdepth: 1
:titlesonly:
:caption: Technical Reference
reference/*
```
|
janosch314REPO_NAMEGWFishPATH_START.@GWFish_extracted@GWFish-main@docs@source@index.md@.PATH_END.py
|
{
"filename": "_maxpoints.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/indicator/stream/_maxpoints.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``indicator.stream.maxpoints`` property.

    Accepts a number in the closed range [0, 10000]; edits trigger a
    "calc" recomputation unless the caller overrides ``edit_type``.
    """

    def __init__(
        self, plotly_name="maxpoints", parent_name="indicator.stream", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before delegating,
        # so explicit caller values win over the built-in defaults.
        defaults = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "max": kwargs.pop("max", 10000),
            "min": kwargs.pop("min", 0),
        }
        super(MaxpointsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@indicator@stream@_maxpoints.py@.PATH_END.py
|
{
"filename": "_version.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython-genutils/py2/ipython_genutils/_version.py",
"type": "Python"
}
|
# Version of the package, kept as a tuple for programmatic comparison.
version_info = (0, 2, 0)
# Human-readable dotted form derived from the tuple, e.g. "0.2.0".
__version__ = "{0}.{1}.{2}".format(*version_info)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython-genutils@py2@ipython_genutils@_version.py@.PATH_END.py
|
{
"filename": "test_5_occurrence.py",
"repo_name": "GijsMulders/epos",
"repo_path": "epos_extracted/epos-master/EPOS/scriptdir/tests/test_5_occurrence.py",
"type": "Python"
}
|
#! /usr/bin/env ipython
'''
Test if EPOS can calculate occurrence rates.

Runs a single Monte Carlo realisation of a 2D power-law planet
population against the Kepler DR25 survey, then integrates the
result over predefined period/radius bins.

Plots should appear in the directory
png/test_5/occurrence/
'''
import EPOS

''' initialize the EPOS class '''
epos= EPOS.epos(name='test_5')

''' load the kepler dr25 exoplanets and survey efficiency '''
# Huber/Vetting/score select the stellar-property catalogue and restrict
# the planet sample to reliably vetted candidates -- TODO confirm against
# EPOS.kepler.dr25 documentation.
obs, survey= EPOS.kepler.dr25(Huber=True, Vetting=True, score=0.9)
epos.set_observation(**obs)
epos.set_survey(**survey)

''' define the parameteric distribution, here a power-law in radius and period '''
epos.set_parametric(EPOS.fitfunctions.powerlaw2D)
epos.fitpars.add('pps', 2.0, min=0)
epos.fitpars.add('P1',0.3, is2D=True)
epos.fitpars.add('P2',-0.2, dx=0.1, is2D=True)

''' define the simulated range (trim) and the range compared to observations (zoom) '''
# Occ=True enables the bookkeeping needed for occurrence-rate output.
epos.set_ranges(xtrim=[10,730],ytrim=[0.5,4.],xzoom=[20,300],yzoom=[0.7,3], Occ=True)

''' define bins where occurrence is calculated'''
epos.set_bins(xbins=[[20,300],[0.9*365,2.2*365]], ybins=[[0.7,3],[0.7,1.5]]) # eta_zoom, eta_earth

''' Run the Monte Carlo Simulation once '''
EPOS.run.once(epos)

''' Calculate the occurrence rates '''
EPOS.occurrence.all(epos)

''' Adjust axes'''
epos.plotpars['textsize']= 12
# Extend the plotted period range beyond the simulated trim range.
epos.xtrim[1]= 1000

''' plot occurrence rates '''
EPOS.plot.occurrence.all(epos)
|
GijsMuldersREPO_NAMEeposPATH_START.@epos_extracted@epos-master@EPOS@scriptdir@tests@test_5_occurrence.py@.PATH_END.py
|
{
"filename": "bao_correlation_Chen2019.py",
"repo_name": "Samreay/Barry",
"repo_path": "Barry_extracted/Barry-master/barry/models/bao_correlation_Chen2019.py",
"type": "Python"
}
|
import sys
sys.path.append("../..")
from barry.models import PowerChen2019
from barry.models.bao_correlation import CorrelationFunctionFit
import numpy as np
class CorrChen2019(CorrelationFunctionFit):
    """xi(s) model inspired from Chen 2019.

    Configuration-space (correlation function) BAO model whose underlying
    power-spectrum template is ``PowerChen2019``.
    See https://ui.adsabs.harvard.edu/abs/2019JCAP...09..017C/abstract for details.
    """

    def __init__(
        self,
        name="Corr Chen 2019",
        fix_params=("om", "beta"),  # parameters held fixed during the fit
        smooth_type=None,
        recon=None,  # reconstruction convention, forwarded only to the P(k) parent
        smooth=False,  # if True, use the no-wiggle (smooth) model only
        correction=None,  # covariance-correction scheme (see models.model.Correction)
        isotropic=False,
        poly_poles=(0, 2),  # multipoles that receive broadband terms
        marg=None,  # analytic-marginalisation mode, e.g. "full"
        include_binmat=True,
        broadband_type="spline",
        **kwargs,
    ):
        super().__init__(
            name=name,
            fix_params=fix_params,
            smooth_type=smooth_type,
            smooth=smooth,
            correction=correction,
            isotropic=isotropic,
            poly_poles=poly_poles,
            marg=marg,
            includeb2=False,
            include_binmat=include_binmat,
            broadband_type=broadband_type,
            **kwargs,
        )
        # xi(s) is derived from this P(k) model; the correlation-function
        # wrapper owns the broadband, so the parent gets broadband_type=None.
        self.parent = PowerChen2019(
            fix_params=fix_params,
            smooth_type=smooth_type,
            recon=recon,
            smooth=smooth,
            correction=correction,
            isotropic=isotropic,
            marg=marg,
            broadband_type=None,
        )
        self.set_marg(fix_params, do_bias=False)

    def declare_parameters(self):
        """Declare the model's free parameters on top of the base set."""
        super().declare_parameters()
        self.add_param("beta", r"$\beta$", 0.01, 4.0, None)  # RSD parameter f/b
        self.add_param("sigma_s", r"$\Sigma_s$", 0.0, 10.0, 5.0)  # Fingers-of-god damping
if __name__ == "__main__":
    # Smoke-test entry point: fit the model once against DESI KP4 mocks.
    import sys

    sys.path.append("../..")
    from barry.datasets.dataset_correlation_function import CorrelationFunction_DESI_KP4
    from barry.config import setup_logging
    from barry.models.model import Correction

    setup_logging()

    # Post-reconstruction correlation function, monopole + quadrupole,
    # with the mock covariance reduced by a factor of 25.
    dataset = CorrelationFunction_DESI_KP4(
        recon="sym",
        fit_poles=[0, 2],
        min_dist=52.0,
        max_dist=150.0,
        realisation=None,
        num_mocks=1000,
        reduce_cov_factor=25,
    )
    data = dataset.get_data()

    model = CorrChen2019(
        recon=dataset.recon,
        isotropic=dataset.isotropic,
        marg="full",
        fix_params=["om"],
        poly_poles=dataset.fit_poles,
        correction=Correction.NONE,
        n_poly=3,
    )
    # Gaussian prior on the fingers-of-god damping, centred at 0.
    model.set_default("sigma_s", 0.0, min=0.0, max=20.0, sigma=2.0, prior="gaussian")

    # Load in a pre-existing BAO template (k, smooth P(k), wiggle ratio).
    pktemplate = np.loadtxt("../../barry/data/desi_kp4/DESI_Pk_template.dat")
    model.parent.kvals, model.parent.pksmooth, model.parent.pkratio = pktemplate.T

    model.sanity_check(dataset)
|
SamreayREPO_NAMEBarryPATH_START.@Barry_extracted@Barry-master@barry@models@bao_correlation_Chen2019.py@.PATH_END.py
|
{
"filename": "code_of_conduct.md",
"repo_name": "sbi-dev/sbi",
"repo_path": "sbi_extracted/sbi-main/docs/docs/code_of_conduct.md",
"type": "Markdown"
}
|
{!CODE_OF_CONDUCT.md!}
|
sbi-devREPO_NAMEsbiPATH_START.@sbi_extracted@sbi-main@docs@docs@code_of_conduct.md@.PATH_END.py
|
{
"filename": "_z.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/surface/contours/_z.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound ``surface.contours.z`` property."""

    def __init__(self, plotly_name="z", parent_name="surface.contours", **kwargs):
        super(ZValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Z"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the color of the contour lines.
            end
                Sets the end contour level value. Must be more
                than `contours.start`
            highlight
                Determines whether or not contour lines about
                the z dimension are highlighted on hover.
            highlightcolor
                Sets the color of the highlighted contour
                lines.
            highlightwidth
                Sets the width of the highlighted contour
                lines.
            project
                :class:`plotly.graph_objects.surface.contours.z
                .Project` instance or dict with compatible
                properties
            show
                Determines whether or not contour lines about
                the z dimension are drawn.
            size
                Sets the step between each contour level. Must
                be positive.
            start
                Sets the starting contour level value. Must be
                less than `contours.end`
            usecolormap
                An alternate to "color". Determines whether or
                not the contour lines are colored using the
                trace "colorscale".
            width
                Sets the width of the contour lines.
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@surface@contours@_z.py@.PATH_END.py
|
{
"filename": "test_array_ext.py",
"repo_name": "enthought/mayavi",
"repo_path": "mayavi_extracted/mayavi-master/tvtk/tests/test_array_ext.py",
"type": "Python"
}
|
"""Unit tests for the array related extension code.
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
import unittest
import numpy
from tvtk.array_handler import ID_TYPE_CODE, set_id_type_array_py
from tvtk.array_ext import set_id_type_array
class TestArrayExt(unittest.TestCase):
    """Exercise ``set_id_type_array`` (C extension) and its pure-Python twin."""

    def check(self, set_id_type_array):
        """Run the shared battery of checks against one implementation.

        The function under test copies a 2D id-type array into columns
        [1:] of a destination array (column 0 is not compared here —
        presumably it holds per-row counts; confirm in tvtk.array_handler).
        """
        N = 5
        a = numpy.zeros((N, 4), ID_TYPE_CODE)
        a[:, 1] = 1
        a[:, 2] = 2
        a[:, 3] = 3

        def diff_arr(x, y):
            # Zero iff x matches y's columns [1:] element-for-element.
            return numpy.sum(numpy.ravel(x) - numpy.ravel(y[:, 1:]))

        # Test contiguous arrays.
        b = numpy.zeros((N, 5), ID_TYPE_CODE)
        set_id_type_array(a, b)
        self.assertEqual(diff_arr(a, b), 0)

        # Test non-contiguous arrays.
        b = numpy.zeros((N, 3), ID_TYPE_CODE)
        set_id_type_array(a[:, ::2], b)
        self.assertEqual(diff_arr(a[:, ::2], b), 0)

        # Test 1D array.
        b = numpy.zeros(N*5, ID_TYPE_CODE)
        set_id_type_array(a, b)
        self.assertEqual(diff_arr(a, numpy.reshape(b, (N, 5))), 0)

        # Test assertions.
        # A float64 source (wrong dtype) must be rejected.
        d = a.astype('d')
        b = numpy.zeros((N, 5), ID_TYPE_CODE)
        self.assertRaises(AssertionError, set_id_type_array,
                          d, b)
        # b must be contiguous.
        b = numpy.zeros((N, 10), ID_TYPE_CODE)
        self.assertRaises(AssertionError, set_id_type_array,
                          a, b[:, ::2])
        # A 1D source is rejected.
        self.assertRaises(AssertionError, set_id_type_array,
                          a[0], b)

        # Test size check assertion.
        b = numpy.zeros((N, 4), ID_TYPE_CODE)
        self.assertRaises(AssertionError, set_id_type_array,
                          a, b)
        b = numpy.zeros(N*6, ID_TYPE_CODE)
        self.assertRaises(AssertionError, set_id_type_array,
                          a, b)
        # This should work!
        set_id_type_array(a, b[:N*5])
        self.assertEqual(diff_arr(a, numpy.reshape(b[:N*5], (N, 5))), 0)

    def test_set_id_type_array(self):
        # Compiled extension implementation.
        self.check(set_id_type_array)

    def test_set_id_type_array_py(self):
        # Pure-Python reference implementation.
        self.check(set_id_type_array_py)


if __name__ == "__main__":
    unittest.main()
|
enthoughtREPO_NAMEmayaviPATH_START.@mayavi_extracted@mayavi-master@tvtk@tests@test_array_ext.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/rsd/power/gal/Pcs/__init__.py",
"type": "Python"
}
|
from .. import GalaxyPowerTerm, ZeroShotNoise
from .PcAsA import PcAsA
from .PcAsB import PcAsB
from .PcBsA import PcBsA
from .PcBsB import PcBsB
class Pcs(GalaxyPowerTerm):
    """The cross spectrum of central and satellite galaxies."""

    name = "Pcs"

    def __init__(self, model):
        # Delegate to the base term with the four sub-term components.
        super(Pcs, self).__init__(model, PcAsA, PcAsB, PcBsA, PcBsB)

    @property
    def coefficient(self):
        """Relative weight of the central-satellite cross term, 2 fs (1 - fs)."""
        fs = self.model.fs
        return 2*fs*(1-fs)

    def __call__(self, k, mu):
        """Evaluate the total central x satellite cross spectrum."""
        # Evaluate with the model's shot noise temporarily zeroed.
        with ZeroShotNoise(self.model):
            return super(Pcs, self).__call__(k, mu)
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@rsd@power@gal@Pcs@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/threeML/plugins/__init__.py",
"type": "Python"
}
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@threeML@plugins@__init__.py@.PATH_END.py
|
|
{
"filename": "album.py",
"repo_name": "dstndstn/astrometry.net",
"repo_path": "astrometry.net_extracted/astrometry.net-main/net/views/album.py",
"type": "Python"
}
|
import os
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest, QueryDict
from django.shortcuts import get_object_or_404, redirect, render
from django.template import Context, RequestContext, loader
from django.contrib.auth.decorators import login_required
from django import forms
from django.http import HttpResponseRedirect
from django.contrib import messages
from astrometry.net.models import *
from astrometry.net import settings
from astrometry.net.log import *
from astrometry.net.tmpfile import *
from astrometry.net.util import get_page, get_session_form, store_session_form
from astrometry.net.util import NoBulletsRadioSelect
from astrometry.util.run_command import run_command
from astrometry.net.views.comment import *
def album(req, album_id=None):
    """Display a single album with a paginated grid of its visible images."""
    album = get_object_or_404(Album, pk=album_id)
    comment_form = get_session_form(req.session, PartialCommentForm)

    page_number = req.GET.get('page',1)
    # 4x3 thumbnails per page; only images the requesting user may see.
    page = get_page(album.user_images.public_only(req.user),4*3,page_number)
    context = {
        'album': album,
        'comment_form': comment_form,
        'image_page': page,
        'request': req,
    }
    # Owners can always view their own albums; everyone else only public ones.
    if album.is_public() or (album.user == req.user and req.user.is_authenticated):
        template = 'album/view.html'
    #elif SharedHideable.objects.filter(shared_with=req.user.id, hideable=album).count():
    #    template = 'album/view.html'
    else:
        messages.error(req, "Sorry, you don't have permission to view this content.")
        template = 'album/permission_denied.html'

    return render(req, template, context)
class AlbumForm(forms.ModelForm):
    """Create/edit form for Album; enforces per-user title uniqueness."""

    class Meta:
        model = Album
        # Relations and ownership fields are managed by the views, not the form.
        exclude = ('user', 'owner', 'user_images', 'tags', 'created_at', 'comment_receiver')
        widgets = {
            'description': forms.Textarea(attrs={'cols':60,'rows':3}),
            'publicly_visible': NoBulletsRadioSelect(),
        }

    def clean(self):
        cleaned_data = self.cleaned_data
        title = cleaned_data.get('title')
        if title:
            # make sure the user doesn't have another album with the same title
            query = Album.objects.filter(user=self.instance.user, title=title)
            # Exclude self so editing an album keeps its own title valid.
            query = query.exclude(pk=self.instance.id)
            if query.count() != 0:
                self._errors['title'] = self.error_class(['You already have an album with this title.'])
                # Per Django convention, invalid values are dropped from
                # cleaned_data when an error is recorded.
                del cleaned_data['title']
        return cleaned_data
@login_required
def edit(req, album_id=None):
    """Let the owner edit an album's details; others get a denial page."""
    album = get_object_or_404(Album, pk=album_id)

    # Guard clause: only the owner may edit.
    if album.user != req.user:
        messages.error(req, "Sorry, you don't have permission to view this content.")
        return render(req, 'album/permission_denied.html')

    if req.method == 'POST':
        form = AlbumForm(req.POST, instance=album)
        if form.is_valid():
            form.save()
            messages.success(req, 'Album details successfully updated.')
            return redirect(album)
        # Invalid POST falls through and re-renders the bound form.
        messages.error(req, 'Please fix the following errors:')
    else:
        form = AlbumForm(instance=album)

    return render(req, 'album/edit.html', {
        'album_form': form,
        'album': album,
    })
@login_required
def new(req):
    """Create a new album for the current user (POST only).

    On invalid input the bound form is stashed in the session so the
    originating page can re-render it with errors.
    """
    if req.method == 'POST':
        album = Album(user=req.user)
        form = AlbumForm(req.POST, instance=album)
        if form.is_valid():
            form.save(commit=False)
            album.comment_receiver=CommentReceiver.objects.create()
            album.save()
            messages.success(req, "Album '%s' successfully created." % album.title)
            return redirect(album)
        else:
            store_session_form(req.session, AlbumForm, req.POST)
            messages.error(req, 'Please fix the following errors:')
            return redirect(req.POST.get('from','/'))
    else:
        # Fix: this branch previously fell through and returned None,
        # which Django treats as a server error. Send GET requests back
        # to the referring page (the creation form is rendered inline
        # elsewhere).
        return redirect(req.GET.get('from', '/'))
@login_required
def delete(req, album_id):
    """Delete an album owned by the current user, then redirect to `next`.

    Non-owners are redirected back with an error message.
    """
    album = get_object_or_404(Album, pk=album_id)
    redirect_url = req.GET.get('next','/')
    if album.user == req.user:
        album.delete()
        messages.success(req, "Album '%s' successfully deleted." % album.title)
    else:
        # Fix: this branch previously fell through and returned None,
        # which Django treats as a server error. Deny politely instead.
        messages.error(req, "Sorry, you don't have permission to delete this album.")
    return HttpResponseRedirect(redirect_url)
|
dstndstnREPO_NAMEastrometry.netPATH_START.@astrometry.net_extracted@astrometry.net-main@net@views@album.py@.PATH_END.py
|
{
"filename": "test_colours.py",
"repo_name": "Samreay/ChainConsumer",
"repo_path": "ChainConsumer_extracted/ChainConsumer-master/tests/test_colours.py",
"type": "Python"
}
|
import numpy as np
import pytest
from chainconsumer.color_finder import ALL_COLOURS, colors
def test_colors_rgb2hex_1():
    # Full-intensity white RGBA converts to lowercase web hex.
    c = np.array([1, 1, 1, 1])
    assert colors.get_formatted([c])[0] == "#ffffff"


def test_colors_rgb2hex_2():
    # Half-intensity blue channel maps to 0x80.
    c = np.array([0, 0, 0.5, 1])
    assert colors.get_formatted([c])[0] == "#000080"


def test_colors_alias_works():
    # Single-letter alias resolves to one of the registered blue shades.
    assert colors.format("b") in ALL_COLOURS["blue"]


def test_colors_name_works():
    # Full colour name resolves to one of the registered blue shades.
    assert colors.format("blue") in ALL_COLOURS["blue"]


def test_colors_error_on_garbage():
    # Unknown colour strings raise rather than passing through silently.
    with pytest.raises(ValueError):
        colors.get_formatted(["java"])


def test_clamp1():
    # Values below the valid range clamp to 0.
    assert colors._clamp(-10) == 0


def test_clamp2():
    # In-range values pass through unchanged.
    assert colors._clamp(10) == 10


def test_clamp3():
    # Values above 255 clamp to 255.
    assert colors._clamp(1000) == 255
SamreayREPO_NAMEChainConsumerPATH_START.@ChainConsumer_extracted@ChainConsumer-master@tests@test_colours.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/waterfall/outsidetextfont/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``waterfall.outsidetextfont.style`` property.

    Accepts "normal" or "italic" (arrays allowed); edits trigger a
    "calc" recomputation unless the caller overrides ``edit_type``.
    """

    def __init__(
        self, plotly_name="style", parent_name="waterfall.outsidetextfont", **kwargs
    ):
        # Fill in defaults without clobbering caller-supplied overrides.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("values", ["normal", "italic"])
        super(StyleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@waterfall@outsidetextfont@_style.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/tools/graph_transforms/README.md",
"type": "Markdown"
}
|
# Graph Transform Tool
## Table of Contents
* [Introduction](#introduction)
* [Using the Graph Transform Tool](#using-the-graph-transform-tool)
* [Inspecting Graphs](#inspecting-graphs)
* [Common Use Cases](#common-use-cases)
* [Optimizing for Deployment](#optimizing-for-deployment)
* [Fixing Missing Kernel Errors on
Mobile](#fixing-missing-kernel-errors-on-mobile)
* [Shrinking File Size](#shrinking-file-size)
* [Eight-bit Calculations](#eight-bit-calculations)
* [Transform Reference](#transform-reference)
* [add_default_attributes](#add_default_attributes)
* [backport_concatv2](#backport_concatv2)
* [flatten_atrous_conv](#flatten_atrous_conv)
* [fold_batch_norms](#fold_batch_norms)
* [fold_constants](#fold_constants)
* [fold_old_batch_norms](#fold_old_batch_norms)
* [freeze_requantization_ranges](#freeze_requantization_ranges)
* [fuse_convolutions](#fuse_convolutions)
* [insert_logging](#insert_logging)
* [merge_duplicate_nodes](#merge_duplicate_nodes)
* [obfuscate_names](#obfuscate_names)
* [quantize_nodes](#quantize_nodes)
* [quantize_weights](#quantize_weights)
* [remove_attribute](#remove_attribute)
* [remove_device](#remove_device)
* [remove_nodes](#remove_nodes)
* [rename_attribute](#rename_attribute)
* [rename_op](#rename_op)
* [round_weights](#round_weights)
* [sparsify_gather](#sparsify_gather)
* [set_device](#set_device)
* [sort_by_execution_order](#sort_by_execution_order)
* [strip_unused_nodes](#strip_unused_nodes)
* [Writing Your Own Transforms](#writing-your-own-transforms)
* [Transform Functions](#transform-functions)
* [Pattern Syntax](#pattern-syntax)
* [ReplaceMatchingOpTypes](#replacematchingoptypes)
* [Parameters](#parameters)
* [Function Libraries](#function-libraries)
* [Registering](#registering)
## Introduction
When you have finished training a model and want to deploy it in production,
you'll often want to modify it to better run in its final environment. For
example if you're targeting a phone you might want to shrink the file size by
quantizing the weights, or optimize away batch normalization or other
training-only features. The Graph Transform framework offers a suite of tools
for modifying computational graphs, and a framework to make it easy to write
your own modifications.
This guide is structured into three main parts, first giving some tutorials on
how to perform common tasks, second a reference covering all of the different
transformations that are included, together with the options that apply to them,
and third a guide to creating your own transforms.
## Using the Graph Transform Tool
The Graph Transform tool is designed to work on models that are saved as
GraphDef files, usually in a binary protobuf format. This is the low-level
definition of a TensorFlow computational graph, including a list of nodes and
the input and output connections between them. If you're using a Python API to
train your model, this will usually be saved out in the same directory as your
checkpoints, and usually has a '.pb' suffix.
If you want to work with the values of your trained parameters, for example to
quantize weights, you'll need to run
[tensorflow/python/tools/freeze_graph.py](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/freeze_graph.py)
to convert the checkpoint values into embedded constants within the graph file
itself.
You call the Graph Transform tool itself like this:
```bash
bazel build tensorflow/tools/graph_transforms:transform_graph
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=tensorflow_inception_graph.pb \
--out_graph=optimized_inception_graph.pb \
--inputs='Mul:0' \
--outputs='softmax:0' \
--transforms='
strip_unused_nodes(type=float, shape="1,299,299,3")
remove_nodes(op=Identity, op=CheckNumerics)
fold_old_batch_norms
'
```
The arguments here are specifying where to read the graph from, where to write
the transformed version to, what the input and output layers are, and what
transforms to modify the graph with. The transforms are given as a list of
names, and can each have arguments themselves. These transforms define the
pipeline of modifications that are applied in order to produce the output.
Sometimes you need some transforms to happen before others, and the ordering
within the list lets you specify which happen first.
Note that the optimization
`remove_nodes(op=Identity, op=CheckNumerics)` will break the model with control
flow operations, such as `tf.cond`, `tf.map_fn`, and `tf.while`.
## Inspecting Graphs
Many of the transforms that the tool supports need to know what the input and
output layers of the model are. The best source for these is the model training
process, where for a classifier the inputs will be the nodes that receive the
data from the training set, and the output will be the predictions. If you're
unsure, the
[`summarize_graph`](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/tools/graph_transforms/summarize_graph_main.cc)
tool can inspect the model and provide guesses about likely input and output nodes,
as well as other information that's useful for debugging. Here's an example of
how to use it on the [Inception V3
graph](https://storage.googleapis.com/download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz):
```bash
bazel build tensorflow/tools/graph_transforms:summarize_graph
bazel-bin/tensorflow/tools/graph_transforms/summarize_graph --in_graph=tensorflow_inception_graph.pb
```
## Common Use Cases
This section has small guides for some of the most frequently-used
transformation pipelines, aimed at users who want to quickly accomplish one of
these tasks. A lot of them will use the Inception V3 model for their examples,
which can be downloaded from
[https://storage.googleapis.com/download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz](https://storage.googleapis.com/download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz).
### Optimizing for Deployment
If you've finished training your model and want to deploy it on a server or a
mobile device, you'll want it to run as fast as possible, and with as few
non-essential dependencies as you can. This recipe removes all of the nodes that
aren't called during inference, shrinks expressions that are always constant
into single nodes, and optimizes away some multiply operations used during batch
normalization by pre-multiplying the weights for convolutions.
```bash
bazel build tensorflow/tools/graph_transforms:transform_graph
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=tensorflow_inception_graph.pb \
--out_graph=optimized_inception_graph.pb \
--inputs='Mul' \
--outputs='softmax' \
--transforms='
strip_unused_nodes(type=float, shape="1,299,299,3")
remove_nodes(op=Identity, op=CheckNumerics)
fold_constants(ignore_errors=true)
fold_batch_norms
fold_old_batch_norms'
```
The batch norm folding is included twice because there are two different flavors
of batch normalization used in TensorFlow. The older version was implemented
with a single op like BatchNormWithGlobalNormalization or FusedBatchNorm, and
BatchNormWithGlobalNormalization was deprecated in favor of a more recent
approach using individual ops to implement the same computation. The two
transforms are in there so that both styles are recognized and optimized.
### Fixing Missing Kernel Errors on Mobile
The mobile version of TensorFlow is focused on inference, and so by default the
list of supported ops (defined in
[tensorflow/core/kernels/BUILD:android_extended_ops](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/BUILD)
for Bazel doesn't include a lot that are training related. This can cause
`No OpKernel was registered to support Op` errors when a GraphDef is loaded,
even if the op isn't going to be executed.
If you see this error and it's an op that you do actually want to run on mobile,
then you'll need to make local modifications to the build files to include the
right .cc file that defines it. In a lot of cases the op is just a vestigial
remnant from the training process though, and if that's true then you can run
the [strip_unused_nodes](#strip_unused_nodes), specifying the inputs and outputs
of your inference usage, to remove those unnecessary nodes:
```bash
bazel build tensorflow/tools/graph_transforms:transform_graph
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=tensorflow_inception_graph.pb \
--out_graph=optimized_inception_graph.pb \
--inputs='Mul' \
--outputs='softmax' \
--transforms='
strip_unused_nodes(type=float, shape="1,299,299,3")
fold_constants(ignore_errors=true)
fold_batch_norms
fold_old_batch_norms'
```
### Shrinking File Size
If you're looking to deploy your model as part of a mobile app, then keeping the
download size as small as possible is important. For most TensorFlow models, the
largest contributors to the file size are the weights passed in to convolutional
and fully-connected layers, so anything that can reduce the storage size for
those is very useful. Luckily most neural networks are resistant to noise, so
it's possible to change the representation of those weights in a lossy way
without losing very much accuracy overall.
On both iOS and Android app packages are compressed before download, so the
simplest way to reduce the bandwidth your users need to receive your app is to
provide raw data that compresses more easily. By default the weights are stored
as floating-point values, and even tiny differences between numbers result in
very different bit patterns, and so these don't compress very well. If you round
the weights so that nearby numbers are stored as exactly the same values, the
resulting bit stream has a lot more repetition and so compresses down a lot more
effectively. To try this technique on your model, run the
[round_weights](#round_weights) transform.
```bash
bazel build tensorflow/tools/graph_transforms:transform_graph
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=tensorflow_inception_graph.pb \
--out_graph=optimized_inception_graph.pb \
--inputs='Mul' \
--outputs='softmax' \
--transforms='
strip_unused_nodes(type=float, shape="1,299,299,3")
fold_constants(ignore_errors=true)
fold_batch_norms
fold_old_batch_norms
round_weights(num_steps=256)'
```
You should see that the `optimized_inception_graph.pb` output file is the same
size as the input, but if you run zip on it to compress it, it's almost 70%
smaller than if you zip the original! The nice thing about this transform is
that it doesn't change the structure of the graph at all, so it's running
exactly the same operations and should have the same latency and memory usage as
before. You can adjust the `num_steps` parameter to control how many values each
weight buffer is rounded to, so lower numbers will increase the compression at
the cost of accuracy.
As a further step, you can store the weights into eight-bit values directly.
Here's the recipe for that:
```bash
bazel build tensorflow/tools/graph_transforms:transform_graph
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=tensorflow_inception_graph.pb \
--out_graph=optimized_inception_graph.pb \
--inputs='Mul' \
--outputs='softmax' \
--transforms='
strip_unused_nodes(type=float, shape="1,299,299,3")
fold_constants(ignore_errors=true)
fold_batch_norms
fold_old_batch_norms
quantize_weights'
```
You should see that the size of the output graph is about a quarter of the
original. The downside to this approach compared to round_weights is that extra
decompression ops are inserted to convert the eight-bit values back into
floating point, but optimizations in TensorFlow's runtime should ensure these
results are cached and so you shouldn't see the graph run any more slowly.
So far we've been concentrating on weights because those generally take up the
most space. If you have a graph with a lot of small nodes in it, the names of
those nodes can start to take up a noticeable amount of space too. To shrink
those down, you can run the [obfuscate_names](#obfuscate_names) transform, which
replaces all the names (except for inputs and outputs) with short, cryptic but
unique ids:
```bash
bazel build tensorflow/tools/graph_transforms:transform_graph
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=tensorflow_inception_graph.pb \
--out_graph=optimized_inception_graph.pb \
--inputs='Mul:0' \
--outputs='softmax:0' \
--transforms='
obfuscate_names'
```
### Eight-bit Calculations
For some platforms it's very helpful to be able to do as many calculations as
possible in eight-bit, rather than floating-point. The support for this in
TensorFlow is still experimental and evolving, but you can convert models into
quantized form using the graph transform tool:
```bash
bazel build tensorflow/tools/graph_transforms:transform_graph
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=tensorflow_inception_graph.pb \
--out_graph=optimized_inception_graph.pb \
--inputs='Mul' \
--outputs='softmax' \
--transforms='
add_default_attributes
strip_unused_nodes(type=float, shape="1,299,299,3")
remove_nodes(op=Identity, op=CheckNumerics)
fold_constants(ignore_errors=true)
fold_batch_norms
fold_old_batch_norms
quantize_weights
quantize_nodes
strip_unused_nodes
sort_by_execution_order'
```
This process converts all the operations in the graph that have eight-bit
quantized equivalents, and leaves the rest in floating point. Only a subset of
ops are supported, and on many platforms the quantized code may actually be
slower than the float equivalents, but this is a way of increasing performance
substantially when all the circumstances are right.
A full guide to optimizing for quantization is beyond the scope of this guide,
but one thing that can help is using the FakeQuantWithMinMaxVars op after Conv2D
or similar operations during training. This trains the min/max variables that
control the range used for quantization, so that the range doesn't have to be
calculated dynamically by RequantizationRange during inference.
## Transform Reference
The --transforms string is parsed as a series of transform names, each of which
can have multiple named arguments inside parentheses. Arguments are separated by
commas, and double-quotes (") can be used to hold argument values if they
themselves contain commas (for example shape definitions).
The --inputs and --outputs are shared across all transforms, since it's common
to need to know what the ingoing and outgoing nodes in the graph are. You should
make sure you set these correctly before calling the graph transform tool, and
if you're in doubt check with the model's author, or use the [`summarize_graph`](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/graph_transforms#inspecting-graphs) tool
to examine likely inputs and outputs.
All transforms can be passed the `ignore_errors` flag, with the value set to
either true or false. By default any errors that happen within a transform will
abort the whole process, but if you enable this then an error will just be
logged and the transform skipped. This is especially useful for optional
transforms where version errors or other unimportant problems may trigger an
error.
### add_default_attributes
Args: None
When attributes are added to ops in new versions of TensorFlow, they often have
defaults to ensure backwards compatible behavior with their original versions.
These defaults usually get added when the graph is loaded by the runtime, but if
your model is going to be processed outside of the main TensorFlow framework it
can be useful to run this update process as a transform. This process finds any
op attributes that are defined in the current TensorFlow list of ops but not
within the saved model, and sets them to the defined default for that attribute.
### backport_concatv2
Args: None
If you have a GraphDef file that has been produced by a newer version of the
TensorFlow framework and includes ConcatV2, and you want to run it on an older
version that only supports Concat, this transform will take care of converting
those newer ops to the equivalent older form.
### flatten_atrous_conv
Args: None \
Prerequisites: [fold_constants](#fold_constants)
This transform flattens atrous convolution, corresponding to a sequence of
SpaceToBatchND-Conv2D-BatchToSpaceND operations, converting it to a regular
Conv2D op with upsampled filters. This transforms should only be used in order
to run graphs having atrous convolution on platforms that do not yet natively
support SpaceToBatchND and BatchToSpaceND operations. You will need to make
sure you run [fold_constants](#fold_constants) after this transform. If
applicable, you should run this transform before
[fold_batch_norms](#fold_batch_norms).
### fold_batch_norms
Args: None \
Prerequisites: [fold_constants](#fold_constants)
This transform tries to optimize away the Mul that's introduced after a Conv2D
(or a MatMul) when batch normalization has been used during training. It scans
the graph for any channel-wise multiplies immediately after convolutions, and
multiplies the convolution's (or matrix multiplication's) weights with the Mul
instead so this can be omitted at inference time. You'll need to make sure you
run [fold_constants](#fold_constants) first, since the pattern can only be
spotted if the normal complex expression that's produced by training for the Mul
input is collapsed down into a simple constant.
### fold_constants
Args:
* clear_output_shapes: Clears tensor shape information saved as attributes.
Some older graphs contain out-of-date information and may cause import
errors. Defaults to true.
Prerequisites: None
Looks for any sub-graphs within the model that always evaluate to constant
expressions, and replaces them with those constants. This optimization is always
executed at run-time after the graph is loaded, so running it offline first
won't help latency, but it can simplify the graph and so make further processing
easier. It's often useful to call this with `fold_constants(ignore_errors=true)`
to continue on past transient errors, since this is just an optimization phase.
### fold_old_batch_norms
Args: None \
Prerequisites: None
In the early days of TensorFlow, batch normalization was implemented using
single monolithic ops like `BatchNormWithGlobalNormalization` or
`FusedBatchNorm`. In modern versions, adding batch normalization from Python
will give you a series of smaller math ops instead, to achieve the same effect
without special-purpose code. If you have a graph that uses the older-style,
this transform will recognize and optimize those ops for inference, in the same
way that the [fold_batch_norms](#fold_batch_norms) transform does for the new
approach.
### freeze_requantization_ranges
Args:
* min_max_log_file: Path to a log file containing ranges for ops.
* min_percentile: Percentage cutoff to use to calculate an overall min.
Defaults to 5.
* max_percentile: Percentage cutoff to use to calculate an overall max.
Defaults to 5.
Quantized operations like convolution or matrix multiplies take their inputs as
8-bit, but produce 32-bit results. To do further operations on these, they need
to be converted back down to the lower depth. To make the most of those eight
bits, you need to scale the thirty-two bits of original data down using a scale
that matches the range that's actually being used.
Because that range information isn't stored in the original graph, the
[quantization process](#eight-bit-calculations) inserts RequantizationRange ops
before each conversion from 32 to 8 bits. This op looks at the 32-bit output and
calculates the current min and max every time it's run.
This isn't incredibly time-consuming, but it is extra work that's nice to avoid
if possible. One way of optimizing that away is replacing those
RequantizationRange ops with a pair of Const nodes holding known min/max values,
so the scaling down can be done without having to inspect the output every time.
That's what this transform does. It's usually used in conjunction with a copy of
the graph that's had [insert_logging](#insert_logging) run on it to instrument
it to record the min/max values to stderr. Why is logging used rather than
writing to a normal file? As you'll see later, to get best results you want to
collect data from a lot of runs on real data, and for mobile apps especially
it's a lot easier to do this by copying log files. As an example, here's how
you'd add the logging operations for a quantized version of the Inception v3
graph:
```bash
bazel build tensorflow/tools/graph_transforms:transform_graph
bazel-bin/tensorflow/tools/graph_transforms/transform_graph \
--in_graph=/tmp/quantized_inception.pb \
--out_graph=/tmp/logged_quantized_inception.pb \
--inputs=Mul \
--outputs=softmax \
--transforms='
insert_logging(op=RequantizationRange, show_name=true, message="__requant_min_max:")\
'
```
Now, when you run the `/tmp/logged_quantized_inception.pb` graph, it will write
out log statements that show the value of the min and max calculated by each
RequantizationRange op. Here's an example of running label_image and saving the
log:
```bash
bazel build tensorflow/examples/label_image:label_image
bazel-bin/tensorflow/examples/label_image/label_image \
--image=${HOME}/Downloads/grace_hopper.jpg \
--input_layer=Mul \
--output_layer=softmax \
--graph=/tmp/logged_quantized_inception.pb \
--labels=${HOME}/Downloads/imagenet_comp_graph_label_strings.txt \
2>/tmp/min_max_log_small.txt
```
If you look in `/tmp/min_max_log_small.txt`, you'll see a lot of lines like
this:
```
I0108 21:45:42.261883 1972 logging_ops.cc:79] ;conv/Conv2D/eightbit/requant_range__print__;__requant_min_max:[-20.887871][22.274715]
```
This is a simple way of serializing the name of the RequantizationRange op and
its min/max values every time it's run. It's a file like this that you pass into
the transform as the `min_max_log_file` argument. The transform will attempt to
extract all of the min/max values associated with ops, ignoring any irrelevant
lines in the log, and replace the RequantizationRange ops with two Const nodes
containing the found values.
This isn't the whole story though. The min/max values can vary a lot depending
on what the particular inputs to the graph are on any given run, which means
picking ranges based on just one run can lead to clipping of values and a loss
of accuracy. To get better results, you need to run your network against a range
of different inputs. In Inception's case, I often use a thousand different
images from the training set. You can then pass the whole concatenated log from
all of the runs into the transform, and it will pick ranges based on the
aggregate of the values found for each RequantizationRange op.
To ensure that outliers don't increase the range too much, and so decrease the
accuracy by putting too many bits into rare extreme values, the `min_percentile`
and `max_percentile` arguments control how the overall min and max are chosen.
At their default values of 5, this means that the lowest 5% of the minimum
values will be discarded, taking the minimum of the remainder, and the
equivalent for the maximum.
### fuse_convolutions
Args: None \
Prerequisites: None
For graphs that use ResizeBilinear or MirrorPad ops before convolutions (e.g. to
scale up in the later stages of an image style transfer model), it can improve
memory usage and latency to combine the spatial transformations with the
convolution's im2col patch generation. This transform looks out for that
particular pattern of ops and replaces them with a fused version that combines
the resizing and padding with the convolution.
### insert_logging
Args:
* op: Insert a Print after every occurrence of this op type. Can be repeated
to cover multiple types. If not present, all op types will be instrumented.
* prefix: Insert a Print after every node whose name starts with this value.
Can be repeated to cover multiple nodes. If not present, all node names will
be matched.
* show_op: If true, the op type will be prepended to all log messages.
* show_name: If true, the node's name will be prepended to all log messages.
* message: Arbitrary text to log before the values.
* first_n: How many times to print before suppressing. Defaults to -1, which
means never stop.
* summarize: How long numerical results can be before they're truncated.
Defaults to 1024.
The Print operator writes strings to stderr when it's run inside a graph, and
prints out the numerical results of the node that it's reading from. This can be
very useful when you're debugging and want to follow particular internal values
while a graph is running. This transform allows you to insert those ops at
particular points in the graph, and customize the message that's displayed. It's
also used in conjunction with the
[freeze_requantization_ranges](#freeze_requantization_ranges) transform to
output information that it needs.
### merge_duplicate_nodes
Args: None \
Prerequisites: None
If there are Const nodes with the same types and contents, or nodes with the
same inputs and attributes, this transform will merge them together. It can be
useful when you want to cut down the number of nodes in a graph that has a lot
of redundancy (e.g. this transform is always run as part of
[quantize_nodes](#quantize_nodes) since the processing there can introduce
duplicates of constants that are used in the quantize/dequantize process).
### obfuscate_names
Args: None \
Prerequisites: None
Replaces all nodes' names with short generated ids, other than the inputs and
outputs. This also updates all references within the graph so that the structure
is preserved. This can be useful if you want to shrink the file size, or if you
want to make it harder to understand the architecture of your model before
releasing it.
### quantize_nodes
Args:
* input_min: The lowest float value for any quantized placeholder inputs.
* input_max: The highest float value for any quantized placeholder inputs. If
both input_min and input_max are set, then any float placeholders in the
graph will be replaced with quantized versions, and consts will be created
to pass the range to subsequent operations.
* fallback_min: The lowest float value to use for requantizing activation
layers.
* fallback_max: The highest float value to use for requantizing activation
layers. If both fallback_min and fallback_max are set, then instead of using
RequantizationRange ops to figure out the useful range dynamically when
converting the 32-bit output of ops like QuantizedConv2D and
QuantizedBiasAdd, hardwired consts with these values will be used instead.
This can help performance, if you know the range of your activation layers
ahead of time.
Prerequisites: [quantize_weights](#quantize_weights)
Replaces any calculation nodes with their eight-bit equivalents (if available),
and adds in conversion layers to allow remaining float operations to
interoperate. This is one of the most complex transforms, and involves multiple
passes and a lot of rewriting. It's also still an active area of research, so
results may vary depending on the platform and operations you're using in your
model. You should run quantize_weights first to ensure your Const ops are in
eight-bit form.
### quantize_weights
Args:
* minimum_size: Tensors with fewer elements than this won't be quantized
(defaults to 1024)
Prerequisites: None
Converts any large (more than minimum_size) float Const op into an eight-bit
equivalent, followed by a float conversion op so that the result is usable by
subsequent nodes. This is mostly useful for [shrinking file
sizes](#shrinking-file-size), but also helps with the more advanced
[quantize_nodes](#quantize_nodes) transform. Even though there are no
prerequisites, it is advisable to run [fold_batch_norms](#fold_batch_norms) or
[fold_old_batch_norms](#fold_old_batch_norms), because rounding variances down
to zero may cause significant loss of precision.
### remove_attribute
Args:
* attribute_name: Name of the attribute you want to remove.
* op_name: Optional name of a single op to restrict the removal to.
Prerequisites: None
Deletes the given attribute from either all nodes, or just the one specified in
`op_name`. This can be a dangerous transform since it's easy to leave your graph
in an invalid state if you remove a required attribute. It can be useful in
special circumstances though.
### remove_device
Args: None \
Prerequisites: None
All ops can have a hardware device specified. This can be a problem when you're
loading a graph on a different system than the model was trained on, since some
specified devices may not be available. In order to work with graphs like these,
you can run this transform to wipe the slate clean and delete the device
specifier from all ops.
### remove_control_dependencies
Args: None \
Prerequisites: None
Removes all control dependencies from the graph.
### remove_nodes
Args:
* op: The name of the op you want to remove. Can be repeated to remove
multiple ops.
Prerequisites: None
This is a potentially dangerous transform that looks for single-input,
single-output ops with the given names, removes them from the graph, and rewires
all inputs that used to pull from them to pull from the preceding node instead.
This is most useful for getting rid of ops like `CheckNumerics` that are useful
during training but just complicate the graph and increase latency during
inference. It's dangerous because it's possible that removing some ops may
change the output of your graph, so make sure you check the overall accuracy
after using this.
### rename_attribute
Args:
* old_attribute_name: Current name of the attribute you want to rename.
* new_attribute_name: Name that you want the attribute to have now.
* op_name: If this is set, only change attributes for a given op type,
otherwise apply to all nodes with attribute names that match.
Prerequisites: None
Changes the name of the given attribute. This is often useful for upgrading
graph files as op definitions change over versions, since the renaming is often
enough to deal with minor changes.
### rename_op
Args:
* old_op_name: Current name of the operation.
* new_op_name: Name to change to.
Prerequisites: None
Finds all ops with the given name, and changes them to the new one. This can be
useful for version upgrading if the changes between ops are minor apart from the
name.
### round_weights
Args:
* num_steps: How many unique values to use in each buffer.
Prerequisites: None
Rounds all float values in large Const ops (more than 15 elements) to the given
number of steps. The unique values are chosen per buffer by linearly allocating
between the largest and smallest values present. This is useful when you'll be
deploying on mobile, and you want a model that will compress effectively. See
[shrinking file size](#shrinking-file-size) for more details. Even though there
are no prerequisites, it is advisable to run
[fold_batch_norms](#fold_batch_norms) or
[fold_old_batch_norms](#fold_old_batch_norms), because rounding variances down
to zero may cause significant loss of precision.
### sparsify_gather
Args: None \
Prerequisites: None
Transform 'Gather' op to a sparsified version where 'params' input of 'Gather'
is replaced from a dense 'Const' to a 'HashTable'. 'Gather' op itself is
replaced by a hashtable lookup. This is mostly useful for reducing sparse
TF.learn linear model memory footprint.
### set_device
Args:
* device: What device to assign to ops.
* if_default: If this is true, only assign to ops with empty existing devices.
Updates nodes to use the specified device. A device is a way to tell the code
that executes the graph which piece of hardware it should run particular nodes
on. The right assignment to use may change between training and deployment, so
this transform (and [remove_device](#remove_device)) provide a way of updating
the placement. If the `if_default` parameter is set, then only ops that don't
have a device assigned already will be updated. This is mostly useful for
preprocessing of graphs for other stages that expect all ops to have an explicit
device assigned.
### sort_by_execution_order
Args: None \
Prerequisites: None
Arranges the nodes in the GraphDef in topological order, so that the inputs of
any given node are always earlier than the node itself. This is especially
useful when you're targeting a minimal inference engine, since you can just
execute the nodes in the given order knowing that the inputs will be computed
before they're needed.
### strip_unused_nodes
Args:
* type: Default type for any new Placeholder nodes generated, for example
int32, float, quint8.
* shape: Default shape for any new Placeholder nodes generated, as
comma-separated dimensions. For example shape="1,299,299,3". The double
quotes are important, since otherwise the commas will be taken as argument
separators.
* name: Identifier for the placeholder arguments.
* type_for_name: What type to use for the previously-given name.
* shape_for_name: What shape to use for the previously-given name.
Prerequisites: None
Removes all nodes not used in calculating the layers given in `--outputs`, fed by
`--inputs`. This is often useful for removing training-only nodes like
save-and-restore or summary ops. It's also handy for solving the [missing kernel
errors problem](#fixing-missing-kernel-errors-on-mobile) when there are decode
or other ops you don't need in the inference path.
The biggest complication is that it sometimes has to create new Placeholder ops,
so there are options to control their characteristics. This will happen if you
bypass a DecodeJpeg op by specifying an input layer deeper in the network, for
example, so you can pass in a raw image array instead of an encoded string as an
input. The decode op will be removed, together with the Placeholder that fed it,
but a new Placeholder is needed for the input layer you specify. The type and
shape arguments let you control the attributes of any new Placeholders that are
created. Plain `type` and `shape` set global defaults, but if you have different
inputs with varying characteristics, you'll need to pass in a list of arguments
where the preceding name specifies what layer each applies to. For example, if
you had two inputs in1 and in2, you could call `strip_unused_nodes(name=in1,
type_for_name=int32, shape_for_name="2,3", name=in2, type_for_name=float,
shape_for_name="1,10,10,3")`.
## Writing Your Own Transforms
The Graph Transform Tool is designed to make it as easy as possible to create
your own optimization, modification, and pre-processing transforms. At their
heart, all of the transforms take in a valid GraphDef, make some changes, and
output a new GraphDef. Each GraphDef is just a list of NodeDefs, each defining
one node in the graph and its connections. You can find more information on the
format at [this guide to TensorFlow model
files](https://www.tensorflow.org/versions/master/extend/tool_developers/index.html),
but for a simple example take a look at
[tensorflow/tools/graph_transforms/rename_op.cc](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/graph_transforms/rename_op.cc),
which implements the [rename_op](#rename_op) transform:
```C++
Status RenameOp(const GraphDef& input_graph_def,
const TransformFuncContext& context,
GraphDef* output_graph_def) {
if (!context.params.count("old_op_name") ||
(context.params.at("old_op_name").size() != 1) ||
!context.params.count("new_op_name") ||
(context.params.at("new_op_name").size() != 1)) {
return errors::InvalidArgument(
"rename_op expects exactly one 'old_op_name' and 'new_op_name' "
"argument, e.g. rename_op(old_op_name=Mul, new_op_name=Multiply)");
}
const string old_op_name = context.params.at("old_op_name")[0];
const string new_op_name = context.params.at("new_op_name")[0];
output_graph_def->Clear();
for (const NodeDef& node : input_graph_def.node()) {
NodeDef* new_node = output_graph_def->mutable_node()->Add();
new_node->CopyFrom(node);
if (node.op() == old_op_name) {
new_node->set_op(new_op_name);
}
}
return OkStatus();
}
REGISTER_GRAPH_TRANSFORM("rename_op", RenameOp);
```
The heart of this transform is the loop through the input_graph_def's nodes. We
go through each op, add a new one to the output, copy the original's contents,
and then change the op over if it matches the parameters. There's a standard set
of parameters for every transform, so they all take in a GraphDef and context,
and write out into a new GraphDef. The registration macro at the bottom lets the
tool know what function to call when it finds the `rename_op` string in a
transforms list.
### Transform Functions
The standard signature that all transform functions have is defined as
`TransformFunc`, which takes in an input GraphDef, a `TransformFuncContext`
containing environment information, writes to an output GraphDef, and returns a
Status indicating whether the transform succeeded.
The `TransformFuncContext` has a list of the inputs and outputs for the graph,
and the [parameter arguments](#parameters) that were passed into the transform
by the user.
If you write a function that matches this signature, and [register
it](#registration), the graph transform tool will take care of calling it.
### Pattern Syntax
The `rename_op` example only needs to look at a single node at a time, but one
of the most common needs is to modify small sub-graphs within a model. To make
this easy, the Graph Transform Tool provides the `OpTypePattern` syntax. This is
a simple and compact way to specify patterns of nodes that you want to look for.
The format is:
```
OP_TYPE_PATTERN ::= "{" OP "," INPUTS "}"
INPUTS ::= OP_TYPE_PATTERN
```
The `OP` field can either contain a single "*", which means match any op type,
one op type (for example "Const"), or a set of op types separated by `|` symbols
(for example "Conv2D|MatMul|BiasAdd"). General regex patterns are not supported,
just these special cases.
You can think of these patterns as very limited regular expressions designed to
pick out sub-trees in graphs. They are deliberately very constrained to the kind
of things we commonly find ourselves needing to do, to make creating and
debugging as straightforward as possible.
For example, if you want all Conv2D nodes that have a constant as their second
input, you would set up a pattern like this, using C++ initializer lists to
populate the structure:
```C++
OpTypePattern conv_pattern({"Conv2D", {{"*"}, {"Const"}}});
```
It can be easier to visualize these initializers using indentation to show the
tree structure more clearly:
```C++
OpTypePattern conv_pattern({
"Conv2D",
{
{"*"},
{"Const"}
}
});
```
In plain English this is saying, a Conv2D op with two inputs, the first of which
is any op type, and the second is a Const op.
Here's a much more complex example, from the [quantize_nodes](#quantize_nodes)
transform:
```C++
{"QuantizeV2",
{
{"Dequantize"},
{"Min",
{
{"Reshape",
{
{"Dequantize"},
{"Const"},
}
},
{"Const"},
}
},
{"Max",
{
{"Reshape",
{
{"Dequantize"},
{"Const"},
}
},
{"Const"},
}
},
}
}
```
This is looking for QuantizeV2 nodes, with three inputs, the first of which is a
Dequantize, the second is a Min that ultimately pulls from a Dequantize, and the
third is a Max which does the same. Assuming we know the Dequantize ops are
pulling from the same eight-bit buffer, the end result of this sub-graph is a
no-op, since it's just turning the eight-bit buffer into float, and then
immediately converting it back to eight-bits, so if we look for this pattern and
remove it we can optimize the graph without changing the result.
### ReplaceMatchingOpTypes
It's very common to want to find all occurrences of a particular sub-graph in a
model, and replace them all with a different sub-graph that keeps the same local
input and output connections. For example with
[fuse_convolutions](#fuse_convolutions), we needed to find all Conv2D ops that
read their inputs from BilinearResizes, and replace those combinations with a
single FusedResizeAndPadConv2D op, but without affecting other ops.
To make that sort of transformation easy, we created the
`ReplaceMatchingOpTypes` helper. This takes in a graph, an `OpTypePattern`
defining the sub-graph to look for, and a callback function to run for every
occurrence it finds. The job of this callback function is to look at the
`NodeMatch` that contains information about the current sub-graph, and return a
new sub-graph in the new_nodes list that will be used to replace the old
sub-graph.
You can see how it's used in practice in the
[fuse_convolutions](#fuse_convolutions) code:
```C++
TF_RETURN_IF_ERROR(ReplaceMatchingOpTypes(
input_graph_def, // clang-format off
{"Conv2D",
{
{"ResizeBilinear"},
{"*"}
}
}, // clang-format on
[](const NodeMatch& match, const std::set<string>& input_nodes,
const std::set<string>& output_nodes,
std::vector<NodeDef>* new_nodes) {
// Find all the nodes we expect in the subgraph.
const NodeDef& conv_node = match.node;
const NodeDef& resize_node = match.inputs[0].node;
const NodeDef& weights_node = match.inputs[1].node;
// We'll be reusing the old weights.
new_nodes->push_back(weights_node);
// Create a 'no-op' mirror padding node that has no effect.
NodeDef pad_dims_node;
pad_dims_node.set_op("Const");
pad_dims_node.set_name(conv_node.name() + "_dummy_paddings");
SetNodeAttr("dtype", DT_INT32, &pad_dims_node);
SetNodeTensorAttr<int32>("value", {4, 2}, {0, 0, 0, 0, 0, 0, 0, 0},
&pad_dims_node);
new_nodes->push_back(pad_dims_node);
// Set up the new fused version of the convolution op.
NodeDef fused_conv;
fused_conv.set_op("FusedResizeAndPadConv2D");
fused_conv.set_name(match.node.name());
AddNodeInput(resize_node.input(0), &fused_conv);
AddNodeInput(resize_node.input(1), &fused_conv);
AddNodeInput(pad_dims_node.name(), &fused_conv);
AddNodeInput(conv_node.input(1), &fused_conv);
CopyNodeAttr(resize_node, "align_corners", "resize_align_corners",
&fused_conv);
SetNodeAttr("mode", "REFLECT", &fused_conv);
CopyNodeAttr(conv_node, "T", "T", &fused_conv);
CopyNodeAttr(conv_node, "padding", "padding", &fused_conv);
CopyNodeAttr(conv_node, "strides", "strides", &fused_conv);
new_nodes->push_back(fused_conv);
return OkStatus();
},
{}, &replaced_graph_def));
```
Here you can see we define the pattern to look for, and in the callback function
use information from each of the nodes in the old sub-graph to create a new
fused node. We also copy over the old weights input node so that isn't lost.
There are a few things to know about the `ReplaceMatchingOpTypes` function:
* All of the nodes in any matching sub-graphs are removed from the new graph
created by the function. If any of them are needed, it's the callback
function's responsibility to add them back in. There's a `CopyOriginalMatch`
convenience call that will copy over all of the original nodes if you decide
you don't actually want to modify a particular sub-graph.
* It is assumed that the same nodes will never appear in more than one matched
sub-graph. This is to ensure that sub-trees are only replaced once, but it
may mean that some sub-graphs aren't spotted if they overlap with earlier
matches.
* The calling framework tries to ensure that the graph remains sane, by
looking at the new_nodes that are returned and making sure that no nodes
which are needed as inputs by nodes outside the sub-graph are removed. These
important nodes are listed in the `output_nodes` argument that's passed into
each replacement function call. You can disable this checking by setting
`allow_inconsistencies` to true in the options, but otherwise any
replacements that break the graph constraints will be canceled. If you do
allow inconsistencies, it's your transform's responsibility to fix them up
before you return your final result. Functions like `RenameNodeInputs` can
be useful if you are doing wholesale node renaming for example.
### Parameters
The arguments that are in parentheses after the transform name when the tool is
called are parsed and placed into the params member of the TransformFuncContext
that's given to each transform. For every named argument, there's a vector of
strings containing all the values that it was given, in the order they were
given. These are treated a bit like command-line parameters, and it's the
transform's responsibility to parse them into the data types it needs, and raise
errors by returning a bad Status if any of them are ill-formed.
As an example, here's a hypothetical transform call:
```
some_transform(foo=a, foo=b, bar=2, bob="1,2,3")
```
Here's what the std::map of strings looks like in the params member:
```
{{"foo", {"a", "b"}}, {"bar", {"2"}}, {"bob", {"1,2,3"}}}
```
The double quotes around the comma-separated argument to `bob` are important
because otherwise they'll be treated as separate arguments, and the parsing will
fail.
Here's an example of how [round_weights](#round_weights) reads its `num_steps`
parameter:
```C++
TF_RETURN_IF_ERROR(context.GetOneInt32Parameter("num_steps", 256, &num_steps));
```
If the conversion fails or the parameter occurs more than once the helper
function will raise a meaningful error through the status result of the
transform. If the parameter isn't specified at all then the default will be
used.
### Function Libraries
A newer feature of TensorFlow is the ability to create libraries of functions as
part of graphs. These are a bit like templates, which define macro operations in
terms of smaller components, which can then be instantiated with different input
and output connections inside the graph just like regular ops. Right now the
graph transform tool just copies these libraries between the input and output
graphs, but it's likely that more complex operations will be supported on them
in the future.
### Registering
The Graph Transform Tool associates names of transforms with the code to
implement them using the `REGISTER_GRAPH_TRANSFORM()` macro. This takes a string
and a function, and automatically registers the transform with the tool. You
will need to watch out for a few things though:
* Because it's using global C++ objects in each file under the hood, the
linker can sometimes strip them out and lose the registration. In Bazel you
need to make sure you're linking any new transforms in as libraries, and use
the `alwayslink` flag in your `cc_binary` call.
* You should be able to create your own copy of the transform_graph tool by
linking against the transform_graph_main_lib library in
tensorflow/tools/graph_transforms/BUILD. This contains all the `main()`
logic to parse command line arguments and call transforms.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@tools@graph_transforms@README.md@.PATH_END.py
|
{
"filename": "pull_data.py",
"repo_name": "annayqho/TheCannon",
"repo_path": "TheCannon_extracted/TheCannon-master/code/lamost/abundances/pull_data.py",
"type": "Python"
}
|
""" Generate the files to run the abundances paper """
import pyfits
import numpy as np
from TheCannon import dataset
import sys
#sys.path.append("/Users/annaho/Dropbox/Research/TheCannon/code/lamost")
#from get_colors import get_colors
def load_all_ref_label():
    """Load the reference-set IDs and stellar labels from the Casey/LAMOST
    cross-match catalog, saving them to .npz files for later pipeline steps.

    Writes ``label_names.npz``, ``ref_id.npz``, and ``ref_label.npz`` in the
    current directory.

    Returns
    -------
    ref_id : ndarray of str
        Whitespace-stripped LAMOST IDs of the reference objects.
    """
    data_dir = "/Users/annaho/Data/LAMOST/Abundances"
    hdus = pyfits.open(
        data_dir + "/casey_lamost_paper_one_cross_match_with_colors.fits")
    table = hdus[1].data
    hdus.close()
    # Strip stray whitespace from the catalog IDs before using them as keys.
    ref_id = np.array([val.strip() for val in np.array(table['lamost_id'])])
    snrg = table['snrg']  # read for parity with the catalog; unused below
    label_names = ['TEFF', 'LOGG', 'AK_WISE',
                   'AL_H', 'CA_H', 'C_H', 'FE_H', 'MG_H', 'MN_H',
                   'NI_H', 'N_H', 'O_H', 'SI_H', 'TI_H']
    np.savez("label_names.npz", label_names)
    # One row per object, one column per label, in label_names order.
    ref_label = np.zeros((len(ref_id), len(label_names)))
    for col, name in enumerate(label_names):
        ref_label[:, col] = table[name]
    np.savez("ref_id.npz", ref_id)
    np.savez("ref_label.npz", ref_label)
    return ref_id
def load_all_ref_spectra(ref_id):
    """Assemble the reference spectra (plus photometric colors).

    Loads the training spectra, selects the objects in ``ref_id``,
    applies the pixel mask, appends colors via :func:`find_colors`, and
    saves ``ref_id_col.npz``, ``ref_flux.npz``, ``ref_ivar.npz`` and
    ``ref_snr.npz`` to the current directory.

    Parameters
    ----------
    ref_id : ndarray of str
        LAMOST IDs of the reference objects (from
        :func:`load_all_ref_label`).
    """
    DATA_DIR = "/Users/annaho/Data/LAMOST/Label_Transfer"
    wl = np.load(DATA_DIR + "/../Abundances/wl_cols.npz")['arr_0']
    all_ref_ivar = np.load("%s/tr_ivar.npz" %DATA_DIR)['arr_0']
    all_ref_flux = np.load("%s/tr_flux.npz" %DATA_DIR)['arr_0']
    all_id = np.load("%s/tr_id.npz" %DATA_DIR)['arr_0']
    all_id = np.array([val.decode('utf-8') for val in all_id])
    # indices of the reference objects within the full training set
    inds = np.array([np.where(all_id==val)[0][0] for val in ref_id])
    ref_flux = all_ref_flux[inds]
    ref_ivar = all_ref_ivar[inds]
    mask = np.load("%s/../Abundances/mask.npz" %DATA_DIR)['arr_0']
    # the first 3626 pixels are the spectral part of the grid
    ref_ivar_masked = apply_mask(wl[0:3626], ref_ivar, mask)
    ref_id_col, ref_flux_col, ref_ivar_col = find_colors(
            ref_id, ref_flux, ref_ivar_masked)
    np.savez("ref_id_col.npz", ref_id_col)
    np.savez("ref_flux.npz", ref_flux_col)
    np.savez("ref_ivar.npz", ref_ivar_col)
    # BUG FIX: slice the spectral pixels [:, 0:3626] to match wl[0:3626];
    # the original indexed the single column 3626, handing Dataset a 1-D
    # array instead of the (nobj, 3626) spectra.
    ds = dataset.Dataset(
            wl[0:3626], ref_id_col, ref_flux_col[:, 0:3626],
            ref_ivar_col[:, 0:3626], [], [], [], [])
    np.savez("ref_snr.npz", ds.tr_SNR)
def apply_mask(wl, ref_ivar, mask):
    """Zero out inverse variances for bad pixels.

    Pixels flagged in ``mask`` (tellurics, DIBs, the Na doublet, ...)
    and everything redward of 8750 Angstroms are given zero inverse
    variance so they carry no weight in the fit.

    Parameters
    ----------
    wl : ndarray
        Wavelength grid, one entry per pixel; must match the number of
        columns of ``ref_ivar``.
    ref_ivar : ndarray
        Inverse variances, shape (nobj, npix); modified in place.
    mask : ndarray
        Boolean (or index) array selecting the pixel columns to discard.

    Returns
    -------
    ndarray
        The masked inverse-variance array (the same object as
        ``ref_ivar``).
    """
    print("Applying mask")
    # NOTE: the original defined an unused `label_names` list here
    # (leftover from another script); that dead code has been removed.
    ref_ivar[:,mask] = 0.0
    # discard the noisy red end of the spectrum
    end = wl > 8750
    ref_ivar[:,end] = 0.0
    return ref_ivar
def add_to_wl(wl, ncol, outfile="wl_cols.npz"):
    """Extend a log-spaced wavelength grid with ``ncol`` extra entries.

    The original version referenced the undefined names ``wl`` and
    ``ncol`` (and assigned ``wl`` locally, making it an unbound local),
    so every call raised; they are now taken as parameters.

    Each appended point continues the grid at the fractional spacing of
    the first pixel pair, delt = (wl[1]-wl[0])/(wl[1]+wl[0]), i.e.
    new = wl[-1]*(1+delt)/(1-delt).

    Parameters
    ----------
    wl : array_like
        The wavelength grid to extend (at least two entries).
    ncol : int
        Number of entries to append (one per photometric color).
    outfile : str, optional
        File the extended grid is saved to (default ``"wl_cols.npz"``).

    Returns
    -------
    ndarray
        The extended wavelength grid.
    """
    wl = np.asarray(wl, dtype=float)
    for _ in range(int(ncol)):
        # fractional spacing of the first wavelength pair (constant for a
        # log-spaced grid)
        delt = ((wl[1:] - wl[:-1]) / (wl[1:] + wl[:-1]))[0]
        new_wl = (wl[-1] * delt + wl[-1]) / (1 - delt)
        wl = np.append(wl, new_wl)
    np.savez(outfile, wl)
    return wl
def find_colors(ref_id, ref_flux, ref_ivar):
    """Append photometric colors as extra "pixels" on each spectrum.

    NOTE(review): this depends on ``get_colors``, whose import is
    commented out at the top of this module -- as written, calling this
    function raises NameError until that import is restored.

    Parameters
    ----------
    ref_id : ndarray of str
        LAMOST IDs of the reference objects.
    ref_flux : ndarray
        Spectra, shape (nobj, npix).
    ref_ivar : ndarray
        Inverse variances, shape (nobj, npix).

    Returns
    -------
    ref_id_col : ndarray
        IDs of the objects that also have color measurements.
    ref_flux_col : ndarray
        Spectra with the color values appended as extra columns.
    ref_ivar_col : ndarray
        Inverse variances with the color ivars appended.
    """
    # Find colors
    DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age"
    print("Finding colors")
    a = pyfits.open(DATA_DIR + "/lamost_catalog_colors.fits")
    data = a[1].data
    a.close()
    all_ids = data['LAMOST_ID_1']
    all_ids = np.array([val.strip() for val in all_ids])
    # objects present in both the color catalog and the reference set
    ref_id_col = np.intersect1d(all_ids, ref_id)
    inds = np.array([np.where(all_ids==val)[0][0] for val in ref_id_col])
    all_id, all_col, all_col_ivar = get_colors(
            DATA_DIR + "/lamost_catalog_colors.fits")
    col = all_col[:,inds]
    col_ivar = all_col_ivar[:,inds]
    # NaN / inf color measurements get zero weight (and a benign flux of 1)
    bad_ivar = np.logical_or(np.isnan(col_ivar), col_ivar==np.inf)
    col_ivar[bad_ivar] = 0.0
    bad_flux = np.logical_or(np.isnan(col), col==np.inf)
    col[bad_flux] = 1.0
    col_ivar[bad_flux] = 0.0
    # add them to the wl, flux and ivar arrays
    inds = np.array([np.where(ref_id==val)[0][0] for val in ref_id_col])
    ref_flux_col = np.hstack((ref_flux[inds], col.T))
    ref_ivar_col = np.hstack((ref_ivar[inds], col_ivar.T))
    return ref_id_col, ref_flux_col, ref_ivar_col
if __name__=="__main__":
    # regenerate the reference-label files; the spectra-assembly step is
    # currently disabled
    ref_id = load_all_ref_label()
    #load_all_ref_spectra(ref_id)
|
annayqhoREPO_NAMETheCannonPATH_START.@TheCannon_extracted@TheCannon-master@code@lamost@abundances@pull_data.py@.PATH_END.py
|
{
"filename": "development.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/android/development.md",
"type": "Markdown"
}
|
# Development tools for Android
TensorFlow Lite provides a number of tools for integrating models into Android
apps. This page describes development tools for use in building apps with
Kotlin, Java, and C++, as well as support for TensorFlow Lite development in
Android Studio.
Key Point: In general, you should use the [TensorFlow Lite Task
Library](#task_library) for integrating TensorFlow Lite into your Android app,
unless your use case is not supported by that library. If it's not supported by
the Task Library, use the [TensorFlow Lite library](#lite_lib) and [Support
library](#support_lib).
To get started quickly writing Android code, see the
[Quickstart for Android](../android/quickstart)
## Tools for building with Kotlin and Java
The following sections describe development tools for TensorFlow Lite that use
the Kotlin and Java languages.
### TensorFlow Lite Task Library {:#task_library}
TensorFlow Lite Task Library contains a set of powerful and easy-to-use
task-specific libraries for app developers to build with TensorFlow Lite.
It provides optimized out-of-box model interfaces for popular machine learning
tasks, such as image classification, question and answer, etc. The model
interfaces are specifically designed for each task to achieve the best
performance and usability. Task Library works cross-platform and is supported on
Java and C++.
To use the Task Library in your Android app, use the AAR from MavenCentral for
[Task Vision library](https://search.maven.org/artifact/org.tensorflow/tensorflow-lite-task-vision)
,
[Task Text library](https://search.maven.org/artifact/org.tensorflow/tensorflow-lite-task-text)
and
[Task Audio Library](https://search.maven.org/artifact/org.tensorflow/tensorflow-lite-task-audio)
, respectively.
You can specify this in your `build.gradle` dependencies as follows:
```build
dependencies {
implementation 'org.tensorflow:tensorflow-lite-task-vision:+'
implementation 'org.tensorflow:tensorflow-lite-task-text:+'
implementation 'org.tensorflow:tensorflow-lite-task-audio:+'
}
```
If you use nightly snapshots, make sure you add the
[Sonatype snapshot repository](./lite_build#use_nightly_snapshots) to your
project.
See the introduction in the
[TensorFlow Lite Task Library overview](../inference_with_metadata/task_library/overview.md)
for more details.
### TensorFlow Lite library {:#lite_lib}
Use the TensorFlow Lite library in your Android app by adding the
[AAR hosted at MavenCentral](https://search.maven.org/artifact/org.tensorflow/tensorflow-lite)
to your development project.
You can specify this in your `build.gradle` dependencies as follows:
```build
dependencies {
implementation 'org.tensorflow:tensorflow-lite:+'
}
```
If you use nightly snapshots, make sure you add the
[Sonatype snapshot repository](./lite_build#use_nightly_snapshots) to your
project.
This AAR includes binaries for all of the
[Android ABIs](https://developer.android.com/ndk/guides/abis). You can reduce
the size of your application's binary by only including the ABIs you need to
support.
Unless you are targeting specific hardware, you should omit the `x86`, `x86_64`,
and `arm32` ABIs in most cases. You can configure this with the following Gradle
configuration. It specifically includes only `armeabi-v7a` and `arm64-v8a`, and
should cover most modern Android devices.
```build
android {
defaultConfig {
ndk {
abiFilters 'armeabi-v7a', 'arm64-v8a'
}
}
}
```
To learn more about `abiFilters`, see
[Android ABIs](https://developer.android.com/ndk/guides/abis)
in the Android NDK documentation.
### TensorFlow Lite Support Library {:#support_lib}
The TensorFlow Lite Android Support Library makes it easier to integrate models
into your application. It provides high-level APIs that help transform raw input
data into the form required by the model, and interpret the model's output,
reducing the amount of boilerplate code required.
It supports common data formats for inputs and outputs, including images and
arrays. It also provides pre- and post-processing units that perform tasks such
as image resizing and cropping.
Use the Support Library in your Android app by including the TensorFlow Lite
[Support Library AAR hosted at MavenCentral](https://search.maven.org/artifact/org.tensorflow/tensorflow-lite-support).
You can specify this in your `build.gradle` dependencies as follows:
```build
dependencies {
implementation 'org.tensorflow:tensorflow-lite-support:+'
}
```
If you use nightly snapshots, make sure you add the
[Sonatype snapshot repository](./lite_build#use_nightly_snapshots) to your
project.
For instructions on how to get started, see the
[TensorFlow Lite Android Support Library](../inference_with_metadata/lite_support.md).
### Minimum Android SDK versions for libraries
| Library | `minSdkVersion` | Device Requirements |
| --------------------------- | --------------- | ---------------------- |
| tensorflow-lite | 19 | NNAPI usage requires |
: : : API 27+ :
| tensorflow-lite-gpu | 19 | GLES 3.1 or OpenCL |
: : : (typically only :
: : : available on API 21+ :
| tensorflow-lite-hexagon | 19 | - |
| tensorflow-lite-support | 19 | - |
| tensorflow-lite-task-vision | 21 | android.graphics.Color |
: : : related API requires :
: : : API 26+ :
| tensorflow-lite-task-text | 21 | - |
| tensorflow-lite-task-audio | 23 | - |
| tensorflow-lite-metadata | 19 | - |
### Using Android Studio
In addition to the development libraries described above, Android Studio
also provides support for integrating TensorFlow Lite models, as described
below.
#### Android Studio ML Model Binding
The ML Model Binding feature of Android Studio 4.1 and later allows you to
import `.tflite` model files into your existing Android app, and generate
interface classes to make it easier to integrate your code with a model.
To import a TensorFlow Lite (TFLite) model:
1. Right-click on the module you would like to use the TFLite model or click on
**File > New > Other > TensorFlow Lite Model**.
1. Select the location of your TensorFlow Lite file. Note that the tooling
configures the module's dependency with ML Model binding and
automatically adds all required dependencies to your Android module's
`build.gradle` file.
Note: Select the second checkbox for importing TensorFlow GPU if you
want to use [GPU acceleration](../performance/gpu).
1. Click `Finish` to begin the import process. When the import is finished, the
tool displays a screen describing the model, including its input and output
tensors.
1. To start using the model, select Kotlin or Java, copy and paste the code
in the **Sample Code** section.
You can return to the model information screen by double clicking the TensorFlow
Lite model under the `ml` directory in Android Studio. For more information on
using the Model Binding feature of Android Studio, see the Android Studio
[release notes](https://developer.android.com/studio/releases#4.1-tensor-flow-lite-models).
For an overview of using model binding in Android Studio, see the code example
[instructions](https://github.com/tensorflow/examples/blob/master/lite/examples/image_classification/android/README.md).
## Tools for building with C and C++
The C and C++ libraries for TensorFlow Lite are primarily intended for
developers using the Android Native Development Kit (NDK) to build their apps.
There are two ways to use TFLite through C++ if you build your app with the NDK:
### TFLite C API
Using this API is the *recommended* approach for developers using the NDK.
Download the
[TensorFlow Lite AAR hosted at MavenCentral](https://search.maven.org/artifact/org.tensorflow/tensorflow/tensorflow-lite)
file, rename to `tensorflow-lite-*.zip`, and unzip it. You must include the four
header files in the `headers/tensorflow/lite/` and `headers/tensorflow/lite/c/`
folders and the relevant `libtensorflowlite_jni.so` dynamic library in the `jni/`
folder in your NDK project.
The `c_api.h` header file contains basic documentation about using the TFLite C
API.
### TFLite C++ API
If you want to use TFLite through C++ API, you can build the C++ shared
libraries:
32bit armeabi-v7a:
```sh
bazel build -c opt --config=android_arm //tensorflow/lite:libtensorflowlite.so
```
64bit arm64-v8a:
```sh
bazel build -c opt --config=android_arm64 //tensorflow/lite:libtensorflowlite.so
```
Currently, there is no straightforward way to extract all header files needed,
so you must include all header files in `tensorflow/lite/` from the TensorFlow
repository. Additionally, you will need header files from
[FlatBuffers](https://github.com/google/flatbuffers) and
[Abseil](https://github.com/abseil/abseil-cpp).
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@android@development.md@.PATH_END.py
|
{
"filename": "base_cxx_network.py",
"repo_name": "pynucastro/pynucastro",
"repo_path": "pynucastro_extracted/pynucastro-main/pynucastro/networks/base_cxx_network.py",
"type": "Python"
}
|
"""Support for a pure C++ reaction network. These functions will
write the C++ code necessary to integrate a reaction network
comprised of the rates that are passed in.
"""
import itertools
import re
import shutil
import sys
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
import numpy as np
import sympy
from pynucastro.networks.rate_collection import RateCollection
from pynucastro.networks.sympy_network_support import SympyRates
from pynucastro.rates import DerivedRate
from pynucastro.screening import get_screening_map
class BaseCxxNetwork(ABC, RateCollection):
    """Interpret the collection of rates and nuclei and produce the
    C++ code needed to integrate the network.

    Concrete subclasses supply the template files (via
    ``_get_template_files``); this base class replaces template tags
    such as ``<ydot>`` and ``<jacnuc>`` with generated C++ code.
    """
    def __init__(self, *args, **kwargs):
        """Initialize the C++ network. We take a single argument: a list
        of rate files that will make up the network
        """
        super().__init__(*args, **kwargs)
        # Get the template files for writing this network code
        self.template_files = self._get_template_files()
        # helper that converts rates into sympy expressions / C++ source
        self.symbol_rates = SympyRates()
        # symbolic results, filled lazily by compose_ydot() /
        # compose_jacobian() the first time _write_network() runs
        self.ydot_out_result = None
        self.solved_ydot = False
        self.jac_out_result = None
        self.jac_null_entries = None
        self.solved_jacobian = False
        # code-generation settings that derived classes may override
        self.function_specifier = "inline"
        self.dtype = "double"
        self.array_namespace = ""
        # a dictionary of functions to call to handle specific parts
        # of the C++ template
        self.ftags = {}
        self.ftags['<nrat_reaclib>'] = self._nrat_reaclib
        self.ftags['<nrat_tabular>'] = self._nrat_tabular
        self.ftags['<nrxn>'] = self._nrxn
        self.ftags['<rate_names>'] = self._rate_names
        self.ftags['<ebind>'] = self._ebind
        self.ftags['<compute_screening_factors>'] = self._compute_screening_factors
        self.ftags['<table_num>'] = self._table_num
        self.ftags['<declare_tables>'] = self._declare_tables
        self.ftags['<table_declare_meta>'] = self._table_declare_meta
        self.ftags['<table_init_meta>'] = self._table_init_meta
        self.ftags['<compute_tabular_rates>'] = self._compute_tabular_rates
        self.ftags['<ydot>'] = self._ydot
        self.ftags['<ydot_weak>'] = self._ydot_weak
        self.ftags['<enuc_add_energy_rate>'] = self._enuc_add_energy_rate
        self.ftags['<jacnuc>'] = self._jacnuc
        self.ftags['<initial_mass_fractions>'] = self._initial_mass_fractions
        self.ftags['<reaclib_rate_functions>'] = self._reaclib_rate_functions
        self.ftags['<rate_struct>'] = self._rate_struct
        self.ftags['<fill_reaclib_rates>'] = self._fill_reaclib_rates
        self.ftags['<approx_rate_functions>'] = self._approx_rate_functions
        self.ftags['<fill_approx_rates>'] = self._fill_approx_rates
        self.ftags['<part_fun_data>'] = self._fill_partition_function_data
        self.ftags['<part_fun_declare>'] = self._fill_partition_function_declare
        self.ftags['<part_fun_cases>'] = self._fill_partition_function_cases
        self.ftags['<spin_state_cases>'] = self._fill_spin_state_cases
        # one unit of indentation in the generated C++ source
        self.indent = ' '
    @abstractmethod
    def _get_template_files(self):
        """Return the template files used to generate the network code.

        Derived classes override this to supply their specific output
        templates.  Returns a list of paths to the template files (the
        values are opened and have ``.name`` accessed in
        ``_write_network``, so ``pathlib.Path`` objects are expected).
        """
        return []
def get_indent_amt(self, l, k):
"""determine the amount of spaces to indent a line"""
rem = re.match(r'\A'+k+r'\(([0-9]*)\)\Z', l)
return int(rem.group(1))
    def _write_network(self, odir=None):
        """
        This writes the RHS, jacobian and ancillary files for the system of ODEs that
        this network describes, using the template files.

        Parameters
        ----------
        odir : str or pathlib.Path, optional
            Output directory for the generated files (and copied table
            data).  Created if it does not exist; defaults to the
            current directory.
        """
        # pylint: disable=arguments-differ
        # Prepare RHS terms
        if not self.solved_ydot:
            self.compose_ydot()
        if not self.solved_jacobian:
            self.compose_jacobian()
        # Process template files
        for tfile in self.template_files:
            # strip the ".template" suffix to get the output file name
            outfile = tfile.name.replace('.template', '')
            if odir is not None:
                odir = Path(odir)
                if not odir.is_dir():
                    try:
                        odir.mkdir()
                    except OSError:
                        sys.exit(f"unable to create directory {odir}")
                outfile = odir/outfile
            with open(tfile) as ifile, open(outfile, "w") as of:
                for l in ifile:
                    ls = l.strip()
                    foundkey = False
                    # a line containing a known tag is replaced by the
                    # output of the corresponding handler function
                    for k, func in self.ftags.items():
                        if k in ls:
                            foundkey = True
                            n_indent = self.get_indent_amt(ls, k)
                            func(n_indent, of)
                    if not foundkey:
                        # ordinary template line -- copy through verbatim
                        of.write(l)
        # Copy any tables in the network to the current directory
        # if the table file cannot be found, print a warning and continue.
        for tr in self.tabular_rates:
            tdir = tr.rfile_path.resolve().parent
            if tdir != Path.cwd():
                tdat_file = Path(tdir, tr.table_file)
                if tdat_file.is_file():
                    shutil.copy(tdat_file, odir or Path.cwd())
                else:
                    warnings.warn(UserWarning(f'Table data file {tr.table_file} not found.'))
def compose_ydot(self):
"""create the expressions for dYdt for the nuclei, where Y is the
molar fraction.
This will take the form of a dict, where the key is a nucleus, and the
value is a list of tuples, with the forward-reverse pairs of a rate
"""
ydot = {}
for n in self.unique_nuclei:
if not self.nuclei_rate_pairs[n]:
ydot[n] = None
else:
ydot_sym_terms = []
for rp in self.nuclei_rate_pairs[n]:
if rp.forward is not None:
fwd = self.symbol_rates.ydot_term_symbol(rp.forward, n)
else:
fwd = None
if rp.reverse is not None:
rvs = self.symbol_rates.ydot_term_symbol(rp.reverse, n)
else:
rvs = None
ydot_sym_terms.append((fwd, rvs))
ydot[n] = ydot_sym_terms
self.ydot_out_result = ydot
self.solved_ydot = True
def compose_jacobian(self):
"""Create the Jacobian matrix, df/dY"""
jac_null = []
jac_sym = []
for nj in self.unique_nuclei:
for ni in self.unique_nuclei:
rsym_is_null = True
rsym = float(sympy.sympify(0.0))
for r in self.nuclei_consumed[nj]:
rsym_add, rsym_add_null = self.symbol_rates.jacobian_term_symbol(r, nj, ni)
rsym = rsym + rsym_add
rsym_is_null = rsym_is_null and rsym_add_null
for r in self.nuclei_produced[nj]:
rsym_add, rsym_add_null = self.symbol_rates.jacobian_term_symbol(r, nj, ni)
rsym = rsym + rsym_add
rsym_is_null = rsym_is_null and rsym_add_null
jac_sym.append(rsym)
jac_null.append(rsym_is_null)
self.jac_out_result = jac_sym
self.jac_null_entries = jac_null
self.solved_jacobian = True
    def _compute_screening_factors(self, n_indent, of):
        # Emit C++ that evaluates the plasma screening factor for each
        # screened reaction pair and multiplies it (and its temperature
        # derivative) into the raw rates stored in rate_eval.
        if not self.do_screening:
            screening_map = []
        else:
            screening_map = get_screening_map(self.get_rates(),
                                              symmetric_screening=self.symmetric_screening)
        for i, scr in enumerate(screening_map):
            nuc1_info = f'{float(scr.n1.Z)}_rt, {float(scr.n1.A)}_rt'
            nuc2_info = f'{float(scr.n2.Z)}_rt, {float(scr.n2.A)}_rt'
            if not (scr.n1.dummy or scr.n2.dummy):
                # Scope the screening calculation to avoid multiple definitions of scn_fac.
                of.write(f'\n{self.indent*n_indent}' + '{')
                of.write(f'\n{self.indent*(n_indent+1)}constexpr auto scn_fac = scrn::calculate_screen_factor({nuc1_info}, {nuc2_info});\n\n')
                # Insert a static assert (which will always pass) to require the
                # compiler to evaluate the screen factor at compile time.
                of.write(f'\n{self.indent*(n_indent+1)}static_assert(scn_fac.z1 == {float(scr.n1.Z)}_rt);\n\n')
                of.write(f'\n{self.indent*(n_indent+1)}actual_screen(pstate, scn_fac, scor, dscor_dt);\n')
                of.write(f'{self.indent*n_indent}' + '}\n\n')
            if scr.name == "He4_He4_He4":
                # we don't need to do anything here, but we want to avoid immediately applying the screening
                pass
            elif scr.name == "He4_He4_He4_dummy":
                # make sure the previous iteration was the first part of 3-alpha
                assert screening_map[i - 1].name == "He4_He4_He4"
                # handle the second part of the screening for 3-alpha
                of.write(f'\n{self.indent*n_indent}' + '{')
                of.write(f'\n{self.indent*(n_indent+1)}constexpr auto scn_fac2 = scrn::calculate_screen_factor({nuc1_info}, {nuc2_info});\n\n')
                of.write(f'\n{self.indent*(n_indent+1)}static_assert(scn_fac2.z1 == {float(scr.n1.Z)}_rt);\n\n')
                of.write(f'\n{self.indent*(n_indent+1)}actual_screen(pstate, scn_fac2, scor2, dscor2_dt);\n')
                of.write(f'\n{self.indent*n_indent}' + '}\n\n')
                # there might be both the forward and reverse 3-alpha
                # if we are doing symmetric screening
                for rr in scr.rates:
                    of.write('\n')
                    of.write(f'{self.indent*n_indent}ratraw = rate_eval.screened_rates(k_{rr.cname()});\n')
                    of.write(f'{self.indent*n_indent}rate_eval.screened_rates(k_{rr.cname()}) *= scor * scor2;\n')
                    of.write(f'{self.indent*n_indent}if constexpr (std::is_same_v<T, rate_derivs_t>) {{\n')
                    of.write(f'{self.indent*n_indent} dratraw_dT = rate_eval.dscreened_rates_dT(k_{rr.cname()});\n')
                    of.write(f'{self.indent*n_indent} rate_eval.dscreened_rates_dT(k_{rr.cname()}) = ratraw * (scor * dscor2_dt + dscor_dt * scor2) + dratraw_dT * scor * scor2;\n')
                    of.write(f'{self.indent*n_indent}}}\n')
            else:
                # there might be several rates that have the same
                # reactants and therefore the same screening applies
                # -- handle them all now
                for rr in scr.rates:
                    of.write('\n')
                    of.write(f'{self.indent*n_indent}ratraw = rate_eval.screened_rates(k_{rr.cname()});\n')
                    of.write(f'{self.indent*n_indent}rate_eval.screened_rates(k_{rr.cname()}) *= scor;\n')
                    of.write(f'{self.indent*n_indent}if constexpr (std::is_same_v<T, rate_derivs_t>) {{\n')
                    of.write(f'{self.indent*n_indent} dratraw_dT = rate_eval.dscreened_rates_dT(k_{rr.cname()});\n')
                    of.write(f'{self.indent*n_indent} rate_eval.dscreened_rates_dT(k_{rr.cname()}) = ratraw * dscor_dt + dratraw_dT * scor;\n')
                    of.write(f'{self.indent*n_indent}}}\n')
        of.write('\n')
def _nrat_reaclib(self, n_indent, of):
# Writes the number of Reaclib rates
of.write(f'{self.indent*n_indent}const int NrateReaclib = {len(self.reaclib_rates + self.derived_rates)};\n')
def _nrat_tabular(self, n_indent, of):
# Writes the number of tabular rates
of.write(f'{self.indent*n_indent}const int NrateTabular = {len(self.tabular_rates)};\n')
def _nrxn(self, n_indent, of):
for i, r in enumerate(self.all_rates):
of.write(f'{self.indent*n_indent}k_{r.cname()} = {i+1},\n')
of.write(f'{self.indent*n_indent}NumRates = k_{self.all_rates[-1].cname()}\n')
def _rate_names(self, n_indent, of):
for i, r in enumerate(self.all_rates):
if i < len(self.all_rates)-1:
cont = ","
else:
cont = ""
of.write(f'{self.indent*n_indent}"{r.cname()}"{cont} // {i+1},\n')
def _ebind(self, n_indent, of):
for nuc in self.unique_nuclei:
of.write(f'{self.indent*n_indent}ebind_per_nucleon({nuc.cindex()}) = {nuc.nucbind}_rt;\n')
def _table_num(self, n_indent, of):
of.write(f'{self.indent*n_indent}const int num_tables = {len(self.tabular_rates)};\n')
    def _declare_tables(self, n_indent, of):
        # Emit extern declarations for each tabulated rate's metadata and
        # its data / rho*Ye / temperature arrays (the matching definitions
        # are written by _table_declare_meta).
        for r in self.tabular_rates:
            idnt = self.indent*n_indent
            of.write(f'{idnt}extern AMREX_GPU_MANAGED table_t {r.table_index_name}_meta;\n')
            of.write(f'{idnt}extern AMREX_GPU_MANAGED {self.array_namespace}Array3D<{self.dtype}, 1, {r.table_temp_lines}, 1, {r.table_rhoy_lines}, 1, {r.table_num_vars}> {r.table_index_name}_data;\n')
            of.write(f'{idnt}extern AMREX_GPU_MANAGED {self.array_namespace}Array1D<{self.dtype}, 1, {r.table_rhoy_lines}> {r.table_index_name}_rhoy;\n')
            of.write(f'{idnt}extern AMREX_GPU_MANAGED {self.array_namespace}Array1D<{self.dtype}, 1, {r.table_temp_lines}> {r.table_index_name}_temp;\n')
            of.write('\n')
    def _table_declare_meta(self, n_indent, of):
        # Emit the definitions matching the extern declarations produced
        # by _declare_tables.
        for r in self.tabular_rates:
            idnt = self.indent*n_indent
            of.write(f"{idnt}AMREX_GPU_MANAGED table_t {r.table_index_name}_meta;\n")
            of.write(f'{idnt}AMREX_GPU_MANAGED {self.array_namespace}Array3D<{self.dtype}, 1, {r.table_temp_lines}, 1, {r.table_rhoy_lines}, 1, {r.table_num_vars}> {r.table_index_name}_data;\n')
            of.write(f'{idnt}AMREX_GPU_MANAGED {self.array_namespace}Array1D<{self.dtype}, 1, {r.table_rhoy_lines}> {r.table_index_name}_rhoy;\n')
            of.write(f'{idnt}AMREX_GPU_MANAGED {self.array_namespace}Array1D<{self.dtype}, 1, {r.table_temp_lines}> {r.table_index_name}_temp;\n\n')
    def _table_init_meta(self, n_indent, of):
        # Emit code that fills each table's metadata (grid sizes and
        # header length) and then reads the table data from its file.
        for r in self.tabular_rates:
            idnt = self.indent*n_indent
            of.write(f'{idnt}{r.table_index_name}_meta.ntemp = {r.table_temp_lines};\n')
            of.write(f'{idnt}{r.table_index_name}_meta.nrhoy = {r.table_rhoy_lines};\n')
            of.write(f'{idnt}{r.table_index_name}_meta.nvars = {r.table_num_vars};\n')
            of.write(f'{idnt}{r.table_index_name}_meta.nheader = {r.table_header_lines};\n\n')
            of.write(f'{idnt}init_tab_info({r.table_index_name}_meta, "{r.table_file}", {r.table_index_name}_rhoy, {r.table_index_name}_temp, {r.table_index_name}_data);\n\n')
        of.write('\n')
    def _compute_tabular_rates(self, n_indent, of):
        # Emit code that interpolates each tabulated rate at (rho*Ye, T),
        # stores the result (and its T-derivative, when requested) in
        # rate_eval, and accumulates the weak-rate energy release.
        if len(self.tabular_rates) > 0:
            idnt = self.indent*n_indent
            for r in self.tabular_rates:
                of.write(f'{idnt}tabular_evaluate({r.table_index_name}_meta, {r.table_index_name}_rhoy, {r.table_index_name}_temp, {r.table_index_name}_data,\n')
                of.write(f'{idnt} rhoy, state.T, rate, drate_dt, edot_nu, edot_gamma);\n')
                of.write(f'{idnt}rate_eval.screened_rates(k_{r.cname()}) = rate;\n')
                of.write(f'{idnt}if constexpr (std::is_same_v<T, rate_derivs_t>) {{\n')
                of.write(f'{idnt} rate_eval.dscreened_rates_dT(k_{r.cname()}) = drate_dt;\n')
                of.write(f'{idnt}}}\n')
                of.write(f'{idnt}rate_eval.enuc_weak += C::Legacy::n_A * {self.symbol_rates.name_y}({r.reactants[0].cindex()}) * (edot_nu + edot_gamma);\n')
            of.write('\n')
def _cxxify(self, s):
# This is a helper function that converts sympy cxxcode to the actual c++ code we use.
return self.symbol_rates.cxxify(s)
    def _write_ydot_nuc(self, n_indent, of, ydot_nuc):
        # Helper function to write out ydot of a specific nuclei
        # ydot_nuc is a list of (forward, reverse) sympy term pairs; each
        # pair is emitted as "(fwd + rvs)" (or the single non-None term),
        # the pairs are joined with " +", and the whole expression is
        # terminated with ";".
        for j, pair in enumerate(ydot_nuc):
            # pair here is the forward, reverse pair for a single rate as it affects
            # nucleus n
            if pair.count(None) == 0:
                num = 2
            elif pair.count(None) == 1:
                num = 1
            else:
                raise NotImplementedError("a rate pair must contain at least one rate")
            of.write(f"{2*self.indent*n_indent}")
            if num == 2:
                of.write("(")
            if pair[0] is not None:
                sol_value = self._cxxify(sympy.cxxcode(pair[0], precision=15,
                                                       standard="c++11"))
                of.write(f"{sol_value}")
            if num == 2:
                of.write(" + ")
            if pair[1] is not None:
                sol_value = self._cxxify(sympy.cxxcode(pair[1], precision=15,
                                                       standard="c++11"))
                of.write(f"{sol_value}")
            if num == 2:
                of.write(")")
            if j == len(ydot_nuc)-1:
                # final pair closes the expression
                of.write(";\n\n")
            else:
                of.write(" +\n")
def _ydot(self, n_indent, of):
# Write YDOT
for n in self.unique_nuclei:
if self.ydot_out_result[n] is None:
of.write(f"{self.indent*n_indent}{self.symbol_rates.name_ydot_nuc}({n.cindex()}) = 0.0_rt;\n\n")
continue
of.write(f"{self.indent*n_indent}{self.symbol_rates.name_ydot_nuc}({n.cindex()}) =\n")
self._write_ydot_nuc(n_indent, of, self.ydot_out_result[n])
    def _ydot_weak(self, n_indent, of):
        # Writes ydot for tabular weak reactions only
        # Get the tabular weak rates first.
        idnt = self.indent*n_indent
        if len(self.tabular_rates) > 0:
            for r in self.tabular_rates:
                of.write(f'{idnt}tabular_evaluate({r.table_index_name}_meta, {r.table_index_name}_rhoy, {r.table_index_name}_temp, {r.table_index_name}_data,\n')
                of.write(f'{idnt} rhoy, state.T, rate, drate_dt, edot_nu, edot_gamma);\n')
                of.write(f'{idnt}rate_eval.screened_rates(k_{r.cname()}) = rate;\n')
                of.write(f'{idnt}rate_eval.enuc_weak += C::Legacy::n_A * {self.symbol_rates.name_y}({r.reactants[0].cindex()}) * (edot_nu + edot_gamma);\n')
                of.write('\n')
            of.write(f'{idnt}auto screened_rates = rate_eval.screened_rates;\n')
            of.write('\n')
        # Compose and write ydot weak
        # Only terms from tabular (weak) rate pairs contribute; nuclei
        # with no weak rates get an explicit zero.
        for n in self.unique_nuclei:
            has_weak_rates = any(
                (rp.forward is not None and rp.forward.tabular) or
                (rp.reverse is not None and rp.reverse.tabular)
                for rp in self.nuclei_rate_pairs[n]
            )
            if not self.nuclei_rate_pairs[n] or not has_weak_rates:
                of.write(f"{self.indent*n_indent}{self.symbol_rates.name_ydot_nuc}({n.cindex()}) = 0.0_rt;\n\n")
                continue
            ydot_sym_terms = []
            for rp in self.nuclei_rate_pairs[n]:
                fwd = None
                if rp.forward is not None and rp.forward.tabular:
                    fwd = self.symbol_rates.ydot_term_symbol(rp.forward, n)
                rvs = None
                if rp.reverse is not None and rp.reverse.tabular:
                    rvs = self.symbol_rates.ydot_term_symbol(rp.reverse, n)
                # keep only pairs with at least one tabular member
                if (fwd, rvs).count(None) < 2:
                    ydot_sym_terms.append((fwd, rvs))
            of.write(f"{self.indent*n_indent}{self.symbol_rates.name_ydot_nuc}({n.cindex()}) =\n")
            self._write_ydot_nuc(n_indent, of, ydot_sym_terms)
def _enuc_add_energy_rate(self, n_indent, of):
# Add tabular per-reaction neutrino energy generation rates to the energy generation rate
# (not thermal neutrinos)
idnt = self.indent * n_indent
for r in self.tabular_rates:
if len(r.reactants) != 1:
sys.exit('ERROR: Unknown energy rate corrections for a reaction where the number of reactants is not 1.')
else:
reactant = r.reactants[0]
of.write(f'{idnt}enuc += C::Legacy::n_A * {self.symbol_rates.name_y}({reactant.cindex()}) * rate_eval.add_energy_rate(k_{r.cname()});\n')
    def _jacnuc(self, n_indent, of):
        # now make the Jacobian
        # only non-null entries are emitted (via jac.set); entries flagged
        # null in compose_jacobian() are skipped entirely
        n_unique_nuclei = len(self.unique_nuclei)
        for jnj, nj in enumerate(self.unique_nuclei):
            for ini, ni in enumerate(self.unique_nuclei):
                # row-major index into the flat jac_out_result list
                jac_idx = n_unique_nuclei*jnj + ini
                if not self.jac_null_entries[jac_idx]:
                    jvalue = self._cxxify(sympy.cxxcode(self.jac_out_result[jac_idx], precision=15,
                                                        standard="c++11"))
                    of.write(f"{self.indent*(n_indent)}scratch = {jvalue};\n")
                    of.write(f"{self.indent*n_indent}jac.set({nj.cindex()}, {ni.cindex()}, scratch);\n\n")
def _initial_mass_fractions(self, n_indent, of):
for i, _ in enumerate(self.unique_nuclei):
if i == 0:
of.write(f"{self.indent*n_indent}unit_test.X{i+1} = 1.0\n")
else:
of.write(f"{self.indent*n_indent}unit_test.X{i+1} = 0.0\n")
def _reaclib_rate_functions(self, n_indent, of):
assert n_indent == 0, "function definitions must be at top level"
for r in self.reaclib_rates + self.derived_rates:
of.write(r.function_string_cxx(dtype=self.dtype, specifiers=self.function_specifier))
    def _rate_struct(self, n_indent, of):
        # Emit the rate_t / rate_derivs_t structs holding the screened
        # rates (rate_derivs_t also carries their T-derivatives) plus the
        # accumulated weak-rate energy release.
        assert n_indent == 0, "function definitions must be at top level"
        of.write("struct rate_t {\n")
        of.write(f" {self.array_namespace}Array1D<{self.dtype}, 1, NumRates> screened_rates;\n")
        of.write(f" {self.dtype} enuc_weak;\n")
        of.write("};\n\n")
        of.write("struct rate_derivs_t {\n")
        of.write(f" {self.array_namespace}Array1D<{self.dtype}, 1, NumRates> screened_rates;\n")
        of.write(f" {self.array_namespace}Array1D<{self.dtype}, 1, NumRates> dscreened_rates_dT;\n")
        of.write(f" {self.dtype} enuc_weak;\n")
        of.write("};\n\n")
def _approx_rate_functions(self, n_indent, of):
assert n_indent == 0, "function definitions must be at top level"
for r in self.approx_rates:
of.write(r.function_string_cxx(dtype=self.dtype, specifiers=self.function_specifier))
    def _fill_reaclib_rates(self, n_indent, of):
        # Emit code evaluating every ReacLib and derived rate, storing
        # the result (and, when requested via rate_derivs_t, the
        # T-derivative) in rate_eval.
        if self.derived_rates:
            # derived rates take an extra pf_cache argument below, so
            # declare a shared cache up front
            of.write(f"{self.indent*n_indent}part_fun::pf_cache_t pf_cache{{}};\n\n")
        for r in self.reaclib_rates + self.derived_rates:
            if isinstance(r, DerivedRate):
                of.write(f"{self.indent*n_indent}rate_{r.cname()}<do_T_derivatives>(tfactors, rate, drate_dT, pf_cache);\n")
            else:
                of.write(f"{self.indent*n_indent}rate_{r.cname()}<do_T_derivatives>(tfactors, rate, drate_dT);\n")
            of.write(f"{self.indent*n_indent}rate_eval.screened_rates(k_{r.cname()}) = rate;\n")
            of.write(f"{self.indent*n_indent}if constexpr (std::is_same_v<T, rate_derivs_t>) {{\n")
            of.write(f"{self.indent*n_indent} rate_eval.dscreened_rates_dT(k_{r.cname()}) = drate_dT;\n\n")
            of.write(f"{self.indent*n_indent}}}\n")
    def _fill_approx_rates(self, n_indent, of):
        # Emit code evaluating each approximate rate, passing rho and/or
        # the composition only when that rate's evaluation needs them.
        for r in self.approx_rates:
            args = ["rate_eval"]
            if r.rate_eval_needs_rho:
                args.append("rho")
            if r.rate_eval_needs_comp:
                args.append("Y")
            args += ["rate", "drate_dT"]
            of.write(f"{self.indent*n_indent}rate_{r.cname()}<T>({', '.join(args)});\n")
            of.write(f"{self.indent*n_indent}rate_eval.screened_rates(k_{r.cname()}) = rate;\n")
            of.write(f"{self.indent*n_indent}if constexpr (std::is_same_v<T, rate_derivs_t>) {{\n")
            of.write(f"{self.indent*n_indent} rate_eval.dscreened_rates_dT(k_{r.cname()}) = drate_dT;\n\n")
            of.write(f"{self.indent*n_indent}}}\n")
    def _fill_partition_function_declare(self, n_indent, of):
        # Emit size constants and extern declarations for the deduplicated
        # temperature grids and per-nucleus partition-function tables; the
        # values themselves are written by _fill_partition_function_data.
        temp_arrays, temp_indices = self.dedupe_partition_function_temperatures()
        for i, temp in enumerate(temp_arrays):
            decl = f"extern AMREX_GPU_MANAGED amrex::Array1D<{self.dtype}, 0, npts_{i+1}>"
            # number of points
            of.write(f"{self.indent*n_indent}constexpr int npts_{i+1} = {len(temp)};\n\n")
            # declare the temperature grid for this set of tables
            of.write(f"{self.indent*n_indent}// this is T9\n\n")
            of.write(f"{self.indent*n_indent}{decl} temp_array_{i+1};\n\n")
        for n, i in temp_indices.items():
            # temp_indices is keyed by nucleus; the value is the index of
            # the temperature grid that nucleus's table is defined on
            of.write(f"{self.indent*n_indent}// this is log10(partition function)\n\n")
            decl = f"extern AMREX_GPU_MANAGED amrex::Array1D<{self.dtype}, 0, npts_{i+1}>"
            of.write(f"{self.indent*n_indent}{decl} {n}_pf_array;\n\n")
def _fill_partition_function_data(self, n_indent, of):
    """Emit the definitions of the partition-function data tables.

    Writes each deduplicated temperature grid (converted to T9) and each
    nucleus's ``log10`` partition-function table as ``AMREX_GPU_MANAGED``
    array initializers, five values per line for readability.

    Parameters
    ----------
    n_indent : int
        Indentation depth, in units of ``self.indent``, for the emitted code.
    of : file-like
        Open output stream the C++ source is written to.
    """

    def batched(iterable, n):
        """Yield successive tuples of at most ``n`` items from ``iterable``."""
        if n < 1:
            raise ValueError('n must be at least one')
        items = tuple(iterable)
        for start in range(0, len(items), n):
            yield items[start:start + n]

    pad = self.indent * n_indent
    inner = self.indent * (n_indent + 1)
    temp_arrays, temp_indices = self.dedupe_partition_function_temperatures()

    last = len(temp_arrays) - 1
    for idx, temp in enumerate(temp_arrays):
        decl = f"AMREX_GPU_MANAGED amrex::Array1D<{self.dtype}, 0, npts_{idx+1}>"
        of.write(f"{pad}// this is T9\n\n")
        of.write(f"{pad}{decl} temp_array_{idx+1}= {{\n")
        # divide by 1e9 to convert the stored temperatures to T9
        for chunk in batched(temp / 1.0e9, 5):
            row = " ".join(f"{t}," for t in chunk)
            of.write(f"{inner}{row}\n")
        of.write(f"{pad}}};\n\n")
        if idx == last:
            of.write("\n")

    # temp_indices maps each nucleus to the index of its temperature grid
    for nuc, grid_idx in temp_indices.items():
        of.write(f"{pad}// this is log10(partition function)\n\n")
        decl = f"AMREX_GPU_MANAGED amrex::Array1D<{self.dtype}, 0, npts_{grid_idx+1}>"
        of.write(f"{pad}{decl} {nuc}_pf_array = {{\n")
        for chunk in batched(np.log10(nuc.partition_function.partition_function), 5):
            row = " ".join(f"{x}," for x in chunk)
            of.write(f"{inner}{row}\n")
        of.write(f"{pad}}};\n\n")
def _fill_partition_function_cases(self, n_indent, of):
    """Emit the C++ ``switch`` cases that interpolate a partition function.

    For every nucleus with tabulated data, writes a ``case`` on the
    nucleus's C index that calls ``part_fun::interpolate_pf`` with that
    nucleus's temperature grid and log10(pf) table, then ``break``s.

    Parameters
    ----------
    n_indent : int
        Indentation depth, in units of ``self.indent``, for the emitted code.
    of : file-like
        Open output stream the C++ source is written to.
    """
    pad = self.indent * n_indent
    inner = self.indent * (n_indent + 1)
    _, temp_indices = self.dedupe_partition_function_temperatures()
    for nuc, grid_idx in temp_indices.items():
        of.write(f"{pad}case {nuc.cindex()}:\n")
        of.write(f"{inner}part_fun::interpolate_pf(tfactors.T9, "
                 f"part_fun::temp_array_{grid_idx+1}, "
                 f"part_fun::{nuc}_pf_array, pf, dpf_dT);\n")
        of.write(f"{inner}break;\n\n")
def _fill_spin_state_cases(self, n_indent, of):
    """Emit the C++ ``switch`` cases that return a nucleus's spin state.

    Nuclei with identical spin states are grouped so the generated cases
    that share a body sit together (this keeps clang-tidy quiet); nuclei
    without a known spin state (``spin_states is None``) are skipped.

    Parameters
    ----------
    n_indent : int
        Indentation depth, in units of ``self.indent``, for the emitted code.
    of : file-like
        Open output stream the C++ source is written to.
    """
    pad = self.indent * n_indent
    inner = self.indent * (n_indent + 1)

    # use -1 as a sortable stand-in for "no spin state known"
    def spin_key(nuc):
        return -1 if nuc.spin_states is None else nuc.spin_states

    # groupby requires the input sorted on the same key
    ordered = sorted(self.unique_nuclei + self.approx_nuclei, key=spin_key)
    for spin_state, members in itertools.groupby(ordered, key=spin_key):
        if spin_state == -1:
            # spin state unknown for this group -- emit nothing
            continue
        for nuc in members:
            of.write(f"{pad}case {nuc.cindex()}:\n")
            of.write(f"{inner}spin = {spin_state};\n")
            of.write(f"{inner}break;\n\n")
|
pynucastroREPO_NAMEpynucastroPATH_START.@pynucastro_extracted@pynucastro-main@pynucastro@networks@base_cxx_network.py@.PATH_END.py
|
{
"filename": "considerations.ipynb",
"repo_name": "nicokurtovic/SIMIO",
"repo_path": "SIMIO_extracted/SIMIO-main/docs/content/considerations.ipynb",
"type": "Jupyter Notebook"
}
|
# Considerations
## Input image details
For each template, aim to have a pixel size at least $6\sim10$ times smaller than the angular resolution, and an image size larger than $6\sim10''$. For optimal results, generate a different image for observations at different distances. In short, smaller pixel sizes and larger image sizes are always better. For a discussion about the input image size and the assumptions for a correct Fourier Transform calculation, we refer you to [Tazzari et al. (2018)](https://ui.adsabs.harvard.edu/abs/2018MNRAS.476.4527T/abstract).
## CASA Warnings
CASA will write several warnings in the terminal while executing SIMIO. You can ignore them if they are included in the following list:
**Leap Second**: This ```SEVERE``` warning does not affect the results, unless you are working with VLBI or extremely high time precision data. Please check this [page](https://casaguides.nrao.edu/index.php/Fixing_out_of_date_TAI_UTC_tables_(missing_information_on_leap_seconds)).
```python
# SEVERE MeasTable::dUTC(Double) (file ../../measures/Measures/MeasTable.cc, line 4290) Leap second table TAI_UTC seems out-of-date. Until the table is updated (see the CASA documentation or your system admin), times and coordinates derived from UTC could be wrong by 1s or more.
```
**Non-optimal architecture for synthetic measurement sets**: As the templates are a combination of several observations, different spectral windows of the measurement sets have different frequency coverage and numbers of scans. Therefore, the Fourier Transform of the input model is calculated for each spectral window separately (using the ```split``` function). The final measurement set is a concatenation of all the individual spectral windows, so this ```WARN``` message will appear every time a new spectral window is concatenated.
The issue of a non-optimal architecture for the synthetic observation has no impact on the visibilities or the imaging products. A future version of SIMIO-continuum will explore a more efficient procedure to concatenate the synthetic observation.
```python
# WARN MSConcat::concatenate (file ../../ms/MSOper/MSConcat.cc, line 825) Zero or negative scan numbers in MS. May lead to duplicate scan numbers in concatenated MS.
```
|
nicokurtovicREPO_NAMESIMIOPATH_START.@SIMIO_extracted@SIMIO-main@docs@content@considerations.ipynb@.PATH_END.py
|
{
"filename": "ncl.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/lexers/ncl.py",
"type": "Python"
}
|
"""
pygments.lexers.ncl
~~~~~~~~~~~~~~~~~~~
Lexers for NCAR Command Language.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['NCLLexer']
class NCLLexer(RegexLexer):
"""
Lexer for NCL code.
"""
name = 'NCL'
aliases = ['ncl']
filenames = ['*.ncl']
mimetypes = ['text/ncl']
url = 'https://www.ncl.ucar.edu'
version_added = '2.2'
flags = re.MULTILINE
tokens = {
'root': [
(r';.*\n', Comment),
include('strings'),
include('core'),
(r'[a-zA-Z_]\w*', Name),
include('nums'),
(r'[\s]+', Text),
],
'core': [
# Statements
(words((
'begin', 'break', 'continue', 'create', 'defaultapp', 'do',
'else', 'end', 'external', 'exit', 'True', 'False', 'file', 'function',
'getvalues', 'graphic', 'group', 'if', 'list', 'load', 'local',
'new', '_Missing', 'Missing', 'noparent', 'procedure',
'quit', 'QUIT', 'Quit', 'record', 'return', 'setvalues', 'stop',
'then', 'while'), prefix=r'\b', suffix=r'\s*\b'),
Keyword),
# Data Types
(words((
'ubyte', 'uint', 'uint64', 'ulong', 'string', 'byte',
'character', 'double', 'float', 'integer', 'int64', 'logical',
'long', 'short', 'ushort', 'enumeric', 'numeric', 'snumeric'),
prefix=r'\b', suffix=r'\s*\b'),
Keyword.Type),
# Operators
(r'[\%^*+\-/<>]', Operator),
# punctuation:
(r'[\[\]():@$!&|.,\\{}]', Punctuation),
(r'[=:]', Punctuation),
# Intrinsics
(words((
'abs', 'acos', 'addfile', 'addfiles', 'all', 'angmom_atm', 'any',
'area_conserve_remap', 'area_hi2lores', 'area_poly_sphere',
'asciiread', 'asciiwrite', 'asin', 'atan', 'atan2', 'attsetvalues',
'avg', 'betainc', 'bin_avg', 'bin_sum', 'bw_bandpass_filter',
'cancor', 'cbinread', 'cbinwrite', 'cd_calendar', 'cd_inv_calendar',
'cdfbin_p', 'cdfbin_pr', 'cdfbin_s', 'cdfbin_xn', 'cdfchi_p',
'cdfchi_x', 'cdfgam_p', 'cdfgam_x', 'cdfnor_p', 'cdfnor_x',
'cdft_p', 'cdft_t', 'ceil', 'center_finite_diff',
'center_finite_diff_n', 'cfftb', 'cfftf', 'cfftf_frq_reorder',
'charactertodouble', 'charactertofloat', 'charactertointeger',
'charactertolong', 'charactertoshort', 'charactertostring',
'chartodouble', 'chartofloat', 'chartoint', 'chartointeger',
'chartolong', 'chartoshort', 'chartostring', 'chiinv', 'clear',
'color_index_to_rgba', 'conform', 'conform_dims', 'cos', 'cosh',
'count_unique_values', 'covcorm', 'covcorm_xy', 'craybinnumrec',
'craybinrecread', 'create_graphic', 'csa1', 'csa1d', 'csa1s',
'csa1x', 'csa1xd', 'csa1xs', 'csa2', 'csa2d', 'csa2l', 'csa2ld',
'csa2ls', 'csa2lx', 'csa2lxd', 'csa2lxs', 'csa2s', 'csa2x',
'csa2xd', 'csa2xs', 'csa3', 'csa3d', 'csa3l', 'csa3ld', 'csa3ls',
'csa3lx', 'csa3lxd', 'csa3lxs', 'csa3s', 'csa3x', 'csa3xd',
'csa3xs', 'csc2s', 'csgetp', 'css2c', 'cssetp', 'cssgrid', 'csstri',
'csvoro', 'cumsum', 'cz2ccm', 'datatondc', 'day_of_week',
'day_of_year', 'days_in_month', 'default_fillvalue', 'delete',
'depth_to_pres', 'destroy', 'determinant', 'dewtemp_trh',
'dgeevx_lapack', 'dim_acumrun_n', 'dim_avg', 'dim_avg_n',
'dim_avg_wgt', 'dim_avg_wgt_n', 'dim_cumsum', 'dim_cumsum_n',
'dim_gamfit_n', 'dim_gbits', 'dim_max', 'dim_max_n', 'dim_median',
'dim_median_n', 'dim_min', 'dim_min_n', 'dim_num', 'dim_num_n',
'dim_numrun_n', 'dim_pqsort', 'dim_pqsort_n', 'dim_product',
'dim_product_n', 'dim_rmsd', 'dim_rmsd_n', 'dim_rmvmean',
'dim_rmvmean_n', 'dim_rmvmed', 'dim_rmvmed_n', 'dim_spi_n',
'dim_standardize', 'dim_standardize_n', 'dim_stat4', 'dim_stat4_n',
'dim_stddev', 'dim_stddev_n', 'dim_sum', 'dim_sum_n', 'dim_sum_wgt',
'dim_sum_wgt_n', 'dim_variance', 'dim_variance_n', 'dimsizes',
'doubletobyte', 'doubletochar', 'doubletocharacter',
'doubletofloat', 'doubletoint', 'doubletointeger', 'doubletolong',
'doubletoshort', 'dpres_hybrid_ccm', 'dpres_plevel', 'draw',
'draw_color_palette', 'dsgetp', 'dsgrid2', 'dsgrid2d', 'dsgrid2s',
'dsgrid3', 'dsgrid3d', 'dsgrid3s', 'dspnt2', 'dspnt2d', 'dspnt2s',
'dspnt3', 'dspnt3d', 'dspnt3s', 'dssetp', 'dtrend', 'dtrend_msg',
'dtrend_msg_n', 'dtrend_n', 'dtrend_quadratic',
'dtrend_quadratic_msg_n', 'dv2uvf', 'dv2uvg', 'dz_height',
'echo_off', 'echo_on', 'eof2data', 'eof_varimax', 'eofcor',
'eofcor_pcmsg', 'eofcor_ts', 'eofcov', 'eofcov_pcmsg', 'eofcov_ts',
'eofunc', 'eofunc_ts', 'eofunc_varimax', 'equiv_sample_size', 'erf',
'erfc', 'esacr', 'esacv', 'esccr', 'esccv', 'escorc', 'escorc_n',
'escovc', 'exit', 'exp', 'exp_tapersh', 'exp_tapersh_wgts',
'exp_tapershC', 'ezfftb', 'ezfftb_n', 'ezfftf', 'ezfftf_n',
'f2fosh', 'f2foshv', 'f2fsh', 'f2fshv', 'f2gsh', 'f2gshv', 'fabs',
'fbindirread', 'fbindirwrite', 'fbinnumrec', 'fbinread',
'fbinrecread', 'fbinrecwrite', 'fbinwrite', 'fft2db', 'fft2df',
'fftshift', 'fileattdef', 'filechunkdimdef', 'filedimdef',
'fileexists', 'filegrpdef', 'filevarattdef', 'filevarchunkdef',
'filevarcompressleveldef', 'filevardef', 'filevardimsizes',
'filwgts_lancos', 'filwgts_lanczos', 'filwgts_normal',
'floattobyte', 'floattochar', 'floattocharacter', 'floattoint',
'floattointeger', 'floattolong', 'floattoshort', 'floor',
'fluxEddy', 'fo2fsh', 'fo2fshv', 'fourier_info', 'frame', 'fspan',
'ftcurv', 'ftcurvd', 'ftcurvi', 'ftcurvp', 'ftcurvpi', 'ftcurvps',
'ftcurvs', 'ftest', 'ftgetp', 'ftkurv', 'ftkurvd', 'ftkurvp',
'ftkurvpd', 'ftsetp', 'ftsurf', 'g2fsh', 'g2fshv', 'g2gsh',
'g2gshv', 'gamma', 'gammainc', 'gaus', 'gaus_lobat',
'gaus_lobat_wgt', 'gc_aangle', 'gc_clkwise', 'gc_dangle',
'gc_inout', 'gc_latlon', 'gc_onarc', 'gc_pnt2gc', 'gc_qarea',
'gc_tarea', 'generate_2d_array', 'get_color_index',
'get_color_rgba', 'get_cpu_time', 'get_isolines', 'get_ncl_version',
'get_script_name', 'get_script_prefix_name', 'get_sphere_radius',
'get_unique_values', 'getbitsone', 'getenv', 'getfiledimsizes',
'getfilegrpnames', 'getfilepath', 'getfilevaratts',
'getfilevarchunkdimsizes', 'getfilevardims', 'getfilevardimsizes',
'getfilevarnames', 'getfilevartypes', 'getvaratts', 'getvardims',
'gradsf', 'gradsg', 'greg2jul', 'grid2triple', 'hlsrgb', 'hsvrgb',
'hydro', 'hyi2hyo', 'idsfft', 'igradsf', 'igradsg', 'ilapsf',
'ilapsg', 'ilapvf', 'ilapvg', 'ind', 'ind_resolve', 'int2p',
'int2p_n', 'integertobyte', 'integertochar', 'integertocharacter',
'integertoshort', 'inttobyte', 'inttochar', 'inttoshort',
'inverse_matrix', 'isatt', 'isbigendian', 'isbyte', 'ischar',
'iscoord', 'isdefined', 'isdim', 'isdimnamed', 'isdouble',
'isenumeric', 'isfile', 'isfilepresent', 'isfilevar',
'isfilevaratt', 'isfilevarcoord', 'isfilevardim', 'isfloat',
'isfunc', 'isgraphic', 'isint', 'isint64', 'isinteger',
'isleapyear', 'islogical', 'islong', 'ismissing', 'isnan_ieee',
'isnumeric', 'ispan', 'isproc', 'isshort', 'issnumeric', 'isstring',
'isubyte', 'isuint', 'isuint64', 'isulong', 'isunlimited',
'isunsigned', 'isushort', 'isvar', 'jul2greg', 'kmeans_as136',
'kolsm2_n', 'kron_product', 'lapsf', 'lapsg', 'lapvf', 'lapvg',
'latlon2utm', 'lclvl', 'lderuvf', 'lderuvg', 'linint1', 'linint1_n',
'linint2', 'linint2_points', 'linmsg', 'linmsg_n', 'linrood_latwgt',
'linrood_wgt', 'list_files', 'list_filevars', 'list_hlus',
'list_procfuncs', 'list_vars', 'ListAppend', 'ListCount',
'ListGetType', 'ListIndex', 'ListIndexFromName', 'ListPop',
'ListPush', 'ListSetType', 'loadscript', 'local_max', 'local_min',
'log', 'log10', 'longtobyte', 'longtochar', 'longtocharacter',
'longtoint', 'longtointeger', 'longtoshort', 'lspoly', 'lspoly_n',
'mask', 'max', 'maxind', 'min', 'minind', 'mixed_layer_depth',
'mixhum_ptd', 'mixhum_ptrh', 'mjo_cross_coh2pha',
'mjo_cross_segment', 'moc_globe_atl', 'monthday', 'natgrid',
'natgridd', 'natgrids', 'ncargpath', 'ncargversion', 'ndctodata',
'ndtooned', 'new', 'NewList', 'ngezlogo', 'nggcog', 'nggetp',
'nglogo', 'ngsetp', 'NhlAddAnnotation', 'NhlAddData',
'NhlAddOverlay', 'NhlAddPrimitive', 'NhlAppGetDefaultParentId',
'NhlChangeWorkstation', 'NhlClassName', 'NhlClearWorkstation',
'NhlDataPolygon', 'NhlDataPolyline', 'NhlDataPolymarker',
'NhlDataToNDC', 'NhlDestroy', 'NhlDraw', 'NhlFrame', 'NhlFreeColor',
'NhlGetBB', 'NhlGetClassResources', 'NhlGetErrorObjectId',
'NhlGetNamedColorIndex', 'NhlGetParentId',
'NhlGetParentWorkstation', 'NhlGetWorkspaceObjectId',
'NhlIsAllocatedColor', 'NhlIsApp', 'NhlIsDataComm', 'NhlIsDataItem',
'NhlIsDataSpec', 'NhlIsTransform', 'NhlIsView', 'NhlIsWorkstation',
'NhlName', 'NhlNDCPolygon', 'NhlNDCPolyline', 'NhlNDCPolymarker',
'NhlNDCToData', 'NhlNewColor', 'NhlNewDashPattern', 'NhlNewMarker',
'NhlPalGetDefined', 'NhlRemoveAnnotation', 'NhlRemoveData',
'NhlRemoveOverlay', 'NhlRemovePrimitive', 'NhlSetColor',
'NhlSetDashPattern', 'NhlSetMarker', 'NhlUpdateData',
'NhlUpdateWorkstation', 'nice_mnmxintvl', 'nngetaspectd',
'nngetaspects', 'nngetp', 'nngetsloped', 'nngetslopes', 'nngetwts',
'nngetwtsd', 'nnpnt', 'nnpntd', 'nnpntend', 'nnpntendd',
'nnpntinit', 'nnpntinitd', 'nnpntinits', 'nnpnts', 'nnsetp', 'num',
'obj_anal_ic', 'omega_ccm', 'onedtond', 'overlay', 'paleo_outline',
'pdfxy_bin', 'poisson_grid_fill', 'pop_remap', 'potmp_insitu_ocn',
'prcwater_dp', 'pres2hybrid', 'pres_hybrid_ccm', 'pres_sigma',
'print', 'print_table', 'printFileVarSummary', 'printVarSummary',
'product', 'pslec', 'pslhor', 'pslhyp', 'qsort', 'rand',
'random_chi', 'random_gamma', 'random_normal', 'random_setallseed',
'random_uniform', 'rcm2points', 'rcm2rgrid', 'rdsstoi',
'read_colormap_file', 'reg_multlin', 'regcoef', 'regCoef_n',
'regline', 'relhum', 'replace_ieeenan', 'reshape', 'reshape_ind',
'rgba_to_color_index', 'rgbhls', 'rgbhsv', 'rgbyiq', 'rgrid2rcm',
'rhomb_trunc', 'rip_cape_2d', 'rip_cape_3d', 'round', 'rtest',
'runave', 'runave_n', 'set_default_fillvalue', 'set_sphere_radius',
'setfileoption', 'sfvp2uvf', 'sfvp2uvg', 'shaec', 'shagc',
'shgetnp', 'shgetp', 'shgrid', 'shorttobyte', 'shorttochar',
'shorttocharacter', 'show_ascii', 'shsec', 'shsetp', 'shsgc',
'shsgc_R42', 'sigma2hybrid', 'simpeq', 'simpne', 'sin',
'sindex_yrmo', 'sinh', 'sizeof', 'sleep', 'smth9', 'snindex_yrmo',
'solve_linsys', 'span_color_indexes', 'span_color_rgba',
'sparse_matrix_mult', 'spcorr', 'spcorr_n', 'specx_anal',
'specxy_anal', 'spei', 'sprintf', 'sprinti', 'sqrt', 'sqsort',
'srand', 'stat2', 'stat4', 'stat_medrng', 'stat_trim',
'status_exit', 'stdatmus_p2tdz', 'stdatmus_z2tdp', 'stddev',
'str_capital', 'str_concat', 'str_fields_count', 'str_get_cols',
'str_get_dq', 'str_get_field', 'str_get_nl', 'str_get_sq',
'str_get_tab', 'str_index_of_substr', 'str_insert', 'str_is_blank',
'str_join', 'str_left_strip', 'str_lower', 'str_match',
'str_match_ic', 'str_match_ic_regex', 'str_match_ind',
'str_match_ind_ic', 'str_match_ind_ic_regex', 'str_match_ind_regex',
'str_match_regex', 'str_right_strip', 'str_split',
'str_split_by_length', 'str_split_csv', 'str_squeeze', 'str_strip',
'str_sub_str', 'str_switch', 'str_upper', 'stringtochar',
'stringtocharacter', 'stringtodouble', 'stringtofloat',
'stringtoint', 'stringtointeger', 'stringtolong', 'stringtoshort',
'strlen', 'student_t', 'sum', 'svd_lapack', 'svdcov', 'svdcov_sv',
'svdstd', 'svdstd_sv', 'system', 'systemfunc', 'tan', 'tanh',
'taper', 'taper_n', 'tdclrs', 'tdctri', 'tdcudp', 'tdcurv',
'tddtri', 'tdez2d', 'tdez3d', 'tdgetp', 'tdgrds', 'tdgrid',
'tdgtrs', 'tdinit', 'tditri', 'tdlbla', 'tdlblp', 'tdlbls',
'tdline', 'tdlndp', 'tdlnpa', 'tdlpdp', 'tdmtri', 'tdotri',
'tdpara', 'tdplch', 'tdprpa', 'tdprpi', 'tdprpt', 'tdsetp',
'tdsort', 'tdstri', 'tdstrs', 'tdttri', 'thornthwaite', 'tobyte',
'tochar', 'todouble', 'tofloat', 'toint', 'toint64', 'tointeger',
'tolong', 'toshort', 'tosigned', 'tostring', 'tostring_with_format',
'totype', 'toubyte', 'touint', 'touint64', 'toulong', 'tounsigned',
'toushort', 'trend_manken', 'tri_trunc', 'triple2grid',
'triple2grid2d', 'trop_wmo', 'ttest', 'typeof', 'undef',
'unique_string', 'update', 'ushorttoint', 'ut_calendar',
'ut_inv_calendar', 'utm2latlon', 'uv2dv_cfd', 'uv2dvf', 'uv2dvg',
'uv2sfvpf', 'uv2sfvpg', 'uv2vr_cfd', 'uv2vrdvf', 'uv2vrdvg',
'uv2vrf', 'uv2vrg', 'v5d_close', 'v5d_create', 'v5d_setLowLev',
'v5d_setUnits', 'v5d_write', 'v5d_write_var', 'variance', 'vhaec',
'vhagc', 'vhsec', 'vhsgc', 'vibeta', 'vinth2p', 'vinth2p_ecmwf',
'vinth2p_ecmwf_nodes', 'vinth2p_nodes', 'vintp2p_ecmwf', 'vr2uvf',
'vr2uvg', 'vrdv2uvf', 'vrdv2uvg', 'wavelet', 'wavelet_default',
'weibull', 'wgt_area_smooth', 'wgt_areaave', 'wgt_areaave2',
'wgt_arearmse', 'wgt_arearmse2', 'wgt_areasum2', 'wgt_runave',
'wgt_runave_n', 'wgt_vert_avg_beta', 'wgt_volave', 'wgt_volave_ccm',
'wgt_volrmse', 'wgt_volrmse_ccm', 'where', 'wk_smooth121', 'wmbarb',
'wmbarbmap', 'wmdrft', 'wmgetp', 'wmlabs', 'wmsetp', 'wmstnm',
'wmvect', 'wmvectmap', 'wmvlbl', 'wrf_avo', 'wrf_cape_2d',
'wrf_cape_3d', 'wrf_dbz', 'wrf_eth', 'wrf_helicity', 'wrf_ij_to_ll',
'wrf_interp_1d', 'wrf_interp_2d_xy', 'wrf_interp_3d_z',
'wrf_latlon_to_ij', 'wrf_ll_to_ij', 'wrf_omega', 'wrf_pvo',
'wrf_rh', 'wrf_slp', 'wrf_smooth_2d', 'wrf_td', 'wrf_tk',
'wrf_updraft_helicity', 'wrf_uvmet', 'wrf_virtual_temp',
'wrf_wetbulb', 'wrf_wps_close_int', 'wrf_wps_open_int',
'wrf_wps_rddata_int', 'wrf_wps_rdhead_int', 'wrf_wps_read_int',
'wrf_wps_write_int', 'write_matrix', 'write_table', 'yiqrgb',
'z2geouv', 'zonal_mpsi', 'addfiles_GetVar', 'advect_variable',
'area_conserve_remap_Wrap', 'area_hi2lores_Wrap',
'array_append_record', 'assignFillValue', 'byte2flt',
'byte2flt_hdf', 'calcDayAnomTLL', 'calcMonAnomLLLT',
'calcMonAnomLLT', 'calcMonAnomTLL', 'calcMonAnomTLLL',
'calculate_monthly_values', 'cd_convert', 'changeCase',
'changeCaseChar', 'clmDayTLL', 'clmDayTLLL', 'clmMon2clmDay',
'clmMonLLLT', 'clmMonLLT', 'clmMonTLL', 'clmMonTLLL', 'closest_val',
'copy_VarAtts', 'copy_VarCoords', 'copy_VarCoords_1',
'copy_VarCoords_2', 'copy_VarMeta', 'copyatt', 'crossp3',
'cshstringtolist', 'cssgrid_Wrap', 'dble2flt', 'decimalPlaces',
'delete_VarAtts', 'dim_avg_n_Wrap', 'dim_avg_wgt_n_Wrap',
'dim_avg_wgt_Wrap', 'dim_avg_Wrap', 'dim_cumsum_n_Wrap',
'dim_cumsum_Wrap', 'dim_max_n_Wrap', 'dim_min_n_Wrap',
'dim_rmsd_n_Wrap', 'dim_rmsd_Wrap', 'dim_rmvmean_n_Wrap',
'dim_rmvmean_Wrap', 'dim_rmvmed_n_Wrap', 'dim_rmvmed_Wrap',
'dim_standardize_n_Wrap', 'dim_standardize_Wrap',
'dim_stddev_n_Wrap', 'dim_stddev_Wrap', 'dim_sum_n_Wrap',
'dim_sum_wgt_n_Wrap', 'dim_sum_wgt_Wrap', 'dim_sum_Wrap',
'dim_variance_n_Wrap', 'dim_variance_Wrap', 'dpres_plevel_Wrap',
'dtrend_leftdim', 'dv2uvF_Wrap', 'dv2uvG_Wrap', 'eof_north',
'eofcor_Wrap', 'eofcov_Wrap', 'eofunc_north', 'eofunc_ts_Wrap',
'eofunc_varimax_reorder', 'eofunc_varimax_Wrap', 'eofunc_Wrap',
'epsZero', 'f2fosh_Wrap', 'f2foshv_Wrap', 'f2fsh_Wrap',
'f2fshv_Wrap', 'f2gsh_Wrap', 'f2gshv_Wrap', 'fbindirSwap',
'fbinseqSwap1', 'fbinseqSwap2', 'flt2dble', 'flt2string',
'fo2fsh_Wrap', 'fo2fshv_Wrap', 'g2fsh_Wrap', 'g2fshv_Wrap',
'g2gsh_Wrap', 'g2gshv_Wrap', 'generate_resample_indices',
'generate_sample_indices', 'generate_unique_indices',
'genNormalDist', 'get1Dindex', 'get1Dindex_Collapse',
'get1Dindex_Exclude', 'get_file_suffix', 'GetFillColor',
'GetFillColorIndex', 'getFillValue', 'getind_latlon2d',
'getVarDimNames', 'getVarFillValue', 'grib_stime2itime',
'hyi2hyo_Wrap', 'ilapsF_Wrap', 'ilapsG_Wrap', 'ind_nearest_coord',
'indStrSubset', 'int2dble', 'int2flt', 'int2p_n_Wrap', 'int2p_Wrap',
'isMonotonic', 'isStrSubset', 'latGau', 'latGauWgt', 'latGlobeF',
'latGlobeFo', 'latRegWgt', 'linint1_n_Wrap', 'linint1_Wrap',
'linint2_points_Wrap', 'linint2_Wrap', 'local_max_1d',
'local_min_1d', 'lonFlip', 'lonGlobeF', 'lonGlobeFo', 'lonPivot',
'merge_levels_sfc', 'mod', 'month_to_annual',
'month_to_annual_weighted', 'month_to_season', 'month_to_season12',
'month_to_seasonN', 'monthly_total_to_daily_mean', 'nameDim',
'natgrid_Wrap', 'NewCosWeight', 'niceLatLon2D', 'NormCosWgtGlobe',
'numAsciiCol', 'numAsciiRow', 'numeric2int',
'obj_anal_ic_deprecated', 'obj_anal_ic_Wrap', 'omega_ccm_driver',
'omega_to_w', 'oneDtostring', 'pack_values', 'pattern_cor', 'pdfx',
'pdfxy', 'pdfxy_conform', 'pot_temp', 'pot_vort_hybrid',
'pot_vort_isobaric', 'pres2hybrid_Wrap', 'print_clock',
'printMinMax', 'quadroots', 'rcm2points_Wrap', 'rcm2rgrid_Wrap',
'readAsciiHead', 'readAsciiTable', 'reg_multlin_stats',
'region_ind', 'regline_stats', 'relhum_ttd', 'replaceSingleChar',
'RGBtoCmap', 'rgrid2rcm_Wrap', 'rho_mwjf', 'rm_single_dims',
'rmAnnCycle1D', 'rmInsufData', 'rmMonAnnCycLLLT', 'rmMonAnnCycLLT',
'rmMonAnnCycTLL', 'runave_n_Wrap', 'runave_Wrap', 'short2flt',
'short2flt_hdf', 'shsgc_R42_Wrap', 'sign_f90', 'sign_matlab',
'smth9_Wrap', 'smthClmDayTLL', 'smthClmDayTLLL', 'SqrtCosWeight',
'stat_dispersion', 'static_stability', 'stdMonLLLT', 'stdMonLLT',
'stdMonTLL', 'stdMonTLLL', 'symMinMaxPlt', 'table_attach_columns',
'table_attach_rows', 'time_to_newtime', 'transpose',
'triple2grid_Wrap', 'ut_convert', 'uv2dvF_Wrap', 'uv2dvG_Wrap',
'uv2vrF_Wrap', 'uv2vrG_Wrap', 'vr2uvF_Wrap', 'vr2uvG_Wrap',
'w_to_omega', 'wallClockElapseTime', 'wave_number_spc',
'wgt_areaave_Wrap', 'wgt_runave_leftdim', 'wgt_runave_n_Wrap',
'wgt_runave_Wrap', 'wgt_vertical_n', 'wind_component',
'wind_direction', 'yyyyddd_to_yyyymmdd', 'yyyymm_time',
'yyyymm_to_yyyyfrac', 'yyyymmdd_time', 'yyyymmdd_to_yyyyddd',
'yyyymmdd_to_yyyyfrac', 'yyyymmddhh_time', 'yyyymmddhh_to_yyyyfrac',
'zonal_mpsi_Wrap', 'zonalAve', 'calendar_decode2', 'cd_string',
'kf_filter', 'run_cor', 'time_axis_labels', 'ut_string',
'wrf_contour', 'wrf_map', 'wrf_map_overlay', 'wrf_map_overlays',
'wrf_map_resources', 'wrf_map_zoom', 'wrf_overlay', 'wrf_overlays',
'wrf_user_getvar', 'wrf_user_ij_to_ll', 'wrf_user_intrp2d',
'wrf_user_intrp3d', 'wrf_user_latlon_to_ij', 'wrf_user_list_times',
'wrf_user_ll_to_ij', 'wrf_user_unstagger', 'wrf_user_vert_interp',
'wrf_vector', 'gsn_add_annotation', 'gsn_add_polygon',
'gsn_add_polyline', 'gsn_add_polymarker',
'gsn_add_shapefile_polygons', 'gsn_add_shapefile_polylines',
'gsn_add_shapefile_polymarkers', 'gsn_add_text', 'gsn_attach_plots',
'gsn_blank_plot', 'gsn_contour', 'gsn_contour_map',
'gsn_contour_shade', 'gsn_coordinates', 'gsn_create_labelbar',
'gsn_create_legend', 'gsn_create_text',
'gsn_csm_attach_zonal_means', 'gsn_csm_blank_plot',
'gsn_csm_contour', 'gsn_csm_contour_map', 'gsn_csm_contour_map_ce',
'gsn_csm_contour_map_overlay', 'gsn_csm_contour_map_polar',
'gsn_csm_hov', 'gsn_csm_lat_time', 'gsn_csm_map', 'gsn_csm_map_ce',
'gsn_csm_map_polar', 'gsn_csm_pres_hgt',
'gsn_csm_pres_hgt_streamline', 'gsn_csm_pres_hgt_vector',
'gsn_csm_streamline', 'gsn_csm_streamline_contour_map',
'gsn_csm_streamline_contour_map_ce',
'gsn_csm_streamline_contour_map_polar', 'gsn_csm_streamline_map',
'gsn_csm_streamline_map_ce', 'gsn_csm_streamline_map_polar',
'gsn_csm_streamline_scalar', 'gsn_csm_streamline_scalar_map',
'gsn_csm_streamline_scalar_map_ce',
'gsn_csm_streamline_scalar_map_polar', 'gsn_csm_time_lat',
'gsn_csm_vector', 'gsn_csm_vector_map', 'gsn_csm_vector_map_ce',
'gsn_csm_vector_map_polar', 'gsn_csm_vector_scalar',
'gsn_csm_vector_scalar_map', 'gsn_csm_vector_scalar_map_ce',
'gsn_csm_vector_scalar_map_polar', 'gsn_csm_x2y', 'gsn_csm_x2y2',
'gsn_csm_xy', 'gsn_csm_xy2', 'gsn_csm_xy3', 'gsn_csm_y',
'gsn_define_colormap', 'gsn_draw_colormap', 'gsn_draw_named_colors',
'gsn_histogram', 'gsn_labelbar_ndc', 'gsn_legend_ndc', 'gsn_map',
'gsn_merge_colormaps', 'gsn_open_wks', 'gsn_panel', 'gsn_polygon',
'gsn_polygon_ndc', 'gsn_polyline', 'gsn_polyline_ndc',
'gsn_polymarker', 'gsn_polymarker_ndc', 'gsn_retrieve_colormap',
'gsn_reverse_colormap', 'gsn_streamline', 'gsn_streamline_map',
'gsn_streamline_scalar', 'gsn_streamline_scalar_map', 'gsn_table',
'gsn_text', 'gsn_text_ndc', 'gsn_vector', 'gsn_vector_map',
'gsn_vector_scalar', 'gsn_vector_scalar_map', 'gsn_xy', 'gsn_y',
'hsv2rgb', 'maximize_output', 'namedcolor2rgb', 'namedcolor2rgba',
'reset_device_coordinates', 'span_named_colors'), prefix=r'\b'),
Name.Builtin),
# Resources
(words((
'amDataXF', 'amDataYF', 'amJust', 'amOn', 'amOrthogonalPosF',
'amParallelPosF', 'amResizeNotify', 'amSide', 'amTrackData',
'amViewId', 'amZone', 'appDefaultParent', 'appFileSuffix',
'appResources', 'appSysDir', 'appUsrDir', 'caCopyArrays',
'caXArray', 'caXCast', 'caXMaxV', 'caXMinV', 'caXMissingV',
'caYArray', 'caYCast', 'caYMaxV', 'caYMinV', 'caYMissingV',
'cnCellFillEdgeColor', 'cnCellFillMissingValEdgeColor',
'cnConpackParams', 'cnConstFEnableFill', 'cnConstFLabelAngleF',
'cnConstFLabelBackgroundColor', 'cnConstFLabelConstantSpacingF',
'cnConstFLabelFont', 'cnConstFLabelFontAspectF',
'cnConstFLabelFontColor', 'cnConstFLabelFontHeightF',
'cnConstFLabelFontQuality', 'cnConstFLabelFontThicknessF',
'cnConstFLabelFormat', 'cnConstFLabelFuncCode', 'cnConstFLabelJust',
'cnConstFLabelOn', 'cnConstFLabelOrthogonalPosF',
'cnConstFLabelParallelPosF', 'cnConstFLabelPerimColor',
'cnConstFLabelPerimOn', 'cnConstFLabelPerimSpaceF',
'cnConstFLabelPerimThicknessF', 'cnConstFLabelSide',
'cnConstFLabelString', 'cnConstFLabelTextDirection',
'cnConstFLabelZone', 'cnConstFUseInfoLabelRes',
'cnExplicitLabelBarLabelsOn', 'cnExplicitLegendLabelsOn',
'cnExplicitLineLabelsOn', 'cnFillBackgroundColor', 'cnFillColor',
'cnFillColors', 'cnFillDotSizeF', 'cnFillDrawOrder', 'cnFillMode',
'cnFillOn', 'cnFillOpacityF', 'cnFillPalette', 'cnFillPattern',
'cnFillPatterns', 'cnFillScaleF', 'cnFillScales', 'cnFixFillBleed',
'cnGridBoundFillColor', 'cnGridBoundFillPattern',
'cnGridBoundFillScaleF', 'cnGridBoundPerimColor',
'cnGridBoundPerimDashPattern', 'cnGridBoundPerimOn',
'cnGridBoundPerimThicknessF', 'cnHighLabelAngleF',
'cnHighLabelBackgroundColor', 'cnHighLabelConstantSpacingF',
'cnHighLabelCount', 'cnHighLabelFont', 'cnHighLabelFontAspectF',
'cnHighLabelFontColor', 'cnHighLabelFontHeightF',
'cnHighLabelFontQuality', 'cnHighLabelFontThicknessF',
'cnHighLabelFormat', 'cnHighLabelFuncCode', 'cnHighLabelPerimColor',
'cnHighLabelPerimOn', 'cnHighLabelPerimSpaceF',
'cnHighLabelPerimThicknessF', 'cnHighLabelString', 'cnHighLabelsOn',
'cnHighLowLabelOverlapMode', 'cnHighUseLineLabelRes',
'cnInfoLabelAngleF', 'cnInfoLabelBackgroundColor',
'cnInfoLabelConstantSpacingF', 'cnInfoLabelFont',
'cnInfoLabelFontAspectF', 'cnInfoLabelFontColor',
'cnInfoLabelFontHeightF', 'cnInfoLabelFontQuality',
'cnInfoLabelFontThicknessF', 'cnInfoLabelFormat',
'cnInfoLabelFuncCode', 'cnInfoLabelJust', 'cnInfoLabelOn',
'cnInfoLabelOrthogonalPosF', 'cnInfoLabelParallelPosF',
'cnInfoLabelPerimColor', 'cnInfoLabelPerimOn',
'cnInfoLabelPerimSpaceF', 'cnInfoLabelPerimThicknessF',
'cnInfoLabelSide', 'cnInfoLabelString', 'cnInfoLabelTextDirection',
'cnInfoLabelZone', 'cnLabelBarEndLabelsOn', 'cnLabelBarEndStyle',
'cnLabelDrawOrder', 'cnLabelMasking', 'cnLabelScaleFactorF',
'cnLabelScaleValueF', 'cnLabelScalingMode', 'cnLegendLevelFlags',
'cnLevelCount', 'cnLevelFlag', 'cnLevelFlags', 'cnLevelSelectionMode',
'cnLevelSpacingF', 'cnLevels', 'cnLineColor', 'cnLineColors',
'cnLineDashPattern', 'cnLineDashPatterns', 'cnLineDashSegLenF',
'cnLineDrawOrder', 'cnLineLabelAngleF', 'cnLineLabelBackgroundColor',
'cnLineLabelConstantSpacingF', 'cnLineLabelCount',
'cnLineLabelDensityF', 'cnLineLabelFont', 'cnLineLabelFontAspectF',
'cnLineLabelFontColor', 'cnLineLabelFontColors',
'cnLineLabelFontHeightF', 'cnLineLabelFontQuality',
'cnLineLabelFontThicknessF', 'cnLineLabelFormat',
'cnLineLabelFuncCode', 'cnLineLabelInterval', 'cnLineLabelPerimColor',
'cnLineLabelPerimOn', 'cnLineLabelPerimSpaceF',
'cnLineLabelPerimThicknessF', 'cnLineLabelPlacementMode',
'cnLineLabelStrings', 'cnLineLabelsOn', 'cnLinePalette',
'cnLineThicknessF', 'cnLineThicknesses', 'cnLinesOn',
'cnLowLabelAngleF', 'cnLowLabelBackgroundColor',
'cnLowLabelConstantSpacingF', 'cnLowLabelCount', 'cnLowLabelFont',
'cnLowLabelFontAspectF', 'cnLowLabelFontColor',
'cnLowLabelFontHeightF', 'cnLowLabelFontQuality',
'cnLowLabelFontThicknessF', 'cnLowLabelFormat', 'cnLowLabelFuncCode',
'cnLowLabelPerimColor', 'cnLowLabelPerimOn', 'cnLowLabelPerimSpaceF',
'cnLowLabelPerimThicknessF', 'cnLowLabelString', 'cnLowLabelsOn',
'cnLowUseHighLabelRes', 'cnMaxDataValueFormat', 'cnMaxLevelCount',
'cnMaxLevelValF', 'cnMaxPointDistanceF', 'cnMinLevelValF',
'cnMissingValFillColor', 'cnMissingValFillPattern',
'cnMissingValFillScaleF', 'cnMissingValPerimColor',
'cnMissingValPerimDashPattern', 'cnMissingValPerimGridBoundOn',
'cnMissingValPerimOn', 'cnMissingValPerimThicknessF',
'cnMonoFillColor', 'cnMonoFillPattern', 'cnMonoFillScale',
'cnMonoLevelFlag', 'cnMonoLineColor', 'cnMonoLineDashPattern',
'cnMonoLineLabelFontColor', 'cnMonoLineThickness', 'cnNoDataLabelOn',
'cnNoDataLabelString', 'cnOutOfRangeFillColor',
'cnOutOfRangeFillPattern', 'cnOutOfRangeFillScaleF',
'cnOutOfRangePerimColor', 'cnOutOfRangePerimDashPattern',
'cnOutOfRangePerimOn', 'cnOutOfRangePerimThicknessF',
'cnRasterCellSizeF', 'cnRasterMinCellSizeF', 'cnRasterModeOn',
'cnRasterSampleFactorF', 'cnRasterSmoothingOn', 'cnScalarFieldData',
'cnSmoothingDistanceF', 'cnSmoothingOn', 'cnSmoothingTensionF',
'cnSpanFillPalette', 'cnSpanLinePalette', 'ctCopyTables',
'ctXElementSize', 'ctXMaxV', 'ctXMinV', 'ctXMissingV', 'ctXTable',
'ctXTableLengths', 'ctXTableType', 'ctYElementSize', 'ctYMaxV',
'ctYMinV', 'ctYMissingV', 'ctYTable', 'ctYTableLengths',
'ctYTableType', 'dcDelayCompute', 'errBuffer',
'errFileName', 'errFilePtr', 'errLevel', 'errPrint', 'errUnitNumber',
'gsClipOn', 'gsColors', 'gsEdgeColor', 'gsEdgeDashPattern',
'gsEdgeDashSegLenF', 'gsEdgeThicknessF', 'gsEdgesOn',
'gsFillBackgroundColor', 'gsFillColor', 'gsFillDotSizeF',
'gsFillIndex', 'gsFillLineThicknessF', 'gsFillOpacityF',
'gsFillScaleF', 'gsFont', 'gsFontAspectF', 'gsFontColor',
'gsFontHeightF', 'gsFontOpacityF', 'gsFontQuality',
'gsFontThicknessF', 'gsLineColor', 'gsLineDashPattern',
'gsLineDashSegLenF', 'gsLineLabelConstantSpacingF', 'gsLineLabelFont',
'gsLineLabelFontAspectF', 'gsLineLabelFontColor',
'gsLineLabelFontHeightF', 'gsLineLabelFontQuality',
'gsLineLabelFontThicknessF', 'gsLineLabelFuncCode',
'gsLineLabelString', 'gsLineOpacityF', 'gsLineThicknessF',
'gsMarkerColor', 'gsMarkerIndex', 'gsMarkerOpacityF', 'gsMarkerSizeF',
'gsMarkerThicknessF', 'gsSegments', 'gsTextAngleF',
'gsTextConstantSpacingF', 'gsTextDirection', 'gsTextFuncCode',
'gsTextJustification', 'gsnAboveYRefLineBarColors',
'gsnAboveYRefLineBarFillScales', 'gsnAboveYRefLineBarPatterns',
'gsnAboveYRefLineColor', 'gsnAddCyclic', 'gsnAttachBorderOn',
'gsnAttachPlotsXAxis', 'gsnBelowYRefLineBarColors',
'gsnBelowYRefLineBarFillScales', 'gsnBelowYRefLineBarPatterns',
'gsnBelowYRefLineColor', 'gsnBoxMargin', 'gsnCenterString',
'gsnCenterStringFontColor', 'gsnCenterStringFontHeightF',
'gsnCenterStringFuncCode', 'gsnCenterStringOrthogonalPosF',
'gsnCenterStringParallelPosF', 'gsnContourLineThicknessesScale',
'gsnContourNegLineDashPattern', 'gsnContourPosLineDashPattern',
'gsnContourZeroLineThicknessF', 'gsnDebugWriteFileName', 'gsnDraw',
'gsnFrame', 'gsnHistogramBarWidthPercent', 'gsnHistogramBinIntervals',
'gsnHistogramBinMissing', 'gsnHistogramBinWidth',
'gsnHistogramClassIntervals', 'gsnHistogramCompare',
'gsnHistogramComputePercentages',
'gsnHistogramComputePercentagesNoMissing',
'gsnHistogramDiscreteBinValues', 'gsnHistogramDiscreteClassValues',
'gsnHistogramHorizontal', 'gsnHistogramMinMaxBinsOn',
'gsnHistogramNumberOfBins', 'gsnHistogramPercentSign',
'gsnHistogramSelectNiceIntervals', 'gsnLeftString',
'gsnLeftStringFontColor', 'gsnLeftStringFontHeightF',
'gsnLeftStringFuncCode', 'gsnLeftStringOrthogonalPosF',
'gsnLeftStringParallelPosF', 'gsnMajorLatSpacing',
'gsnMajorLonSpacing', 'gsnMaskLambertConformal',
'gsnMaskLambertConformalOutlineOn', 'gsnMaximize',
'gsnMinorLatSpacing', 'gsnMinorLonSpacing', 'gsnPanelBottom',
'gsnPanelCenter', 'gsnPanelDebug', 'gsnPanelFigureStrings',
'gsnPanelFigureStringsBackgroundFillColor',
'gsnPanelFigureStringsFontHeightF', 'gsnPanelFigureStringsJust',
'gsnPanelFigureStringsPerimOn', 'gsnPanelLabelBar', 'gsnPanelLeft',
'gsnPanelMainFont', 'gsnPanelMainFontColor',
'gsnPanelMainFontHeightF', 'gsnPanelMainString', 'gsnPanelRight',
'gsnPanelRowSpec', 'gsnPanelScalePlotIndex', 'gsnPanelTop',
'gsnPanelXF', 'gsnPanelXWhiteSpacePercent', 'gsnPanelYF',
'gsnPanelYWhiteSpacePercent', 'gsnPaperHeight', 'gsnPaperMargin',
'gsnPaperOrientation', 'gsnPaperWidth', 'gsnPolar',
'gsnPolarLabelDistance', 'gsnPolarLabelFont',
'gsnPolarLabelFontHeightF', 'gsnPolarLabelSpacing', 'gsnPolarTime',
'gsnPolarUT', 'gsnRightString', 'gsnRightStringFontColor',
'gsnRightStringFontHeightF', 'gsnRightStringFuncCode',
'gsnRightStringOrthogonalPosF', 'gsnRightStringParallelPosF',
'gsnScalarContour', 'gsnScale', 'gsnShape', 'gsnSpreadColorEnd',
'gsnSpreadColorStart', 'gsnSpreadColors', 'gsnStringFont',
'gsnStringFontColor', 'gsnStringFontHeightF', 'gsnStringFuncCode',
'gsnTickMarksOn', 'gsnXAxisIrregular2Linear', 'gsnXAxisIrregular2Log',
'gsnXRefLine', 'gsnXRefLineColor', 'gsnXRefLineDashPattern',
'gsnXRefLineThicknessF', 'gsnXYAboveFillColors', 'gsnXYBarChart',
'gsnXYBarChartBarWidth', 'gsnXYBarChartColors',
'gsnXYBarChartColors2', 'gsnXYBarChartFillDotSizeF',
'gsnXYBarChartFillLineThicknessF', 'gsnXYBarChartFillOpacityF',
'gsnXYBarChartFillScaleF', 'gsnXYBarChartOutlineOnly',
'gsnXYBarChartOutlineThicknessF', 'gsnXYBarChartPatterns',
'gsnXYBarChartPatterns2', 'gsnXYBelowFillColors', 'gsnXYFillColors',
'gsnXYFillOpacities', 'gsnXYLeftFillColors', 'gsnXYRightFillColors',
'gsnYAxisIrregular2Linear', 'gsnYAxisIrregular2Log', 'gsnYRefLine',
'gsnYRefLineColor', 'gsnYRefLineColors', 'gsnYRefLineDashPattern',
'gsnYRefLineDashPatterns', 'gsnYRefLineThicknessF',
'gsnYRefLineThicknesses', 'gsnZonalMean', 'gsnZonalMeanXMaxF',
'gsnZonalMeanXMinF', 'gsnZonalMeanYRefLine', 'lbAutoManage',
'lbBottomMarginF', 'lbBoxCount', 'lbBoxEndCapStyle', 'lbBoxFractions',
'lbBoxLineColor', 'lbBoxLineDashPattern', 'lbBoxLineDashSegLenF',
'lbBoxLineThicknessF', 'lbBoxLinesOn', 'lbBoxMajorExtentF',
'lbBoxMinorExtentF', 'lbBoxSeparatorLinesOn', 'lbBoxSizing',
'lbFillBackground', 'lbFillColor', 'lbFillColors', 'lbFillDotSizeF',
'lbFillLineThicknessF', 'lbFillPattern', 'lbFillPatterns',
'lbFillScaleF', 'lbFillScales', 'lbJustification', 'lbLabelAlignment',
'lbLabelAngleF', 'lbLabelAutoStride', 'lbLabelBarOn',
'lbLabelConstantSpacingF', 'lbLabelDirection', 'lbLabelFont',
'lbLabelFontAspectF', 'lbLabelFontColor', 'lbLabelFontHeightF',
'lbLabelFontQuality', 'lbLabelFontThicknessF', 'lbLabelFuncCode',
'lbLabelJust', 'lbLabelOffsetF', 'lbLabelPosition', 'lbLabelStride',
'lbLabelStrings', 'lbLabelsOn', 'lbLeftMarginF', 'lbMaxLabelLenF',
'lbMinLabelSpacingF', 'lbMonoFillColor', 'lbMonoFillPattern',
'lbMonoFillScale', 'lbOrientation', 'lbPerimColor',
'lbPerimDashPattern', 'lbPerimDashSegLenF', 'lbPerimFill',
'lbPerimFillColor', 'lbPerimOn', 'lbPerimThicknessF',
'lbRasterFillOn', 'lbRightMarginF', 'lbTitleAngleF',
'lbTitleConstantSpacingF', 'lbTitleDirection', 'lbTitleExtentF',
'lbTitleFont', 'lbTitleFontAspectF', 'lbTitleFontColor',
'lbTitleFontHeightF', 'lbTitleFontQuality', 'lbTitleFontThicknessF',
'lbTitleFuncCode', 'lbTitleJust', 'lbTitleOffsetF', 'lbTitleOn',
'lbTitlePosition', 'lbTitleString', 'lbTopMarginF', 'lgAutoManage',
'lgBottomMarginF', 'lgBoxBackground', 'lgBoxLineColor',
'lgBoxLineDashPattern', 'lgBoxLineDashSegLenF', 'lgBoxLineThicknessF',
'lgBoxLinesOn', 'lgBoxMajorExtentF', 'lgBoxMinorExtentF',
'lgDashIndex', 'lgDashIndexes', 'lgItemCount', 'lgItemOrder',
'lgItemPlacement', 'lgItemPositions', 'lgItemType', 'lgItemTypes',
'lgJustification', 'lgLabelAlignment', 'lgLabelAngleF',
'lgLabelAutoStride', 'lgLabelConstantSpacingF', 'lgLabelDirection',
'lgLabelFont', 'lgLabelFontAspectF', 'lgLabelFontColor',
'lgLabelFontHeightF', 'lgLabelFontQuality', 'lgLabelFontThicknessF',
'lgLabelFuncCode', 'lgLabelJust', 'lgLabelOffsetF', 'lgLabelPosition',
'lgLabelStride', 'lgLabelStrings', 'lgLabelsOn', 'lgLeftMarginF',
'lgLegendOn', 'lgLineColor', 'lgLineColors', 'lgLineDashSegLenF',
'lgLineDashSegLens', 'lgLineLabelConstantSpacingF', 'lgLineLabelFont',
'lgLineLabelFontAspectF', 'lgLineLabelFontColor',
'lgLineLabelFontColors', 'lgLineLabelFontHeightF',
'lgLineLabelFontHeights', 'lgLineLabelFontQuality',
'lgLineLabelFontThicknessF', 'lgLineLabelFuncCode',
'lgLineLabelStrings', 'lgLineLabelsOn', 'lgLineThicknessF',
'lgLineThicknesses', 'lgMarkerColor', 'lgMarkerColors',
'lgMarkerIndex', 'lgMarkerIndexes', 'lgMarkerSizeF', 'lgMarkerSizes',
'lgMarkerThicknessF', 'lgMarkerThicknesses', 'lgMonoDashIndex',
'lgMonoItemType', 'lgMonoLineColor', 'lgMonoLineDashSegLen',
'lgMonoLineLabelFontColor', 'lgMonoLineLabelFontHeight',
'lgMonoLineThickness', 'lgMonoMarkerColor', 'lgMonoMarkerIndex',
'lgMonoMarkerSize', 'lgMonoMarkerThickness', 'lgOrientation',
'lgPerimColor', 'lgPerimDashPattern', 'lgPerimDashSegLenF',
'lgPerimFill', 'lgPerimFillColor', 'lgPerimOn', 'lgPerimThicknessF',
'lgRightMarginF', 'lgTitleAngleF', 'lgTitleConstantSpacingF',
'lgTitleDirection', 'lgTitleExtentF', 'lgTitleFont',
'lgTitleFontAspectF', 'lgTitleFontColor', 'lgTitleFontHeightF',
'lgTitleFontQuality', 'lgTitleFontThicknessF', 'lgTitleFuncCode',
'lgTitleJust', 'lgTitleOffsetF', 'lgTitleOn', 'lgTitlePosition',
'lgTitleString', 'lgTopMarginF', 'mpAreaGroupCount',
'mpAreaMaskingOn', 'mpAreaNames', 'mpAreaTypes', 'mpBottomAngleF',
'mpBottomMapPosF', 'mpBottomNDCF', 'mpBottomNPCF',
'mpBottomPointLatF', 'mpBottomPointLonF', 'mpBottomWindowF',
'mpCenterLatF', 'mpCenterLonF', 'mpCenterRotF', 'mpCountyLineColor',
'mpCountyLineDashPattern', 'mpCountyLineDashSegLenF',
'mpCountyLineThicknessF', 'mpDataBaseVersion', 'mpDataResolution',
'mpDataSetName', 'mpDefaultFillColor', 'mpDefaultFillPattern',
'mpDefaultFillScaleF', 'mpDynamicAreaGroups', 'mpEllipticalBoundary',
'mpFillAreaSpecifiers', 'mpFillBoundarySets', 'mpFillColor',
'mpFillColors', 'mpFillColors-default', 'mpFillDotSizeF',
'mpFillDrawOrder', 'mpFillOn', 'mpFillPatternBackground',
'mpFillPattern', 'mpFillPatterns', 'mpFillPatterns-default',
'mpFillScaleF', 'mpFillScales', 'mpFillScales-default',
'mpFixedAreaGroups', 'mpGeophysicalLineColor',
'mpGeophysicalLineDashPattern', 'mpGeophysicalLineDashSegLenF',
'mpGeophysicalLineThicknessF', 'mpGreatCircleLinesOn',
'mpGridAndLimbDrawOrder', 'mpGridAndLimbOn', 'mpGridLatSpacingF',
'mpGridLineColor', 'mpGridLineDashPattern', 'mpGridLineDashSegLenF',
'mpGridLineThicknessF', 'mpGridLonSpacingF', 'mpGridMaskMode',
'mpGridMaxLatF', 'mpGridPolarLonSpacingF', 'mpGridSpacingF',
'mpInlandWaterFillColor', 'mpInlandWaterFillPattern',
'mpInlandWaterFillScaleF', 'mpLabelDrawOrder', 'mpLabelFontColor',
'mpLabelFontHeightF', 'mpLabelsOn', 'mpLambertMeridianF',
'mpLambertParallel1F', 'mpLambertParallel2F', 'mpLandFillColor',
'mpLandFillPattern', 'mpLandFillScaleF', 'mpLeftAngleF',
'mpLeftCornerLatF', 'mpLeftCornerLonF', 'mpLeftMapPosF',
'mpLeftNDCF', 'mpLeftNPCF', 'mpLeftPointLatF',
'mpLeftPointLonF', 'mpLeftWindowF', 'mpLimbLineColor',
'mpLimbLineDashPattern', 'mpLimbLineDashSegLenF',
'mpLimbLineThicknessF', 'mpLimitMode', 'mpMaskAreaSpecifiers',
'mpMaskOutlineSpecifiers', 'mpMaxLatF', 'mpMaxLonF',
'mpMinLatF', 'mpMinLonF', 'mpMonoFillColor', 'mpMonoFillPattern',
'mpMonoFillScale', 'mpNationalLineColor', 'mpNationalLineDashPattern',
'mpNationalLineThicknessF', 'mpOceanFillColor', 'mpOceanFillPattern',
'mpOceanFillScaleF', 'mpOutlineBoundarySets', 'mpOutlineDrawOrder',
'mpOutlineMaskingOn', 'mpOutlineOn', 'mpOutlineSpecifiers',
'mpPerimDrawOrder', 'mpPerimLineColor', 'mpPerimLineDashPattern',
'mpPerimLineDashSegLenF', 'mpPerimLineThicknessF', 'mpPerimOn',
'mpPolyMode', 'mpProjection', 'mpProvincialLineColor',
'mpProvincialLineDashPattern', 'mpProvincialLineDashSegLenF',
'mpProvincialLineThicknessF', 'mpRelativeCenterLat',
'mpRelativeCenterLon', 'mpRightAngleF', 'mpRightCornerLatF',
'mpRightCornerLonF', 'mpRightMapPosF', 'mpRightNDCF',
'mpRightNPCF', 'mpRightPointLatF', 'mpRightPointLonF',
'mpRightWindowF', 'mpSatelliteAngle1F', 'mpSatelliteAngle2F',
'mpSatelliteDistF', 'mpShapeMode', 'mpSpecifiedFillColors',
'mpSpecifiedFillDirectIndexing', 'mpSpecifiedFillPatterns',
'mpSpecifiedFillPriority', 'mpSpecifiedFillScales',
'mpTopAngleF', 'mpTopMapPosF', 'mpTopNDCF', 'mpTopNPCF',
'mpTopPointLatF', 'mpTopPointLonF', 'mpTopWindowF',
'mpUSStateLineColor', 'mpUSStateLineDashPattern',
'mpUSStateLineDashSegLenF', 'mpUSStateLineThicknessF',
'pmAnnoManagers', 'pmAnnoViews', 'pmLabelBarDisplayMode',
'pmLabelBarHeightF', 'pmLabelBarKeepAspect', 'pmLabelBarOrthogonalPosF',
'pmLabelBarParallelPosF', 'pmLabelBarSide', 'pmLabelBarWidthF',
'pmLabelBarZone', 'pmLegendDisplayMode', 'pmLegendHeightF',
'pmLegendKeepAspect', 'pmLegendOrthogonalPosF',
'pmLegendParallelPosF', 'pmLegendSide', 'pmLegendWidthF',
'pmLegendZone', 'pmOverlaySequenceIds', 'pmTickMarkDisplayMode',
'pmTickMarkZone', 'pmTitleDisplayMode', 'pmTitleZone',
'prGraphicStyle', 'prPolyType', 'prXArray', 'prYArray',
'sfCopyData', 'sfDataArray', 'sfDataMaxV', 'sfDataMinV',
'sfElementNodes', 'sfExchangeDimensions', 'sfFirstNodeIndex',
'sfMissingValueV', 'sfXArray', 'sfXCActualEndF', 'sfXCActualStartF',
'sfXCEndIndex', 'sfXCEndSubsetV', 'sfXCEndV', 'sfXCStartIndex',
'sfXCStartSubsetV', 'sfXCStartV', 'sfXCStride', 'sfXCellBounds',
'sfYArray', 'sfYCActualEndF', 'sfYCActualStartF', 'sfYCEndIndex',
'sfYCEndSubsetV', 'sfYCEndV', 'sfYCStartIndex', 'sfYCStartSubsetV',
'sfYCStartV', 'sfYCStride', 'sfYCellBounds', 'stArrowLengthF',
'stArrowStride', 'stCrossoverCheckCount',
'stExplicitLabelBarLabelsOn', 'stLabelBarEndLabelsOn',
'stLabelFormat', 'stLengthCheckCount', 'stLevelColors',
'stLevelCount', 'stLevelPalette', 'stLevelSelectionMode',
'stLevelSpacingF', 'stLevels', 'stLineColor', 'stLineOpacityF',
'stLineStartStride', 'stLineThicknessF', 'stMapDirection',
'stMaxLevelCount', 'stMaxLevelValF', 'stMinArrowSpacingF',
'stMinDistanceF', 'stMinLevelValF', 'stMinLineSpacingF',
'stMinStepFactorF', 'stMonoLineColor', 'stNoDataLabelOn',
'stNoDataLabelString', 'stScalarFieldData', 'stScalarMissingValColor',
'stSpanLevelPalette', 'stStepSizeF', 'stStreamlineDrawOrder',
'stUseScalarArray', 'stVectorFieldData', 'stZeroFLabelAngleF',
'stZeroFLabelBackgroundColor', 'stZeroFLabelConstantSpacingF',
'stZeroFLabelFont', 'stZeroFLabelFontAspectF',
'stZeroFLabelFontColor', 'stZeroFLabelFontHeightF',
'stZeroFLabelFontQuality', 'stZeroFLabelFontThicknessF',
'stZeroFLabelFuncCode', 'stZeroFLabelJust', 'stZeroFLabelOn',
'stZeroFLabelOrthogonalPosF', 'stZeroFLabelParallelPosF',
'stZeroFLabelPerimColor', 'stZeroFLabelPerimOn',
'stZeroFLabelPerimSpaceF', 'stZeroFLabelPerimThicknessF',
'stZeroFLabelSide', 'stZeroFLabelString', 'stZeroFLabelTextDirection',
'stZeroFLabelZone', 'tfDoNDCOverlay', 'tfPlotManagerOn',
'tfPolyDrawList', 'tfPolyDrawOrder', 'tiDeltaF', 'tiMainAngleF',
'tiMainConstantSpacingF', 'tiMainDirection', 'tiMainFont',
'tiMainFontAspectF', 'tiMainFontColor', 'tiMainFontHeightF',
'tiMainFontQuality', 'tiMainFontThicknessF', 'tiMainFuncCode',
'tiMainJust', 'tiMainOffsetXF', 'tiMainOffsetYF', 'tiMainOn',
'tiMainPosition', 'tiMainSide', 'tiMainString', 'tiUseMainAttributes',
'tiXAxisAngleF', 'tiXAxisConstantSpacingF', 'tiXAxisDirection',
'tiXAxisFont', 'tiXAxisFontAspectF', 'tiXAxisFontColor',
'tiXAxisFontHeightF', 'tiXAxisFontQuality', 'tiXAxisFontThicknessF',
'tiXAxisFuncCode', 'tiXAxisJust', 'tiXAxisOffsetXF',
'tiXAxisOffsetYF', 'tiXAxisOn', 'tiXAxisPosition', 'tiXAxisSide',
'tiXAxisString', 'tiYAxisAngleF', 'tiYAxisConstantSpacingF',
'tiYAxisDirection', 'tiYAxisFont', 'tiYAxisFontAspectF',
'tiYAxisFontColor', 'tiYAxisFontHeightF', 'tiYAxisFontQuality',
'tiYAxisFontThicknessF', 'tiYAxisFuncCode', 'tiYAxisJust',
'tiYAxisOffsetXF', 'tiYAxisOffsetYF', 'tiYAxisOn', 'tiYAxisPosition',
'tiYAxisSide', 'tiYAxisString', 'tmBorderLineColor',
'tmBorderThicknessF', 'tmEqualizeXYSizes', 'tmLabelAutoStride',
'tmSciNoteCutoff', 'tmXBAutoPrecision', 'tmXBBorderOn',
'tmXBDataLeftF', 'tmXBDataRightF', 'tmXBFormat', 'tmXBIrrTensionF',
'tmXBIrregularPoints', 'tmXBLabelAngleF', 'tmXBLabelConstantSpacingF',
'tmXBLabelDeltaF', 'tmXBLabelDirection', 'tmXBLabelFont',
'tmXBLabelFontAspectF', 'tmXBLabelFontColor', 'tmXBLabelFontHeightF',
'tmXBLabelFontQuality', 'tmXBLabelFontThicknessF',
'tmXBLabelFuncCode', 'tmXBLabelJust', 'tmXBLabelStride', 'tmXBLabels',
'tmXBLabelsOn', 'tmXBMajorLengthF', 'tmXBMajorLineColor',
'tmXBMajorOutwardLengthF', 'tmXBMajorThicknessF', 'tmXBMaxLabelLenF',
'tmXBMaxTicks', 'tmXBMinLabelSpacingF', 'tmXBMinorLengthF',
'tmXBMinorLineColor', 'tmXBMinorOn', 'tmXBMinorOutwardLengthF',
'tmXBMinorPerMajor', 'tmXBMinorThicknessF', 'tmXBMinorValues',
'tmXBMode', 'tmXBOn', 'tmXBPrecision', 'tmXBStyle', 'tmXBTickEndF',
'tmXBTickSpacingF', 'tmXBTickStartF', 'tmXBValues', 'tmXMajorGrid',
'tmXMajorGridLineColor', 'tmXMajorGridLineDashPattern',
'tmXMajorGridThicknessF', 'tmXMinorGrid', 'tmXMinorGridLineColor',
'tmXMinorGridLineDashPattern', 'tmXMinorGridThicknessF',
'tmXTAutoPrecision', 'tmXTBorderOn', 'tmXTDataLeftF',
'tmXTDataRightF', 'tmXTFormat', 'tmXTIrrTensionF',
'tmXTIrregularPoints', 'tmXTLabelAngleF', 'tmXTLabelConstantSpacingF',
'tmXTLabelDeltaF', 'tmXTLabelDirection', 'tmXTLabelFont',
'tmXTLabelFontAspectF', 'tmXTLabelFontColor', 'tmXTLabelFontHeightF',
'tmXTLabelFontQuality', 'tmXTLabelFontThicknessF',
'tmXTLabelFuncCode', 'tmXTLabelJust', 'tmXTLabelStride', 'tmXTLabels',
'tmXTLabelsOn', 'tmXTMajorLengthF', 'tmXTMajorLineColor',
'tmXTMajorOutwardLengthF', 'tmXTMajorThicknessF', 'tmXTMaxLabelLenF',
'tmXTMaxTicks', 'tmXTMinLabelSpacingF', 'tmXTMinorLengthF',
'tmXTMinorLineColor', 'tmXTMinorOn', 'tmXTMinorOutwardLengthF',
'tmXTMinorPerMajor', 'tmXTMinorThicknessF', 'tmXTMinorValues',
'tmXTMode', 'tmXTOn', 'tmXTPrecision', 'tmXTStyle', 'tmXTTickEndF',
'tmXTTickSpacingF', 'tmXTTickStartF', 'tmXTValues', 'tmXUseBottom',
'tmYLAutoPrecision', 'tmYLBorderOn', 'tmYLDataBottomF',
'tmYLDataTopF', 'tmYLFormat', 'tmYLIrrTensionF',
'tmYLIrregularPoints', 'tmYLLabelAngleF', 'tmYLLabelConstantSpacingF',
'tmYLLabelDeltaF', 'tmYLLabelDirection', 'tmYLLabelFont',
'tmYLLabelFontAspectF', 'tmYLLabelFontColor', 'tmYLLabelFontHeightF',
'tmYLLabelFontQuality', 'tmYLLabelFontThicknessF',
'tmYLLabelFuncCode', 'tmYLLabelJust', 'tmYLLabelStride', 'tmYLLabels',
'tmYLLabelsOn', 'tmYLMajorLengthF', 'tmYLMajorLineColor',
'tmYLMajorOutwardLengthF', 'tmYLMajorThicknessF', 'tmYLMaxLabelLenF',
'tmYLMaxTicks', 'tmYLMinLabelSpacingF', 'tmYLMinorLengthF',
'tmYLMinorLineColor', 'tmYLMinorOn', 'tmYLMinorOutwardLengthF',
'tmYLMinorPerMajor', 'tmYLMinorThicknessF', 'tmYLMinorValues',
'tmYLMode', 'tmYLOn', 'tmYLPrecision', 'tmYLStyle', 'tmYLTickEndF',
'tmYLTickSpacingF', 'tmYLTickStartF', 'tmYLValues', 'tmYMajorGrid',
'tmYMajorGridLineColor', 'tmYMajorGridLineDashPattern',
'tmYMajorGridThicknessF', 'tmYMinorGrid', 'tmYMinorGridLineColor',
'tmYMinorGridLineDashPattern', 'tmYMinorGridThicknessF',
'tmYRAutoPrecision', 'tmYRBorderOn', 'tmYRDataBottomF',
'tmYRDataTopF', 'tmYRFormat', 'tmYRIrrTensionF',
'tmYRIrregularPoints', 'tmYRLabelAngleF', 'tmYRLabelConstantSpacingF',
'tmYRLabelDeltaF', 'tmYRLabelDirection', 'tmYRLabelFont',
'tmYRLabelFontAspectF', 'tmYRLabelFontColor', 'tmYRLabelFontHeightF',
'tmYRLabelFontQuality', 'tmYRLabelFontThicknessF',
'tmYRLabelFuncCode', 'tmYRLabelJust', 'tmYRLabelStride', 'tmYRLabels',
'tmYRLabelsOn', 'tmYRMajorLengthF', 'tmYRMajorLineColor',
'tmYRMajorOutwardLengthF', 'tmYRMajorThicknessF', 'tmYRMaxLabelLenF',
'tmYRMaxTicks', 'tmYRMinLabelSpacingF', 'tmYRMinorLengthF',
'tmYRMinorLineColor', 'tmYRMinorOn', 'tmYRMinorOutwardLengthF',
'tmYRMinorPerMajor', 'tmYRMinorThicknessF', 'tmYRMinorValues',
'tmYRMode', 'tmYROn', 'tmYRPrecision', 'tmYRStyle', 'tmYRTickEndF',
'tmYRTickSpacingF', 'tmYRTickStartF', 'tmYRValues', 'tmYUseLeft',
'trGridType', 'trLineInterpolationOn',
'trXAxisType', 'trXCoordPoints', 'trXInterPoints', 'trXLog',
'trXMaxF', 'trXMinF', 'trXReverse', 'trXSamples', 'trXTensionF',
'trYAxisType', 'trYCoordPoints', 'trYInterPoints', 'trYLog',
'trYMaxF', 'trYMinF', 'trYReverse', 'trYSamples', 'trYTensionF',
'txAngleF', 'txBackgroundFillColor', 'txConstantSpacingF', 'txDirection',
'txFont', 'HLU-Fonts', 'txFontAspectF', 'txFontColor',
'txFontHeightF', 'txFontOpacityF', 'txFontQuality',
'txFontThicknessF', 'txFuncCode', 'txJust', 'txPerimColor',
'txPerimDashLengthF', 'txPerimDashPattern', 'txPerimOn',
'txPerimSpaceF', 'txPerimThicknessF', 'txPosXF', 'txPosYF',
'txString', 'vcExplicitLabelBarLabelsOn', 'vcFillArrowEdgeColor',
'vcFillArrowEdgeThicknessF', 'vcFillArrowFillColor',
'vcFillArrowHeadInteriorXF', 'vcFillArrowHeadMinFracXF',
'vcFillArrowHeadMinFracYF', 'vcFillArrowHeadXF', 'vcFillArrowHeadYF',
'vcFillArrowMinFracWidthF', 'vcFillArrowWidthF', 'vcFillArrowsOn',
'vcFillOverEdge', 'vcGlyphOpacityF', 'vcGlyphStyle',
'vcLabelBarEndLabelsOn', 'vcLabelFontColor', 'vcLabelFontHeightF',
'vcLabelsOn', 'vcLabelsUseVectorColor', 'vcLevelColors',
'vcLevelCount', 'vcLevelPalette', 'vcLevelSelectionMode',
'vcLevelSpacingF', 'vcLevels', 'vcLineArrowColor',
'vcLineArrowHeadMaxSizeF', 'vcLineArrowHeadMinSizeF',
'vcLineArrowThicknessF', 'vcMagnitudeFormat',
'vcMagnitudeScaleFactorF', 'vcMagnitudeScaleValueF',
'vcMagnitudeScalingMode', 'vcMapDirection', 'vcMaxLevelCount',
'vcMaxLevelValF', 'vcMaxMagnitudeF', 'vcMinAnnoAngleF',
'vcMinAnnoArrowAngleF', 'vcMinAnnoArrowEdgeColor',
'vcMinAnnoArrowFillColor', 'vcMinAnnoArrowLineColor',
'vcMinAnnoArrowMinOffsetF', 'vcMinAnnoArrowSpaceF',
'vcMinAnnoArrowUseVecColor', 'vcMinAnnoBackgroundColor',
'vcMinAnnoConstantSpacingF', 'vcMinAnnoExplicitMagnitudeF',
'vcMinAnnoFont', 'vcMinAnnoFontAspectF', 'vcMinAnnoFontColor',
'vcMinAnnoFontHeightF', 'vcMinAnnoFontQuality',
'vcMinAnnoFontThicknessF', 'vcMinAnnoFuncCode', 'vcMinAnnoJust',
'vcMinAnnoOn', 'vcMinAnnoOrientation', 'vcMinAnnoOrthogonalPosF',
'vcMinAnnoParallelPosF', 'vcMinAnnoPerimColor', 'vcMinAnnoPerimOn',
'vcMinAnnoPerimSpaceF', 'vcMinAnnoPerimThicknessF', 'vcMinAnnoSide',
'vcMinAnnoString1', 'vcMinAnnoString1On', 'vcMinAnnoString2',
'vcMinAnnoString2On', 'vcMinAnnoTextDirection', 'vcMinAnnoZone',
'vcMinDistanceF', 'vcMinFracLengthF', 'vcMinLevelValF',
'vcMinMagnitudeF', 'vcMonoFillArrowEdgeColor',
'vcMonoFillArrowFillColor', 'vcMonoLineArrowColor',
'vcMonoWindBarbColor', 'vcNoDataLabelOn', 'vcNoDataLabelString',
'vcPositionMode', 'vcRefAnnoAngleF', 'vcRefAnnoArrowAngleF',
'vcRefAnnoArrowEdgeColor', 'vcRefAnnoArrowFillColor',
'vcRefAnnoArrowLineColor', 'vcRefAnnoArrowMinOffsetF',
'vcRefAnnoArrowSpaceF', 'vcRefAnnoArrowUseVecColor',
'vcRefAnnoBackgroundColor', 'vcRefAnnoConstantSpacingF',
'vcRefAnnoExplicitMagnitudeF', 'vcRefAnnoFont',
'vcRefAnnoFontAspectF', 'vcRefAnnoFontColor', 'vcRefAnnoFontHeightF',
'vcRefAnnoFontQuality', 'vcRefAnnoFontThicknessF',
'vcRefAnnoFuncCode', 'vcRefAnnoJust', 'vcRefAnnoOn',
'vcRefAnnoOrientation', 'vcRefAnnoOrthogonalPosF',
'vcRefAnnoParallelPosF', 'vcRefAnnoPerimColor', 'vcRefAnnoPerimOn',
'vcRefAnnoPerimSpaceF', 'vcRefAnnoPerimThicknessF', 'vcRefAnnoSide',
'vcRefAnnoString1', 'vcRefAnnoString1On', 'vcRefAnnoString2',
'vcRefAnnoString2On', 'vcRefAnnoTextDirection', 'vcRefAnnoZone',
'vcRefLengthF', 'vcRefMagnitudeF', 'vcScalarFieldData',
'vcScalarMissingValColor', 'vcScalarValueFormat',
'vcScalarValueScaleFactorF', 'vcScalarValueScaleValueF',
'vcScalarValueScalingMode', 'vcSpanLevelPalette', 'vcUseRefAnnoRes',
'vcUseScalarArray', 'vcVectorDrawOrder', 'vcVectorFieldData',
'vcWindBarbCalmCircleSizeF', 'vcWindBarbColor',
'vcWindBarbLineThicknessF', 'vcWindBarbScaleFactorF',
'vcWindBarbTickAngleF', 'vcWindBarbTickLengthF',
'vcWindBarbTickSpacingF', 'vcZeroFLabelAngleF',
'vcZeroFLabelBackgroundColor', 'vcZeroFLabelConstantSpacingF',
'vcZeroFLabelFont', 'vcZeroFLabelFontAspectF',
'vcZeroFLabelFontColor', 'vcZeroFLabelFontHeightF',
'vcZeroFLabelFontQuality', 'vcZeroFLabelFontThicknessF',
'vcZeroFLabelFuncCode', 'vcZeroFLabelJust', 'vcZeroFLabelOn',
'vcZeroFLabelOrthogonalPosF', 'vcZeroFLabelParallelPosF',
'vcZeroFLabelPerimColor', 'vcZeroFLabelPerimOn',
'vcZeroFLabelPerimSpaceF', 'vcZeroFLabelPerimThicknessF',
'vcZeroFLabelSide', 'vcZeroFLabelString', 'vcZeroFLabelTextDirection',
'vcZeroFLabelZone', 'vfCopyData', 'vfDataArray',
'vfExchangeDimensions', 'vfExchangeUVData', 'vfMagMaxV', 'vfMagMinV',
'vfMissingUValueV', 'vfMissingVValueV', 'vfPolarData',
'vfSingleMissingValue', 'vfUDataArray', 'vfUMaxV', 'vfUMinV',
'vfVDataArray', 'vfVMaxV', 'vfVMinV', 'vfXArray', 'vfXCActualEndF',
'vfXCActualStartF', 'vfXCEndIndex', 'vfXCEndSubsetV', 'vfXCEndV',
'vfXCStartIndex', 'vfXCStartSubsetV', 'vfXCStartV', 'vfXCStride',
'vfYArray', 'vfYCActualEndF', 'vfYCActualStartF', 'vfYCEndIndex',
'vfYCEndSubsetV', 'vfYCEndV', 'vfYCStartIndex', 'vfYCStartSubsetV',
'vfYCStartV', 'vfYCStride', 'vpAnnoManagerId', 'vpClipOn',
'vpHeightF', 'vpKeepAspect', 'vpOn', 'vpUseSegments', 'vpWidthF',
'vpXF', 'vpYF', 'wkAntiAlias', 'wkBackgroundColor', 'wkBackgroundOpacityF',
'wkColorMapLen', 'wkColorMap', 'wkColorModel', 'wkDashTableLength',
'wkDefGraphicStyleId', 'wkDeviceLowerX', 'wkDeviceLowerY',
'wkDeviceUpperX', 'wkDeviceUpperY', 'wkFileName', 'wkFillTableLength',
'wkForegroundColor', 'wkFormat', 'wkFullBackground', 'wkGksWorkId',
'wkHeight', 'wkMarkerTableLength', 'wkMetaName', 'wkOrientation',
'wkPDFFileName', 'wkPDFFormat', 'wkPDFResolution', 'wkPSFileName',
'wkPSFormat', 'wkPSResolution', 'wkPaperHeightF', 'wkPaperSize',
'wkPaperWidthF', 'wkPause', 'wkTopLevelViews', 'wkViews',
'wkVisualType', 'wkWidth', 'wkWindowId', 'wkXColorMode', 'wsCurrentSize',
'wsMaximumSize', 'wsThresholdSize', 'xyComputeXMax',
'xyComputeXMin', 'xyComputeYMax', 'xyComputeYMin', 'xyCoordData',
'xyCoordDataSpec', 'xyCurveDrawOrder', 'xyDashPattern',
'xyDashPatterns', 'xyExplicitLabels', 'xyExplicitLegendLabels',
'xyLabelMode', 'xyLineColor', 'xyLineColors', 'xyLineDashSegLenF',
'xyLineLabelConstantSpacingF', 'xyLineLabelFont',
'xyLineLabelFontAspectF', 'xyLineLabelFontColor',
'xyLineLabelFontColors', 'xyLineLabelFontHeightF',
'xyLineLabelFontQuality', 'xyLineLabelFontThicknessF',
'xyLineLabelFuncCode', 'xyLineThicknessF', 'xyLineThicknesses',
'xyMarkLineMode', 'xyMarkLineModes', 'xyMarker', 'xyMarkerColor',
'xyMarkerColors', 'xyMarkerSizeF', 'xyMarkerSizes',
'xyMarkerThicknessF', 'xyMarkerThicknesses', 'xyMarkers',
'xyMonoDashPattern', 'xyMonoLineColor', 'xyMonoLineLabelFontColor',
'xyMonoLineThickness', 'xyMonoMarkLineMode', 'xyMonoMarker',
'xyMonoMarkerColor', 'xyMonoMarkerSize', 'xyMonoMarkerThickness',
'xyXIrrTensionF', 'xyXIrregularPoints', 'xyXStyle', 'xyYIrrTensionF',
'xyYIrregularPoints', 'xyYStyle'), prefix=r'\b'),
Name.Builtin),
# Booleans
(r'\.(True|False)\.', Name.Builtin),
# Comparing Operators
(r'\.(eq|ne|lt|le|gt|ge|not|and|or|xor)\.', Operator.Word),
],
'strings': [
(r'(?s)"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double),
],
'nums': [
(r'\d+(?![.e])(_[a-z]\w+)?', Number.Integer),
(r'[+-]?\d*\.\d+(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
(r'[+-]?\d+\.\d*(e[-+]?\d+)?(_[a-z]\w+)?', Number.Float),
],
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@lexers@ncl.py@.PATH_END.py
|
{
"filename": "cache.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/packages/vaex-core/vaex/file/cache.py",
"type": "Python"
}
|
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import logging
import os
import mmap
import numpy as np
from pyarrow.fs import FileSystemHandler
import vaex.utils
import vaex.file
DEFAULT_BLOCK_SIZE = 1024*1024*1 # 1mb by default
logger = logging.getLogger("vaex.file.cache")
class FileSystemHandlerCached(FileSystemHandler):
    """Proxy around a pyarrow ``FileSystemHandler`` that wraps files opened
    for reading in :class:`CachedFile`, caching their content on local disk.

    Only ``open_input_stream`` and ``open_input_file`` are intercepted; every
    other filesystem operation is forwarded unchanged to the wrapped ``fs``.
    """

    def __init__(self, fs, scheme, for_arrow=False):
        self.fs = fs                # the real pyarrow FileSystemHandler
        self.scheme = scheme        # e.g. "s3"; used to rebuild the full path
        self.for_arrow = for_arrow  # if True, wrap results for pyarrow consumption
        self._file_cache = {}       # full path -> CachedFile (for data/mask reuse)

    def __eq__(self, other):
        if isinstance(other, FileSystemHandlerCached):
            return self.fs == other.fs
        return NotImplemented

    def __ne__(self, other):
        if isinstance(other, FileSystemHandlerCached):
            return self.fs != other.fs
        return NotImplemented

    def __getattr__(self, name):
        # fall back to the wrapped filesystem for anything not defined here
        return getattr(self.fs, name)

    def _open_cached(self, path, real_open):
        """Open *path* through the block cache, reusing the on-disk data/mask
        files of a previously opened instance for the same path when available.

        :param real_open: zero-argument callable returning the real (remote)
            file object; only invoked when uncached blocks must be fetched.
        """
        from pyarrow import PythonFile
        full_path = f'{self.scheme}://{path}'
        # arrow 1 and 2 don't accept a non-bytes object via the
        # PythonFile.read() path, so return bytes (not a buffer) for arrow.
        # BUG FIX: the reuse branch of open_input_stream previously omitted
        # this flag, defaulting to buffer reads even when for_arrow=True.
        read_as_buffer = not self.for_arrow
        # TODO: we may want to cache the mmapped file
        if full_path not in self._file_cache:
            f = CachedFile(real_open, full_path, read_as_buffer=read_as_buffer)
            self._file_cache[full_path] = f
        else:
            previous = self._file_cache[full_path]
            # share the on-disk data/mask so already-fetched blocks are reused
            f = CachedFile(real_open, full_path, data_file=previous.data_file,
                           mask_file=previous.mask_file, read_as_buffer=read_as_buffer)
        if not self.for_arrow:
            return f
        f = vaex.file.FileProxy(f, full_path, None)
        return PythonFile(f, mode="r")

    def open_input_stream(self, path):
        return self._open_cached(path, lambda: self.fs.open_input_stream(path))

    def open_input_file(self, path):
        return self._open_cached(path, lambda: self.fs.open_input_file(path))

    # the remaining operations are forwarded verbatim to the wrapped fs
    def copy_file(self, *args, **kwargs):
        return self.fs.copy_file(*args, **kwargs)

    def create_dir(self, *args, **kwargs):
        return self.fs.create_dir(*args, **kwargs)

    def delete_dir(self, *args, **kwargs):
        return self.fs.delete_dir(*args, **kwargs)

    def delete_dir_contents(self, *args, **kwargs):
        return self.fs.delete_dir_contents(*args, **kwargs)

    def delete_file(self, *args, **kwargs):
        return self.fs.delete_file(*args, **kwargs)

    def delete_root_dir_contents(self, *args, **kwargs):
        return self.fs.delete_root_dir_contents(*args, **kwargs)

    def get_file_info(self, *args, **kwargs):
        return self.fs.get_file_info(*args, **kwargs)

    def get_file_info_selector(self, *args, **kwargs):
        return self.fs.get_file_info_selector(*args, **kwargs)

    def get_type_name(self, *args, **kwargs):
        return self.fs.get_type_name(*args, **kwargs)

    def move(self, *args, **kwargs):
        return self.fs.move(*args, **kwargs)

    def normalize_path(self, *args, **kwargs):
        return self.fs.normalize_path(*args, **kwargs)

    def open_append_stream(self, *args, **kwargs):
        return self.fs.open_append_stream(*args, **kwargs)

    def open_output_stream(self, *args, **kwargs):
        return self.fs.open_output_stream(*args, **kwargs)
class MMappedFile:
    """Small wrapper around a memory mapped file.

    Creates a zero-filled backing file of the requested length on first use
    and exposes its content as a writable ``memoryview`` and a numpy array.
    """

    def __init__(self, path, length, dtype=np.uint8):
        """Memory-map *path*, creating a zero-filled file of *length* bytes if needed.

        :param path str: on-disk location of the backing file
        :param length int: size of the mapping in bytes (must be > 0)
        :param dtype: numpy dtype for the ``data`` view; NOTE(review): ``count=length``
            assumes a 1-byte dtype — confirm before using a wider dtype.
        """
        self.path = path
        self.length = length
        if not os.path.exists(path):
            # create a (sparse) file of the requested length by writing the last byte
            with open(self.path, 'wb') as fp:
                fp.seek(self.length - 1)
                fp.write(b'\00')
                fp.flush()
        self.fp = open(self.path, 'rb+')
        # Request a read-write mapping explicitly, using the platform-appropriate
        # keyword (Windows uses ``access``, POSIX uses ``prot``).
        # BUG FIX: these kwargs were computed but never passed to mmap.mmap(),
        # so the call silently relied on the platform defaults.
        kwargs = {}
        if vaex.utils.osname == "windows":
            kwargs["access"] = mmap.ACCESS_WRITE  # read-write mapping on Windows
        else:
            kwargs["prot"] = mmap.PROT_READ | mmap.PROT_WRITE
        self.mmap = mmap.mmap(self.fp.fileno(), self.length, **kwargs)
        self.memoryview = memoryview(self.mmap)
        self.data = np.frombuffer(self.mmap, dtype=dtype, count=self.length)

    def __getitem__(self, item):
        # slicing returns a (writable) memoryview without copying
        return self.memoryview.__getitem__(item)
def _to_block_ceil(index, block_size):
return (index + block_size - 1) // block_size
def _to_block_floor(index, block_size):
return index // block_size
def _to_index(block, block_size):
return block * block_size
class CachedFile:
def __init__(self, file, path=None, cache_dir=None, block_size=DEFAULT_BLOCK_SIZE, data_file=None, mask_file=None, read_as_buffer=True):
"""Decorator that wraps a file object (typically a s3) by caching the content locally on disk.
The standard location for the cache is: `${VAEX_FS_PATH}/<protocol (e.g. s3)>/path/to/file.ext`
See `Configuration of paths<conf.html#cache-fs>`_ how to change this.
Arguments:
:file file or callable: if callable, invoking it should give a file like object
:path str: path of file, defaults of file.name
:cache_dir str: path of cache dir, defaults to `${VAEX_FS_PATH}`
"""
self.name = path
self.path = path
self.file = file
self.cache_dir = cache_dir
self.block_size = block_size
self.read_as_buffer = read_as_buffer
self.block_reads = 0
self.reads = 0
self.loc = 0
if data_file is None or mask_file is None:
o = urlparse(path)
if cache_dir is None:
cache_dir = vaex.settings.fs.path
self.cache_dir_path = os.path.join(cache_dir, o.scheme, o.netloc, o.path[1:])
self.cache_dir_path = os.path.join(cache_dir, o.scheme, o.netloc, o.path[1:])
lockname = os.path.join('file-cache', o.scheme, o.netloc, o.path[1:], 'create.lock')
os.makedirs(self.cache_dir_path, exist_ok=True)
self.data_path = os.path.join(self.cache_dir_path, 'data')
self.mask_path = os.path.join(self.cache_dir_path, 'mask')
# if possible, we avoid using the file
if os.path.exists(self.data_path):
with open(self.data_path, 'rb') as f:
f.seek(0, 2)
self.length = f.tell()
else:
self._use_file()
self.file.seek(0, 2)
self.length = self.file.tell()
self.mask_length = _to_block_ceil(self.length, self.block_size)
logging.debug('cache path: %s', self.cache_dir_path)
with vaex.utils.file_lock(lockname):
self.data_file = MMappedFile(self.data_path, self.length)
self.mask_file = MMappedFile(self.mask_path, self.mask_length)
else:
self.data_file = data_file
self.mask_file = mask_file
self.length = self.data_file.length
self.mask_length = self.mask_file.length
def readable(self):
return True
def writable(self):
return False
def seekable(self):
return True
def closed(self):
return self.file.closed()
def flush(self):
pass
def dup(self):
if callable(self.file):
file = self.file
else:
file = lambda: vaex.file.dup(self.file)
return CachedFile(file, self.path, self.cache_dir, self.block_size, data_file=self.data_file, mask_file=self.mask_file, read_as_buffer=self.read_as_buffer)
def tell(self):
return self.loc
def seek(self, loc, whence=0):
if whence == 0:
self.loc = loc
elif whence == 1:
self.loc = self.loc + loc
elif whence == 2:
self.loc = self.length + loc
assert (self.loc >= 0) and (self.loc <= self.length)
def _use_file(self):
if callable(self.file):
self.file = self.file()
def read(self, length=-1):
start = self.loc
end = self.loc + length if length != -1 else self.length
self._ensure_cached(start, end)
self.loc = end
buffer = self.data_file[start:end]
# arrow 1 and 2 don't accept a non-bytes object via the PythonFile.read() path
return buffer if self.read_as_buffer else buffer.tobytes()
def readinto(self, buffer):
start = self.loc
end = start + len(buffer)
self._ensure_cached(start, end)
buffer[:] = self.data_file[start:end]
self.loc = end
return len(buffer)
def read_buffer(self, byte_count):
start = self.loc
end = start + byte_count
self._ensure_cached(start, end)
self.loc = end
return self.data_file[start:end]
def _as_numpy(self, offset, byte_length, dtype):
# quick route that avoids memory copies
self._ensure_cached(offset, offset+byte_length)
return np.frombuffer(self.data_file[offset:offset+byte_length], dtype)
def _fetch_blocks(self, block_start, block_end):
start_blocked = _to_index(block_start, self.block_size)
end_blocked = min(self.length, _to_index(block_end, self.block_size))
self._use_file()
self.file.seek(start_blocked)
bytes_read = self.file.readinto(self.data_file[start_blocked:end_blocked])
expected = (end_blocked - start_blocked)
assert bytes_read == expected, f'Read {bytes_read}, expected {expected} ({start_blocked}-{end_blocked} out of {self.length})'
self.mask_file.data[block_start:block_end] = 1
self.reads += 1
self.block_reads += block_end - block_start
def _ensure_cached(self, start, end):
    # Guarantee that the byte range [start, end) is present in the local
    # cache, fetching any missing blocks from the backing file.
    block_start = _to_block_floor(start, self.block_size)
    block_end = _to_block_ceil(end, self.block_size)
    missing = self.mask_file.data[block_start:block_end] == 0
    if np.all(missing):
        # nothing cached yet: fetch the whole range in one read
        self._fetch_blocks(block_start, block_end)
    elif np.any(missing):
        # partially cached: fetch each contiguous run of missing blocks
        i = block_start
        done = False  # NOTE: never set True; the loop exits via `break`
        while not done:
            # find first block that is not cached
            while i < block_end and self.mask_file.data[i] == 1:
                i += 1
            if i == block_end:
                break
            # find block that *is* cached
            j = i + 1
            while j < block_end and self.mask_file.data[j] == 0:
                j += 1
            self._fetch_blocks(i, j)
            i = j
def close(self):
    """Close the backing file if it was ever opened.

    When ``self.file`` is still a factory (callable), the real file was
    never opened, so there is nothing to close.
    """
    if not callable(self.file):
        self.file.close()
def __enter__(self):
    """Support ``with CachedFile(...) as f:`` usage."""
    return self
def __exit__(self, *args):
    """Close the backing file when leaving a ``with`` block."""
    self.close()
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@packages@vaex-core@vaex@file@cache.py@.PATH_END.py
|
{
"filename": "_offset.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/bar/_offset.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OffsetValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``bar.offset`` property (auto-generated)."""

    def __init__(self, plotly_name="offset", parent_name="bar", **kwargs):
        # Defaults below may be overridden through kwargs by the code
        # generator; array_ok=True allows a per-bar array of offsets.
        super(OffsetValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@bar@_offset.py@.PATH_END.py
|
{
"filename": "imviz_color_display.ipynb",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/notebooks/concepts/imviz_color_display.ipynb",
"type": "Jupyter Notebook"
}
|
## Proof of concept using Imviz to display RGB layers
We start off by silencing warnings that can happen when loading data as well as deprecation warnings, for clarity:
```python
import warnings
warnings.simplefilter('ignore')
```
Import modules needed for this notebook.
```python
from glue.core.component_link import ComponentLink
from skimage.io import imread
from jdaviz import Imviz
```
Read in the RGB layers as different Numpy arrays.
```python
# Point filename to your RGB file (e.g., JPEG)
filename = 'ogle_galaxy_in_color.jpg'
im = imread(filename)
im_r = im[:, :, 0][::-1, :]
im_g = im[:, :, 1][::-1, :]
im_b = im[:, :, 2][::-1, :]
```
Start Imviz app and load RGB channels into different data layers.
```python
imviz = Imviz()
imviz.load_data(im_r, data_label='Red')
imviz.load_data(im_g, data_label='Green')
imviz.load_data(im_b, data_label='Blue')
viewer = imviz.default_viewer._obj
imviz.show()
```
Assign the different RGB layers.
```python
viewer.state.color_mode = 'One color per layer'
viewer.state.layers[0].color = 'red'
viewer.state.layers[1].color = 'green'
viewer.state.layers[2].color = 'blue'
```
**Note:** If you blink, you need to run this again. Don't blink!
```python
```
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@notebooks@concepts@imviz_color_display.ipynb@.PATH_END.py
|
{
"filename": "extraction.py",
"repo_name": "HiPERCAM/hipercam",
"repo_path": "hipercam_extracted/hipercam-master/hipercam/extraction.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Defines classes and functions used to detect and characterise
objects in photometric data.
Adds an optional dependency on the ``sep`` source-extraction package.
"""
import struct
import sep
from astropy.convolution import Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma
from numpy.lib import recfunctions
import numpy as np
# Compact structured dtype for detected-object catalogues: 32-bit
# float/int fields are used in place of 64-bit defaults to save space.
SMALL_TYPE = np.dtype(
    [
        ("thresh", "<f4"),
        ("npix", "<i4"),
        ("tnpix", "<i4"),
        ("xmin", "<i4"),
        ("xmax", "<i4"),
        ("ymin", "<i4"),
        ("ymax", "<i4"),
        ("x", "<f8"),
        ("y", "<f8"),
        ("x2", "<f4"),
        ("y2", "<f4"),
        ("xy", "<f4"),
        ("errx2", "<f4"),
        ("erry2", "<f4"),
        ("errxy", "<f4"),
        ("a", "<f4"),
        ("b", "<f4"),
        ("theta", "<f4"),
        ("cxx", "<f4"),
        ("cyy", "<f4"),
        ("cxy", "<f4"),
        ("cflux", "<f4"),
        ("flux", "<f4"),
        ("cpeak", "<f4"),
        ("peak", "<f4"),
        ("xcpeak", "<i4"),
        ("ycpeak", "<i4"),
        ("xpeak", "<i4"),
        ("ypeak", "<i4"),
        ("flag", "<i4"),
        ("fwhm", "<f4"),
        ("hfd", "<f4"),
    ]
)
def findStars(wind, thresh, kernel_fwhm, return_bkg=False):
    """Detect objects in an image window using ``sep``.

    Parameters
    ----------
    wind : `~hipercam.window.Window`
        Window in which to detect objects.
    thresh : float
        Detection threshold, in multiples of the background RMS.
    kernel_fwhm : float
        The image is convolved with a Gaussian kernel of this FWHM prior
        to object detection; set to something close to the typical FWHM
        in the image.
    return_bkg : bool
        If True, also return the background computed by ``sep``.

    Returns
    -------
    objects : np.ndarray
        Structured array of object parameters (see the ``sep.extract``
        documentation for the available fields), with positions and
        sizes converted to unbinned pixels (``npix``/``tnpix``
        excepted), plus two extra fields: a crude ``fwhm`` estimate and
        an ``hfd`` (half-flux diameter) measurement.  Note that the
        conversions may not be correct when xbin != ybin.
    bkg : `sep.Background` [only if ``return_bkg``]
        Background estimate; use ``bkg.subfrom`` to subtract it.
    """
    # sep requires float type, C-contiguous data, native byte order
    img = wind.data.astype("float")
    background = sep.Background(img)
    background.subfrom(img)  # in-place background subtraction

    # smoothing kernel applied by sep's matched filter
    kern = Gaussian2DKernel(kernel_fwhm * gaussian_fwhm_to_sigma,
                            x_size=3, y_size=3)
    kern.normalize()

    objs = sep.extract(
        img, thresh, err=background.globalrms, clean=True,
        filter_kernel=kern.array
    )

    # crude FWHM estimate from the ellipse axes, plus half-flux radius
    fwhm_est = 2.0 * np.sqrt(np.log(2.0) * (objs["a"] ** 2 + objs["b"] ** 2))
    half_flux_radius, hfr_flags = sep.flux_radius(
        img,
        objs["x"],
        objs["y"],
        25 * np.ones_like(objs["x"]),
        0.5,
        normflux=objs["cflux"],
    )
    # discard measurements that failed or hit the 25-pixel search limit
    invalid = np.logical_or(hfr_flags != 0, half_flux_radius >= 25)
    half_flux_radius[invalid] = np.nan
    objs = recfunctions.append_fields(
        objs, ("fwhm", "hfd"), (fwhm_est, 2 * half_flux_radius)
    )

    # convert positions and sizes from binned to unbinned pixels
    objs["xmin"] = (wind.x(objs["xmin"] - (wind.xbin - 1) / 2)).astype(np.int32)
    objs["xmax"] = (wind.x(objs["xmax"] + (wind.xbin - 1) / 2)).astype(np.int32)
    objs["ymin"] = (wind.y(objs["ymin"] - (wind.ybin - 1) / 2)).astype(np.int32)
    objs["ymax"] = (wind.y(objs["ymax"] + (wind.ybin - 1) / 2)).astype(np.int32)
    for key in ("x", "xcpeak", "xpeak"):
        objs[key] = wind.x(objs[key])
    for key in ("y", "ycpeak", "ypeak"):
        objs[key] = wind.y(objs[key])
    for key in ("xy", "cxy"):
        objs[key] *= wind.xbin * wind.ybin
    for key in ("x2", "a", "errx2", "cxx"):
        objs[key] *= wind.xbin
    for key in ("y2", "b", "erry2", "cyy"):
        objs[key] *= wind.ybin
    for key in ("fwhm", "hfd"):
        objs[key] *= np.sqrt(wind.xbin * wind.ybin)

    # down-cast to the compact dtype to save space
    objs = objs.astype(SMALL_TYPE)

    if return_bkg:
        return (objs, background)
    return objs
|
HiPERCAMREPO_NAMEhipercamPATH_START.@hipercam_extracted@hipercam-master@hipercam@extraction.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/barpolar/marker/colorbar/title/__init__.py",
"type": "Python"
}
|
import sys

# Python < 3.7 lacks module-level __getattr__ (PEP 562), so import the
# submodule eagerly; otherwise defer the import until first attribute
# access to keep plotly's import time down.
if sys.version_info < (3, 7):
    from ._font import Font
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(__name__, [], ["._font.Font"])
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@barpolar@marker@colorbar@title@__init__.py@.PATH_END.py
|
{
"filename": "enable_mlir_bridge.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/compiler/mlir/tensorflow/g3doc/enable_mlir_bridge.md",
"type": "Markdown"
}
|
# Enable MLIR-Based new TPU Bridge
**MLIR-Based new TPU Bridge is an experimental feature, tread lightly.**
## For TF 1.x-Based Models
In tf.ConfigProto.Experimental, there is a knob controlling whether the new TPU
Bridge is enabled or not. You can set it by using the following example code:
```
session_config = tf.ConfigProto(
......
experimental=tf.ConfigProto.Experimental(
enable_mlir_bridge=True,
),
......
)
```
## For TF 2.x-Based Models
Sessions and Session Configs are no longer available in TF 2.x. Instead, there
is a global **Context** that holds all the equivalences. You can manipulate the
**Context** with following code. Note that it must be added early in your
program (at least before any of your model computation).
```
tf.config.experimental.enable_mlir_bridge()
```
## How to disable the old TPU bridge?
Due to how TPU bridges are designed to work, you don't actually need to disable
the old bridge as they would not interfere with each other.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@compiler@mlir@tensorflow@g3doc@enable_mlir_bridge.md@.PATH_END.py
|
{
"filename": "recipes_FLAT.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/ghost/recipes/sq/recipes_FLAT.py",
"type": "Python"
}
|
"""
Recipes available to data with tags ``['GHOST', 'CAL', 'FLAT']``.
Default is ``makeProcessedFlat``.
"""
recipe_tags = set(['GHOST', 'CAL', 'FLAT'])
def makeProcessedFlat(p):
    """
    This recipe performs the standardization and corrections needed to convert
    the raw input flat images into a single stacked flat image. This output
    processed flat is stored on disk using storeProcessedFlat and has a name
    equal to the name of the first input flat image with "_flat.fits" appended.
    An _xmodPolyfit.fits file is also created.

    Parameters
    ----------
    p : Primitives object
        A primitive set matching the recipe_tags.
    """
    # NOTE: the step order below is the recipe's contract — variance
    # planes, bias/dark corrections and stacking must happen in this
    # sequence.
    p.prepare(attach_mdf=False)
    p.addDQ()
    p.addVAR(read_noise=True)      # read-noise variance, before corrections
    p.overscanCorrect()
    p.biasCorrect()
    p.ADUToElectrons()
    p.addVAR(poisson_noise=True)   # Poisson term added once in electrons
    p.darkCorrect()
    p.stackFrames(operation='median')
    p.tileArrays()
    p.traceFibers()
    p.removeScatteredLight()
    p.measureBlaze()
    p.storeProcessedFlat()
    return
_default = makeProcessedFlat
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@ghost@recipes@sq@recipes_FLAT.py@.PATH_END.py
|
{
"filename": "legendre.py",
"repo_name": "numpy/numpy",
"repo_path": "numpy_extracted/numpy-main/numpy/polynomial/legendre.py",
"type": "Python"
}
|
"""
==================================================
Legendre Series (:mod:`numpy.polynomial.legendre`)
==================================================
This module provides a number of objects (mostly functions) useful for
dealing with Legendre series, including a `Legendre` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Classes
-------
.. autosummary::
:toctree: generated/
Legendre
Constants
---------
.. autosummary::
:toctree: generated/
legdomain
legzero
legone
legx
Arithmetic
----------
.. autosummary::
:toctree: generated/
legadd
legsub
legmulx
legmul
legdiv
legpow
legval
legval2d
legval3d
leggrid2d
leggrid3d
Calculus
--------
.. autosummary::
:toctree: generated/
legder
legint
Misc Functions
--------------
.. autosummary::
:toctree: generated/
legfromroots
legroots
legvander
legvander2d
legvander3d
leggauss
legweight
legcompanion
legfit
legtrim
legline
leg2poly
poly2leg
See also
--------
numpy.polynomial
"""
import numpy as np
import numpy.linalg as la
from numpy.lib.array_utils import normalize_axis_index
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'legzero', 'legone', 'legx', 'legdomain', 'legline', 'legadd',
'legsub', 'legmulx', 'legmul', 'legdiv', 'legpow', 'legval', 'legder',
'legint', 'leg2poly', 'poly2leg', 'legfromroots', 'legvander',
'legfit', 'legtrim', 'legroots', 'Legendre', 'legval2d', 'legval3d',
'leggrid2d', 'leggrid3d', 'legvander2d', 'legvander3d', 'legcompanion',
'leggauss', 'legweight']
legtrim = pu.trimcoef
def poly2leg(pol):
    """Convert polynomial coefficients to a Legendre series.

    Parameters
    ----------
    pol : array_like
        1-D array of polynomial coefficients (relative to the "standard"
        basis), ordered from lowest to highest degree.

    Returns
    -------
    c : ndarray
        1-D array of the equivalent Legendre series coefficients,
        ordered from lowest to highest degree.

    See Also
    --------
    leg2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets is to
    use the convert method of a class instance.

    Examples
    --------
    >>> import numpy as np
    >>> from numpy import polynomial as P
    >>> p = P.Polynomial(np.arange(4))
    >>> c = P.Legendre(P.legendre.poly2leg(p.coef))
    >>> c
    Legendre([ 1.  ,  3.25,  1.  ,  0.75], domain=[-1,  1], window=[-1,  1]) # may vary
    """
    [pol] = pu.as_series([pol])
    # Horner-like accumulation in the Legendre basis: repeatedly
    # multiply by x and add the next lower-degree coefficient.
    result = 0
    for coef in reversed(pol):
        result = legadd(legmulx(result), coef)
    return result
def leg2poly(c):
    """Convert a Legendre series to polynomial coefficients.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients, ordered from lowest
        to highest degree.

    Returns
    -------
    pol : ndarray
        1-D array of the equivalent polynomial coefficients (relative to
        the "standard" basis), ordered from lowest to highest degree.

    See Also
    --------
    poly2leg

    Notes
    -----
    The easy way to do conversions between polynomial basis sets is to
    use the convert method of a class instance.

    Examples
    --------
    >>> from numpy import polynomial as P
    >>> P.legendre.leg2poly(range(4))
    array([-1. , -3.5,  3. ,  7.5])
    """
    from .polynomial import polyadd, polysub, polymulx

    [c] = pu.as_series([c])
    n = len(c)
    if n < 3:
        return c
    # Downward recurrence built on the Legendre three-term relation;
    # c0/c1 accumulate coefficients in the standard polynomial basis.
    c0 = c[-2]
    c1 = c[-1]
    for deg in range(n - 1, 1, -1):  # deg is the current degree of c1
        c0, c1 = (
            polysub(c[deg - 2], (c1 * (deg - 1)) / deg),
            polyadd(c0, (polymulx(c1) * (2 * deg - 1)) / deg),
        )
    return polyadd(c0, polymulx(c1))
#
# These constant arrays are kept of integer type where possible so as to
# be compatible with the widest range of other types, such as Decimal.
#

# Default Legendre domain.
legdomain = np.array([-1., 1.])

# Legendre coefficients representing zero.
legzero = np.array([0])

# Legendre coefficients representing one.
legone = np.array([1])

# Legendre coefficients representing the identity x.
legx = np.array([0, 1])
def legline(off, scl):
    """Return the Legendre series whose graph is ``off + scl*x``.

    Parameters
    ----------
    off, scl : scalars
        Intercept and slope of the line.

    Returns
    -------
    y : ndarray
        This module's representation of the Legendre series for
        ``off + scl*x``.

    See Also
    --------
    numpy.polynomial.polynomial.polyline
    numpy.polynomial.chebyshev.chebline

    Examples
    --------
    >>> import numpy.polynomial.legendre as L
    >>> L.legline(3, 2)
    array([3, 2])
    >>> L.legval(-3, L.legline(3, 2))  # should be -3
    -3.0
    """
    # L_1(x) = x, so the line maps directly onto the first two
    # coefficients; drop the degree-1 term when the slope is zero.
    return np.array([off, scl]) if scl != 0 else np.array([off])
def legfromroots(roots):
    """Generate a Legendre series with the given roots.

    Returns the coefficients, in Legendre form, of
    ``p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n)``
    where the ``r_n`` are given in `roots`.  A root of multiplicity n
    must appear n times; roots may appear in any order.  The coefficient
    of the highest-degree term is generally not 1.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  Real if all roots are real; complex
        otherwise, even when every resulting coefficient is real.

    See Also
    --------
    numpy.polynomial.polynomial.polyfromroots
    numpy.polynomial.chebyshev.chebfromroots

    Examples
    --------
    >>> import numpy.polynomial.legendre as L
    >>> L.legfromroots((-1, 0, 1))  # x^3 - x relative to the standard basis
    array([ 0. , -0.4,  0. ,  0.4])
    """
    # Shared helper parameterized by this basis's line constructor and
    # multiplication.
    return pu._fromroots(legline, legmul, roots)
def legadd(c1, c2):
    """Return the sum of two Legendre series ``c1 + c2``.

    Coefficient sequences are ordered from lowest to highest degree,
    e.g. ``[1, 2, 3]`` represents ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients, low to high.

    Returns
    -------
    out : ndarray
        Legendre series coefficients of the sum.

    See Also
    --------
    legsub, legmulx, legmul, legdiv, legpow

    Notes
    -----
    The sum of two Legendre series is itself a Legendre series — no
    reprojection onto the basis is needed — so addition is simply
    component-wise.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legadd((1, 2, 3), (3, 2, 1))
    array([4.,  4.,  4.])
    """
    return pu._add(c1, c2)
def legsub(c1, c2):
    """Return the difference of two Legendre series ``c1 - c2``.

    Coefficient sequences are ordered from lowest to highest degree,
    e.g. ``[1, 2, 3]`` represents ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients, low to high.

    Returns
    -------
    out : ndarray
        Legendre series coefficients of the difference.

    See Also
    --------
    legadd, legmulx, legmul, legdiv, legpow

    Notes
    -----
    Like addition, subtraction of Legendre series stays within the
    basis, so it is simply component-wise.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legsub((1, 2, 3), (3, 2, 1))
    array([-2.,  0.,  2.])
    """
    return pu._sub(c1, c2)
def legmulx(c):
    """Multiply a Legendre series by the independent variable x.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients, low to high.

    Returns
    -------
    out : ndarray
        Coefficients of ``x * c``.

    See Also
    --------
    legadd, legsub, legmul, legdiv, legpow

    Notes
    -----
    Uses the Legendre recursion
    ``x*P_i(x) = ((i + 1)*P_{i+1}(x) + i*P_{i-1}(x)) / (2i + 1)``.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legmulx([1, 2, 3])
    array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary
    """
    [c] = pu.as_series([c])
    # the zero series is its own product with x
    if len(c) == 1 and c[0] == 0:
        return c

    out = np.empty(len(c) + 1, dtype=c.dtype)
    out[0] = c[0] * 0
    out[1] = c[0]
    for i in range(1, len(c)):
        denom = 2 * i + 1
        out[i + 1] = (c[i] * (i + 1)) / denom
        out[i - 1] += (c[i] * i) / denom
    return out
def legmul(c1, c2):
    """Return the product of two Legendre series ``c1 * c2``.

    Coefficient sequences are ordered from lowest to highest degree,
    e.g. ``[1, 2, 3]`` represents ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients, low to high.

    Returns
    -------
    out : ndarray
        Legendre series coefficients of the product.

    See Also
    --------
    legadd, legsub, legmulx, legdiv, legpow

    Notes
    -----
    The polynomial product of two Legendre series contains terms that
    are not in the Legendre basis, so the result is reprojected onto the
    basis, which may produce "unintuitive" (but correct) results.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legmul((1, 2, 3), (3, 2))  # multiplication requires "reprojection"
    array([  4.33333333,  10.4       ,  11.66666667,   3.6       ]) # may vary
    """
    [c1, c2] = pu.as_series([c1, c2])
    # run the Clenshaw-style recursion over the shorter series,
    # multiplying the longer one through at each step
    if len(c1) > len(c2):
        short, long_ = c2, c1
    else:
        short, long_ = c1, c2

    if len(short) == 1:
        c0 = short[0] * long_
        c1 = 0
    elif len(short) == 2:
        c0 = short[0] * long_
        c1 = short[1] * long_
    else:
        nd = len(short)
        c0 = short[-2] * long_
        c1 = short[-1] * long_
        for i in range(3, len(short) + 1):
            tmp = c0
            nd = nd - 1
            c0 = legsub(short[-i] * long_, (c1 * (nd - 1)) / nd)
            c1 = legadd(tmp, (legmulx(c1) * (2 * nd - 1)) / nd)
    return legadd(c0, legmulx(c1))
def legdiv(c1, c2):
    """Divide one Legendre series by another.

    Returns the quotient-with-remainder of ``c1 / c2``.  Coefficient
    sequences are ordered from lowest to highest degree, e.g.
    ``[1, 2, 3]`` represents ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Legendre series coefficients, low to high.

    Returns
    -------
    quo, rem : ndarrays
        Legendre series coefficients of the quotient and remainder.

    See Also
    --------
    legadd, legsub, legmulx, legmul, legpow

    Notes
    -----
    As with multiplication, polynomial division of Legendre series
    yields terms outside the basis that must be reprojected, so results
    may look "unintuitive" though they are correct.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legdiv((1, 2, 3), (3, 2, 1))  # quotient "intuitive," remainder not
    (array([3.]), array([-8., -4.]))
    """
    return pu._div(legmul, c1, c2)
def legpow(c, pow, maxpower=16):
    """Raise a Legendre series to a power.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients, ordered from low to
        high degree; ``[1, 2, 3]`` is ``P_0 + 2*P_1 + 3*P_2``.
    pow : integer
        Power to which the series will be raised.
    maxpower : integer, optional
        Maximum power allowed, mainly to limit growth of the series to
        unmanageable size.  Default is 16.

    Returns
    -------
    coef : ndarray
        Legendre series of ``c`` raised to the power ``pow``.

    See Also
    --------
    legadd, legsub, legmulx, legmul, legdiv
    """
    return pu._pow(legmul, c, pow, maxpower)
def legder(c, m=1, scl=1, axis=0):
    """Differentiate a Legendre series.

    Returns the Legendre series coefficients `c` differentiated `m`
    times along `axis`.  At each iteration the result is multiplied by
    `scl` (the scaling factor is for use in a linear change of
    variable).

    Parameters
    ----------
    c : array_like
        Array of Legendre series coefficients, low to high degree along
        each axis; multidimensional arrays hold one variable per axis.
    m : int, optional
        Number of derivatives taken, must be non-negative.  Default 1.
    scl : scalar, optional
        Each differentiation is multiplied by `scl`; the end result is
        multiplication by ``scl**m``.  Default 1.
    axis : int, optional
        Axis over which the derivative is taken.  Default 0.

    Returns
    -------
    der : ndarray
        Legendre series of the derivative.

    See Also
    --------
    legint

    Notes
    -----
    Differentiation in the Legendre basis does not resemble the same
    operation on a power series, so results may look "unintuitive"
    though they are correct.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legder((1, 2, 3, 4))
    array([  6.,   9.,  20.])
    >>> L.legder((1, 2, 3, 4), 3)
    array([60.])
    """
    c = np.array(c, ndmin=1, copy=True)
    if c.dtype.char in '?bBhHiIlLqQpP':
        # promote boolean/integer input so in-place scaling behaves
        c = c.astype(np.double)
    order = pu._as_int(m, "the order of derivation")
    axis_idx = pu._as_int(axis, "the axis")
    if order < 0:
        raise ValueError("The order of derivation must be non-negative")
    axis_idx = normalize_axis_index(axis_idx, c.ndim)

    if order == 0:
        return c

    # work along the first axis; restore the layout at the end
    c = np.moveaxis(c, axis_idx, 0)
    n = len(c)
    if order >= n:
        # differentiating away every term leaves the zero series
        c = c[:1] * 0
    else:
        for _ in range(order):
            n = n - 1
            c *= scl
            deriv = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # downward recurrence for the derivative coefficients
            for j in range(n, 2, -1):
                deriv[j - 1] = (2 * j - 1) * c[j]
                c[j - 2] += c[j]
            if n > 1:
                deriv[1] = 3 * c[2]
            deriv[0] = c[1]
            c = deriv
    return np.moveaxis(c, 0, axis_idx)
def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """Integrate a Legendre series.

    Returns the Legendre series coefficients `c` integrated `m` times
    from `lbnd` along `axis`.  At each iteration the result is
    *multiplied* by `scl` and an integration constant from `k` is added.
    The scaling is for use in a linear change of variable: for
    ``u = ax + b`` one typically wants ``scl = 1/a``.

    Parameters
    ----------
    c : array_like
        Array of Legendre series coefficients, low to high degree along
        each axis; multidimensional arrays hold one variable per axis.
    m : int, optional
        Order of integration, must be positive.  Default 1.
    k : {[], list, scalar}, optional
        Integration constant(s): the value of the i-th integral at
        ``lbnd`` is the i-th entry.  Default ``[]`` sets all constants
        to zero; a scalar is accepted when ``m == 1``.
    lbnd : scalar, optional
        The lower bound of the integral.  Default 0.
    scl : scalar, optional
        Multiplier applied after each integration, before the constant
        is added.  Default 1.
    axis : int, optional
        Axis over which the integral is taken.  Default 0.

    Returns
    -------
    S : ndarray
        Legendre series coefficient array of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.ndim(lbnd) != 0``, or
        ``np.ndim(scl) != 0``.

    See Also
    --------
    legder

    Notes
    -----
    The result of integrating in the Legendre basis generally needs
    reprojection onto the basis, so results may look "unintuitive"
    though they are correct.

    Examples
    --------
    >>> from numpy.polynomial import legendre as L
    >>> L.legint((1, 2, 3))
    array([ 0.33333333,  0.4       ,  0.66666667,  0.6       ]) # may vary
    """
    c = np.array(c, ndmin=1, copy=True)
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    order = pu._as_int(m, "the order of integration")
    axis_idx = pu._as_int(axis, "the axis")
    if order < 0:
        raise ValueError("The order of integration must be non-negative")
    if len(k) > order:
        raise ValueError("Too many integration constants")
    if np.ndim(lbnd) != 0:
        raise ValueError("lbnd must be a scalar.")
    if np.ndim(scl) != 0:
        raise ValueError("scl must be a scalar.")
    axis_idx = normalize_axis_index(axis_idx, c.ndim)

    if order == 0:
        return c

    c = np.moveaxis(c, axis_idx, 0)
    # pad the constants out to one per integration
    k = list(k) + [0] * (order - len(k))
    for i in range(order):
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # integrating the zero series only adds the constant
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            tmp[0] = c[0] * 0
            tmp[1] = c[0]
            if n > 1:
                tmp[2] = c[1] / 3
            # upward recurrence: the integral of P_j spreads to P_{j±1}
            for j in range(2, n):
                t = c[j] / (2 * j + 1)
                tmp[j + 1] = t
                tmp[j - 1] -= t
            # adjust the constant term so the integral equals k[i] at lbnd
            tmp[0] += k[i] - legval(lbnd, tmp)
            c = tmp
    return np.moveaxis(c, 0, axis_idx)
def legval(x, c, tensor=True):
"""
Evaluate a Legendre series at points x.
If `c` is of length ``n + 1``, this function returns the value:
.. math:: p(x) = c_0 * L_0(x) + c_1 * L_1(x) + ... + c_n * L_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then ``p(x)`` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
legval2d, leggrid2d, legval3d, leggrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
"""
c = np.array(c, ndmin=1, copy=None)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*((nd - 1)/nd)
c1 = tmp + c1*x*((2*nd - 1)/nd)
return c0 + c1*x
def legval2d(x, y, c):
    """
    Evaluate a 2-D Legendre series at points (x, y).

    Computes ``p(x, y) = sum_{i,j} c[i, j] * L_i(x) * L_j(y)``.

    `x` and `y` are converted to ndarrays only when they are lists or
    tuples; otherwise they are used as-is and must support addition and
    multiplication with themselves and with the elements of `c`.  After
    conversion the two must have the same shape.  A 1-D `c` is treated as
    if a trailing unit dimension were appended, so the result has shape
    ``c.shape[2:] + x.shape``.

    Parameters
    ----------
    x, y : array_like, compatible objects
        Points at which the two dimensional series is evaluated, paired
        elementwise.
    c : array_like
        Coefficient array; ``c[i, j]`` multiplies the term of multi-degree
        ``i, j``.  Dimensions beyond the second enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        Values of the two dimensional Legendre series at the points formed
        from pairs of corresponding values from `x` and `y`.

    See Also
    --------
    legval, leggrid2d, legval3d, leggrid3d
    """
    # The shared helper handles validation/broadcasting and applies legval
    # once per dimension.
    return pu._valnd(legval, c, x, y)
def leggrid2d(x, y, c):
    """
    Evaluate a 2-D Legendre series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * L_i(a) * L_j(b)

    where the points ``(a, b)`` consist of all pairs formed by taking
    `a` from `x` and `b` from `y`. The resulting points form a grid with
    `x` in the first dimension and `y` in the second.

    The parameters `x` and `y` are converted to arrays only if they are
    tuples or a lists, otherwise they are treated as a scalars. In either
    case, either `x` and `y` or their elements must support multiplication
    and addition both with themselves and with the elements of `c`.

    If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape + y.shape.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`. If `x` or `y` is a list or
        tuple, it is first converted to an ndarray, otherwise it is left
        unchanged and, if it isn't an ndarray, it is treated as a scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term of
        multi-degree i,j is contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional Legendre series at points in the
        Cartesian product of `x` and `y`.

    See Also
    --------
    legval, legval2d, legval3d, leggrid3d
    """
    # Fix: the Returns section previously described a "Chebyshev series";
    # this routine evaluates a Legendre series.
    return pu._gridnd(legval, c, x, y)
def legval3d(x, y, z, c):
    """
    Evaluate a 3-D Legendre series at points (x, y, z).

    Computes ``p(x, y, z) = sum_{i,j,k} c[i, j, k] * L_i(x) * L_j(y) * L_k(z)``.

    `x`, `y` and `z` are converted to ndarrays only when they are lists or
    tuples; otherwise they are used as-is and must support addition and
    multiplication with themselves and with the elements of `c`.  After
    conversion all three must share one shape.  If `c` has fewer than
    three dimensions, trailing unit dimensions are implied, and the result
    has shape ``c.shape[3:] + x.shape``.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        Points at which the three dimensional series is evaluated, matched
        up elementwise.
    c : array_like
        Coefficient array; ``c[i, j, k]`` multiplies the term of
        multi-degree ``i, j, k``.  Dimensions beyond the third enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        Values of the series at the triples of corresponding values from
        `x`, `y`, and `z`.

    See Also
    --------
    legval, legval2d, leggrid2d, leggrid3d
    """
    # Same shared machinery as legval2d, extended to three coordinates.
    return pu._valnd(legval, c, x, y, z)
def leggrid3d(x, y, z, c):
    """
    Evaluate a 3-D Legendre series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * L_i(a) * L_j(b) * L_k(c)

    where the points ``(a, b, c)`` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
    a grid with `x` in the first dimension, `y` in the second, and `z` in
    the third.

    The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or a lists, otherwise they are treated as a scalars. In
    either case, either `x`, `y`, and `z` or their elements must support
    multiplication and addition both with themselves and with the elements
    of `c`.

    If `c` has fewer than three dimensions, ones are implicitly appended to
    its shape to make it 3-D. The shape of the result will be c.shape[3:] +
    x.shape + y.shape + z.shape.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. If `x`, `y`, or `z` is a
        list or tuple, it is first converted to an ndarray, otherwise it is
        left unchanged and, if it isn't an ndarray, it is treated as a
        scalar.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
        dimension greater than three the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the three dimensional Legendre series at points in
        the Cartesian product of `x`, `y`, and `z`.

    See Also
    --------
    legval, legval2d, leggrid2d, legval3d
    """
    # Fix: the docstring previously described the 2-D case ("coefficients
    # for terms of degree i,j", "two dimensional polynomial ... product of
    # `x` and `y`"); this is the 3-D gridded evaluation.
    return pu._gridnd(legval, c, x, y, z)
def legvander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = L_i(x)
where ``0 <= i <= deg``. The leading indices of `V` index the elements of
`x` and the last index is the degree of the Legendre polynomial.
If `c` is a 1-D array of coefficients of length ``n + 1`` and `V` is the
array ``V = legvander(x, n)``, then ``np.dot(V, c)`` and
``legval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of Legendre series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
``x.shape + (deg + 1,)``, where The last index is the degree of the
corresponding Legendre polynomial. The dtype will be the same as
the converted `x`.
"""
ideg = pu._as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=None, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
# Use forward recursion to generate the entries. This is not as accurate
# as reverse recursion in this application but it is more efficient.
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x*(2*i - 1) - v[i-2]*(i - 1))/i
return np.moveaxis(v, 0, -1)
def legvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the matrix ``V`` defined by

    .. math:: V[..., (deg[1] + 1)*i + j] = L_i(x) * L_j(y),

    for ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``.  The leading indices
    of `V` index the points ``(x, y)`` and the last index encodes the pair
    of Legendre degrees.

    With ``V = legvander2d(x, y, [xdeg, ydeg])`` the columns of `V` match a
    flattened 2-D coefficient array `c` of shape ``(xdeg + 1, ydeg + 1)``
    in row-major order, so ``np.dot(V, c.flat)`` equals
    ``legval2d(x, y, c)`` up to roundoff.

    Parameters
    ----------
    x, y : array_like
        Point coordinates, all of the same shape; converted to float64 or
        complex128, scalars to 1-D arrays.
    deg : list of ints
        Maximum degrees as ``[x_deg, y_deg]``.

    Returns
    -------
    vander2d : ndarray
        Matrix of shape ``x.shape + (order,)`` with
        ``order = (deg[0] + 1)*(deg[1] + 1)`` and the dtype of the
        converted `x` and `y`.

    See Also
    --------
    legvander, legvander3d, legval2d, legval3d
    """
    # One 1-D Vandermonde builder per axis; the helper forms and flattens
    # their outer product.
    return pu._vander_nd_flat((legvander, legvander), (x, y), deg)
def legvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points ``(x, y, z)``. If `l`, `m`, `n` are the given degrees in `x`,
    `y`, `z`, then the pseudo-Vandermonde matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = L_i(x)*L_j(y)*L_k(z),

    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The
    leading indices of `V` index the points ``(x, y, z)`` and the last
    index encodes the degrees of the Legendre polynomials.

    If ``V = legvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
    of `V` correspond to the elements of a 3-D coefficient array `c` of
    shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order

    .. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...

    and ``np.dot(V, c.flat)`` and ``legval3d(x, y, z, c)`` will be the
    same up to roundoff. This equivalence is useful both for least squares
    fitting and for the evaluation of a large number of 3-D Legendre
    series of the same degrees and sample points.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes will
        be converted to either float64 or complex128 depending on whether
        any of the elements are complex. Scalars are converted to 1-D
        arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    legvander, legvander2d, legval2d, legval3d
    """
    # Fixes: See Also previously listed this function itself instead of
    # legvander2d, and the index bound read ``0 <= j <= n`` for ``k``.
    return pu._vander_nd_flat((legvander, legvander, legvander), (x, y, z), deg)
def legfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Legendre series to data.

    Return the coefficients of a Legendre series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`. If
    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
    multiple fits are done, one for each column of `y`, and the resulting
    coefficients are stored in the corresponding columns of a 2-D return.
    The fitted polynomial(s) are in the form

    .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets of sample
        points sharing the same x-coordinates can be fitted at once by
        passing in a 2D-array that contains one dataset per column.
    deg : int or 1-D array_like
        Degree(s) of the fitting polynomials. If `deg` is a single integer
        all terms up to and including the `deg`'th term are included in the
        fit. A list of integers specifying the degrees of the terms to
        include may be used instead.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller than
        this relative to the largest singular value will be ignored. The
        default value is ``len(x)*eps``, where eps is the relative
        precision of the float type, about 2e-16 in most cases.
    full : bool, optional
        Switch determining nature of return value. When it is False (the
        default) just the coefficients are returned, when True diagnostic
        information from the singular value decomposition is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the weight ``w[i]`` applies to the unsquared
        residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
        chosen so that the errors of the products ``w[i]*y[i]`` all have
        the same variance. When using inverse-variance weighting, use
        ``w[i] = 1/sigma(y[i])``. The default value is None.

    Returns
    -------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
        Legendre coefficients ordered from low to high. If `y` was
        2-D, the coefficients for the data in column k of `y` are in
        column `k`. If `deg` is specified as a list, coefficients for
        terms not included in the fit are set equal to zero in the
        returned `coef`.
    [residuals, rank, singular_values, rcond] : list
        These values are only returned if ``full == True``

        - residuals -- sum of squared residuals of the least squares fit
        - rank -- the numerical rank of the scaled Vandermonde matrix
        - singular_values -- singular values of the scaled Vandermonde matrix
        - rcond -- value of `rcond`.

        For more details, see `numpy.linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. The warning is only raised if ``full == False``.

    See Also
    --------
    numpy.polynomial.polynomial.polyfit
    numpy.polynomial.chebyshev.chebfit
    legval : Evaluates a Legendre series.
    legvander : Vandermonde matrix of Legendre series.
    numpy.linalg.lstsq : Computes a least-squares fit from the matrix.

    Notes
    -----
    The solution is the coefficients of the Legendre series `p` that
    minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,

    where :math:`w_j` are the weights. This is solved as the (typically)
    overdetermined matrix equation ``V(x) * c = w * y`` via singular value
    decomposition of the weighted pseudo-Vandermonde matrix `V`.

    If some of the singular values of `V` are so small that they are
    neglected, a rank warning is issued: the coefficients may then be
    poorly determined. A lower-order fit usually removes the warning; a
    smaller `rcond` suppresses it at the risk of a spurious fit.

    Fits using Legendre series are usually better conditioned than fits
    using power series, but much can depend on the distribution of the
    sample points and the smoothness of the data.

    Examples
    --------
    >>> x = np.linspace(-1, 1, 51)
    >>> c = legfit(x, 1 + 2*x, 1)  # recovers [1, 2] since L_1(x) = x
    """
    # Fix: `coef` was documented with shape (M,)/(M, K); its length is set
    # by the degree, not the number of samples.  Also filled in the empty
    # Examples section.
    return pu._fit(legvander, x, y, deg, rcond, full, w)
def legcompanion(c):
    """Return the scaled companion matrix of c.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is a Legendre basis polynomial.  Symmetry gives
    better eigenvalue estimates than the unscaled form, and with
    `numpy.linalg.eigvalsh` the eigenvalues are guaranteed real.

    Parameters
    ----------
    c : array_like
        1-D array of Legendre series coefficients ordered from low to
        high degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # Degree 1: the single root is available in closed form.
        return np.array([[-c[0]/c[1]]])

    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Basis scaling 1/sqrt(2i + 1) symmetrizes the off-diagonals.
    scl = 1./np.sqrt(2*np.arange(n) + 1)
    offdiag = np.arange(1, n)*scl[:n-1]*scl[1:n]
    flat = mat.reshape(-1)
    flat[1::n+1] = offdiag    # superdiagonal
    flat[n::n+1] = offdiag    # subdiagonal (mirror of the above)
    # Fold the series coefficients into the last column.
    mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*(n/(2*n - 1))
    return mat
def legroots(c):
    """
    Compute the roots of a Legendre series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * L_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series; real if all roots are real,
        otherwise complex.

    See Also
    --------
    numpy.polynomial.polynomial.polyroots
    numpy.polynomial.chebyshev.chebroots

    Notes
    -----
    The roots are the eigenvalues of the companion matrix.  Roots far from
    the origin may carry large errors because the series is numerically
    unstable there; multiple roots are also less accurate since the series
    is insensitive to perturbations near them.  Isolated roots near the
    origin can be polished with a few Newton iterations.

    The Legendre basis polynomials aren't powers of ``x``, so the results
    may seem unintuitive.

    Examples
    --------
    >>> import numpy.polynomial.legendre as leg
    >>> leg.legroots((1, 2, 3, 4))  # 4L_3 + 3L_2 + 2L_1 + 1L_0
    array([-0.85099543, -0.11407192,  0.51506735])  # may vary
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) <= 1:
        # Constant (or empty) series: no roots.
        return np.array([], dtype=c.dtype)
    if len(c) == 2:
        # Linear series: closed-form root.
        return np.array([-c[0]/c[1]])

    # Reversing both axes of the companion matrix reduces eigenvalue error.
    rotated = legcompanion(c)[::-1, ::-1]
    roots = la.eigvals(rotated)
    roots.sort()
    return roots
def leggauss(deg):
"""
Gauss-Legendre quadrature.
Computes the sample points and weights for Gauss-Legendre quadrature.
These sample points and weights will correctly integrate polynomials of
degree :math:`2*deg - 1` or less over the interval :math:`[-1, 1]` with
the weight function :math:`f(x) = 1`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
y : ndarray
1-D ndarray containing the weights.
Notes
-----
The results have only been tested up to degree 100, higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (L'_n(x_k) * L_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`L_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = pu._as_int(deg, "deg")
if ideg <= 0:
raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = legcompanion(c)
x = la.eigvalsh(m)
# improve roots by one application of Newton
dy = legval(x, c)
df = legval(x, legder(c))
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = legval(x, c[1:])
fm /= np.abs(fm).max()
df /= np.abs(df).max()
w = 1/(fm * df)
# for Legendre we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= 2. / w.sum()
return x, w
def legweight(x):
    """
    Weight function of the Legendre polynomials.

    The weight function is :math:`1` on the interval :math:`[-1, 1]`, with
    respect to which the Legendre polynomials are orthogonal (though not
    normalized).

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.
    """
    # The weight is identically one; the arithmetic broadcasts 1.0 to the
    # shape (and float dtype) of `x`.
    return x*0.0 + 1.0
#
# Legendre series class
#
class Legendre(ABCPolyBase):
    """A Legendre series class.
    The Legendre class provides the standard Python numerical methods
    '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
    attributes and methods listed below.
    Parameters
    ----------
    coef : array_like
        Legendre coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``.
    domain : (2,) array_like, optional
        Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
        to the interval ``[window[0], window[1]]`` by shifting and scaling.
        The default value is [-1., 1.].
    window : (2,) array_like, optional
        Window, see `domain` for its use. The default value is [-1., 1.].
    symbol : str, optional
        Symbol used to represent the independent variable in string
        representations of the polynomial expression, e.g. for printing.
        The symbol must be a valid Python identifier. Default value is 'x'.
        .. versionadded:: 1.24
    """
    # Virtual Functions
    # ABCPolyBase implements arithmetic, evaluation, calculus, fitting and
    # root-finding generically; these hooks bind it to the module-level
    # Legendre-basis routines.
    _add = staticmethod(legadd)
    _sub = staticmethod(legsub)
    _mul = staticmethod(legmul)
    _div = staticmethod(legdiv)
    _pow = staticmethod(legpow)
    _val = staticmethod(legval)
    _int = staticmethod(legint)
    _der = staticmethod(legder)
    _fit = staticmethod(legfit)
    _line = staticmethod(legline)
    _roots = staticmethod(legroots)
    _fromroots = staticmethod(legfromroots)
    # Virtual properties
    # Default domain and window coincide for Legendre series (both
    # [-1, 1]), so `window` deliberately reuses `legdomain`.
    domain = np.array(legdomain)
    window = np.array(legdomain)
    # 'P' is the conventional symbol for Legendre polynomials in printing.
    basis_name = 'P'
|
numpyREPO_NAMEnumpyPATH_START.@numpy_extracted@numpy-main@numpy@polynomial@legendre.py@.PATH_END.py
|
{
"filename": "objects.Range.ipynb",
"repo_name": "mwaskom/seaborn",
"repo_path": "seaborn_extracted/seaborn-master/doc/_docstrings/objects.Range.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import seaborn.objects as so
from seaborn import load_dataset
penguins = load_dataset("penguins")
```
This mark will often be used in the context of a stat transform that adds an errorbar interval:
```python
(
so.Plot(penguins, x="body_mass_g", y="species", color="sex")
.add(so.Dot(), so.Agg(), so.Dodge())
.add(so.Range(), so.Est(errorbar="sd"), so.Dodge())
)
```
One feature (or potential gotcha) is that the mark will pick up properties like `linestyle` and `linewidth`; exclude those properties from the relevant layer if this behavior is undesired:
```python
(
so.Plot(penguins, x="sex", y="body_mass_g", linestyle="species")
.facet("species")
.add(so.Line(marker="o"), so.Agg())
.add(so.Range(), so.Est(errorbar="sd"))
)
```
It's also possible to directly assign the minimum and maximum values for the range:
```python
(
penguins
.rename_axis(index="penguin")
.pipe(so.Plot, x="penguin", ymin="bill_depth_mm", ymax="bill_length_mm")
.add(so.Range(), color="island")
)
```
When `min`/`max` variables are neither computed as part of a transform nor explicitly assigned, the range will cover the full extent of the data at each unique observation on the orient axis:
```python
(
so.Plot(penguins, x="sex", y="body_mass_g")
.facet("species")
.add(so.Dots(pointsize=6))
.add(so.Range(linewidth=2))
)
```
```python
```
|
mwaskomREPO_NAMEseabornPATH_START.@seaborn_extracted@seaborn-master@doc@_docstrings@objects.Range.ipynb@.PATH_END.py
|
{
"filename": "test_fit.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/skimage/measure/tests/test_fit.py",
"type": "Python"
}
|
import numpy as np
import pytest
from skimage._shared import testing
from skimage._shared._warnings import expected_warnings
from skimage._shared.testing import (
arch32,
is_wasm,
assert_almost_equal,
assert_array_less,
assert_equal,
xfail,
assert_stacklevel,
)
from skimage.measure import CircleModel, EllipseModel, LineModelND, ransac
from skimage.measure.fit import _dynamic_max_trials
from skimage.transform import AffineTransform
def test_line_model_predict():
    # A line through the origin with direction (1, 1): predicting y from x
    # and then x from those y values must round-trip to the original xs.
    line = LineModelND()
    line.params = ((0, 0), (1, 1))
    xs = np.arange(-10, 10)
    ys = line.predict_y(xs)
    assert_almost_equal(xs, line.predict_x(ys))
def test_line_model_nd_invalid_input():
    # Prediction on an unfitted model (or with a superfluous second
    # positional argument) must raise.
    bad_calls = [
        lambda: LineModelND().predict_x(np.zeros(1)),
        lambda: LineModelND().predict_y(np.zeros(1)),
        lambda: LineModelND().predict_x(np.zeros(1), np.zeros(1)),
        lambda: LineModelND().predict_y(np.zeros(1)),
        lambda: LineModelND().predict_y(np.zeros(1), np.zeros(1)),
    ]
    for call in bad_calls:
        with testing.raises(ValueError):
            call()

    # A single point cannot define a line: estimate() reports failure.
    assert not LineModelND().estimate(np.empty((1, 3)))
    assert not LineModelND().estimate(np.empty((1, 2)))

    # Residuals are undefined before a successful estimate.
    with testing.raises(ValueError):
        LineModelND().residuals(np.empty((1, 3)))
def test_line_model_nd_predict():
    # Round-trip x -> y -> x on a line with a non-axis-aligned direction.
    line = LineModelND()
    line.params = (np.array([0, 0]), np.array([0.2, 0.8]))
    xs = np.arange(-10, 10)
    assert_almost_equal(xs, line.predict_x(line.predict_y(xs)))
def test_line_model_nd_estimate():
    # Fit a 3-D line to noisy samples of a known line and check the fit
    # geometrically (alignment), since (origin, direction) params are not
    # unique for a line.
    # generate original data without noise
    model0 = LineModelND()
    model0.params = (
        np.array([0, 0, 0], dtype='float'),
        np.array([1, 1, 1], dtype='float') / np.sqrt(3),
    )
    # we scale the unit vector with a factor 10 when generating points on the
    # line in order to compensate for the scale of the random noise
    data0 = (
        model0.params[0] + 10 * np.arange(-100, 100)[..., np.newaxis] * model0.params[1]
    )
    # add gaussian noise to data
    rng = np.random.default_rng(1234)
    data = data0 + rng.normal(size=data0.shape)
    # estimate parameters of noisy data
    model_est = LineModelND()
    model_est.estimate(data)
    # assert_almost_equal(model_est.residuals(data0), np.zeros(len(data)), 1)
    # test whether estimated parameters are correct
    # we use the following geometric property: two aligned vectors have
    # a cross-product equal to zero
    # test if direction vectors are aligned
    assert_almost_equal(
        np.linalg.norm(np.cross(model0.params[1], model_est.params[1])), 0, 1
    )
    # test if origins are aligned with the direction
    # (the estimated origin may lie anywhere along the true line, so only
    # its offset's alignment with the direction is checked)
    a = model_est.params[0] - model0.params[0]
    if np.linalg.norm(a) > 0:
        a /= np.linalg.norm(a)
    assert_almost_equal(np.linalg.norm(np.cross(model0.params[1], a)), 0, 1)
def test_line_model_nd_residuals():
    # Residuals are perpendicular distances to a line along the z-axis.
    model = LineModelND()
    model.params = (np.array([0, 0, 0]), np.array([0, 0, 1]))
    for point, expected in [([0, 0, 0], 0), ([0, 0, 1], 0), ([10, 0, 0], 10)]:
        assert_equal(abs(model.residuals(np.array([point]))), expected)
    # The `params` argument of residuals() overrides the fitted parameters.
    data = np.array([[10, 0, 0]])
    params = (np.array([0, 0, 0]), np.array([2, 0, 0]))
    assert_equal(abs(model.residuals(data, params=params)), 30)
def test_circle_model_invalid_input():
    # Circle estimation requires (N, 2) point arrays; 3-D points raise.
    with testing.raises(ValueError):
        CircleModel().estimate(np.empty((5, 3)))
def test_circle_model_predict():
    # Sampling a radius-5 circle at multiples of 90 degrees hits the four
    # axis intersections exactly.
    circle = CircleModel()
    circle.params = (0, 0, 5)
    angles = np.arange(0, 2 * np.pi, np.pi / 2)
    expected = np.array(((5, 0), (0, 5), (-5, 0), (0, -5)))
    assert_almost_equal(expected, circle.predict_xy(angles))
def test_circle_model_estimate():
    # Dense noisy samples of a known circle should be recovered to roughly
    # integer accuracy (decimal=0).
    truth = CircleModel()
    truth.params = (10, 12, 3)
    angles = np.linspace(0, 2 * np.pi, 1000)
    clean = truth.predict_xy(angles)
    rng = np.random.default_rng(1234)
    noisy = clean + rng.normal(size=clean.shape)
    fitted = CircleModel()
    fitted.estimate(noisy)
    assert_almost_equal(truth.params, fitted.params, 0)
def test_circle_model_int_overflow():
    # A unit circle far from the origin in int32 coordinates: the normal
    # equations must not overflow the integer dtype.
    pts = np.array([[1, 0], [0, 1], [-1, 0], [0, -1]], dtype=np.int32)
    pts += 500
    circle = CircleModel()
    circle.estimate(pts)
    assert_almost_equal(circle.params, [500, 500, 1])
def test_circle_model_residuals():
    # Radial distances to a radius-5 circle centered at the origin:
    # on the circle, off-axis outside it, and on-axis outside it.
    circle = CircleModel()
    circle.params = (0, 0, 5)
    assert_almost_equal(abs(circle.residuals(np.array([[5, 0]]))), 0)
    assert_almost_equal(
        abs(circle.residuals(np.array([[6, 6]]))), np.sqrt(2 * 6**2) - 5
    )
    assert_almost_equal(abs(circle.residuals(np.array([[10, 0]]))), 5)
def test_circle_model_insufficient_data():
    # Degenerate inputs must warn (and fail) rather than fit nonsense.
    model = CircleModel()
    # Fewer than three points, or three collinear points, cannot define a
    # circle.
    warning_message = ["Input does not contain enough significant data points."]
    with expected_warnings(warning_message):
        model.estimate(np.array([[1, 2], [3, 4]]))
    with expected_warnings(warning_message):
        model.estimate(np.array([[0, 0], [1, 1], [2, 2]]))
    # Six identical points: zero spread, so the fit is numerically
    # meaningless; estimate() must warn exactly once and return False.
    warning_message = (
        "Standard deviation of data is too small to estimate "
        "circle with meaningful precision."
    )
    with pytest.warns(RuntimeWarning, match=warning_message) as _warnings:
        assert not model.estimate(np.ones((6, 2)))
    # The warning should point at the caller's line, not skimage internals.
    assert_stacklevel(_warnings)
    assert len(_warnings) == 1
def test_circle_model_estimate_from_small_scale_data():
    # A circle of radius ~3.45e-100 centered near 1e-90: values close to
    # the float64 underflow limit must still be fitted, and accurately.
    true_params = np.array([1.23e-90, 2.34e-90, 3.45e-100], dtype=np.float64)
    angles = np.array(
        [
            0.107,
            0.407,
            1.108,
            1.489,
            2.216,
            2.768,
            3.183,
            3.969,
            4.840,
            5.387,
            5.792,
            6.139,
        ],
        dtype=np.float64,
    )
    samples = CircleModel().predict_xy(angles, params=true_params)
    model = CircleModel()
    # Estimation must succeed on the tiny-scale data...
    assert model.estimate(samples.astype(np.float64))
    # ...and recover the generating parameters.
    assert_almost_equal(true_params, model.params)
def test_ellipse_model_invalid_input():
    # Ellipse estimation requires (N, 2) point arrays; 3-D points raise.
    with testing.raises(ValueError):
        EllipseModel().estimate(np.empty((5, 3)))
def test_ellipse_model_predict():
    # An axis-aligned ellipse with semi-axes (5, 10) sampled every 90
    # degrees lands on the four axis intersections.
    ellipse = EllipseModel()
    ellipse.params = (0, 0, 5, 10, 0)
    angles = np.arange(0, 2 * np.pi, np.pi / 2)
    expected = np.array(((5, 0), (0, 10), (-5, 0), (0, -10)))
    assert_almost_equal(expected, ellipse.predict_xy(angles))
def test_ellipse_model_estimate():
    # Fit noisy samples of known ellipses at a sweep of orientations; the
    # RNG is reseeded per iteration so each angle sees identical noise.
    for angle in range(0, 180, 15):
        rad = np.deg2rad(angle)
        # generate original data without noise
        model0 = EllipseModel()
        model0.params = (10, 20, 15, 25, rad)
        t = np.linspace(0, 2 * np.pi, 100)
        data0 = model0.predict_xy(t)
        # add gaussian noise to data
        rng = np.random.default_rng(1234)
        data = data0 + rng.normal(size=data0.shape)
        # estimate parameters of noisy data
        model_est = EllipseModel()
        model_est.estimate(data)
        # test whether estimated parameters almost equal original parameters
        # (only the center is compared directly; axes/orientation are
        # checked indirectly through the residuals below, since their
        # parameterization is not unique)
        assert_almost_equal(model0.params[:2], model_est.params[:2], 0)
        res = model_est.residuals(data0)
        assert_array_less(res, np.ones(res.shape))
def test_ellipse_parameter_stability():
    """The fit should be modified so that a > b"""
    # Sweep orientations in 1-degree steps; the recovered (a, b, theta)
    # must match the generating values with a >= b throughout.
    for angle_deg in np.arange(0, 180 + 1, 1):
        theta = np.deg2rad(angle_deg)
        cos_t = np.cos(theta)
        sin_t = np.sin(theta)
        rot = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
        # Noise-free samples of a rotated 100-by-50 ellipse.
        t = np.linspace(0, 2 * np.pi, 20)
        semi_major = 100
        semi_minor = 50
        pts = rot @ np.array([semi_major * np.cos(t), semi_minor * np.sin(t)])
        model = EllipseModel()
        model.estimate(pts.T)
        _, _, a_est, b_est, theta_est = model.params
        assert_almost_equal(theta_est, theta)
        assert_almost_equal(a_est, semi_major)
        assert_almost_equal(b_est, semi_minor)
def test_ellipse_model_estimate_from_data():
    """Fit a real point cloud; parameters must stay bounded and nonzero.

    Regression test: on this dataset the fit previously suffered from an
    integer overflow that produced negative parameters — presumably with
    int32 input, which is why the data is declared dtype=np.int32.
    """
    data = np.array(
        [
            [264, 854],
            [265, 875],
            [268, 863],
            [270, 857],
            [275, 905],
            [285, 915],
            [305, 925],
            [324, 934],
            [335, 764],
            [336, 915],
            [345, 925],
            [345, 945],
            [354, 933],
            [355, 745],
            [364, 936],
            [365, 754],
            [375, 745],
            [375, 735],
            [385, 736],
            [395, 735],
            [394, 935],
            [405, 727],
            [415, 736],
            [415, 727],
            [425, 727],
            [426, 929],
            [435, 735],
            [444, 933],
            [445, 735],
            [455, 724],
            [465, 934],
            [465, 735],
            [475, 908],
            [475, 726],
            [485, 753],
            [485, 728],
            [492, 762],
            [495, 745],
            [491, 910],
            [493, 909],
            [499, 904],
            [505, 905],
            [504, 747],
            [515, 743],
            [516, 752],
            [524, 855],
            [525, 844],
            [525, 885],
            [533, 845],
            [533, 873],
            [535, 883],
            [545, 874],
            [543, 864],
            [553, 865],
            [553, 845],
            [554, 825],
            [554, 835],
            [563, 845],
            [565, 826],
            [563, 855],
            [563, 795],
            [565, 735],
            [573, 778],
            [572, 815],
            [574, 804],
            [575, 665],
            [575, 685],
            [574, 705],
            [574, 745],
            [575, 875],
            [572, 732],
            [582, 795],
            [579, 709],
            [583, 805],
            [583, 854],
            [586, 755],
            [584, 824],
            [585, 655],
            [581, 718],
            [586, 844],
            [585, 915],
            [587, 905],
            [594, 824],
            [593, 855],
            [590, 891],
            [594, 776],
            [596, 767],
            [593, 763],
            [603, 785],
            [604, 775],
            [603, 885],
            [605, 753],
            [605, 655],
            [606, 935],
            [603, 761],
            [613, 802],
            [613, 945],
            [613, 965],
            [615, 693],
            [617, 665],
            [623, 962],
            [624, 972],
            [625, 995],
            [633, 673],
            [633, 965],
            [633, 683],
            [633, 692],
            [633, 954],
            [634, 1016],
            [635, 664],
            [641, 804],
            [637, 999],
            [641, 956],
            [643, 946],
            [643, 926],
            [644, 975],
            [643, 655],
            [646, 705],
            [651, 664],
            [651, 984],
            [647, 665],
            [651, 715],
            [651, 725],
            [651, 734],
            [647, 809],
            [651, 825],
            [651, 873],
            [647, 900],
            [652, 917],
            [651, 944],
            [652, 742],
            [648, 811],
            [651, 994],
            [652, 783],
            [650, 911],
            [654, 879],
        ],
        dtype=np.int32,
    )
    # estimate parameters of real data
    model = EllipseModel()
    model.estimate(data)
    # test whether estimated parameters are smaller then 1000, so means stable
    assert_array_less(model.params[:4], np.full(4, 1000))
    # test whether all parameters are more than 0. Negative values were the
    # result of an integer overflow
    assert_array_less(np.zeros(4), np.abs(model.params[:4]))
def test_ellipse_model_estimate_from_far_shifted_data():
    """Estimation must remain numerically stable far from the origin."""
    # Small ellipse centered at (1e6, 2e6).
    true_params = np.array([1e6, 2e6, 0.5, 0.1, 0.5], dtype=np.float64)
    sample_angles = np.array(
        [0.107, 0.407, 1.108, 1.489, 2.216, 2.768,
         3.183, 3.969, 4.840, 5.387, 5.792, 6.139],
        dtype=np.float64,
    )
    points = EllipseModel().predict_xy(sample_angles, params=true_params)

    model = EllipseModel()
    # The far-shifted data must still be estimable...
    assert model.estimate(points.astype(np.float64))
    # ...and the fit must recover the generating parameters.
    assert_almost_equal(true_params, model.params)
# Passing on WASM
@xfail(
    condition=arch32 and not is_wasm,
    reason=(
        'Known test failure on 32-bit platforms. See links for '
        'details: '
        'https://github.com/scikit-image/scikit-image/issues/3091 '
        'https://github.com/scikit-image/scikit-image/issues/2670'
    ),
)
def test_ellipse_model_estimate_failers():
    """Degenerate inputs must make `estimate` return False."""
    model = EllipseModel()

    # Identical points: zero spread warns once and the fit fails.
    warning_message = (
        "Standard deviation of data is too small to estimate "
        "ellipse with meaningful precision."
    )
    with pytest.warns(RuntimeWarning, match=warning_message) as _warnings:
        assert not model.estimate(np.ones((6, 2)))
    assert_stacklevel(_warnings)
    assert len(_warnings) == 1

    # Three nearly collinear points are not enough for an ellipse.
    assert not model.estimate(np.array([[50, 80], [51, 81], [52, 80]]))
def test_ellipse_model_residuals():
    """Residuals are zero on the curve and measure distance off it."""
    model = EllipseModel()
    # Axis-aligned ellipse centered at the origin, a=10, b=5.
    model.params = (0, 0, 10, 5, 0)
    on_major_tip = np.array([[10, 0]])
    on_minor_tip = np.array([[0, 5]])
    off_curve = np.array([[0, 10]])  # 5 units beyond the minor-axis tip
    assert_almost_equal(abs(model.residuals(on_major_tip)), 0)
    assert_almost_equal(abs(model.residuals(on_minor_tip)), 0)
    assert_almost_equal(abs(model.residuals(off_curve)), 5)
def test_ransac_shape():
    """RANSAC must recover circle parameters and reject gross outliers."""
    # Noise-free samples of a known circle.
    model0 = CircleModel()
    model0.params = (10, 12, 3)
    t = np.linspace(0, 2 * np.pi, 1000)
    data0 = model0.predict_xy(t)

    # Corrupt three samples with gross outliers.
    outliers = (10, 30, 200)
    data0[outliers[0], :] = (1000, 1000)
    data0[outliers[1], :] = (-50, 50)
    data0[outliers[2], :] = (-100, -10)

    # Fix: the original test issued a second, identical ransac() call whose
    # result was discarded — dead code, removed.
    model_est, inliers = ransac(data0, CircleModel, 3, 5, rng=1)

    # The estimate must match the generating parameters, and none of the
    # corrupted indices may appear among the inliers.
    assert_almost_equal(model0.params, model_est.params)
    for outlier in outliers:
        assert outlier not in inliers
def test_ransac_geometric():
    """RANSAC must recover an affine transform despite gross outliers."""
    rng = np.random.default_rng(12373240)

    # Noise-free correspondences under a known affine transform.
    src = 100 * rng.random((50, 2))
    model0 = AffineTransform(scale=(0.5, 0.3), rotation=1, translation=(10, 20))
    dst = model0(src)

    # Corrupt three destination points.
    outliers = (0, 5, 20)
    dst[outliers[0]] = (10000, 10000)
    dst[outliers[1]] = (-100, 100)
    dst[outliers[2]] = (50, 50)

    # Estimate parameters of the corrupted data.
    model_est, inliers = ransac((src, dst), AffineTransform, 2, 20, rng=rng)

    # Parameters must match, and exactly the corrupted indices must be
    # flagged as outliers.  Fix: use boolean negation `~inliers` instead of
    # the non-idiomatic `inliers == False` comparison (PEP 8 / E712).
    assert_almost_equal(model0.params, model_est.params)
    assert np.all(np.nonzero(~inliers)[0] == outliers)
def test_ransac_is_data_valid():
    """When no sample passes `is_data_valid`, RANSAC returns (None, None)."""

    def is_data_valid(data):
        # Demands more points than the min_samples=2 drawn per trial,
        # so every trial is rejected.
        return data.shape[0] > 2

    with expected_warnings(["No inliers found"]):
        model, inliers = ransac(
            np.empty((10, 2)),
            LineModelND,
            2,
            np.inf,
            is_data_valid=is_data_valid,
            rng=1,
        )
    assert model is None
    assert inliers is None
def test_ransac_is_model_valid():
    """When every model fails `is_model_valid`, RANSAC returns (None, None)."""

    def reject_all(model, data):
        # Unconditionally invalidates every candidate model.
        return False

    with expected_warnings(["No inliers found"]):
        model, inliers = ransac(
            np.empty((10, 2)),
            LineModelND,
            2,
            np.inf,
            is_model_valid=reject_all,
            rng=1,
        )
    assert model is None
    assert inliers is None
def test_ransac_dynamic_max_trials():
    """Check `_dynamic_max_trials(n_inliers, n_samples, min_samples, p)`
    against published reference values (trial counts for given outlier
    ratio e, sample size, and success probability p).
    """
    # Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
    # Hartley, R.~I. and Zisserman, A., 2004,
    # Multiple View Geometry in Computer Vision, Second Edition,
    # Cambridge University Press, ISBN: 0521540518
    # e = 0%, min_samples = X
    assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
    assert_equal(_dynamic_max_trials(100, 100, 2, 1), 1)
    # e = 5%, min_samples = 2
    assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
    assert_equal(_dynamic_max_trials(95, 100, 2, 1), 16)
    # e = 10%, min_samples = 2
    assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
    assert_equal(_dynamic_max_trials(90, 100, 2, 1), 22)
    # e = 30%, min_samples = 2
    assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
    assert_equal(_dynamic_max_trials(70, 100, 2, 1), 54)
    # e = 50%, min_samples = 2
    assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
    assert_equal(_dynamic_max_trials(50, 100, 2, 1), 126)
    # e = 5%, min_samples = 8
    assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
    assert_equal(_dynamic_max_trials(95, 100, 8, 1), 34)
    # e = 10%, min_samples = 8
    assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
    assert_equal(_dynamic_max_trials(90, 100, 8, 1), 65)
    # e = 30%, min_samples = 8
    assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
    assert_equal(_dynamic_max_trials(70, 100, 8, 1), 608)
    # e = 50%, min_samples = 8
    assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
    assert_equal(_dynamic_max_trials(50, 100, 8, 1), 9210)
    # e = 0%, min_samples = 5
    # Extreme cases: probability 0 needs no trials; probability 1 with a
    # tiny inlier count yields a very large (clipped) trial count.
    assert_equal(_dynamic_max_trials(1, 100, 5, 0), 0)
    assert_equal(_dynamic_max_trials(1, 100, 5, 1), 360436504051)
def test_ransac_dynamic_max_trials_clipping():
    """Test that the function behaves well when `nom` or `denom` become almost 1.0."""
    # e = 0%, min_samples = 10
    # Ensure that (1 - inlier_ratio ** min_samples) approx 1 does not fail.
    assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
    # Expected value derived from float64 machine epsilon: the log
    # arguments are presumably clipped to [eps, 1 - eps] — TODO confirm
    # against the `_dynamic_max_trials` implementation.
    EPSILON = np.finfo(np.float64).eps
    desired = np.ceil(np.log(EPSILON) / np.log(1 - EPSILON))
    assert desired > 0
    assert_equal(_dynamic_max_trials(1, 100, 1000, 1), desired)
    # Ensure that (1 - probability) approx 1 does not fail.
    assert_equal(_dynamic_max_trials(1, 100, 10, 1e-40), 1)
    assert_equal(_dynamic_max_trials(1, 100, 1000, 1e-40), 1)
def test_ransac_invalid_input():
    """Out-of-range arguments to `ransac` must raise ValueError."""
    data = np.zeros((10, 2))

    # Each entry is a set of keyword arguments that makes the call invalid.
    bad_kwargs = [
        # `residual_threshold` must be greater than zero
        dict(min_samples=2, residual_threshold=-0.5),
        # `max_trials` must be greater than zero
        dict(min_samples=2, residual_threshold=0, max_trials=-1),
        # `stop_probability` must be in range (0, 1)
        dict(min_samples=2, residual_threshold=0, stop_probability=-1),
        dict(min_samples=2, residual_threshold=0, stop_probability=1.01),
        # `min_samples` must be in range (0, <number of samples>]
        dict(min_samples=0, residual_threshold=0),
        dict(min_samples=11, residual_threshold=0),
        # `min_samples` must be greater than zero
        dict(min_samples=-1, residual_threshold=0),
    ]
    for kwargs in bad_kwargs:
        with testing.raises(ValueError):
            ransac(data, None, **kwargs)
def test_ransac_sample_duplicates():
    """Each RANSAC trial must draw its minimal sample without replacement."""

    class DummyModel:
        """Dummy model to check for duplicates."""

        def estimate(self, data):
            # Every point in the drawn sample must be distinct.
            assert_equal(np.unique(data).size, data.size)
            return True

        def residuals(self, data):
            return np.ones(len(data), dtype=np.float64)

    # Four unique points forced through 10 trials; DummyModel.estimate
    # asserts that no drawn sample contains a duplicate point.
    data = np.arange(4)
    with expected_warnings(["No inliers found"]):
        ransac(data, DummyModel, min_samples=3, residual_threshold=0.0, max_trials=10)
def test_ransac_with_no_final_inliers():
    """A zero residual threshold on noisy data yields no model at all."""
    data = np.random.rand(5, 2)
    with expected_warnings(['No inliers found. Model not fitted']):
        model, inliers = ransac(
            data,
            model_class=LineModelND,
            min_samples=3,
            residual_threshold=0,
            rng=1523427,
        )
    assert model is None
    assert inliers is None
def test_ransac_non_valid_best_model():
    """Example from GH issue #5572"""

    def is_model_valid(model, *random_data) -> bool:
        """Allow models with a maximum of 10 degree tilt from the vertical"""
        tilt = abs(np.arccos(np.dot(model.params[1], [0, 0, 1])))
        return tilt <= (10 / 180 * np.pi)

    # Points scattered around a line whose direction is tilted beyond the
    # allowed 10 degrees, so every candidate model should be invalidated.
    rng = np.random.RandomState(1)
    data = np.linspace([0, 0, 0], [0.3, 0, 1], 1000) + rng.rand(1000, 3) - 0.5

    with expected_warnings(["Estimated model is not valid"]):
        ransac(
            data,
            LineModelND,
            min_samples=2,
            residual_threshold=0.3,
            max_trials=50,
            rng=0,
            is_model_valid=is_model_valid,
        )
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@skimage@measure@tests@test_fit.py@.PATH_END.py
|
{
"filename": "win32.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/colorama/py2/colorama/win32.py",
"type": "Python"
}
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
# Standard-handle identifiers accepted by GetStdHandle.
STDOUT = -11
STDERR = -12

# Console-mode flag that enables virtual-terminal (ANSI) processing.
ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Not on Windows (or ctypes lacks WinDLL): install no-op fallbacks so
    # the rest of the package can import this module unconditionally.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
    winapi_test = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER

    COORD = wintypes._COORD

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Human-readable dump of every field, for debugging.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )

    # Declare argtypes/restype for each kernel32 entry point used below so
    # ctypes marshals arguments correctly regardless of pointer width.
    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE

    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [
        wintypes.HANDLE,
        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
    ]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL

    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    _SetConsoleTextAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
    ]
    _SetConsoleTextAttribute.restype = wintypes.BOOL

    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
    _SetConsoleCursorPosition.argtypes = [
        wintypes.HANDLE,
        COORD,
    ]
    _SetConsoleCursorPosition.restype = wintypes.BOOL

    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
    _FillConsoleOutputCharacterA.argtypes = [
        wintypes.HANDLE,
        c_char,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputCharacterA.restype = wintypes.BOOL

    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
    _FillConsoleOutputAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputAttribute.restype = wintypes.BOOL

    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleW
    _SetConsoleTitleW.argtypes = [
        wintypes.LPCWSTR
    ]
    _SetConsoleTitleW.restype = wintypes.BOOL

    _GetConsoleMode = windll.kernel32.GetConsoleMode
    _GetConsoleMode.argtypes = [
        wintypes.HANDLE,
        POINTER(wintypes.DWORD)
    ]
    _GetConsoleMode.restype = wintypes.BOOL

    _SetConsoleMode = windll.kernel32.SetConsoleMode
    _SetConsoleMode.argtypes = [
        wintypes.HANDLE,
        wintypes.DWORD
    ]
    _SetConsoleMode.restype = wintypes.BOOL
def _winapi_test(handle):
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return bool(success)
def winapi_test():
return any(_winapi_test(h) for h in
(_GetStdHandle(STDOUT), _GetStdHandle(STDERR)))
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = _GetStdHandle(stream_id)
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = _GetStdHandle(stream_id)
return _SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position, adjust=True):
        """Move the console cursor to `position`.

        `position` is an ANSI-style, 1-based (row, column) pair; it is
        converted to the 0-based (x, y) COORD that the Win32 call expects.
        When `adjust` is true the coordinates are offset by the scroll
        position of the visible window.
        """
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        if adjust:
            # Adjust for viewport's scroll position
            sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
            adjusted_position.Y += sr.Top
            adjusted_position.X += sr.Left
        # Resume normal processing
        handle = _GetStdHandle(stream_id)
        return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = _GetStdHandle(stream_id)
char = c_char(char.encode())
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = _GetStdHandle(stream_id)
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
def SetConsoleTitle(title):
return _SetConsoleTitleW(title)
def GetConsoleMode(handle):
mode = wintypes.DWORD()
success = _GetConsoleMode(handle, byref(mode))
if not success:
raise ctypes.WinError()
return mode.value
def SetConsoleMode(handle, mode):
success = _SetConsoleMode(handle, mode)
if not success:
raise ctypes.WinError()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@colorama@py2@colorama@win32.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/merging/__init__.py",
"type": "Python"
}
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@merging@__init__.py@.PATH_END.py
|
|
{
"filename": "plotting.py",
"repo_name": "Hoeijmakers/StarRotator",
"repo_path": "StarRotator_extracted/StarRotator-master/lib/plotting.py",
"type": "Python"
}
|
def plot_star_3D():
    """Animate a spherical-harmonic (m=4, l=4) pattern on a unit sphere.

    NOTE(review): explicitly unfinished — the loop calls `pdb.set_trace()`
    after every frame, so the animation stops at the debugger each step
    and the function never returns normally.
    """
    #THIS IS NOT WORKING, ITS A WIP.
    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import cm, colors
    from mpl_toolkits.mplot3d import Axes3D
    from scipy.special import sph_harm #import package to calculate spherical harmonics
    import pdb

    theta = np.linspace(0, 2*np.pi, 100) #setting range for theta
    phi = np.linspace(0, np.pi, 100) #setting range for phi
    phi, theta = np.meshgrid(phi, theta) #setting the grid for phi and theta

    #Setting the cartesian coordinates of the unit sphere
    #Converting phi, theta, z to cartesian coordinates
    x = np.sin(phi)*np.cos(theta)
    y = np.sin(phi)*np.sin(theta)
    z = np.cos(phi)

    #Setting the aspect ratio to 1 which makes the sphere look spherical and not elongated
    fig = plt.figure(figsize=plt.figaspect(1.)) #aspect ratio
    axes = fig.add_subplot(111, projection='3d') #sets figure to 3d

    fig.suptitle('m=4 l=4', fontsize=18, x=0.52, y=.85)
    m, l = 4, 4 #m and l control the mode of pulsation and overall appearance of the figure
    #Calculating the spherical harmonic Y(l,m) and normalizing it
    axes.view_init(30, 45)
    plt.ion()  # interactive mode so draw_idle() updates the open window
    plt.show()

    for idx,angle in enumerate(np.linspace(0,360,20)):
        figcolors = sph_harm(m, l, theta+angle, phi).real
        # Normalize harmonic values to [0, 1] for use as face colors.
        figmax, figmin = figcolors.max(), figcolors.min()
        figcolors = (figcolors-figmin)/(figmax-figmin)
        #Sets the plot surface and colors of the figure where seismic is the color scheme
        axes.plot_surface(x, y, z, rstride=1, cstride=1, facecolors=cm.autumn(figcolors))
        fig.canvas.draw_idle()
        pdb.set_trace()
def plot_star_2D(x, y, z, cmap="hot", quantities=('', '', ''), units=('', '', ''), noshow=False):
    """Plots the projected stellar disk.

    Parameters
    ----------
    x : np.array()
        The x coordinate of the map.
    y : np.array()
        The y coordinate of the map
    z : np.array()
        Two-dimensional image corresponding to the x and y axes.
    cmap: str (optional)
        The color map identifier corresponding to a matplotlib colormap.
        Defaults to "hot".
    quantities : sequence of three str (optional)
        The axis labels (quantities) for x, y and the color bar.
    units : sequence of three str (optional)
        The corresponding units; appended as " (unit)" when non-empty.
    noshow : bool (optional)
        If True, the plot window is not shown (useful for batch use).

    Returns
    -------
    An open matplotlib plot window.
    """
    import matplotlib.pyplot as plt

    # Fix: validate BOTH label sequences up front. The original only
    # checked `units`, so a short `quantities` failed later with an
    # uninformative IndexError.  Defaults are now immutable tuples
    # (mutable list defaults are a Python footgun).
    if len(quantities) != 3:
        raise ValueError("For passing quantities, please provide a list containing three strings.")
    if len(units) != 3:
        raise ValueError("For passing units, please provide a list containing three strings.")

    # Build "<quantity> (<unit>)" labels, omitting the unit when empty.
    xlabel, ylabel, zlabel = (
        quantity + (' (%s)' % unit if unit != '' else '')
        for quantity, unit in zip(quantities, units)
    )

    plt.imshow(z, cmap=cmap, extent=[min(x), max(x), min(y), max(y)])
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    cbar = plt.colorbar()
    cbar.set_label(zlabel, labelpad=0, rotation=270)
    if not noshow:
        plt.show()
    return
|
HoeijmakersREPO_NAMEStarRotatorPATH_START.@StarRotator_extracted@StarRotator-master@lib@plotting.py@.PATH_END.py
|
{
"filename": "test_merge_ordered.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/reshape/merge/test_merge_ordered.py",
"type": "Python"
}
|
import re
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
merge_ordered,
)
import pandas._testing as tm
@pytest.fixture
def left():
    """Left frame: keys a/c/e with a numeric `lvalue` column."""
    data = {"key": list("ace"), "lvalue": [1, 2.0, 3]}
    return DataFrame(data)
@pytest.fixture
def right():
    """Right frame: keys b/c/d/f with a numeric `rvalue` column."""
    data = {"key": list("bcdf"), "rvalue": [1, 2, 3.0, 4]}
    return DataFrame(data)
class TestMergeOrdered:
    """Tests for :func:`pandas.merge_ordered` (outer-ordered merge with
    optional group-wise forward filling), plus a few `pd.concat` edge cases.
    """

    def test_basic(self, left, right):
        # Plain ordered merge: union of keys, NaN where a side is missing.
        result = merge_ordered(left, right, on="key")
        expected = DataFrame(
            {
                "key": ["a", "b", "c", "d", "e", "f"],
                "lvalue": [1, np.nan, 2, np.nan, 3, np.nan],
                "rvalue": [np.nan, 1, 2, 3, np.nan, 4],
            }
        )
        tm.assert_frame_equal(result, expected)

    def test_ffill(self, left, right):
        # fill_method="ffill" forward-fills the gaps left by the merge.
        result = merge_ordered(left, right, on="key", fill_method="ffill")
        expected = DataFrame(
            {
                "key": ["a", "b", "c", "d", "e", "f"],
                "lvalue": [1.0, 1, 2, 2, 3, 3.0],
                "rvalue": [np.nan, 1, 2, 3, 3, 4],
            }
        )
        tm.assert_frame_equal(result, expected)

    def test_multigroup(self, left, right):
        # left_by repeats the merge per group; ffill stays within groups.
        left = pd.concat([left, left], ignore_index=True)
        left["group"] = ["a"] * 3 + ["b"] * 3
        result = merge_ordered(
            left, right, on="key", left_by="group", fill_method="ffill"
        )
        expected = DataFrame(
            {
                "key": ["a", "b", "c", "d", "e", "f"] * 2,
                "lvalue": [1.0, 1, 2, 2, 3, 3.0] * 2,
                "rvalue": [np.nan, 1, 2, 3, 3, 4] * 2,
            }
        )
        expected["group"] = ["a"] * 6 + ["b"] * 6
        tm.assert_frame_equal(result, expected.loc[:, result.columns])
        # Mirrored call with right_by must produce the same frame.
        result2 = merge_ordered(
            right, left, on="key", right_by="group", fill_method="ffill"
        )
        tm.assert_frame_equal(result, result2.loc[:, result.columns])
        # Without fill_method the group column must still be fully populated.
        result = merge_ordered(left, right, on="key", left_by="group")
        assert result["group"].notna().all()

    @pytest.mark.filterwarnings(
        "ignore:Passing a BlockManager|Passing a SingleBlockManager:DeprecationWarning"
    )
    def test_merge_type(self, left, right):
        # DataFrame subclasses must be preserved through merge.
        class NotADataFrame(DataFrame):
            @property
            def _constructor(self):
                return NotADataFrame

        nad = NotADataFrame(left)
        result = nad.merge(right, on="key")

        assert isinstance(result, NotADataFrame)

    @pytest.mark.parametrize(
        "df_seq, pattern",
        [
            ((), "[Nn]o objects"),
            ([], "[Nn]o objects"),
            ({}, "[Nn]o objects"),
            ([None], "objects.*None"),
            ([None, None], "objects.*None"),
        ],
    )
    def test_empty_sequence_concat(self, df_seq, pattern):
        # GH 9157
        # Empty or all-None sequences must raise a descriptive ValueError.
        with pytest.raises(ValueError, match=pattern):
            pd.concat(df_seq)

    @pytest.mark.parametrize(
        "arg", [[DataFrame()], [None, DataFrame()], [DataFrame(), None]]
    )
    def test_empty_sequence_concat_ok(self, arg):
        # A sequence containing at least one DataFrame is accepted.
        pd.concat(arg)

    def test_doc_example(self):
        # The example from the merge_ordered docstring must keep working.
        left = DataFrame(
            {
                "group": list("aaabbb"),
                "key": ["a", "c", "e", "a", "c", "e"],
                "lvalue": [1, 2, 3] * 2,
            }
        )
        right = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
        result = merge_ordered(left, right, fill_method="ffill", left_by="group")

        expected = DataFrame(
            {
                "group": list("aaaaabbbbb"),
                "key": ["a", "b", "c", "d", "e"] * 2,
                "lvalue": [1, 1, 2, 2, 3] * 2,
                "rvalue": [np.nan, 1, 2, 3, 3] * 2,
            }
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "left, right, on, left_by, right_by, expected",
        [
            (
                {"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]},
                {"T": [2], "E": [1]},
                ["T"],
                ["G", "H"],
                None,
                {
                    "G": ["g"] * 3,
                    "H": ["h"] * 3,
                    "T": [1, 2, 3],
                    "E": [np.nan, 1.0, np.nan],
                },
            ),
            (
                {"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]},
                {"T": [2], "E": [1]},
                "T",
                ["G", "H"],
                None,
                {
                    "G": ["g"] * 3,
                    "H": ["h"] * 3,
                    "T": [1, 2, 3],
                    "E": [np.nan, 1.0, np.nan],
                },
            ),
            (
                {"T": [2], "E": [1]},
                {"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]},
                ["T"],
                None,
                ["G", "H"],
                {
                    "T": [1, 2, 3],
                    "E": [np.nan, 1.0, np.nan],
                    "G": ["g"] * 3,
                    "H": ["h"] * 3,
                },
            ),
        ],
    )
    def test_list_type_by(self, left, right, on, left_by, right_by, expected):
        # GH 35269
        # `on`/`left_by`/`right_by` must accept both str and list-of-str.
        left = DataFrame(left)
        right = DataFrame(right)
        result = merge_ordered(
            left=left,
            right=right,
            on=on,
            left_by=left_by,
            right_by=right_by,
        )
        expected = DataFrame(expected)

        tm.assert_frame_equal(result, expected)

    def test_left_by_length_equals_to_right_shape0(self):
        # GH 38166
        # len(left_by) == right.shape[0] must not be misinterpreted.
        left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE"))
        right = DataFrame([[2, 1]], columns=list("ET"))
        result = merge_ordered(left, right, on="E", left_by=["G", "H"])
        expected = DataFrame(
            {"G": ["g"] * 3, "H": ["h"] * 3, "E": [1, 2, 3], "T": [np.nan, 1.0, np.nan]}
        )

        tm.assert_frame_equal(result, expected)

    def test_elements_not_in_by_but_in_df(self):
        # GH 38167
        # A by-key that is not a column must raise KeyError naming it.
        left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHE"))
        right = DataFrame([[2, 1]], columns=list("ET"))
        msg = r"\{'h'\} not found in left columns"
        with pytest.raises(KeyError, match=msg):
            merge_ordered(left, right, on="E", left_by=["G", "h"])

    @pytest.mark.parametrize("invalid_method", ["linear", "carrot"])
    def test_ffill_validate_fill_method(self, left, right, invalid_method):
        # GH 55884
        # Only "ffill" (or None) is a valid fill_method.
        with pytest.raises(
            ValueError, match=re.escape("fill_method must be 'ffill' or None")
        ):
            merge_ordered(left, right, on="key", fill_method=invalid_method)

    def test_ffill_left_merge(self):
        # GH 57010
        # how="left" combined with ffill must keep only left keys.
        df1 = DataFrame(
            {
                "key": ["a", "c", "e", "a", "c", "e"],
                "lvalue": [1, 2, 3, 1, 2, 3],
                "group": ["a", "a", "a", "b", "b", "b"],
            }
        )
        df2 = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
        result = merge_ordered(
            df1, df2, fill_method="ffill", left_by="group", how="left"
        )
        expected = DataFrame(
            {
                "key": ["a", "c", "e", "a", "c", "e"],
                "lvalue": [1, 2, 3, 1, 2, 3],
                "group": ["a", "a", "a", "b", "b", "b"],
                "rvalue": [np.nan, 2.0, 2.0, np.nan, 2.0, 2.0],
            }
        )
        tm.assert_frame_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@reshape@merge@test_merge_ordered.py@.PATH_END.py
|
{
"filename": "check_configuration.py",
"repo_name": "BiaPyX/BiaPy",
"repo_path": "BiaPy_extracted/BiaPy-master/biapy/engine/check_configuration.py",
"type": "Python"
}
|
import os
import numpy as np
import collections
from biapy.utils.misc import get_checkpoint_path
from biapy.data.data_manipulation import check_value
def check_configuration(cfg, jobname, check_data_paths=True):
"""
Check if the configuration is good.
"""
if cfg.SYSTEM.NUM_WORKERS < 0:
raise ValueError("'SYSTEM.NUM_WORKERS' can not be less than 0")
dim_count = 2 if cfg.PROBLEM.NDIM == "2D" else 3
# Adjust overlap and padding in the default setting if it was not set
opts = []
if cfg.PROBLEM.NDIM == "3D":
if cfg.DATA.TRAIN.OVERLAP == (0, 0):
opts.extend(["DATA.TRAIN.OVERLAP", (0, 0, 0)])
if cfg.DATA.TRAIN.PADDING == (0, 0):
opts.extend(["DATA.TRAIN.PADDING", (0, 0, 0)])
if cfg.DATA.VAL.OVERLAP == (0, 0):
opts.extend(["DATA.VAL.OVERLAP", (0, 0, 0)])
if cfg.DATA.VAL.PADDING == (0, 0):
opts.extend(["DATA.VAL.PADDING", (0, 0, 0)])
if cfg.DATA.TEST.OVERLAP == (0, 0):
opts.extend(["DATA.TEST.OVERLAP", (0, 0, 0)])
if cfg.DATA.TEST.PADDING == (0, 0):
opts.extend(["DATA.TEST.PADDING", (0, 0, 0)])
# Adjust channel weights
if cfg.PROBLEM.TYPE == "INSTANCE_SEG":
channels_provided = len(cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNELS.replace("Dv2", "D"))
if cfg.MODEL.N_CLASSES > 2:
channels_provided += 1
if len(cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNEL_WEIGHTS) != channels_provided:
if cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNEL_WEIGHTS == (1, 1):
opts.extend(
[
"PROBLEM.INSTANCE_SEG.DATA_CHANNEL_WEIGHTS",
(1,) * channels_provided,
]
)
for phase in ["TRAIN", "VAL", "TEST"]:
if getattr(cfg.DATA, phase).FILTER_SAMPLES.ENABLE:
if not (
len(getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS)
== len(getattr(cfg.DATA, phase).FILTER_SAMPLES.VALUES)
== len(getattr(cfg.DATA, phase).FILTER_SAMPLES.SIGNS)
):
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.PROPS', 'DATA.TRAIN.FILTER_SAMPLES.VALUES' and "
"'DATA.TRAIN.FILTER_SAMPLES.SIGNS' need to have same length"
)
foreground_filter_requested = any(
[True for cond in getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS if "foreground" in cond]
)
if foreground_filter_requested:
if cfg.PROBLEM.TYPE not in ["SEMANTIC_SEG", "INSTANCE_SEG", "DETECTION"]:
raise ValueError(
"'foreground' property can only be used in SEMANTIC_SEG, INSTANCE_SEG and DETECTION workflows"
)
if phase == "TEST" and not cfg.DATA.TEST.LOAD_GT and cfg.DATA.TEST.USE_VAL_AS_TEST:
raise ValueError(
"'foreground' condition can not be used for filtering when test ground truth is not provided"
)
if len(getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS) == 0:
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.PROPS' can not be an empty list when "
"'DATA.TRAIN.FILTER_SAMPLES.ENABLE' is enabled"
)
for i in range(len(getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS)):
if not isinstance(
getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS[i],
list,
):
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.PROPS' need to be a list of list. E.g. [ ['mean'], ['min', 'max'] ]"
)
if not isinstance(
getattr(cfg.DATA, phase).FILTER_SAMPLES.VALUES[i],
list,
):
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.VALUES' need to be a list of list. E.g. [ [10], [15, 3] ]"
)
if not isinstance(
getattr(cfg.DATA, phase).FILTER_SAMPLES.SIGNS[i],
list,
):
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.SIGNS' need to be a list of list. E.g. [ ['gt'], ['le', 'gt'] ]"
)
if not (
len(getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS[i])
== len(getattr(cfg.DATA, phase).FILTER_SAMPLES.VALUES[i])
== len(getattr(cfg.DATA, phase).FILTER_SAMPLES.SIGNS[i])
):
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.PROPS', 'DATA.TRAIN.FILTER_SAMPLES.VALUES' and "
"'DATA.TRAIN.FILTER_SAMPLES.SIGNS' need to have same length"
)
# Check for unique values
if (
len(
[
item
for item, count in collections.Counter(
getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS[i]
).items()
if count > 1
]
)
> 0
):
raise ValueError("Non repeated values are allowed in 'DATA.TRAIN.FILTER_SAMPLES'")
for j in range(len(getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS[i])):
if getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS[i][j] not in ["foreground", "mean", "min", "max"]:
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.PROPS' can only be one among these: ['foreground', 'mean', 'min', 'max']"
)
if getattr(cfg.DATA, phase).FILTER_SAMPLES.SIGNS[i][j] not in [
"gt",
"ge",
"lt",
"le",
]:
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.SIGNS' can only be one among these: ['gt', 'ge', 'lt', 'le']"
)
if getattr(cfg.DATA, phase).FILTER_SAMPLES.PROPS[i][j] == "foreground" and not check_value(
getattr(cfg.DATA, phase).FILTER_SAMPLES.VALUES[i][j]
):
raise ValueError(
"'foreground' property value can only be in [0, 1] range (check 'DATA.TRAIN.FILTER_SAMPLES.VALUES' values)"
)
if len(cfg.DATA.TRAIN.RESOLUTION) == 1 and cfg.DATA.TRAIN.RESOLUTION[0] == -1:
opts.extend(["DATA.TRAIN.RESOLUTION", (1,) * dim_count])
if len(cfg.DATA.VAL.RESOLUTION) == 1 and cfg.DATA.VAL.RESOLUTION[0] == -1:
opts.extend(["DATA.VAL.RESOLUTION", (1,) * dim_count])
if len(cfg.DATA.TEST.RESOLUTION) == 1 and cfg.DATA.TEST.RESOLUTION[0] == -1:
opts.extend(["DATA.TEST.RESOLUTION", (1,) * dim_count])
if cfg.TEST.POST_PROCESSING.REPARE_LARGE_BLOBS_SIZE != -1:
if cfg.PROBLEM.TYPE != "INSTANCE_SEG":
raise ValueError(
"'TEST.POST_PROCESSING.REPARE_LARGE_BLOBS_SIZE' can only be set when 'PROBLEM.TYPE' is 'INSTANCE_SEG'"
)
if cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNELS != "BP":
raise ValueError(
"'TEST.POST_PROCESSING.REPARE_LARGE_BLOBS_SIZE' only makes sense when 'PROBLEM.INSTANCE_SEG.DATA_CHANNELS == 'BP'"
)
if cfg.TEST.POST_PROCESSING.DET_WATERSHED and cfg.PROBLEM.TYPE != "DETECTION":
raise ValueError("'TEST.POST_PROCESSING.DET_WATERSHED' can only be set when 'PROBLEM.TYPE' is 'DETECTION'")
if cfg.TEST.POST_PROCESSING.DET_WATERSHED:
for x in cfg.TEST.POST_PROCESSING.DET_WATERSHED_FIRST_DILATION:
if not isinstance(x, list):
raise ValueError("'TEST.POST_PROCESSING.DET_WATERSHED_FIRST_DILATION' needs to be a list of list")
if any(y == -1 for y in x):
raise ValueError(
"Please set 'TEST.POST_PROCESSING.DET_WATERSHED_FIRST_DILATION' when using 'TEST.POST_PROCESSING.DET_WATERSHED_FIRST_DILATION'"
)
if len(x) != dim_count:
raise ValueError(
"'TEST.POST_PROCESSING.DET_WATERSHED_FIRST_DILATION' needs to be of dimension {} for {} problem".format(
dim_count, cfg.PROBLEM.NDIM
)
)
if cfg.TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES != [-1]:
if len(cfg.TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES) > cfg.MODEL.N_CLASSES:
raise ValueError(
"'TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES' length can't be greater than 'MODEL.N_CLASSES'"
)
if np.max(cfg.TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES) > cfg.MODEL.N_CLASSES:
raise ValueError(
"'TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES' can not have a class number greater than 'MODEL.N_CLASSES'"
)
min_class = np.min(cfg.TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES)
if not all(
cfg.TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES
== np.array(
range(
min_class,
len(cfg.TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES) + 1,
)
)
):
raise ValueError(
"'TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_CLASSES' must be consecutive, e.g [1,2,3,4..]"
)
if len(cfg.TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_PATCH) != dim_count:
raise ValueError(
"'TEST.POST_PROCESSING.DET_WATERSHED_DONUTS_PATCH' needs to be of dimension {} for {} problem".format(
dim_count, cfg.PROBLEM.NDIM
)
)
if not (
len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS)
== len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.VALUES)
== len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGNS)
):
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS', 'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.VALUES' and "
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGNS' need to have same length"
)
if (
cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.ENABLE
and cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.ENABLE
):
if cfg.PROBLEM.TYPE not in ["INSTANCE_SEG", "DETECTION"]:
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS' can only be used in INSTANCE_SEG and DETECTION workflows"
)
if len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS) == 0:
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS' can not be an empty list when "
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.ENABLE' is enabled"
)
for i in range(len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS)):
if not isinstance(
cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS[i],
list,
):
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS' need to be a list of list. E.g. [ ['circularity'], ['area', 'diameter'] ]"
)
if not isinstance(
cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.VALUES[i],
list,
):
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.VALUES' need to be a list of list. E.g. [ [10], [15, 3] ]"
)
if not isinstance(
cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGNS[i],
list,
):
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGNS' need to be a list of list. E.g. [ ['gt'], ['le', 'gt'] ]"
)
if not (
len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS[i])
== len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.VALUES[i])
== len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGNS[i])
):
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS', 'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.VALUES' and "
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGNS' need to have same length"
)
# Check for unique values
if (
len(
[
item
for item, count in collections.Counter(
cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS[i]
).items()
if count > 1
]
)
> 0
):
raise ValueError(
"Non repeated values are allowed in 'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES'"
)
for j in range(len(cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS[i])):
if cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS[i][j] not in [
"circularity",
"npixels",
"area",
"diameter",
"elongation",
"sphericity",
"perimeter",
]:
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS' can only be one among these: ['circularity', 'npixels', 'area', 'diameter', 'elongation', 'sphericity', 'perimeter']"
)
if (
cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS[i][j]
in ["circularity", "elongation"]
and cfg.PROBLEM.NDIM != "2D"
):
raise ValueError(
"'circularity' or 'elongation' properties can only be measured in 2D images. Delete them from 'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS'. "
"'circularity'-kind property in 3D is 'sphericity'"
)
if (
cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS[i][j] == "sphericity"
and cfg.PROBLEM.NDIM != "3D"
):
raise ValueError(
"'sphericity' property can only be measured in 3D images. Delete it from 'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS'. "
"'sphericity'-kind property in 2D is 'circularity'"
)
if cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGNS[i][j] not in [
"gt",
"ge",
"lt",
"le",
]:
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGNS' can only be one among these: ['gt', 'ge', 'lt', 'le']"
)
if cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS[i][
j
] == "circularity" and not check_value(
cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.VALUES[i][j]
):
raise ValueError(
"Circularity can only have values in [0, 1] range (check 'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.VALUES' values)"
)
if cfg.PROBLEM.TYPE != "INSTANCE_SEG":
if cfg.TEST.POST_PROCESSING.VORONOI_ON_MASK:
raise ValueError("'TEST.POST_PROCESSING.VORONOI_ON_MASK' can only be enabled in a 'INSTANCE_SEG' problem")
if cfg.TEST.POST_PROCESSING.CLEAR_BORDER:
raise ValueError("'TEST.POST_PROCESSING.CLEAR_BORDER' can only be enabled in a 'INSTANCE_SEG' problem")
if cfg.TEST.POST_PROCESSING.DET_WATERSHED and cfg.PROBLEM.TYPE != "DETECTION":
raise ValueError("'TEST.POST_PROCESSING.DET_WATERSHED' can only be set when 'PROBLEM.TYPE' is 'DETECTION'")
if cfg.TEST.POST_PROCESSING.MEDIAN_FILTER:
if len(cfg.TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS) == 0:
raise ValueError(
"Configure 'TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS' as 'TEST.POST_PROCESSING.MEDIAN_FILTER' is enabled"
)
if len(cfg.TEST.POST_PROCESSING.MEDIAN_FILTER_SIZE) == 0:
raise ValueError(
"Configure 'TEST.POST_PROCESSING.MEDIAN_FILTER_SIZE' as 'TEST.POST_PROCESSING.MEDIAN_FILTER' is enabled"
)
assert len(cfg.TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS) == len(
cfg.TEST.POST_PROCESSING.MEDIAN_FILTER_SIZE
), "'TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS' and 'TEST.POST_PROCESSING.MEDIAN_FILTER_SIZE' lenght must be the same"
if len(cfg.TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS) > 0 and cfg.PROBLEM.TYPE not in [
"SEMANTIC_SEG",
"INSTANCE_SEG",
"DETECTION",
]:
raise ValueError(
"'TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS' can only be used when 'PROBLEM.TYPE' is among "
"['SEMANTIC_SEG', 'INSTANCE_SEG', 'DETECTION']"
)
for f in cfg.TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS:
if cfg.PROBLEM.NDIM == "2D" and "z" in f and not cfg.TEST.ANALIZE_2D_IMGS_AS_3D_STACK:
raise ValueError(
"In 2D z axis filtering can not be done unless 'TEST.ANALIZE_2D_IMGS_AS_3D_STACK' is selected. "
"So, please, remove it from 'TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS'"
)
if f not in ["xy", "yx", "zy", "yz", "zx", "xz", "z"]:
raise ValueError(
"'TEST.POST_PROCESSING.MEDIAN_FILTER_AXIS' options are ['xy', 'yx', 'zy', 'yz', 'zx', 'xz', 'z']"
)
# First update is done here as some checks from this point need to have those updates
if len(opts) > 0:
cfg.merge_from_list(opts)
opts = []
#### General checks ####
assert cfg.PROBLEM.NDIM in ["2D", "3D"], "Problem needs to be '2D' or '3D'"
assert cfg.PROBLEM.TYPE in [
"SEMANTIC_SEG",
"INSTANCE_SEG",
"CLASSIFICATION",
"DETECTION",
"DENOISING",
"SUPER_RESOLUTION",
"SELF_SUPERVISED",
"IMAGE_TO_IMAGE",
], "PROBLEM.TYPE not in ['SEMANTIC_SEG', 'INSTANCE_SEG', 'CLASSIFICATION', 'DETECTION', 'DENOISING', 'SUPER_RESOLUTION', 'SELF_SUPERVISED', 'IMAGE_TO_IMAGE']"
if cfg.PROBLEM.NDIM == "3D" and cfg.TEST.FULL_IMG:
print(
"WARNING: TEST.FULL_IMG == True while using PROBLEM.NDIM == '3D'. As 3D images are usually 'huge'"
", full image statistics will be disabled to avoid GPU memory overflow"
)
set_train_metrics = True if len(cfg.TRAIN.METRICS) == 0 else False
set_test_metrics = True if len(cfg.TEST.METRICS) == 0 else False
if cfg.PROBLEM.TYPE in [
"SEMANTIC_SEG",
"INSTANCE_SEG",
"DETECTION",
]:
if set_train_metrics:
opts.extend(["TRAIN.METRICS", ["iou"]])
if set_test_metrics:
opts.extend(["TEST.METRICS", ["iou"]])
assert len(cfg.TRAIN.METRICS) == 0 or all(
[True if x.lower() in ["iou"] else False for x in cfg.TRAIN.METRICS]
), f"'TRAIN.METRICS' needs to be 'iou' in {cfg.PROBLEM.TYPE} workflow"
assert len(cfg.TEST.METRICS) == 0 or all(
[True if x.lower() in ["iou"] else False for x in cfg.TEST.METRICS]
), f"'TEST.METRICS' needs to be 'iou' in {cfg.PROBLEM.TYPE} workflow"
elif cfg.PROBLEM.TYPE in [
"SUPER_RESOLUTION",
"IMAGE_TO_IMAGE",
"SELF_SUPERVISED",
]:
if set_train_metrics:
opts.extend(["TRAIN.METRICS", ["psnr", "mae", "mse", "ssim"]])
if set_test_metrics:
metric_default_list = ["psnr", "mae", "mse", "ssim"]
if cfg.PROBLEM.NDIM == "2D": # IS, FID and LPIPS implementations only works for 2D images
metric_default_list += ["is", "fid", "lpips"]
opts.extend(["TEST.METRICS", metric_default_list])
assert len(cfg.TRAIN.METRICS) == 0 or all(
[True if x.lower() in ["psnr", "mae", "mse", "ssim"] else False for x in cfg.TRAIN.METRICS]
), f"'TRAIN.METRICS' options are ['psnr', 'mae', 'mse', 'ssim'] in {cfg.PROBLEM.TYPE} workflow"
assert len(cfg.TEST.METRICS) == 0 or all(
[
True if x.lower() in ["psnr", "mae", "mse", "ssim", "fid", "is", "lpips"] else False
for x in cfg.TEST.METRICS
]
), f"'TEST.METRICS' options are ['psnr', 'mae', 'mse', 'ssim', 'fid', 'is', 'lpips'] in {cfg.PROBLEM.TYPE} workflow"
if any([True for x in cfg.TEST.METRICS if x.lower() in ["is", "fid", "lpips"]]) and cfg.PROBLEM.NDIM == "3D":
raise ValueError("IS, FID and LPIPS metrics can only be measured when PROBLEM.NDIM == '3D'")
elif cfg.PROBLEM.TYPE == "DENOISING":
if set_train_metrics:
opts.extend(["TRAIN.METRICS", ["mae", "mse"]])
if set_test_metrics:
opts.extend(["TEST.METRICS", ["mae", "mse"]])
assert len(cfg.TRAIN.METRICS) == 0 or all(
[True if x.lower() in ["mae", "mse"] else False for x in cfg.TRAIN.METRICS]
), f"'TRAIN.METRICS' options are ['mae', 'mse'] in {cfg.PROBLEM.TYPE} workflow"
assert len(cfg.TEST.METRICS) == 0 or all(
[True if x.lower() in ["mae", "mse"] else False for x in cfg.TEST.METRICS]
), f"'TEST.METRICS' options are ['mae', 'mse'] in {cfg.PROBLEM.TYPE} workflow"
elif cfg.PROBLEM.TYPE == "CLASSIFICATION":
if set_train_metrics:
opts.extend(["TRAIN.METRICS", ["accuracy", "top-5-accuracy"]])
if set_test_metrics:
opts.extend(["TEST.METRICS", ["accuracy"]])
assert len(cfg.TRAIN.METRICS) == 0 or all(
[True if x.lower() in ["accuracy", "top-5-accuracy"] else False for x in cfg.TRAIN.METRICS]
), f"'TRAIN.METRICS' options are ['accuracy', 'top-5-accuracy'] in {cfg.PROBLEM.TYPE} workflow"
assert len(cfg.TEST.METRICS) == 0 or all(
[True if x.lower() in ["accuracy"] else False for x in cfg.TEST.METRICS]
), f"'TEST.METRICS' options is 'accuracy' in {cfg.PROBLEM.TYPE} workflow"
if "top-5-accuracy" in [x.lower() for x in cfg.TRAIN.METRICS] and cfg.MODEL.N_CLASSES < 5:
raise ValueError("'top-5-accuracy' can only be used when MODEL.N_CLASSES >= 5")
loss = ""
if cfg.PROBLEM.TYPE in [
"SEMANTIC_SEG",
"DETECTION",
]:
loss = "CE" if cfg.LOSS.TYPE == "" else cfg.LOSS.TYPE
assert loss in [
"CE",
"DICE",
"W_CE_DICE",
], "LOSS.TYPE not in ['CE', 'DICE', 'W_CE_DICE']"
if loss == "W_CE_DICE":
assert (
len(cfg.LOSS.WEIGHTS) == 2
), "'LOSS.WEIGHTS' needs to be a list of two floats when using LOSS.TYPE == 'W_CE_DICE'"
assert sum(cfg.LOSS.WEIGHTS) != 1, "'LOSS.WEIGHTS' values need to sum 1"
elif cfg.PROBLEM.TYPE in [
"SUPER_RESOLUTION",
"SELF_SUPERVISED",
"IMAGE_TO_IMAGE",
]:
loss = "MAE" if cfg.LOSS.TYPE == "" else cfg.LOSS.TYPE
assert loss in [
"MAE",
"MSE",
], "LOSS.TYPE not in ['MAE', 'MSE']"
elif cfg.PROBLEM.TYPE == "DENOISING":
loss = "MSE" if cfg.LOSS.TYPE == "" else cfg.LOSS.TYPE
assert loss == "MSE", "LOSS.TYPE must be 'MSE'"
elif cfg.PROBLEM.TYPE == "CLASSIFICATION":
loss = "CE" if cfg.LOSS.TYPE == "" else cfg.LOSS.TYPE
assert loss == "CE", "LOSS.TYPE must be 'CE'"
opts.extend(["LOSS.TYPE", loss])
if cfg.TEST.ENABLE and cfg.TEST.ANALIZE_2D_IMGS_AS_3D_STACK and cfg.PROBLEM.NDIM == "3D":
raise ValueError("'TEST.ANALIZE_2D_IMGS_AS_3D_STACK' makes no sense when the problem is 3D. Disable it.")
if cfg.MODEL.SOURCE not in ["biapy", "bmz", "torchvision"]:
raise ValueError("'MODEL.SOURCE' needs to be one between ['biapy', 'bmz', 'torchvision']")
if cfg.MODEL.SOURCE == "bmz":
if cfg.MODEL.BMZ.SOURCE_MODEL_ID == "":
raise ValueError("'MODEL.BMZ.SOURCE_MODEL_ID' needs to be configured when 'MODEL.SOURCE' is 'bmz'")
elif cfg.MODEL.SOURCE == "torchvision":
if cfg.MODEL.TORCHVISION_MODEL_NAME == "":
raise ValueError(
"'MODEL.TORCHVISION_MODEL_NAME' needs to be configured when 'MODEL.SOURCE' is 'torchvision'"
)
if cfg.TEST.AUGMENTATION:
print("WARNING: 'TEST.AUGMENTATION' is not available using TorchVision models")
if cfg.TEST.ANALIZE_2D_IMGS_AS_3D_STACK:
raise ValueError("'TEST.ANALIZE_2D_IMGS_AS_3D_STACK' can not be activated with TorchVision models")
if cfg.PROBLEM.NDIM == "3D":
raise ValueError("TorchVision model's are only available for 2D images")
if not cfg.TEST.FULL_IMG and cfg.PROBLEM.TYPE != "CLASSIFICATION":
raise ValueError("With TorchVision models only 'TEST.FULL_IMG' setting is available, so please set it")
if cfg.TEST.AUGMENTATION and cfg.TEST.REDUCE_MEMORY:
raise ValueError(
"'TEST.AUGMENTATION' and 'TEST.REDUCE_MEMORY' are incompatible as the function used to make the rotation "
"does not support float16 data type."
)
if cfg.MODEL.N_CLASSES > 2 and cfg.PROBLEM.TYPE not in [
"SEMANTIC_SEG",
"INSTANCE_SEG",
"DETECTION",
"CLASSIFICATION",
"IMAGE_TO_IMAGE",
]:
raise ValueError(
"'MODEL.N_CLASSES' can only be greater than 2 in the following workflows: 'SEMANTIC_SEG', "
"'INSTANCE_SEG', 'DETECTION', 'CLASSIFICATION' and 'IMAGE_TO_IMAGE'"
)
model_arch = cfg.MODEL.ARCHITECTURE.lower()
model_will_be_read = cfg.MODEL.LOAD_CHECKPOINT and cfg.MODEL.LOAD_MODEL_FROM_CHECKPOINT
#### Semantic segmentation ####
if cfg.PROBLEM.TYPE == "SEMANTIC_SEG":
if not model_will_be_read and cfg.MODEL.SOURCE == "biapy":
if cfg.MODEL.N_CLASSES < 2:
raise ValueError("'MODEL.N_CLASSES' needs to be greater or equal 2 (binary case)")
elif cfg.MODEL.SOURCE == "torchvision":
if cfg.MODEL.TORCHVISION_MODEL_NAME not in [
"deeplabv3_mobilenet_v3_large",
"deeplabv3_resnet101",
"deeplabv3_resnet50",
"fcn_resnet101",
"fcn_resnet50",
"lraspp_mobilenet_v3_large",
]:
raise ValueError(
"'MODEL.SOURCE' must be one between ['deeplabv3_mobilenet_v3_large', 'deeplabv3_resnet101', "
"'deeplabv3_resnet50', 'fcn_resnet101', 'fcn_resnet50', 'lraspp_mobilenet_v3_large' ]"
)
if cfg.MODEL.TORCHVISION_MODEL_NAME in ["deeplabv3_mobilenet_v3_large"] and cfg.DATA.PATCH_SIZE[-1] != 3:
raise ValueError(
"'deeplabv3_mobilenet_v3_large' model expects 3 channel data (RGB). "
f"'DATA.PATCH_SIZE' set is {cfg.DATA.PATCH_SIZE}"
)
#### Instance segmentation ####
if cfg.PROBLEM.TYPE == "INSTANCE_SEG":
assert cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNELS in [
"A",
"C",
"BC",
"BCM",
"BCD",
"BCDv2",
"Dv2",
"BDv2",
"BP",
"BD",
], "PROBLEM.INSTANCE_SEG.DATA_CHANNELS not in ['A','C', 'BC', 'BCM', 'BCD', 'BCDv2', 'Dv2', 'BDv2', 'BP', 'BD']"
if len(cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNEL_WEIGHTS) != channels_provided:
raise ValueError(
"'PROBLEM.INSTANCE_SEG.DATA_CHANNEL_WEIGHTS' needs to be of the same length as the channels selected in 'PROBLEM.INSTANCE_SEG.DATA_CHANNELS'. "
"E.g. 'PROBLEM.INSTANCE_SEG.DATA_CHANNELS'='BC' 'PROBLEM.INSTANCE_SEG.DATA_CHANNEL_WEIGHTS'=[1,0.5]. "
"'PROBLEM.INSTANCE_SEG.DATA_CHANNELS'='BCD' 'PROBLEM.INSTANCE_SEG.DATA_CHANNEL_WEIGHTS'=[0.5,0.5,1]. "
"If 'MODEL.N_CLASSES' > 2 one more weigth need to be provided."
)
if cfg.TEST.POST_PROCESSING.VORONOI_ON_MASK:
if cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNELS not in [
"C",
"BC",
"BCM",
"BCD",
"BCDv2",
]:
raise ValueError(
"'PROBLEM.INSTANCE_SEG.DATA_CHANNELS' needs to be one between ['C', 'BC', 'BCM', 'BCD', 'BCDv2'] "
"when 'TEST.POST_PROCESSING.VORONOI_ON_MASK' is enabled"
)
if not check_value(cfg.TEST.POST_PROCESSING.VORONOI_TH):
raise ValueError("'TEST.POST_PROCESSING.VORONOI_TH' not in [0, 1] range")
if (
cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNELS not in ["C", "BC", "BCM", "BCD", "BP"]
and cfg.PROBLEM.INSTANCE_SEG.ERODE_AND_DILATE_FOREGROUND
):
raise ValueError(
"'PROBLEM.INSTANCE_SEG.ERODE_AND_DILATE_FOREGROUND' can only be used with 'C', 'BC', 'BCM', 'BP' or 'BCD' channels"
)
for morph_operation in cfg.PROBLEM.INSTANCE_SEG.SEED_MORPH_SEQUENCE:
if morph_operation != "dilate" and morph_operation != "erode":
raise ValueError(
"'PROBLEM.INSTANCE_SEG.SEED_MORPH_SEQUENCE' can only be a sequence with 'dilate' or 'erode' operations. "
"{} given".format(cfg.PROBLEM.INSTANCE_SEG.SEED_MORPH_SEQUENCE)
)
if len(cfg.PROBLEM.INSTANCE_SEG.SEED_MORPH_SEQUENCE) != len(cfg.PROBLEM.INSTANCE_SEG.SEED_MORPH_RADIUS):
raise ValueError(
"'PROBLEM.INSTANCE_SEG.SEED_MORPH_SEQUENCE' length and 'PROBLEM.INSTANCE_SEG.SEED_MORPH_RADIUS' length needs to be the same"
)
if cfg.PROBLEM.INSTANCE_SEG.DATA_CONTOUR_MODE not in [
"thick",
"inner",
"outer",
"subpixel",
"dense",
]:
raise ValueError(
"'PROBLEM.INSTANCE_SEG.DATA_CONTOUR_MODE' must be one between ['thick', 'inner', 'outer', 'subpixel', 'dense']"
)
if cfg.PROBLEM.INSTANCE_SEG.DATA_CONTOUR_MODE == "dense" and cfg.PROBLEM.INSTANCE_SEG.DATA_CHANNELS == "BCM":
raise ValueError(
"'PROBLEM.INSTANCE_SEG.DATA_CONTOUR_MODE' can not be 'dense' when 'PROBLEM.INSTANCE_SEG.DATA_CHANNELS' is 'BCM'"
" as it does not have sense"
)
if cfg.PROBLEM.INSTANCE_SEG.WATERSHED_BY_2D_SLICES:
if cfg.PROBLEM.NDIM == "2D" and not cfg.TEST.ANALIZE_2D_IMGS_AS_3D_STACK:
raise ValueError(
"'PROBLEM.INSTANCE_SEG.WATERSHED_BY_2D_SLICE' can only be activated when 'PROBLEM.NDIM' == 3D or "
"in 2D when 'TEST.ANALIZE_2D_IMGS_AS_3D_STACK' is enabled"
)
if cfg.MODEL.SOURCE == "torchvision":
if cfg.MODEL.TORCHVISION_MODEL_NAME not in [
"maskrcnn_resnet50_fpn",
"maskrcnn_resnet50_fpn_v2",
]:
raise ValueError(
"'MODEL.SOURCE' must be one between ['maskrcnn_resnet50_fpn', 'maskrcnn_resnet50_fpn_v2']"
)
if cfg.PROBLEM.NDIM == "3D":
raise ValueError("TorchVision model's for instance segmentation are only available for 2D images")
if cfg.TRAIN.ENABLE:
raise NotImplementedError # require bbox generator etc.
#### Detection ####
if cfg.PROBLEM.TYPE == "DETECTION":
if not model_will_be_read and cfg.MODEL.SOURCE == "biapy" and cfg.MODEL.N_CLASSES < 2:
raise ValueError("'MODEL.N_CLASSES' needs to be greater or equal 2 (binary case)")
cpd = cfg.PROBLEM.DETECTION.CENTRAL_POINT_DILATION
if len(cpd) == 1:
cpd = cpd * 2 if cfg.PROBLEM.NDIM == "2D" else cpd * 3
if len(cpd) != 3 and cfg.PROBLEM.NDIM == "3D":
raise ValueError(
"'PROBLEM.DETECTION.CENTRAL_POINT_DILATION' needs to be a list of three ints in a 3D problem"
)
elif len(cpd) != 2 and cfg.PROBLEM.NDIM == "2D":
raise ValueError(
"'PROBLEM.DETECTION.CENTRAL_POINT_DILATION' needs to be a list of two ints in a 2D problem"
)
opts.extend(["PROBLEM.DETECTION.CENTRAL_POINT_DILATION", cpd])
if cfg.TEST.POST_PROCESSING.DET_WATERSHED:
if any(len(x) != dim_count for x in cfg.TEST.POST_PROCESSING.DET_WATERSHED_FIRST_DILATION):
raise ValueError(
"Each structure object defined in 'TEST.POST_PROCESSING.DET_WATERSHED_FIRST_DILATION' "
"needs to be of {} dimension".format(dim_count)
)
if (
not cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.ENABLE
or not cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.ENABLE
):
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.ENABLE' and "
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.ENABLE' needs to be set when 'TEST.POST_PROCESSING.DET_WATERSHED' is enabled"
)
for lprop in cfg.TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS:
if len(lprop) != 1:
raise ValueError(
"'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS' can not be set with more than one property and that property"
" needs to be set to 'circularity' or 'sphericity'. This restriction is because 'TEST.POST_PROCESSING.DET_WATERSHED' is enabled"
)
if lprop[0] not in ["circularity", "sphericity"]:
raise ValueError(
"Only 'circularity' or 'sphericity' can be used in 'TEST.POST_PROCESSING.MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.PROPS' "
"when 'TEST.POST_PROCESSING.DET_WATERSHED' is enabled"
)
if cfg.TEST.DET_POINT_CREATION_FUNCTION not in ["peak_local_max", "blob_log"]:
raise ValueError("'TEST.DET_POINT_CREATION_FUNCTION' must be one between: ['peak_local_max', 'blob_log']")
if cfg.MODEL.SOURCE == "torchvision":
if cfg.MODEL.TORCHVISION_MODEL_NAME not in [
"fasterrcnn_mobilenet_v3_large_320_fpn",
"fasterrcnn_mobilenet_v3_large_fpn",
"fasterrcnn_resnet50_fpn",
"fasterrcnn_resnet50_fpn_v2",
"fcos_resnet50_fpn",
"ssd300_vgg16",
"ssdlite320_mobilenet_v3_large",
"retinanet_resnet50_fpn",
"retinanet_resnet50_fpn_v2",
]:
raise ValueError(
"'MODEL.SOURCE' must be one between ['fasterrcnn_mobilenet_v3_large_320_fpn', 'fasterrcnn_mobilenet_v3_large_fpn', "
"'fasterrcnn_resnet50_fpn', 'fasterrcnn_resnet50_fpn_v2', 'fcos_resnet50_fpn', 'ssd300_vgg16', 'ssdlite320_mobilenet_v3_large', "
"'retinanet_resnet50_fpn', 'retinanet_resnet50_fpn_v2']"
)
if cfg.PROBLEM.NDIM == "3D":
raise ValueError("TorchVision model's for detection are only available for 2D images")
if cfg.TRAIN.ENABLE:
raise NotImplementedError # require bbox generator etc.
if cfg.TEST.ENABLE and len(cfg.TEST.DET_IGNORE_POINTS_OUTSIDE_BOX) > 0:
assert [x > 0 for x in cfg.TEST.DET_IGNORE_POINTS_OUTSIDE_BOX], (
"'TEST.DET_IGNORE_POINTS_OUTSIDE_BOX' needs to be a list " "of positive integers"
)
assert len(cfg.TEST.DET_IGNORE_POINTS_OUTSIDE_BOX) == dim_count, (
"'TEST.DET_IGNORE_POINTS_OUTSIDE_BOX' needs to be of " f"{dim_count} dimension"
)
#### Super-resolution ####
elif cfg.PROBLEM.TYPE == "SUPER_RESOLUTION":
if not (cfg.PROBLEM.SUPER_RESOLUTION.UPSCALING):
raise ValueError("Resolution scale must be provided with 'PROBLEM.SUPER_RESOLUTION.UPSCALING' variable")
assert all(
i > 0 for i in cfg.PROBLEM.SUPER_RESOLUTION.UPSCALING
), "'PROBLEM.SUPER_RESOLUTION.UPSCALING' are not positive integers"
if len(cfg.PROBLEM.SUPER_RESOLUTION.UPSCALING) != dim_count:
raise ValueError(f"'PROBLEM.SUPER_RESOLUTION.UPSCALING' needs to be a tuple of {dim_count} integers")
if cfg.MODEL.SOURCE == "torchvision":
raise ValueError("'MODEL.SOURCE' as 'torchvision' is not available in super-resolution workflow")
if cfg.DATA.NORMALIZATION.TYPE not in ["div", "scale_range"]:
raise ValueError("'DATA.NORMALIZATION.TYPE' in SR workflow needs to be one between ['div','scale_range']")
#### Self-supervision ####
elif cfg.PROBLEM.TYPE == "SELF_SUPERVISED":
if cfg.PROBLEM.SELF_SUPERVISED.PRETEXT_TASK == "crappify":
if cfg.PROBLEM.SELF_SUPERVISED.RESIZING_FACTOR not in [2, 4, 6]:
raise ValueError("'PROBLEM.SELF_SUPERVISED.RESIZING_FACTOR' not in [2,4,6]")
if not check_value(cfg.PROBLEM.SELF_SUPERVISED.NOISE):
raise ValueError("'PROBLEM.SELF_SUPERVISED.NOISE' not in [0, 1] range")
if not model_will_be_read and model_arch == "mae":
raise ValueError(
"'MODEL.ARCHITECTURE' can not be 'mae' when 'PROBLEM.SELF_SUPERVISED.PRETEXT_TASK' is 'crappify'"
)
elif cfg.PROBLEM.SELF_SUPERVISED.PRETEXT_TASK == "masking":
if not model_will_be_read and model_arch != "mae":
raise ValueError(
"'MODEL.ARCHITECTURE' needs to be 'mae' when 'PROBLEM.SELF_SUPERVISED.PRETEXT_TASK' is 'masking'"
)
assert cfg.MODEL.MAE_MASK_TYPE in [
"random",
"grid",
], "'MODEL.MAE_MASK_TYPE' needs to be one between ['random', 'grid']"
if cfg.MODEL.MAE_MASK_TYPE == "random" and not check_value(cfg.MODEL.MAE_MASK_RATIO):
raise ValueError("'MODEL.MAE_MASK_RATIO' not in [0, 1] range")
else:
raise ValueError(
"'PROBLEM.SELF_SUPERVISED.PRETEXT_TASK' needs to be among these options: ['crappify', 'masking']"
)
if cfg.MODEL.SOURCE == "torchvision":
raise ValueError("'MODEL.SOURCE' as 'torchvision' is not available in self-supervised workflow")
#### Denoising ####
elif cfg.PROBLEM.TYPE == "DENOISING":
if cfg.DATA.TEST.LOAD_GT:
raise ValueError(
"Denoising is made in an unsupervised way so there is no ground truth required. Disable 'DATA.TEST.LOAD_GT'"
)
if not check_value(cfg.PROBLEM.DENOISING.N2V_PERC_PIX):
raise ValueError("PROBLEM.DENOISING.N2V_PERC_PIX not in [0, 1] range")
if cfg.MODEL.SOURCE == "torchvision":
raise ValueError("'MODEL.SOURCE' as 'torchvision' is not available in denoising workflow")
#### Classification ####
elif cfg.PROBLEM.TYPE == "CLASSIFICATION":
if cfg.TEST.BY_CHUNKS.ENABLE:
raise ValueError("'TEST.BY_CHUNKS.ENABLE' can not be activated for CLASSIFICATION workflow")
if cfg.MODEL.SOURCE == "torchvision":
if cfg.MODEL.TORCHVISION_MODEL_NAME not in [
"alexnet",
"convnext_base",
"convnext_large",
"convnext_small",
"convnext_tiny",
"densenet121",
"densenet161",
"densenet169",
"densenet201",
"efficientnet_b0",
"efficientnet_b1",
"efficientnet_b2",
"efficientnet_b3",
"efficientnet_b4",
"efficientnet_b5",
"efficientnet_b6",
"efficientnet_b7",
"efficientnet_v2_l",
"efficientnet_v2_m",
"efficientnet_v2_s",
"googlenet",
"inception_v3",
"maxvit_t",
"mnasnet0_5",
"mnasnet0_75",
"mnasnet1_0",
"mnasnet1_3",
"mobilenet_v2",
"mobilenet_v3_large",
"mobilenet_v3_small",
"quantized_googlenet",
"quantized_inception_v3",
"quantized_mobilenet_v2",
"quantized_mobilenet_v3_large",
"quantized_resnet18",
"quantized_resnet50",
"quantized_resnext101_32x8d",
"quantized_resnext101_64x4d",
"quantized_shufflenet_v2_x0_5",
"quantized_shufflenet_v2_x1_0",
"quantized_shufflenet_v2_x1_5",
"quantized_shufflenet_v2_x2_0",
"regnet_x_16gf",
"regnet_x_1_6gf",
"regnet_x_32gf",
"regnet_x_3_2gf",
"regnet_x_400mf",
"regnet_x_800mf",
"regnet_x_8gf",
"regnet_y_128gf",
"regnet_y_16gf",
"regnet_y_1_6gf",
"regnet_y_32gf",
"regnet_y_3_2gf",
"regnet_y_400mf",
"regnet_y_800mf",
"regnet_y_8gf",
"resnet101",
"resnet152",
"resnet18",
"resnet34",
"resnet50",
"resnext101_32x8d",
"resnext101_64x4d",
"resnext50_32x4d",
"retinanet_resnet50_fpn",
"shufflenet_v2_x0_5",
"shufflenet_v2_x1_0",
"shufflenet_v2_x1_5",
"shufflenet_v2_x2_0",
"squeezenet1_0",
"squeezenet1_1",
"swin_b",
"swin_s",
"swin_t",
"swin_v2_b",
"swin_v2_s",
"swin_v2_t",
"vgg11",
"vgg11_bn",
"vgg13",
"vgg13_bn",
"vgg16",
"vgg16_bn",
"vgg19",
"vgg19_bn",
"vit_b_16",
"vit_b_32",
"vit_h_14",
"vit_l_16",
"vit_l_32",
"wide_resnet101_2",
"wide_resnet50_2",
]:
raise ValueError(
"'MODEL.SOURCE' must be one between [ "
"'alexnet', 'convnext_base', 'convnext_large', 'convnext_small', 'convnext_tiny', 'densenet121', 'densenet161', "
"'densenet169', 'densenet201', 'efficientnet_b0', 'efficientnet_b1', 'efficientnet_b2', 'efficientnet_b3', "
"'efficientnet_b4', 'efficientnet_b5', 'efficientnet_b6', 'efficientnet_b7', 'efficientnet_v2_l', 'efficientnet_v2_m', "
"'efficientnet_v2_s', 'googlenet', 'inception_v3', 'maxvit_t', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3', "
"'mobilenet_v2', 'mobilenet_v3_large', 'mobilenet_v3_small', 'quantized_googlenet', 'quantized_inception_v3', "
"'quantized_mobilenet_v2', 'quantized_mobilenet_v3_large', 'quantized_resnet18', 'quantized_resnet50', "
"'quantized_resnext101_32x8d', 'quantized_resnext101_64x4d', 'quantized_shufflenet_v2_x0_5', 'quantized_shufflenet_v2_x1_0', "
"'quantized_shufflenet_v2_x1_5', 'quantized_shufflenet_v2_x2_0', 'regnet_x_16gf', 'regnet_x_1_6gf', 'regnet_x_32gf', "
"'regnet_x_3_2gf', 'regnet_x_400mf', 'regnet_x_800mf', 'regnet_x_8gf', 'regnet_y_128gf', 'regnet_y_16gf', 'regnet_y_1_6gf', "
"'regnet_y_32gf', 'regnet_y_3_2gf', 'regnet_y_400mf', 'regnet_y_800mf', 'regnet_y_8gf', 'resnet101', 'resnet152', "
"'resnet18', 'resnet34', 'resnet50', 'resnext101_32x8d', 'resnext101_64x4d', 'resnext50_32x4d', 'retinanet_resnet50_fpn', "
"'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0', "
"'squeezenet1_0', 'squeezenet1_1', 'swin_b', 'swin_s', 'swin_t', 'swin_v2_b', 'swin_v2_s', 'swin_v2_t', "
"'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', 'vgg19', 'vgg19_bn', 'vit_b_16', 'vit_b_32', "
"'vit_h_14', 'vit_l_16', 'vit_l_32', 'wide_resnet101_2', 'wide_resnet50_2' "
"]"
)
#### Image to image ####
elif cfg.PROBLEM.TYPE == "IMAGE_TO_IMAGE":
if cfg.MODEL.SOURCE == "torchvision":
raise ValueError("'MODEL.SOURCE' as 'torchvision' is not available in image to image workflow")
if cfg.PROBLEM.IMAGE_TO_IMAGE.MULTIPLE_RAW_ONE_TARGET_LOADER:
if cfg.TRAIN.ENABLE and cfg.DATA.TRAIN.FILTER_SAMPLES.ENABLE:
raise ValueError(
"'DATA.TRAIN.FILTER_SAMPLES.ENABLE' can not be enabled when 'PROBLEM.IMAGE_TO_IMAGE.MULTIPLE_RAW_ONE_TARGET_LOADER' is enabled too"
)
if cfg.TRAIN.ENABLE and cfg.DATA.VAL.FILTER_SAMPLES.ENABLE:
raise ValueError(
"'DATA.VAL.FILTER_SAMPLES.ENABLE' can not be enabled when 'PROBLEM.IMAGE_TO_IMAGE.MULTIPLE_RAW_ONE_TARGET_LOADER' is enabled too"
)
if cfg.DATA.EXTRACT_RANDOM_PATCH and cfg.DATA.PROBABILITY_MAP:
if cfg.DATA.W_FOREGROUND + cfg.DATA.W_BACKGROUND != 1:
raise ValueError(
"cfg.DATA.W_FOREGROUND+cfg.DATA.W_BACKGROUND need to sum 1. E.g. 0.94 and 0.06 respectively."
)
if cfg.DATA.VAL.FROM_TRAIN and cfg.DATA.PREPROCESS.VAL:
print(
"WARNING: validation preprocessing will be done based on 'DATA.PREPROCESS.TRAIN', as 'DATA.VAL.FROM_TRAIN' is selected"
)
### Pre-processing ###
if cfg.DATA.PREPROCESS.TRAIN or cfg.DATA.PREPROCESS.TEST or cfg.DATA.PREPROCESS.VAL:
if cfg.DATA.PREPROCESS.RESIZE.ENABLE:
if cfg.PROBLEM.TYPE == "DETECTION":
raise ValueError("Resizing preprocessing is not available for the DETECTION workflow.")
if cfg.PROBLEM.NDIM == "3D":
if cfg.DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE == (512, 512):
opts.extend(["DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE", (512, 512, 512)])
elif len(cfg.DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE) != 3:
raise ValueError(
"When 'PROBLEM.NDIM' is 3D, 'DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE' must indicate desired size for each dimension."
f"Given shape ({cfg.DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE}) is not compatible."
)
if cfg.PROBLEM.NDIM == "2D" and len(cfg.DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE) != 2:
raise ValueError(
"When 'PROBLEM.NDIM' is 2D, 'DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE' must indicate desired size for each dimension."
f"Given shape ({cfg.DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE}) is not compatible."
)
for i, s in enumerate(cfg.DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE):
if cfg.DATA.PATCH_SIZE[i] > s:
raise ValueError(
f"'DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE' {cfg.DATA.PREPROCESS.RESIZE.OUTPUT_SHAPE} can not be smaller than 'DATA.PATCH_SIZE' {cfg.DATA.PATCH_SIZE}."
)
if cfg.DATA.PREPROCESS.CANNY.ENABLE and cfg.PROBLEM.NDIM != "2D":
raise ValueError("Canny or edge detection can not be activated when 'PROBLEM.NDIM' is 2D.")
if cfg.DATA.PREPROCESS.MEDIAN_BLUR.ENABLE:
if cfg.PROBLEM.NDIM == "2D" and len(cfg.DATA.PREPROCESS.MEDIAN_BLUR.KERNEL_SIZE) != 3:
raise ValueError(
"When 'PROBLEM.NDIM' is 2D, 'DATA.PREPROCESS.MEDIAN_BLUR.KERNEL_SIZE' must indicate desired kernel size for each dimension, including channels (y,x,c)."
f"Given kernel size ({cfg.DATA.PREPROCESS.MEDIAN_BLUR.KERNEL_SIZE}) is not compatible."
)
elif cfg.PROBLEM.NDIM == "3D" and len(cfg.DATA.PREPROCESS.MEDIAN_BLUR.KERNEL_SIZE) != 4:
raise ValueError(
"When 'PROBLEM.NDIM' is 3D, 'DATA.PREPROCESS.MEDIAN_BLUR.KERNEL_SIZE' must indicate desired kernel size for each dimension, including channels (z,y,x,c)."
f"Given kernel size ({cfg.DATA.PREPROCESS.MEDIAN_BLUR.KERNEL_SIZE}) is not compatible."
)
if cfg.DATA.PREPROCESS.MATCH_HISTOGRAM.ENABLE:
if not os.path.exists(cfg.DATA.PREPROCESS.MATCH_HISTOGRAM.REFERENCE_PATH):
raise ValueError(
f"Path pointed by 'DATA.PREPROCESS.MATCH_HISTOGRAM.REFERENCE_PATH' does not exist: {cfg.DATA.PREPROCESS.MATCH_HISTOGRAM.REFERENCE_PATH}"
)
if cfg.DATA.PREPROCESS.ZOOM.ENABLE and not cfg.TEST.BY_CHUNKS.ENABLE:
raise ValueError("'DATA.PREPROCESS.ZOOM.ENABLE' can only be activated when 'TEST.BY_CHUNKS.ENABLE' is True")
if cfg.DATA.PREPROCESS.ZOOM.ENABLE and len(cfg.DATA.PREPROCESS.ZOOM.ZOOM_FACTOR) != len(
cfg.TEST.BY_CHUNKS.INPUT_IMG_AXES_ORDER
):
raise ValueError(
"'DATA.PREPROCESS.ZOOM.ZOOM_FACTOR' needs to have the same length as 'TEST.BY_CHUNKS.INPUT_IMG_AXES_ORDER'"
)
#### Data ####
if cfg.TRAIN.ENABLE:
if check_data_paths:
if not os.path.exists(cfg.DATA.TRAIN.PATH):
raise ValueError("Train data dir not found: {}".format(cfg.DATA.TRAIN.PATH))
if (
not os.path.exists(cfg.DATA.TRAIN.GT_PATH)
and cfg.PROBLEM.TYPE not in ["DENOISING", "CLASSIFICATION", "SELF_SUPERVISED"]
and not cfg.DATA.TRAIN.INPUT_ZARR_MULTIPLE_DATA
):
raise ValueError("Train mask data dir not found: {}".format(cfg.DATA.TRAIN.GT_PATH))
if not cfg.DATA.VAL.FROM_TRAIN:
if not os.path.exists(cfg.DATA.VAL.PATH):
raise ValueError("Validation data dir not found: {}".format(cfg.DATA.VAL.PATH))
if (
not os.path.exists(cfg.DATA.VAL.GT_PATH)
and cfg.PROBLEM.TYPE not in ["DENOISING", "CLASSIFICATION", "SELF_SUPERVISED"]
and not cfg.DATA.VAL.INPUT_ZARR_MULTIPLE_DATA
):
raise ValueError("Validation mask data dir not found: {}".format(cfg.DATA.VAL.GT_PATH))
if cfg.DATA.TRAIN.INPUT_ZARR_MULTIPLE_DATA:
if cfg.PROBLEM.NDIM != "3D":
raise ValueError("'DATA.TRAIN.INPUT_ZARR_MULTIPLE_DATA' to True is only implemented in 3D workflows")
if (
cfg.DATA.TRAIN.INPUT_ZARR_MULTIPLE_DATA_RAW_PATH == ""
or cfg.DATA.TRAIN.INPUT_ZARR_MULTIPLE_DATA_GT_PATH == ""
):
raise ValueError(
"'DATA.TRAIN.INPUT_ZARR_MULTIPLE_DATA_RAW_PATH' and 'DATA.TRAIN.INPUT_ZARR_MULTIPLE_DATA_GT_PATH' "
"need to be set when 'DATA.TRAIN.INPUT_ZARR_MULTIPLE_DATA' is used."
)
if cfg.DATA.VAL.INPUT_ZARR_MULTIPLE_DATA:
if cfg.PROBLEM.NDIM != "3D":
raise ValueError("'DATA.VAL.INPUT_ZARR_MULTIPLE_DATA' to True is only implemented in 3D workflows")
if (
cfg.DATA.VAL.INPUT_ZARR_MULTIPLE_DATA_RAW_PATH == ""
or cfg.DATA.VAL.INPUT_ZARR_MULTIPLE_DATA_GT_PATH == ""
):
raise ValueError(
"'DATA.VAL.INPUT_ZARR_MULTIPLE_DATA_RAW_PATH' and 'DATA.VAL.INPUT_ZARR_MULTIPLE_DATA_GT_PATH' "
"need to be set when 'DATA.VAL.INPUT_ZARR_MULTIPLE_DATA' is used."
)
if cfg.TEST.ENABLE and not cfg.DATA.TEST.USE_VAL_AS_TEST and check_data_paths:
if not os.path.exists(cfg.DATA.TEST.PATH):
raise ValueError("Test data not found: {}".format(cfg.DATA.TEST.PATH))
if (
cfg.DATA.TEST.LOAD_GT
and not os.path.exists(cfg.DATA.TEST.GT_PATH)
and cfg.PROBLEM.TYPE not in ["CLASSIFICATION", "SELF_SUPERVISED"]
and not cfg.TEST.BY_CHUNKS.INPUT_ZARR_MULTIPLE_DATA
):
raise ValueError("Test data mask not found: {}".format(cfg.DATA.TEST.GT_PATH))
if cfg.TEST.ENABLE and cfg.TEST.BY_CHUNKS.ENABLE:
if cfg.PROBLEM.NDIM == "2D":
raise ValueError("'TEST.BY_CHUNKS' can not be activated when 'PROBLEM.NDIM' is 2D")
assert cfg.TEST.BY_CHUNKS.FORMAT.lower() in [
"h5",
"zarr",
], "'TEST.BY_CHUNKS.FORMAT' needs to be one between ['H5', 'Zarr']"
opts.extend(["TEST.BY_CHUNKS.FORMAT", cfg.TEST.BY_CHUNKS.FORMAT.lower()])
if cfg.TEST.BY_CHUNKS.WORKFLOW_PROCESS.ENABLE:
assert cfg.TEST.BY_CHUNKS.WORKFLOW_PROCESS.TYPE in [
"chunk_by_chunk",
"entire_pred",
], "'TEST.BY_CHUNKS.WORKFLOW_PROCESS.TYPE' needs to be one between ['chunk_by_chunk', 'entire_pred']"
if len(cfg.TEST.BY_CHUNKS.INPUT_IMG_AXES_ORDER) < 3:
raise ValueError("'TEST.BY_CHUNKS.INPUT_IMG_AXES_ORDER' needs to be at least of length 3, e.g., 'ZYX'")
if cfg.MODEL.N_CLASSES > 2:
raise ValueError("Not implemented pipeline option: 'MODEL.N_CLASSES' > 2 and 'TEST.BY_CHUNKS'")
if cfg.TEST.BY_CHUNKS.INPUT_ZARR_MULTIPLE_DATA:
if cfg.TEST.BY_CHUNKS.INPUT_ZARR_MULTIPLE_DATA_RAW_PATH == "":
raise ValueError(
"'TEST.BY_CHUNKS.INPUT_ZARR_MULTIPLE_DATA_RAW_PATH' needs to be set when "
"'TEST.BY_CHUNKS.INPUT_ZARR_MULTIPLE_DATA' is used."
)
if cfg.DATA.TEST.LOAD_GT and cfg.TEST.BY_CHUNKS.INPUT_ZARR_MULTIPLE_DATA_GT_PATH == "":
raise ValueError(
"'TEST.BY_CHUNKS.INPUT_ZARR_MULTIPLE_DATA_GT_PATH' needs to be set when "
"'TEST.BY_CHUNKS.INPUT_ZARR_MULTIPLE_DATA' is used."
)
if cfg.TRAIN.ENABLE:
if cfg.DATA.EXTRACT_RANDOM_PATCH and cfg.DATA.PROBABILITY_MAP:
if not cfg.PROBLEM.TYPE == "SEMANTIC_SEG":
raise ValueError("'DATA.PROBABILITY_MAP' can only be selected when 'PROBLEM.TYPE' is 'SEMANTIC_SEG'")
if cfg.DATA.VAL.FROM_TRAIN and not cfg.DATA.VAL.CROSS_VAL and cfg.DATA.VAL.SPLIT_TRAIN <= 0:
raise ValueError("'DATA.VAL.SPLIT_TRAIN' needs to be > 0 when 'DATA.VAL.FROM_TRAIN' == True")
if cfg.PROBLEM.NDIM == "2D" and cfg.DATA.TRAIN.INPUT_IMG_AXES_ORDER != "TZCYX":
raise ValueError("'DATA.TRAIN.INPUT_IMG_AXES_ORDER' can not be set in 2D problems")
if cfg.PROBLEM.NDIM == "2D" and cfg.DATA.TRAIN.INPUT_MASK_AXES_ORDER != "TZCYX":
raise ValueError("'DATA.TRAIN.INPUT_MASK_AXES_ORDER' can not be set in 2D problems")
if len(cfg.DATA.TRAIN.INPUT_IMG_AXES_ORDER) < 3:
raise ValueError("'DATA.TRAIN.INPUT_IMG_AXES_ORDER' needs to be at least of length 3, e.g., 'ZYX'")
if len(cfg.DATA.TRAIN.INPUT_MASK_AXES_ORDER) < 3:
raise ValueError("'DATA.TRAIN.INPUT_MASK_AXES_ORDER' needs to be at least of length 3, e.g., 'ZYX'")
if cfg.PROBLEM.NDIM == "2D" and cfg.DATA.VAL.INPUT_IMG_AXES_ORDER != "TZCYX":
raise ValueError("'DATA.VAL.INPUT_IMG_AXES_ORDER' can not be set in 2D problems")
if cfg.PROBLEM.NDIM == "2D" and cfg.DATA.VAL.INPUT_MASK_AXES_ORDER != "TZCYX":
raise ValueError("'DATA.VAL.INPUT_MASK_AXES_ORDER' can not be set in 2D problems")
if len(cfg.DATA.VAL.INPUT_IMG_AXES_ORDER) < 3:
raise ValueError("'DATA.VAL.INPUT_IMG_AXES_ORDER' needs to be at least of length 3, e.g., 'ZYX'")
if len(cfg.DATA.VAL.INPUT_MASK_AXES_ORDER) < 3:
raise ValueError("'DATA.VAL.INPUT_MASK_AXES_ORDER' needs to be at least of length 3, e.g., 'ZYX'")
if cfg.DATA.VAL.CROSS_VAL:
if not cfg.DATA.VAL.FROM_TRAIN:
raise ValueError("'DATA.VAL.CROSS_VAL' can only be used when 'DATA.VAL.FROM_TRAIN' is True")
if cfg.DATA.VAL.CROSS_VAL_NFOLD < cfg.DATA.VAL.CROSS_VAL_FOLD:
raise ValueError("'DATA.VAL.CROSS_VAL_NFOLD' can not be less than 'DATA.VAL.CROSS_VAL_FOLD'")
if cfg.DATA.TEST.USE_VAL_AS_TEST and not cfg.DATA.VAL.CROSS_VAL:
raise ValueError("'DATA.TEST.USE_VAL_AS_TEST' can only be used when 'DATA.VAL.CROSS_VAL' is selected")
if len(cfg.DATA.TRAIN.RESOLUTION) != 1 and len(cfg.DATA.TRAIN.RESOLUTION) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.TRAIN.RESOLUTION tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.TRAIN.RESOLUTION
)
)
if len(cfg.DATA.VAL.RESOLUTION) != 1 and len(cfg.DATA.VAL.RESOLUTION) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.VAL.RESOLUTION tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.VAL.RESOLUTION
)
)
if cfg.TEST.ANALIZE_2D_IMGS_AS_3D_STACK and cfg.PROBLEM.TYPE == "INSTANCE_SEG":
if len(cfg.DATA.TEST.RESOLUTION) != 2 and len(cfg.DATA.TEST.RESOLUTION) != 3:
raise ValueError(
"'DATA.TEST.RESOLUTION' needs to be a tuple with 2 or 3 values (both valid because "
"'TEST.ANALIZE_2D_IMGS_AS_3D_STACK' is activated in this case)".format(dim_count)
)
else:
if len(cfg.DATA.TEST.RESOLUTION) != 1 and len(cfg.DATA.TEST.RESOLUTION) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.TEST.RESOLUTION tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.TEST.RESOLUTION
)
)
if len(cfg.DATA.TRAIN.OVERLAP) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.TRAIN.OVERLAP tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.TRAIN.OVERLAP
)
)
if any(not check_value(x) for x in cfg.DATA.TRAIN.OVERLAP):
raise ValueError("DATA.TRAIN.OVERLAP not in [0, 1] range")
if len(cfg.DATA.TRAIN.PADDING) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.TRAIN.PADDING tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.TRAIN.PADDING
)
)
if len(cfg.DATA.VAL.OVERLAP) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.VAL.OVERLAP tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.VAL.OVERLAP
)
)
if any(not check_value(x) for x in cfg.DATA.VAL.OVERLAP):
raise ValueError("DATA.VAL.OVERLAP not in [0, 1] range")
if len(cfg.DATA.VAL.PADDING) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.VAL.PADDING tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.VAL.PADDING
)
)
if len(cfg.DATA.TEST.OVERLAP) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.TEST.OVERLAP tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.TEST.OVERLAP
)
)
if any(not check_value(x) for x in cfg.DATA.TEST.OVERLAP):
raise ValueError("DATA.TEST.OVERLAP not in [0, 1] range")
if len(cfg.DATA.TEST.PADDING) != dim_count:
raise ValueError(
"When PROBLEM.NDIM == {} DATA.TEST.PADDING tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count, cfg.DATA.TEST.PADDING
)
)
if len(cfg.DATA.PATCH_SIZE) != dim_count + 1:
if cfg.MODEL.SOURCE != "bmz":
raise ValueError(
"When PROBLEM.NDIM == {} DATA.PATCH_SIZE tuple must be length {}, given {}.".format(
cfg.PROBLEM.NDIM, dim_count + 1, cfg.DATA.PATCH_SIZE
)
)
else:
print(
"WARNING: when PROBLEM.NDIM == {} DATA.PATCH_SIZE tuple must be length {}, given {}. Not an error "
"because you are using a model from BioImage Model Zoo (BMZ) and the patch size will be determined by the model."
" However, this message is printed so you are aware of this. "
)
assert cfg.DATA.NORMALIZATION.TYPE in [
"div",
"scale_range",
"custom",
], "DATA.NORMALIZATION.TYPE not in ['div', 'scale_range', 'custom']"
if cfg.DATA.NORMALIZATION.CUSTOM_MEAN != -1 and cfg.DATA.NORMALIZATION.CUSTOM_STD == -1:
raise ValueError(
"'DATA.NORMALIZATION.CUSTOM_STD' needs to be provided when 'DATA.NORMALIZATION.CUSTOM_MEAN' is provided too"
)
if cfg.DATA.NORMALIZATION.PERC_CLIP:
if cfg.DATA.NORMALIZATION.PERC_LOWER == -1:
raise ValueError(
"'DATA.NORMALIZATION.PERC_LOWER' needs to be set when DATA.NORMALIZATION.PERC_CLIP == 'True'"
)
if cfg.DATA.NORMALIZATION.PERC_UPPER == -1:
raise ValueError(
"'DATA.NORMALIZATION.PERC_UPPER' needs to be set when DATA.NORMALIZATION.PERC_CLIP == 'True'"
)
if not check_value(cfg.DATA.NORMALIZATION.PERC_LOWER, value_range=(0, 100)):
raise ValueError("'DATA.NORMALIZATION.PERC_LOWER' not in [0, 100] range")
if not check_value(cfg.DATA.NORMALIZATION.PERC_UPPER, value_range=(0, 100)):
raise ValueError("'DATA.NORMALIZATION.PERC_UPPER' not in [0, 100] range")
if cfg.DATA.TRAIN.REPLICATE:
if cfg.PROBLEM.TYPE == "CLASSIFICATION" or (
cfg.PROBLEM.TYPE == "SELF_SUPERVISED" and cfg.PROBLEM.SELF_SUPERVISED.PRETEXT_TASK == "masking"
):
print("WARNING: 'DATA.TRAIN.REPLICATE' has no effect in the selected workflow")
### Model ###
if not model_will_be_read and cfg.MODEL.SOURCE == "biapy":
assert model_arch in [
"unet",
"resunet",
"resunet++",
"attention_unet",
"multiresunet",
"seunet",
"resunet_se",
"simple_cnn",
"efficientnet_b0",
"efficientnet_b1",
"efficientnet_b2",
"efficientnet_b3",
"efficientnet_b4",
"efficientnet_b5",
"efficientnet_b6",
"efficientnet_b7",
"unetr",
"edsr",
"rcan",
"dfcan",
"wdsr",
"vit",
"mae",
"unext_v1",
"unext_v2",
], "MODEL.ARCHITECTURE not in ['unet', 'resunet', 'resunet++', 'attention_unet', 'multiresunet', 'seunet', 'simple_cnn', 'efficientnet_b[0-7]', 'unetr', 'edsr', 'rcan', 'dfcan', 'wdsr', 'vit', 'mae', 'unext_v1', 'unext_v2']"
if (
model_arch
not in [
"unet",
"resunet",
"resunet++",
"seunet",
"resunet_se",
"attention_unet",
"multiresunet",
"unetr",
"vit",
"mae",
"unext_v1",
"unext_v2",
]
and cfg.PROBLEM.NDIM == "3D"
and cfg.PROBLEM.TYPE != "CLASSIFICATION"
):
raise ValueError(
"For 3D these models are available: {}".format(
[
"unet",
"resunet",
"resunet++",
"seunet",
"resunet_se",
"multiresunet",
"attention_unet",
"unetr",
"vit",
"mae",
"unext_v1",
"unext_v2",
]
)
)
if (
cfg.MODEL.N_CLASSES > 2
and cfg.PROBLEM.TYPE != "CLASSIFICATION"
and model_arch
not in [
"unet",
"resunet",
"resunet++",
"seunet",
"resunet_se",
"attention_unet",
"multiresunet",
"unetr",
"unext_v1",
"unext_v2",
]
):
raise ValueError(
"'MODEL.N_CLASSES' > 2 can only be used with 'MODEL.ARCHITECTURE' in ['unet', 'resunet', 'resunet++', 'seunet', 'resunet_se', 'attention_unet', 'multiresunet', 'unetr', 'unext_v1', 'unext_v2']"
)
assert len(cfg.MODEL.FEATURE_MAPS) > 2, "'MODEL.FEATURE_MAPS' needs to have at least 3 values"
# Adjust dropout to feature maps
if model_arch in ["vit", "unetr", "mae"]:
if all(x == 0 for x in cfg.MODEL.DROPOUT_VALUES):
opts.extend(["MODEL.DROPOUT_VALUES", (0.0,)])
elif len(cfg.MODEL.DROPOUT_VALUES) != 1:
raise ValueError(
"'MODEL.DROPOUT_VALUES' must be list of an unique number when 'MODEL.ARCHITECTURE' is one among ['vit', 'mae', 'unetr']"
)
elif not check_value(cfg.MODEL.DROPOUT_VALUES[0]):
raise ValueError("'MODEL.DROPOUT_VALUES' not in [0, 1] range")
else:
if len(cfg.MODEL.FEATURE_MAPS) != len(cfg.MODEL.DROPOUT_VALUES):
if all(x == 0 for x in cfg.MODEL.DROPOUT_VALUES):
opts.extend(["MODEL.DROPOUT_VALUES", (0.0,) * len(cfg.MODEL.FEATURE_MAPS)])
elif any(not check_value(x) for x in cfg.MODEL.DROPOUT_VALUES):
raise ValueError("'MODEL.DROPOUT_VALUES' not in [0, 1] range")
else:
raise ValueError("'MODEL.FEATURE_MAPS' and 'MODEL.DROPOUT_VALUES' lengths must be equal")
# Adjust Z_DOWN values to feature maps
if all(x == 0 for x in cfg.MODEL.Z_DOWN):
opts.extend(["MODEL.Z_DOWN", (2,) * (len(cfg.MODEL.FEATURE_MAPS) - 1)])
elif any([False for x in cfg.MODEL.Z_DOWN if x != 1 and x != 2]):
raise ValueError("'MODEL.Z_DOWN' needs to be 1 or 2")
else:
if model_arch == "multiresunet" and len(cfg.MODEL.Z_DOWN) != 4:
raise ValueError("'MODEL.Z_DOWN' length must be 4 when using 'multiresunet'")
elif len(cfg.MODEL.FEATURE_MAPS) - 1 != len(cfg.MODEL.Z_DOWN):
raise ValueError("'MODEL.FEATURE_MAPS' length minus one and 'MODEL.Z_DOWN' length must be equal")
# Adjust ISOTROPY values to feature maps
if all(x == True for x in cfg.MODEL.ISOTROPY):
opts.extend(["MODEL.ISOTROPY", (True,) * (len(cfg.MODEL.FEATURE_MAPS))])
# Correct UPSCALING for other workflows than SR
if len(cfg.PROBLEM.SUPER_RESOLUTION.UPSCALING) == 0:
opts.extend(["PROBLEM.SUPER_RESOLUTION.UPSCALING", (1,) * dim_count])
if len(opts) > 0:
cfg.merge_from_list(opts)
if not model_will_be_read and cfg.MODEL.SOURCE == "biapy":
assert cfg.MODEL.LAST_ACTIVATION.lower() in [
"relu",
"tanh",
"leaky_relu",
"elu",
"gelu",
"silu",
"sigmoid",
"softmax",
"linear",
"none",
], "Get unknown activation key {}".format(cfg.MODEL.LAST_ACTIVATION.lower())
if cfg.MODEL.UPSAMPLE_LAYER.lower() not in ["upsampling", "convtranspose"]:
raise ValueError(
"cfg.MODEL.UPSAMPLE_LAYER' needs to be one between ['upsampling', 'convtranspose']. Provided {}".format(
cfg.MODEL.UPSAMPLE_LAYER
)
)
if cfg.PROBLEM.TYPE in [
"SEMANTIC_SEG",
"INSTANCE_SEG",
"DETECTION",
"DENOISING",
]:
if model_arch not in [
"unet",
"resunet",
"resunet++",
"seunet",
"attention_unet",
"resunet_se",
"unetr",
"multiresunet",
"unext_v1","unext_v2",
]:
raise ValueError(
"Architectures available for {} are: ['unet', 'resunet', 'resunet++', 'seunet', 'attention_unet', 'resunet_se', 'unetr', 'multiresunet', 'unext_v1', 'unext_v2']".format(
cfg.PROBLEM.TYPE
)
)
elif cfg.PROBLEM.TYPE == "SUPER_RESOLUTION":
if cfg.PROBLEM.NDIM == "2D" and model_arch not in [
"edsr",
"rcan",
"dfcan",
"wdsr",
"unet",
"resunet",
"resunet++",
"seunet",
"resunet_se",
"attention_unet",
"multiresunet",
"unext_v1",
"unext_v2",
]:
raise ValueError(
"Architectures available for 2D 'SUPER_RESOLUTION' are: ['edsr', 'rcan', 'dfcan', 'wdsr', 'unet', 'resunet', 'resunet++', 'seunet', 'resunet_se', 'attention_unet', 'multiresunet', 'unext_v1', 'unext_v2']"
)
elif cfg.PROBLEM.NDIM == "3D":
if model_arch not in [
"unet",
"resunet",
"resunet++",
"seunet",
"attention_unet",
"multiresunet",
"unext_v1",
"unext_v2",
]:
raise ValueError(
"Architectures available for 3D 'SUPER_RESOLUTION' are: ['unet', 'resunet', 'resunet++', 'seunet', 'resunet_se', 'attention_unet', 'multiresunet', 'unext_v1', 'unext_v2']"
)
assert cfg.MODEL.UNET_SR_UPSAMPLE_POSITION in [
"pre",
"post",
], "'MODEL.UNET_SR_UPSAMPLE_POSITION' not in ['pre', 'post']"
elif cfg.PROBLEM.TYPE == "IMAGE_TO_IMAGE":
if model_arch not in [
"edsr",
"rcan",
"dfcan",
"wdsr",
"unet",
"resunet",
"resunet++",
"seunet",
"resunet_se",
"attention_unet",
"unetr",
"multiresunet",
"unext_v1",
"unext_v2",
]:
raise ValueError(
"Architectures available for 'IMAGE_TO_IMAGE' are: ['edsr', 'rcan', 'dfcan', 'wdsr', 'unet', 'resunet', 'resunet++', 'resunet_se', 'seunet', 'attention_unet', 'unetr', 'multiresunet', 'unext_v1', 'unext_v2']"
)
elif cfg.PROBLEM.TYPE == "SELF_SUPERVISED":
if model_arch not in [
"unet",
"resunet",
"resunet++",
"attention_unet",
"multiresunet",
"seunet",
"resunet_se",
"unetr",
"unext_v1",
"unext_v2",
"edsr",
"rcan",
"dfcan",
"wdsr",
"vit",
"mae",
]:
raise ValueError(
"'SELF_SUPERVISED' models available are these: ['unet', 'resunet', 'resunet++', 'attention_unet', 'multiresunet', 'seunet', 'resunet_se', "
"'unetr', 'unext_v1', 'edsr', 'rcan', 'dfcan', 'wdsr', 'vit', 'mae']"
)
elif cfg.PROBLEM.TYPE == "CLASSIFICATION":
if model_arch not in ["simple_cnn", "vit"] and "efficientnet" not in model_arch:
raise ValueError(
"Architectures available for 'CLASSIFICATION' are: ['simple_cnn', 'efficientnet_b[0-7]', 'vit']"
)
if cfg.PROBLEM.NDIM == "3D" and "efficientnet" in model_arch:
raise ValueError("EfficientNet architectures are only available for 2D images")
if model_arch in ["unetr", "vit", "mae"]:
if model_arch == "mae" and cfg.PROBLEM.TYPE != "SELF_SUPERVISED":
raise ValueError("'mae' model can only be used in 'SELF_SUPERVISED' workflow")
if cfg.MODEL.VIT_EMBED_DIM % cfg.MODEL.VIT_NUM_HEADS != 0:
raise ValueError("'MODEL.VIT_EMBED_DIM' should be divisible by 'MODEL.VIT_NUM_HEADS'")
if not all([i == cfg.DATA.PATCH_SIZE[0] for i in cfg.DATA.PATCH_SIZE[:-1]]):
raise ValueError(
"'unetr', 'vit' 'mae' models need to have same shape in all dimensions (e.g. DATA.PATCH_SIZE = (80,80,80,1) )"
)
# Check that the input patch size is divisible in every level of the U-Net's like architectures, as the model
# will throw an error not very clear for users
if model_arch in [
"unet",
"resunet",
"resunet++",
"seunet",
"resunet_se",
"attention_unet",
"multiresunet",
"unext_v1",
"unext_v2",
]:
z_size = cfg.DATA.PATCH_SIZE[0]
sizes = cfg.DATA.PATCH_SIZE[1:-1]
for i in range(len(cfg.MODEL.FEATURE_MAPS) - 1):
if not all(
[False for x in sizes if x % (np.power(2, (i + 1))) != 0 or z_size % cfg.MODEL.Z_DOWN[i] != 0]
):
m = (
"The 'DATA.PATCH_SIZE' provided is not divisible by 2 in each of the U-Net's levels. You can:\n 1) Reduce the number "
+ "of levels (by reducing 'cfg.MODEL.FEATURE_MAPS' array's length)\n 2) Increase 'DATA.PATCH_SIZE'"
)
if cfg.PROBLEM.NDIM == "3D":
m += (
"\n 3) If the Z axis is the problem, as the patch size is normally less than in other axis due to resolution, you "
+ "can tune 'MODEL.Z_DOWN' variable to not downsample the image in all U-Net levels"
)
raise ValueError(m)
z_size = z_size // cfg.MODEL.Z_DOWN[i]
if cfg.MODEL.LOAD_CHECKPOINT and check_data_paths:
if not os.path.exists(get_checkpoint_path(cfg, jobname)):
raise FileNotFoundError(f"Model checkpoint not found at {get_checkpoint_path(cfg, jobname)}")
### Train ###
assert cfg.TRAIN.OPTIMIZER in [
"SGD",
"ADAM",
"ADAMW",
], "TRAIN.OPTIMIZER not in ['SGD', 'ADAM', 'ADAMW']"
if cfg.TRAIN.ENABLE and cfg.TRAIN.LR_SCHEDULER.NAME != "":
if cfg.TRAIN.LR_SCHEDULER.NAME not in [
"reduceonplateau",
"warmupcosine",
"onecycle",
]:
raise ValueError(
"'TRAIN.LR_SCHEDULER.NAME' must be one between ['reduceonplateau', 'warmupcosine', 'onecycle']"
)
if cfg.TRAIN.LR_SCHEDULER.MIN_LR == -1.0 and cfg.TRAIN.LR_SCHEDULER.NAME != "onecycle":
raise ValueError(
"'TRAIN.LR_SCHEDULER.MIN_LR' needs to be set when 'TRAIN.LR_SCHEDULER.NAME' is between ['reduceonplateau', 'warmupcosine']"
)
if cfg.TRAIN.LR_SCHEDULER.NAME == "reduceonplateau":
if cfg.TRAIN.LR_SCHEDULER.REDUCEONPLATEAU_PATIENCE == -1:
raise ValueError(
"'TRAIN.LR_SCHEDULER.REDUCEONPLATEAU_PATIENCE' needs to be set when 'TRAIN.LR_SCHEDULER.NAME' is 'reduceonplateau'"
)
if cfg.TRAIN.LR_SCHEDULER.REDUCEONPLATEAU_PATIENCE >= cfg.TRAIN.PATIENCE:
raise ValueError(
"'TRAIN.LR_SCHEDULER.REDUCEONPLATEAU_PATIENCE' needs to be less than 'TRAIN.PATIENCE' "
)
if cfg.TRAIN.LR_SCHEDULER.NAME == "warmupcosine":
if cfg.TRAIN.LR_SCHEDULER.WARMUP_COSINE_DECAY_EPOCHS == -1:
raise ValueError(
"'TRAIN.LR_SCHEDULER.WARMUP_COSINE_DECAY_EPOCHS' needs to be set when 'TRAIN.LR_SCHEDULER.NAME' is 'warmupcosine'"
)
if cfg.TRAIN.LR_SCHEDULER.WARMUP_COSINE_DECAY_EPOCHS > cfg.TRAIN.EPOCHS:
raise ValueError("'TRAIN.LR_SCHEDULER.WARMUP_COSINE_DECAY_EPOCHS' needs to be less than 'TRAIN.EPOCHS'")
#### Augmentation ####
if cfg.AUGMENTOR.ENABLE:
if not check_value(cfg.AUGMENTOR.DA_PROB):
raise ValueError("AUGMENTOR.DA_PROB not in [0, 1] range")
if cfg.AUGMENTOR.RANDOM_ROT:
if not check_value(cfg.AUGMENTOR.RANDOM_ROT_RANGE, (-360, 360)):
raise ValueError("AUGMENTOR.RANDOM_ROT_RANGE values needs to be between [-360,360]")
if cfg.AUGMENTOR.SHEAR:
if not check_value(cfg.AUGMENTOR.SHEAR_RANGE, (-360, 360)):
raise ValueError("AUGMENTOR.SHEAR_RANGE values needs to be between [-360,360]")
if cfg.AUGMENTOR.ELASTIC:
if cfg.AUGMENTOR.E_MODE not in ["constant", "nearest", "reflect", "wrap"]:
raise ValueError("AUGMENTOR.E_MODE not in ['constant', 'nearest', 'reflect', 'wrap']")
if cfg.AUGMENTOR.BRIGHTNESS:
if cfg.AUGMENTOR.BRIGHTNESS_MODE not in ["2D", "3D"] and cfg.PROBLEM.NDIM == "3D":
raise ValueError("AUGMENTOR.BRIGHTNESS_MODE not in ['2D', '3D']")
if cfg.AUGMENTOR.CONTRAST:
if cfg.AUGMENTOR.CONTRAST_MODE not in ["2D", "3D"] and cfg.PROBLEM.NDIM == "3D":
raise ValueError("AUGMENTOR.CONTRAST_MODE not in ['2D', '3D']")
if cfg.AUGMENTOR.DROPOUT:
if not check_value(cfg.AUGMENTOR.DROP_RANGE):
raise ValueError("AUGMENTOR.DROP_RANGE values not in [0, 1] range")
if cfg.AUGMENTOR.CUTOUT:
if not check_value(cfg.AUGMENTOR.COUT_SIZE):
raise ValueError("AUGMENTOR.COUT_SIZE values not in [0, 1] range")
if cfg.AUGMENTOR.CUTBLUR:
if not check_value(cfg.AUGMENTOR.CBLUR_SIZE):
raise ValueError("AUGMENTOR.CBLUR_SIZE values not in [0, 1] range")
if not check_value(cfg.AUGMENTOR.CBLUR_DOWN_RANGE, (1, 8)):
raise ValueError("AUGMENTOR.CBLUR_DOWN_RANGE values not in [1, 8] range")
if cfg.AUGMENTOR.CUTMIX:
if not check_value(cfg.AUGMENTOR.CMIX_SIZE):
raise ValueError("AUGMENTOR.CMIX_SIZE values not in [0, 1] range")
if cfg.AUGMENTOR.CUTNOISE:
if not check_value(cfg.AUGMENTOR.CNOISE_SCALE):
raise ValueError("AUGMENTOR.CNOISE_SCALE values not in [0, 1] range")
if not check_value(cfg.AUGMENTOR.CNOISE_SIZE):
raise ValueError("AUGMENTOR.CNOISE_SIZE values not in [0, 1] range")
if cfg.AUGMENTOR.GRIDMASK:
if not check_value(cfg.AUGMENTOR.GRID_RATIO):
raise ValueError("AUGMENTOR.GRID_RATIO not in [0, 1] range")
if cfg.AUGMENTOR.GRID_D_RANGE[0] >= cfg.AUGMENTOR.GRID_D_RANGE[1]:
raise ValueError(
"cfg.AUGMENTOR.GRID_D_RANGE[0] needs to be larger than cfg.AUGMENTOR.GRID_D_RANGE[1]"
"Provided {}".format(cfg.AUGMENTOR.GRID_D_RANGE)
)
if not check_value(cfg.AUGMENTOR.GRID_D_RANGE):
raise ValueError("cfg.AUGMENTOR.GRID_D_RANGE values not in [0, 1] range")
if not check_value(cfg.AUGMENTOR.GRID_ROTATE):
raise ValueError("AUGMENTOR.GRID_ROTATE not in [0, 1] range")
if cfg.AUGMENTOR.ZOOM:
if not check_value(cfg.AUGMENTOR.ZOOM_RANGE, (0.1, 10)):
raise ValueError("AUGMENTOR.ZOOM_RANGE values needs to be between [0.1,10]")
if cfg.AUGMENTOR.ZOOM_IN_Z and dim_count == 2:
print("WARNING: Ignoring AUGMENTOR.ZOOM_IN_Z in 2D problem")
assert cfg.AUGMENTOR.AFFINE_MODE in [
"constant",
"reflect",
"wrap",
"symmetric",
], "'AUGMENTOR.AFFINE_MODE' needs to be one between ['constant', 'reflect', 'wrap', 'symmetric']"
if cfg.AUGMENTOR.GAMMA_CONTRAST and cfg.DATA.NORMALIZATION.TYPE == "custom":
raise ValueError(
"'AUGMENTOR.GAMMA_CONTRAST' doesn't work correctly on images with negative values, which 'custom' "
"normalization will lead to"
)
# BioImage Model Zoo exportation process
if cfg.MODEL.BMZ.EXPORT.ENABLE:
if not cfg.MODEL.BMZ.EXPORT.REUSE_BMZ_CONFIG:
if cfg.MODEL.BMZ.EXPORT.MODEL_NAME == "":
raise ValueError(
"'MODEL.BMZ.EXPORT.MODEL_NAME' must be set. Remember that it should be something meaningful (take other models names in https://bioimage.io/#/ as reference)."
)
if cfg.MODEL.BMZ.EXPORT.DESCRIPTION == "":
raise ValueError(
"'MODEL.BMZ.EXPORT.DESCRIPTION' must be set. Remember that it should be meaninful (take other models descriptions in https://bioimage.io/#/ as reference)."
)
if len(cfg.MODEL.BMZ.EXPORT.AUTHORS) == 0:
raise ValueError(
"At least one author must be provided in 'MODEL.BMZ.EXPORT.AUTHORS'. Each author must be a dictionary containing 'name' and 'github_user' keys. E.g. [{'name': 'Daniel', 'github_user': 'danifranco'}]"
)
if cfg.MODEL.BMZ.EXPORT.LICENSE == "":
raise ValueError(
"'MODEL.BMZ.EXPORT.LICENSE' must be set. Remember that it should be something meaningful (take other models licenses in https://bioimage.io/#/ as reference)."
)
if len(cfg.MODEL.BMZ.EXPORT.TAGS) == 0:
raise ValueError(
"'MODEL.BMZ.EXPORT.TAGS' must be set. Remember that it should be something meaningful (take other models tags in https://bioimage.io/#/ as reference)."
)
if len(cfg.MODEL.BMZ.EXPORT.CITE) > 0:
for d in cfg.MODEL.BMZ.EXPORT.CITE:
if not isinstance(d, dict):
raise ValueError(
"'MODEL.BMZ.EXPORT.CITE' needs to be a list of dicts. E.g. [{'text': 'Gizmo et al.', 'doi': '10.1002/xyzacab123'}, {'text': 'training library', 'doi': '10.1101/2024.02.03.576026'}]"
)
else:
if len(d.keys()) < 2 or "text" not in d:
raise ValueError(
"'MODEL.BMZ.EXPORT.CITE' malformed. Cite dictionary must have at least 'text' key. E.g. {'text': 'Gizmo et al.', 'doi': '10.1002/xyzacab123'}"
)
for k in d.keys():
if k not in ["text", "doi", "url"]:
raise ValueError(
f"'MODEL.BMZ.EXPORT.CITE' malformed. Cite dictionary available keys are: ['text', 'doi', 'url']. Provided {k}. E.g. {'text': 'Gizmo et al.', 'doi': '10.1002/xyzacab123'}"
)
if cfg.MODEL.BMZ.EXPORT.DOCUMENTATION == "":
print(
"WARNING: 'MODEL.BMZ.EXPORT.DOCUMENTATION' not set so the model documentation will point to BiaPy doc: https://github.com/BiaPyX/BiaPy/blob/master/README.md"
)
elif not os.path.exists(cfg.MODEL.BMZ.EXPORT.DOCUMENTATION):
raise ValueError(
"'MODEL.BMZ.EXPORT.DOCUMENTATION' path provided doesn't point to a file or can't be reached: {}".format(
cfg.MODEL.BMZ.EXPORT.DOCUMENTATION
)
)
elif not str(cfg.MODEL.BMZ.EXPORT.DOCUMENTATION).endswith(".md"):
raise ValueError(
"'MODEL.BMZ.EXPORT.DOCUMENTATION' file suffix must be .md"
)
else:
if cfg.MODEL.SOURCE != "bmz":
raise ValueError("Seems that you are not loading a BioImage Model Zoo model. Thus, you can not activate 'MODEL.BMZ.EXPORT.REUSE_BMZ_CONFIG' as there will be nothing to reuse.")
#### Post-processing ####
if cfg.TEST.POST_PROCESSING.REMOVE_CLOSE_POINTS:
if len(cfg.DATA.TEST.RESOLUTION) == 1:
raise ValueError("'DATA.TEST.RESOLUTION' must be set when using 'TEST.POST_PROCESSING.REMOVE_CLOSE_POINTS'")
if len(cfg.DATA.TEST.RESOLUTION) != dim_count:
raise ValueError(
"'DATA.TEST.RESOLUTION' must match in length to {}, which is the number of "
"dimensions".format(dim_count)
)
if cfg.TEST.POST_PROCESSING.REMOVE_CLOSE_POINTS_RADIUS[0] == -1:
raise ValueError(
"'TEST.POST_PROCESSING.REMOVE_CLOSE_POINTS' needs to be set when 'TEST.POST_PROCESSING.REMOVE_CLOSE_POINTS' is True"
)
def compare_configurations_without_model(actual_cfg, old_cfg, header_message="", old_cfg_version=None):
    """
    Compare two configurations and raise a ``ValueError`` if they differ in critical
    variables that change workflow behaviour. This comparison does not take model
    specifications into account.
    """
    print("Comparing configurations . . .")

    critical_vars = [
        "PROBLEM.TYPE",
        "PROBLEM.NDIM",
        "DATA.PATCH_SIZE",
        "PROBLEM.INSTANCE_SEG.DATA_CHANNELS",
        "PROBLEM.SELF_SUPERVISED.PRETEXT_TASK",
        "PROBLEM.SUPER_RESOLUTION.UPSCALING",
        "MODEL.N_CLASSES",
    ]

    def _lookup(node, dotted_attr):
        # Walk the configuration tree following a dotted attribute path.
        for part in dotted_attr.split("."):
            node = getattr(node, part)
        return node

    # Translate legacy values in the old configuration before comparing.
    ndims = 2 if old_cfg.PROBLEM.NDIM == "2D" else 3
    if old_cfg_version is None:
        # BiaPy versions older than 3.5.5 stored the upscaling factor as a scalar.
        if isinstance(old_cfg["PROBLEM"]["SUPER_RESOLUTION"]["UPSCALING"], int):
            old_cfg["PROBLEM"]["SUPER_RESOLUTION"]["UPSCALING"] = (old_cfg["PROBLEM"]["SUPER_RESOLUTION"]["UPSCALING"],) * ndims

    for var in critical_vars:
        current_val = _lookup(actual_cfg, var)
        loaded_val = _lookup(old_cfg, var)
        if current_val != loaded_val:
            raise ValueError(
                header_message + f"The '{var}' value of the compared configurations does not match: " +\
                f"{current_val} (current configuration) vs {loaded_val} (from loaded configuration)"
            )

    print("Configurations seem to be compatible. Continuing . . .")
def convert_old_model_cfg_to_current_version(old_cfg):
    """
    Translate a configuration dictionary written by an older BiaPy version to the
    current layout, renaming/moving/regrouping keys in place.

    Backward compatibility until commit 6aa291baa9bc5d7fb410454bfcea3a3da0c23604 (version 3.2.0)
    Commit url: https://github.com/BiaPyX/BiaPy/commit/6aa291baa9bc5d7fb410454bfcea3a3da0c23604

    Parameters
    ----------
    old_cfg : dict
        Nested plain-dict configuration loaded from an old checkpoint/YAML.
        Modified in place.

    Returns
    -------
    dict
        The same ``old_cfg`` object, updated to current key names.
    """
    if "TEST" in old_cfg:
        # TEST.STATS.FULL_IMG -> TEST.FULL_IMG ; TEST.EVALUATE removed
        if "STATS" in old_cfg["TEST"]:
            full_image = old_cfg["TEST"]["STATS"]["FULL_IMG"]
            del old_cfg["TEST"]["STATS"]
            old_cfg["TEST"]["FULL_IMG"] = full_image
        if "EVALUATE" in old_cfg["TEST"]:
            del old_cfg["TEST"]["EVALUATE"]

        if "POST_PROCESSING" in old_cfg["TEST"]:
            post = old_cfg["TEST"]["POST_PROCESSING"]

            # Old axis-specific filters become the generic median filter.
            # pop() with a default fixes the KeyError the previous try/del
            # pair raised when the *_SIZE key was absent.
            if "YZ_FILTERING" in post:
                del post["YZ_FILTERING"]
                fsize = post.pop("YZ_FILTERING_SIZE", 5)
                post["MEDIAN_FILTER"] = True
                post["MEDIAN_FILTER_AXIS"] = ["yz"]
                post["MEDIAN_FILTER_SIZE"] = [fsize]
            if "Z_FILTERING" in post:
                del post["Z_FILTERING"]
                fsize = post.pop("Z_FILTERING_SIZE", 5)
                post["MEDIAN_FILTER"] = True
                post["MEDIAN_FILTER_AXIS"] = ["z"]
                post["MEDIAN_FILTER_SIZE"] = [fsize]

            # MEASURE_PROPERTIES.REMOVE_BY_PROPERTIES.SIGN -> .SIGNS
            if "MEASURE_PROPERTIES" in post:
                if "REMOVE_BY_PROPERTIES" in post["MEASURE_PROPERTIES"]:
                    rbp = post["MEASURE_PROPERTIES"]["REMOVE_BY_PROPERTIES"]
                    if "SIGN" in rbp:
                        rbp["SIGNS"] = rbp.pop("SIGN")

            # Top-level REMOVE_BY_PROPERTIES* keys move under MEASURE_PROPERTIES.
            # NOTE: as in the original code, this rebuilds MEASURE_PROPERTIES
            # from scratch when the old top-level key is present.
            if "REMOVE_BY_PROPERTIES" in post:
                post["MEASURE_PROPERTIES"] = {}
                post["MEASURE_PROPERTIES"]["REMOVE_BY_PROPERTIES"] = {}
                post["MEASURE_PROPERTIES"]["ENABLE"] = True
                post["MEASURE_PROPERTIES"]["REMOVE_BY_PROPERTIES"]["ENABLE"] = True
                post["MEASURE_PROPERTIES"]["REMOVE_BY_PROPERTIES"]["PROPS"] = post.pop("REMOVE_BY_PROPERTIES")
            if "REMOVE_BY_PROPERTIES_VALUES" in post:
                post["MEASURE_PROPERTIES"]["REMOVE_BY_PROPERTIES"]["VALUES"] = post.pop("REMOVE_BY_PROPERTIES_VALUES")
            if "REMOVE_BY_PROPERTIES_SIGN" in post:
                post["MEASURE_PROPERTIES"]["REMOVE_BY_PROPERTIES"]["SIGNS"] = post.pop("REMOVE_BY_PROPERTIES_SIGN")

    if "PROBLEM" in old_cfg:
        ndim = 3 if "NDIM" in old_cfg["PROBLEM"] and old_cfg["PROBLEM"]["NDIM"] == "3D" else 2
        # Scalar values become per-axis lists/tuples
        if "DETECTION" in old_cfg["PROBLEM"]:
            if "CENTRAL_POINT_DILATION" in old_cfg["PROBLEM"]["DETECTION"]:
                if isinstance(old_cfg["PROBLEM"]["DETECTION"]["CENTRAL_POINT_DILATION"], int):
                    old_cfg["PROBLEM"]["DETECTION"]["CENTRAL_POINT_DILATION"] = [old_cfg["PROBLEM"]["DETECTION"]["CENTRAL_POINT_DILATION"]]
        if "SUPER_RESOLUTION" in old_cfg["PROBLEM"]:
            if "UPSCALING" in old_cfg["PROBLEM"]["SUPER_RESOLUTION"]:
                if isinstance(old_cfg["PROBLEM"]["SUPER_RESOLUTION"]["UPSCALING"], int):
                    old_cfg["PROBLEM"]["SUPER_RESOLUTION"]["UPSCALING"] = (old_cfg["PROBLEM"]["SUPER_RESOLUTION"]["UPSCALING"],)*ndim

    if "DATA" in old_cfg:
        if "TRAIN" in old_cfg["DATA"]:
            # MINIMUM_FOREGROUND_PER becomes a FILTER_SAMPLES rule
            if "MINIMUM_FOREGROUND_PER" in old_cfg["DATA"]["TRAIN"]:
                min_fore = old_cfg["DATA"]["TRAIN"].pop("MINIMUM_FOREGROUND_PER")
                if min_fore != -1:
                    old_cfg["DATA"]["TRAIN"]["FILTER_SAMPLES"] = {
                        "PROPS": [['foreground']],
                        "VALUES": [[min_fore]],
                        "SIGNS": [['lt']],
                    }
        if "VAL" in old_cfg["DATA"]:
            old_cfg["DATA"]["VAL"].pop("BINARY_MASKS", None)

    if "AUGMENTOR" in old_cfg:
        # The *_EM augmentation variants were removed
        for key in ("BRIGHTNESS_EM", "BRIGHTNESS_EM_FACTOR", "BRIGHTNESS_EM_MODE",
                    "CONTRAST_EM", "CONTRAST_EM_FACTOR", "CONTRAST_EM_MODE"):
            old_cfg["AUGMENTOR"].pop(key, None)

    if "MODEL" in old_cfg:
        # Boolean BATCH_NORMALIZATION -> NORMALIZATION = "bn"
        if "BATCH_NORMALIZATION" in old_cfg["MODEL"]:
            if old_cfg["MODEL"]["BATCH_NORMALIZATION"]:
                old_cfg["MODEL"]["NORMALIZATION"] = "bn"
            del old_cfg["MODEL"]["BATCH_NORMALIZATION"]

        if "BMZ" in old_cfg["MODEL"]:
            if "SOURCE_MODEL_DOI" in old_cfg["MODEL"]["BMZ"]:
                old_cfg["MODEL"]["BMZ"]["SOURCE_MODEL_ID"] = old_cfg["MODEL"]["BMZ"].pop("SOURCE_MODEL_DOI")
            # EXPORT_MODEL.* -> EXPORT.* ; dict.get() replaces the old bare
            # try/except blocks while keeping the exact same defaults
            if "EXPORT_MODEL" in old_cfg["MODEL"]["BMZ"]:
                export_model = old_cfg["MODEL"]["BMZ"].pop("EXPORT_MODEL")
                old_cfg["MODEL"]["BMZ"]["EXPORT"] = {
                    "ENABLED": export_model.get("ENABLE", False),
                    "MODEL_NAME": export_model.get("NAME", ''),
                    "DESCRIPTION": export_model.get("DESCRIPTION", ""),
                    "AUTHORS": export_model.get("AUTHORS", []),
                    "LICENSE": export_model.get("LICENSE", "CC-BY-4.0"),
                    "DOCUMENTATION": export_model.get("DOCUMENTATION", ""),
                    "TAGS": export_model.get("TAGS", []),
                    "CITE": export_model.get("CITE", []),
                }

    if "LOSS" in old_cfg:
        if "TYPE" in old_cfg["LOSS"]:
            del old_cfg["LOSS"]["TYPE"]

    # This build path no longer exists in current configurations
    try:
        del old_cfg["PATHS"]["RESULT_DIR"]["BMZ_BUILD"]
    except KeyError:
        pass

    return old_cfg
|
BiaPyXREPO_NAMEBiaPyPATH_START.@BiaPy_extracted@BiaPy-master@biapy@engine@check_configuration.py@.PATH_END.py
|
{
"filename": "occurrence.py",
"repo_name": "GijsMulders/epos",
"repo_path": "epos_extracted/epos-master/EPOS/plot/occurrence.py",
"type": "Python"
}
|
#import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import matplotlib.colorbar as clrbar
import matplotlib.colors
from matplotlib.cm import get_cmap
import numpy as np
from . import helpers, parametric
from EPOS.population import periodradius
clrs= ['r','g','b','m'] # in epos.prep
#fmt_symbol= {'ls':'', 'marker':'o', 'mew':2, 'ms':8,'alpha':0.6}
def all(epos, color=None, alpha_fac=None):
    """Generate every occurrence-rate plot available for this EPOS run.

    Dispatches to the plotting routines in this module based on which entries
    are present in ``epos.occurrence`` ('planet', 'model', 'poly', 'bin',
    'xzoom') and on whether an MCMC chain is available (``epos.chain``).

    Parameters
    ----------
    epos : EPOS instance
        Must have been prepared with observations (``epos.Observation``).
    color :
        Color passed through to the model-population plots.
    alpha_fac :
        Optional transparency scale factor forwarded to :func:`model`.
    """
    assert epos.Observation
    if hasattr(epos, 'occurrence'):
        # completeness-colored observed planets
        if 'planet' in epos.occurrence:
            colored(epos)
        # model planet population, in several styles
        if 'model' in epos.occurrence:
            model(epos, color=color)
            if alpha_fac is not None:
                model(epos, color=color, alpha_fac=alpha_fac)
            #if Fade:
            model(epos, color=color, Gradient=True)
        # polygonal occurrence regions
        if 'poly' in epos.occurrence:
            colored(epos, Poly=True)
            if 'model' in epos.occurrence:
                model(epos, color=color, alpha_fac=alpha_fac, Poly=True)
                if 'labels' in epos.occurrence['poly']:
                    # only callable with models right now
                    poly_only(epos)
        # rectangular occurrence bins and their integrated rates
        if 'bin' in epos.occurrence:
            colored(epos, Bins=True)
            if 'model' in epos.occurrence:
                model(epos, color=color, alpha_fac=alpha_fac, Bins=True)
            if 'eta0' in epos.occurrence['bin']:
                integrated(epos)
            if 'eta' in epos.occurrence['bin']:
                integrated(epos, MCMC=True)
                integrated(epos, MCMC=True,Planets=True)
        # 1D marginalized occurrence (parametric mode only)
        if 'xzoom' in epos.occurrence:
            if epos.Parametric:
                parametric.oneD(epos, Occ=True)
                if not epos.MonteCarlo and epos.Msini:
                    parametric.oneD_y(epos, Occ=True, Convert=True)
                if hasattr(epos, 'chain'):
                    parametric.oneD(epos, Occ=True, MCMC=True)
                    if epos.Msini:
                        parametric.oneD_y(epos, Occ=True, MCMC=True, Convert=True)
    else:
        print ('\nNo occurrence to plot, did you run EPOS.occurrence.all()? \n')
def colored(epos, Bins=False, Poly=False, NB=False):
    """Scatter plot of the observed planets, color-coded by survey completeness.

    Parameters
    ----------
    epos : EPOS instance
        Reads ``epos.occurrence['planet']['completeness']`` and the observed
        planet coordinates ``epos.obs_xvar`` / ``epos.obs_yvar``.
    Bins : bool
        Overplot the rectangular occurrence bins with rate, error, and count.
    Poly : bool
        Overplot the polygonal occurrence regions with rate and error.
    NB : bool
        Passed through to ``helpers.save`` (notebook mode).
    """
    f, (ax, axb) = plt.subplots(1,2, gridspec_kw = {'width_ratios':[20, 1]})
    f.subplots_adjust(wspace=0)

    name= 'Survey Completeness'
    if epos.name in ['dr25_F','dr25_G','dr25_K','dr25_M','dr25_GK']: name+= ' ('+epos.name[5:]+')'
    ax.set_title(name)
    helpers.set_axes(ax, epos, Trim=True)

    ''' color scale '''
    cmap='viridis' # other options: magma, plasma, inferno, spring, cool
    vmin, vmax= -4, 0
    ticks=np.linspace(vmin, vmax, (vmax-vmin)+1)
    # renamed from `clrs`: the old local shadowed the module-level `clrs`
    # palette, so `clrs[k%4]` below silently indexed the wrong array
    point_clrs, norm= helpers.color_array(np.log10(epos.occurrence['planet']['completeness']),
        vmin=vmin,vmax=vmax, cmap=get_cmap(cmap))
    ax.scatter(epos.obs_xvar, epos.obs_yvar, color=point_clrs, s=4)

    # colorbar showing completeness in percent
    cb1 = clrbar.ColorbarBase(axb, cmap=get_cmap(cmap), norm=norm, ticks=ticks,
                        orientation='vertical') # horizontal
    axb.set_yticklabels(100*10.**ticks)
    axb.tick_params(axis='y', direction='out')

    ''' bins?'''
    if Bins:
        occbin= epos.occurrence['bin']
        for k, (xbin, ybin, n, inbin, occ) in enumerate(
            zip(occbin['x'],occbin['y'],occbin['n'],occbin['i'], occbin['occ'])
            ):
            # bin outline
            ax.add_patch(patches.Rectangle( (xbin[0],ybin[0]),
                xbin[1]-xbin[0], ybin[1]-ybin[0],
                fill=False, zorder=2, ls='-', color='k') )

            # nudge the labels slightly inside the box
            xnudge=1.01
            ynudge=1.02

            size=16 if not 'textsize' in epos.plotpars else epos.plotpars['textsize']
            # 12 fit in box, 16 default

            ax.text(xbin[0]*xnudge,ybin[1]/ynudge,'{:.1%}'.format(occ), va='top',
                size=size)
            ax.text(xbin[0]*xnudge,ybin[1]/ynudge,'\n$\pm${:.1f}'.format(
                occbin['err'][k]*100), va='top', size=size)
            ax.text(xbin[1]/xnudge,ybin[0]*ynudge,'n={}'.format(n), ha='right',
                size=size)

        helpers.save(plt, epos.plotdir+'occurrence/bins', NB=NB)
    elif Poly:
        occpoly= epos.occurrence['poly']
        for k, (xc, yc, coords, n, inbin, occ, err) in enumerate(
            zip(occpoly['xc'],occpoly['yc'],occpoly['coords'],
            occpoly['n'],occpoly['i'], occpoly['occ'], occpoly['err'])
            ):
            # region outline
            ax.add_patch(matplotlib.patches.Polygon(coords,
                fill=False, zorder=2, ls='-', color='k') )
            size=16 if not 'textsize' in epos.plotpars else epos.plotpars['textsize']
            # 12 fit in box, 16 default
            ax.text(xc,yc,'{:.1%}\n$\pm${:.1f}'.format(occ, err*100), ha='center', va='center',
                size=size)

        helpers.save(plt, epos.plotdir+'occurrence/poly', NB=NB)
    else:
        helpers.save(plt, epos.plotdir+'occurrence/colored', NB=NB)
def integrated(epos, MCMC=False, Planets=False, NB=False):
    """Contour plot of the occurrence-rate density with per-bin integrated rates.

    Parameters
    ----------
    epos : EPOS instance
        Requires ``epos.occurrence['bin']`` to be filled in.
    MCMC : bool
        Plot the posterior rates ('eta') with +/- uncertainties instead of the
        initial guess ('eta0').
    Planets : bool
        Overplot the observed planets as black dots.
    NB : bool
        Passed through to ``helpers.save`` (notebook mode).
    """
    f, (ax, axb) = plt.subplots(1,2, gridspec_kw = {'width_ratios':[20, 1]})
    f.subplots_adjust(wspace=0)

    # y axis label: mass for mass-radius/RV runs, radius otherwise
    sy= 'M' if (epos.MassRadius or epos.RV) else 'R'
    ax.set_title('Occurrence'+ (' (dln'+sy+' dlnP)' if MCMC else ' (Initial Guess)'))
    helpers.set_axes(ax, epos, Trim=True, In=epos.MassRadius)

    ''' color scale? '''
    cmap='jet' # cool, spring
    vmin, vmax= -5, 0
    ticks=np.linspace(vmin, vmax, (vmax-vmin)+1)
    levels=np.linspace(vmin, vmax, 256)

    ''' 2D pdf '''
    # planet density in the (P, R or M) plane from the current parametric fit
    pps, pdf, _, _= periodradius(epos, Init=not MCMC)
    pdflog= np.log10(pdf) # in %
    cs= ax.contourf(epos.X_in, epos.Y_in, pdflog, cmap=cmap, levels=levels)
    cbar= f.colorbar(cs, cax=axb, ticks=ticks)
    axb.set_yticklabels(100*10.**ticks)
    axb.tick_params(axis='y', direction='out')
    axb.set_title('%')

    ''' integrated occurrence per bin'''
    occbin= epos.occurrence['bin']
    key = 'eta' if MCMC else 'eta0'
    # NOTE(review): 'y in' (with a space) appears to be the bin edges in input
    # units -- confirm against the occurrence module
    for k, (xbin, ybin, n, inbin, occ) in enumerate(
        zip(occbin['x'],occbin['y in'],occbin['n'],occbin['i'], occbin[key])
        ):
        clr= clrs[k%4]  # unused here; kept from the template loop

        # colored dots
        #ax.plot(epos.obs_xvar[inbin], epos.obs_yvar[inbin],
        #	ls='', marker='.', mew=0, ms=5.0, color=clr, zorder=1)

        # bin outline
        ax.add_patch(patches.Rectangle( (xbin[0],ybin[0]),
            xbin[1]-xbin[0], ybin[1]-ybin[0],
            fill=False, zorder=2, ls='-', color='k') )

        # nudge the labels slightly inside the box
        xnudge=1.01
        ynudge=1.02

        size=16 if not 'textsize' in epos.plotpars else epos.plotpars['textsize']
        # 12 fit in box, 16 default

        ax.text(xbin[0]*xnudge,ybin[1]/ynudge,'{:.1%}'.format(occ), va='top',size=size)
        if MCMC:
            ax.text(xbin[0]*xnudge,ybin[1]/ynudge,'\n +{:.1%}\n -{:.1%}'.format(
                occbin['eta+'][k],occbin['eta-'][k]
                ), va='top',size=size)

    ''' overplot planets '''
    if Planets:
        ax.plot(epos.obs_xvar, epos.obs_yvar,
            ls='', marker='.', mew=0, ms=5, alpha=1, color='k')

    fname= 'posterior' if MCMC else 'integrated'
    if Planets: fname+= '.planets'
    helpers.save(plt, epos.plotdir+'occurrence/'+fname, NB=NB)
def model(epos, color='C0', alpha_fac=None, Bins=False, Poly=False, Gradient=False):
    """Plot the model planet population in the period-radius plane.

    Parameters
    ----------
    epos : EPOS instance
        Reads ``epos.pfm['P']``, ``epos.pfm['R']`` and ``epos.occurrence['model']``.
    color :
        Matplotlib color for the model planets.
    alpha_fac :
        If not None, each point's transparency is completeness*alpha_fac,
        clipped to [0, 1].
    Bins : bool
        Overplot rectangular occurrence bins with rate, error, and count.
    Poly : bool
        Overplot polygonal occurrence regions with rate and error.
    Gradient : bool
        Color-code points by log completeness instead of transparency.
    """
    f, ax = plt.subplots()
    name= '{}, $\eta={:.2g}$'.format(epos.name, epos.occurrence['model']['eta'])
    ax.set_title(name)
    helpers.set_axes(ax, epos, Trim=True)

    # set transparency / color gradient
    if Gradient:
        suffix= '.gradient'
        # completeness clipped to [cmin, cmax], mapped on a log color scale
        weigths= epos.occurrence['model']['completeness']
        cmin, cmax= 0.001, 0.1
        weigths= np.maximum(np.minimum(weigths,cmax), cmin)

        cmap='copper_r'
        #ticks=np.linspace(vmin, vmax, (vmax-vmin)+1)
        # NOTE(review): this local `clrs` shadows the module-level `clrs` palette
        clrs, norm= helpers.color_array(np.log10(weigths),
            vmin=np.log10(cmin),vmax=np.log10(cmax), cmap=cmap)
        ax.scatter(epos.pfm['P'], epos.pfm['R'],
            marker='o', s=13, lw=0, color=clrs,zorder=0)

        # colorbar?
        # cb1 = clrbar.ColorbarBase(axb, cmap=cmap, norm=norm, ticks=ticks,
        #                 orientation='vertical') # horizontal
        # axb.set_yticklabels(100*10.**ticks)
        # axb.tick_params(axis='y', direction='out')

    elif alpha_fac is not None:
        suffix= '.alpha'
        # per-planet alpha proportional to completeness, clipped to [0, 1]
        weigths= epos.occurrence['model']['completeness']*alpha_fac #*epos.nstars
        alpha= np.maximum(np.minimum(weigths,1.), 0.0) # 0.2?
        if True:
            # color issues with to_rgba_array; build the RGBA array point by point
            clr_rgba = np.empty((len(alpha), 4), float)
            for i, a in enumerate(alpha):
                clr_rgba[i] = matplotlib.colors.to_rgba(color, a)
        else:
            clr= np.full_like(weigths,color,dtype=str)
            clr_rgba= matplotlib.colors.to_rgba_array(clr) # alpha
            #print clr_rgba[0,:]
            clr_rgba[:,3]= alpha

        ax.scatter(epos.pfm['P'], epos.pfm['R'],
            marker='o', s=13, lw=0, color=clr_rgba,zorder=0)
    else:
        # plain opaque dots in a single color
        suffix=''
        clr= matplotlib.colors.to_rgba(color)
        ax.plot(epos.pfm['P'], epos.pfm['R'], ls='', marker='o', mew=0, ms=4,
            color=clr, zorder=0)

    ''' bins'''
    if Bins:
        occbin= epos.occurrence['model']['bin']
        for k, (xbin, ybin, n, inbin, occ) in enumerate(
            zip(occbin['x'],occbin['y'],occbin['n'],occbin['i'], occbin['occ'])
            ):
            # bin outline
            ax.add_patch(patches.Rectangle( (xbin[0],ybin[0]),
                xbin[1]-xbin[0], ybin[1]-ybin[0],
                fill=False, zorder=2, ls='-', color='k') )

            # nudge the labels slightly inside the box
            xnudge=1.01
            ynudge=1.02

            size=16 if not 'textsize' in epos.plotpars else epos.plotpars['textsize']
            # 12 fit in box, 16 default

            ax.text(xbin[0]*xnudge,ybin[1]/ynudge,'{:.1%}'.format(occ), va='top',
                size=size)
            ax.text(xbin[0]*xnudge,ybin[1]/ynudge,'\n$\pm${:.1f}'.format(
                occbin['err'][k]*100), va='top', size=size)
            ax.text(xbin[1]/xnudge,ybin[0]*ynudge,'n={}'.format(n), ha='right',
                size=size)

        helpers.save(plt, epos.plotdir+'occurrence/model_bins'+suffix)
    elif Poly:
        occpoly= epos.occurrence['model']['poly']
        for k, (xc, yc, coords, n, inbin, occ, err) in enumerate(
            zip(occpoly['xc'],occpoly['yc'],occpoly['coords'],
            occpoly['n'],occpoly['i'], occpoly['occ'], occpoly['err'])
            ):
            # region outline
            ax.add_patch(matplotlib.patches.Polygon(coords,
                fill=False, zorder=2, ls='-', color='k') )
            size=16 if not 'textsize' in epos.plotpars else epos.plotpars['textsize']
            # 12 fit in box, 16 default
            ax.text(xc,yc,'{:.1%}\n$\pm${:.1%}'.format(occ, err), ha='center', va='center',
                size=size)

        helpers.save(plt, epos.plotdir+'occurrence/model_poly'+suffix)
    else:
        helpers.save(plt, epos.plotdir+'occurrence/model'+suffix)
def poly_only(epos):
    """Draw the labeled planet-class polygons without occurrence rates."""
    fig, ax = plt.subplots()
    ax.set_title('Planet Classes')
    helpers.set_axes(ax, epos, Trim=True)

    # polygon coordinates were computed by the model routine
    occpoly = epos.occurrence['model']['poly']
    labels = epos.occurrence['poly']['labels']

    # text size: 12 fits inside the box, 16 is the default
    size = epos.plotpars['textsize'] if 'textsize' in epos.plotpars else 16

    for xc, yc, coords, label in zip(occpoly['xc'], occpoly['yc'],
                                     occpoly['coords'], labels):
        ax.add_patch(matplotlib.patches.Polygon(
            coords, fill=False, zorder=2, ls='-', color='k'))
        ax.text(xc, yc, label, ha='center', va='center', size=size)

    helpers.save(plt, epos.plotdir + 'occurrence/poly_only')
|
GijsMuldersREPO_NAMEeposPATH_START.@epos_extracted@epos-master@EPOS@plot@occurrence.py@.PATH_END.py
|
{
"filename": "plot_hdbscan.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/examples/cluster/plot_hdbscan.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
====================================
Demo of HDBSCAN clustering algorithm
====================================
.. currentmodule:: sklearn
In this demo we will take a look at :class:`cluster.HDBSCAN` from the
perspective of generalizing the :class:`cluster.DBSCAN` algorithm.
We'll compare both algorithms on specific datasets. Finally we'll evaluate
HDBSCAN's sensitivity to certain hyperparameters.
We first define a couple utility functions for convenience.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import DBSCAN, HDBSCAN
from sklearn.datasets import make_blobs
def plot(X, labels, probabilities=None, parameters=None, ground_truth=False, ax=None):
    """Scatter-plot a 2D clustering; noise is drawn in black, marker size
    scales with cluster-membership probability."""
    if ax is None:
        _, ax = plt.subplots(figsize=(10, 4))
    if labels is None:
        labels = np.ones(X.shape[0])
    if probabilities is None:
        probabilities = np.ones(X.shape[0])

    unique_labels = set(labels)
    palette = [plt.cm.Spectral(v) for v in np.linspace(0, 1, len(unique_labels))]
    for k, col in zip(unique_labels, palette):
        if k == -1:
            col = [0, 0, 0, 1]  # noise points are drawn in black
        for ci in np.where(labels == k)[0]:
            ax.plot(
                X[ci, 0],
                X[ci, 1],
                "x" if k == -1 else "o",
                markerfacecolor=tuple(col),
                markeredgecolor="k",
                markersize=4 if k == -1 else 1 + 5 * probabilities[ci],
            )

    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
    preamble = "True" if ground_truth else "Estimated"
    title = f"{preamble} number of clusters: {n_clusters_}"
    if parameters is not None:
        title += " | " + ", ".join(f"{k}={v}" for k, v in parameters.items())
    ax.set_title(title)
    plt.tight_layout()
# %%
# Generate sample data
# --------------------
# One of the greatest advantages of HDBSCAN over DBSCAN is its out-of-the-box
# robustness. It's especially remarkable on heterogeneous mixtures of data.
# Like DBSCAN, it can model arbitrary shapes and distributions, however unlike
# DBSCAN it does not require specification of an arbitrary and sensitive
# `eps` hyperparameter.
#
# For example, below we generate a dataset from a mixture of three bi-dimensional
# and isotropic Gaussian distributions.
# Three isotropic Gaussian blobs (750 points total) with different spreads.
centers = [[1, 1], [-1, -1], [1.5, -1.5]]
X, labels_true = make_blobs(
    n_samples=750, centers=centers, cluster_std=[0.4, 0.1, 0.75], random_state=0
)
plot(X, labels=labels_true, ground_truth=True)
# %%
# Scale Invariance
# -----------------
# It's worth remembering that, while DBSCAN provides a default value for `eps`
# parameter, it hardly has a proper default value and must be tuned for the
# specific dataset at use.
#
# As a simple demonstration, consider the clustering for a `eps` value tuned
# for one dataset, and clustering obtained with the same value but applied to
# rescaled versions of the dataset.
fig, axes = plt.subplots(3, 1, figsize=(10, 12))
dbs = DBSCAN(eps=0.3)
# Re-fit with a fixed eps while rescaling the data: the labels degrade.
for idx, scale in enumerate([1, 0.5, 3]):
    dbs.fit(X * scale)
    plot(X * scale, dbs.labels_, parameters={"scale": scale, "eps": 0.3}, ax=axes[idx])
# %%
# Indeed, in order to maintain the same results we would have to scale `eps` by
# the same factor.
fig, axis = plt.subplots(1, 1, figsize=(12, 5))
# Scaling eps by the same factor as the data (3x) recovers the clustering.
dbs = DBSCAN(eps=0.9).fit(3 * X)
plot(3 * X, dbs.labels_, parameters={"scale": 3, "eps": 0.9}, ax=axis)
# %%
# While standardizing data (e.g. using
# :class:`sklearn.preprocessing.StandardScaler`) helps mitigate this problem,
# great care must be taken to select the appropriate value for `eps`.
#
# HDBSCAN is much more robust in this sense: HDBSCAN can be seen as
# clustering over all possible values of `eps` and extracting the best
# clusters from all possible clusters (see :ref:`User Guide <HDBSCAN>`).
# One immediate advantage is that HDBSCAN is scale-invariant.
fig, axes = plt.subplots(3, 1, figsize=(10, 12))
hdb = HDBSCAN()
# HDBSCAN with default parameters yields the same clustering at every scale.
for idx, scale in enumerate([1, 0.5, 3]):
    hdb.fit(X * scale)
    plot(
        X * scale,
        hdb.labels_,
        hdb.probabilities_,
        ax=axes[idx],
        parameters={"scale": scale},
    )
# %%
# Multi-Scale Clustering
# ----------------------
# HDBSCAN is much more than scale invariant though -- it is capable of
# multi-scale clustering, which accounts for clusters with varying density.
# Traditional DBSCAN assumes that any potential clusters are homogeneous in
# density. HDBSCAN is free from such constraints. To demonstrate this we
# consider the following dataset
# Four blobs: two dense, well-separated and two sparse, overlapping ones.
centers = [[-0.85, -0.85], [-0.85, 0.85], [3, 3], [3, -3]]
X, labels_true = make_blobs(
    n_samples=750, centers=centers, cluster_std=[0.2, 0.35, 1.35, 1.35], random_state=0
)
plot(X, labels=labels_true, ground_truth=True)
# %%
# This dataset is more difficult for DBSCAN due to the varying densities and
# spatial separation:
#
# - If `eps` is too large then we risk falsely clustering the two dense
# clusters as one since their mutual reachability will extend
# clusters.
# - If `eps` is too small, then we risk fragmenting the sparser clusters
# into many false clusters.
#
# Not to mention this requires manually tuning choices of `eps` until we
# find a tradeoff that we are comfortable with.
fig, axes = plt.subplots(2, 1, figsize=(10, 8))
params = {"eps": 0.7}  # large eps: merges the two dense clusters
dbs = DBSCAN(**params).fit(X)
plot(X, dbs.labels_, parameters=params, ax=axes[0])
params = {"eps": 0.3}  # small eps: fragments the sparse clusters
dbs = DBSCAN(**params).fit(X)
plot(X, dbs.labels_, parameters=params, ax=axes[1])
# %%
# To properly cluster the two dense clusters, we would need a smaller value of
# epsilon, however at `eps=0.3` we are already fragmenting the sparse clusters,
# which would only become more severe as we decrease epsilon. Indeed it seems
# that DBSCAN is incapable of simultaneously separating the two dense clusters
# while preventing the sparse clusters from fragmenting. Let's compare with
# HDBSCAN.
# Default HDBSCAN handles the mixed-density dataset without tuning.
hdb = HDBSCAN().fit(X)
plot(X, hdb.labels_, hdb.probabilities_)
# %%
# HDBSCAN is able to adapt to the multi-scale structure of the dataset without
# requiring parameter tuning. While any sufficiently interesting dataset will
# require tuning, this case demonstrates that HDBSCAN can yield qualitatively
# better classes of clusterings without users' intervention which are
# inaccessible via DBSCAN.
# %%
# Hyperparameter Robustness
# -------------------------
# Ultimately tuning will be an important step in any real world application, so
# let's take a look at some of the most important hyperparameters for HDBSCAN.
# While HDBSCAN is free from the `eps` parameter of DBSCAN, it does still have
# some hyperparameters like `min_cluster_size` and `min_samples` which tune its
# results regarding density. We will however see that HDBSCAN is relatively robust
# to various real world examples thanks to those parameters whose clear meaning
# helps tuning them.
#
# `min_cluster_size`
# ^^^^^^^^^^^^^^^^^^
# `min_cluster_size` is the minimum number of samples in a group for that
# group to be considered a cluster.
#
# Clusters smaller than the ones of this size will be left as noise.
# The default value is 5. This parameter is generally tuned to
# larger values as needed. Smaller values will likely lead to results with
# fewer points labeled as noise. However, values which are too small will lead to
# false sub-clusters being picked up and preferred. Larger values tend to be
# more robust with respect to noisy datasets, e.g. high-variance clusters with
# significant overlap.
# Sweep min_cluster_size: default (5), smaller (3), and larger (25).
PARAM = ({"min_cluster_size": 5}, {"min_cluster_size": 3}, {"min_cluster_size": 25})
fig, axes = plt.subplots(3, 1, figsize=(10, 12))
for i, param in enumerate(PARAM):
    hdb = HDBSCAN(**param).fit(X)
    labels = hdb.labels_
    plot(X, labels, hdb.probabilities_, param, ax=axes[i])
# %%
# `min_samples`
# ^^^^^^^^^^^^^
# `min_samples` is the number of samples in a neighborhood for a point to
# be considered as a core point, including the point itself.
# `min_samples` defaults to `min_cluster_size`.
# Similarly to `min_cluster_size`, larger values for `min_samples` increase
# the model's robustness to noise, but risks ignoring or discarding
# potentially valid but small clusters.
# `min_samples` better be tuned after finding a good value for `min_cluster_size`.
# Sweep min_samples at a fixed min_cluster_size.
PARAM = (
    {"min_cluster_size": 20, "min_samples": 5},
    {"min_cluster_size": 20, "min_samples": 3},
    {"min_cluster_size": 20, "min_samples": 25},
)
fig, axes = plt.subplots(3, 1, figsize=(10, 12))
for i, param in enumerate(PARAM):
    hdb = HDBSCAN(**param).fit(X)
    labels = hdb.labels_
    plot(X, labels, hdb.probabilities_, param, ax=axes[i])
# %%
# `dbscan_clustering`
# ^^^^^^^^^^^^^^^^^^^
# During `fit`, `HDBSCAN` builds a single-linkage tree which encodes the
# clustering of all points across all values of :class:`~cluster.DBSCAN`'s
# `eps` parameter.
# We can thus plot and evaluate these clusterings efficiently without fully
# recomputing intermediate values such as core-distances, mutual-reachability,
# and the minimum spanning tree. All we need to do is specify the `cut_distance`
# (equivalent to `eps`) we want to cluster with.
PARAM = (
    {"cut_distance": 0.1},
    {"cut_distance": 0.5},
    {"cut_distance": 1.0},
)
hdb = HDBSCAN()
hdb.fit(X)
fig, axes = plt.subplots(len(PARAM), 1, figsize=(10, 12))
# Re-extract flat DBSCAN-style clusterings from the already-fitted tree
# at several cut distances -- no refit needed.
for i, param in enumerate(PARAM):
    labels = hdb.dbscan_clustering(**param)
    plot(X, labels, hdb.probabilities_, param, ax=axes[i])
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@examples@cluster@plot_hdbscan.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/modeling/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This subpackage provides a framework for representing models and
performing model evaluation and fitting. It supports 1D and 2D models
and fitting with parameter constraints. It has some predefined models
and fitting routines.
"""
from . import fitting
from . import models
from .core import *
from .parameters import *
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@modeling@__init__.py@.PATH_END.py
|
{
"filename": "_hovertemplatesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergeo/_hovertemplatesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `hovertemplatesrc` property of `scattergeo` traces.

    `hovertemplatesrc` references a data source (presumably a chart-studio
    grid column) that supplies per-point `hovertemplate` values; all checks
    are delegated to the `SrcValidator` base class.
    """

    def __init__(
        self, plotly_name="hovertemplatesrc", parent_name="scattergeo", **kwargs
    ):
        super(HovertemplatesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "none" edit type: changing this property triggers no re-plot step
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergeo@_hovertemplatesrc.py@.PATH_END.py
|
{
"filename": "linear.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/flax/linen/linear.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linear modules."""
from typing import (
Any,
)
from collections.abc import Iterable, Sequence
import jax
import jax.numpy as jnp
import numpy as np
from jax import eval_shape, lax
from jax.core import ShapedArray
import opt_einsum
from flax.core import meta
from flax.linen import initializers
from flax.linen.dtypes import promote_dtype
from flax.linen import module
from flax.linen.module import Module, compact
from flax.typing import (
Array,
PRNGKey as PRNGKey,
Dtype,
Shape as Shape,
Initializer,
PrecisionLike,
DotGeneralT,
ConvGeneralDilatedT,
PaddingLike,
LaxPadding,
)
default_kernel_init = initializers.lecun_normal()
def _normalize_axes(axes: tuple[int, ...], ndim: int) -> tuple[int, ...]:
# A tuple by convention. len(axes_tuple) then also gives the rank efficiently.
return tuple(sorted(ax if ax >= 0 else ndim + ax for ax in axes))
def _canonicalize_tuple(x: Sequence[int] | int) -> tuple[int, ...]:
if isinstance(x, Iterable):
return tuple(x)
else:
return (x,)
class DenseGeneral(Module):
  """A linear transformation with flexible axes.

  Example usage::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp

    >>> # equivalent to `nn.Dense(features=4)`
    >>> layer = nn.DenseGeneral(features=4)
    >>> # output features (4, 5)
    >>> layer = nn.DenseGeneral(features=(4, 5))
    >>> params = layer.init(jax.random.key(0), jnp.ones((1, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, params)
    {'params': {'bias': (4, 5), 'kernel': (3, 4, 5)}}
    >>> # apply transformation on the second and last axes
    >>> layer = nn.DenseGeneral(features=(4, 5), axis=(1, -1))
    >>> params = layer.init(jax.random.key(0), jnp.ones((1, 3, 6, 7)))
    >>> jax.tree_util.tree_map(jnp.shape, params)
    {'params': {'bias': (4, 5), 'kernel': (3, 7, 4, 5)}}

  Attributes:
    features: int or tuple with number of output features.
    axis: int or tuple with axes to apply the transformation on. For instance,
      (-2, -1) will apply the transformation to the last two axes.
    batch_dims: tuple with batch axes.
    use_bias: whether to add a bias to the output (default: True).
    dtype: the dtype of the computation (default: infer from input and params).
    param_dtype: the dtype passed to parameter initializers (default: float32).
    kernel_init: initializer function for the weight matrix.
    bias_init: initializer function for the bias.
    precision: numerical precision of the computation see ``jax.lax.Precision``
      for details.
  """

  features: int | Sequence[int]
  axis: int | Sequence[int] = -1
  batch_dims: Sequence[int] = ()
  use_bias: bool = True
  dtype: Dtype | None = None
  param_dtype: Dtype = jnp.float32
  kernel_init: Initializer = default_kernel_init
  bias_init: Initializer = initializers.zeros_init()
  precision: PrecisionLike = None
  # Deprecated. Will be removed.
  dot_general: DotGeneralT | None = None
  dot_general_cls: Any = None

  @compact
  def __call__(self, inputs: Array) -> Array:
    """Applies a linear transformation to the inputs along multiple dimensions.

    Args:
      inputs: The nd-array to be transformed.

    Returns:
      The transformed input.
    """
    # Normalize all three axis specs to tuples so the rest of the code can
    # treat the scalar and multi-axis cases uniformly.
    features = _canonicalize_tuple(self.features)
    axis = _canonicalize_tuple(self.axis)
    batch_dims = _canonicalize_tuple(self.batch_dims)
    if batch_dims:
      # Batch dims must be exactly 0..max_dim with no gaps — the kernel layout
      # below assumes they form the leading axes.
      max_dim = np.max(batch_dims)
      if set(batch_dims) != set(range(max_dim + 1)):
        raise ValueError(
          'batch_dims %s must be consecutive leading '
          'dimensions starting from 0.' % str(batch_dims)
        )

    ndim = inputs.ndim
    n_batch_dims = len(batch_dims)
    # Convert any negative axis indices to non-negative, sorted form.
    axis = _normalize_axes(axis, ndim)
    batch_dims = _normalize_axes(batch_dims, ndim)
    n_axis, n_features = len(axis), len(features)

    def kernel_init_wrap(rng, shape, dtype=jnp.float32):
      # Initialize as a flat 2D (fan_in, fan_out) matrix so variance-scaling
      # initializers see the true fan sizes, then reshape to the nd kernel.
      flat_shape = (
        np.prod(shape[:n_batch_dims])
        * np.prod(shape[n_batch_dims : n_axis + n_batch_dims]),
        np.prod(shape[-n_features:]),
      )
      flat_shape = jax.tree_util.tree_map(int, flat_shape)
      kernel = self.kernel_init(rng, flat_shape, dtype)
      if isinstance(kernel, meta.AxisMetadata):
        # Preserve partitioning metadata wrappers while reshaping the value.
        return meta.replace_boxed(kernel, jnp.reshape(kernel.unbox(), shape))
      return jnp.reshape(kernel, shape)

    batch_shape = tuple(inputs.shape[ax] for ax in batch_dims)
    # batch and non-contracting dims of input with 1s for batch dims.
    expanded_batch_shape = tuple(
      inputs.shape[ax] if ax in batch_dims else 1
      for ax in range(inputs.ndim)
      if ax not in axis
    )
    kernel_shape = tuple(inputs.shape[ax] for ax in axis) + features
    kernel = self.param(
      'kernel', kernel_init_wrap, batch_shape + kernel_shape, self.param_dtype
    )

    # Kernel axes: [batch_ind] + [contract_ind] + [feature axes].
    batch_ind = tuple(range(n_batch_dims))
    contract_ind = tuple(range(n_batch_dims, n_axis + n_batch_dims))

    if self.use_bias:

      def bias_init_wrap(rng, shape, dtype=jnp.float32):
        # Same flatten-init-reshape trick as the kernel, for the bias.
        flat_shape = (
          np.prod(shape[:n_batch_dims]) * np.prod(shape[-n_features:]),
        )
        flat_shape = jax.tree_util.tree_map(int, flat_shape)
        bias = self.bias_init(rng, flat_shape, dtype)
        if isinstance(bias, meta.AxisMetadata):
          return meta.replace_boxed(bias, jnp.reshape(bias.unbox(), shape))
        return jnp.reshape(bias, shape)

      bias = self.param(
        'bias', bias_init_wrap, batch_shape + features, self.param_dtype
      )
    else:
      bias = None

    inputs, kernel, bias = promote_dtype(inputs, kernel, bias, dtype=self.dtype)

    # Resolve the dot_general implementation: explicit class takes precedence
    # over the (deprecated) instance attribute, falling back to lax.dot_general.
    if self.dot_general_cls is not None:
      dot_general = self.dot_general_cls()
    elif self.dot_general is not None:
      dot_general = self.dot_general
    else:
      dot_general = lax.dot_general
    out = dot_general(
      inputs,
      kernel,
      ((axis, contract_ind), (batch_dims, batch_ind)),
      precision=self.precision,
    )
    # dot_general output has shape [batch_dims/group_dims] + [feature_dims]
    if self.use_bias:
      # expand bias shape to broadcast bias over batch dims.
      bias = jnp.reshape(bias, expanded_batch_shape + features)
      out += bias
    return out
class Dense(Module):
  """A linear transformation applied over the last dimension of the input.

  Example usage::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp

    >>> layer = nn.Dense(features=4)
    >>> params = layer.init(jax.random.key(0), jnp.ones((1, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, params)
    {'params': {'bias': (4,), 'kernel': (3, 4)}}

  Attributes:
    features: the number of output features.
    use_bias: whether to add a bias to the output (default: True).
    dtype: the dtype of the computation (default: infer from input and params).
    param_dtype: the dtype passed to parameter initializers (default: float32).
    precision: numerical precision of the computation see ``jax.lax.Precision``
      for details.
    kernel_init: initializer function for the weight matrix.
    bias_init: initializer function for the bias.
  """

  features: int
  use_bias: bool = True
  dtype: Dtype | None = None
  param_dtype: Dtype = jnp.float32
  precision: PrecisionLike = None
  kernel_init: Initializer = default_kernel_init
  bias_init: Initializer = initializers.zeros_init()
  # Deprecated. Will be removed.
  dot_general: DotGeneralT | None = None
  dot_general_cls: Any = None

  @compact
  def __call__(self, inputs: Array) -> Array:
    """Applies a linear transformation to the inputs along the last dimension.

    Args:
      inputs: The nd-array to be transformed.

    Returns:
      The transformed input.
    """
    # Kernel shape is (in_features, out_features); in_features is inferred
    # from the last axis of the input at first call.
    kernel = self.param(
      'kernel',
      self.kernel_init,
      (jnp.shape(inputs)[-1], self.features),
      self.param_dtype,
    )
    if self.use_bias:
      bias = self.param(
        'bias', self.bias_init, (self.features,), self.param_dtype
      )
    else:
      bias = None
    inputs, kernel, bias = promote_dtype(inputs, kernel, bias, dtype=self.dtype)
    # Resolve the dot_general implementation: explicit class takes precedence
    # over the (deprecated) instance attribute, falling back to lax.dot_general.
    if self.dot_general_cls is not None:
      dot_general = self.dot_general_cls()
    elif self.dot_general is not None:
      dot_general = self.dot_general
    else:
      dot_general = lax.dot_general
    # Contract the last axis of the input against the first axis of the kernel.
    y = dot_general(
      inputs,
      kernel,
      (((inputs.ndim - 1,), (0,)), ((), ())),
      precision=self.precision,
    )
    if bias is not None:
      # Reshape bias to (1, ..., 1, features) so it broadcasts over all
      # leading dimensions of y.
      y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))
    return y
class Einsum(Module):
  """An einsum transformation with learnable kernel and bias.

  Example usage::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp

    >>> layer = nn.Einsum((5, 6, 7), 'abc,cde->abde')
    >>> variables = layer.init(jax.random.key(0), jnp.ones((3, 4, 5)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'bias': (6, 7), 'kernel': (5, 6, 7)}}

  Attributes:
    shape: the shape of the kernel.
    einsum_str: a string to denote the einsum equation. The equation must
      have exactly two operands, the lhs being the input passed in, and
      the rhs being the learnable kernel. Exactly one of ``einsum_str``
      in the constructor argument and call argument must be not None,
      while the other must be None.
    use_bias: whether to add a bias to the output (default: True).
    dtype: the dtype of the computation (default: infer from input and params).
    param_dtype: the dtype passed to parameter initializers (default: float32).
    precision: numerical precision of the computation see ``jax.lax.Precision``
      for details.
    kernel_init: initializer function for the weight matrix.
    bias_init: initializer function for the bias.
  """

  shape: Shape
  einsum_str: str | None = None
  use_bias: bool = True
  dtype: Dtype | None = None
  param_dtype: Dtype = jnp.float32
  precision: PrecisionLike = None
  kernel_init: Initializer = default_kernel_init
  bias_init: Initializer = initializers.zeros_init()

  @compact
  def __call__(self, inputs: Array, einsum_str: str | None = None) -> Array:
    """Applies a linear transformation to the inputs along the last dimension.

    Args:
      inputs: The nd-array to be transformed.
      einsum_str: a string to denote the einsum equation. The equation must
        have exactly two operands, the lhs being the input passed in, and
        the rhs being the learnable kernel. The ``einsum_str`` passed into
        the call method will take precedence over the ``einsum_str`` passed
        into the constructor.

    Returns:
      The transformed input.
    """
    # merge_param enforces that exactly one of the constructor / call
    # arguments is provided.
    einsum_str = module.merge_param('einsum_str', self.einsum_str, einsum_str)

    # Whitespace is insignificant in einsum equations; strip it so the
    # validation and parsing below see a canonical string.
    einsum_str = einsum_str.replace(' ', '')
    if '->' not in einsum_str:
      raise ValueError(
        '`einsum_str` equation must be explicit and include "->".'
      )
    if einsum_str.count(',') != 1:
      raise ValueError(
        '`einsum_str` equation must have exactly two operands and '
        'therefore, exactly one comma character, instead of '
        f'{einsum_str.count(",")}'
      )

    kernel = self.param(
      'kernel',
      self.kernel_init,
      self.shape,
      self.param_dtype,
    )

    if self.use_bias:
      # The bias shape depends on which kernel axes survive into the output;
      # infer both the parameter shape and its broadcastable form.
      bias_shape, broadcasted_bias_shape = self._get_bias_shape(
        einsum_str, inputs, kernel
      )
      bias = self.param('bias', self.bias_init, bias_shape, self.param_dtype)
    else:
      bias = None

    inputs, kernel, bias = promote_dtype(inputs, kernel, bias, dtype=self.dtype)

    y = jnp.einsum(einsum_str, inputs, kernel, precision=self.precision)

    if bias is not None:
      y += jnp.reshape(bias, broadcasted_bias_shape)
    return y

  def _get_bias_shape(self, einsum_str: str, lhs: Array, rhs: Array):
    """Infer the bias shape and broadcasted bias shape given the ``einsum_str``,
    ``lhs`` and ``rhs`` arrays. This is needed for instantiating the bias
    parameter and adding the bias to the output during forward inference.

    This function first replaces all ellipses with actual letter characters,
    then computes the bias shape by checking to see which axes in the rhs
    array remain in the resulting array after einsumming. These axes are the
    embedding/feature dimensions, and all other axes in rhs are reduction axes.
    """
    # More details on the parsing function: https://github.com/dgasmith/opt_einsum/blob/c826bb7df16f470a69f7bf90598fc27586209d11/opt_einsum/parser.py#L246
    # returns the einsum string representation of the operands and result, with
    # ellipsis replaced by actual letter characters
    operands_str, result_str, _ = opt_einsum.parser.parse_einsum_input(
      (einsum_str, lhs, rhs)
    )

    # rhs_dict is a dict{character:index} mapping that maps every character in
    # the rhs einsum string representation to its corresponding index position in the string
    rhs_dict = {c: i for i, c in enumerate(operands_str.split(',')[1])}
    assert len(rhs_dict) == len(self.shape)

    broadcasted_bias_shape = [1] * len(result_str)
    bias_shape = []
    for i, c in enumerate(result_str):
      if c in rhs_dict:
        # The axis appears in both the kernel and the output, so it is a
        # feature axis the bias must cover; all other output axes broadcast.
        broadcasted_bias_shape[i] = self.shape[rhs_dict[c]]
        bias_shape.append(self.shape[rhs_dict[c]])

    return bias_shape, broadcasted_bias_shape
def _conv_dimension_numbers(input_shape):
"""Computes the dimension numbers based on the input shape."""
ndim = len(input_shape)
lhs_spec = (0, ndim - 1) + tuple(range(1, ndim - 1))
rhs_spec = (ndim - 1, ndim - 2) + tuple(range(0, ndim - 2))
out_spec = lhs_spec
return lax.ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
def canonicalize_padding(padding: PaddingLike, rank: int) -> LaxPadding:
  """Canonicalizes conv padding to a jax.lax supported format."""
  if isinstance(padding, str):
    # Named modes ('SAME', 'VALID', ...) pass through untouched.
    return padding
  if isinstance(padding, int):
    # A single int pads both sides of every spatial dimension equally.
    return [(padding, padding)] * rank
  if isinstance(padding, Sequence) and len(padding) == rank:
    # Per-dimension entries: an int means symmetric padding, a (low, high)
    # pair is taken as-is; anything else makes the whole spec invalid.
    normalized = []
    for entry in padding:
      if isinstance(entry, int):
        normalized.append((entry, entry))
      elif isinstance(entry, tuple) and len(entry) == 2:
        normalized.append(entry)
      else:
        break
    else:
      return normalized
  raise ValueError(
    f'Invalid padding format: {padding}, should be str, int,'
    f' or a sequence of len {rank} where each element is an'
    ' int or pair of ints.'
  )
class _Conv(Module):
  """Convolution Module wrapping ``lax.conv_general_dilated``.

  Attributes:
    features: number of convolution filters.
    kernel_size: shape of the convolutional kernel. An integer will be
      interpreted as a tuple of the single integer.
    strides: an integer or a sequence of `n` integers, representing the
      inter-window strides (default: 1).
    padding: either the string ``'SAME'``, the string ``'VALID'``, the string
      ``'CIRCULAR'`` (periodic boundary conditions), or a sequence of ``n`` ``(low,
      high)`` integer pairs that give the padding to apply before and after each
      spatial dimension. A single int is interpreted as applying the same padding
      in all dims and assign a single int in a sequence causes the same padding
      to be used on both sides. ``'CAUSAL'`` padding for a 1D convolution will
      left-pad the convolution axis, resulting in same-sized output.
    input_dilation: an integer or a sequence of ``n`` integers, giving the
      dilation factor to apply in each spatial dimension of ``inputs``
      (default: 1). Convolution with input dilation ``d`` is equivalent to
      transposed convolution with stride ``d``.
    kernel_dilation: an integer or a sequence of ``n`` integers, giving the
      dilation factor to apply in each spatial dimension of the convolution
      kernel (default: 1). Convolution with kernel dilation
      is also known as 'atrous convolution'.
    feature_group_count: integer, default 1. If specified divides the input
      features into groups.
    use_bias: whether to add a bias to the output (default: True).
    mask: Optional mask for the weights during masked convolution. The mask must
      be the same shape as the convolution weight matrix.
    dtype: the dtype of the computation (default: infer from input and params).
    param_dtype: the dtype passed to parameter initializers (default: float32).
    precision: numerical precision of the computation see ``jax.lax.Precision``
      for details.
    kernel_init: initializer for the convolutional kernel.
    bias_init: initializer for the bias.
  """

  features: int
  kernel_size: int | Sequence[int]
  strides: None | int | Sequence[int] = 1
  padding: PaddingLike = 'SAME'
  input_dilation: None | int | Sequence[int] = 1
  kernel_dilation: None | int | Sequence[int] = 1
  feature_group_count: int = 1
  use_bias: bool = True
  mask: Array | None = None
  dtype: Dtype | None = None
  param_dtype: Dtype = jnp.float32
  precision: PrecisionLike = None
  kernel_init: Initializer = default_kernel_init
  bias_init: Initializer = initializers.zeros_init()
  # Deprecated. Will be removed.
  conv_general_dilated: ConvGeneralDilatedT | None = None
  conv_general_dilated_cls: Any = None

  @property
  def shared_weights(self) -> bool:  # type: ignore
    """Defines whether weights are shared or not between different pixels.

    Returns:
      ``True`` to use shared weights in convolution (regular convolution).
      ``False`` to use different weights at different pixels, a.k.a.
      "locally connected layer", "unshared convolution", or "local convolution".
    """
    # Abstract: concrete subclasses (Conv / ConvLocal) must implement this.
    ...

  @compact
  def __call__(self, inputs: Array) -> Array:
    """Applies a (potentially unshared) convolution to the inputs.

    Args:
      inputs: input data with dimensions ``(*batch_dims, spatial_dims..., features)``.
        This is the channels-last convention, i.e. NHWC for a 2d convolution and
        NDHWC for a 3D convolution. Note: this is different from the input convention
        used by ``lax.conv_general_dilated``, which puts the spatial dimensions last.
        Note: If the input has more than 1 batch dimension, all batch dimensions
        are flattened into a single dimension for the convolution and restored
        before returning. In some cases directly vmap'ing the layer may yield
        better performance than this default flattening approach. If the input
        lacks a batch dimension it will be added for the convolution and removed
        on return, an allowance made to enable writing single-example code.

    Returns:
      The convolved data.
    """
    kernel_size: Sequence[int]
    if isinstance(self.kernel_size, int):
      kernel_size = (self.kernel_size,)
    else:
      kernel_size = tuple(self.kernel_size)

    def maybe_broadcast(
      x: int | Sequence[int] | None,
    ) -> tuple[int, ...]:
      # Expand a scalar (or None) to one value per spatial dimension.
      if x is None:
        # backward compatibility with using None as sentinel for
        # broadcast 1
        x = 1
      if isinstance(x, int):
        return (x,) * len(kernel_size)
      return tuple(x)

    # Combine all input batch dimensions into a single leading batch axis.
    num_batch_dimensions = inputs.ndim - (len(kernel_size) + 1)
    if num_batch_dimensions != 1:
      input_batch_shape = inputs.shape[:num_batch_dimensions]
      total_batch_size = int(np.prod(input_batch_shape))
      flat_input_shape = (total_batch_size,) + inputs.shape[
        num_batch_dimensions:
      ]
      inputs = jnp.reshape(inputs, flat_input_shape)

    # self.strides or (1,) * (inputs.ndim - 2)
    strides = maybe_broadcast(self.strides)
    input_dilation = maybe_broadcast(self.input_dilation)
    kernel_dilation = maybe_broadcast(self.kernel_dilation)

    padding_lax = canonicalize_padding(self.padding, len(kernel_size))
    if padding_lax == 'CIRCULAR':
      # Emulate periodic boundaries by wrap-padding the input ourselves and
      # then running a 'VALID' convolution.
      kernel_size_dilated = [
        (k - 1) * d + 1 for k, d in zip(kernel_size, kernel_dilation)
      ]
      zero_pad: list[tuple[int, int]] = [(0, 0)]
      pads = (
        zero_pad
        + [((k - 1) // 2, k // 2) for k in kernel_size_dilated]
        + [(0, 0)]
      )
      inputs = jnp.pad(inputs, pads, mode='wrap')
      padding_lax = 'VALID'
    elif padding_lax == 'CAUSAL':
      if len(kernel_size) != 1:
        raise ValueError(
          'Causal padding is only implemented for 1D convolutions.'
        )
      # Left-pad only, so each output position sees no "future" inputs.
      left_pad = kernel_dilation[0] * (kernel_size[0] - 1)
      pads = [(0, 0), (left_pad, 0), (0, 0)]
      inputs = jnp.pad(inputs, pads)
      padding_lax = 'VALID'

    dimension_numbers = _conv_dimension_numbers(inputs.shape)
    in_features = jnp.shape(inputs)[-1]

    if self.shared_weights:
      # One shared convolutional kernel for all pixels in the output.
      assert in_features % self.feature_group_count == 0
      kernel_shape = kernel_size + (
        in_features // self.feature_group_count,
        self.features,
      )
    else:
      if self.feature_group_count != 1:
        raise NotImplementedError(
          '`lax.conv_general_dilated_local` does not support '
          f'`feature_group_count != 1`, got `{self.feature_group_count}`.'
        )

      # Need to know the spatial output shape of a standard convolution to
      # create the unshared convolution kernel.
      if self.conv_general_dilated_cls is not None:
        conv_general_dilated = self.conv_general_dilated_cls()
      elif self.conv_general_dilated is not None:
        conv_general_dilated = self.conv_general_dilated
      else:
        conv_general_dilated = lax.conv_general_dilated
      # eval_shape traces the conv abstractly — no actual FLOPs are spent.
      conv_output_shape = eval_shape(
        lambda lhs, rhs: conv_general_dilated(  # pylint: disable=g-long-lambda
          lhs=lhs,
          rhs=rhs,
          window_strides=strides,
          padding=padding_lax,
          dimension_numbers=dimension_numbers,
          lhs_dilation=input_dilation,
          rhs_dilation=kernel_dilation,
        ),
        inputs,
        ShapedArray(kernel_size + (in_features, self.features), inputs.dtype),
      ).shape

      # One (unshared) convolutional kernel per each pixel in the output.
      kernel_shape = conv_output_shape[1:-1] + (
        np.prod(kernel_size) * in_features,
        self.features,
      )

    if self.mask is not None and self.mask.shape != kernel_shape:
      raise ValueError(
        'Mask needs to have the same shape as weights. '
        f'Shapes are: {self.mask.shape}, {kernel_shape}'
      )

    kernel = self.param(
      'kernel', self.kernel_init, kernel_shape, self.param_dtype
    )

    if self.mask is not None:
      kernel *= self.mask

    if self.use_bias:
      if self.shared_weights:
        # One bias weight per output channel, shared between pixels.
        bias_shape = (self.features,)
      else:
        # One bias weight per output entry, unshared between pixels.
        bias_shape = conv_output_shape[1:]

      bias = self.param('bias', self.bias_init, bias_shape, self.param_dtype)
    else:
      bias = None

    inputs, kernel, bias = promote_dtype(inputs, kernel, bias, dtype=self.dtype)
    if self.shared_weights:
      # Resolve the conv implementation: explicit class takes precedence over
      # the (deprecated) instance attribute, falling back to the lax default.
      if self.conv_general_dilated_cls is not None:
        conv_general_dilated = self.conv_general_dilated_cls()
      elif self.conv_general_dilated is not None:
        conv_general_dilated = self.conv_general_dilated
      else:
        conv_general_dilated = lax.conv_general_dilated
      y = conv_general_dilated(
        inputs,
        kernel,
        strides,
        padding_lax,
        lhs_dilation=input_dilation,
        rhs_dilation=kernel_dilation,
        dimension_numbers=dimension_numbers,
        feature_group_count=self.feature_group_count,
        precision=self.precision,
      )
    else:
      y = lax.conv_general_dilated_local(
        lhs=inputs,
        rhs=kernel,
        window_strides=strides,
        padding=padding_lax,
        filter_shape=kernel_size,
        lhs_dilation=input_dilation,
        rhs_dilation=kernel_dilation,
        dimension_numbers=dimension_numbers,
        precision=self.precision,
      )

    if self.use_bias:
      # Prepend singleton axes so the bias broadcasts against y.
      bias = bias.reshape((1,) * (y.ndim - bias.ndim) + bias.shape)  # type: ignore
      y += bias

    if num_batch_dimensions != 1:
      # Restore the original (possibly multi-axis) batch shape.
      output_shape = input_batch_shape + y.shape[1:]
      y = jnp.reshape(y, output_shape)
    return y
class Conv(_Conv):
  """Convolution Module wrapping ``lax.conv_general_dilated``.

  This is the standard shared-weights convolution: a single kernel is learned
  and applied at every spatial position of the output. All constructor
  attributes — ``features``, ``kernel_size``, ``strides``, ``padding``,
  ``input_dilation``, ``kernel_dilation``, ``feature_group_count``,
  ``use_bias``, ``mask``, ``dtype``, ``param_dtype``, ``precision``,
  ``kernel_init``, and ``bias_init`` — are inherited from ``_Conv``; see that
  class for their full documentation.

  Example usage::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp

    >>> # valid padding
    >>> layer = nn.Conv(features=4, kernel_size=(3,), padding='VALID')
    >>> out, variables = layer.init_with_output(jax.random.key(0), jnp.ones((1, 8, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'bias': (4,), 'kernel': (3, 3, 4)}}
    >>> out.shape
    (1, 6, 4)
    >>> # circular padding with stride 2
    >>> layer = nn.Conv(features=4, kernel_size=(3, 3), strides=2, padding='CIRCULAR')
    >>> out, variables = layer.init_with_output(jax.random.key(0), jnp.ones((1, 8, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'bias': (4,), 'kernel': (3, 3, 3, 4)}}
    >>> out.shape
    (1, 4, 4)
    >>> # apply lower triangle mask
    >>> mask = jnp.tril(jnp.ones((3, 3, 4)))
    >>> layer = nn.Conv(features=4, kernel_size=(3,), mask=mask, padding='VALID')
    >>> variables = layer.init(jax.random.key(0), jnp.ones((1, 8, 3)))
  """

  @property
  def shared_weights(self) -> bool:
    # Regular convolution: the same kernel is reused at every output pixel.
    return True
class ConvLocal(_Conv):
  """Local convolution Module wrapping ``lax.conv_general_dilated_local``.

  Unlike ``Conv``, a *separate* kernel (and bias entry) is learned for every
  spatial position of the output — a "locally connected layer", also known as
  unshared or local convolution. All constructor attributes — ``features``,
  ``kernel_size``, ``strides``, ``padding``, ``input_dilation``,
  ``kernel_dilation``, ``feature_group_count``, ``use_bias``, ``mask``,
  ``dtype``, ``param_dtype``, ``precision``, ``kernel_init``, and
  ``bias_init`` — are inherited from ``_Conv``; see that class for their full
  documentation.

  Example usage::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp

    >>> # valid padding
    >>> layer = nn.ConvLocal(features=4, kernel_size=(3,), padding='VALID')
    >>> out, variables = layer.init_with_output(jax.random.key(0), jnp.ones((1, 8, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'bias': (6, 4), 'kernel': (6, 9, 4)}}
    >>> out.shape
    (1, 6, 4)
    >>> # circular padding with stride 2
    >>> layer = nn.ConvLocal(features=4, kernel_size=(3, 3), strides=2, padding='CIRCULAR')
    >>> out, variables = layer.init_with_output(jax.random.key(0), jnp.ones((1, 8, 3)))
    >>> jax.tree_util.tree_map(jnp.shape, variables)
    {'params': {'bias': (1, 4, 4), 'kernel': (1, 4, 27, 4)}}
    >>> out.shape
    (1, 4, 4)
    >>> # apply lower triangle mask
    >>> mask = jnp.tril(jnp.ones((6, 9, 4)))
    >>> layer = nn.ConvLocal(features=4, kernel_size=(3,), mask=mask, padding='VALID')
    >>> variables = layer.init(jax.random.key(0), jnp.ones((1, 8, 3)))
  """

  @property
  def shared_weights(self) -> bool:
    # Locally connected: a distinct kernel is learned at every output pixel.
    return False
class ConvTranspose(Module):
"""Convolution Module wrapping ``lax.conv_transpose``.
Example usage::
>>> import flax.linen as nn
>>> import jax, jax.numpy as jnp
>>> # valid padding
>>> layer = nn.ConvTranspose(features=4, kernel_size=(3,), padding='VALID')
>>> out, variables = layer.init_with_output(jax.random.key(0), jnp.ones((1, 8, 3)))
>>> jax.tree_util.tree_map(jnp.shape, variables)
{'params': {'bias': (4,), 'kernel': (3, 3, 4)}}
>>> out.shape
(1, 10, 4)
>>> # circular padding with stride 2
>>> layer = nn.ConvTranspose(features=4, kernel_size=(6, 6), strides=(2, 2), padding='CIRCULAR', transpose_kernel=True)
>>> out, variables = layer.init_with_output(jax.random.key(0), jnp.ones((1, 15, 15, 3)))
>>> jax.tree_util.tree_map(jnp.shape, variables)
{'params': {'bias': (4,), 'kernel': (6, 6, 4, 3)}}
>>> out.shape
(1, 30, 30, 4)
>>> # apply lower triangle mask
>>> mask = jnp.tril(jnp.ones((3, 3, 4)))
>>> layer = nn.ConvTranspose(features=4, kernel_size=(3,), mask=mask, padding='VALID')
>>> variables = layer.init(jax.random.key(0), jnp.ones((1, 8, 3)))
Attributes:
features: number of convolution filters.
kernel_size: shape of the convolutional kernel. For 1D convolution, the
kernel size can be passed as an integer, which will be interpreted as a
tuple of the single integer. For all other cases, it must be a sequence of
integers.
strides: an integer or a sequence of `n` integers, representing the
inter-window strides.
padding: either the string `'SAME'`, the string `'VALID'`, the string
`'CIRCULAR'` (periodic boundary conditions), or a sequence of `n` `(low,
high)` integer pairs that give the padding to apply before and after each
spatial dimension. A single int is interpreted as applying the same
padding in all dims and assign a single int in a sequence causes the same
padding to be used on both sides.
kernel_dilation: ``None``, or an integer or a sequence of ``n`` integers,
giving the dilation factor to apply in each spatial dimension of the convolution
kernel. Convolution with kernel dilation is also known as 'atrous
convolution'.
use_bias: whether to add a bias to the output (default: True).
mask: Optional mask for the weights during masked convolution. The mask must
be the same shape as the convolution weight matrix.
dtype: the dtype of the computation (default: infer from input and params).
param_dtype: the dtype passed to parameter initializers (default: float32).
precision: numerical precision of the computation see ``jax.lax.Precision``
for details.
kernel_init: initializer for the convolutional kernel.
bias_init: initializer for the bias.
transpose_kernel: if ``True`` flips spatial axes and swaps the input/output
channel axes of the kernel.
"""
features: int
kernel_size: int | Sequence[int]
strides: Sequence[int] | None = None
padding: PaddingLike = 'SAME'
kernel_dilation: Sequence[int] | None = None
use_bias: bool = True
mask: Array | None = None
dtype: Dtype | None = None
param_dtype: Dtype = jnp.float32
precision: PrecisionLike = None
kernel_init: Initializer = default_kernel_init
bias_init: Initializer = initializers.zeros_init()
transpose_kernel: bool = False
  @compact
  def __call__(self, inputs: Array) -> Array:
    """Applies a transposed convolution to the inputs.

    Behaviour mirrors of ``jax.lax.conv_transpose``.

    Args:
      inputs: input data with dimensions ``(*batch_dims, spatial_dims..., features).``
        This is the channels-last convention, i.e. NHWC for a 2d convolution and NDHWC
        for a 3D convolution. Note: this is different from the input convention used by
        ``lax.conv_general_dilated``, which puts the spatial dimensions last.
        Note: If the input has more than 1 batch dimension, all batch dimensions
        are flattened into a single dimension for the convolution and restored
        before returning. In some cases directly vmap'ing the layer may yield
        better performance than this default flattening approach. If the input
        lacks a batch dimension it will be added for the convolution and removed
        on return, an allowance made to enable writing single-example code.

    Returns:
      The convolved data.
    """
    kernel_size: tuple[int, ...]
    # Normalize kernel_size to a tuple; its length defines the number of
    # spatial dimensions for everything below.
    if isinstance(self.kernel_size, int):
      kernel_size = (self.kernel_size,)
    else:
      kernel_size = tuple(self.kernel_size)

    def maybe_broadcast(
      x: int | Sequence[int] | None,
    ) -> tuple[int, ...]:
      # Expand a scalar (or None) into one value per spatial dimension.
      if x is None:
        # backward compatibility with using None as sentinel for
        # broadcast 1
        x = 1
      if isinstance(x, int):
        return (x,) * len(kernel_size)
      return tuple(x)

    # Combine all input batch dimensions into a single leading batch axis.
    num_batch_dimensions = inputs.ndim - (len(kernel_size) + 1)
    if num_batch_dimensions != 1:
      input_batch_shape = inputs.shape[:num_batch_dimensions]
      total_batch_size = int(np.prod(input_batch_shape))
      flat_input_shape = (total_batch_size,) + inputs.shape[
        num_batch_dimensions:
      ]
      inputs = jnp.reshape(inputs, flat_input_shape)

    strides = maybe_broadcast(self.strides)
    kernel_dilation = maybe_broadcast(self.kernel_dilation)

    in_features = jnp.shape(inputs)[-1]
    # Kernel layout depends on transpose_kernel: (out, in) channel order when
    # the kernel is shared with a forward Conv, (in, out) otherwise.
    if self.transpose_kernel:
      kernel_shape = kernel_size + (self.features, in_features)
    else:
      kernel_shape = kernel_size + (in_features, self.features)

    if self.mask is not None and self.mask.shape != kernel_shape:
      raise ValueError(
        'Mask needs to have the same shape as weights. '
        f'Shapes are: {self.mask.shape}, {kernel_shape}'
      )

    kernel = self.param(
      'kernel', self.kernel_init, kernel_shape, self.param_dtype
    )

    if self.mask is not None:
      kernel *= self.mask

    padding_lax = canonicalize_padding(self.padding, len(kernel_size))
    if padding_lax == 'CIRCULAR':
      # Periodic padding is emulated after the fact (see below); run the
      # transposed convolution itself unpadded.
      padding_lax = 'VALID'

    if self.use_bias:
      bias = self.param(
        'bias', self.bias_init, (self.features,), self.param_dtype
      )
    else:
      bias = None

    # Promote everything to a common computation dtype before the conv.
    inputs, kernel, bias = promote_dtype(inputs, kernel, bias, dtype=self.dtype)

    y = lax.conv_transpose(
      inputs,
      kernel,
      strides,
      padding_lax,
      rhs_dilation=kernel_dilation,
      transpose_kernel=self.transpose_kernel,
      precision=self.precision,
    )

    if self.padding == 'CIRCULAR':
      # For circular padding, we need to identify the size of the final output
      # ("period") along each spatial dimension, pad each dimension to an
      # integer number of periods, and wrap the array periodically around each
      # dimension. Padding should be done in such a way that the start of the
      # original input data inside the padded array is located at integer
      # number of periods - otherwise the result would be circularly shifted.

      # Compute period along each spatial dimension - it's input size scaled
      # by the stride.
      scaled_x_dims = [
        x_dim * stride
        for x_dim, stride in zip(jnp.shape(inputs)[1:-1], strides)
      ]
      # Compute difference between the current size of y and the final output
      # size, and complement this difference to 2 * period - that gives how
      # much we need to pad.
      size_diffs = [
        -(y_dim - x_dim) % (2 * x_dim)
        for y_dim, x_dim in zip(y.shape[1:-1], scaled_x_dims)
      ]
      if self.transpose_kernel:
        # If the kernel is transposed, the "+1" is put on the right to
        # mirror the regular convolution. If the same kernel parameters are used
        # as for Conv, this layer then computes the proper transpose convolution.
        total_pad = [
          (size_diff // 2, (size_diff + 1) // 2) for size_diff in size_diffs
        ]
      else:
        # Divide the padding equally between left and right. The choice to put
        # "+1" on the left (and not on the right) represents a convention for
        # aligning even-sized kernels.
        total_pad = [
          ((size_diff + 1) // 2, size_diff // 2) for size_diff in size_diffs
        ]
      y = jnp.pad(y, [(0, 0)] + total_pad + [(0, 0)])
      # Wrap the result periodically around each spatial dimension,
      # one by one.
      for i in range(1, y.ndim - 1):
        y = y.reshape(
          y.shape[:i] + (-1, scaled_x_dims[i - 1]) + y.shape[i + 1 :]
        )
        y = y.sum(axis=i)

    if self.use_bias:
      # Broadcast the bias over all leading (batch + spatial) axes.
      y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))  # type: ignore

    # Restore any flattened batch dimensions (see flattening step above).
    if num_batch_dimensions != 1:
      output_shape = input_batch_shape + y.shape[1:]
      y = jnp.reshape(y, output_shape)
    return y
# Default initializer for the ``Embed`` module's embedding table: variance
# scaling over the fan-in with a normal distribution. ``out_axis=0`` marks
# axis 0 (``num_embeddings``) as the output axis for the fan computation.
default_embed_init = initializers.variance_scaling(
  1.0, 'fan_in', 'normal', out_axis=0
)
class Embed(Module):
  """Embedding Module.

  A parameterized function from integers [0, ``num_embeddings``) to
  ``features``-dimensional vectors. This ``Module`` will create an ``embedding``
  matrix with shape ``(num_embeddings, features)``. When calling this layer,
  the input values will be used to 0-index into the ``embedding`` matrix.
  Indexing on a value greater than or equal to ``num_embeddings`` will result
  in ``nan`` values. When ``num_embeddings`` equals to 1, it will
  broadcast the ``embedding`` matrix to input shape with ``features``
  dimension appended.

  Example usage::

    >>> import flax.linen as nn
    >>> import jax, jax.numpy as jnp
    >>> layer = nn.Embed(num_embeddings=5, features=3)
    >>> indices_input = jnp.array([[0, 1, 2], [-1, -2, -3]])
    >>> variables = layer.init(jax.random.key(0), indices_input)
    >>> variables
    {'params': {'embedding': Array([[-0.28884724,  0.19018005, -0.414205  ],
           [-0.11768015, -0.54618824, -0.3789283 ],
           [ 0.30428642,  0.49511626,  0.01706631],
           [-0.0982546 , -0.43055868,  0.20654906],
           [-0.688412  , -0.46882293,  0.26723292]], dtype=float32)}}
    >>> # get the first three and last three embeddings
    >>> layer.apply(variables, indices_input)
    Array([[[-0.28884724,  0.19018005, -0.414205  ],
            [-0.11768015, -0.54618824, -0.3789283 ],
            [ 0.30428642,  0.49511626,  0.01706631]],
    <BLANKLINE>
           [[-0.688412  , -0.46882293,  0.26723292],
            [-0.0982546 , -0.43055868,  0.20654906],
            [ 0.30428642,  0.49511626,  0.01706631]]], dtype=float32)

  Attributes:
    num_embeddings: number of embeddings / vocab size.
    features: number of feature dimensions for each embedding.
    dtype: the dtype of the embedding vectors (default: same as embedding).
    param_dtype: the dtype passed to parameter initializers (default: float32).
    embedding_init: embedding initializer.
  """

  num_embeddings: int
  features: int
  dtype: Dtype | None = None
  param_dtype: Dtype = jnp.float32
  embedding_init: Initializer = default_embed_init

  def setup(self):
    # Embedding table of shape (num_embeddings, features), registered as a
    # learnable parameter named 'embedding'.
    self.embedding = self.param(
      'embedding',
      self.embedding_init,
      (self.num_embeddings, self.features),
      self.param_dtype,
    )

  def __call__(self, inputs: Array) -> Array:
    """Embeds the inputs along the last dimension.

    Args:
      inputs: input data, all dimensions are considered batch dimensions.
        Values in the input array must be integers.

    Returns:
      Output which is embedded input data. The output shape follows the input,
      with an additional ``features`` dimension appended.

    Raises:
      ValueError: if ``inputs`` does not have an integer dtype.
    """
    if not jnp.issubdtype(inputs.dtype, jnp.integer):
      raise ValueError('Input type must be an integer or unsigned integer.')
    # Use take because fancy indexing numpy arrays with JAX indices does not
    # work correctly.
    (embedding,) = promote_dtype(
      self.embedding, dtype=self.dtype, inexact=False
    )
    if self.num_embeddings == 1:
      # Single-row table: broadcast that row to the input shape (+features);
      # positions whose index is not 0 become nan, mirroring the
      # out-of-range lookup behavior documented on the class.
      return jnp.where(
        jnp.broadcast_to(inputs[..., None], inputs.shape + (self.features,))
        == 0,
        embedding,
        jnp.nan,
      )
    return jnp.take(embedding, inputs, axis=0)

  def attend(self, query: Array) -> Array:
    """Attend over the embedding using a query array.

    Args:
      query: array with last dimension equal the feature depth ``features`` of the
        embedding.

    Returns:
      An array with final dim ``num_embeddings`` corresponding to the batched
      inner-product of the array of query vectors against each embedding.
      Commonly used for weight-sharing between embeddings and logit transform
      in NLP models.
    """
    query, embedding = promote_dtype(query, self.embedding, dtype=self.dtype)
    return jnp.dot(query, embedding.T)
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@flax@linen@linear.py@.PATH_END.py
|
{
"filename": "test_function_calling.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/tests/unit_tests/utils/test_function_calling.py",
"type": "Python"
}
|
# mypy: disable-error-code="annotation-unchecked"
import sys
import typing
from collections.abc import Iterable, Mapping, MutableMapping, Sequence
from typing import Annotated as ExtensionsAnnotated
from typing import (
Any,
Callable,
Literal,
Optional,
Union,
)
from typing import TypedDict as TypingTypedDict
import pytest
from pydantic import BaseModel as BaseModelV2Maybe # pydantic: ignore
from pydantic import Field as FieldV2Maybe # pydantic: ignore
from typing_extensions import (
TypedDict as ExtensionsTypedDict,
)
try:
from typing import Annotated as TypingAnnotated # type: ignore[attr-defined]
except ImportError:
TypingAnnotated = ExtensionsAnnotated
from pydantic import BaseModel, Field
from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
from langchain_core.runnables import Runnable, RunnableLambda
from langchain_core.tools import BaseTool, StructuredTool, Tool, tool
from langchain_core.utils.function_calling import (
_convert_typed_dict_to_openai_function,
convert_to_openai_function,
tool_example_to_messages,
)
@pytest.fixture()
def pydantic() -> type[BaseModel]:
class dummy_function(BaseModel): # noqa: N801
"""dummy function"""
arg1: int = Field(..., description="foo")
arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")
return dummy_function
@pytest.fixture()
def annotated_function() -> Callable:
def dummy_function(
arg1: ExtensionsAnnotated[int, "foo"],
arg2: ExtensionsAnnotated[Literal["bar", "baz"], "one of 'bar', 'baz'"],
) -> None:
"""dummy function"""
return dummy_function
@pytest.fixture()
def function() -> Callable:
def dummy_function(arg1: int, arg2: Literal["bar", "baz"]) -> None:
"""dummy function
Args:
arg1: foo
arg2: one of 'bar', 'baz'
"""
return dummy_function
@pytest.fixture()
def function_docstring_annotations() -> Callable:
def dummy_function(arg1: int, arg2: Literal["bar", "baz"]) -> None:
"""dummy function
Args:
arg1 (int): foo
arg2: one of 'bar', 'baz'
"""
return dummy_function
@pytest.fixture()
def runnable() -> Runnable:
class Args(ExtensionsTypedDict):
arg1: ExtensionsAnnotated[int, "foo"]
arg2: ExtensionsAnnotated[Literal["bar", "baz"], "one of 'bar', 'baz'"]
def dummy_function(input_dict: Args) -> None:
pass
return RunnableLambda(dummy_function)
@pytest.fixture()
def dummy_tool() -> BaseTool:
class Schema(BaseModel):
arg1: int = Field(..., description="foo")
arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")
class DummyFunction(BaseTool):
args_schema: type[BaseModel] = Schema
name: str = "dummy_function"
description: str = "dummy function"
def _run(self, *args: Any, **kwargs: Any) -> Any:
pass
return DummyFunction()
@pytest.fixture()
def dummy_structured_tool() -> StructuredTool:
class Schema(BaseModel):
arg1: int = Field(..., description="foo")
arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")
return StructuredTool.from_function(
lambda x: None,
name="dummy_function",
description="dummy function",
args_schema=Schema,
)
@pytest.fixture()
def dummy_pydantic() -> type[BaseModel]:
class dummy_function(BaseModel): # noqa: N801
"""dummy function"""
arg1: int = Field(..., description="foo")
arg2: Literal["bar", "baz"] = Field(..., description="one of 'bar', 'baz'")
return dummy_function
@pytest.fixture()
def dummy_pydantic_v2() -> type[BaseModelV2Maybe]:
class dummy_function(BaseModelV2Maybe): # noqa: N801
"""dummy function"""
arg1: int = FieldV2Maybe(..., description="foo")
arg2: Literal["bar", "baz"] = FieldV2Maybe(
..., description="one of 'bar', 'baz'"
)
return dummy_function
@pytest.fixture()
def dummy_typing_typed_dict() -> type:
class dummy_function(TypingTypedDict): # noqa: N801
"""dummy function"""
arg1: TypingAnnotated[int, ..., "foo"] # noqa: F821
arg2: TypingAnnotated[Literal["bar", "baz"], ..., "one of 'bar', 'baz'"] # noqa: F722
return dummy_function
@pytest.fixture()
def dummy_typing_typed_dict_docstring() -> type:
class dummy_function(TypingTypedDict): # noqa: N801
"""dummy function
Args:
arg1: foo
arg2: one of 'bar', 'baz'
"""
arg1: int
arg2: Literal["bar", "baz"]
return dummy_function
@pytest.fixture()
def dummy_extensions_typed_dict() -> type:
class dummy_function(ExtensionsTypedDict): # noqa: N801
"""dummy function"""
arg1: ExtensionsAnnotated[int, ..., "foo"]
arg2: ExtensionsAnnotated[Literal["bar", "baz"], ..., "one of 'bar', 'baz'"]
return dummy_function
@pytest.fixture()
def dummy_extensions_typed_dict_docstring() -> type:
class dummy_function(ExtensionsTypedDict): # noqa: N801
"""dummy function
Args:
arg1: foo
arg2: one of 'bar', 'baz'
"""
arg1: int
arg2: Literal["bar", "baz"]
return dummy_function
@pytest.fixture()
def json_schema() -> dict:
return {
"title": "dummy_function",
"description": "dummy function",
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
}
@pytest.fixture()
def anthropic_tool() -> dict:
return {
"name": "dummy_function",
"description": "dummy function",
"input_schema": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
},
}
@pytest.fixture()
def bedrock_converse_tool() -> dict:
return {
"toolSpec": {
"name": "dummy_function",
"description": "dummy function",
"inputSchema": {
"json": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
}
},
}
}
class Dummy:
    """Holder class used to test conversion of a bound instance method."""

    def dummy_function(self, arg1: int, arg2: Literal["bar", "baz"]) -> None:
        """dummy function

        Args:
            arg1: foo
            arg2: one of 'bar', 'baz'
        """
class DummyWithClassMethod:
    """Holder class used to test conversion of a classmethod."""

    @classmethod
    def dummy_function(cls, arg1: int, arg2: Literal["bar", "baz"]) -> None:
        """dummy function

        Args:
            arg1: foo
            arg2: one of 'bar', 'baz'
        """
def test_convert_to_openai_function(
pydantic: type[BaseModel],
function: Callable,
function_docstring_annotations: Callable,
dummy_structured_tool: StructuredTool,
dummy_tool: BaseTool,
json_schema: dict,
anthropic_tool: dict,
bedrock_converse_tool: dict,
annotated_function: Callable,
dummy_pydantic: type[BaseModel],
runnable: Runnable,
dummy_typing_typed_dict: type,
dummy_typing_typed_dict_docstring: type,
dummy_extensions_typed_dict: type,
dummy_extensions_typed_dict_docstring: type,
) -> None:
expected = {
"name": "dummy_function",
"description": "dummy function",
"parameters": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
},
}
for fn in (
pydantic,
function,
function_docstring_annotations,
dummy_structured_tool,
dummy_tool,
json_schema,
anthropic_tool,
bedrock_converse_tool,
expected,
Dummy.dummy_function,
DummyWithClassMethod.dummy_function,
annotated_function,
dummy_pydantic,
dummy_typing_typed_dict,
dummy_typing_typed_dict_docstring,
dummy_extensions_typed_dict,
dummy_extensions_typed_dict_docstring,
):
actual = convert_to_openai_function(fn) # type: ignore
assert actual == expected
# Test runnables
actual = convert_to_openai_function(runnable.as_tool(description="dummy function"))
parameters = {
"type": "object",
"properties": {
"arg1": {"type": "integer"},
"arg2": {
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
}
runnable_expected = expected.copy()
runnable_expected["parameters"] = parameters
assert actual == runnable_expected
# Test simple Tool
def my_function(input_string: str) -> str:
pass
tool = Tool(
name="dummy_function",
func=my_function,
description="test description",
)
actual = convert_to_openai_function(tool)
expected = {
"name": "dummy_function",
"description": "test description",
"parameters": {
"properties": {"__arg1": {"title": "__arg1", "type": "string"}},
"required": ["__arg1"],
"type": "object",
},
}
assert actual == expected
@pytest.mark.xfail(reason="Direct pydantic v2 models not yet supported")
def test_convert_to_openai_function_nested_v2() -> None:
class NestedV2(BaseModelV2Maybe):
nested_v2_arg1: int = FieldV2Maybe(..., description="foo")
nested_v2_arg2: Literal["bar", "baz"] = FieldV2Maybe(
..., description="one of 'bar', 'baz'"
)
def my_function(arg1: NestedV2) -> None:
"""dummy function"""
convert_to_openai_function(my_function)
def test_convert_to_openai_function_nested() -> None:
class Nested(BaseModel):
nested_arg1: int = Field(..., description="foo")
nested_arg2: Literal["bar", "baz"] = Field(
..., description="one of 'bar', 'baz'"
)
def my_function(arg1: Nested) -> None:
"""dummy function"""
expected = {
"name": "my_function",
"description": "dummy function",
"parameters": {
"type": "object",
"properties": {
"arg1": {
"type": "object",
"properties": {
"nested_arg1": {"type": "integer", "description": "foo"},
"nested_arg2": {
"type": "string",
"enum": ["bar", "baz"],
"description": "one of 'bar', 'baz'",
},
},
"required": ["nested_arg1", "nested_arg2"],
},
},
"required": ["arg1"],
},
}
actual = convert_to_openai_function(my_function)
assert actual == expected
def test_convert_to_openai_function_nested_strict() -> None:
class Nested(BaseModel):
nested_arg1: int = Field(..., description="foo")
nested_arg2: Literal["bar", "baz"] = Field(
..., description="one of 'bar', 'baz'"
)
def my_function(arg1: Nested) -> None:
"""dummy function"""
expected = {
"name": "my_function",
"description": "dummy function",
"parameters": {
"type": "object",
"properties": {
"arg1": {
"type": "object",
"properties": {
"nested_arg1": {"type": "integer", "description": "foo"},
"nested_arg2": {
"type": "string",
"enum": ["bar", "baz"],
"description": "one of 'bar', 'baz'",
},
},
"required": ["nested_arg1", "nested_arg2"],
"additionalProperties": False,
},
},
"required": ["arg1"],
"additionalProperties": False,
},
"strict": True,
}
actual = convert_to_openai_function(my_function, strict=True)
assert actual == expected
json_schema_no_description_no_params = {
"title": "dummy_function",
}
json_schema_no_description = {
"title": "dummy_function",
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
}
anthropic_tool_no_description = {
"name": "dummy_function",
"input_schema": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
},
}
bedrock_converse_tool_no_description = {
"toolSpec": {
"name": "dummy_function",
"inputSchema": {
"json": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
}
},
}
}
openai_function_no_description = {
"name": "dummy_function",
"parameters": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
},
}
openai_function_no_description_no_params = {
"name": "dummy_function",
}
@pytest.mark.parametrize(
"func",
[
anthropic_tool_no_description,
json_schema_no_description,
bedrock_converse_tool_no_description,
openai_function_no_description,
],
)
def test_convert_to_openai_function_no_description(func: dict) -> None:
expected = {
"name": "dummy_function",
"parameters": {
"type": "object",
"properties": {
"arg1": {"description": "foo", "type": "integer"},
"arg2": {
"description": "one of 'bar', 'baz'",
"enum": ["bar", "baz"],
"type": "string",
},
},
"required": ["arg1", "arg2"],
},
}
actual = convert_to_openai_function(func)
assert actual == expected
@pytest.mark.parametrize(
"func",
[
json_schema_no_description_no_params,
openai_function_no_description_no_params,
],
)
def test_convert_to_openai_function_no_description_no_params(func: dict) -> None:
expected = {
"name": "dummy_function",
}
actual = convert_to_openai_function(func)
assert actual == expected
@pytest.mark.xfail(
reason="Pydantic converts Optional[str] to str in .model_json_schema()"
)
def test_function_optional_param() -> None:
@tool
def func5(
a: Optional[str],
b: str,
c: Optional[list[Optional[str]]],
) -> None:
"""A test function"""
func = convert_to_openai_function(func5)
req = func["parameters"]["required"]
assert set(req) == {"b"}
def test_function_no_params() -> None:
def nullary_function() -> None:
"""nullary function"""
func = convert_to_openai_function(nullary_function)
req = func["parameters"].get("required")
assert not req
class FakeCall(BaseModel):
    """Minimal pydantic model standing in for a parsed tool call in tests."""

    data: str
def test_valid_example_conversion() -> None:
expected_messages = [
HumanMessage(content="This is a valid example"),
AIMessage(content="", additional_kwargs={"tool_calls": []}),
]
assert (
tool_example_to_messages(input="This is a valid example", tool_calls=[])
== expected_messages
)
def test_multiple_tool_calls() -> None:
messages = tool_example_to_messages(
input="This is an example",
tool_calls=[
FakeCall(data="ToolCall1"),
FakeCall(data="ToolCall2"),
FakeCall(data="ToolCall3"),
],
)
assert len(messages) == 5
assert isinstance(messages[0], HumanMessage)
assert isinstance(messages[1], AIMessage)
assert isinstance(messages[2], ToolMessage)
assert isinstance(messages[3], ToolMessage)
assert isinstance(messages[4], ToolMessage)
assert messages[1].additional_kwargs["tool_calls"] == [
{
"id": messages[2].tool_call_id,
"type": "function",
"function": {"name": "FakeCall", "arguments": '{"data":"ToolCall1"}'},
},
{
"id": messages[3].tool_call_id,
"type": "function",
"function": {"name": "FakeCall", "arguments": '{"data":"ToolCall2"}'},
},
{
"id": messages[4].tool_call_id,
"type": "function",
"function": {"name": "FakeCall", "arguments": '{"data":"ToolCall3"}'},
},
]
def test_tool_outputs() -> None:
messages = tool_example_to_messages(
input="This is an example",
tool_calls=[
FakeCall(data="ToolCall1"),
],
tool_outputs=["Output1"],
)
assert len(messages) == 3
assert isinstance(messages[0], HumanMessage)
assert isinstance(messages[1], AIMessage)
assert isinstance(messages[2], ToolMessage)
assert messages[1].additional_kwargs["tool_calls"] == [
{
"id": messages[2].tool_call_id,
"type": "function",
"function": {"name": "FakeCall", "arguments": '{"data":"ToolCall1"}'},
},
]
assert messages[2].content == "Output1"
# Test final AI response
messages = tool_example_to_messages(
input="This is an example",
tool_calls=[
FakeCall(data="ToolCall1"),
],
tool_outputs=["Output1"],
ai_response="The output is Output1",
)
assert len(messages) == 4
assert isinstance(messages[0], HumanMessage)
assert isinstance(messages[1], AIMessage)
assert isinstance(messages[2], ToolMessage)
assert isinstance(messages[3], AIMessage)
response = messages[3]
assert response.content == "The output is Output1"
assert not response.tool_calls
@pytest.mark.parametrize("use_extension_typed_dict", [True, False])
@pytest.mark.parametrize("use_extension_annotated", [True, False])
def test__convert_typed_dict_to_openai_function(
    use_extension_typed_dict: bool, use_extension_annotated: bool
) -> None:
    """TypedDict -> OpenAI function schema, across typing/typing_extensions.

    Parametrized over both the TypedDict implementation and the Annotated
    implementation so that all four combinations produce the same schema.
    """
    typed_dict = ExtensionsTypedDict if use_extension_typed_dict else TypingTypedDict
    # BUG FIX: previously both branches selected TypingAnnotated, which made
    # the use_extension_annotated parametrization a no-op. Pick the
    # typing_extensions alias when requested so the parameter is exercised.
    annotated = ExtensionsAnnotated if use_extension_annotated else TypingAnnotated

    class SubTool(typed_dict):
        """Subtool docstring"""

        args: annotated[dict[str, Any], {}, "this does bar"]  # noqa: F722 # type: ignore

    class Tool(typed_dict):
        """Docstring

        Args:
            arg1: foo
        """

        arg1: str
        arg2: Union[int, str, bool]
        arg3: Optional[list[SubTool]]
        arg4: annotated[Literal["bar", "baz"], ..., "this does foo"]  # noqa: F722
        arg5: annotated[Optional[float], None]
        arg6: annotated[
            Optional[Sequence[Mapping[str, tuple[Iterable[Any], SubTool]]]], []
        ]
        arg7: annotated[list[SubTool], ...]
        arg8: annotated[tuple[SubTool], ...]
        arg9: annotated[Sequence[SubTool], ...]
        arg10: annotated[Iterable[SubTool], ...]
        arg11: annotated[set[SubTool], ...]
        arg12: annotated[dict[str, SubTool], ...]
        arg13: annotated[Mapping[str, SubTool], ...]
        arg14: annotated[MutableMapping[str, SubTool], ...]
        arg15: annotated[bool, False, "flag"]  # noqa: F821 # type: ignore

    expected = {
        "name": "Tool",
        "description": "Docstring",
        "parameters": {
            "type": "object",
            "properties": {
                "arg1": {"description": "foo", "type": "string"},
                "arg2": {
                    "anyOf": [
                        {"type": "integer"},
                        {"type": "string"},
                        {"type": "boolean"},
                    ]
                },
                "arg3": {
                    "type": "array",
                    "items": {
                        "description": "Subtool docstring",
                        "type": "object",
                        "properties": {
                            "args": {
                                "description": "this does bar",
                                "default": {},
                                "type": "object",
                            }
                        },
                    },
                },
                "arg4": {
                    "description": "this does foo",
                    "enum": ["bar", "baz"],
                    "type": "string",
                },
                "arg5": {"type": "number"},
                "arg6": {
                    "default": [],
                    "type": "array",
                    "items": {
                        "type": "object",
                        "additionalProperties": {
                            "type": "array",
                            "minItems": 2,
                            "maxItems": 2,
                            "items": [
                                {"type": "array", "items": {}},
                                {
                                    "title": "SubTool",
                                    "description": "Subtool docstring",
                                    "type": "object",
                                    "properties": {
                                        "args": {
                                            "title": "Args",
                                            "description": "this does bar",
                                            "default": {},
                                            "type": "object",
                                        }
                                    },
                                },
                            ],
                        },
                    },
                },
                "arg7": {
                    "type": "array",
                    "items": {
                        "description": "Subtool docstring",
                        "type": "object",
                        "properties": {
                            "args": {
                                "description": "this does bar",
                                "default": {},
                                "type": "object",
                            }
                        },
                    },
                },
                "arg8": {
                    "type": "array",
                    "minItems": 1,
                    "maxItems": 1,
                    "items": [
                        {
                            "title": "SubTool",
                            "description": "Subtool docstring",
                            "type": "object",
                            "properties": {
                                "args": {
                                    "title": "Args",
                                    "description": "this does bar",
                                    "default": {},
                                    "type": "object",
                                }
                            },
                        }
                    ],
                },
                "arg9": {
                    "type": "array",
                    "items": {
                        "description": "Subtool docstring",
                        "type": "object",
                        "properties": {
                            "args": {
                                "description": "this does bar",
                                "default": {},
                                "type": "object",
                            }
                        },
                    },
                },
                "arg10": {
                    "type": "array",
                    "items": {
                        "description": "Subtool docstring",
                        "type": "object",
                        "properties": {
                            "args": {
                                "description": "this does bar",
                                "default": {},
                                "type": "object",
                            }
                        },
                    },
                },
                "arg11": {
                    "type": "array",
                    "items": {
                        "description": "Subtool docstring",
                        "type": "object",
                        "properties": {
                            "args": {
                                "description": "this does bar",
                                "default": {},
                                "type": "object",
                            }
                        },
                    },
                    "uniqueItems": True,
                },
                "arg12": {
                    "type": "object",
                    "additionalProperties": {
                        "description": "Subtool docstring",
                        "type": "object",
                        "properties": {
                            "args": {
                                "description": "this does bar",
                                "default": {},
                                "type": "object",
                            }
                        },
                    },
                },
                "arg13": {
                    "type": "object",
                    "additionalProperties": {
                        "description": "Subtool docstring",
                        "type": "object",
                        "properties": {
                            "args": {
                                "description": "this does bar",
                                "default": {},
                                "type": "object",
                            }
                        },
                    },
                },
                "arg14": {
                    "type": "object",
                    "additionalProperties": {
                        "description": "Subtool docstring",
                        "type": "object",
                        "properties": {
                            "args": {
                                "description": "this does bar",
                                "default": {},
                                "type": "object",
                            }
                        },
                    },
                },
                "arg15": {"description": "flag", "default": False, "type": "boolean"},
            },
            "required": [
                "arg1",
                "arg2",
                "arg3",
                "arg4",
                "arg7",
                "arg8",
                "arg9",
                "arg10",
                "arg11",
                "arg12",
                "arg13",
                "arg14",
            ],
        },
    }
    actual = _convert_typed_dict_to_openai_function(Tool)
    assert actual == expected
@pytest.mark.parametrize("typed_dict", [ExtensionsTypedDict, TypingTypedDict])
def test__convert_typed_dict_to_openai_function_fail(typed_dict: type) -> None:
    """Unsupported annotations (MutableSet) raise TypeError during conversion."""

    class Tool(typed_dict):
        arg1: typing.MutableSet  # Pydantic 2 supports this, but pydantic v1 does not.

    # Error should be raised since we're using v1 code path here
    with pytest.raises(TypeError):
        _convert_typed_dict_to_openai_function(Tool)
@pytest.mark.skipif(
sys.version_info < (3, 10), reason="Requires python version >= 3.10 to run."
)
def test_convert_union_type_py_39() -> None:
@tool
def magic_function(input: int | float) -> str:
"""Compute a magic function."""
result = convert_to_openai_function(magic_function)
assert result["parameters"]["properties"]["input"] == {
"anyOf": [{"type": "integer"}, {"type": "number"}]
}
def test_convert_to_openai_function_no_args() -> None:
@tool
def empty_tool() -> str:
"""No args"""
return "foo"
actual = convert_to_openai_function(empty_tool, strict=True)
assert actual == {
"name": "empty_tool",
"description": "No args",
"parameters": {
"properties": {},
"additionalProperties": False,
"type": "object",
},
"strict": True,
}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@tests@unit_tests@utils@test_function_calling.py@.PATH_END.py
|
{
"filename": "_gradient.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/scatterpolar/marker/_gradient.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Gradient(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatterpolar.marker"
_path_str = "scatterpolar.marker.gradient"
_valid_props = {"color", "colorsrc", "type", "typesrc"}
    # color
    # -----
    @property
    def color(self):
        """
        Sets the final color of the gradient fill: the center color for
        radial, the right for horizontal, or the bottom for vertical.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        # Validation/coercion is delegated to the base class item assignment.
        self["color"] = val
    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for color .

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        # Validation/coercion is delegated to the base class item assignment.
        self["colorsrc"] = val
    # type
    # ----
    @property
    def type(self):
        """
        Sets the type of gradient used to fill the markers

        The 'type' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['radial', 'horizontal', 'vertical', 'none']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["type"]

    @type.setter
    def type(self, val):
        # Validation/coercion is delegated to the base class item assignment.
        self["type"] = val
    # typesrc
    # -------
    @property
    def typesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for type .

        The 'typesrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["typesrc"]

    @typesrc.setter
    def typesrc(self, val):
        # Validation/coercion is delegated to the base class item assignment.
        self["typesrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Plain-text descriptions of this object's properties; presumably
        # consumed by the generated-docstring machinery — content must stay
        # byte-identical to the generated form.
        return """\
        color
            Sets the final color of the gradient fill: the center
            color for radial, the right for horizontal, or the
            bottom for vertical.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        type
            Sets the type of gradient used to fill the markers
        typesrc
            Sets the source reference on Chart Studio Cloud for
            type .
        """
def __init__(
    self, arg=None, color=None, colorsrc=None, type=None, typesrc=None, **kwargs
):
    """
    Construct a new Gradient object

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of
        :class:`plotly.graph_objs.scatterpolar.marker.Gradient`
    color
        Sets the final color of the gradient fill: the center
        color for radial, the right for horizontal, or the
        bottom for vertical.
    colorsrc
        Sets the source reference on Chart Studio Cloud for
        color .
    type
        Sets the type of gradient used to fill the markers
    typesrc
        Sets the source reference on Chart Studio Cloud for
        type .

    Returns
    -------
    Gradient
    """
    super(Gradient, self).__init__("gradient")

    # When constructed internally as the child of an existing parent object,
    # only record the parent and skip all validation work.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize ``arg`` into a plain, mutable dict of property values.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        arg = _copy.copy(arg)
    else:
        raise ValueError(
            """\
The first argument to the plotly.graph_objs.scatterpolar.marker.Gradient
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatterpolar.marker.Gradient`"""
        )

    # Validation flags, consumed before the remaining kwargs are processed.
    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Populate the data dict. An explicit keyword argument wins over the
    # corresponding entry in ``arg``; None values are not assigned.
    for prop_name, explicit in (
        ("color", color),
        ("colorsrc", colorsrc),
        ("type", type),
        ("typesrc", typesrc),
    ):
        candidate = arg.pop(prop_name, None)
        if explicit is not None:
            candidate = explicit
        if candidate is not None:
            self[prop_name] = candidate

    # Forward whatever remains in ``arg`` plus any unrecognized kwargs.
    self._process_kwargs(**dict(arg, **kwargs))

    # Reset so subsequent property assignments validate normally.
    self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@scatterpolar@marker@_gradient.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/legendgrouptitle/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``shadow`` property of
    ``scatter3d.legendgrouptitle.font``."""

    def __init__(
        self,
        plotly_name="shadow",
        parent_name="scatter3d.legendgrouptitle.font",
        **kwargs,
    ):
        # Callers may override the edit type; default matches the schema.
        edit_type = kwargs.pop("edit_type", "style")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@legendgrouptitle@font@_shadow.py@.PATH_END.py
|
{
"filename": "optimize_pca.py",
"repo_name": "arjunsavel/cortecs",
"repo_path": "cortecs_extracted/cortecs-main/src/cortecs/opt/optimize_pca.py",
"type": "Python"
}
|
"""
Performs simple optimization of PCA hyperparameters — i.e., number of components and wavelength index
for computing eigenvectors.
"""
import math
import numpy as np
from tqdm.autonotebook import tqdm
from cortecs.fit.fit import Fitter
from cortecs.fit.fit_neural_net import *
from cortecs.fit.fit_pca import *
def optimize_pca(
    max_size,
    max_evaluations,
    opac,
    min_components=3,
    max_components=5,
    wav_ind_start=3573,
):
    """
    Grid-search the PCA hyperparameters — the number of principal components
    and the wavelength index used to compute the eigenvectors — and return
    the combination with the lowest mean squared fitting error.

    Inputs
    ------
    max_size: float
        maximum size of file in kB. NOTE(review): currently unused; kept in
        the signature for interface compatibility — TODO enforce it.
    max_evaluations: int
        maximum number of evaluations of the fitter. Each of the two axes is
        sampled floor(sqrt(max_evaluations)) times, so at most that many
        squared fits are run.
    opac: Opac
        opacity object being fit; must expose a ``wl`` wavelength grid.
    min_components: int
        smallest number of principal components to test.
    max_components: int
        largest number of principal components to test.
    wav_ind_start: int
        smallest wavelength index to test for the eigenvector computation.

    Returns
    -------
    best_params: dict
        the best-performing hyperparameters, keys "n_pc" and "wav_ind".
    """
    wl = opac.wl

    # each axis — the wavelength index being tested and the number of
    # components — will be tested n times, with n * n <= max_evaluations.
    n_test_each_axis = math.floor(math.sqrt(max_evaluations))

    n_pc_range = np.linspace(min_components, max_components, n_test_each_axis).astype(
        int
    )
    wav_ind_range = np.linspace(wav_ind_start, len(wl) - 1, n_test_each_axis).astype(
        int
    )
    (
        n_pc_grid,
        wav_ind_grid,
    ) = np.meshgrid(n_pc_range, wav_ind_range)

    final_errors = []
    for sample in tqdm(
        range(len(n_pc_grid.flatten())),
        desc="Optimizing PCA hyperparameters",
    ):
        n_pc, wav_ind = (
            n_pc_grid.flatten()[sample],
            wav_ind_grid.flatten()[sample],
        )
        fitter = Fitter(opac, method="pca", wav_ind=wav_ind, nc=n_pc)
        try:
            fitter.fit(verbose=0)

            # score the fit by the MSE of the absolute residuals
            vals, orig_vals, abs_diffs, percent_diffs = calc_metrics(fitter, plot=False)
            mse = np.mean(np.square(abs_diffs))
        except ValueError:
            # a failed fit is simply excluded from consideration
            mse = np.inf
        final_errors.append(mse)

    # samples were evaluated in flattened order, so argmin indexes the grids
    # directly.
    best_sample_ind = int(np.argmin(final_errors))

    best_params = {
        "n_pc": n_pc_grid.flatten()[best_sample_ind],
        "wav_ind": wav_ind_grid.flatten()[best_sample_ind],
    }
    return best_params
|
arjunsavelREPO_NAMEcortecsPATH_START.@cortecs_extracted@cortecs-main@src@cortecs@opt@optimize_pca.py@.PATH_END.py
|
{
"filename": "paths.py",
"repo_name": "NikolayBritavskiyAstro/fast_rotating_binaries",
"repo_path": "fast_rotating_binaries_extracted/fast_rotating_binaries-main/src/scripts/paths.py",
"type": "Python"
}
|
"""
Exposes common paths useful for manipulating datasets and generating figures.
"""
from pathlib import Path
# Absolute path to the top level of the repository
root = Path(__file__).resolve().parents[2].absolute()
# Absolute path to the `src` folder
src = root / "src"
# Absolute path to the `src/data` folder (contains datasets)
data = src / "data"
# Absolute path to the `src/static` folder (contains static images)
static = src / "static"
# Absolute path to the `src/scripts` folder (contains figure/pipeline scripts)
scripts = src / "scripts"
# Absolute path to the `src/tex` folder (contains the manuscript)
tex = src / "tex"
# Absolute path to the `src/tex/figures` folder (contains figure output)
figures = tex / "figures"
# Absolute path to the `src/tex/output` folder (contains other user-defined output)
output = tex / "output"
|
NikolayBritavskiyAstroREPO_NAMEfast_rotating_binariesPATH_START.@fast_rotating_binaries_extracted@fast_rotating_binaries-main@src@scripts@paths.py@.PATH_END.py
|
{
"filename": "cholesky_update_test.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/tests/cholesky_update_test.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from jax import numpy as jnp
from jax._src import config
from jax._src import test_util as jtu
from jax._src.lax import linalg as lax_linalg
import numpy as np
config.parse_flags_with_absl()
class CholeskyUpdateTest(jtu.JaxTestCase):
    """Tests for lax_linalg.cholesky_update: rank-1 updates of an upper
    Cholesky factor, checked against re-factorizing A + w w^T from scratch."""

    @jtu.sample_product(
        shape=[
            (128, 128),
        ],
        dtype=[jnp.float32, jnp.float64],
    )
    def testUpperOnes(self, shape, dtype):
        """A test with a (mildly) ill-conditioned matrix."""
        # float64 needs the x64 flag; skip rather than fail in x32 mode.
        if dtype is jnp.float64 and not config.enable_x64.value:
            self.skipTest("Test disabled for x32 mode")
        r_upper = jnp.triu(jnp.ones(shape)).astype(dtype)
        w = jnp.arange(1, shape[0] + 1).astype(dtype)
        # reference answer: factorize the updated matrix directly
        new_matrix = r_upper.T @ r_upper + jnp.outer(w, w)
        new_cholesky = jnp.linalg.cholesky(new_matrix, upper=True)
        updated = lax_linalg.cholesky_update(r_upper, w)
        # looser tolerance in single precision
        atol = 1e-6 if (dtype is jnp.float64) else 2e-2
        jtu._assert_numpy_allclose(updated, new_cholesky, atol=atol)

    @jtu.sample_product(
        shape=[
            (128, 128),
        ],
        dtype=[jnp.float32, jnp.float64],
    )
    def testRandomMatrix(self, shape, dtype):
        # same check with a randomized positive-definite input
        if dtype is jnp.float64 and not config.enable_x64.value:
            self.skipTest("Test disabled for x32 mode")
        rng = jtu.rand_default(self.rng())
        a = rng(shape, np.float64)
        # a.T @ a is positive (semi-)definite, so it has a Cholesky factor
        pd_matrix = jnp.array(a.T @ a).astype(dtype)
        old_cholesky = jnp.linalg.cholesky(pd_matrix, upper=True)
        w = rng((shape[0],), np.float64)
        w = jnp.array(w).astype(dtype)
        new_matrix = pd_matrix + jnp.outer(w, w)
        new_cholesky = jnp.linalg.cholesky(new_matrix, upper=True)
        updated = lax_linalg.cholesky_update(old_cholesky, w)
        atol = 1e-6 if dtype == jnp.float64 else 1e-3
        jtu._assert_numpy_allclose(updated, new_cholesky, atol=atol)


if __name__ == "__main__":
    absltest.main(testLoader=jtu.JaxTestLoader())
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@cholesky_update_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "solerjuan/magnetar",
"repo_path": "magnetar_extracted/magnetar-master/extern/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This packages contains python packages that are bundled with the affiliated
package but are external to the affiliated package, and hence are developed in
a separate source tree. Note that this package is distinct from the /cextern
directory of the source code distribution, as that directory only contains C
extension code.
"""
|
solerjuanREPO_NAMEmagnetarPATH_START.@magnetar_extracted@magnetar-master@extern@__init__.py@.PATH_END.py
|
{
"filename": "test_rates.py",
"repo_name": "pynucastro/pynucastro",
"repo_path": "pynucastro_extracted/pynucastro-main/pynucastro/rates/tests/test_rates.py",
"type": "Python"
}
|
# unit tests for rates
import math
import pytest
from pytest import approx
from pynucastro import Composition, rates
from pynucastro.nucdata import Nucleus
class TestTfactors:
    """Checks the derived temperature factors for T9 = 2 (i.e. T = 2e9 K)."""

    @pytest.fixture(scope="class")
    def tf(self):
        # Tfactors takes the temperature in K and exposes powers of T9
        return rates.Tfactors(2.e9)

    def test_tfactors(self, tf):
        assert tf.T9 == approx(2.0)
        assert tf.T9i == approx(0.5)
        assert tf.T913i == approx(0.5**(1./3.))
        assert tf.T913 == approx(2.0**(1./3.))
        assert tf.T953 == approx(2.0**(5./3.))
        assert tf.lnT9 == approx(math.log(2.0))
class TestRate:
    """Spot-checks rates.load_rate with one representative rate per
    ReacLib chapter (1-11) and exercises the Rate API on them."""

    @classmethod
    def setup_class(cls):
        """ this is run once for each class before any tests """

    @classmethod
    def teardown_class(cls):
        """ this is run once for each class after all tests """

    def setup_method(self):
        """ this is run before each test """

        # one rate file per ReacLib chapter; the chapter fixes the number of
        # reactants/products encoded in the file

        # chapter-1
        self.rate1 = rates.load_rate("o15--n15-wc12")

        # chapter-2
        self.rate2 = rates.load_rate("t-gn-d-nk06")

        # chapter-3
        self.rate3 = rates.load_rate("he6-gnn-he4-cf88")

        # chapter-4
        self.rate4 = rates.load_rate("c12-ag-o16-nac2")

        # chapter-5
        self.rate5 = rates.load_rate("n15-pa-c12-nacr")

        # chapter-6
        self.rate6 = rates.load_rate("he3-he3pp-he4-nacr")

        # chapter-7
        self.rate7 = rates.load_rate("li7-tnna-he4-mafo")

        # chapter-8
        self.rate8 = rates.load_rate("he4-aag-c12-fy05")

        # chapter-8, historical format (same rate as chapter-9)
        self.rate8_hist = rates.load_rate("he4-pphe3-he3-nacr-historical")

        # chapter-9
        self.rate9 = rates.load_rate("he4-pphe3-he3-nacr")

        # chapter-10
        self.rate10 = rates.load_rate("he4-npahe3-li7-mafo")

        # chapter-11
        self.rate11 = rates.load_rate("b17-nnn-c14-wc12")

        # nuclei referenced by the assertions below
        self.n = Nucleus("n")
        self.p = Nucleus("p")
        self.h1 = Nucleus("H1")
        self.d = Nucleus("d")
        self.h3 = Nucleus("H3")
        self.he3 = Nucleus("He3")
        self.he4 = Nucleus("He4")
        self.he6 = Nucleus("He6")
        self.li7 = Nucleus("Li7")
        self.b17 = Nucleus("B17")
        self.c12 = Nucleus("C12")
        self.c14 = Nucleus("C14")
        self.n15 = Nucleus("N15")
        self.o15 = Nucleus("O15")
        self.o16 = Nucleus("O16")
        self.ni56 = Nucleus("Ni56")
        self.u238 = Nucleus("U238")
        self.he4_also = Nucleus("he4")

    def teardown_method(self):
        """ this is run after each test """

    def test_source(self):
        assert self.rate1.source["Year"] == "2012"

    def test_reactants(self):
        # each chapter encodes a specific reactant count and ordering

        # o15--n15-wc12
        assert self.rate1.reactants[0] == self.o15
        assert len(self.rate1.reactants) == 1

        # t-gn-d-nk06
        assert self.rate2.reactants[0] == self.h3
        assert len(self.rate2.reactants) == 1

        # he6-gnn-he4-cf88
        assert self.rate3.reactants[0] == self.he6
        assert len(self.rate3.reactants) == 1

        # c12-ag-o16-nac2
        assert self.rate4.reactants[0] == self.he4
        assert self.rate4.reactants[1] == self.c12
        assert len(self.rate4.reactants) == 2

        # n15-pa-c12-nacr
        assert self.rate5.reactants[0] == self.h1
        assert self.rate5.reactants[1] == self.n15
        assert len(self.rate5.reactants) == 2

        # he3-he3pp-he4-nacr
        assert self.rate6.reactants[0] == self.he3
        assert self.rate6.reactants[1] == self.he3
        assert len(self.rate6.reactants) == 2

        # li7-tnna-he4-mafo
        assert self.rate7.reactants[0] == self.h3
        assert self.rate7.reactants[1] == self.li7
        assert len(self.rate7.reactants) == 2

        # he4-aag-c12-fy05
        assert self.rate8.reactants[0] == self.he4
        assert self.rate8.reactants[1] == self.he4
        assert self.rate8.reactants[2] == self.he4
        assert len(self.rate8.reactants) == 3

        # he4-pphe3-he3-nacr-historical
        assert self.rate8_hist.reactants[0] == self.p
        assert self.rate8_hist.reactants[1] == self.h1
        assert self.rate8_hist.reactants[2] == self.he4
        assert len(self.rate8_hist.reactants) == 3

        # he4-pphe3-he3-nacr
        assert self.rate9.reactants[0] == self.p
        assert self.rate9.reactants[1] == self.h1
        assert self.rate9.reactants[2] == self.he4
        assert len(self.rate9.reactants) == 3

        # he4-npahe3-li7-mafo
        assert self.rate10.reactants[0] == self.n
        assert self.rate10.reactants[1] == self.h1
        assert self.rate10.reactants[2] == self.he4
        assert self.rate10.reactants[3] == self.he4
        assert len(self.rate10.reactants) == 4

        # b17-nnn-c14-wc12
        assert self.rate11.reactants[0] == self.b17
        assert len(self.rate11.reactants) == 1

    def test_products(self):
        assert self.rate4.products[0] == self.o16
        assert self.rate8.products[0] == self.c12
        assert len(self.rate8.products) == 1

        # he4-pphe3-he3-nacr-historical
        assert self.rate8_hist.products[0] == self.he3
        assert self.rate8_hist.products[1] == self.he3
        assert len(self.rate8_hist.products) == 2

        # he4-pphe3-he3-nacr
        assert self.rate9.products[0] == self.he3
        assert self.rate9.products[1] == self.he3
        assert len(self.rate9.products) == 2

    def test_prefactor(self):
        # prefactor is 1/6 for three identical reactants (3! permutations)
        assert self.rate4.prefactor == 1.0
        assert self.rate8.prefactor == approx(0.16666666)

    def test_rate_exponent(self):
        assert self.rate8.get_rate_exponent(1.e8) == approx(40.9106396)

    def test_eval(self):
        assert self.rate8.eval(1.e8) == approx(2.0403192412842946e-24, rel=1.e-6, abs=1.e-40)

    def test_eval_deriv(self):
        T0 = 1.e8
        eps = 1.e-8

        # compare finite diff to analytic diff

        # rate4
        diff = (self.rate4.eval(T0*(1.0+eps)) - self.rate4.eval(T0)) / (T0 * eps)
        err = abs(diff - self.rate4.eval_deriv(T0)) / diff
        assert err < 1.e-6

        # rate5
        diff = (self.rate5.eval(T0*(1.0+eps)) - self.rate5.eval(T0)) / (T0 * eps)
        err = abs(diff - self.rate5.eval_deriv(T0)) / diff
        assert err < 1.e-6

        # rate6
        diff = (self.rate6.eval(T0*(1.0+eps)) - self.rate6.eval(T0)) / (T0 * eps)
        err = abs(diff - self.rate6.eval_deriv(T0)) / diff
        assert err < 1.e-6

    def test_comparison(self):
        assert self.rate1 > self.rate2
        assert self.rate1 > self.rate4
        assert self.rate8 > self.rate9

    def test_weak(self):
        assert self.rate1.weak
        assert not self.rate2.weak

    def test_screen(self):
        # only charged-particle reactants participate in screening
        assert not self.rate1.ion_screen
        assert self.rate4.ion_screen == [Nucleus("he4"), Nucleus("c12")]
        assert self.rate8.ion_screen == 3*[Nucleus("he4")]

    def test_heaviest_lightest(self):
        assert self.rate4.heaviest() == Nucleus("o16")
        assert self.rate4.lightest() == Nucleus("he4")
        assert self.rate2.lightest() == Nucleus("n")
        assert self.rate2.heaviest() == Nucleus("t")

    def test_identical_particle_factor(self):
        # disabling the identical-particle factor restores a prefactor of 1
        assert self.rate8.prefactor == approx(0.16666667)
        self.rate8.use_identical_particle_factor = False
        self.rate8._set_rhs_properties()  # pylint: disable=protected-access
        assert self.rate8.prefactor == 1.0
class TestDerivedRate:
    """Tests for rates.DerivedRate: inverse rates built via detailed balance,
    with and without partition functions and an exactly-computed Q value."""

    def test_a_a_ag_c12(self, reaclib_library):
        """
        Here we test the inverse rate, computed by the use of detailed balance
        of a:

        A + B -> C

        reaction type.
        """
        # NOTE(review): this method was previously named `a_a_ag_c12`, so
        # pytest never collected it; renamed with the `test_` prefix so it
        # actually runs.
        a_a_ag_c12 = reaclib_library.get_rate('he4 + he4 + he4 --> c12 <fy05_reaclib__>')
        c12_ga_a_a_reaclib = reaclib_library.get_rate('c12 --> he4 + he4 + he4 <fy05_reaclib__reverse>')
        c12_ga_a_a_derived = rates.DerivedRate(rate=a_a_ag_c12, compute_Q=False, use_pf=False)
        # the derived reverse rate should agree with the tabulated reverse rate
        assert c12_ga_a_a_reaclib.eval(T=2.0e9) == approx(c12_ga_a_a_derived.eval(T=2.0e9), rel=2e-4)

    def test_a_a_ag_c12_with_pf(self, reaclib_library):
        """
        This function tests the correct rate value if we take into
        consideration the partition functions on the range 1.0e9 to 100.0e9
        """
        a_a_ag_c12 = reaclib_library.get_rate('he4 + he4 + he4 --> c12 <fy05_reaclib__>')
        c12_ga_a_a_derived = rates.DerivedRate(rate=a_a_ag_c12, compute_Q=False, use_pf=True)
        # C12 has no tabulated partition function, so a warning is expected
        with pytest.warns(UserWarning, match="C12 partition function is not supported by tables"):
            rval = c12_ga_a_a_derived.eval(T=2.0e9)
        assert rval == approx(2.8953989705969484e-07)

    def test_a_a_ag_c12_with_Q(self, reaclib_library):
        """
        This function tests the correct rate value if we take into
        consideration the exact values of atomic nuclear weight in order to
        compute the Q capture value of the reaction rate.
        """
        a_a_ag_c12 = reaclib_library.get_rate('he4 + he4 + he4 --> c12 <fy05_reaclib__>')
        c12_ga_a_a_derived = rates.DerivedRate(rate=a_a_ag_c12, compute_Q=True, use_pf=False)
        assert c12_ga_a_a_derived.eval(T=2.0e9) == approx(2.899642192191721e-07)
class TestWeakRates:
    """Spot-checks TabularRate evaluations at fixed (T, rho, composition)."""

    @pytest.fixture(scope="class")
    def rate1(self):
        return rates.TabularRate("o18--f18-toki")

    @pytest.fixture(scope="class")
    def rate2(self):
        return rates.TabularRate("na22--ne22-toki")

    @pytest.fixture(scope="class")
    def rate3(self):
        return rates.TabularRate("sc45--ca45-toki")

    @pytest.fixture(scope="class")
    def rate4(self):
        return rates.TabularRate("ti45--sc45-toki")

    @pytest.fixture(scope="class")
    def rate5(self):
        return rates.TabularRate("v45--ti45-toki")

    @pytest.fixture(scope="class")
    def rate6(self):
        return rates.TabularRate("ca45--sc45-toki")

    def test_reactants(self, rate1, rate2, rate3, rate4, rate5, rate6):

        # pick a composition that gives Ye = 0.5 just for testing
        comp = Composition(["c12", "o16"])
        comp.set_equal()

        # each tabular rate has exactly one reactant and one product
        assert len(rate1.reactants) == 1 and len(rate1.products) == 1
        assert rate1.products[0] == Nucleus("f18")
        assert rate1.reactants[0] == Nucleus("o18")
        assert rate1.eval(2.5e9, rho=2.e8, comp=comp) == approx(8.032467196099662e-16, rel=1.e-6, abs=1.e-20)

        assert len(rate2.reactants) == 1 and len(rate2.products) == 1
        assert rate2.products[0] == Nucleus("ne22")
        assert rate2.reactants[0] == Nucleus("na22")
        assert rate2.eval(1.e9, rho=4.e7, comp=comp) == approx(3.232714235735518e-05, rel=1.e-6, abs=1.e-20)

        # the remaining rates are compared in log10 space
        assert len(rate3.reactants) == 1 and len(rate3.products) == 1
        assert rate3.products[0] == Nucleus("ca45")
        assert rate3.reactants[0] == Nucleus("sc45")
        assert math.log10(rate3.eval(1.e9, rho=2.e11, comp=comp)) == approx(3.4400000000000004)

        assert len(rate4.reactants) == 1 and len(rate4.products) == 1
        assert rate4.products[0] == Nucleus("sc45")
        assert rate4.reactants[0] == Nucleus("ti45")
        assert math.log10(rate4.eval(1.e9, rho=2.e11, comp=comp)) == approx(3.853)

        assert len(rate5.reactants) == 1 and len(rate5.products) == 1
        assert rate5.products[0] == Nucleus("ti45")
        assert rate5.reactants[0] == Nucleus("v45")
        assert math.log10(rate5.eval(1.e9, rho=2.e11, comp=comp)) == approx(4.71501)

        assert len(rate6.reactants) == 1 and len(rate6.products) == 1
        assert rate6.products[0] == Nucleus("sc45")
        assert rate6.reactants[0] == Nucleus("ca45")
        assert math.log10(rate6.eval(1.e9, rho=2.e11, comp=comp)) == approx(-99.69797)
class TestModify:
    """Checks that modifying a rate's products updates Q, the product list,
    and the `modified` flag."""

    @pytest.fixture(scope="function")
    def rate(self):
        # function scope: test_modify mutates the rate in place, so each
        # test needs a fresh instance
        return rates.load_rate("c12-c12n-mg23-cf88")

    def test_modify(self, rate):
        rate.modify_products("mg24")
        assert rate.Q == approx(13.933578000000125)
        assert rate.products == [Nucleus("mg24")]
        assert rate.modified
|
pynucastroREPO_NAMEpynucastroPATH_START.@pynucastro_extracted@pynucastro-main@pynucastro@rates@tests@test_rates.py@.PATH_END.py
|
{
"filename": "barklem2016.ipynb",
"repo_name": "tardis-sn/carsus",
"repo_path": "carsus_extracted/carsus-master/docs/io/barklem2016.ipynb",
"type": "Jupyter Notebook"
}
|
## Barklem & Collet 2016 Data Ingestion
[Barklem & Collet 2016](https://ui.adsabs.harvard.edu/abs/2016A%26A...588A..96B/abstract) contains information on molecular formation that is useful for calculating the number densities of molecules in stellar atmospheres.
This data ingestor by default grabs mirrored tables from the [carsus-data-molecules-barklem2016](https://github.com/tardis-sn/carsus-data-molecules-barklem2016) repository, though other destinations can be specified.
**_NOTE:_** By default, the data tables are downloaded from the remote repository, so an internet connection is required when the reader is run.
```python
from carsus.io.molecules.molecules import BarklemCollet2016Reader
```
```python
barklem_reader = BarklemCollet2016Reader()
```
Table information is parsed to a dataframe which can be accessed via the .vald attribute. The column information is described in https://articles.adsabs.harvard.edu/pdf/1995A&AS..112..525P and is as follows:
The reader grabs four tables and parses them to dataframes. Namely, it parses atomic ionization energies, molecular dissociation energies, molecular equilibrium constants, and molecular partition functions.
```python
barklem_reader.ionization_energies
```
[[1mcarsus.io.molecules.molecules[0m][ [1;37mINFO[0m] - Parsing Barklem & Collet 2016 from: https://raw.githubusercontent.com/tardis-sn/carsus-data-molecules-barklem2016/main/data/ ([1mmolecules.py[0m:79)
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Element</th>
<th>IE1 [eV]</th>
<th>IE2 [eV]</th>
<th>IE3 [eV]</th>
</tr>
<tr>
<th>Atomic_Number</th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>1</th>
<td>H</td>
<td>13.5984</td>
<td>-1.000</td>
<td>-1.000</td>
</tr>
<tr>
<th>2</th>
<td>He</td>
<td>24.5874</td>
<td>54.418</td>
<td>-1.000</td>
</tr>
<tr>
<th>3</th>
<td>Li</td>
<td>5.3917</td>
<td>75.640</td>
<td>122.454</td>
</tr>
<tr>
<th>4</th>
<td>Be</td>
<td>9.3227</td>
<td>18.211</td>
<td>153.896</td>
</tr>
<tr>
<th>5</th>
<td>B</td>
<td>8.2980</td>
<td>25.155</td>
<td>37.931</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>88</th>
<td>Ra</td>
<td>5.2784</td>
<td>10.147</td>
<td>31.000</td>
</tr>
<tr>
<th>89</th>
<td>Ac</td>
<td>5.3802</td>
<td>11.750</td>
<td>17.431</td>
</tr>
<tr>
<th>90</th>
<td>Th</td>
<td>6.3067</td>
<td>11.900</td>
<td>18.320</td>
</tr>
<tr>
<th>91</th>
<td>Pa</td>
<td>5.8900</td>
<td>11.900</td>
<td>19.000</td>
</tr>
<tr>
<th>92</th>
<td>U</td>
<td>6.1940</td>
<td>11.590</td>
<td>19.800</td>
</tr>
</tbody>
</table>
<p>92 rows × 4 columns</p>
</div>
```python
barklem_reader.dissociation_energies
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Ion1</th>
<th>Ion2</th>
<th>H&H Energy [eV]</th>
<th>H&H Sigma [eV]</th>
<th>Luo Energy [eV]</th>
<th>Luo Sigma [eV]</th>
<th>G2 Energy [eV]</th>
<th>G2 Sigma [eV]</th>
<th>Adopted Energy [eV]</th>
<th>Adopted Sigma [eV]</th>
</tr>
<tr>
<th>Molecule</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>H2</th>
<td>H</td>
<td>H</td>
<td>4.478130</td>
<td>---</td>
<td>4.478007</td>
<td>0.000004</td>
<td>---</td>
<td>---</td>
<td>4.478007</td>
<td>0.000004</td>
</tr>
<tr>
<th>Li2</th>
<td>Li</td>
<td>Li</td>
<td>1.046000</td>
<td>---</td>
<td>1.049900</td>
<td>---</td>
<td>1.124000</td>
<td>---</td>
<td>1.049900</td>
<td>---</td>
</tr>
<tr>
<th>B2</th>
<td>B</td>
<td>B</td>
<td>3.020000</td>
<td>---</td>
<td>2.802000</td>
<td>---</td>
<td>---</td>
<td>---</td>
<td>2.802000</td>
<td>---</td>
</tr>
<tr>
<th>C2</th>
<td>C</td>
<td>C</td>
<td>6.210000</td>
<td>---</td>
<td>6.371000</td>
<td>0.160000</td>
<td>6.401000</td>
<td>---</td>
<td>6.371000</td>
<td>0.160000</td>
</tr>
<tr>
<th>N2</th>
<td>N</td>
<td>N</td>
<td>9.759400</td>
<td>---</td>
<td>9.753940</td>
<td>0.000900</td>
<td>9.705000</td>
<td>---</td>
<td>9.753940</td>
<td>0.000900</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>GeSe</th>
<td>Se</td>
<td>Ge</td>
<td>4.980000</td>
<td>---</td>
<td>4.983000</td>
<td>0.017000</td>
<td>---</td>
<td>---</td>
<td>4.983000</td>
<td>0.017000</td>
</tr>
<tr>
<th>KBr</th>
<td>Br</td>
<td>K</td>
<td>3.910000</td>
<td>---</td>
<td>3.890000</td>
<td>0.043000</td>
<td>---</td>
<td>---</td>
<td>3.890000</td>
<td>0.043000</td>
</tr>
<tr>
<th>SiTe</th>
<td>Te</td>
<td>Si</td>
<td>4.640000</td>
<td>---</td>
<td>3.977000</td>
<td>0.087000</td>
<td>---</td>
<td>---</td>
<td>3.977000</td>
<td>0.087000</td>
</tr>
<tr>
<th>GeTe</th>
<td>Te</td>
<td>Ge</td>
<td>4.240000</td>
<td>---</td>
<td>4.072000</td>
<td>0.035000</td>
<td>---</td>
<td>---</td>
<td>4.072000</td>
<td>0.035000</td>
</tr>
<tr>
<th>KI</th>
<td>I</td>
<td>K</td>
<td>3.310000</td>
<td>---</td>
<td>3.300000</td>
<td>0.020000</td>
<td>---</td>
<td>---</td>
<td>3.300000</td>
<td>0.020000</td>
</tr>
</tbody>
</table>
<p>291 rows × 10 columns</p>
</div>
Equilibrium constants and partition functions are sampled at temperatures from 1e-5 to 1e4 K, as described in Barklem and Collet 2016.
```python
barklem_reader.equilibrium_constants
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0.00001</th>
<th>0.00010</th>
<th>0.00100</th>
<th>0.01000</th>
<th>0.10000</th>
<th>0.15000</th>
<th>0.20000</th>
<th>0.30000</th>
<th>0.50000</th>
<th>0.70000</th>
<th>...</th>
<th>1500.00000</th>
<th>2000.00000</th>
<th>3000.00000</th>
<th>4000.00000</th>
<th>5000.00000</th>
<th>6000.00000</th>
<th>7000.00000</th>
<th>8000.00000</th>
<th>9000.00000</th>
<th>10000.00000</th>
</tr>
<tr>
<th>Molecule</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>H2</th>
<td>-2.256870e+09</td>
<td>-225687000.0</td>
<td>-22568700.0</td>
<td>-2256870.0</td>
<td>-225685.0</td>
<td>-150456.0</td>
<td>-112841.0</td>
<td>-75226.2</td>
<td>-45134.0</td>
<td>-32237.20</td>
<td>...</td>
<td>-4.50941</td>
<td>-0.578773</td>
<td>3.391970</td>
<td>5.393640</td>
<td>6.59790</td>
<td>7.40150</td>
<td>7.97669</td>
<td>8.41000</td>
<td>8.74933</td>
<td>9.02320</td>
</tr>
<tr>
<th>Li2</th>
<td>-5.291390e+08</td>
<td>-52913900.0</td>
<td>-5291390.0</td>
<td>-529139.0</td>
<td>-52911.2</td>
<td>-35272.7</td>
<td>-26453.4</td>
<td>-17634.0</td>
<td>-10578.3</td>
<td>-7554.39</td>
<td>...</td>
<td>5.74130</td>
<td>6.669680</td>
<td>7.577480</td>
<td>8.027500</td>
<td>8.31687</td>
<td>8.54092</td>
<td>8.74276</td>
<td>8.94695</td>
<td>9.16483</td>
<td>9.39624</td>
</tr>
<tr>
<th>B2</th>
<td>-1.412180e+09</td>
<td>-141218000.0</td>
<td>-14121800.0</td>
<td>-1412180.0</td>
<td>-141216.0</td>
<td>-94142.5</td>
<td>-70605.9</td>
<td>-47069.1</td>
<td>-28239.5</td>
<td>-20169.60</td>
<td>...</td>
<td>1.26571</td>
<td>3.725950</td>
<td>6.208530</td>
<td>7.462810</td>
<td>8.22075</td>
<td>8.72849</td>
<td>9.09324</td>
<td>9.37007</td>
<td>9.59097</td>
<td>9.77654</td>
</tr>
<tr>
<th>C2</th>
<td>-3.210920e+09</td>
<td>-321092000.0</td>
<td>-32109200.0</td>
<td>-3210920.0</td>
<td>-321090.0</td>
<td>-214059.0</td>
<td>-160543.0</td>
<td>-107027.0</td>
<td>-64214.6</td>
<td>-45866.10</td>
<td>...</td>
<td>-10.23760</td>
<td>-4.807300</td>
<td>0.641634</td>
<td>3.376850</td>
<td>5.02489</td>
<td>6.12879</td>
<td>6.92105</td>
<td>7.51778</td>
<td>7.98355</td>
<td>8.35733</td>
</tr>
<tr>
<th>N2</th>
<td>-4.915890e+09</td>
<td>-491589000.0</td>
<td>-49158900.0</td>
<td>-4915890.0</td>
<td>-491585.0</td>
<td>-327722.0</td>
<td>-245790.0</td>
<td>-163858.0</td>
<td>-98312.5</td>
<td>-70221.30</td>
<td>...</td>
<td>-21.41590</td>
<td>-13.077300</td>
<td>-4.704750</td>
<td>-0.496084</td>
<td>2.04530</td>
<td>3.75526</td>
<td>4.99247</td>
<td>5.93537</td>
<td>6.68194</td>
<td>7.28996</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>GeSe</th>
<td>-2.511380e+09</td>
<td>-251138000.0</td>
<td>-25113800.0</td>
<td>-2511380.0</td>
<td>-251134.0</td>
<td>-167421.0</td>
<td>-125565.0</td>
<td>-83708.0</td>
<td>-50222.5</td>
<td>-35871.50</td>
<td>...</td>
<td>-6.20701</td>
<td>-1.839170</td>
<td>2.580750</td>
<td>4.826600</td>
<td>6.19361</td>
<td>7.11537</td>
<td>7.77900</td>
<td>8.27984</td>
<td>8.67304</td>
<td>8.99403</td>
</tr>
<tr>
<th>KBr</th>
<td>-1.960520e+09</td>
<td>-196052000.0</td>
<td>-19605200.0</td>
<td>-1960520.0</td>
<td>-196048.0</td>
<td>-130697.0</td>
<td>-98021.7</td>
<td>-65346.0</td>
<td>-39205.4</td>
<td>-28002.20</td>
<td>...</td>
<td>-3.55589</td>
<td>-0.220820</td>
<td>3.134120</td>
<td>4.827280</td>
<td>5.86926</td>
<td>6.61359</td>
<td>7.21373</td>
<td>7.72999</td>
<td>8.18059</td>
<td>8.57306</td>
</tr>
<tr>
<th>SiTe</th>
<td>-2.004370e+09</td>
<td>-200437000.0</td>
<td>-20043700.0</td>
<td>-2004370.0</td>
<td>-200433.0</td>
<td>-133621.0</td>
<td>-100214.0</td>
<td>-66807.9</td>
<td>-40082.6</td>
<td>-28628.90</td>
<td>...</td>
<td>-2.91531</td>
<td>0.534329</td>
<td>4.033150</td>
<td>5.825470</td>
<td>6.92790</td>
<td>7.67944</td>
<td>8.22620</td>
<td>8.64257</td>
<td>8.97185</td>
<td>9.24224</td>
</tr>
<tr>
<th>GeTe</th>
<td>-2.052250e+09</td>
<td>-205225000.0</td>
<td>-20522500.0</td>
<td>-2052250.0</td>
<td>-205221.0</td>
<td>-136812.0</td>
<td>-102608.0</td>
<td>-68403.5</td>
<td>-41039.8</td>
<td>-29312.50</td>
<td>...</td>
<td>-3.30789</td>
<td>0.272089</td>
<td>3.904890</td>
<td>5.763160</td>
<td>6.90233</td>
<td>7.67480</td>
<td>8.23278</td>
<td>8.65444</td>
<td>8.98591</td>
<td>9.25764</td>
</tr>
<tr>
<th>KI</th>
<td>-1.663170e+09</td>
<td>-166317000.0</td>
<td>-16631700.0</td>
<td>-1663170.0</td>
<td>-166313.0</td>
<td>-110874.0</td>
<td>-83154.0</td>
<td>-55434.3</td>
<td>-33258.3</td>
<td>-23754.30</td>
<td>...</td>
<td>-1.67166</td>
<td>1.159090</td>
<td>4.002910</td>
<td>5.438360</td>
<td>6.32569</td>
<td>6.96714</td>
<td>7.49399</td>
<td>7.95547</td>
<td>8.36375</td>
<td>8.72302</td>
</tr>
</tbody>
</table>
<p>291 rows × 42 columns</p>
</div>
```python
barklem_reader.partition_functions
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>0.00001</th>
<th>0.00010</th>
<th>0.00100</th>
<th>0.01000</th>
<th>0.10000</th>
<th>0.15000</th>
<th>0.20000</th>
<th>0.30000</th>
<th>0.50000</th>
<th>0.70000</th>
<th>...</th>
<th>1500.00000</th>
<th>2000.00000</th>
<th>3000.00000</th>
<th>4000.00000</th>
<th>5000.00000</th>
<th>6000.00000</th>
<th>7000.00000</th>
<th>8000.00000</th>
<th>9000.00000</th>
<th>10000.00000</th>
</tr>
<tr>
<th>Molecule</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>H2</th>
<td>0.250000</td>
<td>0.250000</td>
<td>0.250000</td>
<td>0.250000</td>
<td>0.250000</td>
<td>0.250000</td>
<td>0.250000</td>
<td>0.250000</td>
<td>0.250000</td>
<td>0.250000</td>
<td>...</td>
<td>9.41007</td>
<td>13.0843</td>
<td>22.2684</td>
<td>34.6011</td>
<td>5.076190e+01</td>
<td>7.115120e+01</td>
<td>9.587620e+01</td>
<td>124.852</td>
<td>157.911</td>
<td>194.871</td>
</tr>
<tr>
<th>Li2</th>
<td>0.375000</td>
<td>0.375000</td>
<td>0.375000</td>
<td>0.375000</td>
<td>0.375000</td>
<td>0.375005</td>
<td>0.375124</td>
<td>0.378064</td>
<td>0.414917</td>
<td>0.495355</td>
<td>...</td>
<td>3147.34000</td>
<td>5805.8200</td>
<td>15142.2000</td>
<td>31173.2000</td>
<td>5.448590e+04</td>
<td>8.545120e+04</td>
<td>1.246260e+05</td>
<td>172676.000</td>
<td>230246.000</td>
<td>297872.000</td>
</tr>
<tr>
<th>B2</th>
<td>1.875000</td>
<td>1.875000</td>
<td>1.875000</td>
<td>1.875000</td>
<td>1.875000</td>
<td>1.875000</td>
<td>1.875000</td>
<td>1.875030</td>
<td>1.878280</td>
<td>1.898830</td>
<td>...</td>
<td>2094.40000</td>
<td>3380.0200</td>
<td>6955.2600</td>
<td>11975.9000</td>
<td>1.861150e+04</td>
<td>2.707380e+04</td>
<td>3.758660e+04</td>
<td>50352.600</td>
<td>65534.300</td>
<td>83250.200</td>
</tr>
<tr>
<th>C2</th>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>...</td>
<td>1728.02000</td>
<td>3002.8400</td>
<td>6758.5200</td>
<td>12432.7000</td>
<td>2.039010e+04</td>
<td>3.098290e+04</td>
<td>4.457250e+04</td>
<td>61555.400</td>
<td>82379.000</td>
<td>107544.000</td>
</tr>
<tr>
<th>N2</th>
<td>0.666667</td>
<td>0.666667</td>
<td>0.666667</td>
<td>0.666667</td>
<td>0.666667</td>
<td>0.666667</td>
<td>0.666667</td>
<td>0.666667</td>
<td>0.666677</td>
<td>0.666947</td>
<td>...</td>
<td>294.92200</td>
<td>433.0910</td>
<td>789.9790</td>
<td>1258.4900</td>
<td>1.842330e+03</td>
<td>2.545150e+03</td>
<td>3.371350e+03</td>
<td>4327.340</td>
<td>5424.050</td>
<td>6680.470</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>GeSe</th>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.189590</td>
<td>1.493710</td>
<td>1.832090</td>
<td>2.534750</td>
<td>3.965470</td>
<td>5.404720</td>
<td>...</td>
<td>34412.70000</td>
<td>59060.9000</td>
<td>129764.0000</td>
<td>231096.0000</td>
<td>3.664790e+05</td>
<td>5.409200e+05</td>
<td>7.612170e+05</td>
<td>1035680.000</td>
<td>1373560.000</td>
<td>1784420.000</td>
</tr>
<tr>
<th>KBr</th>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.296090</td>
<td>1.681890</td>
<td>2.093080</td>
<td>2.935000</td>
<td>4.639000</td>
<td>6.349960</td>
<td>...</td>
<td>75537.90000</td>
<td>135476.0000</td>
<td>321922.0000</td>
<td>617948.0000</td>
<td>1.044380e+06</td>
<td>1.612150e+06</td>
<td>2.321720e+06</td>
<td>3165260.000</td>
<td>4129640.000</td>
<td>5199270.000</td>
</tr>
<tr>
<th>SiTe</th>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.296250</td>
<td>1.682170</td>
<td>2.093460</td>
<td>2.935590</td>
<td>4.639980</td>
<td>6.351320</td>
<td>...</td>
<td>35231.70000</td>
<td>59604.7000</td>
<td>128059.0000</td>
<td>223473.0000</td>
<td>3.470030e+05</td>
<td>5.007860e+05</td>
<td>6.885790e+05</td>
<td>915880.000</td>
<td>1189470.000</td>
<td>1516650.000</td>
</tr>
<tr>
<th>GeTe</th>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.476830</td>
<td>1.978830</td>
<td>2.498160</td>
<td>3.551040</td>
<td>5.672150</td>
<td>7.798670</td>
<td>...</td>
<td>61591.50000</td>
<td>106650.0000</td>
<td>236391.0000</td>
<td>422636.0000</td>
<td>6.715710e+05</td>
<td>9.931800e+05</td>
<td>1.402380e+06</td>
<td>1918290.000</td>
<td>2562110.000</td>
<td>3354910.000</td>
</tr>
<tr>
<th>KI</th>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.000000</td>
<td>1.549050</td>
<td>2.093720</td>
<td>2.653750</td>
<td>3.786740</td>
<td>6.066760</td>
<td>8.351800</td>
<td>...</td>
<td>113418.00000</td>
<td>203747.0000</td>
<td>483831.0000</td>
<td>927859.0000</td>
<td>1.570380e+06</td>
<td>2.432910e+06</td>
<td>3.521270e+06</td>
<td>4828320.000</td>
<td>6338480.000</td>
<td>8032000.000</td>
</tr>
</tbody>
</table>
<p>291 rows × 42 columns</p>
</div>
```python
```
|
tardis-snREPO_NAMEcarsusPATH_START.@carsus_extracted@carsus-master@docs@io@barklem2016.ipynb@.PATH_END.py
|
{
"filename": "diagtable.py",
"repo_name": "ExeClim/Isca",
"repo_path": "Isca_extracted/Isca-master/src/extra/python/isca/diagtable.py",
"type": "Python"
}
|
import copy
from jinja2 import Template
# Jinja2 template that renders this DiagTable as an FMS-format diag_table
# file: a title line, a base-date line (real date only when a calendar is
# in use), then the output-file entries followed by the field entries.
_TEMPLATE = Template("""
{%- macro fortrantrue(t) -%}
{%- if t -%}
.true.
{%- else -%}
.false.
{%- endif -%}
{%- endmacro -%}
"FMS Model results"
{% if calendar -%}
0001 1 1 0 0 0
{%- else -%}
0 0 0 0 0 0
{%- endif %}
# = output files =
# file_name, output_freq, output_units, format, time_units, long_name
{% for file in outputfiles %}
"{{ file.name }}", {{ file.freq }}, "{{ file.units }}", 1, "{{ file.time_units }}", "time",
{% endfor %}
# = diagnostic field entries =
# module_name, field_name, output_name, file_name, time_sampling, time_avg, other_opts, precision
{% for file in outputfiles %}
{% for field in file.fields -%}
"{{ field.module}}", "{{ field.name }}", "{{ field.name }}", "{{ file.name }}", "all", {{ fortrantrue(field.time_avg) }}, "none", 2,
{% endfor %}
{% endfor %}
""")
def numorstr(x):
    """Parse one diag_table token into a Python value.

    Strips whitespace, unquotes double-quoted strings, converts numeric
    tokens to int (preferred) or float, and maps Fortran logicals
    (``.true.``/``.false.``, case-insensitive) to booleans.  Anything
    else is returned as the stripped string.
    """
    x = x.strip()
    if x.startswith('"'):
        return x.strip('"')
    # Try int first so "30" stays an int; fall back to float so tokens
    # like "0.25" actually parse.  (The previous code called int(x)
    # first and a failed int() jumped straight to the except clause, so
    # float tokens were always returned as strings.)
    try:
        return int(x)
    except ValueError:
        pass
    try:
        return float(x)
    except ValueError:
        pass
    if x.lower() == '.true.':
        return True
    if x.lower() == '.false.':
        return False
    return x
class DiagTable(object):
    """In-memory representation of an FMS diag_table.

    Output files live in ``self.files``, a dict mapping file name to a
    dict with keys ``name``, ``freq``, ``units``, ``time_units`` and
    ``fields`` (a list of per-field dicts).
    """

    def __init__(self):
        super(DiagTable, self).__init__()
        self.files = {}
        # Calendar type (e.g. 'thirty_day').  None, False and
        # 'no_calendar' all mean "no calendar in use".
        self.calendar = None

    def add_file(self, name, freq, units="hours", time_units=None):
        """Register an output file.  ``time_units`` defaults to ``units``."""
        if time_units is None:
            time_units = units
        self.files[name] = {
            'name': name,
            'freq': freq,
            'units': units,
            'time_units': time_units,
            'fields': []
        }

    def add_field(self, module, name, time_avg=False, files=None):
        """Add a diagnostic field to the given files (default: all files)."""
        if files is None:
            files = self.files.keys()
        for fname in files:
            self.files[fname]['fields'].append({
                'module': module,
                'name': name,
                'time_avg': time_avg
            })

    def copy(self):
        """Return an independent deep copy of this table.

        Also carries over ``calendar`` (the previous implementation
        silently dropped it, so copies lost their calendar setting).
        """
        d = DiagTable()
        d.files = copy.deepcopy(self.files)
        d.calendar = self.calendar
        return d

    def has_calendar(self):
        """Return True if a calendar is set and it is not 'no_calendar'.

        Handles non-string values of ``calendar`` (``from_file`` sets it
        to False when no calendar line is found); the previous code
        called ``.lower()`` on that bool and raised AttributeError.
        """
        if not self.calendar:
            return False
        if isinstance(self.calendar, str) and self.calendar.lower() == 'no_calendar':
            return False
        return True

    def write(self, filename):
        """Render the diag_table to ``filename`` via the Jinja2 template."""
        vars = {'calendar': self.has_calendar(),
                'outputfiles': self.files.values()}
        _TEMPLATE.stream(**vars).dump(filename)

    def is_valid(self):
        """A diag_table needs at least one output file to be usable."""
        return len(self.files) > 0

    @classmethod
    def from_file(cls, filename):
        """Parse an existing diag_table file into a DiagTable.

        File entries have 7 comma-separated tokens, field entries 9;
        lines starting with '#' are comments.  (The old version also
        read the whole file a second time into an unused list.)
        """
        dt = cls()
        dt.calendar = False
        with open(filename, 'r') as file:
            for line in file:
                lx = line.strip()
                if lx.startswith('#'):
                    continue
                # Base-date line that `write` emits for calendar runs.
                if lx == '0001 1 1 0 0 0':
                    dt.calendar = 'undefined'
                    continue
                ls = lx.split(',')
                vals = [numorstr(x) for x in ls]
                if len(ls) == 7:
                    dt.add_file(
                        name=vals[0],
                        freq=vals[1],
                        units=vals[2],
                        time_units=vals[4])
                elif len(ls) == 9:
                    dt.add_field(
                        module=vals[0],
                        name=vals[1],
                        time_avg=vals[5],
                        files=[vals[3]])
        return dt
|
ExeClimREPO_NAMEIscaPATH_START.@Isca_extracted@Isca-master@src@extra@python@isca@diagtable.py@.PATH_END.py
|
{
"filename": "_alignmentgroup.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/bar/_alignmentgroup.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AlignmentgroupValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``bar.alignmentgroup`` property."""

    def __init__(self, plotly_name="alignmentgroup", parent_name="bar", **kwargs):
        # Honour a caller-supplied edit_type, falling back to "calc".
        edit_type = kwargs.pop("edit_type", "calc")
        super(AlignmentgroupValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@bar@_alignmentgroup.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/retrievers/document_compressors/__init__.py",
"type": "Python"
}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@retrievers@document_compressors@__init__.py@.PATH_END.py
|
|
{
"filename": "apps.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/webserver/lasair/apps/annotator/apps.py",
"type": "Python"
}
|
from django.apps import AppConfig
class AnnotatorConfig(AppConfig):
    """Django application configuration for the Lasair annotator app."""

    # Use 64-bit integer primary keys for models defined in this app.
    default_auto_field = 'django.db.models.BigAutoField'
    # Dotted path Django uses to locate and register this app.
    name = 'lasair.apps.annotator'
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@webserver@lasair@apps@annotator@apps.py@.PATH_END.py
|
{
"filename": "match_pinholes.py",
"repo_name": "mwanakijiji/dewarp",
"repo_path": "dewarp_extracted/dewarp-master/astrom/match_pinholes.py",
"type": "Python"
}
|
# This is the script to run for making a warping/astrometric solution to LMIRCam, using data taken
# in Nov and Dev 2016
# created by E.S., Nov 2016
# revamped to be more user-friendly, Nov 2017
import numpy as np
from astrom_lmircam_soln import *
from astrom_lmircam_soln import find_pinhole_centroids
from astrom_lmircam_soln import polywarp
from astrom_lmircam_soln import polywarp_v2 # the check for polywarp
from astrom_lmircam_soln import dewarp
from astrom_lmircam_soln import make_barb_plot
from astropy.io import fits
import matplotlib.pyplot as plt
import pickle
import ipdb
import configparser
# Configuration data: parse the repo's .ini file; sections such as
# [data_dirs] and [src_file_names] are read by match_pinholes() below.
config = configparser.ConfigParser()  # for parsing values in .ini file
config.read("astrom/config.ini")
#####################################################################
# RETRIEVE MEDIAN PINHOLE GRID IMAGE, FIND PINHOLE CENTERS
def match_pinholes(translationPass,
                   holeSpacingPass,
                   barrelCenterPass,
                   barrelAmountPass,
                   rotationAnglePass,
                   writeoutString='test',
                   plotTitleString='test',
                   plot=True):
    '''
    Find pinhole centroids in the median pinhole-grid image, match them
    against a model grid, and pickle the matched coordinate sets.

    translationPass: translation applied to the model grid
    holeSpacingPass: spacing between the model pinholes
    barrelCenterPass: center of the model barrel distortion — TODO confirm
        against put_down_grid_guesses
    barrelAmountPass: strength of the model barrel distortion — TODO confirm
    rotationAnglePass: rotation applied to the model grid — TODO confirm
    writeoutString: suffix used in the output pickle file name
    plotTitleString: title string passed to the diagnostic plot
    plot: if True, show the matching plot
    '''
    # write the git hash
    write_hash('git_hash_match_pinholes.txt')

    # read in median image of pinholes; close the FITS handle promptly
    # (the previous version never closed it)
    with fits.open(config["data_dirs"]["DIR_PINHOLE_BASIC"]
                   + config["src_file_names"]["PINHOLE_FITS"]) as hdul:
        imagePinholes = hdul[0].data.copy()

    # make model pinhole locations: distorted and undistorted
    coordsModel_d, coordsModel_not_d = find_pinhole_centroids.put_down_grid_guesses(
        translationPass,
        holeSpacingPass,
        barrelCenterPass,
        barrelAmountPass,
        rotationAnglePass)

    # find empirical pinhole locations
    coordsEmpirical = find_pinhole_centroids.find_psf_centers(
        imagePass=imagePinholes,
        fwhmPass=20.,
        thresholdPass=1000.)

    # match and sort the model coordinates (distorted and undistorted)
    # with the empirical pinhole coordinates
    coordsModel_d_matched, coordsModel_not_d_matched, coordsEmpirical_matched = \
        find_pinhole_centroids.match_model_empirical(
            coordsModel_d,
            coordsModel_not_d,
            coordsEmpirical,
            imagePinholes,
            plotTitleString,
            plot=plot)

    # pickle the matched coordinates; a context manager guarantees the
    # file is closed even if pickle.dump raises
    picklename = 'pinhole_coordinates_' + writeoutString + '.pkl'
    with open(picklename, 'wb') as fo:
        pickle.dump({'coordsModel_d_matched': coordsModel_d_matched,
                     'coordsModel_not_d_matched': coordsModel_not_d_matched,
                     'coordsEmpirical_matched': coordsEmpirical_matched},
                    fo)

    print('------------------------------')
    print('Saved pinhole grid info to:')
    print(picklename)
    print('------------------------------')
'''
class match:
def __init__(self,translation, holeSpacing, barrelCenter, barrelAmount, rotationAngle, writeoutString, plot=True):
self.translation = translation
self.holeSpacing = holeSpacing
self.barrelCenter = barrelCenter
self.barrelAmoung = barrelAmount
self.rotationAngle = rotationAngle
self.writeoutString = writeoutString
def __call__(self):
match_pinholes(self.translation,
self.holeSpacing,
self.barrelCenter,
self.barrelAmount,
self.rotationAngle,
self.writeoutString)
match
'''
|
mwanakijijiREPO_NAMEdewarpPATH_START.@dewarp_extracted@dewarp-master@astrom@match_pinholes.py@.PATH_END.py
|
{
"filename": "pascal.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/lexers/pascal.py",
"type": "Python"
}
|
"""
pygments.lexers.pascal
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Pascal family languages.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer
from pygments.util import get_bool_opt, get_list_opt
from pygments.token import Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Whitespace
from pygments.scanner import Scanner
# compatibility import
from pygments.lexers.modula2 import Modula2Lexer # noqa: F401
__all__ = ['DelphiLexer', 'PortugolLexer']
class PortugolLexer(Lexer):
    """Lexer for Portugol, a Pascal dialect with Portuguese keywords.

    All actual tokenising is delegated to a `DelphiLexer` instance
    constructed in Portugol mode.
    """
    name = 'Portugol'
    aliases = ['portugol']
    filenames = ['*.alg', '*.portugol']
    mimetypes = []
    url = "https://www.apoioinformatica.inf.br/produtos/visualg/linguagem"
    version_added = ''

    def __init__(self, **options):
        Lexer.__init__(self, **options)
        # Delegate: a Delphi lexer switched into Portugol mode.
        self.lexer = DelphiLexer(portugol=True, **options)

    def get_tokens_unprocessed(self, text):
        return self.lexer.get_tokens_unprocessed(text)
class DelphiLexer(Lexer):
"""
For Delphi (Borland Object Pascal),
Turbo Pascal and Free Pascal source code.
Additional options accepted:
`turbopascal`
Highlight Turbo Pascal specific keywords (default: ``True``).
`delphi`
Highlight Borland Delphi specific keywords (default: ``True``).
`freepascal`
Highlight Free Pascal specific keywords (default: ``True``).
`units`
A list of units that should be considered builtin, supported are
``System``, ``SysUtils``, ``Classes`` and ``Math``.
Default is to consider all of them builtin.
"""
name = 'Delphi'
aliases = ['delphi', 'pas', 'pascal', 'objectpascal']
filenames = ['*.pas', '*.dpr']
mimetypes = ['text/x-pascal']
url = 'https://www.embarcadero.com/products/delphi'
version_added = ''
TURBO_PASCAL_KEYWORDS = (
'absolute', 'and', 'array', 'asm', 'begin', 'break', 'case',
'const', 'constructor', 'continue', 'destructor', 'div', 'do',
'downto', 'else', 'end', 'file', 'for', 'function', 'goto',
'if', 'implementation', 'in', 'inherited', 'inline', 'interface',
'label', 'mod', 'nil', 'not', 'object', 'of', 'on', 'operator',
'or', 'packed', 'procedure', 'program', 'record', 'reintroduce',
'repeat', 'self', 'set', 'shl', 'shr', 'string', 'then', 'to',
'type', 'unit', 'until', 'uses', 'var', 'while', 'with', 'xor'
)
DELPHI_KEYWORDS = (
'as', 'class', 'except', 'exports', 'finalization', 'finally',
'initialization', 'is', 'library', 'on', 'property', 'raise',
'threadvar', 'try'
)
FREE_PASCAL_KEYWORDS = (
'dispose', 'exit', 'false', 'new', 'true'
)
BLOCK_KEYWORDS = {
'begin', 'class', 'const', 'constructor', 'destructor', 'end',
'finalization', 'function', 'implementation', 'initialization',
'label', 'library', 'operator', 'procedure', 'program', 'property',
'record', 'threadvar', 'type', 'unit', 'uses', 'var'
}
FUNCTION_MODIFIERS = {
'alias', 'cdecl', 'export', 'inline', 'interrupt', 'nostackframe',
'pascal', 'register', 'safecall', 'softfloat', 'stdcall',
'varargs', 'name', 'dynamic', 'near', 'virtual', 'external',
'override', 'assembler'
}
# XXX: those aren't global. but currently we know no way for defining
# them just for the type context.
DIRECTIVES = {
'absolute', 'abstract', 'assembler', 'cppdecl', 'default', 'far',
'far16', 'forward', 'index', 'oldfpccall', 'private', 'protected',
'published', 'public'
}
BUILTIN_TYPES = {
'ansichar', 'ansistring', 'bool', 'boolean', 'byte', 'bytebool',
'cardinal', 'char', 'comp', 'currency', 'double', 'dword',
'extended', 'int64', 'integer', 'iunknown', 'longbool', 'longint',
'longword', 'pansichar', 'pansistring', 'pbool', 'pboolean',
'pbyte', 'pbytearray', 'pcardinal', 'pchar', 'pcomp', 'pcurrency',
'pdate', 'pdatetime', 'pdouble', 'pdword', 'pextended', 'phandle',
'pint64', 'pinteger', 'plongint', 'plongword', 'pointer',
'ppointer', 'pshortint', 'pshortstring', 'psingle', 'psmallint',
'pstring', 'pvariant', 'pwidechar', 'pwidestring', 'pword',
'pwordarray', 'pwordbool', 'real', 'real48', 'shortint',
'shortstring', 'single', 'smallint', 'string', 'tclass', 'tdate',
'tdatetime', 'textfile', 'thandle', 'tobject', 'ttime', 'variant',
'widechar', 'widestring', 'word', 'wordbool'
}
BUILTIN_UNITS = {
'System': (
'abs', 'acquireexceptionobject', 'addr', 'ansitoutf8',
'append', 'arctan', 'assert', 'assigned', 'assignfile',
'beginthread', 'blockread', 'blockwrite', 'break', 'chdir',
'chr', 'close', 'closefile', 'comptocurrency', 'comptodouble',
'concat', 'continue', 'copy', 'cos', 'dec', 'delete',
'dispose', 'doubletocomp', 'endthread', 'enummodules',
'enumresourcemodules', 'eof', 'eoln', 'erase', 'exceptaddr',
'exceptobject', 'exclude', 'exit', 'exp', 'filepos', 'filesize',
'fillchar', 'finalize', 'findclasshinstance', 'findhinstance',
'findresourcehinstance', 'flush', 'frac', 'freemem',
'get8087cw', 'getdir', 'getlasterror', 'getmem',
'getmemorymanager', 'getmodulefilename', 'getvariantmanager',
'halt', 'hi', 'high', 'inc', 'include', 'initialize', 'insert',
'int', 'ioresult', 'ismemorymanagerset', 'isvariantmanagerset',
'length', 'ln', 'lo', 'low', 'mkdir', 'move', 'new', 'odd',
'olestrtostring', 'olestrtostrvar', 'ord', 'paramcount',
'paramstr', 'pi', 'pos', 'pred', 'ptr', 'pucs4chars', 'random',
'randomize', 'read', 'readln', 'reallocmem',
'releaseexceptionobject', 'rename', 'reset', 'rewrite', 'rmdir',
'round', 'runerror', 'seek', 'seekeof', 'seekeoln',
'set8087cw', 'setlength', 'setlinebreakstyle',
'setmemorymanager', 'setstring', 'settextbuf',
'setvariantmanager', 'sin', 'sizeof', 'slice', 'sqr', 'sqrt',
'str', 'stringofchar', 'stringtoolestr', 'stringtowidechar',
'succ', 'swap', 'trunc', 'truncate', 'typeinfo',
'ucs4stringtowidestring', 'unicodetoutf8', 'uniquestring',
'upcase', 'utf8decode', 'utf8encode', 'utf8toansi',
'utf8tounicode', 'val', 'vararrayredim', 'varclear',
'widecharlentostring', 'widecharlentostrvar',
'widechartostring', 'widechartostrvar',
'widestringtoucs4string', 'write', 'writeln'
),
'SysUtils': (
'abort', 'addexitproc', 'addterminateproc', 'adjustlinebreaks',
'allocmem', 'ansicomparefilename', 'ansicomparestr',
'ansicomparetext', 'ansidequotedstr', 'ansiextractquotedstr',
'ansilastchar', 'ansilowercase', 'ansilowercasefilename',
'ansipos', 'ansiquotedstr', 'ansisamestr', 'ansisametext',
'ansistrcomp', 'ansistricomp', 'ansistrlastchar', 'ansistrlcomp',
'ansistrlicomp', 'ansistrlower', 'ansistrpos', 'ansistrrscan',
'ansistrscan', 'ansistrupper', 'ansiuppercase',
'ansiuppercasefilename', 'appendstr', 'assignstr', 'beep',
'booltostr', 'bytetocharindex', 'bytetocharlen', 'bytetype',
'callterminateprocs', 'changefileext', 'charlength',
'chartobyteindex', 'chartobytelen', 'comparemem', 'comparestr',
'comparetext', 'createdir', 'createguid', 'currentyear',
'currtostr', 'currtostrf', 'date', 'datetimetofiledate',
'datetimetostr', 'datetimetostring', 'datetimetosystemtime',
'datetimetotimestamp', 'datetostr', 'dayofweek', 'decodedate',
'decodedatefully', 'decodetime', 'deletefile', 'directoryexists',
'diskfree', 'disksize', 'disposestr', 'encodedate', 'encodetime',
'exceptionerrormessage', 'excludetrailingbackslash',
'excludetrailingpathdelimiter', 'expandfilename',
'expandfilenamecase', 'expanduncfilename', 'extractfiledir',
'extractfiledrive', 'extractfileext', 'extractfilename',
'extractfilepath', 'extractrelativepath', 'extractshortpathname',
'fileage', 'fileclose', 'filecreate', 'filedatetodatetime',
'fileexists', 'filegetattr', 'filegetdate', 'fileisreadonly',
'fileopen', 'fileread', 'filesearch', 'fileseek', 'filesetattr',
'filesetdate', 'filesetreadonly', 'filewrite', 'finalizepackage',
'findclose', 'findcmdlineswitch', 'findfirst', 'findnext',
'floattocurr', 'floattodatetime', 'floattodecimal', 'floattostr',
'floattostrf', 'floattotext', 'floattotextfmt', 'fmtloadstr',
'fmtstr', 'forcedirectories', 'format', 'formatbuf', 'formatcurr',
'formatdatetime', 'formatfloat', 'freeandnil', 'getcurrentdir',
'getenvironmentvariable', 'getfileversion', 'getformatsettings',
'getlocaleformatsettings', 'getmodulename', 'getpackagedescription',
'getpackageinfo', 'gettime', 'guidtostring', 'incamonth',
'includetrailingbackslash', 'includetrailingpathdelimiter',
'incmonth', 'initializepackage', 'interlockeddecrement',
'interlockedexchange', 'interlockedexchangeadd',
'interlockedincrement', 'inttohex', 'inttostr', 'isdelimiter',
'isequalguid', 'isleapyear', 'ispathdelimiter', 'isvalidident',
'languages', 'lastdelimiter', 'loadpackage', 'loadstr',
'lowercase', 'msecstotimestamp', 'newstr', 'nextcharindex', 'now',
'outofmemoryerror', 'quotedstr', 'raiselastoserror',
'raiselastwin32error', 'removedir', 'renamefile', 'replacedate',
'replacetime', 'safeloadlibrary', 'samefilename', 'sametext',
'setcurrentdir', 'showexception', 'sleep', 'stralloc', 'strbufsize',
'strbytetype', 'strcat', 'strcharlength', 'strcomp', 'strcopy',
'strdispose', 'strecopy', 'strend', 'strfmt', 'stricomp',
'stringreplace', 'stringtoguid', 'strlcat', 'strlcomp', 'strlcopy',
'strlen', 'strlfmt', 'strlicomp', 'strlower', 'strmove', 'strnew',
'strnextchar', 'strpas', 'strpcopy', 'strplcopy', 'strpos',
'strrscan', 'strscan', 'strtobool', 'strtobooldef', 'strtocurr',
'strtocurrdef', 'strtodate', 'strtodatedef', 'strtodatetime',
'strtodatetimedef', 'strtofloat', 'strtofloatdef', 'strtoint',
'strtoint64', 'strtoint64def', 'strtointdef', 'strtotime',
'strtotimedef', 'strupper', 'supports', 'syserrormessage',
'systemtimetodatetime', 'texttofloat', 'time', 'timestamptodatetime',
'timestamptomsecs', 'timetostr', 'trim', 'trimleft', 'trimright',
'tryencodedate', 'tryencodetime', 'tryfloattocurr', 'tryfloattodatetime',
'trystrtobool', 'trystrtocurr', 'trystrtodate', 'trystrtodatetime',
'trystrtofloat', 'trystrtoint', 'trystrtoint64', 'trystrtotime',
'unloadpackage', 'uppercase', 'widecomparestr', 'widecomparetext',
'widefmtstr', 'wideformat', 'wideformatbuf', 'widelowercase',
'widesamestr', 'widesametext', 'wideuppercase', 'win32check',
'wraptext'
),
'Classes': (
'activateclassgroup', 'allocatehwnd', 'bintohex', 'checksynchronize',
'collectionsequal', 'countgenerations', 'deallocatehwnd', 'equalrect',
'extractstrings', 'findclass', 'findglobalcomponent', 'getclass',
'groupdescendantswith', 'hextobin', 'identtoint',
'initinheritedcomponent', 'inttoident', 'invalidpoint',
'isuniqueglobalcomponentname', 'linestart', 'objectbinarytotext',
'objectresourcetotext', 'objecttexttobinary', 'objecttexttoresource',
'pointsequal', 'readcomponentres', 'readcomponentresex',
'readcomponentresfile', 'rect', 'registerclass', 'registerclassalias',
'registerclasses', 'registercomponents', 'registerintegerconsts',
'registernoicon', 'registernonactivex', 'smallpoint', 'startclassgroup',
'teststreamformat', 'unregisterclass', 'unregisterclasses',
'unregisterintegerconsts', 'unregistermoduleclasses',
'writecomponentresfile'
),
'Math': (
'arccos', 'arccosh', 'arccot', 'arccoth', 'arccsc', 'arccsch', 'arcsec',
'arcsech', 'arcsin', 'arcsinh', 'arctan2', 'arctanh', 'ceil',
'comparevalue', 'cosecant', 'cosh', 'cot', 'cotan', 'coth', 'csc',
'csch', 'cycletodeg', 'cycletograd', 'cycletorad', 'degtocycle',
'degtograd', 'degtorad', 'divmod', 'doubledecliningbalance',
'ensurerange', 'floor', 'frexp', 'futurevalue', 'getexceptionmask',
'getprecisionmode', 'getroundmode', 'gradtocycle', 'gradtodeg',
'gradtorad', 'hypot', 'inrange', 'interestpayment', 'interestrate',
'internalrateofreturn', 'intpower', 'isinfinite', 'isnan', 'iszero',
'ldexp', 'lnxp1', 'log10', 'log2', 'logn', 'max', 'maxintvalue',
'maxvalue', 'mean', 'meanandstddev', 'min', 'minintvalue', 'minvalue',
'momentskewkurtosis', 'netpresentvalue', 'norm', 'numberofperiods',
'payment', 'periodpayment', 'poly', 'popnstddev', 'popnvariance',
'power', 'presentvalue', 'radtocycle', 'radtodeg', 'radtograd',
'randg', 'randomrange', 'roundto', 'samevalue', 'sec', 'secant',
'sech', 'setexceptionmask', 'setprecisionmode', 'setroundmode',
'sign', 'simpleroundto', 'sincos', 'sinh', 'slndepreciation', 'stddev',
'sum', 'sumint', 'sumofsquares', 'sumsandsquares', 'syddepreciation',
'tan', 'tanh', 'totalvariance', 'variance'
)
}
ASM_REGISTERS = {
'ah', 'al', 'ax', 'bh', 'bl', 'bp', 'bx', 'ch', 'cl', 'cr0',
'cr1', 'cr2', 'cr3', 'cr4', 'cs', 'cx', 'dh', 'di', 'dl', 'dr0',
'dr1', 'dr2', 'dr3', 'dr4', 'dr5', 'dr6', 'dr7', 'ds', 'dx',
'eax', 'ebp', 'ebx', 'ecx', 'edi', 'edx', 'es', 'esi', 'esp',
'fs', 'gs', 'mm0', 'mm1', 'mm2', 'mm3', 'mm4', 'mm5', 'mm6',
'mm7', 'si', 'sp', 'ss', 'st0', 'st1', 'st2', 'st3', 'st4', 'st5',
'st6', 'st7', 'xmm0', 'xmm1', 'xmm2', 'xmm3', 'xmm4', 'xmm5',
'xmm6', 'xmm7'
}
ASM_INSTRUCTIONS = {
'aaa', 'aad', 'aam', 'aas', 'adc', 'add', 'and', 'arpl', 'bound',
'bsf', 'bsr', 'bswap', 'bt', 'btc', 'btr', 'bts', 'call', 'cbw',
'cdq', 'clc', 'cld', 'cli', 'clts', 'cmc', 'cmova', 'cmovae',
'cmovb', 'cmovbe', 'cmovc', 'cmovcxz', 'cmove', 'cmovg',
'cmovge', 'cmovl', 'cmovle', 'cmovna', 'cmovnae', 'cmovnb',
'cmovnbe', 'cmovnc', 'cmovne', 'cmovng', 'cmovnge', 'cmovnl',
'cmovnle', 'cmovno', 'cmovnp', 'cmovns', 'cmovnz', 'cmovo',
'cmovp', 'cmovpe', 'cmovpo', 'cmovs', 'cmovz', 'cmp', 'cmpsb',
'cmpsd', 'cmpsw', 'cmpxchg', 'cmpxchg486', 'cmpxchg8b', 'cpuid',
'cwd', 'cwde', 'daa', 'das', 'dec', 'div', 'emms', 'enter', 'hlt',
'ibts', 'icebp', 'idiv', 'imul', 'in', 'inc', 'insb', 'insd',
'insw', 'int', 'int01', 'int03', 'int1', 'int3', 'into', 'invd',
'invlpg', 'iret', 'iretd', 'iretw', 'ja', 'jae', 'jb', 'jbe',
'jc', 'jcxz', 'jcxz', 'je', 'jecxz', 'jg', 'jge', 'jl', 'jle',
'jmp', 'jna', 'jnae', 'jnb', 'jnbe', 'jnc', 'jne', 'jng', 'jnge',
'jnl', 'jnle', 'jno', 'jnp', 'jns', 'jnz', 'jo', 'jp', 'jpe',
'jpo', 'js', 'jz', 'lahf', 'lar', 'lcall', 'lds', 'lea', 'leave',
'les', 'lfs', 'lgdt', 'lgs', 'lidt', 'ljmp', 'lldt', 'lmsw',
'loadall', 'loadall286', 'lock', 'lodsb', 'lodsd', 'lodsw',
'loop', 'loope', 'loopne', 'loopnz', 'loopz', 'lsl', 'lss', 'ltr',
'mov', 'movd', 'movq', 'movsb', 'movsd', 'movsw', 'movsx',
'movzx', 'mul', 'neg', 'nop', 'not', 'or', 'out', 'outsb', 'outsd',
'outsw', 'pop', 'popa', 'popad', 'popaw', 'popf', 'popfd', 'popfw',
'push', 'pusha', 'pushad', 'pushaw', 'pushf', 'pushfd', 'pushfw',
'rcl', 'rcr', 'rdmsr', 'rdpmc', 'rdshr', 'rdtsc', 'rep', 'repe',
'repne', 'repnz', 'repz', 'ret', 'retf', 'retn', 'rol', 'ror',
'rsdc', 'rsldt', 'rsm', 'sahf', 'sal', 'salc', 'sar', 'sbb',
'scasb', 'scasd', 'scasw', 'seta', 'setae', 'setb', 'setbe',
'setc', 'setcxz', 'sete', 'setg', 'setge', 'setl', 'setle',
'setna', 'setnae', 'setnb', 'setnbe', 'setnc', 'setne', 'setng',
'setnge', 'setnl', 'setnle', 'setno', 'setnp', 'setns', 'setnz',
'seto', 'setp', 'setpe', 'setpo', 'sets', 'setz', 'sgdt', 'shl',
'shld', 'shr', 'shrd', 'sidt', 'sldt', 'smi', 'smint', 'smintold',
'smsw', 'stc', 'std', 'sti', 'stosb', 'stosd', 'stosw', 'str',
'sub', 'svdc', 'svldt', 'svts', 'syscall', 'sysenter', 'sysexit',
'sysret', 'test', 'ud1', 'ud2', 'umov', 'verr', 'verw', 'wait',
'wbinvd', 'wrmsr', 'wrshr', 'xadd', 'xbts', 'xchg', 'xlat',
'xlatb', 'xor'
}
PORTUGOL_KEYWORDS = (
'aleatorio',
'algoritmo',
'arquivo',
'ate',
'caso',
'cronometro',
'debug',
'e',
'eco',
'enquanto',
'entao',
'escolha',
'escreva',
'escreval',
'faca',
'falso',
'fimalgoritmo',
'fimenquanto',
'fimescolha',
'fimfuncao',
'fimpara',
'fimprocedimento',
'fimrepita',
'fimse',
'funcao',
'inicio',
'int',
'interrompa',
'leia',
'limpatela',
'mod',
'nao',
'ou',
'outrocaso',
'para',
'passo',
'pausa',
'procedimento',
'repita',
'retorne',
'se',
'senao',
'timer',
'var',
'vetor',
'verdadeiro',
'xou',
'div',
'mod',
'abs',
'arccos',
'arcsen',
'arctan',
'cos',
'cotan',
'Exp',
'grauprad',
'int',
'log',
'logn',
'pi',
'quad',
'radpgrau',
'raizq',
'rand',
'randi',
'sen',
'Tan',
'asc',
'carac',
'caracpnum',
'compr',
'copia',
'maiusc',
'minusc',
'numpcarac',
'pos',
)
PORTUGOL_BUILTIN_TYPES = {
'inteiro', 'real', 'caractere', 'logico'
}
def __init__(self, **options):
Lexer.__init__(self, **options)
self.keywords = set()
self.builtins = set()
if get_bool_opt(options, 'portugol', False):
self.keywords.update(self.PORTUGOL_KEYWORDS)
self.builtins.update(self.PORTUGOL_BUILTIN_TYPES)
self.is_portugol = True
else:
self.is_portugol = False
if get_bool_opt(options, 'turbopascal', True):
self.keywords.update(self.TURBO_PASCAL_KEYWORDS)
if get_bool_opt(options, 'delphi', True):
self.keywords.update(self.DELPHI_KEYWORDS)
if get_bool_opt(options, 'freepascal', True):
self.keywords.update(self.FREE_PASCAL_KEYWORDS)
for unit in get_list_opt(options, 'units', list(self.BUILTIN_UNITS)):
self.builtins.update(self.BUILTIN_UNITS[unit])
    def get_tokens_unprocessed(self, text):
        """Tokenize ``text`` with a stateful ``Scanner``, yielding
        ``(index, tokentype, value)`` triples.

        A small hand-written state machine with three states kept on
        ``stack``: ``'initial'`` (normal code), ``'string'`` (inside a
        string literal) and ``'asm'`` (inside an ``asm ... end`` block,
        Pascal only).
        """
        scanner = Scanner(text, re.DOTALL | re.MULTILINE | re.IGNORECASE)
        stack = ['initial']
        # flags describing what the *next* identifier should be highlighted as
        in_function_block = False
        in_property_block = False
        was_dot = False
        next_token_is_function = False
        next_token_is_property = False
        collect_labels = False
        block_labels = set()
        # [paren depth, bracket depth] while inside a function/property header
        brace_balance = [0, 0]
        while not scanner.eos:
            token = Error
            if stack[-1] == 'initial':
                if scanner.scan(r'\s+'):
                    token = Whitespace
                elif not self.is_portugol and scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    if scanner.match.startswith('$'):
                        token = Comment.Preproc
                    else:
                        token = Comment.Multiline
                elif scanner.scan(r'//.*?$'):
                    token = Comment.Single
                elif self.is_portugol and scanner.scan(r'(<\-)|(>=)|(<=)|%|<|>|-|\+|\*|\=|(<>)|\/|\.|:|,'):
                    token = Operator
                elif not self.is_portugol and scanner.scan(r'[-+*\/=<>:;,.@\^]'):
                    token = Operator
                    # stop label highlighting on next ";"
                    if collect_labels and scanner.match == ';':
                        collect_labels = False
                elif scanner.scan(r'[\(\)\[\]]+'):
                    token = Punctuation
                    # abort function naming ``foo = Function(...)``
                    next_token_is_function = False
                    # if we are in a function block we count the open
                    # braces because otherwise it's impossible to
                    # determine the end of the modifier context
                    if in_function_block or in_property_block:
                        if scanner.match == '(':
                            brace_balance[0] += 1
                        elif scanner.match == ')':
                            brace_balance[0] -= 1
                        elif scanner.match == '[':
                            brace_balance[1] += 1
                        elif scanner.match == ']':
                            brace_balance[1] -= 1
                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
                    lowercase_name = scanner.match.lower()
                    if lowercase_name == 'result':
                        token = Name.Builtin.Pseudo
                    elif lowercase_name in self.keywords:
                        token = Keyword
                        # if we are in a special block and a
                        # block ending keyword occurs (and the parenthesis
                        # is balanced) we end the current block context
                        if self.is_portugol:
                            if lowercase_name in ('funcao', 'procedimento'):
                                in_function_block = True
                                next_token_is_function = True
                        else:
                            if (in_function_block or in_property_block) and \
                                    lowercase_name in self.BLOCK_KEYWORDS and \
                                    brace_balance[0] <= 0 and \
                                    brace_balance[1] <= 0:
                                in_function_block = False
                                in_property_block = False
                                brace_balance = [0, 0]
                                block_labels = set()
                            if lowercase_name in ('label', 'goto'):
                                collect_labels = True
                            elif lowercase_name == 'asm':
                                stack.append('asm')
                            elif lowercase_name == 'property':
                                in_property_block = True
                                next_token_is_property = True
                            elif lowercase_name in ('procedure', 'operator',
                                                    'function', 'constructor',
                                                    'destructor'):
                                in_function_block = True
                                next_token_is_function = True
                    # we are in a function block and the current name
                    # is in the set of registered modifiers. highlight
                    # it as pseudo keyword
                    elif not self.is_portugol and in_function_block and \
                            lowercase_name in self.FUNCTION_MODIFIERS:
                        token = Keyword.Pseudo
                    # if we are in a property highlight some more
                    # modifiers
                    elif not self.is_portugol and in_property_block and \
                            lowercase_name in ('read', 'write'):
                        token = Keyword.Pseudo
                        next_token_is_function = True
                    # if the last iteration set next_token_is_function
                    # to true we now want this name highlighted as
                    # function. so do that and reset the state
                    elif next_token_is_function:
                        # Look if the next token is a dot. If yes it's
                        # not a function, but a class name and the
                        # part after the dot a function name
                        if not self.is_portugol and scanner.test(r'\s*\.\s*'):
                            token = Name.Class
                        # it's not a dot, our job is done
                        else:
                            token = Name.Function
                            next_token_is_function = False
                            if self.is_portugol:
                                block_labels.add(scanner.match.lower())
                    # same for properties
                    elif not self.is_portugol and next_token_is_property:
                        token = Name.Property
                        next_token_is_property = False
                    # Highlight this token as label and add it
                    # to the list of known labels
                    elif not self.is_portugol and collect_labels:
                        token = Name.Label
                        block_labels.add(scanner.match.lower())
                    # name is in list of known labels
                    elif lowercase_name in block_labels:
                        token = Name.Label
                    elif self.is_portugol and lowercase_name in self.PORTUGOL_BUILTIN_TYPES:
                        token = Keyword.Type
                    elif not self.is_portugol and lowercase_name in self.BUILTIN_TYPES:
                        token = Keyword.Type
                    elif not self.is_portugol and lowercase_name in self.DIRECTIVES:
                        token = Keyword.Pseudo
                    # builtins are just builtins if the token
                    # before isn't a dot
                    elif not self.is_portugol and not was_dot and lowercase_name in self.builtins:
                        token = Name.Builtin
                    else:
                        token = Name
                elif self.is_portugol and scanner.scan(r"\""):
                    token = String
                    stack.append('string')
                elif not self.is_portugol and scanner.scan(r"'"):
                    token = String
                    stack.append('string')
                elif not self.is_portugol and scanner.scan(r'\#(\d+|\$[0-9A-Fa-f]+)'):
                    token = String.Char
                elif not self.is_portugol and scanner.scan(r'\$[0-9A-Fa-f]+'):
                    token = Number.Hex
                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
                    token = Number.Integer
                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
                    token = Number.Float
                else:
                    # if the stack depth is deeper than once, pop
                    if len(stack) > 1:
                        stack.pop()
                    scanner.get_char()
            elif stack[-1] == 'string':
                # Portugol strings are double-quoted; Pascal strings single-quoted.
                if self.is_portugol:
                    if scanner.scan(r"''"):
                        token = String.Escape
                    elif scanner.scan(r"\""):
                        token = String
                        stack.pop()
                    elif scanner.scan(r"[^\"]*"):
                        token = String
                    else:
                        scanner.get_char()
                        stack.pop()
                else:
                    if scanner.scan(r"''"):
                        token = String.Escape
                    elif scanner.scan(r"'"):
                        token = String
                        stack.pop()
                    elif scanner.scan(r"[^']*"):
                        token = String
                    else:
                        scanner.get_char()
                        stack.pop()
            elif not self.is_portugol and stack[-1] == 'asm':
                if scanner.scan(r'\s+'):
                    token = Whitespace
                elif scanner.scan(r'end'):
                    token = Keyword
                    stack.pop()
                elif scanner.scan(r'\{.*?\}|\(\*.*?\*\)'):
                    if scanner.match.startswith('$'):
                        token = Comment.Preproc
                    else:
                        token = Comment.Multiline
                elif scanner.scan(r'//.*?$'):
                    token = Comment.Single
                elif scanner.scan(r"'"):
                    token = String
                    stack.append('string')
                elif scanner.scan(r'@@[A-Za-z_][A-Za-z_0-9]*'):
                    token = Name.Label
                elif scanner.scan(r'[A-Za-z_][A-Za-z_0-9]*'):
                    lowercase_name = scanner.match.lower()
                    if lowercase_name in self.ASM_INSTRUCTIONS:
                        token = Keyword
                    elif lowercase_name in self.ASM_REGISTERS:
                        token = Name.Builtin
                    else:
                        token = Name
                elif scanner.scan(r'[-+*\/=<>:;,.@\^]+'):
                    token = Operator
                elif scanner.scan(r'[\(\)\[\]]+'):
                    token = Punctuation
                elif scanner.scan(r'\$[0-9A-Fa-f]+'):
                    token = Number.Hex
                elif scanner.scan(r'\d+(?![eE]|\.[^.])'):
                    token = Number.Integer
                elif scanner.scan(r'\d+(\.\d+([eE][+-]?\d+)?|[eE][+-]?\d+)'):
                    token = Number.Float
                else:
                    scanner.get_char()
                    stack.pop()
            # save the dot!!!11
            if not self.is_portugol and scanner.match.strip():
                was_dot = scanner.match == '.'
            yield scanner.start_pos, token, scanner.match or ''
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@lexers@pascal.py@.PATH_END.py
|
{
"filename": "metaobject.py",
"repo_name": "astroufsc/chimera",
"repo_path": "chimera_extracted/chimera-master/src/chimera/core/metaobject.py",
"type": "Python"
}
|
#! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# chimera - observatory automation system
# Copyright (C) 2006-2007 P. Henrique Silva <henrique@astro.ufsc.br>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import threading
from chimera.core.methodwrapper import MethodWrapper, MethodWrapperDispatcher
from chimera.core.eventwrapper import EventWrapperDispatcher
from chimera.core.lockwrapper import LockWrapper, LockWrapperDispatcher
from chimera.core.rwlock import ReadWriteLock
from chimera.core.constants import (
EVENT_ATTRIBUTE_NAME,
CONFIG_ATTRIBUTE_NAME,
LOCK_ATTRIBUTE_NAME,
EVENTS_ATTRIBUTE_NAME,
METHODS_ATTRIBUTE_NAME,
INSTANCE_MONITOR_ATTRIBUTE_NAME,
RWLOCK_ATTRIBUTE_NAME,
)
__all__ = ["MetaObject"]
class MetaObject(type):
    """Metaclass that prepares Chimera objects.

    For every new class it:

    * merges the ``__config__`` dicts of all base classes (the class' own
      entries override inherited ones);
    * wraps every public callable so methods, events and auto-locked
      methods are routed through the appropriate dispatcher;
    * records helper attributes (event/method name lists) used by remote
      reflection (mainly the Console);
    * attaches the per-class instance monitor and read/write lock.
    """

    def __new__(meta, clsname, bases, _dict):
        # join __config__ dicts; the class' own configuration overrides the
        # base classes' configs
        config = {}
        for base in bases:
            base_config = base.__dict__.get(CONFIG_ATTRIBUTE_NAME)
            # 'is dict' (exact type, not isinstance) preserves the original
            # behaviour of ignoring dict subclasses
            if type(base_config) is dict:
                config = dict(config, **base_config)

        # update our class with all configs got from bases; if none defined,
        # our config will be equal to the sum from the bases
        _dict[CONFIG_ATTRIBUTE_NAME] = dict(
            config, **_dict.get(CONFIG_ATTRIBUTE_NAME, {})
        )

        # wrap public callables and events; iterate over a snapshot because
        # the loop rebinds values in _dict while it runs
        events = []
        methods = []
        for name, obj in list(_dict.items()):
            if callable(obj) and not name.startswith("_"):
                if hasattr(obj, EVENT_ATTRIBUTE_NAME):
                    # events
                    _dict[name] = MethodWrapper(obj, dispatcher=EventWrapperDispatcher)
                    events.append(name)
                elif hasattr(obj, LOCK_ATTRIBUTE_NAME):
                    # auto-locked methods
                    _dict[name] = LockWrapper(obj, dispatcher=LockWrapperDispatcher)
                    methods.append(name)
                else:
                    # plain public methods
                    _dict[name] = MethodWrapper(obj, dispatcher=MethodWrapperDispatcher)
                    methods.append(name)

        # save our helper attributes to allow better remote reflection
        # (mainly to Console)
        _dict[EVENTS_ATTRIBUTE_NAME] = events
        _dict[METHODS_ATTRIBUTE_NAME] = methods

        # our great Monitors (put here to force use of it)
        _dict[INSTANCE_MONITOR_ATTRIBUTE_NAME] = threading.Condition(threading.RLock())
        _dict[RWLOCK_ATTRIBUTE_NAME] = ReadWriteLock()

        return super(MetaObject, meta).__new__(meta, clsname, bases, _dict)
|
astroufscREPO_NAMEchimeraPATH_START.@chimera_extracted@chimera-master@src@chimera@core@metaobject.py@.PATH_END.py
|
{
"filename": "builder.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/colorLib/builder.py",
"type": "Python"
}
|
"""
colorLib.builder: Build COLR/CPAL tables from scratch
"""
import collections
import copy
import enum
from functools import partial
from math import ceil, log
from typing import (
Any,
Dict,
Generator,
Iterable,
List,
Mapping,
Optional,
Sequence,
Tuple,
Type,
TypeVar,
Union,
)
from fontTools.misc.arrayTools import intRect
from fontTools.misc.fixedTools import fixedToFloat
from fontTools.misc.treeTools import build_n_ary_tree
from fontTools.ttLib.tables import C_O_L_R_
from fontTools.ttLib.tables import C_P_A_L_
from fontTools.ttLib.tables import _n_a_m_e
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode
from .errors import ColorLibError
from .geometry import round_start_circle_stable_containment
from .table_builder import BuildCallback, TableBuilder
# TODO move type aliases to colorLib.types?
T = TypeVar("T")
_Kwargs = Mapping[str, Any]
# A paint input may be a palette/int shorthand, a kwargs mapping, an already
# built ot.Paint, or a ("FormatName", value) tuple.
_PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]]
_PaintInputList = Sequence[_PaintInput]
_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]]
_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]]
_ClipBoxInput = Union[
    Tuple[int, int, int, int, int],  # format 1, variable
    Tuple[int, int, int, int],  # format 0, non-variable
    ot.ClipBox,
]
# A PaintColrLayers record addresses at most 255 layers (uint8 NumLayers).
MAX_PAINT_COLR_LAYER_COUNT = 255
_DEFAULT_ALPHA = 1.0
# longest run of layers considered when looking for reusable slices
_MAX_REUSE_LEN = 32
def _beforeBuildPaintRadialGradient(paint, source):
    """Pre-build hook: nudge the start circle so that rounding cannot push
    it outside the end circle, which would cause an abrupt visual change."""
    # TODO apparently no builder_test confirms this works (?)
    end_center = (source["x1"], source["y1"])
    end_radius = source["r1"]
    # avoid abrupt change after rounding when c0 is near c1's perimeter
    start = round_start_circle_stable_containment(
        (source["x0"], source["y0"]), source["r0"], end_center, end_radius
    )
    # update source to ensure paint is built with corrected values
    source["x0"], source["y0"] = start.centre
    source["r0"] = start.radius
    source["x1"], source["y1"] = end_center
    source["r1"] = end_radius
    return paint, source
def _defaultColorStop():
    """Create a ColorStop with Alpha preset to the default (1.0)."""
    stop = ot.ColorStop()
    stop.Alpha = _DEFAULT_ALPHA
    return stop
def _defaultVarColorStop():
    """Create a VarColorStop with Alpha preset to the default (1.0)."""
    stop = ot.VarColorStop()
    stop.Alpha = _DEFAULT_ALPHA
    return stop
def _defaultColorLine():
    """Create a ColorLine with the default Extend mode (PAD)."""
    line = ot.ColorLine()
    line.Extend = ExtendMode.PAD
    return line
def _defaultVarColorLine():
    """Create a VarColorLine with the default Extend mode (PAD)."""
    line = ot.VarColorLine()
    line.Extend = ExtendMode.PAD
    return line
def _defaultPaintSolid():
    """Create a Paint with Alpha preset to the default (1.0)."""
    solid = ot.Paint()
    solid.Alpha = _DEFAULT_ALPHA
    return solid
def _buildPaintCallbacks():
    """Return the TableBuilder callback map shared by the COLRv1 builders.

    Registers the radial-gradient start-circle fix-up hook and the factories
    that pre-populate default field values (Alpha, Extend) on new records.
    """
    return {
        (
            BuildCallback.BEFORE_BUILD,
            ot.Paint,
            ot.PaintFormat.PaintRadialGradient,
        ): _beforeBuildPaintRadialGradient,
        (
            BuildCallback.BEFORE_BUILD,
            ot.Paint,
            ot.PaintFormat.PaintVarRadialGradient,
        ): _beforeBuildPaintRadialGradient,
        (BuildCallback.CREATE_DEFAULT, ot.ColorStop): _defaultColorStop,
        (BuildCallback.CREATE_DEFAULT, ot.VarColorStop): _defaultVarColorStop,
        (BuildCallback.CREATE_DEFAULT, ot.ColorLine): _defaultColorLine,
        (BuildCallback.CREATE_DEFAULT, ot.VarColorLine): _defaultVarColorLine,
        (
            BuildCallback.CREATE_DEFAULT,
            ot.Paint,
            ot.PaintFormat.PaintSolid,
        ): _defaultPaintSolid,
        (
            BuildCallback.CREATE_DEFAULT,
            ot.Paint,
            ot.PaintFormat.PaintVarSolid,
        ): _defaultPaintSolid,
    }
def populateCOLRv0(
    table: ot.COLR,
    colorGlyphsV0: _ColorGlyphsV0Dict,
    glyphMap: Optional[Mapping[str, int]] = None,
):
    """Build v0 color layers and add to existing COLR table.

    Args:
        table: a raw ``otTables.COLR()`` object (not ttLib's ``table_C_O_L_R_``).
        colorGlyphsV0: map of base glyph names to lists of (layer glyph names,
            color palette index) tuples. Can be empty.
        glyphMap: a map from glyph names to glyph indices, as returned from
            ``TTFont.getReverseGlyphMap()``, to optionally sort base records by GID.
    """
    items = colorGlyphsV0.items()
    if glyphMap is not None:
        items = sorted(items, key=lambda item: glyphMap[item[0]])

    base_records = []
    layer_records = []
    for base_name, layers in items:
        base = ot.BaseGlyphRecord()
        base.BaseGlyph = base_name
        base.FirstLayerIndex = len(layer_records)
        base.NumLayers = len(layers)
        base_records.append(base)

        for layer_name, palette_index in layers:
            layer = ot.LayerRecord()
            layer.LayerGlyph = layer_name
            layer.PaletteIndex = palette_index
            layer_records.append(layer)

    table.BaseGlyphRecordArray = table.LayerRecordArray = None
    if base_records:
        table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray()
        table.BaseGlyphRecordArray.BaseGlyphRecord = base_records
    if layer_records:
        table.LayerRecordArray = ot.LayerRecordArray()
        table.LayerRecordArray.LayerRecord = layer_records
    table.BaseGlyphRecordCount = len(base_records)
    table.LayerRecordCount = len(layer_records)
def buildCOLR(
    colorGlyphs: _ColorGlyphsDict,
    version: Optional[int] = None,
    *,
    glyphMap: Optional[Mapping[str, int]] = None,
    varStore: Optional[ot.VarStore] = None,
    varIndexMap: Optional[ot.DeltaSetIndexMap] = None,
    clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None,
    allowLayerReuse: bool = True,
) -> C_O_L_R_.table_C_O_L_R_:
    """Build COLR table from color layers mapping.

    Args:
        colorGlyphs: map of base glyph name to, either list of (layer glyph name,
            color palette index) tuples for COLRv0; or a single ``Paint`` (dict) or
            list of ``Paint`` for COLRv1.
        version: the version of COLR table. If None, the version is determined
            by the presence of COLRv1 paints or variation data (varStore), which
            require version 1; otherwise, if all base glyphs use only simple color
            layers, version 0 is used.
        glyphMap: a map from glyph names to glyph indices, as returned from
            TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
        varStore: Optional ItemVarationStore for deltas associated with v1 layer.
        varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layer.
        clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples:
            (xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase).
        allowLayerReuse: when True, identical runs of v1 layers are shared.

    Returns:
        A new COLR table.
    """
    # ttLib wrapper that will carry the raw otTables COLR built below
    self = C_O_L_R_.table_C_O_L_R_()

    if varStore is not None and version == 0:
        raise ValueError("Can't add VarStore to COLRv0")

    if version in (None, 0) and not varStore:
        # split color glyphs into v0 and v1 and encode separately
        colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs)
        if version == 0 and colorGlyphsV1:
            raise ValueError("Can't encode COLRv1 glyphs in COLRv0")
    else:
        # unless explicitly requested for v1 or have variations, in which case
        # we encode all color glyph as v1
        colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs

    colr = ot.COLR()

    populateCOLRv0(colr, colorGlyphsV0, glyphMap)

    colr.LayerList, colr.BaseGlyphList = buildColrV1(
        colorGlyphsV1,
        glyphMap,
        allowLayerReuse=allowLayerReuse,
    )

    if version is None:
        # infer: v1 whenever there are v1 glyphs or variation data
        version = 1 if (varStore or colorGlyphsV1) else 0
    elif version not in (0, 1):
        raise NotImplementedError(version)
    self.version = colr.Version = version

    if version == 0:
        self.ColorLayers = self._decompileColorLayersV0(colr)
    else:
        colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None
        colr.VarIndexMap = varIndexMap
        colr.VarStore = varStore
        self.table = colr

    return self
def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList:
    """Build a format-1 ClipList from a {glyph name: clip box} mapping."""
    clip_list = ot.ClipList()
    clip_list.Format = 1
    clip_list.clips = {
        glyph_name: buildClipBox(box) for glyph_name, box in clipBoxes.items()
    }
    return clip_list
def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox:
    """Coerce a 4- or 5-tuple (or pass through an ot.ClipBox) to an ot.ClipBox."""
    if isinstance(clipBox, ot.ClipBox):
        return clipBox
    n = len(clipBox)
    clip = ot.ClipBox()
    if n not in (4, 5):
        raise ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {n}")
    clip.xMin, clip.yMin, clip.xMax, clip.yMax = intRect(clipBox[:4])
    if n == 5:
        # format 2: variable clip box carrying a VarIndexBase
        clip.Format = 2
        clip.VarIndexBase = int(clipBox[4])
    else:
        clip.Format = 1
    return clip
class ColorPaletteType(enum.IntFlag):
    """CPAL v1 palette type flags (bits 0-1; all other bits are reserved)."""

    USABLE_WITH_LIGHT_BACKGROUND = 0x0001
    USABLE_WITH_DARK_BACKGROUND = 0x0002

    @classmethod
    def _missing_(cls, value):
        # enforce reserved bits: reject negative values and any bit above 0-1
        if isinstance(value, int) and (value < 0 or value & 0xFFFC != 0):
            raise ValueError(f"{value} is not a valid {cls.__name__}")
        return super()._missing_(value)
# A label is either absent (None), a plain string (default English label),
# or a mapping of BCP 47 language tags to localized strings,
# e.g. {'en': 'abc', 'de': 'xyz'}.
_OptionalLocalizedString = Union[None, str, Dict[str, str]]
def buildPaletteLabels(
    labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e
) -> List[Optional[int]]:
    """Map palette labels to name-table IDs (NO_NAME_ID for missing labels)."""

    def name_id(label):
        if label is None:
            return C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
        if isinstance(label, dict):
            return nameTable.addMultilingualName(label, mac=False)
        # plain string: treat it as the default English label
        return nameTable.addMultilingualName({"en": label}, mac=False)

    return [name_id(label) for label in labels]
def buildCPAL(
    palettes: Sequence[Sequence[Tuple[float, float, float, float]]],
    paletteTypes: Optional[Sequence[ColorPaletteType]] = None,
    paletteLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
    paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
    nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None,
) -> C_P_A_L_.table_C_P_A_L_:
    """Build CPAL table from list of color palettes.

    Args:
        palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats
            in the range [0..1].
        paletteTypes: optional list of ColorPaletteType, one for each palette.
        paletteLabels: optional list of palette labels. Each label can be either:
            None (no label), a string (for default English labels), or a
            localized string (as a dict keyed with BCP47 language codes).
        paletteEntryLabels: optional list of palette entry labels, one for each
            palette entry (see paletteLabels).
        nameTable: optional name table where to store palette and palette entry
            labels. Required if either paletteLabels or paletteEntryLabels is set.

    Return:
        A new CPAL v0 or v1 table, if custom palette types or labels are specified.
    """
    # every palette must define the same number of entries
    if len({len(p) for p in palettes}) != 1:
        raise ColorLibError("color palettes have different lengths")

    if (paletteLabels or paletteEntryLabels) and not nameTable:
        raise TypeError(
            "nameTable is required if palette or palette entries have labels"
        )

    cpal = C_P_A_L_.table_C_P_A_L_()
    cpal.numPaletteEntries = len(palettes[0])

    cpal.palettes = []
    for i, palette in enumerate(palettes):
        colors = []
        for j, color in enumerate(palette):
            if not isinstance(color, tuple) or len(color) != 4:
                raise ColorLibError(
                    f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}"
                )
            if any(v > 1 or v < 0 for v in color):
                raise ColorLibError(
                    f"palette[{i}][{j}] has invalid out-of-range [0..1] color: {color!r}"
                )
            # input colors are RGBA, CPAL encodes them as BGRA
            red, green, blue, alpha = color
            colors.append(
                C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha)))
            )
        cpal.palettes.append(colors)

    # any custom palette types or labels bump the table to CPAL v1
    if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)):
        cpal.version = 1

        if paletteTypes is not None:
            if len(paletteTypes) != len(palettes):
                raise ColorLibError(
                    f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}"
                )
            cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes]
        else:
            cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len(
                palettes
            )

        if paletteLabels is not None:
            if len(paletteLabels) != len(palettes):
                raise ColorLibError(
                    f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}"
                )
            cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable)
        else:
            cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes)

        if paletteEntryLabels is not None:
            if len(paletteEntryLabels) != cpal.numPaletteEntries:
                raise ColorLibError(
                    f"Expected {cpal.numPaletteEntries} paletteEntryLabels, "
                    f"got {len(paletteEntryLabels)}"
                )
            cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable)
        else:
            cpal.paletteEntryLabels = [
                C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
            ] * cpal.numPaletteEntries
    else:
        cpal.version = 0

    return cpal
# COLR v1 tables
# See draft proposal at: https://github.com/googlefonts/colr-gradients-spec
def _is_colrv0_layer(layer: Any) -> bool:
    # A COLRv0 layer is any 2-element sequence (tuple or list) whose first
    # element is a str (the layerGlyph) and whose second element is an int
    # (the CPAL paletteIndex).
    # https://github.com/googlefonts/ufo2ft/issues/426
    try:
        layer_glyph, palette_index = layer
    except (TypeError, ValueError):
        return False
    return isinstance(layer_glyph, str) and isinstance(palette_index, int)
def _split_color_glyphs_by_version(
    colorGlyphs: _ColorGlyphsDict,
) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]:
    """Partition color glyphs into (v0-encodable, v1-only) dicts."""
    v0, v1 = {}, {}
    for base_glyph, layers in colorGlyphs.items():
        bucket = v0 if all(_is_colrv0_layer(layer) for layer in layers) else v1
        bucket[base_glyph] = layers
    # sanity check: nothing lost, nothing duplicated
    assert set(colorGlyphs) == (set(v0) | set(v1))
    return v0, v1
def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]:
    # TODO feels like something itertools might have already
    # Yield every candidate [lbound, ubound) slice of at least 2 layers,
    # capped at _MAX_REUSE_LEN (reuse of very large #s of layers is
    # relatively unlikely); otData handles single-record duplication.
    for lbound in range(num_layers):
        max_ubound = min(num_layers + 1, lbound + 2 + _MAX_REUSE_LEN)
        for ubound in range(lbound + 2, max_ubound):
            yield (lbound, ubound)
class LayerReuseCache:
    """Remembers runs of layers already stored in the LayerList so that later
    PaintColrLayers can point at an existing run instead of duplicating it."""

    reusePool: Mapping[Tuple[Any, ...], int]  # layer-slice key -> first layer index
    tuples: Mapping[int, Tuple[Any, ...]]  # id(paint) -> hashable form of the paint
    keepAlive: List[ot.Paint]  # we need id to remain valid

    def __init__(self):
        self.reusePool = {}
        self.tuples = {}
        self.keepAlive = []

    def _paint_tuple(self, paint: ot.Paint):
        """Return a hashable, recursively-tupled snapshot of *paint*."""

        # start simple, who even cares about cyclic graphs or interesting field types
        def _tuple_safe(value):
            if isinstance(value, enum.Enum):
                return value
            elif hasattr(value, "__dict__"):
                return tuple(
                    (k, _tuple_safe(v)) for k, v in sorted(value.__dict__.items())
                )
            elif isinstance(value, collections.abc.MutableSequence):
                return tuple(_tuple_safe(e) for e in value)
            return value

        # Cache the tuples for individual Paint instead of the whole sequence
        # because the seq could be a transient slice
        result = self.tuples.get(id(paint), None)
        if result is None:
            result = _tuple_safe(paint)
            self.tuples[id(paint)] = result
            self.keepAlive.append(paint)
        return result

    def _as_tuple(self, paints: Sequence[ot.Paint]) -> Tuple[Any, ...]:
        """Hashable key for a run of paints, used to index ``reusePool``."""
        return tuple(self._paint_tuple(p) for p in paints)

    def try_reuse(self, layers: List[ot.Paint]) -> List[ot.Paint]:
        """Repeatedly replace the longest previously-seen slice of *layers*
        with a PaintColrLayers pointing at the stored run; return the
        (possibly shortened) layer list."""
        found_reuse = True
        while found_reuse:
            found_reuse = False

            # longest candidate slices first
            ranges = sorted(
                _reuse_ranges(len(layers)),
                key=lambda t: (t[1] - t[0], t[1], t[0]),
                reverse=True,
            )
            for lbound, ubound in ranges:
                reuse_lbound = self.reusePool.get(
                    self._as_tuple(layers[lbound:ubound]), -1
                )
                if reuse_lbound == -1:
                    continue
                new_slice = ot.Paint()
                new_slice.Format = int(ot.PaintFormat.PaintColrLayers)
                new_slice.NumLayers = ubound - lbound
                new_slice.FirstLayerIndex = reuse_lbound
                layers = layers[:lbound] + [new_slice] + layers[ubound:]
                found_reuse = True
                break
        return layers

    def add(self, layers: List[ot.Paint], first_layer_index: int):
        """Register every candidate slice of *layers* for future reuse."""
        for lbound, ubound in _reuse_ranges(len(layers)):
            self.reusePool[self._as_tuple(layers[lbound:ubound])] = (
                lbound + first_layer_index
            )
class LayerListBuilder:
    """Accumulates all COLRv1 layers into one flat LayerList, collapsing
    over-long PaintColrLayers into trees and (optionally) sharing repeated
    layer runs via a LayerReuseCache."""

    layers: List[ot.Paint]
    cache: LayerReuseCache  # None when layer reuse is disabled
    allowLayerReuse: bool

    def __init__(self, *, allowLayerReuse=True):
        self.layers = []
        if allowLayerReuse:
            self.cache = LayerReuseCache()
        else:
            self.cache = None

        # We need to intercept construction of PaintColrLayers
        callbacks = _buildPaintCallbacks()
        callbacks[
            (
                BuildCallback.BEFORE_BUILD,
                ot.Paint,
                ot.PaintFormat.PaintColrLayers,
            )
        ] = self._beforeBuildPaintColrLayers
        self.tableBuilder = TableBuilder(callbacks)

    # COLR layers is unusual in that it modifies shared state
    # so we need a callback into an object
    def _beforeBuildPaintColrLayers(self, dest, source):
        # Sketchy gymnastics: a sequence input will have dropped it's layers
        # into NumLayers; get it back
        if isinstance(source.get("NumLayers", None), collections.abc.Sequence):
            layers = source["NumLayers"]
        else:
            layers = source["Layers"]

        # Convert maps seqs or whatever into typed objects
        layers = [self.buildPaint(l) for l in layers]

        # No reason to have a colr layers with just one entry
        if len(layers) == 1:
            return layers[0], {}

        if self.cache is not None:
            # Look for reuse, with preference to longer sequences
            # This may make the layer list smaller
            layers = self.cache.try_reuse(layers)

        # The layer list is now final; if it's too big we need to tree it
        is_tree = len(layers) > MAX_PAINT_COLR_LAYER_COUNT
        layers = build_n_ary_tree(layers, n=MAX_PAINT_COLR_LAYER_COUNT)

        # We now have a tree of sequences with Paint leaves.
        # Convert the sequences into PaintColrLayers.
        def listToColrLayers(layer):
            if isinstance(layer, collections.abc.Sequence):
                return self.buildPaint(
                    {
                        "Format": ot.PaintFormat.PaintColrLayers,
                        "Layers": [listToColrLayers(l) for l in layer],
                    }
                )
            return layer

        layers = [listToColrLayers(l) for l in layers]

        # No reason to have a colr layers with just one entry
        if len(layers) == 1:
            return layers[0], {}

        paint = ot.Paint()
        paint.Format = int(ot.PaintFormat.PaintColrLayers)
        paint.NumLayers = len(layers)
        paint.FirstLayerIndex = len(self.layers)
        self.layers.extend(layers)

        # Register our parts for reuse provided we aren't a tree
        # If we are a tree the leaves registered for reuse and that will suffice
        if self.cache is not None and not is_tree:
            self.cache.add(layers, paint.FirstLayerIndex)

        # we've fully built dest; empty source prevents generalized build from kicking in
        return paint, {}

    def buildPaint(self, paint: _PaintInput) -> ot.Paint:
        """Convert any accepted paint input into a typed ot.Paint."""
        return self.tableBuilder.build(ot.Paint, paint)

    def build(self) -> Optional[ot.LayerList]:
        """Return the accumulated LayerList, or None if no layers were added."""
        if not self.layers:
            return None
        layers = ot.LayerList()
        layers.LayerCount = len(self.layers)
        layers.Paint = self.layers
        return layers
def buildBaseGlyphPaintRecord(
    baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput
) -> ot.BaseGlyphList:
    """Build one BaseGlyphPaintRecord, building its Paint via *layerBuilder*."""
    record = ot.BaseGlyphPaintRecord()
    record.BaseGlyph = baseGlyph
    record.Paint = layerBuilder.buildPaint(paint)
    return record
def _format_glyph_errors(errors: Mapping[str, Exception]) -> str:
    """Render per-glyph build errors as an indented, glyph-sorted listing."""
    return "\n".join(
        f"  {glyph} => {type(error).__name__}: {error}"
        for glyph, error in sorted(errors.items())
    )
def buildColrV1(
    colorGlyphs: _ColorGlyphsDict,
    glyphMap: Optional[Mapping[str, int]] = None,
    *,
    allowLayerReuse: bool = True,
) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]:
    """Build the COLRv1 LayerList and BaseGlyphList from a color glyphs map.

    Args:
        colorGlyphs: map of base glyph name to a Paint input (or list thereof).
        glyphMap: optional glyph name -> GID map used to sort base records.
        allowLayerReuse: when True, identical runs of layers are shared.

    Raises:
        ColorLibError: aggregating every per-glyph build failure.
    """
    if glyphMap is not None:
        colorGlyphItems = sorted(
            colorGlyphs.items(), key=lambda item: glyphMap[item[0]]
        )
    else:
        colorGlyphItems = colorGlyphs.items()

    # build all glyphs before reporting, so every failure is collected
    errors = {}
    baseGlyphs = []
    layerBuilder = LayerListBuilder(allowLayerReuse=allowLayerReuse)
    for baseGlyph, paint in colorGlyphItems:
        try:
            baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint))

        except (ColorLibError, OverflowError, ValueError, TypeError) as e:
            errors[baseGlyph] = e

    if errors:
        failed_glyphs = _format_glyph_errors(errors)
        exc = ColorLibError(f"Failed to build BaseGlyphList:\n{failed_glyphs}")
        # keep the individual exceptions available to callers
        exc.errors = errors
        raise exc from next(iter(errors.values()))

    layers = layerBuilder.build()
    glyphs = ot.BaseGlyphList()
    glyphs.BaseGlyphCount = len(baseGlyphs)
    glyphs.BaseGlyphPaintRecord = baseGlyphs
    return (layers, glyphs)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@fontTools@colorLib@builder.py@.PATH_END.py
|
{
"filename": "MF_example.ipynb",
"repo_name": "ander-son-almeida/DashboardOCmass",
"repo_path": "DashboardOCmass_extracted/DashboardOCmass-main/examples/MF_example.ipynb",
"type": "Jupyter Notebook"
}
|
In this notebook we will demonstrate how we build the histograms of the individual masses of the OCs. This histogram is used to calculate the mass function of each cluster through a segmented function and with the aid of the curve_fit function to determine the best parameters (alpha high, alpha low and Mc).
We will use the Pleiades cluster (Melotte_22), whose membership list with individual stellar masses can be downloaded from GitHub or through our Dashboard:
🔸 GitHub: https://github.com/ander-son-almeida/DashboardOCmass/tree/main/data/membership_data_edr3
🔸 Dashboard: https://ocmass.streamlit.app/Integrated_MF
```python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statistics as st
from scipy.optimize import curve_fit
```
```python
# read the membership data (numpy structured array with per-star masses)
name = 'Melotte_22' #Pleiades
# NOTE(review): the filename is hard-coded; presumably it should be built
# from `name` — confirm before reusing this cell for another cluster.
data_obs = np.load('Melotte_22.npy')
```
```python
# function to fit MF - segmented function
def segmented_linear(logm, logMc=0., slopeA=0., offsetA=1., slopeB=-1.0):
    """Segmented (two-slope) linear function used to fit the mass function.

    logm is the array of log-mass bin centres; logMc, slopeA, offsetA and
    slopeB are the free parameters determined by the fit.  The low-mass
    segment's offset is derived from offsetA so the two lines are
    continuous at logMc.
    """
    # intercept of the low-mass segment, forcing continuity at logMc
    low_offset = (slopeA * logMc + offsetA) - (slopeB * logMc)
    values = [
        slopeA * val + offsetA if val > logMc else slopeB * val + low_offset
        for val in logm
    ]
    return np.array(values)
```
```python
# extrapolation for white dwarfs
# https://ui.adsabs.harvard.edu/abs/2018ApJ...866...21C/abstract
def MFR(initial_mass):
    """Initial-final mass relation for white dwarfs (Cummings et al. 2018).

    Piecewise-linear in the progenitor mass; masses outside the covered
    range (0.87, 8.2) map to a final mass of 0.
    """
    m = initial_mass
    final_mass = np.zeros(m.size)
    low = (m > 0.87) & (m < 2.8)
    mid = (m >= 2.8) & (m < 3.65)
    high = (m >= 3.65) & (m < 8.2)
    final_mass[low] = 0.0873 * m[low] + 0.476
    final_mass[mid] = 0.181 * m[mid] + 0.21
    final_mass[high] = 0.0835 * m[high] + 0.565
    return final_mass
```
```python
def fit_MF(mass, title_str=''):
    """Fit the segmented mass function to a histogram of log10(mass).

    Parameters
    ----------
    mass : array
        Individual stellar masses; non-positive entries are discarded.
    title_str : str
        Unused here; presumably kept for compatibility with older calls
        — confirm before removing.

    Returns
    -------
    Best-fit slopes (alpha high/low), Mc and offset with their 1-sigma
    errors, the histogram points used in the fit, the extrapolated
    non-visible mass and white-dwarf mass, and the raw popt array.
    """
    # histogram MF
    #######################################################################
    mass = np.log10(mass[mass > 0.])
    mass_cnt, mass_bins = np.histogram(mass,bins='auto')
    # Poisson errors on counts, propagated to log10 (1/ln(10) ~= 1/2.303)
    mass_cnt_er = np.sqrt(mass_cnt)
    mass_cnt_er = ((mass_cnt_er/mass_cnt)/2.303)
    mass_cnt = np.log10(mass_cnt)
    mass_bin_ctr = mass_bins[:-1] + np.diff(mass_bins)/2
    # drop empty bins: log10(0) = -inf fails the >= 0 cut below
    mass_bin_ctr = mass_bin_ctr[mass_cnt >= 0]
    mass_cnt_er = mass_cnt_er[mass_cnt >= 0]
    mass_cnt = mass_cnt[mass_cnt >= 0]
    #applying curve_fit in the segmented function
    #######################################################################
    # parameter order: [logMc, alpha_high (slopeA), offset, alpha_low (slopeB)]
    guess = [0.02,-1.1, 1.1, 0.3]
    popt, pcov = curve_fit(segmented_linear, mass_bin_ctr, mass_cnt, p0=guess,
                           sigma=mass_cnt_er,max_nfev=1e5,
                           bounds=([-0.2, -3, 0., 0.01], [0.2, 0.0, np.inf, 3.0]),
                           )
    #coefficients
    Mc = popt[0]
    alpha_high_mass = popt[1]
    offset = popt[2]
    alpha_low_mass = popt[3]
    # errors
    # 1-sigma uncertainties from the diagonal of the covariance matrix
    sigma = np.sqrt(np.diag(pcov))
    Mc_error = sigma[0]
    alpha_high_mass_error = sigma[1]
    offset_error = sigma[2]
    alpha_low_mass_error = sigma[3]
    # extrapolation
    #######################################################################
    # extend the fitted MF below the faintest bin, down to 0.09 Msun
    mass_pts = np.arange(np.log10(0.09),mass_bin_ctr.min(),np.diff(mass_bin_ctr)[0])
    Nstars = segmented_linear(mass_pts, Mc, alpha_high_mass, offset, alpha_low_mass)
    # total mass not visible
    inv_mass = (np.sum(10**mass_pts * 10**Nstars))
    # extend above the brightest bin, up to 7.5 Msun, for WD progenitors
    mass_pts = np.arange(mass_bin_ctr.max(), np.log10(7.5),np.diff(mass_bin_ctr)[0])
    Nstars = segmented_linear(mass_pts, Mc, alpha_high_mass, offset, alpha_low_mass)
    # total mass in WDs
    inv_mass_wd = (np.sum(MFR(10**mass_pts) * 10**Nstars))
    return alpha_high_mass, alpha_low_mass, Mc, offset, alpha_high_mass_error, \
           alpha_low_mass_error, Mc_error, offset_error, mass_cnt, mass_cnt_er, \
           mass_bin_ctr, inv_mass, inv_mass_wd, popt
```
This demo focuses on the integrated MF, for which we concatenate all populations: the individual stellar masses together with the masses of the binary-star companions.
```python
mass_intergrated = np.concatenate((data_obs['mass'],data_obs['comp_mass']), axis=0)
alpha_high_int, alpha_low_int, Mc_int, offset_int, alpha_high_er_int, \
alpha_low_er_int, Mc_er_int, offset_er_int, mass_cnt_int, mass_cnt_er_int, \
mass_bin_ctr_int, inv_mass_sing_int, inv_mass_wd_sing_int, popt_int = fit_MF(mass_intergrated,'Integrated')
title = name + '(Pleiades)' +'\n 'r'$\alpha_A = {} \pm {}$; $\alpha_B = {} \pm {}$; $M_c = {} \pm {}$'
```
```python
# graphic MF
fig = plt.figure()
ax = plt.axes()
ax.errorbar(mass_bin_ctr_int, mass_cnt_int, yerr = mass_cnt_er_int, fmt='o', capsize=5, mec='k',
ecolor='k',capthick=0.5,markeredgewidth=0.5,lw=0.5,zorder=1,label='data')
plt.title(title.format(
np.around(alpha_high_int, decimals=2),
np.around(alpha_high_er_int, decimals=2),
np.around(alpha_low_int, decimals=2),
np.around(alpha_low_er_int, decimals=2),
np.around(Mc_int, decimals=2),
np.around(Mc_er_int, decimals=2)))
xplot = np.linspace(mass_bin_ctr_int.min(),mass_bin_ctr_int.max(),1000)
plt.plot(xplot, segmented_linear(xplot, *popt_int), '--', label='two sided IMF',alpha = 0.8)
plt.xlabel('$log(M_{\odot})$')
plt.ylabel('$\\xi(log(M_{\odot}))$')
plt.show()
```

|
ander-son-almeidaREPO_NAMEDashboardOCmassPATH_START.@DashboardOCmass_extracted@DashboardOCmass-main@examples@MF_example.ipynb@.PATH_END.py
|
{
"filename": "find_halo_ids.py",
"repo_name": "SWIFTSIM/SOAP",
"repo_path": "SOAP_extracted/SOAP-master/tests/FLAMINGO/find_halo_ids.py",
"type": "Python"
}
|
#!/bin/env python3
#
# Find IDs of halos in a corner of the simulation box
#
# Run with e.g. `python3 ./find_halo_ids.py L1000N1800/HYDRO_FIDUCIAL 57 10`
#
import sys
import numpy as np
import h5py
def find_halo_indices(sim, snap_nr, boxsize):
    """Return catalogue indices of halos whose centre lies within `boxsize`
    of the box corner (all three coordinates below `boxsize`)."""
    soap_file = f"/cosma8/data/dp004/flamingo/Runs/{sim}/SOAP-HBT/halo_properties_{snap_nr:04d}.hdf5"
    with h5py.File(soap_file, "r") as f:
        centre = f["InputHalos/HaloCentre"][()]
        catalogue_index = f["InputHalos/HaloCatalogueIndex"][()]
        central_flag = f["InputHalos/IsCentral"][()]
    in_corner = np.all(centre < boxsize, axis=1)
    # Warn (by printing) when the selected corner contains no central halos.
    if np.sum(central_flag[in_corner]) == 0:
        print('No centrals loaded')
    return catalogue_index[in_corner]
if __name__ == "__main__":
    # Command-line arguments: simulation name, snapshot number, and the
    # size of the corner region to select (same length units as HaloCentre).
    sim = sys.argv[1]
    snap_nr = int(sys.argv[2])
    boxsize = float(sys.argv[3])
    indices = find_halo_indices(sim, snap_nr, boxsize)
    # Emit the matching halo indices as a single space-separated line.
    indices_list = " ".join([str(i) for i in indices])
    print(indices_list)
|
SWIFTSIMREPO_NAMESOAPPATH_START.@SOAP_extracted@SOAP-master@tests@FLAMINGO@find_halo_ids.py@.PATH_END.py
|
{
"filename": "obshelpers.py",
"repo_name": "jhparkastro/gpcal",
"repo_path": "gpcal_extracted/gpcal-master/gpcal/obshelpers.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 29 13:59:06 2021
@author: jpark
"""
import numpy as np
from astropy.coordinates import EarthLocation
import astropy.time as at
import datetime as dt
from AIPSData import AIPSUVData, AIPSImage
from Wizardry.AIPSData import AIPSUVData as WAIPSUVData
import aipsutil as au
from os import path
def uvprt(data, select):
    """
    Extract UV data from ParselTongue UVData.
    Args:
        data (ParselTongue UVData): an input ParselTongue UVData.
        select (str): type of the data that will be extracted. One of the
            names in `selectarr` below, or "all" for every array at once.
    Returns:
        list(s) of the selected UV data. Returns None when no valid
        visibilities are found (or when `select` matches nothing).
    """
    # Number of IFs from the frequency axis of the header.
    ifnum = data.header.naxis[3]
    dumu, dumv, ifarr, time, ant1, ant2, rrreal, rrimag, rrweight, llreal, llimag, llweight, rlreal, rlimag, rlweight, lrreal, lrimag, lrweight = \
        [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], [], []
    for ifn in range(ifnum):
        for visibility in data:
            # Keep only visibilities with positive weights in all four
            # correlations (RR, LL, RL, LR).
            if((visibility.visibility[ifn,0,0,2] > 0.) & (visibility.visibility[ifn,0,1,2] > 0.) & (visibility.visibility[ifn,0,2,2] > 0.) & (visibility.visibility[ifn,0,3,2] > 0.)):
                dumu.append(visibility.uvw[0])
                dumv.append(visibility.uvw[1])
                ifarr.append(ifn+1)
                time.append(visibility.time)
                rrreal.append(visibility.visibility[ifn,0,0,0])
                rrimag.append(visibility.visibility[ifn,0,0,1])
                rrweight.append(visibility.visibility[ifn,0,0,2])
                llreal.append(visibility.visibility[ifn,0,1,0])
                llimag.append(visibility.visibility[ifn,0,1,1])
                llweight.append(visibility.visibility[ifn,0,1,2])
                rlreal.append(visibility.visibility[ifn,0,2,0])
                rlimag.append(visibility.visibility[ifn,0,2,1])
                rlweight.append(visibility.visibility[ifn,0,2,2])
                lrreal.append(visibility.visibility[ifn,0,3,0])
                lrimag.append(visibility.visibility[ifn,0,3,1])
                lrweight.append(visibility.visibility[ifn,0,3,2])
                ant1.append(visibility.baseline[0])
                ant2.append(visibility.baseline[1])
    # NOTE(review): a vanishing sum of the RR reals is used as an
    # "empty data" proxy; exactly-cancelling values would also trigger it.
    if(np.sum(rrreal) == 0.):
        return
    selectarr = ["u", "v", "ifarr", "time", "ant1", "ant2", "rrreal", "rrimag", "rrweight", "llreal", "llimag", "llweight", "rlreal", "rlimag", "rlweight", "lrreal", "lrimag", "lrweight"]
    package = [dumu, dumv, ifarr, time, ant1, ant2, rrreal, rrimag, rrweight, llreal, llimag, llweight, rlreal, rlimag, rlweight, lrreal, lrimag, lrweight]
    for i in range(len(selectarr)):
        if(select == selectarr[i]): return package[i]
    if(select == "all"): return dumu, dumv, ifarr, time, ant1, ant2, rrreal, rrimag, rrweight, llreal, llimag, llweight, rlreal, rlimag, rlweight, lrreal, lrimag, lrweight
def pol_model_uvprt(data, ifn, select):
    """
    Extract UV data from ParselTongue UVData for instrumental polarization self-calibration.
    Args:
        data (ParselTongue UVData): an input ParselTongue UVData.
        ifn (int): selected IF number (zero-based index).
        select (str): type of the data that will be extracted
            ("real", "imag", or "all").
    Returns:
        list(s) of the selected UV data; (None, None) when no valid
        visibilities are found.
    """
    real, imag = [], []
    # (visibility.visibility[ifn,0,0,2] == np.nan) | (visibility.visibility[ifn,0,1,2] == np.nan) | (visibility.visibility[ifn,0,2,2] == np.nan) | (visibility.visibility[ifn,0,3,2] == np.nan))
    for visibility in data:
        # Require positive weights in all four correlations for this IF.
        if((visibility.visibility[ifn,0,0,2] > 0.) & (visibility.visibility[ifn,0,1,2] > 0.) & (visibility.visibility[ifn,0,2,2] > 0.) & (visibility.visibility[ifn,0,3,2] > 0.)):
            real.append(visibility.visibility[ifn,0,0,0])
            imag.append(visibility.visibility[ifn,0,0,1])
    # NOTE(review): zero-sum reals are treated as "no data", as in uvprt.
    if(np.sum(real) == 0.):
        return None, None
    selectarr = ["real", "imag"]
    package = [real, imag]
    for i in range(len(selectarr)):
        if(select == selectarr[i]): return package[i]
    if(select == "all"): return real, imag
# def get_parang(self, time, ant, sourcearr, source, obsra, obsdec):
def get_parang(yeararr, montharr, dayarr, time, raarr, decarr, lonarr, latarr, f_el_arr, f_par_arr, phi_off_arr, f_eq_arr, f_copar_arr): # Version 1.1!
    """
    Calculate antenna field-rotation angles.
    Args:
        yeararr, montharr, dayarr (numpy.array): observation date (UTC) per visibility.
        time (numpy.array): time of day per visibility; decomposed below as
            fractional hours (h:m:s) — assumed unit, confirm against callers.
        raarr, decarr (numpy.array): source right ascension / declination in degrees.
        lonarr, latarr (numpy.array): station longitude / latitude in degrees.
        f_el_arr, f_par_arr, phi_off_arr, f_eq_arr, f_copar_arr (numpy.array):
            mount-dependent coefficients multiplying the elevation, the
            parallactic angle, a constant offset, an equatorial term, and the
            co-parallactic angle, respectively (see get_antcoord).
    Returns:
        a numpy array of the field-rotation angles in radians.
    """
    latarr, decarr = np.radians(latarr), np.radians(decarr)
    # Split the fractional time of day into integer h, m, s for datetime.
    hour = np.floor(time)
    minute = np.floor((time - hour) * 60.)
    second = (time - hour - minute / 60.) * 3600.
    hour = hour.astype('int')
    minute = minute.astype('int')
    second = second.astype('int')
    dumdatetime = [dt.datetime(a, b, c, d, e, f) for a, b, c, d, e, f in zip(yeararr, montharr, dayarr, hour, minute, second)]
    dumt = []
    for dum in dumdatetime:
        dumt.append("{:04d}-{:02d}-{:02d}T{:02d}:{:02d}:{:f}".format(dum.year, dum.month, dum.day, dum.hour, dum.minute, dum.second + dum.microsecond * 1e-6))
    # Greenwich mean sidereal time in hours, computed with astropy.
    dumt = at.Time(dumt)
    gst = dumt.sidereal_time('mean','greenwich').hour
    # Obtain field-rotation angles using the known equations.
    # Hour angle in radians; GST is converted from hours to degrees (x15).
    hangle = np.radians(gst * 15. + lonarr - raarr)
    parang = np.arctan2((np.sin(hangle) * np.cos(latarr)), (np.sin(latarr) * np.cos(decarr) - np.cos(latarr) * np.sin(decarr) * np.cos(hangle)))
    coparang = np.arctan2(np.cos(hangle), (np.sin(decarr) * np.sin(hangle)))
    altitude = np.arcsin(np.sin(decarr) * np.sin(latarr) + np.cos(decarr) * np.cos(latarr) * np.cos(hangle))
    # Combine the terms with the mount-specific coefficients.
    pang = f_el_arr * altitude + f_par_arr * parang + phi_off_arr + f_eq_arr * 0. + f_copar_arr * coparang
    return pang
def coord(antname, antx, anty, antz):
    """
    Convert antenna positions from Cartesian to geodetic coordinates using astropy.
    Args:
        antname (list of str): antenna names (used only for the count).
        antx, anty, antz (list of float): geocentric coordinates in meters.
    Returns:
        lists of antenna longitudes (deg), latitudes (deg), and heights (m).
    """
    lonarr = []
    latarr = []
    heightarr = []
    for i in range(len(antname)):
        # Perform the geocentric -> geodetic conversion once per antenna;
        # previously the same conversion was repeated three times (once per
        # output quantity), tripling the work for identical results.
        lon, lat, height = EarthLocation.from_geocentric(antx[i], anty[i], antz[i], unit = 'm').to_geodetic()
        lonarr.append(lon.value)
        latarr.append(lat.value)
        heightarr.append(height.value)
    return lonarr, latarr, heightarr
def calendar(sourcearr, calsour, year, month, day, obsra, obsdec):
    """Broadcast each calibrator's date and coordinates onto the
    per-visibility source array; visibilities of unknown sources keep zeros."""
    nvis = len(sourcearr)
    yeararr = np.zeros(nvis, dtype = 'int')
    montharr = np.zeros(nvis, dtype = 'int')
    dayarr = np.zeros(nvis, dtype = 'int')
    raarr = np.zeros(nvis, dtype = 'float')
    decarr = np.zeros(nvis, dtype = 'float')
    for src, yr, mo, dy, ra, dec in zip(calsour, year, month, day, obsra, obsdec):
        match = (sourcearr == src)
        yeararr[match] = yr
        montharr[match] = mo
        dayarr[match] = dy
        raarr[match] = ra
        decarr[match] = dec
    return yeararr, montharr, dayarr, raarr, decarr
def coordarr(longi, lati, f_el, f_par, phi_off, f_eq, f_copar, antarr):
    """Index the per-station coordinates and mount coefficients by the
    per-visibility antenna-index array, returning per-visibility arrays."""
    per_visibility = [np.array(station_values)[antarr]
                      for station_values in (longi, lati, f_el, f_par, phi_off, f_eq, f_copar)]
    longarr, latarr, f_el, f_par, phi_off, f_eq, f_copar = per_visibility
    return longarr, latarr, f_el, f_par, phi_off, f_eq, f_copar
def basic_info(source, direc, dataname):
    """Load each source's UVFITS file into AIPS once and collect header metadata.
    Args:
        source (list of str): source names; '<direc><dataname><source>.uvf'
            must exist for each.
        direc (str): working directory path.
        dataname (str): common file-name prefix.
    Returns:
        dict: per-source coordinates and observation dates, plus antenna
        positions/mount coefficients and the frequency setup (read from the
        first file only — assumed identical across files; confirm upstream).
    """
    obsra, obsdec, year, month, day = [], [], [], [], []
    for l in range(len(source)):
        inname = str(source[l])
        data = AIPSUVData(inname, 'EDIT', 1, 1)
        # Remove any stale AIPS catalogue entry before (re)loading.
        if(data.exists() == True):
            data.clrstat()
            data.zap()
        # Load UVFITS files.
        au.runfitld(inname, 'EDIT', direc + dataname + source[l] + '.uvf')
        data = AIPSUVData(inname, 'EDIT', 1, 1)
        dum_obsra, dum_obsdec = get_obscoord(data)
        # Extract source coordinates from the header.
        obsra.append(dum_obsra)
        obsdec.append(dum_obsdec)
        dumyear, dummonth, dumday = get_obsdate(data)
        year.append(dumyear)
        month.append(dummonth)
        day.append(dumday)
        # Extract antenna, frequency, mount information, etc, from the header.
        if(l == 0):
            antname, antx, anty, antz, antmount, f_par, f_el, phi_off, f_eq, f_copar = get_antcoord(data)
            ifnum, freq = get_freqinfo(data)
        data.zap()
    info = {"obsra": obsra, "obsdec": obsdec, "year": year, "month": month, "day": day, "antname": antname, "antx": antx, "anty": anty, "antz": antz, "antmount": antmount, "ifnum": ifnum, "freq": freq, \
        "f_par": f_par, "f_el": f_el, "phi_off": phi_off, "f_eq": f_eq, "f_copar": f_copar}
    return info
def get_obsdate(data):
    """Return the observation (year, month, day) parsed from the UVData
    header's ISO-style date string ('YYYY-MM-DD...')."""
    obsdate = data.header.date_obs
    year, month, day = (int(field) for field in (obsdate[0:4], obsdate[5:7], obsdate[8:10]))
    return year, month, day
def get_obscoord(data):
    """Return the source (RA, Dec) in degrees from header axes 4 and 5."""
    header = data.header
    return header.crval[4], header.crval[5]
def get_antcoord(data):
    """Read antenna names, positions, and mount codes from the AN table and
    derive the field-rotation coefficients for each mount type."""
    antname, antx, anty, antz, antmount = [], [], [], [], []
    for row in data.table('AN', 1):
        antname.append(row.anname.replace(' ', ''))
        antx.append(row.stabxyz[0])
        anty.append(row.stabxyz[1])
        antz.append(row.stabxyz[2])
        antmount.append(row.mntsta)
    # (f_el, f_eq, f_par, f_copar) per AIPS mount code:
    # 0 = Cassegrain, 1 = equatorial, 3 = EW, 4 = Nasmyth-Right, 5 = Nasmyth-Left.
    mount_coeffs = {
        0: (0., 0., 1., 0.),
        1: (0., 1., 0., 0.),
        3: (0., 0., 0., 1.),
        4: (1., 0., 1., 0.),
        5: (-1., 0., 1., 0.),
    }
    f_el, f_eq, f_par, f_copar, phi_off = [], [], [], [], []
    for mount in antmount:
        coeffs = mount_coeffs.get(mount)
        if coeffs is not None:
            f_el.append(coeffs[0])
            f_eq.append(coeffs[1])
            f_par.append(coeffs[2])
            f_copar.append(coeffs[3])
        # NOTE(review): unrecognized mount codes get no coefficient entries
        # (matching the original behavior) but phi_off is always appended.
        phi_off.append(0.)
    return antname, antx, anty, antz, antmount, f_par, f_el, phi_off, f_eq, f_copar
def get_freqinfo(data):
    """Return the number of IFs and their frequencies as strings in GHz.
    For a single IF the frequency is one '%.3f GHz' string; for multiple IFs
    it is a list of strings (this asymmetry mirrors the original behavior).
    """
    fqtable = data.table('FQ', 1)
    if_freq = fqtable[0].if_freq
    ref_freq = data.header.crval[2]
    if isinstance(if_freq, float):
        ifnum = 1
        # Single IF: the reference frequency alone is reported.
        freq = "{0:.3f}".format(ref_freq / 1e9) + ' GHz'
    else:
        ifnum = len(if_freq)
        freq = [str((offset + ref_freq) / 1e9) + ' GHz' for offset in if_freq]
    return ifnum, freq
def pd_modifier(data):
    """Append derived amplitude, phase, noise, and Stokes Q/U columns.
    Args:
        data (pandas.DataFrame): per-visibility dataframe containing real,
            imaginary, and weight columns for the RR, LL, RL, LR
            correlations, plus "IF", "ant1", "ant2", "pang1", "pang2".
    Returns:
        pandas.DataFrame: the same dataframe, modified in place, with
        sigma/amp/phase columns per correlation and Q/U combinations added.
    """
    # Make new columns for amplitudes, phases, and corresponding errors, etc.
    # Thermal noise per correlation: sigma = 1 / sqrt(weight).
    # BUG FIX: "llsigma" previously omitted the square root (1/weight),
    # inconsistent with the other three correlations.
    data.loc[:,"rrsigma"], data.loc[:,"llsigma"], data.loc[:,"rlsigma"], data.loc[:,"lrsigma"] = \
        1. / data.loc[:,"rrweight"] ** (0.5), 1. / data.loc[:,"llweight"] ** (0.5), 1. / data.loc[:,"rlweight"] ** (0.5), 1. / data.loc[:,"lrweight"] ** (0.5)
    data.loc[:,"rramp"], data.loc[:,"llamp"], data.loc[:,"rlamp"], data.loc[:,"lramp"] = \
        np.absolute(data.loc[:,"rrreal"] + 1j*data.loc[:,"rrimag"]), np.absolute(data.loc[:,"llreal"] + 1j*data.loc[:,"llimag"]), \
        np.absolute(data.loc[:,"rlreal"] + 1j*data.loc[:,"rlimag"]), np.absolute(data.loc[:,"lrreal"] + 1j*data.loc[:,"lrimag"])
    data.loc[:,"rrphas"], data.loc[:,"llphas"], data.loc[:,"rlphas"], data.loc[:,"lrphas"] = \
        np.angle(data.loc[:,"rrreal"] + 1j*data.loc[:,"rrimag"]), np.angle(data.loc[:,"llreal"] + 1j*data.loc[:,"llimag"]), \
        np.angle(data.loc[:,"rlreal"] + 1j*data.loc[:,"rlimag"]), np.angle(data.loc[:,"lrreal"] + 1j*data.loc[:,"lrimag"])
    # Amplitude errors equal the visibility sigmas.
    data.loc[:,"rramp_sigma"], data.loc[:,"llamp_sigma"], data.loc[:,"rlamp_sigma"], data.loc[:,"lramp_sigma"] = \
        data.loc[:,"rrsigma"], data.loc[:,"llsigma"], data.loc[:,"rlsigma"], data.loc[:,"lrsigma"]
    # Phase errors: sigma divided by the visibility amplitude (small-error limit).
    data.loc[:,"rrphas_sigma"], data.loc[:,"llphas_sigma"], data.loc[:,"rlphas_sigma"], data.loc[:,"lrphas_sigma"] = \
        data.loc[:,"rrsigma"] / np.abs(data.loc[:,"rrreal"] + 1j*data.loc[:,"rrimag"]), \
        data.loc[:,"llsigma"] / np.abs(data.loc[:,"llreal"] + 1j*data.loc[:,"llimag"]), \
        data.loc[:,"rlsigma"] / np.abs(data.loc[:,"rlreal"] + 1j*data.loc[:,"rlimag"]), \
        data.loc[:,"lrsigma"] / np.abs(data.loc[:,"lrreal"] + 1j*data.loc[:,"lrimag"])
    # Stokes Q/U from the cross-hand correlations: Q = (RL+LR)/2, U = -i(RL-LR)/2.
    dumrl, dumlr = data.loc[:,"rlreal"] + 1j*data.loc[:,"rlimag"], data.loc[:,"lrreal"] + 1j*data.loc[:,"lrimag"]
    dumrlsigma, dumlrsigma = data.loc[:,"rlsigma"] + 1j*data.loc[:,"rlsigma"], data.loc[:,"lrsigma"] + 1j*data.loc[:,"lrsigma"]
    dumq, dumu = (dumrl + dumlr) / 2., -1j * (dumrl - dumlr) / 2.
    dumqsigma, dumusigma = np.sqrt(dumrlsigma**2 + dumlrsigma**2) / 2., np.sqrt(dumrlsigma**2 + dumlrsigma**2) / 2.
    data.loc[:,"qamp"], data.loc[:,"uamp"], data.loc[:,"qphas"], data.loc[:,"uphas"] = np.absolute(dumq), np.absolute(dumu), np.angle(dumq), np.angle(dumu)
    data.loc[:,"qphas_sigma"], data.loc[:,"uphas_sigma"] = np.real(dumqsigma) / np.abs(dumq), np.real(dumusigma) / np.abs(dumu)
    data.loc[:,"qsigma"], data.loc[:,"usigma"] = np.real(dumqsigma), np.real(dumusigma)
    # Enforce integer/float dtypes expected downstream.
    data["IF"] = data["IF"].astype('int32')
    data["ant1"] = data["ant1"].astype('int32')
    data["ant2"] = data["ant2"].astype('int32')
    data["pang1"] = data["pang1"].astype('float64')
    data["pang2"] = data["pang2"].astype('float64')
    return data
def get_model(data, direc, dataname, calsour, polcal_unpol, ifnum, pol_IF_combine, outputname = None, selfcal = False):
    """
    Extract Stokes Q and U visibility models and append them to the pandas dataframe.
    Args:
        data (pandas.DataFrame): visibility dataframe to which model columns are added.
        direc (str): working directory containing the data and CLEAN images.
        dataname (str): common file-name prefix.
        calsour (list of str): calibrator source names.
        polcal_unpol (list of bool): per calibrator, True if it is assumed unpolarized.
        ifnum (int): number of IFs.
        pol_IF_combine (bool): if True use a single all-IF CLEAN image, else per-IF images.
        outputname (str): unused in this function; kept for interface compatibility.
        selfcal (bool): if True load '<source>.calib' instead of '<source>.uvf'.
    Returns:
        pandas.DataFrame: `data` with model RL/LR and Q/U columns appended.
    """
    mod_qrealarr, mod_qimagarr, mod_urealarr, mod_uimagarr = [], [], [], []
    for l in range(len(calsour)):
        inname = str(calsour[l])
        calib = AIPSUVData(inname, 'EDIT', 1, 1)
        # Clear any stale AIPS catalogue entry, then load the UV data.
        if(calib.exists() == True):
            calib.clrstat()
            calib.zap()
        if selfcal:
            au.runfitld(inname, 'EDIT', direc + dataname + calsour[l]+'.calib')
        else:
            au.runfitld(inname, 'EDIT', direc + dataname + calsour[l]+'.uvf')
        calib = AIPSUVData(inname, 'EDIT', 1, 1)
        if polcal_unpol[l]:
            # Unpolarized calibrator: the Q/U models are identically zero
            # for every visibility of this source.
            dumdata = data
            dumsource = np.array(dumdata.loc[:,"source"])
            dumlen = np.sum(dumsource == calsour[l])
            mod_qrealarr = mod_qrealarr + [0.] * dumlen
            mod_qimagarr = mod_qimagarr + [0.] * dumlen
            mod_urealarr = mod_urealarr + [0.] * dumlen
            mod_uimagarr = mod_uimagarr + [0.] * dumlen
        else:
            for k in range(ifnum):
                # Skip IFs with no data rows.
                if(np.sum(data.loc[:, "IF"] == k+1) == 0):
                    continue
                # --- Stokes Q model ---
                qmap = AIPSImage(inname, 'QMAP', 1, 1)
                if(qmap.exists() == True):
                    qmap.clrstat()
                    qmap.zap()
                if pol_IF_combine:
                    fitsname = direc+dataname+calsour[l]+'.allIF.q.fits'
                else:
                    fitsname = direc+dataname+calsour[l]+'.IF'+str(k+1)+'.q.fits'
                # A Q image is mandatory; a U image (below) is optional.
                if not path.exists(fitsname):
                    raise Exception("The requested {:} file does not exist!".format(fitsname))
                else:
                    au.runfitld(inname, 'QMAP', fitsname)
                qmap = AIPSImage(inname, 'QMAP', 1, 1)
                uvsub = AIPSUVData(inname, 'UVSUB', 1, 1)
                if(uvsub.exists() == True):
                    uvsub.clrstat()
                    uvsub.zap()
                # Evaluate the Q model at the UV points via AIPS UVSUB.
                au.runuvsub(inname, 'EDIT', 'QMAP', 1, 1)
                moddata = WAIPSUVData(inname, 'UVSUB', 1, 1)
                mod_qreal, mod_qimag = pol_model_uvprt(moddata, k, "all")
                if(mod_qreal != None):
                    mod_qrealarr = mod_qrealarr + mod_qreal
                    mod_qimagarr = mod_qimagarr + mod_qimag
                moddata.zap()
                qmap.zap()
                # --- Stokes U model ---
                umap = AIPSImage(inname, 'UMAP', 1, 1)
                if(umap.exists() == True):
                    umap.clrstat()
                    umap.zap()
                if pol_IF_combine:
                    fitsname = direc+dataname+calsour[l]+'.allIF.u.fits'
                else:
                    fitsname = direc+dataname+calsour[l]+'.IF'+str(k+1)+'.u.fits'
                if not path.exists(fitsname):
                    # No U image: count the valid visibilities for this IF
                    # and pad the U model with zeros so all four model
                    # arrays stay aligned with each other.
                    dum = 0
                    calib = WAIPSUVData(inname, 'EDIT', 1, 1)
                    for visibility in calib:
                        if((visibility.visibility[k,0,0,2] > 0) & (visibility.visibility[k,0,1,2] > 0) & (visibility.visibility[k,0,2,2] > 0) & (visibility.visibility[k,0,3,2] > 0)):
                            dum += 1
                    mod_urealarr = mod_urealarr + [0.] * dum
                    mod_uimagarr = mod_uimagarr + [0.] * dum
                    calib = AIPSUVData(inname, 'EDIT', 1, 1)
                else:
                    au.runfitld(inname, 'UMAP', fitsname)
                    umap = AIPSImage(inname, 'UMAP', 1, 1)
                    uvsub = AIPSUVData(inname, 'UVSUB', 1, 1)
                    if(uvsub.exists() == True):
                        uvsub.clrstat()
                        uvsub.zap()
                    au.runuvsub(inname, 'EDIT', 'UMAP', 1, 1)
                    moddata = WAIPSUVData(inname, 'UVSUB', 1, 1)
                    mod_ureal, mod_uimag = pol_model_uvprt(moddata, k, "all")
                    if(mod_ureal != None):
                        mod_urealarr = mod_urealarr + mod_ureal
                        mod_uimagarr = mod_uimagarr + mod_uimag
                    moddata.zap()
                    umap.zap()
        calib.zap()
    # Combine the accumulated Q/U models into RL/LR (RL = Q + iU, LR = Q - iU)
    # and amplitude/phase representations.
    mod_qreal, mod_qimag, mod_ureal, mod_uimag = np.array(mod_qrealarr), np.array(mod_qimagarr), np.array(mod_urealarr), np.array(mod_uimagarr)
    mod_q, mod_u = mod_qreal + 1j*mod_qimag, mod_ureal + 1j*mod_uimag
    mod_rlreal, mod_rlimag, mod_lrreal, mod_lrimag = np.real(mod_q + 1j*mod_u), np.imag(mod_q + 1j*mod_u), np.real(mod_q - 1j*mod_u), np.imag(mod_q - 1j*mod_u)
    mod_rlamp, mod_rlphas, mod_lramp, mod_lrphas = np.absolute(mod_rlreal + 1j*mod_rlimag), np.angle(mod_rlreal + 1j*mod_rlimag), np.absolute(mod_lrreal + 1j*mod_lrimag), np.angle(mod_lrreal + 1j*mod_lrimag)
    mod_qamp, mod_qphas, mod_uamp, mod_uphas = np.absolute(mod_q), np.angle(mod_q), np.absolute(mod_u), np.angle(mod_u)
    # Append the model visibilities to the existing pandas dataframe as new columns.
    data.loc[:,"model_rlreal"], data.loc[:,"model_rlimag"], data.loc[:,"model_lrreal"], data.loc[:,"model_lrimag"], \
    data.loc[:,"model_rlamp"], data.loc[:,"model_rlphas"], data.loc[:,"model_lramp"], data.loc[:,"model_lrphas"], \
    data.loc[:,"model_qamp"], data.loc[:,"model_qphas"], data.loc[:,"model_uamp"], data.loc[:,"model_uphas"] = \
    mod_rlreal, mod_rlimag, mod_lrreal, mod_lrimag, mod_rlamp, mod_rlphas, mod_lramp, mod_lrphas, mod_qamp, mod_qphas, mod_uamp, mod_uphas
    return data
def evpacal(datain, dataout, clcorprm, logger = None): # Version 1.1
    """Apply EVPA corrections with AIPS CLCOR and write a corrected UVFITS file.
    Args:
        datain (str): input UVFITS file name.
        dataout (str): output UVFITS file name.
        clcorprm (list): CLCOR correction parameters.
        logger (logging.Logger): optional logger for a progress message.
    """
    if (logger != None):
        logger.info('Correcting EVPAs... \n Input file: {:} \n Output file: {:}'.format(datain, dataout))
    pinal = AIPSUVData('EVPA', 'PINAL', 1, 1)
    # Clear a stale catalogue entry before loading the input file.
    if(pinal.exists() == True):
        pinal.zap()
    au.runfitld('EVPA', 'PINAL', datain)
    pinal = AIPSUVData('EVPA', 'PINAL', 1, 1)
    aipssource = pinal.header.object
    multi = AIPSUVData('EVPA', 'MULTI', 1, 1)
    if(multi.exists() == True):
        multi.zap()
    # Convert to multi-source format so CLCOR corrections can be applied,
    # then split back to a single-source 'PANG' file.
    au.runmulti('EVPA', 'PINAL')
    au.runclcor('EVPA', 'MULTI', 1, 1, clcorprm)
    pang = AIPSUVData(aipssource, 'PANG', 1, 1)
    if(pang.exists() == True):
        pang.zap()
    au.runsplitpang('EVPA')
    # Write the corrected data to disk and clean up the AIPS catalogue.
    au.runfittp(aipssource, 'PANG', dataout)
    pinal.zap()
    multi.zap()
    pang.zap()
def get_scans(time, sourcearr, tsep = 2. / 60.):
    """Split the time series into scans at gaps longer than ``tsep`` or
    wherever the observed source changes.
    Returns the left edges, right edges, and source of each scan.
    """
    order = np.argsort(time)
    times_sorted = np.sort(time)
    sources_sorted = sourcearr[order]
    boundary_left = [np.min(times_sorted)]
    boundary_right = []
    boundary_source = [sources_sorted[0]]
    for idx in range(len(times_sorted) - 1):
        long_gap = (times_sorted[idx + 1] - times_sorted[idx]) > tsep
        source_changed = sources_sorted[idx + 1] != sources_sorted[idx]
        # Either condition closes the current scan and opens a new one.
        if long_gap or source_changed:
            boundary_left.append(times_sorted[idx + 1])
            boundary_right.append(times_sorted[idx])
            boundary_source.append(sources_sorted[idx + 1])
    # The final scan always ends at the last observed time.
    boundary_right.append(np.max(times_sorted))
    return boundary_left, boundary_right, boundary_source
def bin_data(time, data, boundary_left, boundary_right, error = None, avg_nat = False):
    """Average `data` within each [left, right) time interval (scan).
    Args:
        time (numpy.array): sample times.
        data (numpy.array): sample values.
        boundary_left, boundary_right (list): interval edges (e.g. from get_scans).
        error (numpy.array): optional 1-sigma errors; used for weighting if avg_nat.
        avg_nat (bool): if True use inverse-variance (natural) weighting.
    Returns:
        tuple of numpy arrays: bin centers, bin means, and bin uncertainties.
    """
    binx, biny, binsigma = [], [], []
    for i in range(len(boundary_left)):
        # NOTE(review): the right edge is exclusive, so a sample lying
        # exactly on the final scan's right boundary is excluded — confirm
        # this is intended when boundaries come from get_scans.
        select = (time >= boundary_left[i]) & (time < boundary_right[i])
        if(np.sum(select) == 0): continue
        binx.append((boundary_left[i] + boundary_right[i]) / 2.)
        if (error is not None) & (avg_nat == True):
            # Inverse-variance weighted mean; with returned=True np.average
            # also yields the sum of weights, whose inverse square root is
            # taken as the weighted-mean uncertainty.
            biny.append(np.average(data[select], weights = 1. / error[select] ** 2, returned = True)[0])
            binsigma.append(np.abs(1. / np.average(data[select], weights = 1. / error[select] ** 2, returned = True)[1] ** 0.5))
        else:
            # Unweighted mean with the standard error of the mean.
            biny.append(np.mean(data[select]))
            binsigma.append(np.std(data[select]) / np.sqrt(float(np.sum(select))))
    binx, biny, binsigma = np.array(binx), np.array(biny), np.array(binsigma)
    return binx, biny, binsigma
|
jhparkastroREPO_NAMEgpcalPATH_START.@gpcal_extracted@gpcal-master@gpcal@obshelpers.py@.PATH_END.py
|
{
"filename": "simulate.py",
"repo_name": "HERA-Team/hera_sim",
"repo_path": "hera_sim_extracted/hera_sim-main/hera_sim/simulate.py",
"type": "Python"
}
|
"""Module containing a high-level interface for :mod:`hera_sim`.
This module defines the :class:`Simulator` class, which provides the user
with a high-level interface to all of the features provided by :mod:`hera_sim`.
For detailed instructions on how to manage a simulation using the
:class:`Simulator`, please refer to the tutorials.
"""
import contextlib
import functools
import inspect
import numpy as np
import warnings
import yaml
from astropy import constants as const
from cached_property import cached_property
from collections.abc import Sequence
from deprecation import deprecated
from pathlib import Path
from pyuvdata import UVData
from pyuvdata import utils as uvutils
from typing import Optional, Union
from . import __version__, io, utils
from .components import SimulationComponent, get_model, list_all_components
from .defaults import defaults
# Decorator used to mark the legacy per-effect helpers as deprecated.
_add_depr = deprecated(
    deprecated_in="1.0", removed_in="2.0", details="Use the :meth:`add` method instead."
)
# Define some commonly used types for typing purposes.
AntPairPol = tuple[int, int, str]
AntPair = tuple[int, int]
AntPol = tuple[int, str]
# A simulation component may be referred to by alias, class, or instance.
Component = Union[str, type[SimulationComponent], SimulationComponent]
# wrapper for the run_sim method, necessary for part of the CLI
def _generator_to_list(func, *args, **kwargs):
@functools.wraps(func)
def new_func(*args, **kwargs):
result = list(func(*args, **kwargs))
return None if result == [] else result
return new_func
class Simulator:
"""Simulate visibilities and/or instrumental effects for an entire array.
Parameters
----------
data
:class:`pyuvdata.UVData` object to use for the simulation or path to a
UVData-supported file.
defaults_config
Path to defaults configuraiton, seasonal keyword, or configuration
dictionary for setting default simulation parameters. See tutorial
on setting defaults for further information.
redundancy_tol
Position tolerance for finding redundant groups, in meters. Default is
1 meter.
kwargs
Parameters to use for initializing UVData object if none is provided.
If ``data`` is a file path, then these parameters are used when reading
the file. Otherwise, the parameters are used in creating a ``UVData``
object using :func:`~.io.empty_uvdata`.
Attributes
----------
data : :class:`pyuvdata.UVData` instance
Object containing simulated visibilities and metadata.
extras : dict
Dictionary to use for storing extra parameters.
antpos : dict
Dictionary pairing antenna numbers to ENU positions in meters.
lsts : np.ndarray of float
Observed LSTs in radians.
freqs : np.ndarray of float
Observed frequencies in GHz.
times : np.ndarray of float
Observed times in JD.
pols : list of str
Polarization strings.
red_grps : list of list of int
Redundant baseline groups. Each entry is a list containing the baseline
integer for each member of that redundant group.
red_vecs : list of :class:`numpy.ndarray` of float
Average of all the baselines for each redundant group.
red_lengths : list of float
Length of each redundant baseline.
"""
    def __init__(
        self,
        *,
        data: Optional[Union[str, UVData]] = None,
        defaults_config: Optional[Union[str, dict]] = None,
        redundancy_tol: float = 1.0,
        **kwargs,
    ):
        """Initialize the Simulator; see the class docstring for parameters."""
        # TODO: add ability for user to specify parameter names to look for on
        # parsing call signature
        # Create some utility dictionaries.
        # _components records which effects were simulated and with what
        # parameters; _seeds stores random-state seeds per component;
        # _antpairpol_cache tracks which baselines an effect has touched.
        self._components = {}
        self._seeds = {}
        self._antpairpol_cache = {}
        self._filter_cache = {"delay": {}, "fringe": {}}
        # apply and activate defaults if specified
        if defaults_config:
            self.apply_defaults(defaults_config)
        # actually initialize the UVData object stored in self.data
        self._initialize_data(data, **kwargs)
        self._calculate_reds(tol=redundancy_tol)
        self.extras = self.data.extra_keywords
        # Mirror commonly used metadata sizes as attributes on the Simulator.
        for param in ("Ntimes", "Nfreqs", "Nblts", "Npols", "Nbls"):
            setattr(self, param, getattr(self.data, param))
        self.Nants = len(self.antpos)
        # Let's make some helpful methods from the UVData object available
        for attr in ("data", "flags", "antpairs", "antpairpols", "pols"):
            setattr(self, f"get_{attr}", getattr(self.data, f"get_{attr}"))
    @property
    def antenna_numbers(self):
        """Antenna numbers, delegated to the underlying ``UVData`` object."""
        return self.data.antenna_numbers
    @property
    def ant_1_array(self):
        """First-antenna number array, delegated to ``UVData``."""
        return self.data.ant_1_array
    @property
    def ant_2_array(self):
        """Second-antenna number array, delegated to ``UVData``."""
        return self.data.ant_2_array
    @property
    def polarization_array(self):
        """Polarization integer array, delegated to ``UVData``."""
        return self.data.polarization_array
    @property
    def data_array(self):
        """Array storing the visibilities."""
        # Delegates to the underlying UVData storage.
        return self.data.data_array
@property
def antpos(self):
"""Mapping between antenna numbers and ENU positions in meters."""
antpos, ants = self.data.get_ENU_antpos(pick_data_ants=True)
return dict(zip(ants, antpos))
@property
def lsts(self):
"""Observed Local Sidereal Times in radians."""
# This process retrieves the unique LSTs while respecting phase wraps.
_, unique_inds = np.unique(self.data.time_array, return_index=True)
return self.data.lst_array[unique_inds]
    @property
    def freqs(self):
        """Frequencies in GHz."""
        # UVData stores frequencies in Hz; convert to GHz for convenience.
        return np.unique(self.data.freq_array) / 1e9
    @property
    def times(self):
        """Simulation times in JD."""
        # np.unique also returns the times sorted in ascending order.
        return np.unique(self.data.time_array)
    @property
    def pols(self):
        """Array of polarization strings."""
        # Delegates to UVData, which converts polarization integers to strings.
        return self.data.get_pols()
    @cached_property
    def integration_time(self):
        """Integration time, assuming it's identical across baselines."""
        # NOTE(review): the mean silently hides any variation in the
        # per-baseline integration times; cached after first access.
        return np.mean(self.data.integration_time)
    @cached_property
    def channel_width(self):
        """Channel width, assuming each channel is the same width."""
        # NOTE(review): the mean silently hides any variation across
        # channels; cached after first access.
        return np.mean(self.data.channel_width)
    def apply_defaults(self, config: Optional[Union[str, dict]], refresh: bool = True):
        """
        Apply the provided default configuration.
        Equivalent to calling :meth:`~hera_sim.defaults.set` with the same parameters.
        Parameters
        ----------
        config
            If given, either a path pointing to a defaults configuration
            file, a string identifier of a particular config (e.g. 'h1c')
            or a dictionary of configuration parameters
            (see :class:`~.defaults.Defaults`).
        refresh
            Whether to refresh the defaults.
        """
        # Thin wrapper: all the work happens in the module-level defaults object.
        defaults.set(config, refresh=refresh)
def calculate_filters(
self,
*,
delay_filter_kwargs: Optional[dict[str, Union[float, str]]] = None,
fringe_filter_kwargs: Optional[dict[str, Union[float, str, np.ndarray]]] = None,
):
"""
Pre-compute fringe-rate and delay filters for the entire array.
Parameters
----------
delay_filter_kwargs
Extra parameters necessary for generating a delay filter. See
:func:`utils.gen_delay_filter` for details.
fringe_filter_kwargs
Extra parameters necessary for generating a fringe filter. See
:func:`utils.gen_fringe_filter` for details.
"""
delay_filter_kwargs = delay_filter_kwargs or {}
fringe_filter_kwargs = fringe_filter_kwargs or {}
self._calculate_delay_filters(**delay_filter_kwargs)
self._calculate_fringe_filters(**fringe_filter_kwargs)
    def add(
        self,
        component: Component,
        *,
        add_vis: bool = True,
        ret_vis: bool = False,
        seed: Optional[Union[str, int]] = None,
        vis_filter: Optional[Sequence] = None,
        component_name: Optional[str] = None,
        **kwargs,
    ) -> Optional[Union[np.ndarray, dict[int, np.ndarray]]]:
        """
        Simulate an effect then apply and/or return the result.
        Parameters
        ----------
        component
            Effect to be simulated. This can either be an alias of the effect,
            or the class (or instance thereof) that simulates the effect.
        add_vis
            Whether to apply the effect to the simulated data. Default is True.
        ret_vis
            Whether to return the simulated effect. Nothing is returned by default.
        seed
            How to seed the random number generator. Can either directly provide
            a seed as an integer, or use one of the supported keywords. See
            tutorial for using the :class:`Simulator` for supported seeding modes.
            Default is to use a seed based on the current random state.
        vis_filter
            Iterable specifying which antennas/polarizations for which the effect
            should be simulated. See tutorial for using the :class:`Simulator` for
            details of supported formats and functionality.
        component_name
            Name to use when recording the parameters used for simulating the effect.
            Default is to use the name of the class used to simulate the effect.
        **kwargs
            Optional keyword arguments for the provided ``component``.
        Returns
        -------
        effect
            The simulated effect; only returned if ``ret_vis`` is set to ``True``.
            If the simulated effect is multiplicative, then a dictionary mapping
            antenna numbers to the per-antenna effect (as a ``np.ndarray``) is
            returned. Otherwise, the effect for the entire array is returned with
            the same structure as the ``pyuvdata.UVData.data_array`` that the
            data is stored in.
        """
        # Obtain a callable reference to the simulation component model.
        model = self._get_component(component)
        model_key = (
            component_name if component_name else self._get_model_name(component)
        )
        # Instantiate the component class with the user parameters if needed.
        if not isinstance(model, SimulationComponent):
            model = model(**kwargs)
        self._sanity_check(model)  # Check for component ordering issues.
        self._antpairpol_cache[model_key] = []  # Initialize this model's cache.
        # Without a seed, an applied effect cannot be reproduced exactly later.
        if seed is None and add_vis:
            warnings.warn(
                "You have not specified how to seed the random state. "
                "This effect might not be exactly recoverable.",
                stacklevel=2,
            )
        # Record the component simulated and the parameters used.
        # Fill any parameters the user left unspecified from the active defaults.
        if defaults._override_defaults:
            for param in getattr(model, "kwargs", {}):
                if param not in kwargs and param in defaults():
                    kwargs[param] = defaults(param)
        self._components[model_key] = kwargs.copy()
        self._components[model_key]["alias"] = component
        # Simulate the effect by iterating over baselines and polarizations.
        data = self._iteratively_apply(
            model,
            add_vis=add_vis,
            ret_vis=ret_vis,
            vis_filter=vis_filter,
            antpairpol_cache=self._antpairpol_cache[model_key],
            seed=seed,
            model_key=model_key,
            **kwargs,
        )  # This is None if ret_vis is False
        if add_vis:
            self._update_history(model, **kwargs)
            # Persist the seed and filter so the effect can be recovered later.
            if seed:
                self._components[model_key]["seed"] = seed
                self._update_seeds(model_key)
            if vis_filter is not None:
                self._components[model_key]["vis_filter"] = vis_filter
        else:
            # The effect was not applied, so drop the bookkeeping entries.
            del self._antpairpol_cache[model_key]
            del self._components[model_key]
            if self._seeds.get(model_key, None):
                del self._seeds[model_key]
        return data
    def get(
        self,
        component: Component,
        key: Optional[Union[int, str, AntPair, AntPairPol]] = None,
    ) -> Union[np.ndarray, dict[int, np.ndarray]]:
        """
        Retrieve an effect that was previously simulated.
        Parameters
        ----------
        component
            Effect that is to be retrieved. See :meth:`add` for more details.
        key
            Key for retrieving simulated effect. Possible choices are as follows:
            An integer may specify either a single antenna (for per-antenna
            effects) or be a ``pyuvdata``-style baseline integer.
            A string specifying a polarization can be used to retrieve the
            effect for every baseline for the specified polarization.
            A length-2 tuple of integers can be used to retrieve the effect
            for that baseline for all polarizations.
            A length-3 tuple specifies a particular baseline and polarization
            for which to retrieve the effect.
            Not specifying a key results in the effect being returned for all
            baselines (or antennas, if the effect is per-antenna) and polarizations.
        Returns
        -------
        effect
            The simulated effect appropriate for the provided key. Return type
            depends on the effect being simulated and the provided key. See the
            tutorial Jupyter notebook for the :class:`Simulator` for example usage.
        Notes
        -----
        This will only produce the correct output if the simulated effect is
        independent of the data itself. If the simulated effect contains a
        randomly-generated component, then the random seed must have been set
        when the effect was initially simulated.
        """
        # Retrieve the model and verify it has been simulated.
        if component in self._components:
            # ``component`` is the key the effect was recorded under in
            # :meth:`add`; its "alias" entry holds the original component.
            model = self._get_component(self._components[component]["alias"])
            model_key = component
        else:
            model = self._get_component(component)
            model_key = self._get_model_name(component)
            if model_key not in self._components:
                raise ValueError("The provided component has not yet been simulated.")
        # Parse the key and verify that it's properly formatted.
        ant1, ant2, pol = self._parse_key(key)
        self._validate_get_request(model, ant1, ant2, pol)
        # Prepare to re-simulate the effect with the recorded parameters.
        kwargs = self._components[model_key].copy()
        kwargs.pop("alias")  # To handle multiple instances of simulating an effect.
        seed = kwargs.pop("seed", None)
        vis_filter = kwargs.pop("vis_filter", None)
        if not isinstance(model, SimulationComponent):
            model = model(**kwargs)
        if model.is_multiplicative:
            # We'll get a dictionary back, so the handling is different.
            # Keys of the dictionary are (antenna, feed-pol) pairs.
            gains = self._iteratively_apply(
                model,
                add_vis=False,
                ret_vis=True,
                seed=seed,
                vis_filter=vis_filter,
                model_key=model_key,
                **kwargs,
            )
            if ant1 is not None:
                if pol:
                    return gains[(ant1, pol)]
                return {key: gain for key, gain in gains.items() if ant1 in key}
            else:
                if pol:
                    return {key: gain for key, gain in gains.items() if pol in key}
                return gains
        # Specifying neither antenna implies the full array's data is desired.
        if ant1 is None and ant2 is None:
            # Simulate the effect
            data = self._iteratively_apply(
                model,
                add_vis=False,
                ret_vis=True,
                seed=seed,
                vis_filter=vis_filter,
                antpairpol_cache=None,
                model_key=model_key,
                **kwargs,
            )
            # Trim the data if a specific polarization is requested.
            if pol is None:
                return data
            pol_ind = self.pols.index(pol)
            return data[:, :, pol_ind]
        # We're only simulating for a particular baseline.
        # (The validation check ensures this is the case.)
        # First, find out if it needs to be conjugated.
        try:
            blt_inds = self.data.antpair2ind(ant1, ant2)
            if blt_inds is None:
                raise ValueError
            conj_data = False
        except ValueError:
            # Only the conjugate baseline is present in the data.
            blt_inds = self.data.antpair2ind(ant2, ant1)
            conj_data = True
        # We have three different seeding cases to work out.
        if seed == "initial":
            # Initial seeding means we need to do the whole array, since
            # the random state evolves over the full baseline iteration.
            data = self._iteratively_apply(
                model,
                add_vis=False,
                ret_vis=True,
                seed=seed,
                vis_filter=vis_filter,
                antpairpol_cache=None,
                model_key=model_key,
                **kwargs,
            )[blt_inds, :, :]
            if conj_data:  # pragma: no cover
                data = np.conj(data)
            if pol is None:
                return data
            pol_ind = self.data.get_pols().index(pol)
            return data[..., pol_ind]
        # Figure out whether we need to do a polarization selection.
        if pol is None:
            data_shape = (self.lsts.size, self.freqs.size, len(self.pols))
            pols = self.pols
            return_slice = (slice(None),) * 3
        else:
            data_shape = (self.lsts.size, self.freqs.size, 1)
            pols = (pol,)
            return_slice = (slice(None), slice(None), 0)
        # Prepare the model parameters, then simulate and return the effect.
        data = np.zeros(data_shape, dtype=complex)
        for i, _pol in enumerate(pols):
            args = self._initialize_args_from_model(model)
            args = self._update_args(args, model, ant1, ant2, pol)
            args.update(kwargs)
            # Seed with the antenna order that was actually simulated so the
            # recovered random state matches the original simulation.
            if conj_data:
                _, rng = self._seed_rng(
                    seed, model, ant2, ant1, _pol, model_key=model_key
                )
            else:
                _, rng = self._seed_rng(
                    seed, model, ant1, ant2, _pol, model_key=model_key
                )
            args["rng"] = rng
            data[..., i] = model(**args)
        if conj_data:
            data = np.conj(data)
        return data[return_slice]
def plot_array(self):
"""Generate a plot of the array layout in ENU coordinates."""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel("East Position [m]", fontsize=12)
ax.set_ylabel("North Position [m]", fontsize=12)
ax.set_title("Array Layout", fontsize=12)
dx = 0.25
for ant, pos in self.antpos.items():
ax.plot(pos[0], pos[1], color="k", marker="o")
ax.text(pos[0] + dx, pos[1] + dx, ant)
return fig
def refresh(self):
"""Refresh the object.
This zeros the data array, resets the history, and clears the
instance's ``_components`` dictionary.
"""
self.data.data_array = np.zeros(self.data.data_array.shape, dtype=complex)
self.data.history = ""
self._components.clear()
self._antpairpol_cache.clear()
self._seeds.clear()
self._filter_cache = {"delay": {}, "fringe": {}}
self.extras.clear()
def write(self, filename, save_format="uvh5", **kwargs):
"""Write the ``data`` to disk using a ``pyuvdata``-supported filetype."""
try:
getattr(self.data, f"write_{save_format}")(filename, **kwargs)
except AttributeError:
raise ValueError(
"The save_format must correspond to a write method in UVData."
)
# TODO: Determine if we want to provide the user the option to retrieve
# simulation components as a return value from run_sim. Remove the
# _generator_to_list wrapper if we do not make that a feature.
@_generator_to_list
def run_sim(self, sim_file=None, **sim_params):
"""
Run an entire simulation.
Parameters
----------
sim_file
Path to a configuration file specifying simulation parameters.
Required if ``sim_params`` is not provided.
**sim_params
Once-nested dictionary mapping simulation components to models,
with each model mapping to a dictionary of parameter-value pairs.
Required if ``sim_file`` is not provided.
Returns
-------
components
List of simulation components that were generated with the
parameter ``ret_vis`` set to ``True``, returned in the order
that they were simulated. This is only returned if there is
at least one simulation component with ``ret_vis`` set to
``True`` in its configuration file/dictionary.
Examples
--------
Suppose we have the following configuration dictionary::
sim_params = {
"pntsrc_foreground": {"seed": "once", "nsrcs": 500},
"gains": {"seed": "once", "dly_rng": [-20, 20], "ret_vis": True},
"reflections": {"seed": "once", "dly_jitter": 10},
}
Invoking this method with ``**sim_params`` as its argument will simulate
visibilities appropriate for a sky with 500 point sources, generate
bandpass gains for each antenna and apply the effect to the foreground
data, then generate cable reflections with a Gaussian jitter in the
reflection delays with a standard deviation of 10 ns and apply the
effect to the data. The return value will be a list with one entry:
a dictionary mapping antenna numbers to their associated bandpass gains.
The same effect can be achieved by writing a YAML file that is loaded
into a dictionary formatted as above. See the :class:`Simulator` tutorial
for a more in-depth explanation of how to use this method.
"""
# make sure that only sim_file or sim_params are specified
if not (bool(sim_file) ^ bool(sim_params)):
raise ValueError(
"Either an absolute path to a simulation configuration "
"file or a dictionary of simulation parameters may be "
"passed, but not both. Please only pass one of the two."
)
# read the simulation file if provided
if sim_file is not None:
with open(sim_file) as config:
try:
sim_params = yaml.load(config.read(), Loader=yaml.FullLoader)
except Exception:
raise OSError("The configuration file was not able to be loaded.")
# loop over the entries in the configuration dictionary
for component, params in sim_params.items():
# make sure that the parameters are a dictionary
if not isinstance(params, dict):
raise TypeError(
f"The parameters for {component} are not formatted "
"properly. Please ensure that the parameters for "
"each component are specified using a dictionary."
)
# add the component to the data
value = self.add(component, **params)
# if the user wanted to return the data, then
if value is not None:
yield component, value
    def chunk_sim_and_save(
        self,
        save_dir,
        ref_files=None,
        Nint_per_file=None,
        prefix=None,
        sky_cmp=None,
        state=None,
        filetype="uvh5",
        clobber=True,
    ):
        """
        Chunk a simulation in time and write to disk.
        This function is a thin wrapper around :func:`~.io.chunk_sim_and_save`;
        please see that function's documentation for more information. All
        arguments other than the simulation data (``self.data``, passed as
        the first positional argument) are forwarded verbatim.
        """
        io.chunk_sim_and_save(
            self.data,
            save_dir,
            ref_files=ref_files,
            Nint_per_file=Nint_per_file,
            prefix=prefix,
            sky_cmp=sky_cmp,
            state=state,
            filetype=filetype,
            clobber=clobber,
        )
# -------------- Legacy Functions -------------- #
    @_add_depr
    def add_eor(self, model, **kwargs):
        """Add an EoR-like model to the visibilities.
        Legacy wrapper; delegates directly to :meth:`add` with all
        keyword arguments forwarded unchanged.
        """
        return self.add(model, **kwargs)
    @_add_depr
    def add_foregrounds(self, model, **kwargs):
        """Add foregrounds to the visibilities.
        Legacy wrapper; delegates directly to :meth:`add` with all
        keyword arguments forwarded unchanged.
        """
        return self.add(model, **kwargs)
    @_add_depr
    def add_noise(self, model, **kwargs):
        """Add thermal noise to the visibilities.
        Legacy wrapper; delegates directly to :meth:`add` with all
        keyword arguments forwarded unchanged.
        """
        return self.add(model, **kwargs)
    @_add_depr
    def add_rfi(self, model, **kwargs):
        """Add RFI to the visibilities.
        Legacy wrapper; delegates directly to :meth:`add` with all
        keyword arguments forwarded unchanged.
        """
        return self.add(model, **kwargs)
    @_add_depr
    def add_gains(self, **kwargs):
        """Apply bandpass gains to the visibilities.
        Legacy wrapper; equivalent to ``self.add("gains", **kwargs)``.
        """
        return self.add("gains", **kwargs)
@_add_depr
def add_sigchain_reflections(self, ants=None, **kwargs):
"""Apply reflections to the visibilities. See :meth:`add` for details."""
if ants is not None:
kwargs.update(vis_filter=ants)
return self.add("reflections", **kwargs)
@_add_depr
def add_xtalk(self, model="gen_whitenoise_xtalk", bls=None, **kwargs):
"""Add crosstalk to the visibilities. See :meth:`add` for more details."""
if bls is not None:
kwargs.update(vis_filter=bls)
return self.add(model, **kwargs)
    @staticmethod
    def _apply_filter(vis_filter, ant1, ant2, pol):
        """Determine whether to filter the visibility for (ant1, ant2, pol).
        Functionally, ``vis_filter`` specifies which (ant1, ant2, pol) tuples
        will have a simulated effect propagated through the ``_iteratively_apply``
        method. ``vis_filter`` acts as a logical equivalent of a passband filter.
        Parameters
        ----------
        vis_filter
            Either a polarization string, antenna number, baseline, antpairpol
            (baseline + polarization), collection of antenna numbers and/or
            polarization strings, or collection of such keys.
        ant1, ant2, pol
            Baseline + polarization to compare against the provided filter.
        Returns
        -------
        apply_filter
            False if the provided antpairpol satisfies any of the keys provided
            in ``vis_filter``; True otherwise. That is, True means "skip this
            antpairpol" and False means "simulate the effect for it". See
            examples for details.
        Examples
        --------
        ``vis_filter`` = (0,)
        returns: False for any baseline including antenna 0
        result: only baselines including antenna 0 have a simulated effect applied.
        ``vis_filter`` = ('xx',)
        returns: False if ``pol == "xx"`` else True
        result: only polarization "xx" has a simulated effect applied.
        ``vis_filter`` = (0, 1, 'yy')
        returns: False if ``(ant1, ant2, pol) in [(0, 1, 'yy'), (1, 0, 'yy)]``
        result: only baseline (0,1), or its conjugate, with polarization "yy" will
        have a simulated effect applied.
        """
        # If multiple keys are passed, do this recursively...
        multikey = any(isinstance(key, (list, tuple)) for key in vis_filter)
        if multikey:
            apply_filter = [
                Simulator._apply_filter(key, ant1, ant2, pol) for key in vis_filter
            ]
            return all(apply_filter)  # and approve if just one key fits.
        elif all(item is None for item in vis_filter):
            # Support passing a list of None.
            return False
        elif len(vis_filter) == 1:
            # For now, assume a string specifies a polarization.
            if isinstance(vis_filter[0], str):
                return not pol == vis_filter[0]
            # Otherwise, assume that this specifies an antenna.
            else:
                return vis_filter[0] not in (ant1, ant2)
        elif len(vis_filter) == 2:
            # TODO: This will need to be updated when we support ant strings.
            # Three cases: two pols; an ant+pol; a baseline.
            # If it's two polarizations, then make sure this pol is one of them.
            if all(isinstance(key, str) for key in vis_filter):
                return pol not in vis_filter
            # If it's an ant+pol, make sure both the antenna and pol are present.
            elif any(isinstance(key, str) for key in vis_filter):
                return not all(key in (ant1, ant2, pol) for key in vis_filter)
            # Otherwise, make sure the baseline is correct.
            # Either antenna ordering of the baseline is accepted.
            else:
                return not (
                    utils._listify(vis_filter) == [ant1, ant2]
                    or utils._listify(vis_filter) == [ant2, ant1]
                )
        elif len(vis_filter) == 3:
            # Assume it's a proper antpairpol.
            return not (
                utils._listify(vis_filter) == [ant1, ant2, pol]
                or utils._listify(vis_filter) == [ant2, ant1, pol]
            )
        else:
            # Assume it's some list of antennas/polarizations.
            # Ints are treated as antennas, strings as polarizations;
            # anything else in the list is silently ignored.
            pols = []
            ants = []
            for key in vis_filter:
                if isinstance(key, str):
                    pols.append(key)
                elif isinstance(key, int):
                    ants.append(key)
            # We want polarization and ant1 or ant2 in the filter.
            # This would be used in simulating e.g. a few feeds that have an
            # abnormally high system temperature.
            return not (pol in pols and (ant1 in ants or ant2 in ants))
def _calculate_reds(self, tol=1.0):
"""Calculate redundant groups and populate class attributes."""
groups, centers, lengths = self.data.get_redundancies(tol=tol)
self.red_grps = groups
self.red_vecs = centers
self.red_lengths = lengths
def _calculate_delay_filters(
self,
*,
standoff: float = 0.0,
delay_filter_type: Optional[str] = "gauss",
min_delay: Optional[float] = None,
max_delay: Optional[float] = None,
normalize: Optional[float] = None,
):
"""
Calculate delay filters for each redundant group.
Parameters
----------
standoff
Extra extent in delay that the filter extends out to in order to
allow for suprahorizon emission. Should be specified in nanoseconds.
Default buffer is zero.
delay_filter_type
String specifying the filter profile. See :func:`utils.gen_delay_filter`
for details.
min_delay
Minimum absolute delay of the filter, in nanoseconds.
max_delay
Maximum absolute delay of the filter, in nanoseconds.
normalize
Normalization of the filter such that the output power is the product
of the input power and the normalization factor.
See Also
--------
:func:`utils.gen_delay_filter`
"""
# Note that this is not the most efficient way of caching the filters;
# however, this is algorithmically very simple--just use one filter per
# redundant group. This could potentially be improved in the future,
# but it should work fine for our purposes.
for red_grp, bl_len in zip(self.red_grps, self.red_lengths):
bl_len_ns = bl_len / const.c.to("m/ns").value
bl_int = sorted(red_grp)[0]
delay_filter = utils.gen_delay_filter(
self.freqs,
bl_len_ns,
standoff=standoff,
delay_filter_type=delay_filter_type,
min_delay=min_delay,
max_delay=max_delay,
normalize=normalize,
)
self._filter_cache["delay"][bl_int] = delay_filter
def _calculate_fringe_filters(
self, *, fringe_filter_type: Optional[str] = "tophat", **filter_kwargs
):
"""
Calculate fringe-rate filters for all baselines.
Parameters
----------
fringe_filter_type
The fringe-rate filter profile.
filter_kwargs
Other parameters necessary for specifying the filter. These
differ based on the filter profile.
See Also
--------
:func:`utils.gen_fringe_filter`
"""
# This uses the same simplistic approach as the delay filter
# calculation does--just do one filter per redundant group.
for red_grp, (blx, _bly, _blz) in zip(self.red_grps, self.red_vecs):
ew_bl_len_ns = blx / const.c.to("m/ns").value
bl_int = sorted(red_grp)[0]
fringe_filter = utils.gen_fringe_filter(
self.lsts,
self.freqs,
ew_bl_len_ns,
fringe_filter_type=fringe_filter_type,
**filter_kwargs,
)
self._filter_cache["fringe"][bl_int] = fringe_filter
def _initialize_data(self, data: Optional[Union[str, Path, UVData]], **kwargs):
"""
Initialize the ``data`` attribute with a ``UVData`` object.
Parameters
----------
data
Either a ``UVData`` object or a path-like object to a file
that can be loaded into a ``UVData`` object. If not provided,
then sufficient keywords for initializing a ``UVData`` object
must be provided. See :func:`io.empty_uvdata` for more
information on which keywords are needed.
Raises
------
TypeError
If the provided value for ``data`` is not an object that can
be cast to a ``UVData`` object.
"""
if data is None:
self.data = io.empty_uvdata(**kwargs)
elif isinstance(data, (str, Path)):
self.data = self._read_datafile(data, **kwargs)
self.data.extra_keywords["data_file"] = data
elif isinstance(data, UVData):
self.data = data
else:
raise TypeError(
"data type not understood. Only a UVData object or a path to "
"a UVData-compatible file may be passed as the data parameter. "
"Otherwise, keywords must be provided to build a UVData object."
)
if not self.data.future_array_shapes: # pragma: nocover
self.data.use_future_array_shapes()
def _initialize_args_from_model(self, model):
"""
Retrieve the LSTs and/or frequencies required for a model.
Parameters
----------
model: callable
Model whose argspec is to be inspected and recovered.
Returns
-------
model_params: dict
Dictionary mapping positional argument names to either an
``inspect._empty`` object or the relevant parameters pulled
from the ``Simulator`` object. The only parameters that are
not ``inspect._empty`` are "lsts" and "freqs", should they
appear in the model's argspec.
Examples
--------
Suppose we have the following function::
def func(freqs, ants, other=None):
pass
The returned object would be a dictionary with keys ``freqs`` and
``ants``, with the value for ``freqs`` being ``self.freqs`` and
the value for ``ants`` being ``inspect._empty``. Since ``other``
has a default value, it will not be in the returned dictionary.
"""
model_params = self._get_model_parameters(model)
model_params = {
k: v
for k, v in model_params.items()
if v is inspect._empty or k in model.attrs_to_pull
}
# Pull any attributes from the Simulator that are required.
args = {}
for param, value in model_params.items():
if hasattr(self, param) and value in (None, inspect._empty):
args[param] = getattr(self, param)
model_params.update(args)
return model_params
def _iterate_antpair_pols(self):
"""Loop through all baselines and polarizations."""
for ant1, ant2, pol in self.data.get_antpairpols():
blt_inds = self.data.antpair2ind((ant1, ant2))
pol_ind = self.data.get_pols().index(pol)
if blt_inds is not None:
yield ant1, ant2, pol, blt_inds, pol_ind
def _iteratively_apply(
self,
model: SimulationComponent,
*,
add_vis: bool = True,
ret_vis: bool = False,
seed: str | int | None = None,
vis_filter: Sequence | None = None,
antpairpol_cache: Sequence[AntPairPol] | None = None,
model_key: str | None = None,
**kwargs,
) -> Union[np.ndarray, dict[int, np.ndarray]] | None:
"""
Simulate an effect for an entire array.
This method loops over every baseline and polarization in order
to simulate the effect ``model`` for the full array. The result
is optionally applied to the simulation's data and/or returned.
Parameters
----------
model
Callable model used to simulate an effect.
add_vis
Whether to apply the effect to the simulation data. Default
is to apply the effect.
ret_vis
Whether to return the simulated effect. Default is to not
return the effect. Type of returned object depends on whether
the effect is multiplicative or not.
seed
Either an integer specifying the seed to be used in setting
the random state, or one of a select few keywords. Default
is to use the current random state. See :meth:`_seed_rng`
for descriptions of the supported seeding modes.
vis_filter
List of antennas, baselines, polarizations, antenna-polarization
pairs, or antpairpols for which to simulate the effect. This
specifies which of the above the effect is to be simulated for,
and anything that does not meet the keys specified in this list
does not have the effect applied to it. See :meth:`_apply_filter`
for more details.
antpairpol_cache
List of (ant1, ant2, pol) tuples specifying which antpairpols have
already had the effect simulated. Not intended for use by the
typical end-user.
model_key
String identifying the model component being computed. This is
handed around to ensure that random number generation schemes using
the "initial" seeding routine can be recovered via ``self.get``.
kwargs
Extra parameters passed to ``model``.
Returns
-------
effect: np.ndarray or dict
The simulated effect. Only returned if ``ret_vis`` is set to True.
If the effect is *not* multiplicative, then the returned object
is an ndarray; otherwise, a dictionary mapping antenna numbers
to ndarrays is returned.
"""
# There's nothing to do if we're neither adding nor returning.
if not add_vis and not ret_vis:
warnings.warn(
"You have chosen to neither add nor return the effect "
"you are trying to simulate, so nothing will be "
f"computed. This warning was raised for the model: {model_key}",
stacklevel=2,
)
return
# Initialize the antpairpol cache if we need to.
if antpairpol_cache is None:
antpairpol_cache = []
# Pull relevant parameters from Simulator.
# Also make placeholders for antenna/baseline dependent parameters.
base_args = self._initialize_args_from_model(model)
# Get a copy of the data array.
data_copy = self.data.data_array.copy()
# Pull useful auxilliary parameters.
is_multiplicative = getattr(model, "is_multiplicative", None)
is_smooth_in_freq = getattr(model, "is_smooth_in_freq", True)
if is_multiplicative is None:
warnings.warn(
"You are attempting to compute a component but have "
"not specified an ``is_multiplicative`` attribute for "
"the component. The component will be added under "
"the assumption that it is *not* multiplicative.",
stacklevel=2,
)
is_multiplicative = False
# Pre-simulate gains.
if is_multiplicative:
gains = {}
args = self._update_args(base_args, model)
args.update(kwargs)
for pol in self.data.get_feedpols():
if seed:
seed, rng = self._seed_rng(
seed, model, pol=pol, model_key=model_key
)
args["rng"] = rng
polarized_gains = model(**args)
for ant, gain in polarized_gains.items():
gains[(ant, pol)] = gain
# Determine whether to use cached filters, and which ones to use if so.
model_kwargs = getattr(model, "kwargs", {})
use_cached_filters = any("filter" in key for key in model_kwargs)
get_delay_filter = (
is_smooth_in_freq
and "delay_filter_kwargs" not in kwargs
and "delay_filter_kwargs" in model_kwargs
and bool(self._filter_cache["delay"])
)
get_fringe_filter = (
"fringe_filter_kwargs" not in kwargs
and "fringe_filter_kwargs" in model_kwargs
and bool(self._filter_cache["fringe"])
)
use_cached_filters &= get_delay_filter or get_fringe_filter
if model.return_type == "full_array":
args = self._update_args(base_args, model)
args.update(kwargs)
if seed:
if seed == "redundant":
warnings.warn(
"You are trying to set the random state once per "
"redundant group while simulating an effect that "
"computes the entire visibility matrix in one go. "
"Any randomness in the simulation component may not "
"come out as expected--please check your settings."
f"This warning was raised for model: {model_key}",
stacklevel=2,
)
seed, rng = self._seed_rng(model, model_key=model_key)
args["rng"] = rng
data_copy += model(**args)
else:
# Iterate over the array and simulate the effect as-needed.
for ant1, ant2, pol, blt_inds, pol_ind in self._iterate_antpair_pols():
# Determine whether or not to filter the result.
apply_filter = self._apply_filter(
utils._listify(vis_filter), ant1, ant2, pol
)
if apply_filter:
continue
# Check if this antpairpol or its conjugate have been simulated.
bl_in_cache = (ant1, ant2, pol) in antpairpol_cache
conj_in_cache = (ant2, ant1, pol) in antpairpol_cache
# Seed the random number generator.
key = (ant2, ant1, pol) if conj_in_cache else (ant1, ant2, pol)
seed, rng = self._seed_rng(seed, model, *key, model_key=model_key)
# Prepare the actual arguments to be used.
use_args = self._update_args(base_args, model, ant1, ant2, pol)
use_args.update(kwargs)
if model.is_randomized:
use_args["rng"] = rng
if use_cached_filters:
filter_kwargs = self._get_filters(
ant1,
ant2,
get_delay_filter=get_delay_filter,
get_fringe_filter=get_fringe_filter,
)
use_args.update(filter_kwargs)
# Cache simulated antpairpols if not filtered out.
if not (bl_in_cache or conj_in_cache or apply_filter):
antpairpol_cache.append((ant1, ant2, pol))
# Check whether we're simulating a gain or a visibility.
if is_multiplicative:
# Calculate the complex gain, but only apply it if requested.
gain = gains[(ant1, pol[0])] * np.conj(gains[(ant2, pol[1])])
data_copy[blt_inds, :, pol_ind] *= gain
else:
# I don't think this will ever be executed, but just in case...
if conj_in_cache and seed is None: # pragma: no cover
conj_blts = self.data.antpair2ind((ant2, ant1))
vis = (data_copy - self.data.data_array)[
conj_blts, :, pol_ind
].conj()
else:
vis = model(**use_args)
# and add it in
data_copy[blt_inds, :, pol_ind] += vis
# return the component if desired
# this is a little complicated, but it's done this way so that
# there aren't *three* copies of the data array floating around
# this is to minimize the potential of triggering a MemoryError
if ret_vis:
# return the gain dictionary if gains are simulated
if is_multiplicative:
return gains
data_copy -= self.data.data_array
# the only time we're allowed to have add_vis be False is
# if ret_vis is True, and nothing happens if both are False
# so this is the *only* case where we'll have to reset the
# data array
if add_vis:
self.data.data_array += data_copy
# otherwise return the actual visibility simulated
return data_copy
else:
self.data.data_array = data_copy
@staticmethod
def _read_datafile(datafile: Union[str, Path], **kwargs) -> UVData:
"""Read a file as a ``UVData`` object.
Parameters
----------
datafile
Path to a file containing visibility data readable by ``pyuvdata``.
**kwargs
Arguments passed to the ``UVData.read`` method.
Returns
-------
UVData
The read-in data object.
"""
uvd = UVData()
uvd.read(datafile, read_data=True, **kwargs)
return uvd
    def _seed_rng(self, seed, model, ant1=None, ant2=None, pol=None, model_key=None):
        """
        Set the random state according to the provided parameters.
        This is a helper function intended to be used solely in the
        :meth:`_iteratively_apply` method. It exists in order to ensure that
        the simulated data is as realistic as possible, assuming the user
        understands the proper choice of seeding method to use for the
        various effects that can be simulated.
        Parameters
        ----------
        seed
            Either the random seed to use (when provided as an integer),
            or one of the following keywords:
            ``"once"``:
                The random state is set to the same value for
                every baseline and polarization; one unique seed is
                created for each model that uses this seeding mode.
                This is recommended for simulating point-source foregrounds
                and per-antenna effects.
            ``"redundant"``:
                The random state is only uniquely set once per redundant
                group for a given model. This is recommended for simulating
                diffuse foregrounds and the reionization signal.
            ``"initial"``:
                The random state is set at the very beginning of the
                iteration over the array. This is essentially the same as
                using a seeding mode of ``None``, though not identical.
                This is recommended for simulating thermal noise, or for
                simulating an effect that has a random component that
                changes between baselines.
        model
            Name of the model for which to either recover or cache the seed.
            This is used to lookup random state seeds in the :attr:`_seeds`
            dictionary.
        ant1
            First antenna in the baseline.
        ant2
            Second antenna in the baseline (for baseline-dependent effects).
        pol
            Polarization string.
        model_key
            Identifier for retrieving the model parameters from the
            ``self._components`` attribute. This is only needed for ensuring
            that random effects using the "initial" seed can be recovered
            with the ``self.get`` method.
        Returns
        -------
        updated_seed
            Either the input seed or ``None``, depending on the provided seed.
            This is just used to ensure that the logic for setting the random
            state in the :meth:`_iteratively_apply` routine works out.
        rng
            The random number generator to be used for producing the random effect.
        Raises
        ------
        TypeError
            The provided seed is not ``None``, an integer, or a string.
        ValueError
            Two cases: one, the ``"redundant"`` seeding mode is being used
            and a baseline isn't provided; two, the seed is a string, but
            is not one of the supported seeding modes.
        """
        model_key = model_key or self._get_model_name(model)
        if seed is None:
            # Re-use the generator previously stored for this model (set by
            # the "initial" mode below), falling back to a fresh, unseeded one.
            rng = self._components[model_key].get("rng", np.random.default_rng())
            return (None, rng)
        if isinstance(seed, int):
            # An explicit integer seed is used directly.
            return (seed, np.random.default_rng(seed))
        if not isinstance(seed, str):
            raise TypeError(
                "The seeding mode must be specified as a string or integer. "
                "If an integer is provided, then it will be used as the seed."
            )
        if seed == "redundant":
            if ant1 is None or ant2 is None:
                raise ValueError(
                    "A baseline must be specified in order to "
                    "seed by redundant group."
                )
            # Determine the key for the redundant group this baseline is in.
            # The group is identified by its first baseline integer.
            bl_int = self.data.antnums_to_baseline(ant1, ant2)
            key = (next(reds for reds in self.red_grps if bl_int in reds)[0],)
            if pol:
                key += (pol,)
            # seed the RNG accordingly
            seed = self._get_seed(model_key, key)
            return ("redundant", np.random.default_rng(seed))
        elif seed == "once":
            # this option seeds the RNG once per iteration of
            # _iteratively_apply, using the same seed every time
            # this is appropriate for antenna-based gains (where the
            # entire gain dictionary is simulated each time), or for
            # something like PointSourceForeground, where objects on
            # the sky are being placed randomly
            key = (pol,) if pol else 0
            seed = self._get_seed(model_key, key)
            return ("once", np.random.default_rng(seed))
        elif seed == "initial":
            # this seeds the RNG once at the very beginning of
            # _iteratively_apply. this would be useful for something
            # like ThermalNoise
            key = (pol,) if pol else -1
            rng = np.random.default_rng(self._get_seed(model_key, key))
            # Store the generator so a later ``seed=None`` call (and
            # ``self.get``) can recover the same random state.
            self._components[model_key]["rng"] = rng
            return (None, rng)
        else:
            raise ValueError("Seeding mode not supported.")
def _update_args(self, args, model, ant1=None, ant2=None, pol=None):
"""
Scan the provided arguments and pull data as necessary.
This method searches the provided dictionary for various positional
arguments that can be determined by data stored in the ``Simulator``
instance. Please refer to the source code to see what argument
names are searched for and how their values are obtained.
Parameters
----------
args: dict
Dictionary mapping names of positional arguments to either
a value pulled from the ``Simulator`` instance or an
``inspect._empty`` object. See .. meth: _initialize_args_from_model
for details on what to expect (these two methods are always
called in conjunction with one another).
model: SimulationComponent
The model being simulated. The model will define which attributes
should be pulled from the ``Simulator``.
ant1: int, optional
Required parameter if an autocorrelation visibility or a baseline
vector is in the keys of ``args``.
ant2: int, optional
Required parameter if a baseline vector is in the keys of ``args``.
pol: str, optional
Polarization string. Currently not used.
"""
# TODO: review this and see if there's a smarter way to do it.
new_params = {}
for param, attr in model.attrs_to_pull.items():
if param in ("autovis", "autovis_i"):
new_params[param] = self.data.get_data(ant1, ant1, pol)
elif param == "autovis_j":
new_params[param] = self.data.get_data(ant2, ant2, pol)
elif param == "bl_vec":
bl_vec = self.antpos[ant2] - self.antpos[ant1]
new_params[param] = bl_vec / const.c.to("m/ns").value
elif param == "antpair":
new_params[param] = (ant1, ant2)
else:
# The parameter can be retrieved directly from the Simulator
new_params[param] = getattr(self, attr)
use_args = args.copy()
use_args.update(new_params)
return use_args
def _get_filters(
    self,
    ant1: int,
    ant2: int,
    *,
    get_delay_filter: bool = True,
    get_fringe_filter: bool = True,
) -> dict[str, np.ndarray]:
    """
    Retrieve delay and fringe filters from the cache.
    Parameters
    ----------
    ant1
        First antenna in the baseline.
    ant2
        Second antenna in the baseline.
    get_delay_filter
        Whether to retrieve the delay filter.
    get_fringe_filter
        Whether to retrieve the fringe filter.
    Returns
    -------
    filters
        Dictionary containing the fringe and delay filters that
        have been pre-calculated for the provided baseline.
    """
    filters = {}
    if not get_delay_filter and not get_fringe_filter:
        # Save some CPU cycles.
        return filters
    bl_int = self.data.antnums_to_baseline(ant1, ant2)
    conj_bl_int = self.data.antnums_to_baseline(ant2, ant1)
    is_conj = False
    # Filters are cached per redundant group, keyed by the smallest baseline
    # integer in the group; the conjugate baseline is also matched.
    # NOTE(review): if the baseline belongs to no redundant group, ``key`` is
    # never bound and the cache lookups below raise NameError — confirm every
    # baseline is guaranteed to appear in ``self.red_grps``.
    for red_grp in self.red_grps:
        if bl_int in red_grp:
            key = sorted(red_grp)[0]
            break
        if conj_bl_int in red_grp:
            key = sorted(red_grp)[0]
            is_conj = True
            break
    if get_delay_filter:
        delay_filter = self._filter_cache["delay"][key]
        filters["delay_filter_kwargs"] = {"delay_filter": delay_filter}
    if get_fringe_filter:
        fringe_filter = self._filter_cache["fringe"][key]
        if is_conj:
            # Fringes are seen to move in the opposite direction.
            fringe_filter = fringe_filter[::-1, :]
        filters["fringe_filter_kwargs"] = {"fringe_filter": fringe_filter}
    return filters
@staticmethod
def _get_model_parameters(model):
"""Retrieve the full model signature (init + call) parameters."""
init_params = inspect.signature(model.__class__).parameters
call_params = inspect.signature(model).parameters
# this doesn't work correctly if done on one line
model_params = {}
for params in (call_params, init_params):
for parameter, value in params.items():
model_params[parameter] = value.default
model_params.pop("kwargs", None)
return model_params
@staticmethod
def _get_component(
    component: Union[str, type[SimulationComponent], SimulationComponent],
) -> Union[SimulationComponent, type[SimulationComponent]]:
    """Normalize a component to be either a class or instance."""
    if isinstance(component, str):
        # Resolve a registered model name to its class.
        try:
            return get_model(component)
        except KeyError:
            raise ValueError(
                f"The model {component!r} does not exist. The following models are "
                f"available: \n{list_all_components()}."
            )
    elif isinstance(component, SimulationComponent):
        # Already an instance; pass through unchanged.
        return component
    else:
        # ``issubclass`` raises TypeError for non-classes; suppress it so the
        # informative TypeError below is raised instead.
        with contextlib.suppress(TypeError):
            if issubclass(component, SimulationComponent):
                return component
        raise TypeError(
            "The input type for the component was not understood. "
            "Must be a string, or a class/instance of type 'SimulationComponent'. "
            f"Available component models are:\n{list_all_components()}"
        )
def _generate_seed(self, model, key):
"""Generate a random seed and cache it in the ``self._seeds`` attribute.
Parameters
----------
model
The name of the model to retrieve the random seed for, as it would
appear in the ``self._components`` attribute. (This should always
correspond to the ``model_key`` determined in the ``self.add`` method.)
key
The key to use for tracking the random seed. This is only really
used for keeping track of random seeds that are set per polarization
or per redundant group.
"""
# Just to make it extra random.
rng = np.random.default_rng()
if model not in self._seeds:
self._seeds[model] = {}
self._seeds[model][key] = rng.integers(2**32)
def _get_seed(self, model, key):
    """Retrieve or generate a random seed given a model and key.
    Parameters
    ----------
    model
        The name of the model to retrieve the random seed for, as it would
        appear in the ``self._components`` attribute. (This should always
        correspond to the ``model_key`` determined in the ``self.add`` method.)
    key
        The key to use for tracking the random seed. This is only really
        used for keeping track of random seeds that are set per polarization
        or per redundant group.
    Returns
    -------
    seed
        The random seed to use for setting the random state.
    """
    # Lazily create the seed on first access for this (model, key) pair.
    if model not in self._seeds:
        self._generate_seed(model, key)
    if key not in self._seeds[model]:
        self._generate_seed(model, key)
    return self._seeds[model][key]
@staticmethod
def _get_model_name(model):
"""Find out the (lowercase) name of a provided model."""
if isinstance(model, str):
return model.lower()
elif isinstance(model, SimulationComponent):
return model.__class__.__name__.lower()
else:
with contextlib.suppress(TypeError):
if issubclass(model, SimulationComponent):
return model.__name__.lower()
raise TypeError(
"You are trying to simulate an effect using a custom function. "
"Please refer to the tutorial for instructions regarding how "
"to define new simulation components compatible with the Simulator."
)
def _parse_key(self, key: Union[int, str, AntPair, AntPairPol]) -> AntPairPol:
    """Convert a key of at-most length-3 to an (ant1, ant2, pol) tuple."""
    # All recognized polarization strings (visibility pols, Jones terms and
    # conjugate pols), lower-cased for case-insensitive validation.
    valid_pols = {
        k.lower()
        for k in {
            **uvutils.POL_STR2NUM_DICT,
            **uvutils.JONES_STR2NUM_DICT,
            **uvutils.CONJ_POL_DICT,
        }
    }
    # East/north Jones naming is also accepted.
    valid_pols.update({"jee", "jen", "jne", "jnn"})

    def checkpol(pol):
        # Validate a polarization string; ``None`` passes through unchanged.
        if pol is None:
            return None
        if not isinstance(pol, str):
            raise TypeError(f"Invalid polarization type: {type(pol)}.")
        if pol.lower() not in valid_pols:
            raise ValueError(f"Invalid polarization string: {pol}.")
        return pol

    if key is None:
        ant1, ant2, pol = None, None, None
    elif np.issubdtype(type(key), np.integer):
        # Figure out if it's an antenna or baseline integer
        if key in self.antpos:
            ant1, ant2, pol = key, None, None
        else:
            ant1, ant2 = self.data.baseline_to_antnums(key)
            pol = None
    elif isinstance(key, str):
        if key.lower() in ("auto", "cross"):
            raise NotImplementedError("Functionality not yet supported.")
        key = checkpol(key)
        ant1, ant2, pol = None, None, key
    else:

        def intify(x):
            # Cast to int, leaving ``None`` untouched.
            return x if x is None else int(x)

        try:
            iter(key)  # ensure it's iterable
            if len(key) not in (2, 3):
                raise TypeError
            if len(key) == 2:
                if all(isinstance(val, int) for val in key):
                    # Interpreted as an (ant1, ant2) pair.
                    ant1, ant2 = key
                    pol = None
                else:
                    # Interpreted as an (ant1, pol) pair.
                    ant1, pol = intify(key[0]), checkpol(key[1])
                    ant2 = None
            else:
                ant1, ant2, pol = intify(key[0]), intify(key[1]), checkpol(key[2])
        except TypeError:
            raise ValueError(
                "Key must be an integer, string, antenna pair, or antenna "
                f"pair with a polarization string. Got {key}."
            )
    return ant1, ant2, pol
def _sanity_check(self, model):
    """Check that simulation components are applied sensibly.

    Warns (does not raise) when a multiplicative effect is applied before any
    visibilities exist, or when visibilities are added after a multiplicative
    effect has already been applied.
    """
    # Has anything been simulated into the data array yet?
    has_data = not np.all(self.data.data_array == 0)
    is_multiplicative = getattr(model, "is_multiplicative", False)
    # Whether any previously-added component was multiplicative (e.g. gains).
    contains_multiplicative_effect = any(
        self._get_component(component["alias"]).is_multiplicative
        for component in self._components.values()
    )
    if is_multiplicative and not has_data:
        warnings.warn(
            "You are trying to compute a multiplicative "
            "effect, but no visibilities have been simulated yet.",
            stacklevel=1,
        )
    elif not is_multiplicative and contains_multiplicative_effect:
        warnings.warn(
            "You are adding visibilities to a data array "
            "*after* multiplicative effects have been introduced.",
            stacklevel=1,
        )
def _update_history(self, model, **kwargs):
    """Record the component simulated and its parameters in the history.

    Parameters
    ----------
    model
        The model that was simulated; its lowercase name is resolved via
        ``_get_model_name``.
    **kwargs
        The parameters the model was simulated with. The special key
        ``vis_filter``, if present, lists the antennas/baselines/pols the
        effect was restricted to.
    """
    component = self._get_model_name(model)
    vis_filter = kwargs.pop("vis_filter", None)
    msg = f"hera_sim v{__version__}: Added {component} using parameters:\n"
    for param, value in defaults._unpack_dict(kwargs).items():
        msg += f"{param} = {value}\n"
    if vis_filter is not None:
        msg += "Effect simulated for the following antennas/baselines/pols:\n"
        # Bug fix: entries may be integers (antennas/baselines) as well as
        # strings (pols); str.join raises TypeError on non-str items, so
        # coerce each entry first.
        msg += ", ".join(str(entry) for entry in vis_filter)
    self.data.history += msg
def _update_seeds(self, model_name=None):
"""Update the seeds in the extra_keywords property."""
seed_dict = {}
for component, seeds in self._seeds.items():
if model_name is not None and component != model_name:
continue
if len(seeds) == 1:
seed = list(seeds.values())[0]
key = "_".join([component, "seed"])
seed_dict[key] = seed
else:
# This should only be raised for seeding by redundancy.
# Each redundant group is denoted by the *first* baseline
# integer for the particular redundant group. See the
# _generate_redundant_seeds method for reference.
for bl_int, seed in seeds.items():
key = "_".join([component, "seed", str(bl_int)])
seed_dict[key] = seed
# Now actually update the extra_keywords dictionary.
self.data.extra_keywords.update(seed_dict)
def _validate_get_request(
    self, model: Component, ant1: int, ant2: int, pol: str
) -> None:
    """Verify that the provided antpairpol is appropriate given the model."""
    # Multiplicative effects (e.g. gains) are per-feed; other effects are
    # per-visibility, so the relevant polarization set differs.
    if getattr(model, "is_multiplicative", False):
        pols = self.data.get_feedpols()
        pol_type = "Feed"
    else:
        pols = self.pols
        pol_type = "Visibility"
    if ant1 is None and ant2 is None:
        # No antennas specified: only the polarization needs validating.
        if pol is None or pol in pols:
            return
        else:
            raise ValueError(f"{pol_type} polarization {pol} not found.")
    if pol is not None and pol not in pols:
        raise ValueError(f"{pol_type} polarization {pol} not found.")
    if getattr(model, "is_multiplicative", False):
        # Per-antenna effect: at most one antenna identifies it.
        if ant1 is not None and ant2 is not None:
            raise ValueError(
                "At most one antenna may be specified when retrieving "
                "a multiplicative effect."
            )
    else:
        # Per-baseline effect: need both antennas or neither.
        if (ant1 is None) ^ (ant2 is None):
            raise ValueError(
                "Either no antennas or a pair of antennas must be provided "
                "when retrieving a non-multiplicative effect."
            )
    # NOTE(review): for a multiplicative effect requested with a single
    # antenna, ``ant2`` is None here and ``None not in self.antpos`` makes
    # this raise — confirm that is the intended behavior.
    if ant1 not in self.antpos or ant2 not in self.antpos:
        raise ValueError("At least one antenna is not in the array layout.")
|
HERA-TeamREPO_NAMEhera_simPATH_START.@hera_sim_extracted@hera_sim-main@hera_sim@simulate.py@.PATH_END.py
|
{
"filename": "_dtickrange.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergl/marker/colorbar/tickformatstop/_dtickrange.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``dtickrange`` property of scattergl colorbar tickformatstops."""

    def __init__(
        self,
        plotly_name="dtickrange",
        parent_name="scattergl.marker.colorbar.tickformatstop",
        **kwargs,
    ):
        # Two-element array; each element may hold any value, and edits
        # trigger a recalculation unless the caller overrides these defaults.
        default_items = [
            {"editType": "calc", "valType": "any"},
            {"editType": "calc", "valType": "any"},
        ]
        super(DtickrangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            items=kwargs.pop("items", default_items),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergl@marker@colorbar@tickformatstop@_dtickrange.py@.PATH_END.py
|
{
"filename": "_sizemode.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattermapbox/marker/_sizemode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizemodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``sizemode`` property of scattermapbox markers."""

    def __init__(
        self, plotly_name="sizemode", parent_name="scattermapbox.marker", **kwargs
    ):
        # Marker size may scale by diameter or by area.
        allowed_values = kwargs.pop("values", ["diameter", "area"])
        super(SizemodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            values=allowed_values,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattermapbox@marker@_sizemode.py@.PATH_END.py
|
{
"filename": "spectrum_likelihood.py",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/threeML/utils/spectrum/spectrum_likelihood.py",
"type": "Python"
}
|
import copy
from builtins import object
from typing import Optional
import numba as nb
import numpy as np
from threeML.io.logging import setup_logger
from threeML.utils.numba_utils import nb_sum
from threeML.utils.statistics.likelihood_functions import (
half_chi2, poisson_log_likelihood_ideal_bkg,
poisson_observed_gaussian_background, poisson_observed_poisson_background)
log = setup_logger(__name__)
# These classes provide likelihood evaluation to SpectrumLike and children
_known_noise_models = {}
class BinnedStatistic(object):
    """Base class for binned-spectrum likelihoods.

    Subclasses implement ``get_current_value`` and override the
    ``get_randomized_*`` hooks as appropriate for their statistic.
    """

    def __init__(self, spectrum_plugin):
        """
        A class to hold the likelihood call and randomization of spectrum counts

        :param spectrum_plugin: the spectrum plugin to call
        """
        self._spectrum_plugin = spectrum_plugin

    def get_current_value(self):
        # Bug fix: the original built the exception object but never raised
        # it, so the base implementation silently returned None instead of
        # signaling the missing override.
        raise RuntimeError("must be implemented in subclass")

    def get_randomized_source_counts(self, source_model_counts):
        # Subclasses override; by default no randomized counts are produced.
        return None

    def get_randomized_source_errors(self):
        return None

    def get_randomized_background_counts(self):
        return None

    def get_randomized_background_errors(self):
        return None
class GaussianObservedStatistic(BinnedStatistic):
    """Chi-square likelihood: Gaussian-distributed observed counts, no background."""

    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        # -log L is half the chi-square of the data against the model counts.
        model_counts = self._spectrum_plugin.get_model(precalc_fluxes=precalc_fluxes)
        chi2_ = half_chi2(
            self._spectrum_plugin.current_observed_counts,
            self._spectrum_plugin.current_observed_count_errors,
            model_counts,
        )
        assert np.all(np.isfinite(chi2_))
        # Second element (profiled background model) is None: no background.
        return nb_sum(chi2_) * (-1), None

    def get_randomized_source_counts(self, source_model_counts):
        if not np.isfinite(source_model_counts[0]):
            source_model_counts[0] = 0
            log.warning("simulated spectrum had infinite counts in first channel")
            log.warning("setting to ZERO")
        # Only draw Gaussian variates where the error is positive; channels
        # with zero error stay at zero counts.
        idx = self._spectrum_plugin.observed_count_errors > 0
        randomized_source_counts = np.zeros_like(source_model_counts)
        randomized_source_counts[idx] = np.random.normal(
            loc=source_model_counts[idx],
            scale=self._spectrum_plugin.observed_count_errors[idx],
        )
        # Issue a warning if the generated background is less than zero, and fix it by placing it at zero
        idx = randomized_source_counts < 0  # type: np.ndarray
        negative_source_n = nb_sum(idx)
        if negative_source_n > 0:
            log.warning(
                "Generated source has negative counts "
                "in %i channels. Fixing them to zero" % (negative_source_n)
            )
            randomized_source_counts[idx] = 0
        return randomized_source_counts

    def get_randomized_source_errors(self):
        return self._spectrum_plugin.observed_count_errors
class PoissonObservedIdealBackgroundStatistic(BinnedStatistic):
    """Poisson-observed counts with a perfectly-known ("ideal") background."""

    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        # In this likelihood the background becomes part of the model, which means that
        # the uncertainty in the background is completely neglected
        model_counts = self._spectrum_plugin.get_model(precalc_fluxes=precalc_fluxes)
        loglike, _ = poisson_log_likelihood_ideal_bkg(
            self._spectrum_plugin.current_observed_counts,
            self._spectrum_plugin.current_scaled_background_counts,
            model_counts,
        )
        return nb_sum(loglike), None

    def get_randomized_source_counts(self, source_model_counts):
        # Randomize expectations for the source
        # we want the unscalled background counts
        # TODO: check with giacomo if this is correct!
        if not np.isfinite(source_model_counts[0]):
            source_model_counts[0] = 0
            log.warning("simulated spectrum had infinite counts in first channel")
            log.warning("setting to ZERO")
        # Total counts are Poisson-distributed about source + background.
        randomized_source_counts = np.random.poisson(
            source_model_counts + self._spectrum_plugin._background_counts
        )
        return randomized_source_counts

    def get_randomized_background_counts(self):
        # No randomization for the background in this case
        randomized_background_counts = self._spectrum_plugin._background_counts
        return randomized_background_counts
class PoissonObservedModeledBackgroundStatistic(BinnedStatistic):
    """Poisson-observed counts with an explicitly modeled (fitted) background plugin."""

    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        # In this likelihood the background becomes part of the model, which means that
        # the uncertainty in the background is completely neglected
        model_counts = self._spectrum_plugin.get_model(precalc_fluxes=precalc_fluxes)
        # we scale the background model to the observation
        background_model_counts = (
            self._spectrum_plugin.get_background_model()
            * self._spectrum_plugin.scale_factor
        )
        loglike, _ = poisson_log_likelihood_ideal_bkg(
            self._spectrum_plugin.current_observed_counts,
            background_model_counts,
            model_counts,
        )
        # The background plugin contributes its own likelihood term.
        bkg_log_like = self._spectrum_plugin.background_plugin.get_log_like()
        total_log_like = nb_sum(loglike) + bkg_log_like
        return total_log_like, None

    def get_randomized_source_counts(self, source_model_counts):
        # first generate random source counts from the plugin
        self._synthetic_background_plugin = (
            self._spectrum_plugin.background_plugin.get_simulated_dataset()
        )
        if not np.isfinite(source_model_counts[0]):
            source_model_counts[0] = 0
            log.warning("simulated spectrum had infinite counts in first channel")
            log.warning("setting to ZERO")
        randomized_source_counts = np.random.poisson(
            source_model_counts + self._synthetic_background_plugin.observed_counts
        )
        return randomized_source_counts

    def get_randomized_background_errors(self):
        randomized_background_count_err = None
        # Only non-Poisson (Gaussian) background spectra carry count errors.
        if not self._synthetic_background_plugin.observed_spectrum.is_poisson:
            randomized_background_count_err = (
                self._synthetic_background_plugin.observed_count_errors
            )
        return randomized_background_count_err

    @property
    def synthetic_background_plugin(self):
        # The most recently simulated background dataset.
        return self._synthetic_background_plugin
class PoissonObservedNoBackgroundStatistic(BinnedStatistic):
    """Poisson-observed counts with no background (Cash statistic)."""

    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        # In this likelihood the background becomes part of the model, which means that
        # the uncertainty in the background is completely neglected
        model_counts = self._spectrum_plugin.get_model(precalc_fluxes=precalc_fluxes)
        # Zero background: reuse the ideal-background likelihood with b_i = 0.
        background_model_counts = np.zeros_like(model_counts)
        loglike, _ = poisson_log_likelihood_ideal_bkg(
            self._spectrum_plugin.current_observed_counts,
            background_model_counts,
            model_counts,
        )
        return nb_sum(loglike), None

    def get_randomized_source_counts(self, source_model_counts):
        # Randomize expectations for the source
        # we want the unscalled background counts
        if not np.isfinite(source_model_counts[0]):
            source_model_counts[0] = 0
            log.warning("simulated spectrum had infinite counts in first channel")
            log.warning("setting to ZERO")
        randomized_source_counts = np.random.poisson(source_model_counts)
        return randomized_source_counts
class PoissonObservedPoissonBackgroundStatistic(BinnedStatistic):
    """Profile likelihood: Poisson-observed counts with Poisson-distributed background."""

    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        # Scale factor between source and background spectrum
        model_counts = self._spectrum_plugin.get_model(precalc_fluxes=precalc_fluxes)
        # The background rates are profiled out analytically; the profiled
        # background model is returned alongside the log-likelihood.
        loglike, bkg_model = poisson_observed_poisson_background(
            self._spectrum_plugin.current_observed_counts,
            self._spectrum_plugin.current_background_counts,
            self._spectrum_plugin.scale_factor,
            model_counts,
        )
        return nb_sum(loglike), bkg_model

    def get_randomized_source_counts(self, source_model_counts):
        # Since we use a profile likelihood, the background model is conditional on the source model, so let's
        # get it from the likelihood function
        _, background_model_counts = self.get_current_value()
        if not np.isfinite(source_model_counts[0]):
            source_model_counts[0] = 0
            log.warning("simulated spectrum had infinite counts in first channel")
            log.warning("setting to ZERO")
        # Now randomize the expectations
        # Randomize expectations for the source
        randomized_source_counts = np.random.poisson(
            source_model_counts + background_model_counts
        )
        return randomized_source_counts

    def get_randomized_background_counts(self):
        # Randomize expectations for the background
        _, background_model_counts = self.get_current_value()
        # scale the background to the scale factor
        randomized_background_counts = np.random.poisson(background_model_counts / self._spectrum_plugin.scale_factor )
        return randomized_background_counts
class PoissonObservedGaussianBackgroundStatistic(BinnedStatistic):
    """Profile likelihood: Poisson-observed counts with Gaussian-distributed background."""

    def get_current_value(self, precalc_fluxes: Optional[np.array]=None):
        expected_model_counts = self._spectrum_plugin.get_model(precalc_fluxes=precalc_fluxes)
        # Background counts and errors are scaled to the source exposure; the
        # background rates are profiled out analytically.
        loglike, bkg_model = poisson_observed_gaussian_background(
            self._spectrum_plugin.current_observed_counts,
            self._spectrum_plugin.current_background_counts * self._spectrum_plugin.scale_factor,
            self._spectrum_plugin.current_background_count_errors * self._spectrum_plugin.scale_factor,
            expected_model_counts,
        )
        return nb_sum(loglike), bkg_model

    def get_randomized_source_counts(self, source_model_counts):
        # Since we use a profile likelihood, the background model is conditional on the source model, so let's
        # get it from the likelihood function
        _, background_model_counts = self.get_current_value()
        # a bad background model can produce
        # more background counts than observed counts
        # which results in negative background model counts
        # we will filter that
        idx = background_model_counts < 0
        background_model_counts[idx] = 0.
        if np.any(np.isnan(background_model_counts)):
            log.error("NaN count in background model counts")
            log.error(f"{background_model_counts}")
            raise RuntimeError()
        # NOTE(review): after the clamp above this check appears unreachable
        # except via NaN, which was already handled — kept as a safety net.
        if not np.all(background_model_counts >= 0):
            log.error("negative count in background model counts")
            log.error(f"{background_model_counts}")
            raise RuntimeError()
        if not np.isfinite(source_model_counts[0]):
            source_model_counts[0] = 0
            log.warning("simulated spectrum had infinite counts in first channel")
            log.warning("setting to ZERO")
        # Now randomize the expectations
        # Randomize expectations for the source
        randomized_source_counts = np.random.poisson(
            source_model_counts + background_model_counts
        )
        return randomized_source_counts

    def get_randomized_background_counts(self):
        # Now randomize the expectations.
        _, background_model_counts = self.get_current_value()
        # We cannot generate variates with zero sigma. They variates from those channel will always be zero
        # This is a limitation of this whole idea. However, remember that by construction an error of zero
        # it is only allowed when the background counts are zero as well.
        idx = self._spectrum_plugin.background_count_errors > 0
        randomized_background_counts = np.zeros_like(background_model_counts)
        randomized_background_counts[idx] = np.random.normal(
            loc=background_model_counts[idx],
            scale=self._spectrum_plugin.background_count_errors[idx],
        )
        # Issue a warning if the generated background is less than zero, and fix it by placing it at zero
        idx = randomized_background_counts < 0  # type: np.ndarray
        negative_background_n = nb_sum(idx)
        if negative_background_n > 0:
            log.warning(
                "Generated background has negative counts "
                "in %i channels. Fixing them to zero" % (negative_background_n)
            )
            randomized_background_counts[idx] = 0
        return randomized_background_counts

    def get_randomized_background_errors(self):
        return copy.copy(self._spectrum_plugin.background_count_errors)
class NotAvailableStatistic(object):
    """Placeholder for statistics whose optional provider is not installed."""

    def __init__(self, spectrum_plugin):
        """Always fails: this statistic requires the optional IXPE plugin."""
        log.error('The required statistic is currently restricted to the IXPE plugin only.')
        raise RuntimeError()
# The Gaussian-source / Gaussian-background statistic ships with the optional
# IXPE plugin; fall back to a placeholder that raises on use when it is absent.
try:
    from ixpe.likelihood import GaussianObservedGaussianBackgroundStatistic
    log.info('IXPE plugin found. Enabling Gaussian source with Gaussian background.')
except ImportError:
    # Bug fix: the original bare ``except:`` swallowed every exception
    # (including KeyboardInterrupt); only a failed import should trigger
    # the fallback.
    GaussianObservedGaussianBackgroundStatistic = NotAvailableStatistic
# Lookup table mapping (observation statistic -> background statistic ->
# handler class). Outer key: distribution of the observed counts; inner key:
# distribution of the background counts (None means no background).
statistic_lookup = {
    "poisson": {
        "poisson": PoissonObservedPoissonBackgroundStatistic,
        "gaussian": PoissonObservedGaussianBackgroundStatistic,
        "ideal": PoissonObservedIdealBackgroundStatistic,
        None: PoissonObservedNoBackgroundStatistic,
        "modeled": PoissonObservedModeledBackgroundStatistic,
    },
    "gaussian": {None: GaussianObservedStatistic,
                 'gaussian':GaussianObservedGaussianBackgroundStatistic},
    None: {None: None},
}
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@threeML@utils@spectrum@spectrum_likelihood.py@.PATH_END.py
|
{
"filename": "Background_modeling.md",
"repo_name": "threeML/threeML",
"repo_path": "threeML_extracted/threeML-master/docs/md_docs/fast_execute/Background_modeling.md",
"type": "Markdown"
}
|
---
jupyter:
jupytext:
formats: ipynb,md
text_representation:
extension: .md
format_name: markdown
format_version: '1.2'
jupytext_version: 1.7.1
kernelspec:
display_name: Python 3
language: python
name: python3
---
<!-- #region deletable=true editable=true -->
# Background Modeling
When fitting a spectrum with a background, it is invalid to simply subtract off the background if the background is part of the data's generative model [van Dyk et al. (2001)](http://iopscience.iop.org/article/10.1086/318656/meta). Therefore, we are often left with the task of modeling the statistical process of the background along with our source.
In typical spectral modeling, we find a few common cases when background is involved. If we have total counts ($S_i$) in $i^{\rm th}$ on $N$ bins observed for an exposure of $t_{\rm s}$ and also a measurement of $B_i$ background counts from looking off source for $t_{\rm b}$ seconds, we can then suppose a model for the source rate ($m_i$) and background rate ($b_i$).
**Poisson source with Poisson background**
This is described by a likelihood of the following form:
$$ L = \prod^N_{i=1} \frac{(t_{\rm s}(m_i+b_i))^{S_i} e^{-t_{\rm s}(m_i+b_i)}}{S_i!} \times \frac{(t_{\rm b} b_i)^{B_i} e^{-t_{\rm b}b_i}}{B_i!} $$
which is a Poisson likelihood for the total model ($m_i +b_i$) conditional on the Poisson distributed background observation. This is the typical case for e.g. aperture x-ray instruments that observe a source region and then a background region. Both observations are Poisson distributed.
**Poisson source with Gaussian background**
This likelihood is similar, but the conditonal background distribution is described by Gaussian:
$$ L = \prod^N_{i=1} \frac{(t_{\rm s}(m_i+b_i))^{S_i} e^{-t_{\rm s}(m_i+b_i)}}{S_i!} \times \frac{1}{\sigma_{b,i}\sqrt{2 \pi}} \exp \left[ -\frac{({B_i} - t_{\rm b} b_i)^2} {2 \sigma_{b,i}^2} \right] $$
where the $\sigma_{b,i}$ are the measured errors on $B_i$. This situation occurs e.g. when the background counts are estimated from a fitted model such as time-domain instruments that estimate the background counts from temporal fits to the lightcurve.
In 3ML, we can fit a background model along with the the source model which allows for arbitrarily low background counts (in fact zero) in channels. The alternative is to use profile likelihoods where we first differentiate the likelihood with respect to the background model
$$ \frac{ \partial L}{{\partial b_i}} = 0$$
and solve for the $b_i$ that maximize the likelihood. Both the Poisson and Gaussian background profile likelihoods are described in the [XSPEC statistics guide](https://heasarc.gsfc.nasa.gov/xanadu/xspec/manual/XSappendixStatistics.html). This implicitly yields $N$ parameters to the model thus requiring at least one background count per channel. These profile likelihoods are the default Poisson likelihoods in 3ML when a background model is not used with a **SpectrumLike** (and its children, **DispersionSpectrumLike** and **OGIPLike**) plugin.
Let's examine how to handle both cases.
<!-- #endregion -->
```python
import warnings
warnings.simplefilter('ignore')
import numpy as np
np.seterr(all="ignore")
```
```python deletable=true editable=true
%%capture
from threeML import *
```
```python
from jupyterthemes import jtplot
%matplotlib inline
jtplot.style(context="talk", fscale=1, ticks=True, grid=False)
set_threeML_style()
silence_warnings()
import astropy.units as u
```
<!-- #region deletable=true editable=true -->
First we will create an observation where we have a simulated broken power law source spectrum along with an observed background spectrum. The background is a power law continuum with a Gaussian line.
<!-- #endregion -->
```python deletable=true editable=true
# create the simulated observation
energies = np.logspace(1,4,151)
low_edge = energies[:-1]
high_edge = energies[1:]
# get a BPL source function
source_function = Broken_powerlaw(K=2,xb=300,piv=300, alpha=0., beta=-3.)
# power law background function
background_function = Powerlaw(K=.5,index=-1.5, piv=100.) + Gaussian(F=50,mu=511,sigma=20)
spectrum_generator = SpectrumLike.from_function('fake',
source_function=source_function,
background_function=background_function,
energy_min=low_edge,
energy_max=high_edge)
spectrum_generator.view_count_spectrum()
```
<!-- #region deletable=true editable=true -->
## Using a profile likelihood
We have very few counts in some channels (in fact sometimes zero), but let's assume we do not know the model for the background. In this case, we will use the profile Poisson likelihood.
<!-- #endregion -->
```python deletable=true editable=true
# instance our source spectrum
bpl = Broken_powerlaw(piv=300,xb=500)
# instance a point source
ra, dec = 0,0
ps_src = PointSource('source',ra,dec,spectral_shape=bpl)
# instance the likelihood model
src_model = Model(ps_src)
# pass everything to a joint likelihood object
jl_profile = JointLikelihood(src_model,DataList(spectrum_generator))
# fit the model
_ = jl_profile.fit()
# plot the fit in count space
_ = spectrum_generator.display_model(step=False)
```
<!-- #region deletable=true editable=true -->
Our fit recovers the simulated parameters. However, we should have binned the spectrum up such that there is at least one background count per spectral bin for the profile to be valid.
<!-- #endregion -->
```python deletable=true editable=true
spectrum_generator.rebin_on_background(1)
spectrum_generator.view_count_spectrum()
_ = jl_profile.fit()
_ = spectrum_generator.display_model(step=False)
```
<!-- #region deletable=true editable=true -->
## Modeling the background
Now let's try to model the background assuming we know that the background is a power law with a Gaussian line. We can extract a background plugin from the data by passing the original plugin to a classmethod of spectrum like.
<!-- #endregion -->
```python deletable=true editable=true
# extract the background from the spectrum plugin.
# This works for OGIPLike plugins as well, though we could easily also just read
# in a background PHA
background_plugin = SpectrumLike.from_background('bkg',spectrum_generator)
```
<!-- #region deletable=true editable=true -->
This constructs a new plugin with only the observed background so that we can first model it.
<!-- #endregion -->
```python deletable=true editable=true
background_plugin.view_count_spectrum()
```
<!-- #region deletable=true editable=true -->
We now construct our background model and fit it to the data. Let's assume we know that the line occurs at 511 keV, but we are unsure of its strength and width. We do not need to bin the data up because we are using a simple Poisson likelihood which is valid even when we have zero counts [Cash (1979)](http://adsabs.harvard.edu/abs/1979ApJ...228..939C).
<!-- #endregion -->
```python deletable=true editable=true
# instance the spectrum setting the line's location to 511
bkg_spectrum = Powerlaw(piv=100) + Gaussian(F=50,mu=511)
# setup model parameters
# fix the line's location
bkg_spectrum.mu_2.fix = True
# nice parameter bounds
bkg_spectrum.K_1.bounds = (1E-4, 10)
bkg_spectrum.F_2.bounds = (0., 1000)
bkg_spectrum.sigma_2.bounds = (2,30)
ps_bkg = PointSource('bkg',0,0,spectral_shape=bkg_spectrum)
bkg_model = Model(ps_bkg)
jl_bkg = JointLikelihood(bkg_model,DataList(background_plugin))
_ = jl_bkg.fit()
_ = background_plugin.display_model(step=False, data_color='#1A68F0', model_color='#FF9700')
```
<!-- #region deletable=true editable=true -->
We now have a model and estimate for the background which we can use when fitting with the source spectrum. We now create a new plugin with just the total observation and pass our background plugin as the background argument.
<!-- #endregion -->
```python deletable=true editable=true
modeled_background_plugin = SpectrumLike('full',
# here we use the original observation
observation=spectrum_generator.observed_spectrum,
# we pass the background plugin as the background!
background=background_plugin)
```
<!-- #region deletable=true editable=true -->
When we look at our count spectrum now, we will see the *predicted* background, rather than the measured one:
<!-- #endregion -->
```python deletable=true editable=true
modeled_background_plugin.view_count_spectrum()
```
<!-- #region deletable=true editable=true -->
Now we simply fit the spectrum as we did in the profiled case. The background plugin's parameters are stored in our new plugin as nuisance parameters:
<!-- #endregion -->
```python deletable=true editable=true
modeled_background_plugin.nuisance_parameters
```
<!-- #region deletable=true editable=true -->
and the fitting engine will use them in the fit. The parameters will still be connected to the background plugin and its model and thus we can free/fix them there as well as set priors on them.
<!-- #endregion -->
```python deletable=true editable=true
# instance the source model... the background plugin has its model already specified
bpl = Broken_powerlaw(piv=300,xb=500)
bpl.K.bounds = (1E-5,1E1)
bpl.xb.bounds = (1E1,1E4)
ps_src = PointSource('source',0,0,bpl)
src_model = Model(ps_src)
jl_src = JointLikelihood(src_model,DataList(modeled_background_plugin))
_ = jl_src.fit()
```
```python deletable=true editable=true tags=["nbsphinx-thumbnail"]
# over plot the joint background and source fits
fig = modeled_background_plugin.display_model(step=False)
_ = background_plugin.display_model(data_color='#1A68F0', model_color='#FF9700',model_subplot=fig.axes,step=False)
```
```python deletable=true editable=true
```
|
threeMLREPO_NAMEthreeMLPATH_START.@threeML_extracted@threeML-master@docs@md_docs@fast_execute@Background_modeling.md@.PATH_END.py
|
{
"filename": "unit_lookup_table.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/units/unit_lookup_table.py",
"type": "Python"
}
|
from unyt._unit_lookup_table import *
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@units@unit_lookup_table.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "VarStarDetect/varstardetect",
"repo_path": "varstardetect_extracted/varstardetect-main/setup.py",
"type": "Python"
}
|
from distutils.core import setup
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# setuptools.setup is the usual replacement (package_data /
# include_package_data below are setuptools features) -- confirm before
# migrating.

# The PyPI long description is the README, rendered as Markdown.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setup(
    name='varstardetect',  # distribution name (same as the package folder)
    packages=['varstardetect'],  # packages shipped with the distribution
    version='0.2.1.3',  # bump on every release
    # License identifier (see https://help.github.com/articles/licensing-a-repository)
    license='gpl-3.0',
    # Short one-line summary shown on PyPI
    description="TESS Variable Star Light Curve Fitter",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author='Nicolas Carrizosa Arias, Jorge Perez Gonzalez and Andres Cadenas Blanco',
    author_email='varstardetect@gmail.com',  # contact e-mail
    # Project homepage
    url='https://github.com/VarStarDetect/varstardetect',
    # NOTE(review): release tag 1.1.10 in this URL does not match
    # version 0.2.1.3 above -- verify the tag for this release.
    download_url='https://github.com/VarStarDetect/varstardetect/archive/refs/tags/1.1.10.tar.gz',
    # Keywords that define your package best
    keywords=['Star', 'Astronomy', 'Star Detection', 'Detection'],
    # Runtime dependencies installed by pip
    install_requires=[
        'numpy',
        'matplotlib',
        'astropy',
        'pandas',
        'lightkurve',
    ],
    # Non-Python data files bundled inside the package
    package_data={'varstardetect': ['*.txt', '*.md', 'Targets/*.csv', '*.in']},
    include_package_data=True,
    classifiers=[
        'Programming Language :: Python :: 3',
        'Framework :: Matplotlib',
        'Topic :: Scientific/Engineering :: Mathematics',
        'Topic :: Scientific/Engineering :: Astronomy',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Development Status :: 4 - Beta',
        'Intended Audience :: Science/Research',
        'Operating System :: OS Independent',
    ],
    python_requires=">=3.8",
)
|
VarStarDetectREPO_NAMEvarstardetectPATH_START.@varstardetect_extracted@varstardetect-main@setup.py@.PATH_END.py
|
{
"filename": "cnn_classifier.py",
"repo_name": "SKA-INAF/caesar",
"repo_path": "caesar_extracted/caesar-master/scripts/cnn_classifier.py",
"type": "Python"
}
|
#!/usr/bin/env python
##################################################
### MODULE IMPORT
##################################################
## STANDARD MODULES
import os
import sys
import subprocess
import string
import time
import signal
from threading import Thread
import datetime
import numpy as np
import random
import math
## ASTRO
from astropy.io import fits
## COMMAND-LINE ARG MODULES
import getopt
import argparse
import collections
## KERAS MODULES
import keras
from keras import layers
from keras import models
from keras import optimizers
from keras.utils import plot_model
from keras import backend
from keras.models import Model
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dropout
from keras.layers.core import Lambda
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import Input
import tensorflow as tf
## ADDON ML MODULES
from sklearn.model_selection import train_test_split
## GRAPHICS MODULES
import matplotlib.pyplot as plt
#### GET SCRIPT ARGS ####
def str2bool(v):
    """Convert a command-line string into a bool for argparse.

    Accepts the usual yes/no spellings (case-insensitive); anything else
    raises argparse.ArgumentTypeError so argparse reports a clean error.
    """
    value = v.lower()
    if value in ('yes', 'true', 't', 'y', '1'):
        return True
    if value in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
###########################
## ARGS
###########################
def get_args():
    """Parse and return the command-line arguments for the classifier.

    Returns
    -------
    argparse.Namespace
        Options covering input file lists, crop sampling, target layout,
        network architecture, training settings and output plot names.
    """
    parser = argparse.ArgumentParser(description="Parse args.")
    ## INPUT DATA
    parser.add_argument('-filelist_bkg', '--filelist_bkg', dest='filelist_bkg', required=True, type=str, action='store', help='Filename with list of train bkg data')
    parser.add_argument('-filelist_source', '--filelist_source', dest='filelist_source', required=True, type=str, action='store', help='Filename with list of train source data')
    # Pixel-value range used to rescale images to the [0,1] interval.
    # NOTE(review): default -0.0100 Jy/beam is -10 mJy/beam, but the help
    # text says -100 mJy/beam -- confirm which is intended.
    parser.add_argument('-normdatamin', '--normdatamin', dest='normdatamin', required=False, type=float, default=-0.0100, action='store', help='Normalization min used to scale data in (0,1) range (default=-100 mJy/beam)')
    parser.add_argument('-normdatamax', '--normdatamax', dest='normdatamax', required=False, type=float, default=10, action='store', help='Normalization max used to scale data in (0,1) range (default=10 Jy/beam)')
    # Crop geometry and per-image sampling
    parser.add_argument('-nx', '--nx', dest='nx', required=False, type=int, default=101, action='store', help='Image width in pixels (default=101)')
    parser.add_argument('-ny', '--ny', dest='ny', required=False, type=int, default=101, action='store', help='Image height in pixels (default=101)')
    parser.add_argument('-nsamples_bkg', '--nsamples_bkg', dest='nsamples_bkg', required=False, type=int, default=10, action='store', help='Number of train images for bkg extracted from input maps (default=10)')
    parser.add_argument('-nsamples_source', '--nsamples_source', dest='nsamples_source', required=False, type=int, default=-1, action='store', help='Number of train images extracted around sources from input maps (default=-1)')
    # Target-vector layout (objects per crop, parameters per object)
    parser.add_argument('-nmaxobjects', '--nmaxobjects', dest='nmaxobjects', required=False, type=int, default=5, action='store', help='Max number of predicted objects in target (default=5)')
    parser.add_argument('-ntargetpars', '--ntargetpars', dest='ntargetpars', required=False, type=int, default=6, action='store', help='Nmber of pars per objects in target (default=6)')
    # Network architecture sizes
    parser.add_argument('-conv_nfilt_min', '--conv_nfilt_min', dest='conv_nfilt_min', required=False, type=int, default=16, action='store', help='Number of min convolution filters used (default=16)')
    parser.add_argument('-conv_nfilt_max', '--conv_nfilt_max', dest='conv_nfilt_max', required=False, type=int, default=32, action='store', help='Number of max convolution filters used (default=32)')
    parser.add_argument('-dense_size_min', '--dense_size_min', dest='dense_size_min', required=False, type=int, default=16, action='store', help='Number of min neurons used in dense layer(default=16)')
    parser.add_argument('-dense_size_max', '--dense_size_max', dest='dense_size_max', required=False, type=int, default=32, action='store', help='Number of max neurons used in dense layer(default=32)')
    # Training settings
    parser.add_argument('-test_size', '--test_size', dest='test_size', required=False, type=float, default=0.2, action='store', help='Fraction of input data used for testing the network (default=0.2)')
    parser.add_argument('-spars_loss_weight', '--spars_loss_weight', dest='spars_loss_weight', required=False, type=float, default=1, action='store', help='Loss weight to be given to source pars learning (default=1)')
    parser.add_argument('-labels_loss_weight', '--labels_loss_weight', dest='labels_loss_weight', required=False, type=float, default=1, action='store', help='Loss weight to be given to source labels learning (default=1)')
    parser.add_argument('-nepochs', '--nepochs', dest='nepochs', required=False, type=int, default=100, action='store', help='Number of epochs used in network training (default=100)')
    # Output options
    parser.add_argument('--saveimg', dest='saveimg', action='store_true')
    parser.set_defaults(saveimg=False)
    parser.add_argument('-outfile_loss', '--outfile_loss', dest='outfile_loss', required=False, type=str, default='nn_loss.png', action='store', help='Name of NN loss plot file (default=nn_loss.png)')
    parser.add_argument('-outfile_accuracy', '--outfile_accuracy', dest='outfile_accuracy', required=False, type=str, default='nn_accuracy.png', action='store', help='Name of NN accuracy plot file (default=nn_accuracy.png)')
    parser.add_argument('-outfile_model', '--outfile_model', dest='outfile_model', required=False, type=str, default='nn_model.png', action='store', help='Name of NN model plot file (default=nn_model.png)')
    parser.add_argument('-outfile_posaccuracy', '--outfile_posaccuracy', dest='outfile_posaccuracy', required=False, type=str, default='nn_posaccuracy.png', action='store', help='Name of NN source position accuracy plot file (default=nn_posaccuracy.png)')
    parser.add_argument('-outfile_fluxaccuracy', '--outfile_fluxaccuracy', dest='outfile_fluxaccuracy', required=False, type=str, default='nn_fluxaccuracy.png', action='store', help='Name of NN source flux accuracy plot file (default=nn_fluxaccuracy.png)')
    args = parser.parse_args()
    return args
###########################
## READ INPUT DATA
###########################
# - Has pattern in string
def has_patterns(s, patterns):
    """Return True if any of *patterns* occurs as a substring of *s*.

    An empty or None pattern list never matches.
    """
    if not patterns:
        return False
    return any(pattern in s for pattern in patterns)
# - Read filelist
def read_list(filename, skip_patterns=None):
    """Read a whitespace-delimited text file into a list of token lists.

    Parameters
    ----------
    filename : str
        Path of the file to read.
    skip_patterns : list of str, optional
        Lines whose first token contains any of these substrings are
        skipped (e.g. ['#'] to drop comment lines).

    Returns
    -------
    list of list of str
        One entry per non-empty, non-skipped line, split on whitespace.

    Raises
    ------
    IOError
        If the file cannot be opened.
    """
    # None default instead of a mutable [] default argument
    if skip_patterns is None:
        skip_patterns = []
    try:
        f = open(filename, 'r')
    except IOError:
        errmsg = 'Could not read file: ' + filename
        print("ERROR: " + errmsg)
        raise IOError(errmsg)
    fields = []
    # 'with' guarantees the handle is closed even if parsing fails mid-way
    # (the original leaked it on any exception inside the loop)
    with f:
        for line in f:
            line_fields = line.strip().split()
            if not line_fields:
                continue
            # Skip lines whose first token matches any skip pattern
            if any(pattern in line_fields[0] for pattern in skip_patterns):
                continue
            fields.append(line_fields)
    return fields
# Read bkg data
def read_bkg_data(filename):
    """Read the list of background training images.

    Parameters
    ----------
    filename : str
        Path of an ASCII list file; '#' lines are treated as comments and
        the first token of each remaining line is a FITS filename.

    Returns
    -------
    list of list of str
        Tokenized rows of the list file.

    Raises
    ------
    IOError
        If the list file cannot be read or contains no entries.
    """
    # - Read list with files (py2-only 'print' statements replaced with
    #   py2/py3-compatible single-argument print() calls)
    filelist_data = []
    try:
        filelist_data = read_list(filename, ['#'])
    except IOError:
        errmsg = 'Cannot read file: ' + filename
        print("ERROR: " + errmsg)
        raise IOError(errmsg)
    filelist_data_shape = np.shape(filelist_data)
    print('filelist_data=' + str(filelist_data))
    # - Check the list is not empty
    # NOTE(review): unlike read_source_data, no column-count check is done
    # here -- presumably the bkg list has a single column; confirm.
    nfiles_img = filelist_data_shape[0]
    if nfiles_img <= 0:
        errmsg = 'Empty file: ' + filename
        print("ERROR: " + errmsg)
        raise IOError(errmsg)
    print('INFO: #%d bkg image data found in list...' % nfiles_img)
    return filelist_data
# Read source data
def read_source_data(filename):
    """Read the list of source training images and parameter files.

    Parameters
    ----------
    filename : str
        Path of an ASCII list file with exactly two columns per line
        (image filename, source-parameter filename); '#' lines are
        comments.

    Returns
    -------
    list of list of str
        Tokenized rows of the list file.

    Raises
    ------
    IOError
        If the file cannot be read, has the wrong number of columns,
        or is empty.
    """
    # py2-only 'print' statements replaced with py2/py3-compatible
    # single-argument print() calls
    filelist_data = []
    try:
        filelist_data = read_list(filename, ['#'])
    except IOError:
        errmsg = 'Cannot read file: ' + filename
        print("ERROR: " + errmsg)
        raise IOError(errmsg)
    filelist_data_shape = np.shape(filelist_data)
    print('filelist_data=' + str(filelist_data))
    # - A 2D shape means every row has the same (2) number of columns
    if len(filelist_data_shape) != 2:
        errmsg = 'Invalid number of columns in file ' + filename + ' (2 expected)!'
        print("ERROR: " + errmsg)
        raise IOError(errmsg)
    nfiles_img = filelist_data_shape[0]
    if nfiles_img <= 0:
        errmsg = 'Empty file: ' + filename
        print("ERROR: " + errmsg)
        raise IOError(errmsg)
    print('INFO: #%d source image data found in list...' % nfiles_img)
    return filelist_data
###########################
## PREPARE TRAIN DATA
###########################
# - Write FITS image
def write_fits(data, filename):
    """Write *data* to *filename* as a primary-HDU FITS image,
    overwriting any existing file."""
    fits.HDUList([fits.PrimaryHDU(data)]).writeto(filename, overwrite=True)
# - Read FITS image
def read_img(filename):
    """Read a FITS image and return its 2D pixel data.

    Accepts plain 2D images and 4-axis cubes, from which the trailing
    2D plane data[0,0,:,:] is extracted.

    Raises
    ------
    IOError
        If the file cannot be opened or the number of axes is neither
        2 nor 4.

    NOTE(review): the HDU list is never closed; with lazily-loaded
    (memory-mapped) data the returned array may keep the file handle
    alive. Left as-is because closing could invalidate the data --
    confirm against the astropy.io.fits memmap semantics.
    """
    try:
        hdu = fits.open(filename)
    except Exception as ex:
        errmsg = 'Cannot read image file: ' + filename
        print("ERROR: " + errmsg)
        raise IOError(errmsg)
    data = hdu[0].data
    nchan = len(data.shape)
    if nchan == 4:
        # 4-axis files: take the 2D plane at index [0,0]
        output_data = data[0, 0, :, :]
    elif nchan == 2:
        output_data = data
    else:
        errmsg = 'Invalid/unsupported number of channels found in file ' + filename + ' (nchan=' + str(nchan) + ')!'
        print("ERROR: " + errmsg)
        raise IOError(errmsg)
    return output_data
# - Get cropped image data
def crop_img(data, x0, y0, dx, dy):
    """Extract a sub-image of requested size (dx,dy) centred on (x0,y0).

    Parameters
    ----------
    data : 2D numpy array
        Full image (y is the first axis, x the second).
    x0, y0 : int
        Centre pixel of the crop.
    dx, dy : int
        Requested crop size; the slice includes both edges, so the actual
        size is dx (dy) for odd values and dx+1 (dy+1) for even values.

    Returns
    -------
    2D numpy array
        Crop with NaN replaced by 0 and +/-inf by large finite numbers.
    """
    # Floor division keeps the original Python-2 integer semantics for
    # the half-widths under Python 3 as well
    xmin = int(x0 - dx // 2)
    xmax = int(x0 + dx // 2)
    ymin = int(y0 - dy // 2)
    ymax = int(y0 + dy // 2)
    crop_data = data[ymin:ymax + 1, xmin:xmax + 1]
    # BUGFIX: the original np.nan_to_num(crop_data, False) ran in place on
    # the slice *view*, silently zeroing the NaNs in the caller's full
    # image too and defeating later NaN-integrity checks on overlapping
    # crops. Operate on a copy instead.
    crop_data = np.nan_to_num(crop_data)
    return crop_data
# - Set bkg train data
def make_bkg_train_data(imglist, nsamples, imgsizex, imgsizey, nmaxobjects, ntargetpars, normmin, normmax, writeimg=False):
    """Build the background (no-source) training sample.

    For each image in *imglist*, *nsamples* crops of size
    (imgsizex, imgsizey) are extracted at random positions away from the
    image borders.  Background crops get all-zero target parameters and
    all-zero object labels.

    Parameters
    ----------
    imglist : list of list of str
        Tokenized rows of the bkg list file; first token is a FITS path.
    nsamples : int
        Number of random crops to extract per input image.
    imgsizex, imgsizey : int
        Crop size in pixels.
    nmaxobjects : int
        Maximum number of objects encoded in the target vector.
    ntargetpars : int
        Number of parameters per object in the target vector.
    normmin, normmax : float
        Pixel values used to rescale inputs to [0,1].
    writeimg : bool, optional
        If True, save every crop to a FITS file.

    Returns
    -------
    (inputs, outputs, outputs_labels) : tuple of float32 numpy arrays
        Shapes (N,ny,nx,1), (N,nmaxobjects*ntargetpars), (N,nmaxobjects).

    Raises
    ------
    IOError
        If any input image cannot be read.
    """
    input_data = []
    output_size = nmaxobjects * ntargetpars
    output_data = []
    output_label_size = nmaxobjects
    output_label_data = []
    Nchan = 1
    imgcounter = 0
    for item in imglist:
        imgcounter += 1
        filename = item[0]
        print('INFO: Reading file %s ...' % filename)
        # - Read main bkg img
        try:
            data = read_img(filename)
        except Exception as ex:
            errmsg = 'Failed to read bkg image data (err=' + str(ex) + ')'
            print("ERROR: " + errmsg)
            raise IOError(errmsg)
        imgsize = np.shape(data)
        nx = imgsize[1]
        ny = imgsize[0]
        # Floor division keeps Python-2 integer semantics for the margins
        marginx = imgsizex // 2
        marginy = imgsizey // 2
        print('INFO: Bkg image no. %d has size (%d,%d)' % (imgcounter, nx, ny))
        # - Extract nsamples crops per image
        index = 0
        while index < nsamples:
            if index % 100 == 0:
                print("INFO: Generating sample image no. %s/%s from image %d ..." % (index + 1, nsamples, imgcounter))
            # - Pick a crop centre at random, away from the borders
            x0 = int(np.random.uniform(marginx, nx - marginx - 1))
            y0 = int(np.random.uniform(marginy, ny - marginy - 1))
            # - Extract crop img data
            data_crop = crop_img(data, x0, y0, imgsizex, imgsizey)
            imgcropsize = np.shape(data_crop)
            # - Check data integrity (skip if all zeros or all nan/inf)
            n_nonzero = np.count_nonzero(data_crop)
            n_finite = (np.isfinite(data_crop)).sum()
            if n_nonzero <= 0 or n_finite <= 0:
                print('WARN: Skip sample image (all pixels NaN/inf/zero)...')
                continue
            # - Save crop img to file?
            outfilename = 'train_bkg_' + str(index + 1) + '-RUN' + str(imgcounter) + '.fits'
            if writeimg:
                write_fits(data_crop, outfilename)
            # - Store input as a (ny,nx,Nchan) tensor, Nchan=1
            data_crop = data_crop.reshape(imgcropsize[0], imgcropsize[1], Nchan)
            input_data.append(data_crop)
            # - Background targets & labels are all zeros
            output_data.append(np.zeros((1, output_size)))
            output_label_data.append(np.zeros((1, output_label_size)))
            index += 1
    # - Convert lists to float32 arrays of the final shapes
    inputs = np.array(input_data)
    inputs = inputs.astype('float32')
    outputs = np.array(output_data)
    outputs = outputs.astype('float32')
    N = outputs.shape[0]
    outputs = outputs.reshape((N, output_size))
    outputs_labels = np.array(output_label_data)
    outputs_labels = outputs_labels.astype('float32')
    outputs_labels = outputs_labels.reshape((N, output_label_size))
    # - Normalize inputs to [0,1]
    inputs = (inputs - normmin) / (normmax - normmin)
    return inputs, outputs, outputs_labels
# - Set source train data
def make_source_train_data(filelist, nsamples, imgsizex, imgsizey, nmaxobjects, ntargetpars, normmin, normmax, writeimg=False):
    """Build the source training sample.

    For each (image, source-parameter-file) pair in *filelist*, crops of
    size (imgsizex, imgsizey) centred on catalogued sources are
    extracted.  The target vector encodes up to *nmaxobjects* sources
    per crop (x,y offsets in the crop frame, flux, sigmaX, sigmaY,
    theta in radians), ordered from brightest to faintest; the label
    vector flags which object slots are filled.

    Parameters
    ----------
    filelist : list of list of str
        Tokenized rows of the source list file: (image path, par path).
    nsamples : int
        Crops to extract per image, or -1 for one crop per source.
    imgsizex, imgsizey : int
        Crop size in pixels.
    nmaxobjects, ntargetpars : int
        Target-vector layout (objects per crop, parameters per object).
    normmin, normmax : float
        Pixel values used to rescale inputs to [0,1].
    writeimg : bool, optional
        If True, save every crop to a FITS file.

    Returns
    -------
    (inputs, outputs, outputs_labels) : tuple of float32 numpy arrays
        Shapes (N,ny,nx,1), (N,nmaxobjects*ntargetpars), (N,nmaxobjects).

    Raises
    ------
    IOError
        If an image or parameter file cannot be read.
    """
    input_data = []
    output_size = nmaxobjects * ntargetpars
    output_data = []
    output_label_size = nmaxobjects
    output_label_data = []
    Nchan = 1
    imgcounter = 0
    for item in filelist:
        imgcounter += 1
        filename = item[0]
        filename_spar = item[1]
        print('INFO: Reading files: %s, %s ...' % (filename, filename_spar))
        # - Read main source img
        try:
            data = read_img(filename)
        except Exception as ex:
            errmsg = 'Failed to read source image data (err=' + str(ex) + ')'
            print("ERROR: " + errmsg)
            raise IOError(errmsg)
        imgsize = np.shape(data)
        nx = imgsize[1]
        ny = imgsize[0]
        print('INFO: Source image no. %d has size (%d,%d)' % (imgcounter, nx, ny))
        # - Read source pars; columns are read as strings:
        #   (name, x, y, S, sigmaX, sigmaY, theta) -- TODO confirm layout
        source_pars = []
        skip_patterns = ['#']
        try:
            source_pars = read_list(filename_spar, skip_patterns)
        except IOError:
            errmsg = 'Cannot read file: ' + filename_spar
            print("ERROR: " + errmsg)
            raise IOError(errmsg)
        source_pars_size = np.shape(source_pars)
        nsources = source_pars_size[0]
        # - One crop per source (all sources if nsamples==-1)
        nsources_gen = nsources
        if nsamples != -1 and nsamples <= nsources:
            nsources_gen = nsamples
        for index in range(nsources_gen):
            if index % 100 == 0:
                print("INFO: Generating source image no. %s/%s from image %d ..." % (index + 1, nsamples, imgcounter))
            # - Crop centre = catalogued source position
            source_x0 = float(source_pars[index][1])
            source_y0 = float(source_pars[index][2])
            x0 = int(source_x0)
            y0 = int(source_y0)
            # Crop bounds (floor division keeps Python-2 semantics)
            xmin = int(x0 - imgsizex // 2)
            xmax = int(x0 + imgsizex // 2)
            ymin = int(y0 - imgsizey // 2)
            ymax = int(y0 + imgsizey // 2)
            # - Extract crop img data
            data_crop = crop_img(data, x0, y0, imgsizex, imgsizey)
            imgcropsize = np.shape(data_crop)
            # - Check data integrity (skip if all zeros or all nan/inf)
            n_nonzero = np.count_nonzero(data_crop)
            n_finite = (np.isfinite(data_crop)).sum()
            if n_nonzero <= 0 or n_finite <= 0:
                print('WARN: Skip sample image (all pixels NaN/inf/zero)...')
                continue
            # - Save crop img to file?
            outfilename = 'train_source_' + str(index + 1) + '-RUN' + str(imgcounter) + '.fits'
            if writeimg:
                write_fits(data_crop, outfilename)
            # - Store input as a (ny,nx,Nchan) tensor, Nchan=1
            data_crop = data_crop.reshape(imgcropsize[0], imgcropsize[1], Nchan)
            input_data.append(data_crop)
            # - Find all catalogued sources falling inside this crop
            sources_in_field = []
            for sources in source_pars:
                x = int(float(sources[1]))
                y = int(float(sources[2]))
                if x >= xmin and x <= xmax and y >= ymin and y <= ymax:
                    sources_in_field.append(sources)
            # Sort from brightest to faintest.  BUGFIX: the flux column is
            # still a string here, so sorting on sources[3] directly was a
            # lexicographic string sort (e.g. '9' > '10'); convert to float.
            sources_in_field.sort(key=lambda s: float(s[3]), reverse=True)
            nsources_in_field = len(sources_in_field)
            # - Fill target parameter & label vectors
            targets = np.zeros((1, output_size))
            target_labels = np.zeros((1, output_label_size))
            nobjs = min(nsources_in_field, nmaxobjects)
            par_counter = 0
            for k in range(nobjs):
                target_labels[0, k] = 1
                x0 = float(sources_in_field[k][1])
                y0 = float(sources_in_field[k][2])
                S = float(sources_in_field[k][3])
                sigmaX = float(sources_in_field[k][4])
                sigmaY = float(sources_in_field[k][5])
                theta = np.radians(float(sources_in_field[k][6]))
                targets[0, par_counter + 0] = x0 - xmin  # x offset in crop frame
                targets[0, par_counter + 1] = y0 - ymin  # y offset in crop frame
                targets[0, par_counter + 2] = S
                targets[0, par_counter + 3] = sigmaX
                targets[0, par_counter + 4] = sigmaY
                targets[0, par_counter + 5] = theta
                # NOTE(review): stride hard-coded to 6 parameters; must
                # equal ntargetpars for the target layout to be consistent
                par_counter += 6
            output_data.append(targets)
            output_label_data.append(target_labels)
    # - Convert lists to float32 arrays of the final shapes
    inputs = np.array(input_data)
    inputs = inputs.astype('float32')
    outputs = np.array(output_data)
    outputs = outputs.astype('float32')
    N = outputs.shape[0]
    outputs = outputs.reshape((N, output_size))
    outputs_labels = np.array(output_label_data)
    outputs_labels = outputs_labels.astype('float32')
    outputs_labels = outputs_labels.reshape((N, output_label_size))
    # - Normalize inputs to [0,1]
    inputs = (inputs - normmin) / (normmax - normmin)
    return inputs, outputs, outputs_labels
###########################
## BUILD NETWORK
###########################
def build_network(img_height, img_width, nmaxobjects, ntargetpars, conv_nfilt_min=16, conv_nfilt_max=32, conv_kern_size_min=3, conv_kern_size_max=3, conv_act='relu', pool_size=2, dense_size_min=16, dense_size_max=32, dense_act='relu'):
    """Build the two-headed CNN used for source detection.

    Three Conv+MaxPool+Dropout stages feed two dense layers and end in
    two output heads:
      - 'type': sigmoid scores of length nmaxobjects (slot occupied?)
      - 'pars': linear outputs of length nmaxobjects*ntargetpars
        (the per-object parameter vector).

    Parameters
    ----------
    img_height, img_width : int
        Input image size in pixels (single channel).
    nmaxobjects, ntargetpars : int
        Target-vector layout driving the output head sizes.
    conv_nfilt_min, conv_nfilt_max : int
        Filter counts for the first and subsequent conv layers.
    conv_kern_size_min, conv_kern_size_max : int
        Kernel sizes for the first two and the last conv layer.
    conv_act, dense_act : str
        Activations for conv and dense layers.
    pool_size : int
        Max-pooling window size.
    dense_size_min, dense_size_max : int
        Neuron counts of the two dense layers (max first, then min).

    Returns
    -------
    keras.models.Model
        Uncompiled functional model named "SourceNet".
    """
    dropout = 0.25
    dropout_dense = 0.5
    padding = "same"
    # - Input layer: single-channel image
    nchan = 1
    inputShape = (img_height, img_width, nchan)
    inputs = Input(shape=inputShape, dtype='float', name='input')
    # - Convolutional feature extractor (3 stages)
    x = layers.Conv2D(filters=conv_nfilt_min, kernel_size=(conv_kern_size_min, conv_kern_size_min), activation=conv_act, padding=padding)(inputs)
    x = layers.MaxPooling2D(pool_size=(pool_size, pool_size), strides=None, padding='valid')(x)
    x = layers.Dropout(dropout)(x)
    x = layers.Conv2D(filters=conv_nfilt_max, kernel_size=(conv_kern_size_min, conv_kern_size_min), activation=conv_act, padding=padding)(x)
    x = layers.MaxPooling2D(pool_size=(pool_size, pool_size), strides=None, padding='valid')(x)
    x = layers.Dropout(dropout)(x)
    x = layers.Conv2D(filters=conv_nfilt_max, kernel_size=(conv_kern_size_max, conv_kern_size_max), activation=conv_act, padding=padding)(x)
    x = layers.MaxPooling2D(pool_size=(pool_size, pool_size), strides=None, padding='valid')(x)
    x = layers.Dropout(dropout)(x)
    # - Fully connected layers
    x = layers.Flatten()(x)
    x = layers.Dense(dense_size_max, activation=dense_act)(x)
    x = layers.Dropout(dropout_dense)(x)
    x = layers.Dense(dense_size_min, activation=dense_act)(x)
    x = layers.Dropout(dropout_dense)(x)
    # - Output heads
    type_prediction = layers.Dense(nmaxobjects, activation='sigmoid', name='type')(x)
    pars_prediction = layers.Dense(nmaxobjects * ntargetpars, activation='linear', name='pars')(x)
    # - Assemble the functional model
    model = Model(
        inputs=inputs,
        outputs=[type_prediction, pars_prediction],
        name="SourceNet"
    )
    return model
#####################################
## DEFINE NETWORK ADDON METRICS
#####################################
#- These metrics were removed in Keras
# See https://gist.github.com/pishangujeniya/ca8dd46a5d5cf0b0391b712c1a03b9b6
def precision(y_true, y_pred):
    """Batch-wise precision metric (dropped from Keras core).

    Precision = true positives / predicted positives, computed over the
    current batch only; epsilon guards against division by zero.
    """
    K = keras.backend
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return true_pos / (pred_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall metric (dropped from Keras core).

    Recall = true positives / possible positives, computed over the
    current batch only; epsilon guards against division by zero.
    """
    K = keras.backend
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    poss_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return true_pos / (poss_pos + K.epsilon())
def f1_score(y_true, y_pred):
    """Batch-wise F1 score: harmonic mean of precision() and recall()."""
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    return (2 * p * r) / (p + r + keras.backend.epsilon())
##############
## MAIN ##
##############
def main():
"""Main function"""
#===========================
#== PARSE ARGS
#===========================
print('INFO: Get script args ...')
try:
args= get_args()
except Exception as ex:
print("Failed to get and parse options (err=%s)",str(ex))
return 1
filelist_bkg= args.filelist_bkg
filelist_source= args.filelist_source
nx= args.nx
ny= args.ny
nsamples_bkg= args.nsamples_bkg
nsamples_source= args.nsamples_source
nmaxobjects= args.nmaxobjects
ntargetpars= args.ntargetpars
saveimg= args.saveimg
normdatamin= args.normdatamin
normdatamax= args.normdatamax
conv_nfilt_min= args.conv_nfilt_min
conv_nfilt_max= args.conv_nfilt_max
dense_size_min= args.dense_size_min
dense_size_max= args.dense_size_max
test_size= args.test_size
spars_loss_weight= args.spars_loss_weight
labels_loss_weight= args.labels_loss_weight
nepochs= args.nepochs
outfile_loss= args.outfile_loss
outfile_accuracy= args.outfile_accuracy
outfile_model= args.outfile_model
outfile_posaccuracy= args.outfile_posaccuracy
outfile_fluxaccuracy= args.outfile_fluxaccuracy
#===========================
#== READ BKG/SOURCE DATA
#===========================
print ('INFO: Reading bkg data from file %s ...' % filelist_bkg)
try:
filenames_bkg= read_bkg_data(filelist_bkg)
except Exception as ex:
print("Failed to read bkg data (err=%s)",str(ex))
return 1
print ('INFO: Reading source data from file %s ...' % filelist_source)
try:
filenames_source= read_source_data(filelist_source)
except Exception as ex:
print("Failed to read source data (err=%s)",str(ex))
return 1
#===========================
#== GENERATE TRAIN DATA
#===========================
# - Extract train data for bkg from images
print ('INFO: Generating bkg train data ...')
#(x_train_bkg,y_train_bkg,y_train_labels_bkg)= make_bkg_train_data(
(inputs_bkg,outputs_bkg,outputs_labels_bkg)= make_bkg_train_data(
imglist=filenames_bkg,
nsamples=nsamples_bkg,
imgsizex=nx,imgsizey=ny,
nmaxobjects=nmaxobjects,ntargetpars=ntargetpars,
normmin=normdatamin,normmax=normdatamax,
writeimg=saveimg
)
print 'DEBUG: inputs_bkg size=', np.shape(inputs_bkg)
print 'DEBUG: outputs_bkg size=', np.shape(outputs_bkg)
print 'DEBUG: outputs_labels_bkg size', np.shape(outputs_labels_bkg)
print 'DEBUG: outputs_bkg=',outputs_bkg
print 'DEBUG: outputs_labels_bkg=',outputs_labels_bkg
# - Extract train data for sources from images
print ('INFO: Generating source train data ...')
#(x_train_source,y_train_source,y_train_labels_source)= make_source_train_data(
(inputs_source,outputs_source,outputs_labels_source)= make_source_train_data(
filelist=filenames_source,
nsamples=nsamples_source,
imgsizex=nx,imgsizey=ny,
nmaxobjects=nmaxobjects,ntargetpars=ntargetpars,
normmin=normdatamin,normmax=normdatamax,
writeimg=saveimg
)
print 'DEBUG: inputs_source size=', np.shape(inputs_source)
print 'DEBUG: outputs_source size=', np.shape(outputs_source)
print 'DEBUG: outputs_labels_source size=', np.shape(outputs_labels_source)
print 'DEBUG: outputs_source=',outputs_source
print 'DEBUG: outputs_labels_source=',outputs_labels_source
# - Merge data for bkg & sources
print 'INFO: Merging train data for bkg & sources ...'
#inputs= []
#inputs.append(inputs_bkg)
#inputs.append(inputs_source)
inputs= np.concatenate((inputs_bkg,inputs_source))
#outputs= []
#outputs.append(outputs_bkg)
#outputs.append(outputs_source)
outputs= np.concatenate((outputs_bkg,outputs_source))
#outputs_labels= []
#outputs_labels.append(outputs_labels_bkg)
#outputs_labels.append(outputs_labels_source)
outputs_labels= np.concatenate((outputs_labels_bkg,outputs_labels_source))
# - Shuffle data before splitting in test & validation sample
print 'INFO: Shuffling train data ...'
indices= np.arange(inputs.shape[0])
np.random.shuffle(indices)
inputs= inputs[indices]
outputs= outputs[indices]
outputs_labels= outputs_labels[indices]
print 'DEBUG: inputs size=', np.shape(inputs)
print 'DEBUG: outputs size=', np.shape(outputs)
print 'DEBUG: outputs_labels size=', np.shape(outputs_labels)
# - Partition the data into training and cross-validation splits
print 'INFO: Splitting data into train & test samples ...'
split= train_test_split(
inputs,outputs,outputs_labels,
test_size=test_size,
random_state=None
)
(inputs_train, inputs_test, outputs_train, outputs_test, outputs_labels_train, outputs_labels_test) = split
print 'DEBUG: inputs_train size=', np.shape(inputs_train)
print 'DEBUG: inputs_test size=', np.shape(inputs_test)
print 'DEBUG: outputs_train size=', np.shape(outputs_train)
print 'DEBUG: outputs_test size=', np.shape(outputs_test)
print 'DEBUG: outputs_labels_train size=', np.shape(outputs_labels_train)
print 'DEBUG: outputs_labels_test size=', np.shape(outputs_labels_test)
#===========================
#== BUILD NN
#===========================
#- Create the network
print ('INFO: Building network architecture ...')
model= build_network(
img_height=nx, img_width=ny,
nmaxobjects=nmaxobjects, ntargetpars=ntargetpars,
conv_nfilt_min=conv_nfilt_min,conv_nfilt_max=conv_nfilt_max,
dense_size_min=dense_size_min,dense_size_max=dense_size_max
)
# - Print network architecture
model.summary()
#- Set optimizer & loss function per each output
print ('INFO: Compiling network...')
opt= optimizers.RMSprop(lr=1.e-4)
#opt= Adam(lr=INIT_LR, decay=INIT_LR / nepochs)
losses = {
"type": "binary_crossentropy",
"pars": "mse"
}
lossWeights = {
"type": labels_loss_weight,
"pars": spars_loss_weight
}
##model.compile(optimizer=opt,loss=losses, loss_weights=lossWeights, metrics=['accuracy',precision,recall,f1_score])
model.compile(optimizer=opt,loss=losses, loss_weights=lossWeights, metrics=['accuracy'])
#===========================
#== TRAIN NN
#===========================
print ('INFO: Training network...')
#fitout= model.fit(
# x=inputs_train,
# y={"type": outputs_labels_train,"pars": outputs_train},
# validation_data=(inputs_test,{"type": outputs_labels_test,"pars": outputs_test}),
# ##batch_size=64
# epochs=nepochs,
# verbose=1
#)
flipped_outputs_labels_train= outputs_labels_train
flipped_outputs_train= outputs_train
flipped_outputs_labels_test= outputs_labels_test
flipped_outputs_test= outputs_test
train_loss_vs_epoch= np.zeros((3,nepochs))
test_loss_vs_epoch= np.zeros((3,nepochs))
train_accuracy_vs_epoch= np.zeros((2,nepochs))
test_accuracy_vs_epoch= np.zeros((2,nepochs))
flip_test_data= True
for epoch in range(nepochs):
# - Train for 1 epoch
fitout= model.fit(
x=inputs_train,
y={"type": flipped_outputs_labels_train,"pars": flipped_outputs_train},
validation_data=(inputs_test,{"type": outputs_labels_test,"pars": outputs_test}),
#batch_size=64
epochs=1,
verbose=1
)
# - Save epoch loss
print ('== EPOCH %d ==' % epoch)
print fitout.history
train_loss_vs_epoch[0,epoch]= fitout.history['loss'][0]
train_loss_vs_epoch[1,epoch]= fitout.history['type_loss'][0]
train_loss_vs_epoch[2,epoch]= fitout.history['pars_loss'][0]
test_loss_vs_epoch[0,epoch]= fitout.history['val_loss'][0]
test_loss_vs_epoch[1,epoch]= fitout.history['val_type_loss'][0]
test_loss_vs_epoch[2,epoch]= fitout.history['val_pars_loss'][0]
train_accuracy_vs_epoch[0,epoch]= fitout.history['type_acc'][0]
train_accuracy_vs_epoch[1,epoch]= fitout.history['pars_acc'][0]
test_accuracy_vs_epoch[0,epoch]= fitout.history['val_type_acc'][0]
test_accuracy_vs_epoch[1,epoch]= fitout.history['val_pars_acc'][0]
# - Get predictions for train data and flip targets according to smallest MSE match.
#   The network may output the nmaxobjects objects in any order, so for each
#   sample the target objects are greedily re-ordered ("flipped") to pair each
#   predicted object with the closest expected one in (x,y) MSE.
#   NOTE(review): appears to run once per training epoch - confirm enclosing loop.
(outputs_labels_pred, outputs_pred)= model.predict(inputs_train)
nsamples= outputs_pred.shape[0]
npars= 2 # only (x,y) pars used for flipping
#npars= ntargetpars
for sample in range(nsamples):
    # Pairwise MSE matrix between expected object i and predicted object j
    mses= np.zeros((nmaxobjects,nmaxobjects))
    predout_mse= np.zeros((nmaxobjects,npars))
    expout_mse= np.zeros((nmaxobjects,npars))
    predout= np.zeros((nmaxobjects,ntargetpars))
    expout= np.zeros((nmaxobjects,ntargetpars))
    expout_labels= np.zeros((nmaxobjects,1))
    for i in range(nmaxobjects):
        expout_labels[i,0]= flipped_outputs_labels_train[sample,i]
        for j in range(ntargetpars):
            predout[i,j]= outputs_pred[sample,j+i*ntargetpars]
            expout[i,j]= flipped_outputs_train[sample,j+i*ntargetpars]
        for j in range(npars):
            predout_mse[i,j]= outputs_pred[sample,j+i*ntargetpars]
            expout_mse[i,j]= flipped_outputs_train[sample,j+i*ntargetpars]
    for i in range(nmaxobjects):
        for j in range(nmaxobjects):
            mse= np.mean(np.square(expout_mse[i,:]-predout_mse[j,:]))
            mses[i,j]= mse
    # - Find new ordering according to smallest MSE (greedy assignment)
    mses_copy= mses.copy() # FIX: was an alias of ``mses``; keep a pristine copy for the debug prints below
    reorder_indexes= np.zeros(nmaxobjects,dtype=int)
    for i in range(nmaxobjects):
        ind_exp, ind_pred= np.unravel_index(mses.argmin(),mses.shape) # Find index of smallest mse
        # FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported spelling
        mses[ind_exp]= np.inf # Set mse to largest value so that it is not re-assigned anymore
        mses[:,ind_pred]= np.inf
        reorder_indexes[ind_pred]= ind_exp
    #- Save before flipping (kept for the optional debug prints below)
    target= ','.join(map(str, flipped_outputs_train[sample,:]))
    target_labels= ','.join(map(str, flipped_outputs_labels_train[sample,:]))
    #- Flip target
    flipped_outputs_train[sample]= expout[reorder_indexes].flatten()
    flipped_outputs_labels_train[sample]= expout_labels[reorder_indexes].flatten()
    #- Print
    #flipped_target= ','.join(map(str, flipped_outputs_train[sample,:]))
    #flipped_target_labels= ','.join(map(str, flipped_outputs_labels_train[sample,:]))
    #pred= ','.join(map(str, outputs_pred[sample,:]))
    #pred_labels= ','.join(map(str, outputs_labels_pred[sample,:]))
    #mse= ','.join(map(str, mses_copy[:,:]))
    #print("DEBUG: Entry no. %d: reorder_indexes=[%s], target_labels=[%s], flipped_target_labels=[%s]" % (sample+1,reorder_indexes,target_labels,flipped_target_labels) )
    #print("DEBUG: Entry no. %d: pred_labels=[%s], target_labels=[%s], flipped_target_labels=[%s], pred=[%s], target=[%s], flipped_target=[%s], mse=[%s], reorder_indexes=[%s]" % (sample+1,pred_labels,target_labels,flipped_target_labels,pred,target,flipped_target,mse,reorder_indexes) )
# - Get predictions for test sample and flip according to smallest MSE match.
#   Same greedy (x,y)-MSE re-ordering as done for the train sample above;
#   ``npars`` (number of parameters used for matching) is reused from there.
if flip_test_data:
    (outputs_labels_pred, outputs_pred)= model.predict(inputs_test)
    nsamples= outputs_pred.shape[0]
    for sample in range(nsamples):
        mses= np.zeros((nmaxobjects,nmaxobjects))
        predout_mse= np.zeros((nmaxobjects,npars))
        expout_mse= np.zeros((nmaxobjects,npars))
        predout= np.zeros((nmaxobjects,ntargetpars))
        expout= np.zeros((nmaxobjects,ntargetpars))
        expout_labels= np.zeros((nmaxobjects,1))
        for i in range(nmaxobjects):
            expout_labels[i,0]= flipped_outputs_labels_test[sample,i]
            for j in range(ntargetpars):
                predout[i,j]= outputs_pred[sample,j+i*ntargetpars]
                expout[i,j]= flipped_outputs_test[sample,j+i*ntargetpars]
            for j in range(npars):
                predout_mse[i,j]= outputs_pred[sample,j+i*ntargetpars]
                expout_mse[i,j]= flipped_outputs_test[sample,j+i*ntargetpars]
        for i in range(nmaxobjects):
            for j in range(nmaxobjects):
                mse= np.mean(np.square(expout_mse[i,:]-predout_mse[j,:]))
                mses[i,j]= mse
        # - Find new ordering according to smallest MSE (greedy assignment)
        mses_copy= mses.copy() # FIX: was an alias of ``mses``; keep a pristine copy for debugging
        reorder_indexes= np.zeros(nmaxobjects,dtype=int)
        for i in range(nmaxobjects):
            ind_exp, ind_pred= np.unravel_index(mses.argmin(),mses.shape) # Find index of smallest mse
            # FIX: np.Inf was removed in NumPy 2.0; np.inf is the supported spelling
            mses[ind_exp]= np.inf # Set mse to largest value so that it is not re-assigned anymore
            mses[:,ind_pred]= np.inf
            reorder_indexes[ind_pred]= ind_exp
        #- Save before flipping (kept for the optional debug prints below)
        target= ','.join(map(str, flipped_outputs_test[sample,:]))
        target_labels= ','.join(map(str, flipped_outputs_labels_test[sample,:]))
        #- Flip target
        flipped_outputs_test[sample]= expout[reorder_indexes].flatten()
        flipped_outputs_labels_test[sample]= expout_labels[reorder_indexes].flatten()
        #- Print
        #flipped_target= ','.join(map(str, flipped_outputs_test[sample,:]))
        #flipped_target_labels= ','.join(map(str, flipped_outputs_labels_test[sample,:]))
        #pred= ','.join(map(str, outputs_pred[sample,:]))
        #pred_labels= ','.join(map(str, outputs_labels_pred[sample,:]))
        #mse= ','.join(map(str, mses_copy[:,:]))
#===========================
#==   SAVE NN
#===========================
# Persist the trained network to HDF5 files in the working directory:
# weights only, plus the full model (presumably Keras ``save`` including
# architecture - confirm the model type defined earlier in the script).
#- Save the model weights
print("INFO: Saving model weights ...")
model.save_weights('model_weights.h5')
#- Save the model
print("INFO: Saving model ...")
model.save('model.h5')
#===========================
#==   EVALUATE NN
#===========================
# Evaluate the trained network on the train sample: dump per-entry
# target-vs-prediction comparisons, then accumulate detection completeness
# (fraction of true objects detected) and reliability (fraction of
# detections matching a true object), collecting position/flux residuals
# for each detected true object.
print("INFO: Classifying train data ...")
(predictions_labels_train, predictions_train)= model.predict(inputs_train)
for i in range(predictions_labels_train.shape[0]):
    #target= ','.join(map(str, outputs_labels_train[i,:]))
    target= ','.join(map(str, flipped_outputs_labels_train[i,:]))
    pred= ','.join(map(str, predictions_labels_train[i,:]))
    print("DEBUG: Train labels entry no. %d: target=[%s], pred=[%s]" % (i+1,target,pred) )
for i in range(predictions_train.shape[0]):
    #target= ','.join(map(str, outputs_train[i,:]))
    target= ','.join(map(str, flipped_outputs_train[i,:]))
    pred= ','.join(map(str, predictions_train[i,:]))
    print("DEBUG: Train spars entry no. %d: target=[%s], pred=[%s]" % (i+1,target,pred) )
#- Computing true & false detections
nsamples_train= outputs_labels_train.shape[0]
detThr= 0.5 # threshold on the predicted presence score to call a detection
nobjs_tot= 0       # true objects in the sample
nobjs_true= 0      # true objects that were detected (completeness numerator)
nobjs_rec= 0       # reconstructed (detected) objects
nobjs_rec_true= 0  # detections matching a true object (reliability numerator)
nobjs_rec_false= 0 # spurious detections
s_list= []     # log10(true flux) of each detected true object
xpull_list= [] # x-position residuals (rec - true)
ypull_list= [] # y-position residuals (rec - true)
spull_list= [] # relative flux residuals (rec/true - 1)
for i in range(nsamples_train):
    #target= outputs_labels_train[i,:]
    target= flipped_outputs_labels_train[i,:]
    pred= predictions_labels_train[i,:]
    #target_pars= outputs_train[i,:]
    target_pars= flipped_outputs_train[i,:]
    pred_pars= predictions_train[i,:]
    true_obj_indexes= np.argwhere(target==1).flatten()
    rec_obj_indexes= np.argwhere(pred>detThr).flatten()
    n= len(true_obj_indexes)
    nrec= len(rec_obj_indexes)
    ntrue= 0
    nrec_true= 0
    nrec_false= 0
    for index in true_obj_indexes:
        x0_true= target_pars[0 + index*ntargetpars]
        y0_true= target_pars[1 + index*ntargetpars]
        S_true= target_pars[2 + index*ntargetpars]
        if pred[index]>detThr:
            ntrue+= 1
            x0_rec= pred_pars[0 + index*ntargetpars]
            y0_rec= pred_pars[1 + index*ntargetpars]
            S_rec= pred_pars[2 + index*ntargetpars]
            s_list.append(np.log10(S_true))
            spull_list.append(S_rec/S_true-1)
            xpull_list.append(x0_rec-x0_true)
            ypull_list.append(y0_rec-y0_true)
    for index in rec_obj_indexes:
        if target[index]==1:
            nrec_true+= 1
        else:
            nrec_false+= 1
    nobjs_tot+= n
    nobjs_rec+= nrec
    nobjs_true+= ntrue
    nobjs_rec_true+= nrec_true
    nobjs_rec_false+= nrec_false
# FIX: guard the divisions so an empty sample / zero detections cannot
# raise ZeroDivisionError (report 0 instead)
completeness_train= float(nobjs_true)/float(nobjs_tot) if nobjs_tot>0 else 0.
reliability_train= float(nobjs_rec_true)/float(nobjs_rec) if nobjs_rec>0 else 0.
print("INFO: NN Train Results: Completeness(det/tot=%d/%d)=%s, Reliability(true/rec=%d/%d)=%s" % (nobjs_true,nobjs_tot,str(completeness_train),nobjs_rec_true,nobjs_rec,str(reliability_train)))
# Same completeness/reliability accumulation as the train-sample evaluation
# above, applied to the test sample (residual lists get a ``_test`` suffix).
print("INFO: Classifying test data ...")
(predictions_labels_test, predictions_test)= model.predict(inputs_test)
nsamples_test= outputs_labels_test.shape[0]
detThr= 0.5 # threshold on the predicted presence score to call a detection
nobjs_tot= 0
nobjs_true= 0
nobjs_rec= 0
nobjs_rec_true= 0
nobjs_rec_false= 0
s_list_test= []
xpull_list_test= []
ypull_list_test= []
spull_list_test= []
for i in range(nsamples_test):
    #target= outputs_labels_test[i,:]
    target= flipped_outputs_labels_test[i,:]
    pred= predictions_labels_test[i,:]
    #target_pars= outputs_test[i,:]
    target_pars= flipped_outputs_test[i,:]
    pred_pars= predictions_test[i,:]
    true_obj_indexes= np.argwhere(target==1).flatten()
    rec_obj_indexes= np.argwhere(pred>detThr).flatten()
    n= len(true_obj_indexes)
    nrec= len(rec_obj_indexes)
    ntrue= 0
    nrec_true= 0
    nrec_false= 0
    for index in true_obj_indexes:
        x0_true= target_pars[0 + index*ntargetpars]
        y0_true= target_pars[1 + index*ntargetpars]
        S_true= target_pars[2 + index*ntargetpars]
        if pred[index]>detThr:
            ntrue+= 1
            x0_rec= pred_pars[0 + index*ntargetpars]
            y0_rec= pred_pars[1 + index*ntargetpars]
            S_rec= pred_pars[2 + index*ntargetpars]
            s_list_test.append(np.log10(S_true))
            spull_list_test.append(S_rec/S_true-1)
            xpull_list_test.append(x0_rec-x0_true)
            ypull_list_test.append(y0_rec-y0_true)
    for index in rec_obj_indexes:
        if target[index]==1:
            nrec_true+= 1
        else:
            nrec_false+= 1
    nobjs_tot+= n
    nobjs_rec+= nrec
    nobjs_true+= ntrue
    nobjs_rec_true+= nrec_true
    nobjs_rec_false+= nrec_false
# FIX: guard the divisions so an empty sample / zero detections cannot
# raise ZeroDivisionError (consistent with the train evaluation above)
completeness_test= float(nobjs_true)/float(nobjs_tot) if nobjs_tot>0 else 0.
reliability_test= float(nobjs_rec_true)/float(nobjs_rec) if nobjs_rec>0 else 0.
print("INFO: NN Test Results: Completeness(det/tot=%d/%d)=%s, Reliability(true/rec=%d/%d)=%s" % (nobjs_true,nobjs_tot,str(completeness_test),nobjs_rec_true,nobjs_rec,str(reliability_test)))
#for i in range(predictions_labels_test.shape[0]):
#	target= ','.join(map(str, outputs_labels_test[i,:]))
#	pred= ','.join(map(str, predictions_labels_test[i,:]))
#	print("INFO: Test labels entry no. %d: target=[%s], pred=[%s]" % (i+1,target,pred) )
#for i in range(predictions_test.shape[0]):
#	target= ','.join(map(str, outputs_test[i,:]))
#	pred= ','.join(map(str, predictions_test[i,:]))
#	print("INFO: Test spars entry no. %d: target=[%s], pred=[%s]" % (i+1,target,pred) )
#===========================
#==   PLOT NN RESULTS
#===========================
# Produce the diagnostic figures: model architecture, loss/accuracy curves
# vs epoch, and accuracy scatter plots of the residuals collected during the
# train/test evaluations above.  Output filenames (outfile_model, ...,
# outfile_fluxaccuracy) are defined earlier in the script.
# - Plot the network
print("INFO: Printing network model architecture to file ...")
plot_model(model, to_file=outfile_model)
# - Plot the total loss, type loss, spars loss
print("INFO: Plot the network loss to file ...")
lossNames = ["loss", "type_loss", "pars_loss"]
plt.style.use("ggplot")
(fig, ax) = plt.subplots(3, 1, figsize=(13, 13))
for (i, lossName) in enumerate(lossNames):
    # Plot the loss for both the training and validation data
    title = "Loss for {}".format(lossName) if lossName != "loss" else "Total loss"
    ax[i].set_title(title)
    ax[i].set_xlabel("Epoch #")
    ax[i].set_ylabel("Loss")
    #ax[i].plot(np.arange(0, nepochs), fitout.history[lossName], label="TRAIN SAMPLE - " + lossName)
    #ax[i].plot(np.arange(0, nepochs), fitout.history["val_" + lossName], label="TEST SAMPLE - " + lossName)
    ax[i].plot(np.arange(0, nepochs), train_loss_vs_epoch[i], label="TRAIN SAMPLE - " + lossName)
    ax[i].plot(np.arange(0, nepochs), test_loss_vs_epoch[i], label="TEST SAMPLE - " + lossName)
    ax[i].legend()
plt.tight_layout()
plt.savefig(outfile_loss)
plt.close()
# - Plot the accuracy
print("INFO: Plot the network accuracy metric to file ...")
accuracyNames = ["type_acc", "pars_acc"]
plt.style.use("ggplot")
(fig, ax) = plt.subplots(2, 1, figsize=(8, 8))
for (i, accuracyName) in enumerate(accuracyNames):
    # Plot the accuracy for both the training and validation data
    ax[i].set_title("Accuracy for {}".format(accuracyName))
    ax[i].set_xlabel("Epoch #")
    ax[i].set_ylabel("Accuracy")
    #ax[i].plot(np.arange(0, nepochs), fitout.history[accuracyName], label="TRAIN SAMPLE - " + accuracyName)
    #ax[i].plot(np.arange(0, nepochs), fitout.history["val_" + accuracyName], label="TEST SAMPLE - " + accuracyName)
    ax[i].plot(np.arange(0, nepochs), train_accuracy_vs_epoch[i], label="TRAIN SAMPLE - " + accuracyName)
    ax[i].plot(np.arange(0, nepochs), test_accuracy_vs_epoch[i], label="TEST SAMPLE - " + accuracyName)
    ax[i].legend()
plt.tight_layout()
plt.savefig(outfile_accuracy)
plt.close()
# - Plot x, y position reco accuracy for detected sources
# (top row: train sample, bottom row: test sample; x axis is the true log flux)
print("INFO: Plot the source (x, y) position accuracy ...")
plt.style.use("ggplot")
(fig, ax) = plt.subplots(2, 2, figsize=(8, 8))
ax[0,0].set_title("x Position Accuracy")
ax[0,0].set_xlabel("logS (Jy/beam)")
ax[0,0].set_ylabel("dx")
ax[0,0].scatter(np.array(s_list),np.array(xpull_list),label="TRAIN SAMPLE")
ax[0,0].legend()
ax[0,1].set_title("y Position Accuracy")
ax[0,1].set_xlabel("logS (Jy/beam)")
ax[0,1].set_ylabel("dy")
ax[0,1].scatter(np.array(s_list),np.array(ypull_list),label="TRAIN SAMPLE")
ax[0,1].legend()
ax[1,0].set_title("x Position Accuracy")
ax[1,0].set_xlabel("logS (Jy/beam)")
ax[1,0].set_ylabel("dx")
ax[1,0].scatter(np.array(s_list_test),np.array(xpull_list_test),label="TEST SAMPLE")
ax[1,0].legend()
ax[1,1].set_title("y Position Accuracy")
ax[1,1].set_xlabel("logS (Jy/beam)")
ax[1,1].set_ylabel("dy")
ax[1,1].scatter(np.array(s_list_test),np.array(ypull_list_test),label="TEST SAMPLE")
ax[1,1].legend()
plt.tight_layout()
plt.savefig(outfile_posaccuracy)
plt.close()
# - Plot flux reco accuracy for detected sources
# (top: train sample, bottom: test sample)
print("INFO: Plot the source flux accuracy ...")
plt.style.use("ggplot")
(fig, ax) = plt.subplots(2, 1, figsize=(8, 8))
ax[0].set_title("Flux Accuracy")
ax[0].set_xlabel("logS (Jy/beam)")
ax[0].set_ylabel("dS")
ax[0].scatter(np.array(s_list),np.array(spull_list),label="TRAIN SAMPLE")
ax[0].legend()
ax[1].set_title("Flux Accuracy")
ax[1].set_xlabel("logS (Jy/beam)")
ax[1].set_ylabel("dS")
ax[1].scatter(np.array(s_list_test),np.array(spull_list_test),label="TEST SAMPLE")
ax[1].legend()
plt.tight_layout()
plt.savefig(outfile_fluxaccuracy)
plt.close()
###################
##   MAIN EXEC   ##
###################
# Script entry point: run main() and propagate its return value as the
# process exit status.
if __name__ == "__main__":
    sys.exit(main())
|
SKA-INAFREPO_NAMEcaesarPATH_START.@caesar_extracted@caesar-master@scripts@cnn_classifier.py@.PATH_END.py
|
{
"filename": "concatenation_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/compiler/tensorrt/test/concatenation_test.py",
"type": "Python"
}
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model script to test TF-TensorRT integration."""
import numpy as np
from tensorflow.python.compiler.tensorrt.test import tf_trt_integration_test_base as trt_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.platform import test
class ConcatenationTest(trt_test.TfTrtIntegrationTestBase):
  """Testing Concatenation in TF-TRT conversion.

  Builds twelve elementwise results against random constants of various
  broadcastable shapes, concatenates them in two groups of six, merges the
  groups, and reshapes the result.
  """

  def GraphFn(self, x):
    dtype = x.dtype

    def _random_const(*shape):
      # Fresh random constant with the requested (broadcastable) shape.
      return constant_op.constant(np.random.randn(*shape), dtype=dtype)

    branches = [
        x / _random_const(3, 1, 1),
        _random_const(3, 1, 1) / x,
        _random_const(1, 3, 1) + x,
        x * _random_const(1, 3, 1),
        x - _random_const(3, 1, 1),
        _random_const(3, 1, 1) - x,
        x - _random_const(3, 1),
        _random_const(3, 1) - x,
        gen_math_ops.maximum(x, _random_const(3, 1, 1)),
        gen_math_ops.minimum(_random_const(3, 1), x),
        x * _random_const(3),
        _random_const(1) * x,
    ]
    head = array_ops.concat(branches[:6], axis=-1)
    tail = array_ops.concat(branches[6:], axis=3)
    merged = array_ops.concat([head, tail], axis=-1)
    return gen_array_ops.reshape(merged, [2, -1], name="output_0")

  def GetParams(self):
    # One [2, 3, 3, 1] float32 input; expected output shape [2, 126].
    return self.BuildParams(self.GraphFn, dtypes.float32, [[2, 3, 3, 1]],
                            [[2, 126]])

  def ExpectedEnginesToBuild(self, run_params):
    """Return the expected engines to build."""
    return ["TRTEngineOp_000"]
# Test-suite entry point when run as a standalone script.
if __name__ == "__main__":
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@compiler@tensorrt@test@concatenation_test.py@.PATH_END.py
|
{
"filename": "_align.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/scene/annotation/_align.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``align`` property of ``layout.scene.annotation``.

    Accepts only the horizontal alignment keywords "left", "center" and
    "right"; edits trigger a full recalculation ("calc") unless overridden.
    """

    def __init__(
        self, plotly_name="align", parent_name="layout.scene.annotation", **kwargs
    ):
        edit_type = kwargs.pop("edit_type", "calc")
        allowed_values = kwargs.pop("values", ["left", "center", "right"])
        super(AlignValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@scene@annotation@_align.py@.PATH_END.py
|
{
"filename": "test_merge_asof.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/reshape/merge/test_merge_asof.py",
"type": "Python"
}
|
import datetime
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import (
Index,
Timedelta,
merge_asof,
option_context,
to_datetime,
)
import pandas._testing as tm
from pandas.core.reshape.merge import MergeError
class TestAsOfMerge:
def prep_data(self, df, dedupe=False):
if dedupe:
df = df.drop_duplicates(["time", "ticker"], keep="last").reset_index(
drop=True
)
df.time = to_datetime(df.time)
return df
@pytest.fixture
def trades(self):
df = pd.DataFrame(
[
["20160525 13:30:00.023", "MSFT", "51.9500", "75", "NASDAQ"],
["20160525 13:30:00.038", "MSFT", "51.9500", "155", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.7700", "100", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9200", "100", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "200", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "300", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "600", "NASDAQ"],
["20160525 13:30:00.048", "GOOG", "720.9300", "44", "NASDAQ"],
["20160525 13:30:00.074", "AAPL", "98.6700", "478343", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6700", "478343", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6600", "6", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "30", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "75", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "20", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "35", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.6500", "10", "NASDAQ"],
["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"],
["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "1000", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "300", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "400", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "600", "ARCA"],
["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"],
["20160525 13:30:00.078", "MSFT", "51.9500", "783", "NASDAQ"],
["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"],
["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"],
],
columns="time,ticker,price,quantity,marketCenter".split(","),
)
df["price"] = df["price"].astype("float64")
df["quantity"] = df["quantity"].astype("int64")
return self.prep_data(df)
@pytest.fixture
def quotes(self):
df = pd.DataFrame(
[
["20160525 13:30:00.023", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.023", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.041", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
["20160525 13:30:00.072", "GOOG", "720.50", "720.88"],
["20160525 13:30:00.075", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
["20160525 13:30:00.078", "MSFT", "51.92", "51.95"],
],
columns="time,ticker,bid,ask".split(","),
)
df["bid"] = df["bid"].astype("float64")
df["ask"] = df["ask"].astype("float64")
return self.prep_data(df, dedupe=True)
@pytest.fixture
def asof(self):
df = pd.DataFrame(
[
[
"20160525 13:30:00.023",
"MSFT",
"51.95",
"75",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.038",
"MSFT",
"51.95",
"155",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.77",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.92",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"200",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"300",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"600",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"44",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.074",
"AAPL",
"98.67",
"478343",
"NASDAQ",
np.nan,
np.nan,
],
[
"20160525 13:30:00.075",
"AAPL",
"98.67",
"478343",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.66",
"6",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"30",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"75",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"20",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"35",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"10",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"1000",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"300",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"400",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"600",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"783",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
],
columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
)
df["price"] = df["price"].astype("float64")
df["quantity"] = df["quantity"].astype("int64")
df["bid"] = df["bid"].astype("float64")
df["ask"] = df["ask"].astype("float64")
return self.prep_data(df)
@pytest.fixture
def tolerance(self):
df = pd.DataFrame(
[
[
"20160525 13:30:00.023",
"MSFT",
"51.95",
"75",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.038",
"MSFT",
"51.95",
"155",
"NASDAQ",
"51.95",
"51.95",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.77",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.92",
"100",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"200",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"300",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"600",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.048",
"GOOG",
"720.93",
"44",
"NASDAQ",
"720.5",
"720.93",
],
[
"20160525 13:30:00.074",
"AAPL",
"98.67",
"478343",
"NASDAQ",
np.nan,
np.nan,
],
[
"20160525 13:30:00.075",
"AAPL",
"98.67",
"478343",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.66",
"6",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"30",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"75",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"20",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"35",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.65",
"10",
"NASDAQ",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.075",
"AAPL",
"98.55",
"6",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"1000",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"300",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"400",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"600",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.076",
"AAPL",
"98.56",
"200",
"ARCA",
"98.55",
"98.56",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"783",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
[
"20160525 13:30:00.078",
"MSFT",
"51.95",
"100",
"NASDAQ",
"51.92",
"51.95",
],
],
columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
)
df["price"] = df["price"].astype("float64")
df["quantity"] = df["quantity"].astype("int64")
df["bid"] = df["bid"].astype("float64")
df["ask"] = df["ask"].astype("float64")
return self.prep_data(df)
def test_examples1(self):
"""doc-string examples"""
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 3, 7]}
)
result = merge_asof(left, right, on="a")
tm.assert_frame_equal(result, expected)
    def test_examples2(self, unit):
        """doc-string examples

        By-ticker as-of merge of trades against quotes; checks the
        10ms-tolerance variant with exact time matches excluded.
        """
        if unit == "s":
            pytest.skip(
                "This test is invalid for unit='s' because that would "
                "round the trades['time']]"
            )
        trades = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.038",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.048",
                    ]
                ).astype(f"M8[{unit}]"),
                "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
            },
            columns=["time", "ticker", "price", "quantity"],
        )
        quotes = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.030",
                        "20160525 13:30:00.041",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.049",
                        "20160525 13:30:00.072",
                        "20160525 13:30:00.075",
                    ]
                ).astype(f"M8[{unit}]"),
                "ticker": [
                    "GOOG",
                    "MSFT",
                    "MSFT",
                    "MSFT",
                    "GOOG",
                    "AAPL",
                    "GOOG",
                    "MSFT",
                ],
                "bid": [720.50, 51.95, 51.97, 51.99, 720.50, 97.99, 720.50, 52.01],
                "ask": [720.93, 51.96, 51.98, 52.00, 720.93, 98.01, 720.88, 52.03],
            },
            columns=["time", "ticker", "bid", "ask"],
        )
        # Smoke-check the plain and 2ms-tolerance variants (results unused).
        merge_asof(trades, quotes, on="time", by="ticker")
        merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("2ms"))
        expected = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.038",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.048",
                    ]
                ).astype(f"M8[{unit}]"),
                "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
                # Only the 038 MSFT trade has a strictly-earlier quote within 10ms.
                "bid": [np.nan, 51.97, np.nan, np.nan, np.nan],
                "ask": [np.nan, 51.98, np.nan, np.nan, np.nan],
            },
            columns=["time", "ticker", "price", "quantity", "bid", "ask"],
        )
        result = merge_asof(
            trades,
            quotes,
            on="time",
            by="ticker",
            tolerance=Timedelta("10ms"),
            allow_exact_matches=False,
        )
        tm.assert_frame_equal(result, expected)
def test_examples3(self):
"""doc-string examples"""
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, np.nan]}
)
result = merge_asof(left, right, on="a", direction="forward")
tm.assert_frame_equal(result, expected)
def test_examples4(self):
"""doc-string examples"""
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 6, 7], "right_val": [1, 2, 3, 6, 7]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, 6, 7]}
)
result = merge_asof(left, right, on="a", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_basic(self, trades, asof, quotes):
expected = asof
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_categorical(self, trades, asof, quotes):
expected = asof
trades.ticker = trades.ticker.astype("category")
quotes.ticker = quotes.ticker.astype("category")
expected.ticker = expected.ticker.astype("category")
result = merge_asof(trades, quotes, on="time", by="ticker")
tm.assert_frame_equal(result, expected)
def test_basic_left_index(self, trades, asof, quotes):
# GH14253
expected = asof
trades = trades.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_on="time", by="ticker"
)
# left-only index uses right"s index, oddly
expected.index = result.index
# time column appears after left"s columns
expected = expected[result.columns]
tm.assert_frame_equal(result, expected)
def test_basic_right_index(self, trades, asof, quotes):
expected = asof
quotes = quotes.set_index("time")
result = merge_asof(
trades, quotes, left_on="time", right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_basic_left_index_right_index(self, trades, asof, quotes):
expected = asof.set_index("time")
trades = trades.set_index("time")
quotes = quotes.set_index("time")
result = merge_asof(
trades, quotes, left_index=True, right_index=True, by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_multi_index_left(self, trades, quotes):
# MultiIndex is prohibited
trades = trades.set_index(["time", "price"])
quotes = quotes.set_index("time")
with pytest.raises(MergeError, match="left can only have one index"):
merge_asof(trades, quotes, left_index=True, right_index=True)
def test_multi_index_right(self, trades, quotes):
# MultiIndex is prohibited
trades = trades.set_index("time")
quotes = quotes.set_index(["time", "bid"])
with pytest.raises(MergeError, match="right can only have one index"):
merge_asof(trades, quotes, left_index=True, right_index=True)
def test_on_and_index_left_on(self, trades, quotes):
# "on" parameter and index together is prohibited
trades = trades.set_index("time")
quotes = quotes.set_index("time")
msg = 'Can only pass argument "left_on" OR "left_index" not both.'
with pytest.raises(MergeError, match=msg):
merge_asof(
trades, quotes, left_on="price", left_index=True, right_index=True
)
def test_on_and_index_right_on(self, trades, quotes):
trades = trades.set_index("time")
quotes = quotes.set_index("time")
msg = 'Can only pass argument "right_on" OR "right_index" not both.'
with pytest.raises(MergeError, match=msg):
merge_asof(
trades, quotes, right_on="bid", left_index=True, right_index=True
)
def test_basic_left_by_right_by(self, trades, asof, quotes):
# GH14253
expected = asof
result = merge_asof(
trades, quotes, on="time", left_by="ticker", right_by="ticker"
)
tm.assert_frame_equal(result, expected)
def test_missing_right_by(self, trades, asof, quotes):
expected = asof
q = quotes[quotes.ticker != "MSFT"]
result = merge_asof(trades, q, on="time", by="ticker")
expected.loc[expected.ticker == "MSFT", ["bid", "ask"]] = np.nan
tm.assert_frame_equal(result, expected)
    def test_multiby(self):
        """Merging on multiple ``by`` columns (ticker AND exchange), GH13936."""
        trades = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.046",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.050",
                    ]
                ),
                "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
                "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
            },
            columns=["time", "ticker", "exch", "price", "quantity"],
        )
        quotes = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.030",
                        "20160525 13:30:00.041",
                        "20160525 13:30:00.045",
                        "20160525 13:30:00.049",
                    ]
                ),
                "ticker": ["GOOG", "MSFT", "MSFT", "MSFT", "GOOG", "AAPL"],
                "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
                "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
                "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
            },
            columns=["time", "ticker", "exch", "bid", "ask"],
        )
        expected = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.046",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.050",
                    ]
                ),
                "ticker": ["MSFT", "MSFT", "GOOG", "GOOG", "AAPL"],
                "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
                # Matches require BOTH ticker and exch to agree, hence the NaNs.
                "bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
                "ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
            },
            columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
        )
        result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize("dtype", ["object", "string"])
    def test_multiby_heterogeneous_types(self, dtype):
        """Multiple ``by`` columns of mixed dtypes (int ticker + str exch), GH13936."""
        trades = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.046",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.050",
                    ]
                ),
                "ticker": [0, 0, 1, 1, 2],
                "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
            },
            columns=["time", "ticker", "exch", "price", "quantity"],
        )
        trades = trades.astype({"ticker": dtype, "exch": dtype})
        quotes = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.030",
                        "20160525 13:30:00.041",
                        "20160525 13:30:00.045",
                        "20160525 13:30:00.049",
                    ]
                ),
                "ticker": [1, 0, 0, 0, 1, 2],
                "exch": ["BATS", "NSDQ", "ARCA", "ARCA", "NSDQ", "ARCA"],
                "bid": [720.51, 51.95, 51.97, 51.99, 720.50, 97.99],
                "ask": [720.92, 51.96, 51.98, 52.00, 720.93, 98.01],
            },
            columns=["time", "ticker", "exch", "bid", "ask"],
        )
        quotes = quotes.astype({"ticker": dtype, "exch": dtype})
        expected = pd.DataFrame(
            {
                "time": to_datetime(
                    [
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.023",
                        "20160525 13:30:00.046",
                        "20160525 13:30:00.048",
                        "20160525 13:30:00.050",
                    ]
                ),
                "ticker": [0, 0, 1, 1, 2],
                "exch": ["ARCA", "NSDQ", "NSDQ", "BATS", "NSDQ"],
                "price": [51.95, 51.95, 720.77, 720.92, 98.00],
                "quantity": [75, 155, 100, 100, 100],
                "bid": [np.nan, 51.95, 720.50, 720.51, np.nan],
                "ask": [np.nan, 51.96, 720.93, 720.92, np.nan],
            },
            columns=["time", "ticker", "exch", "price", "quantity", "bid", "ask"],
        )
        expected = expected.astype({"ticker": dtype, "exch": dtype})
        result = merge_asof(trades, quotes, on="time", by=["ticker", "exch"])
        tm.assert_frame_equal(result, expected)
def test_mismatched_index_dtype(self):
# similar to test_multiby_indexed, but we change the dtype on left.index
left = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a"],
[to_datetime("20160602"), 2, "a"],
[to_datetime("20160603"), 1, "b"],
[to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
# different dtype for the index
left.index = left.index - pd.Timestamp(0)
right = pd.DataFrame(
[
[to_datetime("20160502"), 1, "a", 1.0],
[to_datetime("20160502"), 2, "a", 2.0],
[to_datetime("20160503"), 1, "b", 3.0],
[to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
msg = "incompatible merge keys"
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, left_index=True, right_index=True, by=["k1", "k2"])
def test_multiby_indexed(self):
# GH15676
left = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a"],
[to_datetime("20160602"), 2, "a"],
[to_datetime("20160603"), 1, "b"],
[to_datetime("20160603"), 2, "b"],
],
columns=["time", "k1", "k2"],
).set_index("time")
right = pd.DataFrame(
[
[to_datetime("20160502"), 1, "a", 1.0],
[to_datetime("20160502"), 2, "a", 2.0],
[to_datetime("20160503"), 1, "b", 3.0],
[to_datetime("20160503"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
expected = pd.DataFrame(
[
[to_datetime("20160602"), 1, "a", 1.0],
[to_datetime("20160602"), 2, "a", 2.0],
[to_datetime("20160603"), 1, "b", 3.0],
[to_datetime("20160603"), 2, "b", 4.0],
],
columns=["time", "k1", "k2", "value"],
).set_index("time")
result = merge_asof(
left, right, left_index=True, right_index=True, by=["k1", "k2"]
)
tm.assert_frame_equal(expected, result)
with pytest.raises(
MergeError, match="left_by and right_by must be the same length"
):
merge_asof(
left,
right,
left_index=True,
right_index=True,
left_by=["k1", "k2"],
right_by=["k1"],
)
    def test_basic2(self, datapath):
        """Full-size trades/quotes as-of merge matches a precomputed result.

        The fixtures below are verbatim market-data snapshots; values and
        column dtypes are significant and must not be altered.
        """
        # NOTE(review): the `datapath` fixture appears unused in this body —
        # kept in the signature for compatibility; confirm before removing.
        # Expected merged output: trade rows with the prevailing bid/ask.
        expected = pd.DataFrame(
            [
                [
                    "20160525 13:30:00.023",
                    "MSFT",
                    "51.95",
                    "75",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.038",
                    "MSFT",
                    "51.95",
                    "155",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.77",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.92",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "200",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "300",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "600",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "44",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.074",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.66",
                    "6",
                    "NASDAQ",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "30",
                    "NASDAQ",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "75",
                    "NASDAQ",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "20",
                    "NASDAQ",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "35",
                    "NASDAQ",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "10",
                    "NASDAQ",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.55",
                    "6",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.55",
                    "6",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "1000",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "300",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "400",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "600",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "783",
                    "NASDAQ",
                    "51.92",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.92",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.92",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.084",
                    "AAPL",
                    "98.64",
                    "40",
                    "NASDAQ",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.084",
                    "AAPL",
                    "98.55",
                    "149",
                    "EDGX",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.086",
                    "AAPL",
                    "98.56",
                    "500",
                    "ARCA",
                    "98.55",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "647",
                    "EDGX",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "300",
                    "EDGX",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "50",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "50",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "70",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "70",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "1",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "62",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "10",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.104",
                    "AAPL",
                    "98.63",
                    "100",
                    "ARCA",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.105",
                    "AAPL",
                    "98.63",
                    "100",
                    "ARCA",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.105",
                    "AAPL",
                    "98.63",
                    "700",
                    "ARCA",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.106",
                    "AAPL",
                    "98.63",
                    "61",
                    "EDGX",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.107",
                    "AAPL",
                    "98.63",
                    "100",
                    "ARCA",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.107",
                    "AAPL",
                    "98.63",
                    "53",
                    "ARCA",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.108",
                    "AAPL",
                    "98.63",
                    "100",
                    "ARCA",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.108",
                    "AAPL",
                    "98.63",
                    "839",
                    "ARCA",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.115",
                    "AAPL",
                    "98.63",
                    "5",
                    "EDGX",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.118",
                    "AAPL",
                    "98.63",
                    "295",
                    "EDGX",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.118",
                    "AAPL",
                    "98.63",
                    "5",
                    "EDGX",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.128",
                    "AAPL",
                    "98.63",
                    "100",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.128",
                    "AAPL",
                    "98.63",
                    "100",
                    "NASDAQ",
                    "98.62",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.128",
                    "MSFT",
                    "51.92",
                    "100",
                    "ARCA",
                    "51.92",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "100",
                    "NASDAQ",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "10",
                    "NASDAQ",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "59",
                    "NASDAQ",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "31",
                    "NASDAQ",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "69",
                    "NASDAQ",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "12",
                    "NASDAQ",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "12",
                    "EDGX",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "100",
                    "ARCA",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.129",
                    "AAPL",
                    "98.62",
                    "100",
                    "ARCA",
                    "98.61",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.130",
                    "MSFT",
                    "51.95",
                    "317",
                    "ARCA",
                    "51.93",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.130",
                    "MSFT",
                    "51.95",
                    "283",
                    "ARCA",
                    "51.93",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.135",
                    "MSFT",
                    "51.93",
                    "100",
                    "EDGX",
                    "51.92",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.135",
                    "AAPL",
                    "98.62",
                    "100",
                    "ARCA",
                    "98.61",
                    "98.62",
                ],
                [
                    "20160525 13:30:00.144",
                    "AAPL",
                    "98.62",
                    "12",
                    "NASDAQ",
                    "98.61",
                    "98.62",
                ],
                [
                    "20160525 13:30:00.144",
                    "AAPL",
                    "98.62",
                    "88",
                    "NASDAQ",
                    "98.61",
                    "98.62",
                ],
                [
                    "20160525 13:30:00.144",
                    "AAPL",
                    "98.62",
                    "162",
                    "NASDAQ",
                    "98.61",
                    "98.62",
                ],
                [
                    "20160525 13:30:00.144",
                    "AAPL",
                    "98.61",
                    "100",
                    "BATS",
                    "98.61",
                    "98.62",
                ],
                [
                    "20160525 13:30:00.144",
                    "AAPL",
                    "98.62",
                    "61",
                    "ARCA",
                    "98.61",
                    "98.62",
                ],
                [
                    "20160525 13:30:00.144",
                    "AAPL",
                    "98.62",
                    "25",
                    "ARCA",
                    "98.61",
                    "98.62",
                ],
                [
                    "20160525 13:30:00.144",
                    "AAPL",
                    "98.62",
                    "14",
                    "ARCA",
                    "98.61",
                    "98.62",
                ],
                [
                    "20160525 13:30:00.145",
                    "AAPL",
                    "98.62",
                    "12",
                    "ARCA",
                    "98.6",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.145",
                    "AAPL",
                    "98.62",
                    "100",
                    "ARCA",
                    "98.6",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.145",
                    "AAPL",
                    "98.63",
                    "100",
                    "NASDAQ",
                    "98.6",
                    "98.63",
                ],
                [
                    "20160525 13:30:00.145",
                    "AAPL",
                    "98.63",
                    "100",
                    "NASDAQ",
                    "98.6",
                    "98.63",
                ],
            ],
            columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
        )
        expected["price"] = expected["price"].astype("float64")
        expected["quantity"] = expected["quantity"].astype("int64")
        expected["bid"] = expected["bid"].astype("float64")
        expected["ask"] = expected["ask"].astype("float64")
        expected = self.prep_data(expected)
        # Left side of the merge: raw trade prints.
        trades = pd.DataFrame(
            [
                ["20160525 13:30:00.023", "MSFT", "51.9500", "75", "NASDAQ"],
                ["20160525 13:30:00.038", "MSFT", "51.9500", "155", "NASDAQ"],
                ["20160525 13:30:00.048", "GOOG", "720.7700", "100", "NASDAQ"],
                ["20160525 13:30:00.048", "GOOG", "720.9200", "100", "NASDAQ"],
                ["20160525 13:30:00.048", "GOOG", "720.9300", "200", "NASDAQ"],
                ["20160525 13:30:00.048", "GOOG", "720.9300", "300", "NASDAQ"],
                ["20160525 13:30:00.048", "GOOG", "720.9300", "600", "NASDAQ"],
                ["20160525 13:30:00.048", "GOOG", "720.9300", "44", "NASDAQ"],
                ["20160525 13:30:00.074", "AAPL", "98.6700", "478343", "NASDAQ"],
                ["20160525 13:30:00.075", "AAPL", "98.6700", "478343", "NASDAQ"],
                ["20160525 13:30:00.075", "AAPL", "98.6600", "6", "NASDAQ"],
                ["20160525 13:30:00.075", "AAPL", "98.6500", "30", "NASDAQ"],
                ["20160525 13:30:00.075", "AAPL", "98.6500", "75", "NASDAQ"],
                ["20160525 13:30:00.075", "AAPL", "98.6500", "20", "NASDAQ"],
                ["20160525 13:30:00.075", "AAPL", "98.6500", "35", "NASDAQ"],
                ["20160525 13:30:00.075", "AAPL", "98.6500", "10", "NASDAQ"],
                ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"],
                ["20160525 13:30:00.075", "AAPL", "98.5500", "6", "ARCA"],
                ["20160525 13:30:00.076", "AAPL", "98.5600", "1000", "ARCA"],
                ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"],
                ["20160525 13:30:00.076", "AAPL", "98.5600", "300", "ARCA"],
                ["20160525 13:30:00.076", "AAPL", "98.5600", "400", "ARCA"],
                ["20160525 13:30:00.076", "AAPL", "98.5600", "600", "ARCA"],
                ["20160525 13:30:00.076", "AAPL", "98.5600", "200", "ARCA"],
                ["20160525 13:30:00.078", "MSFT", "51.9500", "783", "NASDAQ"],
                ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"],
                ["20160525 13:30:00.078", "MSFT", "51.9500", "100", "NASDAQ"],
                ["20160525 13:30:00.084", "AAPL", "98.6400", "40", "NASDAQ"],
                ["20160525 13:30:00.084", "AAPL", "98.5500", "149", "EDGX"],
                ["20160525 13:30:00.086", "AAPL", "98.5600", "500", "ARCA"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "647", "EDGX"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "300", "EDGX"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "50", "NASDAQ"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "50", "NASDAQ"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "70", "NASDAQ"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "70", "NASDAQ"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "1", "NASDAQ"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "62", "NASDAQ"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "10", "NASDAQ"],
                ["20160525 13:30:00.104", "AAPL", "98.6300", "100", "ARCA"],
                ["20160525 13:30:00.105", "AAPL", "98.6300", "100", "ARCA"],
                ["20160525 13:30:00.105", "AAPL", "98.6300", "700", "ARCA"],
                ["20160525 13:30:00.106", "AAPL", "98.6300", "61", "EDGX"],
                ["20160525 13:30:00.107", "AAPL", "98.6300", "100", "ARCA"],
                ["20160525 13:30:00.107", "AAPL", "98.6300", "53", "ARCA"],
                ["20160525 13:30:00.108", "AAPL", "98.6300", "100", "ARCA"],
                ["20160525 13:30:00.108", "AAPL", "98.6300", "839", "ARCA"],
                ["20160525 13:30:00.115", "AAPL", "98.6300", "5", "EDGX"],
                ["20160525 13:30:00.118", "AAPL", "98.6300", "295", "EDGX"],
                ["20160525 13:30:00.118", "AAPL", "98.6300", "5", "EDGX"],
                ["20160525 13:30:00.128", "AAPL", "98.6300", "100", "NASDAQ"],
                ["20160525 13:30:00.128", "AAPL", "98.6300", "100", "NASDAQ"],
                ["20160525 13:30:00.128", "MSFT", "51.9200", "100", "ARCA"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "NASDAQ"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "10", "NASDAQ"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "59", "NASDAQ"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "31", "NASDAQ"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "69", "NASDAQ"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "12", "NASDAQ"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "12", "EDGX"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "ARCA"],
                ["20160525 13:30:00.129", "AAPL", "98.6200", "100", "ARCA"],
                ["20160525 13:30:00.130", "MSFT", "51.9500", "317", "ARCA"],
                ["20160525 13:30:00.130", "MSFT", "51.9500", "283", "ARCA"],
                ["20160525 13:30:00.135", "MSFT", "51.9300", "100", "EDGX"],
                ["20160525 13:30:00.135", "AAPL", "98.6200", "100", "ARCA"],
                ["20160525 13:30:00.144", "AAPL", "98.6200", "12", "NASDAQ"],
                ["20160525 13:30:00.144", "AAPL", "98.6200", "88", "NASDAQ"],
                ["20160525 13:30:00.144", "AAPL", "98.6200", "162", "NASDAQ"],
                ["20160525 13:30:00.144", "AAPL", "98.6100", "100", "BATS"],
                ["20160525 13:30:00.144", "AAPL", "98.6200", "61", "ARCA"],
                ["20160525 13:30:00.144", "AAPL", "98.6200", "25", "ARCA"],
                ["20160525 13:30:00.144", "AAPL", "98.6200", "14", "ARCA"],
                ["20160525 13:30:00.145", "AAPL", "98.6200", "12", "ARCA"],
                ["20160525 13:30:00.145", "AAPL", "98.6200", "100", "ARCA"],
                ["20160525 13:30:00.145", "AAPL", "98.6300", "100", "NASDAQ"],
                ["20160525 13:30:00.145", "AAPL", "98.6300", "100", "NASDAQ"],
            ],
            columns="time,ticker,price,quantity,marketCenter".split(","),
        )
        trades["price"] = trades["price"].astype("float64")
        trades["quantity"] = trades["quantity"].astype("int64")
        trades = self.prep_data(trades)
        # Right side of the merge: quote updates (deduped in prep_data).
        quotes = pd.DataFrame(
            [
                ["20160525 13:30:00.023", "GOOG", "720.50", "720.93"],
                ["20160525 13:30:00.023", "MSFT", "51.95", "51.95"],
                ["20160525 13:30:00.041", "MSFT", "51.95", "51.95"],
                ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
                ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
                ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
                ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
                ["20160525 13:30:00.072", "GOOG", "720.50", "720.88"],
                ["20160525 13:30:00.075", "AAPL", "98.55", "98.56"],
                ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
                ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
                ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
                ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
                ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
                ["20160525 13:30:00.078", "MSFT", "51.95", "51.95"],
                ["20160525 13:30:00.078", "MSFT", "51.92", "51.95"],
                ["20160525 13:30:00.079", "MSFT", "51.92", "51.95"],
                ["20160525 13:30:00.080", "AAPL", "98.55", "98.56"],
                ["20160525 13:30:00.084", "AAPL", "98.55", "98.56"],
                ["20160525 13:30:00.086", "AAPL", "98.55", "98.63"],
                ["20160525 13:30:00.088", "AAPL", "98.65", "98.63"],
                ["20160525 13:30:00.089", "AAPL", "98.63", "98.63"],
                ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"],
                ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"],
                ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"],
                ["20160525 13:30:00.104", "AAPL", "98.63", "98.63"],
                ["20160525 13:30:00.104", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.105", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.107", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.115", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.115", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.118", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.128", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.128", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.129", "AAPL", "98.61", "98.63"],
                ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.129", "AAPL", "98.62", "98.63"],
                ["20160525 13:30:00.129", "AAPL", "98.61", "98.63"],
                ["20160525 13:30:00.130", "MSFT", "51.93", "51.95"],
                ["20160525 13:30:00.130", "MSFT", "51.93", "51.95"],
                ["20160525 13:30:00.130", "AAPL", "98.61", "98.63"],
                ["20160525 13:30:00.131", "AAPL", "98.61", "98.62"],
                ["20160525 13:30:00.131", "AAPL", "98.61", "98.62"],
                ["20160525 13:30:00.135", "MSFT", "51.92", "51.95"],
                ["20160525 13:30:00.135", "AAPL", "98.61", "98.62"],
                ["20160525 13:30:00.136", "AAPL", "98.61", "98.62"],
                ["20160525 13:30:00.136", "AAPL", "98.61", "98.62"],
                ["20160525 13:30:00.144", "AAPL", "98.61", "98.62"],
                ["20160525 13:30:00.144", "AAPL", "98.61", "98.62"],
                ["20160525 13:30:00.145", "AAPL", "98.61", "98.62"],
                ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"],
                ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"],
                ["20160525 13:30:00.145", "AAPL", "98.60", "98.63"],
                ["20160525 13:30:00.145", "AAPL", "98.61", "98.63"],
                ["20160525 13:30:00.145", "AAPL", "98.60", "98.63"],
            ],
            columns="time,ticker,bid,ask".split(","),
        )
        quotes["bid"] = quotes["bid"].astype("float64")
        quotes["ask"] = quotes["ask"].astype("float64")
        quotes = self.prep_data(quotes, dedupe=True)
        result = merge_asof(trades, quotes, on="time", by="ticker")
        tm.assert_frame_equal(result, expected)
def test_basic_no_by(self, trades, asof, quotes):
f = (
lambda x: x[x.ticker == "MSFT"]
.drop("ticker", axis=1)
.reset_index(drop=True)
)
# just use a single ticker
expected = f(asof)
trades = f(trades)
quotes = f(quotes)
result = merge_asof(trades, quotes, on="time")
tm.assert_frame_equal(result, expected)
def test_valid_join_keys(self, trades, quotes):
msg = r"incompatible merge keys \[1\] .* must be the same type"
with pytest.raises(MergeError, match=msg):
merge_asof(trades, quotes, left_on="time", right_on="bid", by="ticker")
with pytest.raises(MergeError, match="can only asof on a key for left"):
merge_asof(trades, quotes, on=["time", "ticker"], by="ticker")
with pytest.raises(MergeError, match="can only asof on a key for left"):
merge_asof(trades, quotes, by="ticker")
def test_with_duplicates(self, datapath, trades, quotes, asof):
q = (
pd.concat([quotes, quotes])
.sort_values(["time", "ticker"])
.reset_index(drop=True)
)
result = merge_asof(trades, q, on="time", by="ticker")
expected = self.prep_data(asof)
tm.assert_frame_equal(result, expected)
def test_with_duplicates_no_on(self):
df1 = pd.DataFrame({"key": [1, 1, 3], "left_val": [1, 2, 3]})
df2 = pd.DataFrame({"key": [1, 2, 2], "right_val": [1, 2, 3]})
result = merge_asof(df1, df2, on="key")
expected = pd.DataFrame(
{"key": [1, 1, 3], "left_val": [1, 2, 3], "right_val": [1, 1, 3]}
)
tm.assert_frame_equal(result, expected)
def test_valid_allow_exact_matches(self, trades, quotes):
msg = "allow_exact_matches must be boolean, passed foo"
with pytest.raises(MergeError, match=msg):
merge_asof(
trades, quotes, on="time", by="ticker", allow_exact_matches="foo"
)
def test_valid_tolerance(self, trades, quotes):
# dti
merge_asof(trades, quotes, on="time", by="ticker", tolerance=Timedelta("1s"))
# integer
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1,
)
msg = r"incompatible tolerance .*, must be compat with type .*"
# incompat
with pytest.raises(MergeError, match=msg):
merge_asof(trades, quotes, on="time", by="ticker", tolerance=1)
# invalid
with pytest.raises(MergeError, match=msg):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=1.0,
)
msg = "tolerance must be positive"
# invalid negative
with pytest.raises(MergeError, match=msg):
merge_asof(
trades, quotes, on="time", by="ticker", tolerance=-Timedelta("1s")
)
with pytest.raises(MergeError, match=msg):
merge_asof(
trades.reset_index(),
quotes.reset_index(),
on="index",
by="ticker",
tolerance=-1,
)
def test_non_sorted(self, trades, quotes):
trades = trades.sort_values("time", ascending=False)
quotes = quotes.sort_values("time", ascending=False)
# we require that we are already sorted on time & quotes
assert not trades.time.is_monotonic_increasing
assert not quotes.time.is_monotonic_increasing
with pytest.raises(ValueError, match="left keys must be sorted"):
merge_asof(trades, quotes, on="time", by="ticker")
trades = trades.sort_values("time")
assert trades.time.is_monotonic_increasing
assert not quotes.time.is_monotonic_increasing
with pytest.raises(ValueError, match="right keys must be sorted"):
merge_asof(trades, quotes, on="time", by="ticker")
quotes = quotes.sort_values("time")
assert trades.time.is_monotonic_increasing
assert quotes.time.is_monotonic_increasing
# ok, though has dupes
merge_asof(trades, quotes, on="time", by="ticker")
@pytest.mark.parametrize(
"tolerance_ts",
[Timedelta("1day"), datetime.timedelta(days=1)],
ids=["Timedelta", "datetime.timedelta"],
)
def test_tolerance(self, tolerance_ts, trades, quotes, tolerance):
result = merge_asof(
trades, quotes, on="time", by="ticker", tolerance=tolerance_ts
)
expected = tolerance
tm.assert_frame_equal(result, expected)
def test_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = merge_asof(left, right, on="a", direction="forward", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [1, np.nan, 11]}
)
result = merge_asof(left, right, on="a", direction="nearest", tolerance=1)
tm.assert_frame_equal(result, expected)
def test_tolerance_tz(self, unit):
# GH 14844
left = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=datetime.timezone.utc,
unit=unit,
),
"value1": np.arange(5),
}
)
right = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-01"),
freq="D",
periods=5,
tz=datetime.timezone.utc,
unit=unit,
),
"value2": list("ABCDE"),
}
)
result = merge_asof(left, right, on="date", tolerance=Timedelta("1 day"))
expected = pd.DataFrame(
{
"date": pd.date_range(
start=to_datetime("2016-01-02"),
freq="D",
periods=5,
tz=datetime.timezone.utc,
unit=unit,
),
"value1": np.arange(5),
"value2": list("BCDEE"),
}
)
tm.assert_frame_equal(result, expected)
def test_tolerance_float(self):
# GH22981
left = pd.DataFrame({"a": [1.1, 3.5, 10.9], "left_val": ["a", "b", "c"]})
right = pd.DataFrame(
{"a": [1.0, 2.5, 3.3, 7.5, 11.5], "right_val": [1.0, 2.5, 3.3, 7.5, 11.5]}
)
expected = pd.DataFrame(
{
"a": [1.1, 3.5, 10.9],
"left_val": ["a", "b", "c"],
"right_val": [1, 3.3, np.nan],
}
)
result = merge_asof(left, right, on="a", direction="nearest", tolerance=0.5)
tm.assert_frame_equal(result, expected)
def test_index_tolerance(self, trades, quotes, tolerance):
# GH 15135
expected = tolerance.set_index("time")
trades = trades.set_index("time")
quotes = quotes.set_index("time")
result = merge_asof(
trades,
quotes,
left_index=True,
right_index=True,
by="ticker",
tolerance=Timedelta("1day"),
)
tm.assert_frame_equal(result, expected)
    def test_allow_exact_matches(self, trades, quotes):
        """With allow_exact_matches=False, same-timestamp quotes are skipped.

        The expected frame below is verbatim fixture data; rows whose only
        candidate quote shares the trade's timestamp come back NaN.
        """
        result = merge_asof(
            trades, quotes, on="time", by="ticker", allow_exact_matches=False
        )
        df = pd.DataFrame(
            [
                [
                    "20160525 13:30:00.023",
                    "MSFT",
                    "51.95",
                    "75",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.038",
                    "MSFT",
                    "51.95",
                    "155",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.77",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.92",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "200",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "300",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "600",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "44",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.074",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.66",
                    "6",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "30",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "75",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "20",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "35",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "10",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan],
                ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "1000",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "300",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "400",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "600",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "783",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
            ],
            columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
        )
        df["price"] = df["price"].astype("float64")
        df["quantity"] = df["quantity"].astype("int64")
        df["bid"] = df["bid"].astype("float64")
        df["ask"] = df["ask"].astype("float64")
        expected = self.prep_data(df)
        tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 7, 11]}
)
result = merge_asof(
left, right, on="a", direction="forward", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 2, 3, 7, 11], "right_val": [1, 2, 3, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [2, 3, 11]}
)
result = merge_asof(
left, right, on="a", direction="nearest", allow_exact_matches=False
)
tm.assert_frame_equal(result, expected)
    def test_allow_exact_matches_and_tolerance(self, trades, quotes):
        """Combining allow_exact_matches=False with a 100ms tolerance.

        The expected frame below is verbatim fixture data; trades with no
        strictly-earlier quote inside the window come back NaN.
        """
        result = merge_asof(
            trades,
            quotes,
            on="time",
            by="ticker",
            tolerance=Timedelta("100ms"),
            allow_exact_matches=False,
        )
        df = pd.DataFrame(
            [
                [
                    "20160525 13:30:00.023",
                    "MSFT",
                    "51.95",
                    "75",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.038",
                    "MSFT",
                    "51.95",
                    "155",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.77",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.92",
                    "100",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "200",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "300",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "600",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.048",
                    "GOOG",
                    "720.93",
                    "44",
                    "NASDAQ",
                    "720.5",
                    "720.93",
                ],
                [
                    "20160525 13:30:00.074",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.67",
                    "478343",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.66",
                    "6",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "30",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "75",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "20",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "35",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                [
                    "20160525 13:30:00.075",
                    "AAPL",
                    "98.65",
                    "10",
                    "NASDAQ",
                    np.nan,
                    np.nan,
                ],
                ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan],
                ["20160525 13:30:00.075", "AAPL", "98.55", "6", "ARCA", np.nan, np.nan],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "1000",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "300",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "400",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "600",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.076",
                    "AAPL",
                    "98.56",
                    "200",
                    "ARCA",
                    "98.55",
                    "98.56",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "783",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
                [
                    "20160525 13:30:00.078",
                    "MSFT",
                    "51.95",
                    "100",
                    "NASDAQ",
                    "51.95",
                    "51.95",
                ],
            ],
            columns="time,ticker,price,quantity,marketCenter,bid,ask".split(","),
        )
        df["price"] = df["price"].astype("float64")
        df["quantity"] = df["quantity"].astype("int64")
        df["bid"] = df["bid"].astype("float64")
        df["ask"] = df["ask"].astype("float64")
        expected = self.prep_data(df)
        tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance2(self):
# GH 13695
df1 = pd.DataFrame(
{"time": to_datetime(["2016-07-15 13:30:00.030"]), "username": ["bob"]}
)
df2 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = merge_asof(df1, df2, on="time")
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [2],
}
)
tm.assert_frame_equal(result, expected)
result = merge_asof(df1, df2, on="time", allow_exact_matches=False)
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [1],
}
)
tm.assert_frame_equal(result, expected)
result = merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": to_datetime(["2016-07-15 13:30:00.030"]),
"username": ["bob"],
"version": [np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance3(self):
# GH 13709
df1 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
}
)
df2 = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.000", "2016-07-15 13:30:00.030"]
),
"version": [1, 2],
}
)
result = merge_asof(
df1,
df2,
on="time",
allow_exact_matches=False,
tolerance=Timedelta("10ms"),
)
expected = pd.DataFrame(
{
"time": to_datetime(
["2016-07-15 13:30:00.030", "2016-07-15 13:30:00.030"]
),
"username": ["bob", "charlie"],
"version": [np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_forward(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 6, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 6, 11]}
)
result = merge_asof(
left,
right,
on="a",
direction="forward",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_allow_exact_matches_and_tolerance_nearest(self):
# GH14887
left = pd.DataFrame({"a": [1, 5, 10], "left_val": ["a", "b", "c"]})
right = pd.DataFrame({"a": [1, 3, 4, 6, 11], "right_val": [1, 3, 4, 7, 11]})
expected = pd.DataFrame(
{"a": [1, 5, 10], "left_val": ["a", "b", "c"], "right_val": [np.nan, 4, 11]}
)
result = merge_asof(
left,
right,
on="a",
direction="nearest",
allow_exact_matches=False,
tolerance=1,
)
tm.assert_frame_equal(result, expected)
def test_forward_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Y", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Y", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, np.nan, 11, 15, 16],
}
)
result = merge_asof(left, right, on="a", by="b", direction="forward")
tm.assert_frame_equal(result, expected)
def test_nearest_by(self):
# GH14887
left = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
}
)
right = pd.DataFrame(
{
"a": [1, 6, 11, 15, 16],
"b": ["X", "Z", "Z", "Z", "Y"],
"right_val": [1, 6, 11, 15, 16],
}
)
expected = pd.DataFrame(
{
"a": [1, 5, 10, 12, 15],
"b": ["X", "X", "Z", "Z", "Y"],
"left_val": ["a", "b", "c", "d", "e"],
"right_val": [1, 1, 11, 11, 16],
}
)
result = merge_asof(left, right, on="a", by="b", direction="nearest")
tm.assert_frame_equal(result, expected)
def test_by_int(self):
# we specialize by type, so test that this is correct
df1 = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
},
columns=["time", "key", "value1"],
)
df2 = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.015",
"20160525 13:30:00.020",
"20160525 13:30:00.025",
"20160525 13:30:00.035",
"20160525 13:30:00.040",
"20160525 13:30:00.055",
"20160525 13:30:00.060",
"20160525 13:30:00.065",
]
),
"key": [2, 1, 1, 3, 2, 1, 2, 3],
"value2": [2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8],
},
columns=["time", "key", "value2"],
)
result = merge_asof(df1, df2, on="time", by="key")
expected = pd.DataFrame(
{
"time": to_datetime(
[
"20160525 13:30:00.020",
"20160525 13:30:00.030",
"20160525 13:30:00.040",
"20160525 13:30:00.050",
"20160525 13:30:00.060",
]
),
"key": [1, 2, 1, 3, 2],
"value1": [1.1, 1.2, 1.3, 1.4, 1.5],
"value2": [2.2, 2.1, 2.3, 2.4, 2.7],
},
columns=["time", "key", "value1", "value2"],
)
tm.assert_frame_equal(result, expected)
def test_on_float(self):
# mimics how to determine the minimum-price variation
df1 = pd.DataFrame(
{
"price": [5.01, 0.0023, 25.13, 340.05, 30.78, 1040.90, 0.0078],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "price"],
)
df2 = pd.DataFrame(
{"price": [0.0, 1.0, 100.0], "mpv": [0.0001, 0.01, 0.05]},
columns=["price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
result = merge_asof(df1, df2, on="price")
expected = pd.DataFrame(
{
"symbol": list("BGACEDF"),
"price": [0.0023, 0.0078, 5.01, 25.13, 30.78, 340.05, 1040.90],
"mpv": [0.0001, 0.0001, 0.01, 0.01, 0.01, 0.05, 0.05],
},
columns=["symbol", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type(self, any_real_numpy_dtype):
# see gh-13936
dtype = np.dtype(any_real_numpy_dtype).type
df1 = pd.DataFrame(
{"value": [5, 2, 25, 100, 78, 120, 79], "symbol": list("ABCDEFG")},
columns=["symbol", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "result": list("xyzw")},
columns=["value", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = merge_asof(df1, df2, on="value")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"value": [2, 5, 25, 78, 79, 100, 120],
"result": list("xxxxxyz"),
},
columns=["symbol", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_specialized_type_by_int(self, any_real_numpy_dtype):
# see gh-13936
dtype = np.dtype(any_real_numpy_dtype).type
df1 = pd.DataFrame(
{
"value": [5, 2, 25, 100, 78, 120, 79],
"key": [1, 2, 3, 2, 3, 1, 2],
"symbol": list("ABCDEFG"),
},
columns=["symbol", "key", "value"],
)
df1.value = dtype(df1.value)
df2 = pd.DataFrame(
{"value": [0, 80, 120, 125], "key": [1, 2, 2, 3], "result": list("xyzw")},
columns=["value", "key", "result"],
)
df2.value = dtype(df2.value)
df1 = df1.sort_values("value").reset_index(drop=True)
result = merge_asof(df1, df2, on="value", by="key")
expected = pd.DataFrame(
{
"symbol": list("BACEGDF"),
"key": [2, 1, 3, 3, 2, 2, 1],
"value": [2, 5, 25, 78, 79, 100, 120],
"result": [np.nan, "x", np.nan, np.nan, np.nan, "y", "x"],
},
columns=["symbol", "key", "value", "result"],
)
expected.value = dtype(expected.value)
tm.assert_frame_equal(result, expected)
def test_on_float_by_int(self):
# type specialize both "by" and "on" parameters
df1 = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [1, 2, 3, 1, 2, 3, 1, 2, 3],
"price": [
3.26,
3.2599,
3.2598,
12.58,
12.59,
12.5,
378.15,
378.2,
378.25,
],
},
columns=["symbol", "exch", "price"],
)
df2 = pd.DataFrame(
{
"exch": [1, 1, 1, 2, 2, 2, 3, 3, 3],
"price": [0.0, 1.0, 100.0, 0.0, 5.0, 100.0, 0.0, 5.0, 1000.0],
"mpv": [0.0001, 0.01, 0.05, 0.0001, 0.01, 0.1, 0.0001, 0.25, 1.0],
},
columns=["exch", "price", "mpv"],
)
df1 = df1.sort_values("price").reset_index(drop=True)
df2 = df2.sort_values("price").reset_index(drop=True)
result = merge_asof(df1, df2, on="price", by="exch")
expected = pd.DataFrame(
{
"symbol": list("AAABBBCCC"),
"exch": [3, 2, 1, 3, 1, 2, 1, 2, 3],
"price": [
3.2598,
3.2599,
3.26,
12.5,
12.58,
12.59,
378.15,
378.2,
378.25,
],
"mpv": [0.0001, 0.0001, 0.01, 0.25, 0.01, 0.01, 0.05, 0.1, 0.25],
},
columns=["symbol", "exch", "price", "mpv"],
)
tm.assert_frame_equal(result, expected)
def test_merge_datatype_error_raises(self):
msg = r"Incompatible merge dtype, .*, both sides must have numeric dtype"
left = pd.DataFrame({"left_val": [1, 5, 10], "a": ["a", "b", "c"]})
right = pd.DataFrame({"right_val": [1, 2, 3, 6, 7], "a": [1, 2, 3, 6, 7]})
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_datatype_categorical_error_raises(self):
msg = (
r"incompatible merge keys \[0\] .* both sides category, "
"but not equal ones"
)
left = pd.DataFrame(
{"left_val": [1, 5, 10], "a": pd.Categorical(["a", "b", "c"])}
)
right = pd.DataFrame(
{
"right_val": [1, 2, 3, 6, 7],
"a": pd.Categorical(["a", "X", "c", "X", "b"]),
}
)
with pytest.raises(MergeError, match=msg):
merge_asof(left, right, on="a")
def test_merge_groupby_multiple_column_with_categorical_column(self):
# GH 16454
df = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
result = merge_asof(df, df, on="x", by=["y", "z"])
expected = pd.DataFrame({"x": [0], "y": [0], "z": pd.Categorical([0])})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func", [lambda x: x, to_datetime], ids=["numeric", "datetime"]
)
@pytest.mark.parametrize("side", ["left", "right"])
def test_merge_on_nans(self, func, side):
# GH 23189
msg = f"Merge keys contain null values on {side} side"
nulls = func([1.0, 5.0, np.nan])
non_nulls = func([1.0, 5.0, 10.0])
df_null = pd.DataFrame({"a": nulls, "left_val": ["a", "b", "c"]})
df = pd.DataFrame({"a": non_nulls, "right_val": [1, 6, 11]})
with pytest.raises(ValueError, match=msg):
if side == "left":
merge_asof(df_null, df, on="a")
else:
merge_asof(df, df_null, on="a")
def test_by_nullable(self, any_numeric_ea_dtype, using_infer_string):
# Note: this test passes if instead of using pd.array we use
# np.array([np.nan, 1]). Other than that, I (@jbrockmendel)
# have NO IDEA what the expected behavior is.
# TODO(GH#32306): may be relevant to the expected behavior here.
arr = pd.array([pd.NA, 0, 1], dtype=any_numeric_ea_dtype)
if arr.dtype.kind in ["i", "u"]:
max_val = np.iinfo(arr.dtype.numpy_dtype).max
else:
max_val = np.finfo(arr.dtype.numpy_dtype).max
# set value s.t. (at least for integer dtypes) arr._values_for_argsort
# is not an injection
arr[2] = max_val
left = pd.DataFrame(
{
"by_col1": arr,
"by_col2": ["HELLO", "To", "You"],
"on_col": [2, 4, 6],
"value": ["a", "c", "e"],
}
)
right = pd.DataFrame(
{
"by_col1": arr,
"by_col2": ["WORLD", "Wide", "Web"],
"on_col": [1, 2, 6],
"value": ["b", "d", "f"],
}
)
result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
expected = pd.DataFrame(
{
"by_col1": arr,
"by_col2": ["HELLO", "To", "You"],
"on_col": [2, 4, 6],
"value_x": ["a", "c", "e"],
}
)
expected["value_y"] = np.array([np.nan, np.nan, np.nan], dtype=object)
if using_infer_string:
expected["value_y"] = expected["value_y"].astype("str")
tm.assert_frame_equal(result, expected)
def test_merge_by_col_tz_aware(self):
# GH 21184
left = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [2],
"values": ["a"],
}
)
right = pd.DataFrame(
{
"by_col": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"on_col": [1],
"values": ["b"],
}
)
result = merge_asof(left, right, by="by_col", on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), 2, "a", "b"]],
columns=["by_col", "on_col", "values_x", "values_y"],
)
tm.assert_frame_equal(result, expected)
def test_by_mixed_tz_aware(self, using_infer_string):
# GH 26649
left = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["HELLO"],
"on_col": [2],
"value": ["a"],
}
)
right = pd.DataFrame(
{
"by_col1": pd.DatetimeIndex(["2018-01-01"]).tz_localize("UTC"),
"by_col2": ["WORLD"],
"on_col": [1],
"value": ["b"],
}
)
result = merge_asof(left, right, by=["by_col1", "by_col2"], on="on_col")
expected = pd.DataFrame(
[[pd.Timestamp("2018-01-01", tz="UTC"), "HELLO", 2, "a"]],
columns=["by_col1", "by_col2", "on_col", "value_x"],
)
expected["value_y"] = np.array([np.nan], dtype=object)
if using_infer_string:
expected["value_y"] = expected["value_y"].astype("str")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "int16", "m8[ns]", "M8[us]"])
def test_by_dtype(self, dtype):
# GH 55453, GH 22794
left = pd.DataFrame(
{
"by_col": np.array([1], dtype=dtype),
"on_col": [2],
"value": ["a"],
}
)
right = pd.DataFrame(
{
"by_col": np.array([1], dtype=dtype),
"on_col": [1],
"value": ["b"],
}
)
result = merge_asof(left, right, by="by_col", on="on_col")
expected = pd.DataFrame(
{
"by_col": np.array([1], dtype=dtype),
"on_col": [2],
"value_x": ["a"],
"value_y": ["b"],
}
)
tm.assert_frame_equal(result, expected)
def test_timedelta_tolerance_nearest(self, unit):
# GH 27642
if unit == "s":
pytest.skip(
"This test is invalid with unit='s' because that would "
"round left['time']"
)
left = pd.DataFrame(
list(zip([0, 5, 10, 15, 20, 25], [0, 1, 2, 3, 4, 5])),
columns=["time", "left"],
)
left["time"] = pd.to_timedelta(left["time"], "ms").astype(f"m8[{unit}]")
right = pd.DataFrame(
list(zip([0, 3, 9, 12, 15, 18], [0, 1, 2, 3, 4, 5])),
columns=["time", "right"],
)
right["time"] = pd.to_timedelta(right["time"], "ms").astype(f"m8[{unit}]")
expected = pd.DataFrame(
list(
zip(
[0, 5, 10, 15, 20, 25],
[0, 1, 2, 3, 4, 5],
[0, np.nan, 2, 4, np.nan, np.nan],
)
),
columns=["time", "left", "right"],
)
expected["time"] = pd.to_timedelta(expected["time"], "ms").astype(f"m8[{unit}]")
result = merge_asof(
left, right, on="time", tolerance=Timedelta("1ms"), direction="nearest"
)
tm.assert_frame_equal(result, expected)
def test_int_type_tolerance(self, any_int_dtype):
# GH #28870
left = pd.DataFrame({"a": [0, 10, 20], "left_val": [1, 2, 3]})
right = pd.DataFrame({"a": [5, 15, 25], "right_val": [1, 2, 3]})
left["a"] = left["a"].astype(any_int_dtype)
right["a"] = right["a"].astype(any_int_dtype)
expected = pd.DataFrame(
{"a": [0, 10, 20], "left_val": [1, 2, 3], "right_val": [np.nan, 1.0, 2.0]}
)
expected["a"] = expected["a"].astype(any_int_dtype)
result = merge_asof(left, right, on="a", tolerance=10)
tm.assert_frame_equal(result, expected)
def test_merge_index_column_tz(self):
# GH 29864
index = pd.date_range("2019-10-01", freq="30min", periods=5, tz="UTC")
left = pd.DataFrame([0.9, 0.8, 0.7, 0.6], columns=["xyz"], index=index[1:])
right = pd.DataFrame({"from_date": index, "abc": [2.46] * 4 + [2.19]})
result = merge_asof(
left=left, right=right, left_index=True, right_on=["from_date"]
)
expected = pd.DataFrame(
{
"xyz": [0.9, 0.8, 0.7, 0.6],
"from_date": index[1:],
"abc": [2.46] * 3 + [2.19],
},
index=pd.date_range(
"2019-10-01 00:30:00", freq="30min", periods=4, tz="UTC"
),
)
tm.assert_frame_equal(result, expected)
result = merge_asof(
left=right, right=left, right_index=True, left_on=["from_date"]
)
expected = pd.DataFrame(
{
"from_date": index,
"abc": [2.46] * 4 + [2.19],
"xyz": [np.nan, 0.9, 0.8, 0.7, 0.6],
},
index=Index([0, 1, 2, 3, 4]),
)
tm.assert_frame_equal(result, expected)
def test_left_index_right_index_tolerance(self, unit):
# https://github.com/pandas-dev/pandas/issues/35558
if unit == "s":
pytest.skip(
"This test is invalid with unit='s' because that would round dr1"
)
dr1 = pd.date_range(
start="1/1/2020", end="1/20/2020", freq="2D", unit=unit
) + Timedelta(seconds=0.4).as_unit(unit)
dr2 = pd.date_range(start="1/1/2020", end="2/1/2020", unit=unit)
df1 = pd.DataFrame({"val1": "foo"}, index=pd.DatetimeIndex(dr1))
df2 = pd.DataFrame({"val2": "bar"}, index=pd.DatetimeIndex(dr2))
expected = pd.DataFrame(
{"val1": "foo", "val2": "bar"}, index=pd.DatetimeIndex(dr1)
)
result = merge_asof(
df1,
df2,
left_index=True,
right_index=True,
tolerance=Timedelta(seconds=0.5),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]
)
@pytest.mark.parametrize(
"kwargs", [{"on": "x"}, {"left_index": True, "right_index": True}]
)
@pytest.mark.parametrize(
"data",
[["2019-06-01 00:09:12", "2019-06-01 00:10:29"], [1.0, "2019-06-01 00:10:29"]],
)
def test_merge_asof_non_numerical_dtype(kwargs, data, infer_string):
# GH#29130
with option_context("future.infer_string", infer_string):
left = pd.DataFrame({"x": data}, index=data)
right = pd.DataFrame({"x": data}, index=data)
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, **kwargs)
def test_merge_asof_non_numerical_dtype_object():
# GH#29130
left = pd.DataFrame({"a": ["12", "13", "15"], "left_val1": ["a", "b", "c"]})
right = pd.DataFrame({"a": ["a", "b", "c"], "left_val": ["d", "e", "f"]})
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(
left,
right,
left_on="left_val1",
right_on="a",
left_by="a",
right_by="left_val",
)
@pytest.mark.parametrize(
"kwargs",
[
{"right_index": True, "left_index": True},
{"left_on": "left_time", "right_index": True},
{"left_index": True, "right_on": "right"},
],
)
def test_merge_asof_index_behavior(kwargs):
# GH 33463
index = Index([1, 5, 10], name="test")
left = pd.DataFrame({"left": ["a", "b", "c"], "left_time": [1, 4, 10]}, index=index)
right = pd.DataFrame({"right": [1, 2, 3, 6, 7]}, index=[1, 2, 3, 6, 7])
result = merge_asof(left, right, **kwargs)
expected = pd.DataFrame(
{"left": ["a", "b", "c"], "left_time": [1, 4, 10], "right": [1, 3, 7]},
index=index,
)
tm.assert_frame_equal(result, expected)
def test_merge_asof_numeric_column_in_index():
    # GH#34488
    # The "a" key lives in the index on both sides; merge_asof should
    # resolve left_on/right_on against the named index level.
    keys = Index([1, 2, 3], name="a")
    left = pd.DataFrame({"b": [10, 11, 12]}, index=keys)
    right = pd.DataFrame({"c": [20, 21, 22]}, index=Index([0, 2, 3], name="a"))

    merged = merge_asof(left, right, left_on="a", right_on="a")

    expected = pd.DataFrame({"a": [1, 2, 3], "b": [10, 11, 12], "c": [20, 21, 22]})
    tm.assert_frame_equal(merged, expected)
def test_merge_asof_numeric_column_in_multiindex():
    # GH#34488
    # "a" is one level of a MultiIndex on both sides; merge_asof should be
    # able to pick it out by name via left_on/right_on.
    keys = [1, 2, 3]
    left = pd.DataFrame(
        {"b": [10, 11, 12]},
        index=pd.MultiIndex.from_arrays([keys, ["a", "b", "c"]], names=["a", "z"]),
    )
    right = pd.DataFrame(
        {"c": [20, 21, 22]},
        index=pd.MultiIndex.from_arrays([keys, ["x", "y", "z"]], names=["a", "y"]),
    )

    merged = merge_asof(left, right, left_on="a", right_on="a")

    expected = pd.DataFrame({"a": keys, "b": [10, 11, 12], "c": [20, 21, 22]})
    tm.assert_frame_equal(merged, expected)
def test_merge_asof_numeri_column_in_index_object_dtype():
# GH#34488
left = pd.DataFrame({"b": [10, 11, 12]}, index=Index(["1", "2", "3"], name="a"))
right = pd.DataFrame({"c": [20, 21, 22]}, index=Index(["m", "n", "o"], name="a"))
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, left_on="a", right_on="a")
left = left.reset_index().set_index(["a", "b"])
right = right.reset_index().set_index(["a", "c"])
with pytest.raises(
MergeError,
match=r"Incompatible merge dtype, .*, both sides must have numeric dtype",
):
merge_asof(left, right, left_on="a", right_on="a")
def test_merge_asof_array_as_on(unit):
# GH#42844
dti = pd.DatetimeIndex(
["2021/01/01 00:37", "2021/01/01 01:40"], dtype=f"M8[{unit}]"
)
right = pd.DataFrame(
{
"a": [2, 6],
"ts": dti,
}
)
ts_merge = pd.date_range(
start=pd.Timestamp("2021/01/01 00:00"), periods=3, freq="1h", unit=unit
)
left = pd.DataFrame({"b": [4, 8, 7]})
result = merge_asof(
left,
right,
left_on=ts_merge,
right_on="ts",
allow_exact_matches=False,
direction="backward",
)
expected = pd.DataFrame({"b": [4, 8, 7], "a": [np.nan, 2, 6], "ts": ts_merge})
tm.assert_frame_equal(result, expected)
result = merge_asof(
right,
left,
left_on="ts",
right_on=ts_merge,
allow_exact_matches=False,
direction="backward",
)
expected = pd.DataFrame(
{
"a": [2, 6],
"ts": dti,
"b": [4, 8],
}
)
tm.assert_frame_equal(result, expected)
def test_merge_asof_raise_for_duplicate_columns():
# GH#50102
left = pd.DataFrame([[1, 2, "a"]], columns=["a", "a", "left_val"])
right = pd.DataFrame([[1, 1, 1]], columns=["a", "a", "right_val"])
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, on="a")
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, left_on="a", right_on="right_val")
with pytest.raises(ValueError, match="column label 'a'"):
merge_asof(left, right, left_on="left_val", right_on="a")
@pytest.mark.parametrize(
"dtype",
[
"Int64",
pytest.param("int64[pyarrow]", marks=td.skip_if_no("pyarrow")),
pytest.param("timestamp[s][pyarrow]", marks=td.skip_if_no("pyarrow")),
],
)
def test_merge_asof_extension_dtype(dtype):
# GH 52904
left = pd.DataFrame(
{
"join_col": [1, 3, 5],
"left_val": [1, 2, 3],
}
)
right = pd.DataFrame(
{
"join_col": [2, 3, 4],
"right_val": [1, 2, 3],
}
)
left = left.astype({"join_col": dtype})
right = right.astype({"join_col": dtype})
result = merge_asof(left, right, on="join_col")
expected = pd.DataFrame(
{
"join_col": [1, 3, 5],
"left_val": [1, 2, 3],
"right_val": [np.nan, 2.0, 3.0],
}
)
expected = expected.astype({"join_col": dtype})
tm.assert_frame_equal(result, expected)
@td.skip_if_no("pyarrow")
def test_merge_asof_pyarrow_td_tolerance():
# GH 56486
ser = pd.Series(
[datetime.datetime(2023, 1, 1)], dtype="timestamp[us, UTC][pyarrow]"
)
df = pd.DataFrame(
{
"timestamp": ser,
"value": [1],
}
)
result = merge_asof(df, df, on="timestamp", tolerance=Timedelta("1s"))
expected = pd.DataFrame(
{
"timestamp": ser,
"value_x": [1],
"value_y": [1],
}
)
tm.assert_frame_equal(result, expected)
def test_merge_asof_read_only_ndarray():
# GH 53513
left = pd.Series([2], index=[2], name="left")
right = pd.Series([1], index=[1], name="right")
# set to read-only
left.index.values.flags.writeable = False
right.index.values.flags.writeable = False
result = merge_asof(left, right, left_index=True, right_index=True)
expected = pd.DataFrame({"left": [2], "right": [1]}, index=[2])
tm.assert_frame_equal(result, expected)
def test_merge_asof_multiby_with_categorical():
    # GH 43541
    # Multi-"by" merge where one by-column is categorical and the two sides
    # declare the same categories in a *different order*; matching must be
    # done on category values, not on the underlying integer codes.
    left = pd.DataFrame(
        {
            "c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]),
            "c2": ["x"] * 4,
            "t": [1] * 4,
            "v": range(4),
        }
    )
    right = pd.DataFrame(
        {
            # same categories as left but reversed order -> different codes
            "c1": pd.Categorical(["b", "b"], categories=["b", "a"]),
            "c2": ["x"] * 2,
            "t": [1, 2],
            "v": range(2),
        }
    )
    result = merge_asof(
        left,
        right,
        by=["c1", "c2"],
        on="t",
        direction="forward",
        suffixes=["_left", "_right"],
    )
    # Only the "b" rows have a forward match (right t=1 -> v_right=0);
    # the "a" rows have no right-side group at all.
    expected = pd.DataFrame(
        {
            "c1": pd.Categorical(["a", "a", "b", "b"], categories=["a", "b"]),
            "c2": ["x"] * 4,
            "t": [1] * 4,
            "v_left": range(4),
            "v_right": [np.nan, np.nan, 0.0, 0.0],
        }
    )
    tm.assert_frame_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@reshape@merge@test_merge_asof.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "fabiorigamonti/bang",
"repo_path": "bang_extracted/bang-main/README.md",
"type": "Markdown"
}
|
# BANG
## BAyesian decompositioN of Galaxies
BANG is a GPU/CPU-python code for modelling both the photometry and kinematics of galaxies.
The underlying model is the superposition of different components; the user has 3 possible
combinations:
* Bulge + inner disc + outer disc + Halo
* Bulge + disc + Halo
* inner disc + outer disc + Halo
For any detail about the model construction see Rigamonti et al. 2022.
The parameter estimation is done with a python implementation [CPnest](https://github.com/johnveitch/cpnest)
of the nested sampling algorithm.
We strongly suggest to run BANG on GPU. CPU parameter estimation can take
days. A fast CPU implementation will be available in a future release of the code.
All the functions needed by the user are well documented. In order to run BANG on
your galaxy open the example.py script from the BANG/src/BANG or BANG/test directories
and follow the instructions.
Once your data have been correctly prepared and the config.yaml file has been created,
running BANG requires few lines of code.
For any problem or suggestion feel free to contact the authors at:
frigamonti@uninsubria.it
For installing BANG you can follow these instructions:
1- Download the BANG package from github. You can simply type from your terminal:
git clone https://github.com/FabioRigamonti/BANG.git
2- Install the python modules with:
pip install BANGal
3- Copy the files:
setup_easy.pyx
utils_easy.pyx
dehnen/
From the github repo (you can find them in BANG/src/BANG) to the directory
where pip has installed BANG. You can find this directory by opening a python shell
importing BANG and printing BANG.
4- Move to the directory where pip has installed BANG and run the following command:
python setup_easy.py build_ext --inplace
|
fabiorigamontiREPO_NAMEbangPATH_START.@bang_extracted@bang-main@README.md@.PATH_END.py
|
{
"filename": "_tickfont.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/indicator/gauge/axis/_tickfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
    """Tick label font for ``indicator.gauge.axis`` (color, family, size).

    NOTE: this module is auto-generated plotly code; the three properties
    are validated/stored by the ``_BaseTraceHierarchyType`` machinery via
    ``self[...]`` item access.
    """

    # class properties
    # --------------------
    _parent_path_str = "indicator.gauge.axis"
    _path_str = "indicator.gauge.axis.tickfont"
    _valid_props = {"color", "family", "size"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Tickfont object

        Sets the color bar's tick label font

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.indicator.gaug
            e.axis.Tickfont`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        Returns
        -------
        Tickfont
        """
        super(Tickfont, self).__init__("tickfont")

        # Internal construction path: parent supplied directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.indicator.gauge.axis.Tickfont 
constructor must be a dict or 
an instance of :class:`plotly.graph_objs.indicator.gauge.axis.Tickfont`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over ``arg`` entries.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@indicator@gauge@axis@_tickfont.py@.PATH_END.py
|
{
"filename": "checkstate.py",
"repo_name": "COSMIC-PopSynth/COSMIC",
"repo_path": "COSMIC_extracted/COSMIC-master/cosmic/checkstate.py",
"type": "Python"
}
|
from cosmic import _evolvebin
from .filter import parse_column_filters
import operator
import numpy
# Names of the per-binary state quantities that may be referenced in a
# ``timestep_conditions`` filter string (consumed by ``set_checkstates``).
# Each name owns one 3-element slot in every row of ``checkstate_array``.
CHECKSTATE_COLUMNS = numpy.array(
    [
        "binstate",
        "evol_type",
        "mass_1",
        "mass_2",
        "kstar_1",
        "kstar_2",
        "sep",
        "porb",
        "ecc",
        "rrlo_1",
        "rrlo_2",
        "aj_1",
        "aj_2",
        "tms_1",
        "tms_2",
        "massc_1",
        "massc_2",
        "rad_1",
        "rad_2",
        "mass0_1",
        "mass0_2",
        "lum_1",
        "lum_2",
        "radc_1",
        "radc_2",
        "menv_1",
        "menv_2",
        "renv_1",
        "renv_2",
        "omega_spin_1",
        "omega_spin_2",
        "b0_1",
        "b0_2",
        "bacc_1",
        "bacc_2",
        "tacc_1",
        "tacc_2",
        "epoch_1",
        "epoch_2",
        "bhspin_1",
        "bhspin_2",
    ]
)

# Default condition triplet per column: (lower bound, comparison-mode flag,
# upper bound).  (-10e30, 10e30) is effectively (-inf, +inf); the middle
# flag of -1 marks "no condition" (set_checkstates writes 0 for equality
# and 1 for strict greater-than — other comparison codes presumably follow;
# confirm against the Fortran side).
DEFAULT_CONDITIONS = [-10e30, -1, 10e30] * CHECKSTATE_COLUMNS.size
# One row per condition set: up to 15 simultaneous timestep conditions.
DEFAULT_CONDITIONS = numpy.array([DEFAULT_CONDITIONS] * 15)
# Default printing timestep (dtp) per condition set; -1 means unset.
DEFAULT_DTP_STATE = -1 * numpy.ones(15)
def set_checkstates(timestep_conditions=None):
    """Configure per-state BCM time resolution (dtp) in the Fortran evolver.

    Parameters
    ----------
    timestep_conditions : list, optional
        A nested list of time resolutions and the conditions under which
        each one applies, e.g.,

        >>> timestep_conditions = [['20.0<mass_1<25.5', '15.5>mass_2>10.0', 'dtp=1.0'],
        >>>                        ['kstar_1=14', 'lum_1>10.0', 'dtp=0.01'],
        >>>                        ['2>=binstate>=1', 'dtp=None']]

        The conditions within one entry are combined in an AND fashion.
        'dtp=None' ends time resolution for the BCM array and skips to
        only printing the final state.  Defaults to no dynamic dtp setting.
    """
    # The old default was a mutable list literal; use None so one list
    # object is not shared across calls.
    if timestep_conditions is None:
        timestep_conditions = []

    # assume that we are not doing any special dtp setting
    _evolvebin.check_dtp.check_dtp = 0

    # Reset the conditional-state table: no conditional states at which
    # to set a special dtp.
    checkstate_array = getattr(_evolvebin.checkstate_array, "checkstate_array")
    checkstate_array[:, :] = DEFAULT_CONDITIONS

    # Likewise assume no conditions exist to set a special dtp.
    dtp_state = getattr(_evolvebin.checkstate_params, "dtp_state")
    dtp_state[:] = DEFAULT_DTP_STATE

    for index, condition in enumerate(timestep_conditions):
        # at least one condition was supplied, so enable checking
        _evolvebin.check_dtp.check_dtp = 1
        for param in parse_column_filters(condition):
            name, op, value = param[0], param[1], param[2]
            if name == "dtp":
                dtp_state = getattr(_evolvebin.checkstate_params, "dtp_state")
                # 'None' maps to 13700.0 (presumably Myr, i.e. longer than a
                # Hubble time — TODO confirm), effectively ending BCM output.
                dtp_state[index] = 13700.0 if value == "None" else value
                continue
            # Column position of this parameter in the (low, mode, high)
            # triplet layout of the checkstate table.
            col = numpy.argwhere(name.lower() == CHECKSTATE_COLUMNS)
            if op == operator.eq:
                # equality pins both bounds; mode code 0 means "equal"
                checkstate_array[index, col * 3] = value
                checkstate_array[index, col * 3 + 2] = value
                checkstate_array[index, col * 3 + 1] = 0
            elif op in (operator.gt, operator.ge):
                # lower bound; mode 1 = strict, 2 = inclusive
                checkstate_array[index, col * 3] = value
                checkstate_array[index, col * 3 + 1] = 1 if op == operator.gt else 2
            elif op in (operator.lt, operator.le):
                # upper bound; mode 3 = strict, 4 = inclusive
                checkstate_array[index, col * 3 + 2] = value
                checkstate_array[index, col * 3 + 1] = 3 if op == operator.lt else 4
|
COSMIC-PopSynthREPO_NAMECOSMICPATH_START.@COSMIC_extracted@COSMIC-master@cosmic@checkstate.py@.PATH_END.py
|
{
"filename": "noncrossing.ipynb",
"repo_name": "ahermosillo/stochastic-randommigration",
"repo_path": "stochastic-randommigration_extracted/stochastic-randommigration-main/noncrossing/noncrossing.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pylab as pylb
%matplotlib inline
plt.style.use('/Users/arceliahermosillo/Research/stochastic/paper_labels_colors.mplstyle')
```
# Non-crossing figures
```python
# Named hex-RGB palette used consistently across the figures below.
colors = {"Bdazzled Blue":"#335471","Cadet Blue":"#69a2b0","French Lilac":"#73628a",\
"Fern Green":"#58804d","Olivine":"#a1c084","Amaranth":"#dd4053","Light Pink":"#ffa5a5",\
"Honey Yellow":"#ffb60a","Brown Sugar":"#a9714b","Dark Sienna":"#49111c"}
```
```python
```
```python
# Physical constants, CGS units throughout.
mSun = 1.98e33 #grams (solar mass)
mEarth = 3e-6*mSun #grams (Earth mass as a fraction of mSun)
mNep = (5.15e-5)*mSun #grams (Neptune mass)
mPlut = (6.58e-9)*mSun #grams (Pluto mass)
G = 6.67e-8 # cgs gravitational constant (cm^3 g^-1 s^-2)
AU = 1.496e13 #cm per astronomical unit
yr_to_sec = 3.1536e7  # seconds per year
# equation
def delta_ap_nc(i, m, Mp, Mstar, ap, e, x, C1):
    """Non-crossing kick to the planet's semi-major axis.

    Parameters
    ----------
    i : int
        Regime selector: 0 -> circular regime (eq. a), 1 -> eccentric
        regime (eq. b).
    m, Mp, Mstar : float
        Planetesimal, planet, and stellar masses (grams).
    ap : float
        Planet semi-major axis (cm).
    e : float
        Planetesimal eccentricity (used only when i == 1).
    x : float or array
        Planet-planetesimal separation (cm).
    C1 : float
        Fitted dimensionless coefficient.

    Returns
    -------
    float or array
        The semi-major-axis change delta a_p (cm).

    Raises
    ------
    ValueError
        If ``i`` is neither 0 nor 1.  (Previously any other value fell
        through both ``if`` branches and raised a confusing NameError on
        the undefined result variable.)
    """
    if i == 0:
        return -C1*m*Mp*(ap**6)/((Mstar**2)*x**5)
    elif i == 1:
        return C1*m*(ap**4)*e/(Mstar*x**3)
    raise ValueError(f"regime flag i must be 0 or 1, got {i!r}")
#data
# NOTE(review): absolute local paths — these cells only run on the author's
# machine; consider relative paths if the notebook is to be shared.
#regime 1
nc = np.genfromtxt("/Users/arceliahermosillo/Research/stochastic/LAU_MURRAYCLAY2011/ARCELIA/fig07/results.txt", names =True)
#regime 2
nc1 = np.genfromtxt("/Users/arceliahermosillo/Research/stochastic/LAU_MURRAYCLAY2011/ARCELIA/fig07/results1_cutdown.txt", names = True)
#regime 3
nc2 = np.genfromtxt("/Users/arceliahermosillo/Research/stochastic/LAU_MURRAYCLAY2011/ARCELIA/fig07/results2_cutdown.txt", names = True)
# Simulation masses and geometry (CGS via the constants cell above).
mplanet = (1e-5)*mSun
mplanetesimal = (5e-10)*mSun
a_semi = 30*AU
ecc = 0
ecc2 = 0.01
mplanetesimal3 = (1e-12)*mSun
### August 2023
### ok so decided not all \Delta a need to have the same coefficient. All can be different.
## we are only choosing values with x*rH < 25 au and fitting to that line..
## we got. 6.15 for regime 1 (equation a)
## 2.17 for regime 2 and 2.78 for regime 3. Average those to get 2.5 for equation b.
# only get values less than 25 rh.
# the reason we are doing this is because later when normalizing to C for the macro case
# we realized the micro cases were not actually all the same since before D = c2*delap^2*Ndot
# and now we are doing D = delapT^2/2T.
# Keep only separations within 25 Hill radii for each regime.
x1 = nc['x'][nc['x']<25*nc['rh'][0]]
da1 = nc['deltaa'][nc['x']<25*nc['rh'][0]]
x2 = nc1['x'][nc1['x']<25*nc1['rh'][0]]
da2 = nc1['deltaa'][nc1['x']<25*nc1['rh'][0]]
x3 = nc2['x'][nc2['x']<25*nc2['rh'][0]]
da3 = nc2['deltaa'][nc2['x']<25*nc2['rh'][0]]
rh = nc2['rh'][0]
# Analytic curves from delta_ap_nc for comparison against the data points.
# regime 1 verifying eq 8a (i ==0)
ap_1 = delta_ap_nc(0, mplanetesimal, mplanet, mSun, a_semi, ecc, x1*AU, 6)
#regime 2 verifying eq 8b (i==1)
ap_2 = delta_ap_nc(1, mplanetesimal, mplanet, mSun, a_semi, 0.01, x2*AU, 2.5)
#regime 3 verifying eq 8b (i==1)
ap_3 = delta_ap_nc(1, mplanetesimal3, mplanet, mSun, a_semi, ecc2, x3*AU, 2.5)
# Two stacked log-log panels: regime 1 on top, regimes 2 and 3 below.
fig, axs = plt.subplots(2, figsize = (3.4, 3.4), sharex=True, sharey=True)
# fig.suptitle('Sharing both axes')
# axs[0].loglog((nc['x']/nc['rh']), (-ap_1/AU), '-', c= colors['Bdazzled Blue'])
axs[0].loglog((x1/rh), (-ap_1/AU), '-', c= colors['Bdazzled Blue'])
axs[0].loglog((nc['x']/rh), nc['deltaa'], '.', c = colors["Cadet Blue"])
# axs[1].loglog((nc1['x']/nc1['rh']), ap_2/AU, '-', c = colors["Fern Green"])
axs[1].loglog((x2/rh), ap_2/AU, '-', c = colors["Fern Green"])
axs[1].loglog((nc1['x']/rh), nc1['deltaa'], '.', c = colors["Olivine"])
# axs[1].loglog((nc1['x']/nc1['rh']), ap_3/AU, '-', c = colors["Amaranth"])
axs[1].loglog((x3/rh), ap_3/AU, '-', c = colors["Amaranth"])
axs[1].loglog((nc2['x']/rh), nc2['deltaa'], '.', c = colors["Light Pink"])
axs[0].set_ylim(1e-15, 1e-3)
axs[0].annotate( "NC(a)",(0.81,0.88),xycoords = 'subfigure fraction', color = colors['Bdazzled Blue'])
axs[1].annotate( "NC(b)",(0.81,0.45), xycoords = 'subfigure fraction', color = colors["Fern Green"])
axs[1].annotate("NC(c)",(0.81,0.39), xycoords = 'subfigure fraction', color = colors["Amaranth"])
axs[1].set_xlabel("$x/R_H$")
axs[0].tick_params(labelsize = 10,axis='y', which = 'both', right = False)
axs[0].tick_params(labelsize = 10, axis='x', which = 'both', top = False)
axs[1].tick_params(labelsize = 10,axis='y', which = 'both', right = False)
axs[1].tick_params(labelsize = 10, axis='x', which = 'both', top = False)
fig.supylabel("$\Delta {a}_p$ (au)")
plt.tight_layout(pad=0.35, w_pad=0.5)
plt.show()
```

|
ahermosilloREPO_NAMEstochastic-randommigrationPATH_START.@stochastic-randommigration_extracted@stochastic-randommigration-main@noncrossing@noncrossing.ipynb@.PATH_END.py
|
{
"filename": "SoundSpeedPolicy.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/PYB11/Hydro/SoundSpeedPolicy.py",
"type": "Python"
}
|
from PYB11Generator import *
from FieldUpdatePolicy import *
@PYB11module("SpheralHydro")
@PYB11template("Dimension")
class SoundSpeedPolicy(FieldUpdatePolicy):
    # PYB11 binding description for the C++ SoundSpeedPolicy<Dimension>
    # class.  The string-valued argument defaults below are C++ type
    # declarations consumed verbatim by the PYB11 generator — do not edit.

    # C++ typedefs injected into the generated binding source.
    PYB11typedefs = """
    using Scalar = typename %(Dimension)s::Scalar;
    using KeyType = typename SoundSpeedPolicy<%(Dimension)s>::KeyType;
"""

    #...........................................................................
    # Constructors
    def pyinit0(self):
        # Default constructor binding (no C++ arguments).
        return

    #...........................................................................
    # Virtual methods
    @PYB11virtual
    def update(self,
               key = "const KeyType&",
               state = "State<%(Dimension)s>&",
               derivs = "StateDerivatives<%(Dimension)s>&",
               multiplier = "const double",
               t = "const double",
               dt = "const double"):
        "Update a FieldList associated with the given key"
        return "void"
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@PYB11@Hydro@SoundSpeedPolicy.py@.PATH_END.py
|
{
"filename": "leastsqnrm.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/ami/leastsqnrm.py",
"type": "Python"
}
|
import logging
import numpy as np
import numpy.linalg as linalg
from scipy.special import comb, jv
from . import hexee
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
def flip(holearray):
    """
    Return a copy of the hole coordinates with the y-coordinate negated.

    Parameters
    ----------
    holearray: 2D float array
        coordinates of holes

    Returns
    -------
    fliparray: 2D float array
        flipped coordinates of holes (input is left untouched)
    """
    flipped = holearray.copy()
    flipped[:, 1] *= -1
    return flipped
def rotatevectors(vectors, thetarad):
    """
    Rotate 2-D vectors counterclockwise by the specified angle.

    Under a small positive rotation, positive x decreases and positive y
    increases (standard rotation matrix convention).

    Parameters
    ----------
    vectors: 2D float array
        list of vectors - e.g. nrm hole centers
    thetarad: float
        rotation angle in radians

    Returns
    -------
    rot_vectors: 2D float array
        rotated vectors
    """
    cos_t = np.cos(thetarad)
    sin_t = np.sin(thetarad)
    rotated = [
        [cos_t * v[0] - sin_t * v[1], sin_t * v[0] + cos_t * v[1]]
        for v in vectors
    ]
    return np.array(rotated)
def mas2rad(mas):
    """
    Convert an angle in milliarcseconds to radians.

    Parameters
    ----------
    mas: float
        angle in milli arc-sec

    Returns
    -------
    float
        angle in radians
    """
    arcsec = mas * 1.0e-3
    return arcsec / (3600.0 * 180.0 / np.pi)
def rad2mas(rad):
    """
    Convert an angle in radians to milliarcseconds.

    Parameters
    ----------
    rad: float
        input angle in radians

    Returns
    -------
    float
        input angle in milli arc sec
    """
    arcsec = rad * (3600.0 * 180.0 / np.pi)
    return arcsec * 1000.0
def sin2deltapistons(coeffs):
    """
    Recover piston differences from the sine fringe coefficients.

    Each baseline has one sine and one cosine fringe coefficient that
    encode the piston difference of the two holes forming the baseline;
    this routine inverts the sine terms (arcsin / 2*pi).

    Parameters
    ----------
    coeffs: 1D float array
        fit coefficients; sine terms sit at odd pair positions

    Returns
    -------
    delta: 1D float array
        sine-derived piston differences
    """
    npairs = (len(coeffs) - 1) // 2
    sines = np.array([coeffs[2 * pair + 2] for pair in range(npairs)])
    return np.arcsin(sines) / (2.0 * np.pi)
def cos2deltapistons(coeffs):
    """
    Recover piston differences from the cosine fringe coefficients.

    The arccosine loses the sign of the piston, so the sign is taken
    from the matching sine coefficient.

    Parameters
    ----------
    coeffs: 1D float array
        fit coefficients; cosine terms sit at even pair positions

    Returns
    -------
    delta: 1D float array
        cosine-derived piston differences
    """
    npairs = (len(coeffs) - 1) // 2
    delta = np.zeros(npairs)
    for pair in range(npairs):
        magnitude = np.arccos(coeffs[2 * pair + 1]) / (np.pi * 2.0)
        # The sine coefficient carries the sign of the piston difference.
        delta[pair] = -magnitude if coeffs[2 * pair + 2] < 0 else magnitude
    return delta
def replacenan(array):
    """
    Replace NaN entries with the analytic limit pi/4, in place.

    The analytical hexagon Fourier transform has removable singularities
    that evaluate to NaN; their derived limit is pi/4.

    Parameters
    ----------
    array: 2D float array
        input array (modified in place)

    Returns
    -------
    array: 2D float array
        the same array with NaNs replaced
    """
    array[np.isnan(array)] = np.pi / 4
    return array
def primarybeam(kx, ky):
    """
    Envelope intensity for circular holes and monochromatic light.

    Geometry parameters (d, lam, pitch, offx, offy) are supplied as
    attributes on this function by model_array().

    Parameters
    ----------
    kx, ky: float, float
        x- and y-components of the image-plane (spatial frequency) vector

    Returns
    -------
    env_int: 2D float array
        envelope intensity for circular holes & monochromatic light
    """
    dx = kx - primarybeam.offx
    dy = ky - primarybeam.offy
    radius = (
        (primarybeam.d / primarybeam.lam)
        * primarybeam.pitch
        * np.sqrt(dx * dx + dy * dy)
    )
    # Airy pattern; replacenan patches the removable singularity at radius=0.
    airy = replacenan(jv(1, np.pi * radius) / (2.0 * radius))
    airy = airy.transpose()
    return airy * airy.conj()
def hexpb():
"""
Calculate the primary beam for hexagonal holes.
Parameters
----------
None
Returns
-------
pb * pb.conj(): 2D float array
primary beam for hexagonal holes
"""
pb = hexee.hex_eeAG(
s=hexpb.size,
c=(hexpb.offx, hexpb.offy),
d=hexpb.d,
lam=hexpb.lam,
pitch=hexpb.pitch,
)
return pb * pb.conj()
def ffc(kx, ky):
    """
    Cosine fringe term of the analytic model for the current baseline.

    The baseline endpoints (ri, rj) and sampling parameters are supplied
    as attributes on this function by model_array().

    Parameters
    ----------
    kx, ky: float, float
        x- and y-components of the image-plane (spatial frequency) vector

    Returns
    -------
    cos_array: 2D float array
        cosine fringe term
    """
    dx = kx - ffc.offx
    dy = ky - ffc.offy
    bx = ffc.ri[0] - ffc.rj[0]
    by = ffc.ri[1] - ffc.rj[1]
    phase = 2 * np.pi * ffc.pitch * (dx * bx + dy * by) / ffc.lam
    return 2 * np.cos(phase)
def ffs(kx, ky):
    """
    Sine fringe term of the analytic model for the current baseline.

    The baseline endpoints (ri, rj) and sampling parameters are supplied
    as attributes on this function by model_array().

    Parameters
    ----------
    kx, ky: float, float
        x- and y-components of the image-plane (spatial frequency) vector

    Returns
    -------
    sin_array: 2D float array
        sine fringe term
    """
    dx = kx - ffs.offx
    dy = ky - ffs.offy
    bx = ffs.ri[0] - ffs.rj[0]
    by = ffs.ri[1] - ffs.rj[1]
    phase = 2 * np.pi * ffs.pitch * (dx * bx + dy * by) / ffs.lam
    return -2 * np.sin(phase)
def model_array(
    ctrs, lam, oversample, pitch, fov, d, centering="PIXELCENTERED", shape="circ"
):
    """
    Create the analytic fringe model for one wavelength.

    Configures the module-level evaluator functions (primarybeam, hexpb,
    ffc, ffs) through function attributes, then samples one constant
    slice plus a cosine and a sine slice per hole pair.

    Parameters
    ----------
    ctrs: 2D float array
        centers of holes
    lam: float
        wavelength in the bandpass for this particular model
    oversample: integer
        oversampling factor
    pitch: float
        sampling pitch in radians in image plane
    fov: integer
        number of detector pixels on a side
    d: float
        hole diameter for 'circ'; flat-to-flat distance for 'hex'
    centering: string
        subpixel centering; "PIXELCENTERED" puts the brightest detector
        pixel at the center of the trimmed frame, "PIXELCORNER" at its
        corner; any other value is used directly as the offset pair
    shape: string
        hole shape: 'circ' or 'hex'

    Returns
    -------
    tuple (2D float array, list of 2D float arrays)
        the primary beam for the requested hole shape and the fringe
        model slices; None (with a critical log) for an unknown shape
    """
    if centering == "PIXELCORNER":
        off = np.array([0.0, 0.0])
    elif centering == "PIXELCENTERED":
        off = np.array([0.5, 0.5])
    else:
        off = centering

    log.debug("------------------")
    log.debug("Model Parameters:")
    log.debug("------------------")
    log.debug("pitch:%s fov:%s oversampling:%s ", pitch, fov, oversample)
    log.debug("centers:%s", ctrs)
    log.debug("wavelength:%s centering:%s off:%s ", lam, centering, off)
    log.debug("shape:%s d:%s ", shape, d)

    # Shared sampling geometry for all the function-attribute evaluators.
    grid = (oversample * fov, oversample * fov)
    off_x = oversample * fov / 2.0 - off[0]  # in pixels
    off_y = oversample * fov / 2.0 - off[1]
    sampling = pitch / float(oversample)

    # Primary-beam evaluators (circular and hexagonal) share parameters.
    for beam in (primarybeam, hexpb):
        beam.shape = shape
        beam.lam = lam
        beam.size = grid
        beam.offx = off_x
        beam.offy = off_y
        beam.pitch = sampling
        beam.d = d

    # Cosine/sine fringe evaluators share parameters too.
    for fringe_fn in (ffc, ffs):
        fringe_fn.N = len(ctrs)  # number of holes
        fringe_fn.lam = lam
        fringe_fn.over = oversample
        fringe_fn.pitch = sampling
        fringe_fn.size = grid
        fringe_fn.offx = off_x
        fringe_fn.offy = off_y

    # Enumerate every hole pair (i, j) with i < j as a flat (npairs, 2) table.
    alist = []
    for i in range(ffc.N - 1):
        for j in range(ffc.N - 1):
            if j + i + 1 < ffc.N:
                alist = np.append(alist, i)
                alist = np.append(alist, j + i + 1)
    alist = alist.reshape(len(alist) // 2, 2)

    # First slice is the constant term; then cosine and sine per baseline.
    ffmodel = []
    ffmodel.append(ffc.N * np.ones(ffc.size))
    for pair in alist:
        ffc.ri = ctrs[int(pair[0])]
        ffc.rj = ctrs[int(pair[1])]
        ffs.ri = ctrs[int(pair[0])]
        ffs.rj = ctrs[int(pair[1])]
        ffmodel.append(np.transpose(np.fromfunction(ffc, ffc.size)))
        ffmodel.append(np.transpose(np.fromfunction(ffs, ffs.size)))

    if shape == "circ":  # if unspecified (default), or specified as 'circ'
        return np.fromfunction(primarybeam, ffc.size), ffmodel
    elif shape == "hex":
        return hexpb(), ffmodel
    else:
        log.critical(
            "Must provide a valid hole shape. Current supported shapes \
            are circ and hex."
        )
        return None
def weighted_operations(img, model, dqm=None):
    """
    Solve the weighted least squares problem A x = b.

    A is the model, b is the data (image), and x is the coefficient
    vector.  Data are weighted by the inverse of their Poisson variance,
    w_i = 1 / sigma_i with sigma_i^2 proportional to |b_i|:

        x = inv(At.W.A).(At.W.b)

    Parameters
    ----------
    img: 2D float array
        input data
    model: 3D float array
        analytic model, one fringe slice per last-axis entry
    dqm: 2D bool array or None
        bad pixel mask; None means no pixels are excluded

    Returns
    -------
    x: 1D float array
        coefficient vector
    res: 2D float array
        residual (data minus fit), with bad pixels restored as NaN
    cond: None
        placeholder for a condition number (not computed yet)
    singvals: 1D float array
        singular values from the least squares solve

    Notes
    -----
    Use matrix_operations() for equal weighting of data.
    """
    npix = np.shape(img)[0] * np.shape(img)[1]
    flatimg = img.reshape(npix)

    # Bug fix: dqm used to be reshaped before the None check, so the
    # documented dqm=None path crashed with AttributeError and the else
    # branch below was unreachable.
    if dqm is not None:
        nanlist = np.where(dqm.reshape(npix))  # where DO_NOT_USE is set
    else:
        nanlist = (np.array((), dtype=int),)  # shouldn't occur w/MAST JWST data

    # see original linearfit https://github.com/agreenbaum/ImPlaneIA:
    # agreenbaum committed on May 21, 2017 1 parent 3e0fb8b
    # commit bf02eb52c5813cb5d77036174a7caba703f9d366
    flatimg = np.delete(flatimg, nanlist)  # DATA values

    # Photon noise variance - proportional to ADU (for roughly uniform
    # adu2electron factor); pixels at or below 1 ADU get zero weight.
    variance = np.abs(flatimg)
    weights = np.where(flatimg <= 1.0, 0.0, 1.0 / np.sqrt(variance))  # anand 2022 Jan

    log.debug(f"{len(nanlist[0]):d} bad pixels skipped in weighted fringefitter")

    # A - but delete all pixels flagged by dq array
    flatmodel_nan = model.reshape(npix, np.shape(model)[2])
    flatmodel = np.zeros((len(flatimg), np.shape(model)[2]))
    for fringe in range(np.shape(model)[2]):
        flatmodel[:, fringe] = np.delete(flatmodel_nan[:, fringe], nanlist)

    # Weighted system: A.w and b.w
    Aw = flatmodel * weights[:, np.newaxis]
    bw = flatimg * weights
    # rcond=None requests the machine-precision-based cutoff and avoids
    # the FutureWarning from older NumPy defaults.
    x, rss, rank, singvals = np.linalg.lstsq(Aw, bw, rcond=None)

    # Actual (unweighted) residuals in the image, flattened
    res = flatimg - np.dot(flatmodel, x)

    # Re-insert the skipped bad pixels as NaN and restore 2-D shape
    naninsert = nanlist[0] - np.arange(len(nanlist[0]))
    res = np.insert(res, naninsert, np.nan)
    res = res.reshape(img.shape[0], img.shape[1])

    cond = None
    return x, res, cond, singvals  # no condition number yet...
def matrix_operations(img, model, flux=None, linfit=False, dqm=None):
    """
    Solve the unweighted least squares problem A x = b.

    A is the model, b is the data (img), and x is the coefficient
    vector: x = inv(At.A).(At.b).  If a flux is given, the data are
    normalized to that total flux first.

    Parameters
    ----------
    img: 2D float array
        input data
    model: 3D float array
        analytic model, one fringe slice per last-axis entry
    flux: float or None
        normalization factor; if given, data are scaled to this total
    linfit: bool
        if True, also attempt a covariance solution with the optional
        'linearfit' package (silently skipped when not installed)
    dqm: 2D bool array or None
        bad pixel mask slice; None means no pixels are excluded

    Returns
    -------
    x: 1D float array
        solution to fit
    res: 2D float array
        residuals in fit, with bad pixels restored as NaN
    cond: float
        condition number of the inverse of the product of model and its
        transpose
    linfit_result: linearfit result or None
    """
    npix = np.shape(img)[0] * np.shape(img)[1]
    flatimg = img.reshape(npix)

    log.info("fringefitting.leastsqnrm.matrix_operations(): ")
    log.info(f"\timg {img.shape:}")
    log.info(
        f"\tL x W = {img.shape[0]:d} x {img.shape[1]:d} = {img.shape[0] * img.shape[1]:d}",
    )
    log.info(f"\tflatimg {flatimg.shape:}")
    log.info("\ttype(dqm) %s", type(dqm))

    # Bug fix: the mask used to be reshaped (and its shape logged) before
    # the None check, so passing dqm=None raised AttributeError and the
    # else branch was unreachable.
    if dqm is not None:
        flatdqm = dqm.reshape(npix)
        log.info(f"\tdqm {dqm.shape:}")
        log.info(f"\tflatdqm {flatdqm.shape:}")
        nanlist = np.where(flatdqm)  # where DO_NOT_USE is set
    else:
        nanlist = (np.array((), dtype=int),)  # shouldn't occur w/MAST JWST data

    log.info(f"\ttype(nanlist) {type(nanlist):}, len={len(nanlist):}")
    log.info(f"\tnumber of nanlist pixels: {len(nanlist[0]):d} items")
    log.info(f"\t{len(nanlist[0]):d} DO_NOT_USE pixels found in data slice")

    flatimg = np.delete(flatimg, nanlist)
    log.info(f"\tflatimg {flatimg.shape:} after deleting {len(nanlist[0]):d}")

    if flux is not None:
        flatimg = flux * flatimg / flatimg.sum()

    # A: the model with flagged pixels removed from every fringe slice
    flatmodel_nan = model.reshape(npix, np.shape(model)[2])
    flatmodel = np.zeros((len(flatimg), np.shape(model)[2]))
    log.info(f"\tflatmodel_nan {flatmodel_nan.shape:}")
    log.info(f"\tflatmodel {flatmodel.shape:}")
    log.info(f"\tdifference {flatmodel_nan.shape[0] - flatmodel.shape[0]:}")
    log.info("flat model dimensions %s", np.shape(flatmodel))
    log.info("flat image dimensions %s", np.shape(flatimg))
    for fringe in range(np.shape(model)[2]):
        flatmodel[:, fringe] = np.delete(flatmodel_nan[:, fringe], nanlist)

    flatmodeltransp = flatmodel.transpose()  # At (A transpose)
    modelproduct = np.dot(flatmodeltransp, flatmodel)  # At.A (square matrix)
    data_vector = np.dot(flatmodeltransp, flatimg)  # At.b
    inverse = linalg.inv(modelproduct)  # inv(At.A)
    cond = np.linalg.cond(inverse)

    x = np.dot(inverse, data_vector)
    res = flatimg - np.dot(flatmodel, x)

    # Re-insert the skipped bad pixels as NaN and restore 2-D shape
    naninsert = nanlist[0] - np.arange(len(nanlist[0]))
    res = np.insert(res, naninsert, np.nan)
    res = res.reshape(img.shape[0], img.shape[1])

    log.info("model flux %s", flux)
    log.info("data flux %s", flatimg.sum())
    log.info("flat model dimensions %s", np.shape(flatmodel))
    log.info("model transpose dimensions %s", np.shape(flatmodeltransp))
    log.info("flat image dimensions %s", np.shape(flatimg))
    log.info("transpose * image data dimensions %s", np.shape(data_vector))
    log.info("flat img * transpose dimensions %s", np.shape(inverse))

    if linfit:
        try:
            from linearfit import linearfit  # type: ignore[import-not-found]

            # dependent variables
            M = np.asmatrix(flatimg)
            # photon noise
            noise = np.sqrt(np.abs(flatimg))
            # this sets the weights of pixels fulfilling condition to zero
            weights = np.where(np.abs(flatimg) <= 1.0, 0.0, 1.0 / (noise**2))
            # uniform weight
            wy = weights
            S = np.asmatrix(np.diag(wy))
            # matrix of independent variables
            C = np.asmatrix(flatmodeltransp)
            # initialize object
            result = linearfit.LinearFit(M, S, C)
            # do the fit
            result.fit()
            # delete inverse_covariance_matrix to reduce size of pickled file
            result.inverse_covariance_matrix = []
            linfit_result = result
            log.info("Returned linearfit result")
        except ImportError:
            linfit_result = None
            log.info("linearfit module not imported, no covariances saved.")
    else:
        linfit_result = None
        log.info("linearfit not attempted, no covariances saved.")

    return x, res, cond, linfit_result
def multiplyenv(env, fringeterms):
    """
    Multiply the primary-beam envelope into every fringe slice.

    Parameters
    ----------
    env: 2D float array
        envelope (fov x fov)
    fringeterms: list of 2D float arrays
        fringe model slices

    Returns
    -------
    full: 3D float array
        cube whose slice i is env * fringeterms[i]; the final slice is
        left as the initialized ones
    """
    nslices = np.shape(fringeterms)[0]
    ny = np.shape(fringeterms)[1]
    nx = np.shape(fringeterms)[2]
    full = np.ones((ny, nx, nslices + 1))
    for slot, term in enumerate(fringeterms):
        full[:, :, slot] = env * term
    log.debug("Total number of fringe terms: %s", len(fringeterms) - 1)
    return full
def tan2visibilities(coeffs):
    """
    Convert fitted sine/cosine coefficients to fringe amplitude and phase.

    For one fringe, A{cos(kx)cos(dphi) + sin(kx)sin(dphi)} =
    A(a cos(kx) + b sin(kx)) with a = cos(dphi), b = sin(dphi), so the
    fit measures a' = A*a and b' = A*b.  Then dphi = atan2(b', a') and
    A = sqrt(a'^2 + b'^2).

    Parameters
    ----------
    coeffs: 1D float array
        fit coefficients; element 0 is the constant term, followed by
        (cosine, sine) pairs per baseline

    Returns
    -------
    amp, delta: 1D float array, 1D float array
        fringe amplitude & phase
    """
    npairs = (len(coeffs) - 1) // 2
    amp = np.zeros(npairs)
    delta = np.zeros(npairs)
    for pair in range(npairs):
        cosine_coeff = coeffs[2 * pair + 1]
        sine_coeff = coeffs[2 * pair + 2]
        delta[pair] = np.arctan2(sine_coeff, cosine_coeff)
        amp[pair] = np.sqrt(sine_coeff**2 + cosine_coeff**2)
    log.debug(
        f"tan2visibilities: shape coeffs:{np.shape(coeffs)} "
        f"shape delta:{np.shape(delta)}"
    )
    # returns fringe amplitude & phase
    return amp, delta
def populate_antisymmphasearray(deltaps, n=7):
    """
    Arrange pairwise fringe phases into an antisymmetric n x n matrix.

    Row h of the upper triangle receives the phases of hole h paired
    with every later hole (row lengths n-1, n-2, ..., 1); the lower
    triangle is the negated transpose.

    Parameters
    ----------
    deltaps: 1D float array
        pistons between each pair of holes
    n: integer, optional
        number of holes (default=7)

    Returns
    -------
    arr: 2D float array
        fringe phases between each pair of holes
    """
    arr = np.zeros((n, n))
    offset = 0
    for row in range(n - 1):
        width = n - 1 - row
        arr[row, row + 1:] = deltaps[offset:offset + width]
        offset += width
    return arr - arr.T
def populate_symmamparray(amps, n=7):
    """
    Arrange pairwise fringe amplitudes into a symmetric n x n matrix.

    Row h of the upper triangle receives the amplitudes of hole h paired
    with every later hole; the lower triangle mirrors it.

    Parameters
    ----------
    amps: 1D float array
        fringe visibility between each pair of holes
    n: integer, optional
        number of holes (default=7)

    Returns
    -------
    arr: 2D float array
        fringe amplitude array
    """
    arr = np.zeros((n, n))
    offset = 0
    for row in range(n - 1):
        width = n - 1 - row
        arr[row, row + 1:] = amps[offset:offset + width]
        offset += width
    return arr + arr.T
def t3_amplitudes(amps, n=7):
    """
    Triple-product amplitudes for every closing triangle of holes.

    (These are NOT closure amplitudes.)

    Parameters
    ----------
    amps: 1D float array
        fringe visibility between each pair of holes
    n: integer, optional
        number of holes (default=7)

    Returns
    -------
    cpamps: 1D float array
        triple product amplitude array
    """
    arr = populate_symmamparray(amps, n=n)
    cpamps = np.zeros(int(comb(n, 3)))
    idx = 0
    # Visit every hole triple (p, q, r) with p < q < r in lexicographic
    # order, matching the original nested-offset loop ordering.
    for p in range(n - 2):
        for q in range(p + 1, n - 1):
            for r in range(q + 1, n):
                cpamps[idx] = arr[p, q] * arr[q, r] * arr[r, p]
                idx += 1
    return cpamps
def redundant_cps(deltaps, n=7):
    """
    Closure phases for every set of 3 holes.

    Parameters
    ----------
    deltaps: 1D float array
        pistons between each pair of holes
    n: integer, optional
        number of holes (default=7)

    Returns
    -------
    cps: 1D float array
        closure phases
    """
    arr = populate_antisymmphasearray(deltaps, n=n)  # fringe phase array
    cps = np.zeros(int(comb(n, 3)))
    idx = 0
    # Visit every hole triple (p, q, r) with p < q < r in lexicographic
    # order, matching the original nested-offset loop ordering.
    for p in range(n - 2):
        for q in range(p + 1, n - 1):
            for r in range(q + 1, n):
                cps[idx] = arr[p, q] + arr[q, r] + arr[r, p]
                idx += 1
    return cps
def closurephase(deltap, n=7):
    """
    Calculate closure phases between each pair of holes.

    Parameters
    ----------
    deltap: 1D float array
        pistons between each pair of holes
    n: integer
        number of holes in the mask; 7 and 10 holes available (JWST & GPI)

    Returns
    -------
    cps: 1D float array
        closure phases

    Raises
    ------
    ValueError
        if n is not 7 or 10
    """
    # p is a triangular table: row h holds the pistons of hole h paired
    # with every later hole (row lengths n-1, n-2, ..., 1).
    if n == 7:
        rows = (
            deltap[:6],
            deltap[6:11],
            deltap[11:15],
            deltap[15:18],
            deltap[18:20],
            deltap[20:],
        )
    elif n == 10:
        rows = (
            deltap[:9],
            deltap[9:17],
            deltap[17:24],
            deltap[24:30],
            deltap[30:35],
            deltap[35:39],
            deltap[39:42],
            deltap[42:44],
            deltap[44:],
        )
    else:
        # Previously an invalid n only logged a critical message and then
        # crashed with a NameError on the undefined table; fail fast with
        # a clear error instead.
        raise ValueError(f"invalid hole number: {n}; only 7 and 10 are supported")
    p = np.array(rows, dtype=object)

    # calculates closure phases for general N-hole mask (with p-array set
    # up properly above)
    cps = np.zeros((n - 1) * (n - 2) // 2)
    for j1 in range(n - 2):
        for j2 in range(n - 2 - j1):
            cps[int(j1 * ((n + (n - 3) - j1) / 2.0)) + j2] = (
                p[j1][0] + p[j1 + 1][j2] - p[j1][j2 + 1]
            )
    return cps
def closure_amplitudes(amps, n=7):
    """
    Closure amplitudes for every set of 4 holes.

    Parameters
    ----------
    amps: 1D float array
        fringe amplitudes
    n: integer, optional
        number of holes (default=7)

    Returns
    -------
    cas: 1D float array
        closure amplitudes
    """
    arr = populate_symmamparray(amps, n=n)  # fringe amp array
    cas = np.zeros(int(comb(n, 4)))
    idx = 0
    # Visit every hole quadruple (p, q, r, s) with p < q < r < s in
    # lexicographic order, matching the original nested-offset loops.
    for p in range(n - 3):
        for q in range(p + 1, n - 2):
            for r in range(q + 1, n - 1):
                for s in range(r + 1, n):
                    cas[idx] = (arr[p, q] * arr[s, r]) / (arr[p, r] * arr[q, s])
                    idx += 1
    return cas
def q4_phases(deltaps, n=7):
    """
    Quad phases for every set of 4 holes.

    Parameters
    ----------
    deltaps: 1D float array
        pistons between each pair of holes
    n: integer, optional
        number of holes (default=7)

    Returns
    -------
    quad_phases: 1D float array
        quad phases
    """
    arr = populate_antisymmphasearray(deltaps, n=n)  # fringe phase array
    quad_phases = np.zeros(int(comb(n, 4)))
    idx = 0
    # Visit every hole quadruple (p, q, r, s) with p < q < r < s in
    # lexicographic order, matching the original nested-offset loops.
    for p in range(n - 3):
        for q in range(p + 1, n - 2):
            for r in range(q + 1, n - 1):
                for s in range(r + 1, n):
                    quad_phases[idx] = (
                        arr[p, q] + arr[s, r] - arr[p, r] - arr[q, s]
                    )
                    idx += 1
    return quad_phases
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@ami@leastsqnrm.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.