metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "_text.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmap/legendgrouptitle/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``choroplethmap.legendgrouptitle.text`` property."""

    def __init__(
        self, plotly_name="text", parent_name="choroplethmap.legendgrouptitle", **kwargs
    ):
        # Callers may override edit_type; default to "style" otherwise.
        edit_type = kwargs.pop("edit_type", "style")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmap@legendgrouptitle@_text.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "quatrope/feets",
"repo_path": "feets_extracted/feets-master/doc/source/JSAnimation/__init__.py",
"type": "Python"
}
|
from .html_writer import HTMLWriter
|
quatropeREPO_NAMEfeetsPATH_START.@feets_extracted@feets-master@doc@source@JSAnimation@__init__.py@.PATH_END.py
|
{
"filename": "use_everybeam.py",
"repo_name": "JLBLine/WODEN",
"repo_path": "WODEN_extracted/WODEN-master/wodenpy/primary_beam/use_everybeam.py",
"type": "Python"
}
|
import numpy as np
from astropy.coordinates import ITRS, SkyCoord, AltAz, EarthLocation
from astropy.time import Time, TimeDelta
import astropy.units as u
import argparse
from wodenpy.use_libwoden.create_woden_struct_classes import Woden_Struct_Classes
import erfa
from typing import Union, Tuple
import concurrent.futures
from line_profiler import profile
import os
import astropy
##Are we just making online documentation? If so, don't import everybeam
##Installing everybeam is non-trivial, so trying to get readthedocs to install
##it is a waste of time
# NOTE(review): `read_the_docs_build` is only referenced by the commented-out
# import guard below; the live guard uses `have_everybeam` instead.
read_the_docs_build = os.environ.get('READTHEDOCS', None) == 'True'
from wodenpy.wodenpy_setup.run_setup import check_for_library
# True when the `everybeam` library is importable in this environment.
have_everybeam = check_for_library('everybeam')
class EB_fake:
    """
    A stand-in for the `everybeam` module so the documentation can be built
    online (e.g. on ReadTheDocs) without installing `everybeam`, which is
    non-trivial.
    """
    def __init__(self):
        """
        Set every attribute that is ever referenced from `everybeam` to None.
        """
        for attr in ("OSKAR", "LOFAR", "MWA", "MWALocal",
                     "load_telescope", "Telescope"):
            setattr(self, attr, None)
# Use the real everybeam module when available; otherwise substitute the
# fake so attribute access (e.g. type annotations below) still works.
if have_everybeam:
    import everybeam as eb
else:
    eb = EB_fake()
# if read_the_docs_build:
#     eb = EB_fake()
# else:
#     import everybeam as eb
##This call is so we can use it as a type annotation
woden_struct_classes = Woden_Struct_Classes()
# Aliases to the project-defined ctypes-style structure classes.
Source_Catalogue = woden_struct_classes.Source_Catalogue
Woden_Settings = woden_struct_classes.Woden_Settings
def radec_to_xyz(ra : float, dec : float, time : Time):
    """
    Convert ICRS RA/Dec (in radians) to ITRS cartesian coordinates.

    Adapted from the everybeam documentation
    https://everybeam.readthedocs.io/en/latest/tree/demos/lofar-array-factor.html

    Parameters
    ----------
    ra : float
        Right ascension in radians (scalar or array).
    dec : float
        Declination in radians (scalar or array).
    time : Time
        Astropy time of the observation (fixes the ITRS frame).

    Returns
    -------
    np.ndarray
        ITRS X, Y, Z coordinates (transposed so coords vary along axis 0).
    """
    icrs_coord = SkyCoord(ra * u.rad, dec * u.rad, frame='icrs')
    itrs_coord = icrs_coord.transform_to(ITRS(obstime=time))
    return np.asarray(itrs_coord.cartesian.xyz.transpose())
def load_OSKAR_telescope(ms_path : str, response_model = "skala40_wave",
                         use_differential_beam : bool = True) -> eb.OSKAR: # type: ignore
    """Load an OSKAR telescope from a measurement set.

    Parameters
    ----------
    ms_path : str
        Path to the measurement set
    response_model : str, optional
        Response model to use, by default "skala40_wave"
    use_differential_beam : bool, optional
        Use the differential beam a.k.a return a "normalised" beam, by default True

    Returns
    -------
    eb.OSKAR
        Telescope object
    """
    print("OSKAR response model", response_model)
    ##Ask EveryBeam to load whatever telescope the MS describes
    telescope = eb.load_telescope(
        ms_path,
        use_differential_beam=use_differential_beam,
        element_response_model=response_model,
    )
    ##Warn rather than raise, so the caller can still proceed deliberately
    if type(telescope) is not eb.OSKAR:
        print(f'WARNING: Telescope specified in {ms_path} is not an OSKAR telescope. Proceeding, but you might get nonsense results.')
    return telescope
def load_LOFAR_telescope(ms_path : str, response_model : str = "hamaker",
                         use_differential_beam : bool = False) -> eb.LOFAR: # type: ignore
    """Load a LOFAR telescope from a measurement set. Settings lifted
    directly from https://everybeam.readthedocs.io/en/latest/tree/demos/lofar-lobes.html

    Parameters
    ----------
    ms_path : str
        Path to the measurement set
    response_model : str, optional
        Response model to use, by default "hamaker"
    use_differential_beam : bool, optional
        Use the differential beam a.k.a return a "normalised" beam, by default False

    Returns
    -------
    eb.LOFAR
        Telescope object
    """
    telescope = eb.load_telescope(ms_path,
                                  use_differential_beam=use_differential_beam,
                                  element_response_model=response_model)
    ##Warn (not raise) so the user can proceed deliberately with a
    ##mismatched telescope type.
    ##BUGFIX: warning previously said "not an OSKAR telescope" — this
    ##loader checks for LOFAR.
    if type(telescope) != eb.LOFAR:
        print(f'WARNING: Telescope specified in {ms_path} is not a LOFAR telescope. Proceeding, but you might get nonsense results.')
    return telescope
def load_MWA_telescope(ms_path : str, coeff_path : str,
                       use_local_mwa : bool = True) -> Union[eb.MWA, eb.MWALocal]: # type: ignore
    """Load an MWA telescope from a measurement set.

    Parameters
    ----------
    ms_path : str
        Path to the measurement set
    coeff_path : str
        Path to the MWA FEE beam coefficient file
    use_local_mwa : bool, optional
        Use the local MWA model, which takes za/az instead of RA/Dec. Defaults
        to True

    Returns
    -------
    Union[eb.MWA, eb.MWALocal]
        Telescope object, either MWA (if use_local_mwa is False) or MWALocal
        (if use_local_mwa is True)
    """
    ## Load the telescope. Adding use_differential_beam seems to do nothing,
    ## so always leave it as False
    telescope = eb.load_telescope(ms_path,
                                  use_differential_beam=False,
                                  coeff_path=coeff_path,
                                  use_local_mwa=use_local_mwa)
    ##Warn rather than raise if the MS doesn't actually describe an MWA array
    if type(telescope) not in (eb.MWA, eb.MWALocal):
        print(f'WARNING: Telescope specified in {ms_path} is not an MWA telescope. Proceeding, but you might get nonsense results.')
    return telescope
def eb_local_xyz_from_radec(ra : float, dec : float,
                            altaz_frame : astropy.coordinates.AltAz,
                            delta_az : float = (1/2)*np.pi,
                            negative_azimuth=True):
    """
    Get the local cartesian coordinates used by EveryBeam for a given RA, Dec,
    and AltAz frame.

    Transforms the given RA/Dec into the supplied AltAz frame, then adjusts
    the azimuth (optionally reversing it, then adding `delta_az`) to match
    the coordinate convention EveryBeam uses internally. `delta_az` defaults
    to π/2, found by experiment to reproduce EveryBeam outputs. This is used
    to emulate EveryBeam's parallactic rotation; it is only really needed for
    the MWA beam, as EveryBeam can do the rotation itself for other beams.

    Parameters
    -----------
    ra : float
        Right ascension in radians.
    dec : float
        Declination in radians.
    altaz_frame : `astropy.coordinates.AltAz`
        The AltAz frame to transform the coordinates into.
    delta_az : float, optional
        The azimuth adjustment in radians. Default is π/2.
    negative_azimuth : bool, optional
        If True, the azimuth is reversed before adding the delta. Default is True.

    Returns
    --------
    numpy.ndarray
        A 3xN array of the local cartesian coordinates.
    """
    local_coord = SkyCoord(ra=ra*u.rad, dec=dec*u.rad,
                           frame='icrs').transform_to(altaz_frame)
    az_shift = delta_az * u.rad
    ##Reverse azimuth if requested, then apply the experimental offset
    base_az = -local_coord.az if negative_azimuth else local_coord.az
    shifted_coord = SkyCoord(az=base_az + az_shift,
                             alt=local_coord.alt,
                             distance=local_coord.distance,
                             frame=local_coord.frame)
    return np.array(shifted_coord.cartesian.xyz.transpose())
def eb_north_east(direction : np.ndarray, ncp_t : np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    """
    Compute the north and east unit-style vectors for a direction ITRF vector,
    given the ITRF vector towards the north celestial pole.

    East is the normalised cross product of `ncp_t` with `direction`; north is
    the cross product of `direction` with east.

    Translated from EveryBeam station.cc Station::ComputeElementResponse::

        const vector3r_t east = normalize(cross(ncp_t, direction));
        const vector3r_t north = cross(direction, east);
        options.east = east;
        options.north = north;

    Parameters
    ------------
    direction : np.ndarray
        A 3-element array representing the direction vector.
    ncp_t : np.ndarray
        A 3-element array representing the vector towards the NCP.

    Returns
    --------
    Tuple[np.ndarray, np.ndarray]
        A tuple containing the north and east vectors as 3-element arrays.
    """
    raw_east = np.cross(ncp_t, direction)
    east = raw_east / np.linalg.norm(raw_east)
    north = np.cross(direction, east)
    return north, east
def calc_everybeam_rotation(direction : np.ndarray, north : np.ndarray,
                            east : np.ndarray) -> np.ndarray:
    """Given an ITRF 3-vector in the direction of interest `direction`,
    and the associated north and east vectors, calculate the 2x2 rotation
    matrix to rotate by parallactic angle.

    Translated from EveryBeam beamformer.cc BeamFormer::LocalResponse::

        const vector3r_t e_phi = normalize(cross(direction));
        const vector3r_t e_theta = cross(e_phi, direction);
        result *= {dot(e_theta, options.north), dot(e_theta, options.east),
                   dot(e_phi, options.north), dot(e_phi, options.east)};

    Parameters
    ------------
    direction : np.ndarray
        A 3-element array representing the direction ITRF vector.
    north : np.ndarray
        A 3-element array representing the north vector.
    east : np.ndarray
        A 3-element array representing the east vector.

    Returns
    --------
    np.ndarray
        A 2x2 rotation matrix.
    """
    zenith = np.array([0.0, 0.0, 1.0])
    ##e_phi and e_theta span the plane perpendicular to `direction`
    e_phi = np.cross(zenith, direction)
    e_phi = e_phi / np.linalg.norm(e_phi)
    e_theta = np.cross(e_phi, direction)
    e_theta = e_theta / np.linalg.norm(e_theta)
    ##Project the (theta, phi) basis onto (north, east)
    return np.array([[np.dot(e_theta, north), np.dot(e_theta, east)],
                     [np.dot(e_phi, north), np.dot(e_phi, east)]])
# @profile
def run_everybeam(ras: np.ndarray, decs: np.ndarray,
                  beam_ra0: float, beam_dec0: float,
                  j2000_latitudes: np.ndarray, j2000_lsts: np.ndarray,
                  current_latitude: float, current_longitude: float,
                  times: np.ndarray, freqs: np.ndarray,
                  telescope: eb.Telescope, # type: ignore
                  station_ids: np.ndarray,
                  apply_beam_norms: bool = True,
                  reorder_jones: bool = False,
                  element_only: bool = False,
                  eb_rotate: bool = False,
                  parallactic_rotate: bool = False,
                  para_angle_offset: float = 0) -> np.ndarray:
    """
    Calculate the Jones matrices for a given set of coordinates, times,
    frequencies, and station ids using the EveryBeam library.

    `j2000_latitudes` should be the array latitude as precessed back to J2000,
    with `j2000_lsts` being the matching LST in J2000. `current_latitude` and
    `current_longitude` should be latitude and longitude of the array at the
    time of the observation. `telescope` should be an EveryBeam telescope object.

    Parameters
    ------------
    ras : np.ndarray
        Right ascensions of the coordinates in radians.
    decs : np.ndarray
        Declinations of the coordinates in radians.
    beam_ra0 : float
        Right ascension of the beam center in radians.
    beam_dec0 : float
        Declination of the beam center in radians.
    j2000_latitudes : np.ndarray
        Latitudes in J2000 coordinates, one per time step.
    j2000_lsts : np.ndarray
        Local sidereal times in J2000 coordinates, one per time step.
    current_latitude : float
        Current latitude in radians.
    current_longitude : float
        Current longitude in radians.
    times : np.ndarray
        Array of observation times (astropy Time instances).
    freqs : np.ndarray
        Array of frequencies.
    telescope : eb.Telescope
        Telescope object from the EveryBeam library.
    station_ids : np.ndarray
        Array of station IDs.
    apply_beam_norms : bool, optional
        Whether to apply beam normalisation. Defaults to True. Achieved by
        calculating the beam response at beam centre, and multiplying all
        Jones by the inverse of this central beam response.
    reorder_jones : bool, optional
        Whether to reorder the Jones matrices. Defaults to False. Just rearranges
        the Jones matrix from [[0,0, 0,1,], [1,0, 1,1]] to [[1,1, 1,0,], [0,1, 0,0]].
    element_only : bool, optional
        Whether to use only the element response. Defaults to False. Use this to
        look at the dipole response only, not the beam formed response.
    eb_rotate : bool, optional
        Whether to apply parallactic rotation using EveryBeam. Defaults to False.
        Should probably be used for everything apart from MWA beams.
    parallactic_rotate : bool, optional
        Whether to apply parallactic angle rotation using `wodenpy`. Defaults to False.
        Should be True for MWA beams if you want rotation. If True for a non-MWA beam,
        `wodenpy` should match the output as if `eb_rotate` was True.
    para_angle_offset : float, optional
        Offset to add to the parallactic angle. Defaults to 0.
        NOTE(review): this parameter is never read inside this function —
        confirm whether it should feed into the parallactic angle calculation.

    Returns
    --------
    np.ndarray
        The calculated Jones matrices with shape (num_stations, num_times, num_freqs, num_coords, 2, 2).
    """
    num_stations = len(station_ids)
    num_times = len(times)
    num_freqs = len(freqs)
    num_coords = len(ras)
    # Initialise to NaN so any entry that is never written is detectable.
    all_output_jones = np.zeros((num_stations, num_times, num_freqs, num_coords, 2, 2), dtype=np.complex128)*np.nan
    # The MWA beams take (ra,dec) or (az,za) directly rather than ITRF vectors.
    non_itrf_beams = [eb.MWA, eb.MWALocal]
    if parallactic_rotate:
        if type(telescope) not in non_itrf_beams:
            # NOTE(review): `coords` is assigned but never used below —
            # possibly leftover from an earlier implementation.
            coords = SkyCoord(ras*u.rad, decs*u.rad, frame='icrs')
            location = EarthLocation(lat=current_latitude*u.rad,
                                     lon=current_longitude*u.rad)
    for time_ind, time in enumerate(times):
        if type(telescope) in non_itrf_beams:
            # MWA-style beams: convert RA/Dec to az/za via hour angle in J2000.
            comp_has = j2000_lsts[time_ind] - ras
            azs, els = erfa.hd2ae(comp_has, decs, j2000_latitudes[time_ind])
            zas = np.pi/2 - els
            if parallactic_rotate:
                beam_ha0 = j2000_lsts[time_ind] - beam_ra0
                beam_az0, beam_el0 = erfa.hd2ae(beam_ha0, beam_dec0,
                                                j2000_latitudes[time_ind])
                beam_za0 = np.pi/2 - beam_el0
        else:
            # ITRF-based beams: everything is expressed as ITRS unit vectors.
            phase_itrf = radec_to_xyz(beam_ra0, beam_dec0, time)
            dir_itrfs = radec_to_xyz(ras, decs, time)
            if parallactic_rotate:
                altaz_frame = AltAz(obstime=time, location=location)
                # Local vector to the north celestial pole (dec = +90 deg).
                ncp_t = eb_local_xyz_from_radec(0, np.radians(90), altaz_frame)
                dir_local = eb_local_xyz_from_radec(ras, decs, altaz_frame)
        # EveryBeam wants time as MJD in seconds.
        time_mjd_secs = time.mjd*3600*24
        if parallactic_rotate:
            # Build one 2x2 rotation matrix per direction for this time step.
            has = j2000_lsts[time_ind] - ras
            para_angles = erfa.hd2pa(has, decs, j2000_latitudes[time_ind])
            rot_matrix = np.empty((num_coords, 2,2))
            if type(telescope) in non_itrf_beams:
                # MWA beams: explicit parallactic-angle rotation matrix.
                rot_matrix[:,0,0] = np.sin(-para_angles)
                rot_matrix[:,0,1] = -np.cos(-para_angles)
                rot_matrix[:,1,0] = -np.cos(-para_angles)
                rot_matrix[:,1,1] = -np.sin(-para_angles)
            else:
                # Other beams: emulate EveryBeam's internal rotation from the
                # local direction, north, and east vectors.
                for dir_ind, dir_itrf in enumerate(dir_itrfs):
                    dir_az = dir_local[dir_ind]
                    north, east = eb_north_east(dir_az, ncp_t)
                    rot = calc_everybeam_rotation(dir_az, north, east)
                    rot_matrix[dir_ind] = rot
        for station_ind, station_id in enumerate(station_ids):
            for freq_ind, freq in enumerate(freqs):
                if apply_beam_norms:
                    # Response at beam centre, used below to normalise.
                    if type(telescope) == eb.MWA:
                        ##Get the response
                        norm_jones = telescope.station_response(time_mjd_secs, station_id, freq,
                                                                beam_ra0, beam_dec0)
                    elif type(telescope) == eb.MWALocal:
                        # NOTE(review): argument order here is (az, za) but the
                        # per-coordinate call below passes (za, az) — one of the
                        # two is likely inconsistent; verify against the
                        # everybeam MWALocal.station_response signature.
                        norm_jones = telescope.station_response(time_mjd_secs, station_id, freq,
                                                                beam_az0, beam_za0)
                    if type(telescope) in non_itrf_beams:
                        if parallactic_rotate:
                            # Rotation at the beam centre for the norm Jones.
                            ha0 = j2000_lsts[time_ind] - beam_ra0
                            para_angles = erfa.hd2pa(ha0, beam_dec0, j2000_latitudes[time_ind])
                            rot = np.empty((2,2))
                            rot[0,0] = np.sin(-para_angles)
                            rot[0,1] = -np.cos(-para_angles)
                            rot[1,0] = -np.cos(-para_angles)
                            rot[1,1] = -np.sin(-para_angles)
                    else:
                        element_id = 0
                        if element_only:
                            norm_jones = telescope.element_response(time_mjd_secs, station_id, element_id, freq,
                                                                    phase_itrf, rotate=eb_rotate)
                        else:
                            norm_jones = telescope.station_response(time_mjd_secs, station_id, freq,
                                                                    phase_itrf, phase_itrf,
                                                                    rotate=eb_rotate)
                        if parallactic_rotate:
                            dir_phase_local = eb_local_xyz_from_radec(beam_ra0, beam_dec0, altaz_frame)
                            north, east = eb_north_east(dir_phase_local, ncp_t)
                            rot = calc_everybeam_rotation(dir_phase_local, north, east)
                    if parallactic_rotate:
                        # Rotate the norm Jones before inverting it below.
                        norm_jones = np.matmul(norm_jones, rot)
                for coord_ind, (ra, dec) in enumerate(zip(ras, decs)):
                    ##Only MWA uses ra,dec as a direct input
                    if type(telescope) == eb.MWA:
                        response = telescope.station_response(time_mjd_secs, station_id, freq,
                                                              ra, dec)
                    ##Only MWALocal uses az,za as a direct input
                    elif type(telescope) == eb.MWALocal:
                        # NOTE(review): (za, az) order here vs (az, za) in the
                        # norm call above — confirm which matches everybeam.
                        response = telescope.station_response(time_mjd_secs, station_id, freq,
                                                              zas[coord_ind], azs[coord_ind])
                    ##Everything else uses ITRF coordinates
                    else:
                        if element_only:
                            response = telescope.element_response(time_mjd_secs, station_id, 0, freq,
                                                                  dir_itrfs[coord_ind], rotate=eb_rotate)
                        else:
                            response = telescope.station_response(time_mjd_secs, station_id, freq,
                                                                  dir_itrfs[coord_ind], phase_itrf,
                                                                  rotate=eb_rotate)
                    all_output_jones[station_ind, time_ind, freq_ind, coord_ind] = response
                if parallactic_rotate:
                    ##Parallactic angle doesn't change per station or freq, but
                    ##if we are normalising the beam, we want to rotate before we normalise
                    ##So do the rotation now
                    rot_jones = np.einsum('klm,kmn->kln', all_output_jones[station_ind, time_ind, freq_ind, :, :, :], rot_matrix)
                    all_output_jones[station_ind, time_ind, freq_ind, :, :, :] = rot_jones
                if apply_beam_norms:
                    ##Each station, time, and freq gets it's own normalisation
                    ##Same 2x2 normalisation for all directions
                    inv_beam_norms = np.linalg.inv(norm_jones)
                    output_jones = np.einsum('lm,kmn->kln', inv_beam_norms, all_output_jones[station_ind, time_ind, freq_ind, :, :, :])
                    all_output_jones[station_ind, time_ind, freq_ind, :, :, :] = output_jones
    if reorder_jones:
        ##swap all_output_jones[:,:,:,:,0,0] with all_output_jones[:,:,:,:,1,1]
        all_output_jones[:, :, :, :, [0, 1], [0, 1]] = all_output_jones[:, :, :, :, [1, 0], [1, 0]]
        ##swap all_output_jones[:,:,:,:,0,1] with all_output_jones[:,:,:,:,1,0]
        all_output_jones[:, :, :, :, [0, 1], [1, 0]] = all_output_jones[:, :, :, :, [1, 0], [0, 1]]
    return all_output_jones
def run_everybeam_thread(num_threads : int, thread_id : int,
                         ms_path : str, coeff_path : str,
                         ras : np.ndarray, decs : np.ndarray,
                         ra0 : float, dec0 : float,
                         j2000_latitudes : np.ndarray, j2000_lsts : np.ndarray,
                         current_latitude : float, current_longitude : float,
                         times : np.ndarray, freqs : np.ndarray,
                         station_ids : np.ndarray,
                         use_differential_beam : bool = True,
                         apply_beam_norms : bool = True,
                         reorder_jones : bool = False,
                         element_only : bool = False,
                         eb_rotate : bool = False,
                         parallactic_rotate : bool = True,
                         para_angle_offset : float = 0,
                         element_response_model='hamaker',
                         use_local_mwa : bool = True) -> Tuple[np.ndarray, int]:
    """
    Thread worker called by `run_everybeam_over_threads` to calculate the
    EveryBeam response in parallel. Calls `run_everybeam` with a contiguous
    chunk of the coordinates; see `run_everybeam` for full parameter details.

    Creates a new EveryBeam telescope object from `ms_path` inside each
    worker. This has to be done because
    `concurrent.futures.ProcessPoolExecutor` has to pickle the function and
    all its arguments, and EveryBeam objects can't be pickled.

    Parameters
    ------------
    num_threads : int
        Total number of workers launched by `run_everybeam_over_threads`.
    thread_id : int
        ID of this worker; used to work out which chunk of `ras`/`decs`
        to process.
    ms_path : str
        Path to the measurement set to load the EveryBeam telescope from.
    coeff_path : str
        Path to the MWA FEE beam coefficient file.
    ras, decs : np.ndarray
        Right ascensions / declinations of the coordinates in radians.
    ra0, dec0 : float
        Right ascension / declination of the beam centre in radians.
    j2000_latitudes, j2000_lsts : np.ndarray
        Array latitude and LST per time step, precessed to J2000.
    current_latitude, current_longitude : float
        Current latitude / longitude of the array in radians.
    times : np.ndarray
        Array of observation times.
    freqs : np.ndarray
        Array of frequencies.
    station_ids : np.ndarray
        Array of station IDs.
    use_differential_beam : bool, optional
        Return a "normalised" beam from EveryBeam. Defaults to True.
    apply_beam_norms : bool, optional
        Normalise by the inverse of the beam-centre response. Defaults to True.
    reorder_jones : bool, optional
        Rearrange the Jones matrix from [[0,0, 0,1,], [1,0, 1,1]] to
        [[1,1, 1,0,], [0,1, 0,0]]. Defaults to False.
    element_only : bool, optional
        Use only the element (dipole) response, not the beam-formed response.
        Defaults to False.
    eb_rotate : bool, optional
        Apply parallactic rotation using EveryBeam. Defaults to False.
    parallactic_rotate : bool, optional
        Apply parallactic angle rotation using `wodenpy`. Defaults to True here.
    para_angle_offset : float, optional
        Offset to add to the parallactic angle. Defaults to 0.
    element_response_model : str, optional
        The EveryBeam element response model. Defaults to 'hamaker'.
        Available options are 'hamaker' (LOFAR), 'skala40_wave' (OSKAR),
        and 'MWA' (MWA).
    use_local_mwa : bool, optional
        Use the local MWA model (takes za/az instead of RA/Dec). Defaults
        to True.

    Returns
    --------
    Tuple[np.ndarray, int]
        The calculated Jones matrices with shape
        (num_stations, num_times, num_freqs, num_coords_in_thread, 2, 2), as
        well as the thread ID. Use the thread ID to insert this output into
        the correct place in the final Jones matrix.
    """
    ##Each worker loads its own telescope; EveryBeam objects can't be pickled
    telescope = eb.load_telescope(ms_path,
                                  use_differential_beam=use_differential_beam,
                                  coeff_path=coeff_path,
                                  element_response_model=element_response_model,
                                  use_local_mwa=use_local_mwa)
    ##Work out this worker's slice of the coordinate arrays
    chunk = int(np.ceil(len(ras) / num_threads))
    low_coord = thread_id * chunk
    high_coord = low_coord + chunk
    print(f"Thread {thread_id} processing coords {low_coord} to {high_coord}")
    jones = run_everybeam(ras[low_coord:high_coord],
                          decs[low_coord:high_coord],
                          ra0, dec0,
                          j2000_latitudes, j2000_lsts,
                          current_latitude, current_longitude,
                          times, freqs,
                          telescope, station_ids,
                          apply_beam_norms=apply_beam_norms,
                          reorder_jones=reorder_jones,
                          element_only=element_only,
                          eb_rotate=eb_rotate,
                          parallactic_rotate=parallactic_rotate)
    print(f"Thread {thread_id} finished")
    return jones, thread_id
def run_everybeam_over_threads(num_threads : int,
                               ms_path : str,
                               coeff_path : str,
                               ras : np.ndarray, decs : np.ndarray,
                               ra0 : float, dec0 : float,
                               j2000_latitudes : np.ndarray, j2000_lsts : np.ndarray,
                               current_latitude : float, current_longitude : float,
                               times : np.ndarray, freqs : np.ndarray,
                               station_ids : np.ndarray,
                               use_differential_beam : bool = True,
                               apply_beam_norms : bool = True,
                               reorder_jones : bool = False,
                               element_only : bool = False,
                               eb_rotate : bool = False,
                               parallactic_rotate : bool = True,
                               use_local_mwa : bool = True,
                               para_angle_offset : float = 0,
                               element_response_model='hamaker'):
    """
    Runs `run_everybeam` in parallel over `num_threads` workers, using
    `concurrent.futures.ProcessPoolExecutor`. See `run_everybeam` for full
    details of what each parameter does.

    Each worker (`run_everybeam_thread`) creates its own EveryBeam telescope
    object from `ms_path`, because `ProcessPoolExecutor` has to pickle the
    function and all its arguments, and EveryBeam objects can't be pickled.

    Parameters
    ------------
    num_threads : int
        Number of parallel workers to launch.
    ms_path : str
        Path to the measurement set to load the EveryBeam telescope from.
    coeff_path : str
        Path to the MWA FEE beam coefficient file.
    ras, decs : np.ndarray
        Right ascensions / declinations of the coordinates in radians.
    ra0, dec0 : float
        Right ascension / declination of the beam centre in radians.
    j2000_latitudes, j2000_lsts : np.ndarray
        Array latitude and LST per time step, precessed to J2000.
    current_latitude, current_longitude : float
        Current latitude / longitude of the array in radians.
    times : np.ndarray
        Array of observation times.
    freqs : np.ndarray
        Array of frequencies.
    station_ids : np.ndarray
        Array of station IDs.
    use_differential_beam : bool, optional
        Return a "normalised" beam from EveryBeam. Defaults to True.
    apply_beam_norms : bool, optional
        Normalise by the inverse of the beam-centre response. Defaults to True.
    reorder_jones : bool, optional
        Rearrange the Jones matrix from [[0,0, 0,1,], [1,0, 1,1]] to
        [[1,1, 1,0,], [0,1, 0,0]]. Defaults to False.
    element_only : bool, optional
        Use only the element (dipole) response. Defaults to False.
    eb_rotate : bool, optional
        Apply parallactic rotation using EveryBeam. Defaults to False.
    parallactic_rotate : bool, optional
        Apply parallactic angle rotation using `wodenpy`. Defaults to True here.
    use_local_mwa : bool, optional
        Use the local MWA model (takes za/az instead of RA/Dec). Defaults
        to True.
    para_angle_offset : float, optional
        Offset to add to the parallactic angle. Defaults to 0.
    element_response_model : str, optional
        The EveryBeam element response model. Defaults to 'hamaker'.
        Available options are 'hamaker' (LOFAR), 'skala40_wave' (OSKAR),
        and 'MWA' (MWA).

    Returns
    --------
    np.ndarray
        The calculated Jones matrices with shape
        (num_stations, num_times, num_freqs, num_coord, 2, 2)
    """
    num_coords = len(ras)
    ##NaN-initialise so missing chunks would be obvious
    all_jones = np.zeros((len(station_ids), len(times), len(freqs),
                          num_coords, 2, 2), dtype=np.complex128)*np.nan
    chunk = int(np.ceil(num_coords / num_threads))
    with concurrent.futures.ProcessPoolExecutor(max_workers=num_threads) as executor:
        futures = [executor.submit(run_everybeam_thread,
                                   num_threads, thread_id,
                                   ms_path,
                                   coeff_path,
                                   ras, decs,
                                   ra0, dec0,
                                   j2000_latitudes, j2000_lsts,
                                   current_latitude, current_longitude,
                                   times, freqs,
                                   station_ids,
                                   use_differential_beam=use_differential_beam,
                                   apply_beam_norms=apply_beam_norms,
                                   reorder_jones=reorder_jones,
                                   element_only=element_only,
                                   eb_rotate=eb_rotate,
                                   parallactic_rotate=parallactic_rotate,
                                   para_angle_offset=para_angle_offset,
                                   element_response_model=element_response_model,
                                   use_local_mwa=use_local_mwa)
                   for thread_id in range(num_threads)]
        ##Slot each chunk straight into the output as it completes; the
        ##returned thread ID tells us which coordinate slice it covers
        for future in concurrent.futures.as_completed(futures):
            jones_chunk, thread_id = future.result()
            low_coord = thread_id * chunk
            all_jones[:, :, :, low_coord:low_coord + chunk, :, :] = jones_chunk
    return all_jones
|
JLBLineREPO_NAMEWODENPATH_START.@WODEN_extracted@WODEN-master@wodenpy@primary_beam@use_everybeam.py@.PATH_END.py
|
{
"filename": "test_sequential.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/tests/unit_tests/chains/test_sequential.py",
"type": "Python"
}
|
"""Test pipeline functionality."""
from typing import Dict, List, Optional
import pytest
from langchain_core.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.chains.base import Chain
from langchain.chains.sequential import SequentialChain, SimpleSequentialChain
from langchain.memory import ConversationBufferMemory
from langchain.memory.simple import SimpleMemory
from tests.unit_tests.callbacks.fake_callback_handler import FakeCallbackHandler
class FakeChain(Chain):
    """Fake Chain for testing purposes."""

    input_variables: List[str]
    output_variables: List[str]

    @property
    def input_keys(self) -> List[str]:
        """Input keys this chain returns."""
        return self.input_variables

    @property
    def output_keys(self) -> List[str]:
        """Input keys this chain returns."""
        return self.output_variables

    def _call(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        # Every output is the joined inputs with "foo" appended.
        joined = " ".join(inputs[key] for key in self.input_variables)
        return {name: f"{joined}foo" for name in self.output_variables}

    async def _acall(
        self,
        inputs: Dict[str, str],
        run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
    ) -> Dict[str, str]:
        # Async variant: identical transformation to _call.
        joined = " ".join(inputs[key] for key in self.input_variables)
        return {name: f"{joined}foo" for name in self.output_variables}
def test_sequential_usage_single_inputs() -> None:
    """Test sequential on single input chains."""
    first = FakeChain(input_variables=["foo"], output_variables=["bar"])
    second = FakeChain(input_variables=["bar"], output_variables=["baz"])
    pipeline = SequentialChain(chains=[first, second], input_variables=["foo"])  # type: ignore[call-arg]
    result = pipeline({"foo": "123"})
    assert result == {"baz": "123foofoo", "foo": "123"}
def test_sequential_usage_multiple_inputs() -> None:
    """Test sequential on multiple input chains."""
    first = FakeChain(input_variables=["foo", "test"], output_variables=["bar"])
    second = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
    pipeline = SequentialChain(chains=[first, second], input_variables=["foo", "test"])  # type: ignore[call-arg]
    result = pipeline({"foo": "123", "test": "456"})
    assert result == {
        "baz": "123 456foo 123foo",
        "foo": "123",
        "test": "456",
    }
def test_sequential_usage_memory() -> None:
    """Test sequential usage with memory."""
    # Memory keys that don't collide with chain inputs are merged into output.
    memory = SimpleMemory(memories={"zab": "rab"})
    first = FakeChain(input_variables=["foo"], output_variables=["bar"])
    second = FakeChain(input_variables=["bar"], output_variables=["baz"])
    pipeline = SequentialChain(  # type: ignore[call-arg]
        memory=memory, chains=[first, second], input_variables=["foo"]
    )
    result = pipeline({"foo": "123"})
    assert result == {"baz": "123foofoo", "foo": "123", "zab": "rab"}
    # A memory key that shadows an input variable must be rejected.
    memory = SimpleMemory(memories={"zab": "rab", "foo": "rab"})
    first = FakeChain(input_variables=["foo"], output_variables=["bar"])
    second = FakeChain(input_variables=["bar"], output_variables=["baz"])
    with pytest.raises(ValueError):
        SequentialChain(  # type: ignore[call-arg]
            memory=memory, chains=[first, second], input_variables=["foo"]
        )
def test_sequential_internal_chain_use_memory() -> None:
    """Test sequential usage with memory for one of the internal chains."""
    memory = ConversationBufferMemory(memory_key="bla")
    memory.save_context({"input": "yo"}, {"output": "ya"})
    first = FakeChain(
        input_variables=["foo", "bla"], output_variables=["bar"], memory=memory
    )
    second = FakeChain(input_variables=["bar"], output_variables=["baz"])
    pipeline = SequentialChain(chains=[first, second], input_variables=["foo"])  # type: ignore[call-arg]
    result = pipeline({"foo": "123"})
    print("HEYYY OUTPUT", result)  # noqa: T201
    assert result == {"foo": "123", "baz": "123 Human: yo\nAI: yafoofoo"}
def test_sequential_usage_multiple_outputs() -> None:
    """Test sequential usage on multiple output chains."""
    first = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
    second = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
    pipeline = SequentialChain(chains=[first, second], input_variables=["foo"])  # type: ignore[call-arg]
    result = pipeline({"foo": "123"})
    assert result == {
        "baz": "123foo 123foo",
        "foo": "123",
    }
def test_sequential_missing_inputs() -> None:
    """Test error is raised when input variables are missing."""
    first = FakeChain(input_variables=["foo"], output_variables=["bar"])
    # The second step also needs "test", which no step and no input provides.
    second = FakeChain(input_variables=["bar", "test"], output_variables=["baz"])
    with pytest.raises(ValueError):
        SequentialChain(chains=[first, second], input_variables=["foo"])  # type: ignore[call-arg]
def test_sequential_bad_outputs() -> None:
    """Test error is raised when bad outputs are specified."""
    steps = [
        FakeChain(input_variables=["foo"], output_variables=["bar"]),
        FakeChain(input_variables=["bar"], output_variables=["baz"]),
    ]
    with pytest.raises(ValueError):
        # "test" is not produced by any step, so it cannot be an output.
        SequentialChain(
            chains=steps,
            input_variables=["foo"],
            output_variables=["test"],
        )
def test_sequential_valid_outputs() -> None:
    """Test chain runs when valid outputs are specified."""
    steps = [
        FakeChain(input_variables=["foo"], output_variables=["bar"]),
        FakeChain(input_variables=["bar"], output_variables=["baz"]),
    ]
    chain = SequentialChain(
        chains=steps,
        input_variables=["foo"],
        output_variables=["bar", "baz"],
    )
    # return_only_outputs drops the original inputs from the result.
    result = chain({"foo": "123"}, return_only_outputs=True)
    assert result == {"baz": "123foofoo", "bar": "123foo"}
def test_sequential_overlapping_inputs() -> None:
    """Test error is raised when input variables are overlapping."""
    first = FakeChain(input_variables=["foo"], output_variables=["bar", "test"])
    second = FakeChain(input_variables=["bar"], output_variables=["baz"])
    with pytest.raises(ValueError):
        # "test" is declared as an input but is also produced by the first step.
        SequentialChain(chains=[first, second], input_variables=["foo", "test"])  # type: ignore[call-arg]
def test_simple_sequential_functionality() -> None:
    """Test simple sequential functionality."""
    chain = SimpleSequentialChain(
        chains=[
            FakeChain(input_variables=["foo"], output_variables=["bar"]),
            FakeChain(input_variables=["bar"], output_variables=["baz"]),
        ]
    )
    # SimpleSequentialChain exposes the fixed keys "input" and "output".
    assert chain({"input": "123"}) == {"output": "123foofoo", "input": "123"}
@pytest.mark.parametrize("is_async", [False, True])
async def test_simple_sequential_functionality_with_callbacks(is_async: bool) -> None:
    """Test simple sequential functionality with per-chain callbacks."""
    handler_1 = FakeCallbackHandler()
    handler_2 = FakeCallbackHandler()
    handler_3 = FakeCallbackHandler()
    chain_1 = FakeChain(
        input_variables=["foo"], output_variables=["bar"], callbacks=[handler_1]
    )
    chain_2 = FakeChain(
        input_variables=["bar"], output_variables=["baz"], callbacks=[handler_2]
    )
    chain_3 = FakeChain(
        input_variables=["jack"], output_variables=["baf"], callbacks=[handler_3]
    )
    chain = SimpleSequentialChain(chains=[chain_1, chain_2, chain_3])
    # Exercise both the sync and async invocation paths.
    if is_async:
        output = await chain.ainvoke({"input": "123"})
    else:
        output = chain({"input": "123"})
    expected_output = {"output": "123foofoofoo", "input": "123"}
    assert output == expected_output
    # Check that each of the callbacks was invoked once per the entire run.
    for handler in [handler_1, handler_2, handler_3]:
        assert handler.starts == 1
        assert handler.ends == 1
        assert handler.errors == 0
def test_multi_input_errors() -> None:
    """Test simple sequential errors if multiple input variables are expected."""
    single_input = FakeChain(input_variables=["foo"], output_variables=["bar"])
    double_input = FakeChain(input_variables=["bar", "foo"], output_variables=["baz"])
    with pytest.raises(ValueError):
        SimpleSequentialChain(chains=[single_input, double_input])
def test_multi_output_errors() -> None:
    """Test simple sequential errors if multiple output variables are expected."""
    wide_output = FakeChain(input_variables=["foo"], output_variables=["bar", "grok"])
    narrow_input = FakeChain(input_variables=["bar"], output_variables=["baz"])
    with pytest.raises(ValueError):
        SimpleSequentialChain(chains=[wide_output, narrow_input])
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@tests@unit_tests@chains@test_sequential.py@.PATH_END.py
|
{
"filename": "_gridwidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/xaxis/minor/_gridwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class GridwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``layout.xaxis.minor.gridwidth`` (a non-negative number)."""

    def __init__(
        self, plotly_name="gridwidth", parent_name="layout.xaxis.minor", **kwargs
    ):
        # Schema defaults; explicit keyword arguments still take precedence.
        kwargs.setdefault("edit_type", "ticks")
        kwargs.setdefault("min", 0)
        super(GridwidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@xaxis@minor@_gridwidth.py@.PATH_END.py
|
{
"filename": "MM_v4.md",
"repo_name": "rpoleski/MulensModel",
"repo_path": "MulensModel_extracted/MulensModel-master/documents/MM_v4.md",
"type": "Markdown"
}
|
# What we want to change when going from v3.X.Y to v4.0.0?
Once the changes are accepted to be made, **mark them in the code using warnings.warn("XXX", FutureWarning)** and note it here. Also release a version that differs from previous one only in these warnings - this will allow users to correct their codes. Also give **suggested changes in warnings**.
### Major changes:
???
### Minor changes:
* Remove ModelParameters.as\_dict() because it is the same as ModelParameters.parameters
* `ModelParameters.is_static` -> `is_lens_static`
* ephemerides\_file -> ephemeris\_file - maybe
* Model.get\_residuals should have keyword phot\_fmt, not type to be consistent with other functions
* test\_MulensData.py - in test\_copy() remove warnings.catch\_warnings() because you remove coords, ra, and dec from init
### Yet unsorted/undecided:
* remove MulensData.bad - see https://github.com/rpoleski/MulensModel/issues/40
* `Model.set\_times()` - `n\_epochs` should be None as default, so that we can check if both dt and `n\_epochs` were set
* Caustics.get\_caustics() should return np.arrays, not lists
* check all NotImplementedError and maybe remove some functions/options
* new class for a collection of datasets to make looping over datasets easier; also there will be data\_ref defined
* see [this comment by Jen](https://github.com/rpoleski/MulensModel/pull/15#issuecomment-1080879537) on how magnification methods are named and called in different parts of the code
* Add an Observatory class - for terrestrial parallax
|
rpoleskiREPO_NAMEMulensModelPATH_START.@MulensModel_extracted@MulensModel-master@documents@MM_v4.md@.PATH_END.py
|
{
"filename": "GLMeshItem.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/examples/GLMeshItem.py",
"type": "Python"
}
|
"""
Simple examples demonstrating the use of GLMeshItem.
"""
import pyqtgraph as pg
import pyqtgraph.opengl as gl
app = pg.mkQApp("GLMeshItem Example")
w = gl.GLViewWidget()
w.show()
w.setWindowTitle('pyqtgraph example: GLMeshItem')
w.setCameraPosition(distance=40)
g = gl.GLGridItem()
g.scale(2,2,1)
w.addItem(g)
import numpy as np
## Example 1:
## Array of vertex positions and array of vertex indexes defining faces
## Colors are specified per-face
verts = np.array([
[0, 0, 0],
[2, 0, 0],
[1, 2, 0],
[1, 1, 1],
])
faces = np.array([
[0, 1, 2],
[0, 1, 3],
[0, 2, 3],
[1, 2, 3]
])
colors = np.array([
[1, 0, 0, 0.3],
[0, 1, 0, 0.3],
[0, 0, 1, 0.3],
[1, 1, 0, 0.3]
])
## Mesh item will automatically compute face normals.
m1 = gl.GLMeshItem(vertexes=verts, faces=faces, faceColors=colors, smooth=False)
m1.translate(5, 5, 0)
m1.setGLOptions('additive')
w.addItem(m1)
## Example 2:
## Array of vertex positions, three per face
verts = np.empty((36, 3, 3), dtype=np.float32)
theta = np.linspace(0, 2*np.pi, 37)[:-1]
verts[:,0] = np.vstack([2*np.cos(theta), 2*np.sin(theta), [0]*36]).T
verts[:,1] = np.vstack([4*np.cos(theta+0.2), 4*np.sin(theta+0.2), [-1]*36]).T
verts[:,2] = np.vstack([4*np.cos(theta-0.2), 4*np.sin(theta-0.2), [1]*36]).T
## Colors are specified per-vertex
colors = np.random.random(size=(verts.shape[0], 3, 4))
m2 = gl.GLMeshItem(vertexes=verts, vertexColors=colors, smooth=False, shader='balloon',
drawEdges=True, edgeColor=(1, 1, 0, 1))
m2.translate(-5, 5, 0)
w.addItem(m2)
## Example 3:
## sphere
md = gl.MeshData.sphere(rows=10, cols=20)
#colors = np.random.random(size=(md.faceCount(), 4))
#colors[:,3] = 0.3
#colors[100:] = 0.0
colors = np.ones((md.faceCount(), 4), dtype=float)
colors[::2,0] = 0
colors[:,1] = np.linspace(0, 1, colors.shape[0])
md.setFaceColors(colors)
m3 = gl.GLMeshItem(meshdata=md, smooth=False)#, shader='balloon')
m3.translate(5, -5, 0)
w.addItem(m3)
# Example 4:
# wireframe
md = gl.MeshData.sphere(rows=4, cols=8)
m4 = gl.GLMeshItem(meshdata=md, smooth=False, drawFaces=False, drawEdges=True, edgeColor=(1,1,1,1))
m4.translate(0,10,0)
w.addItem(m4)
# Example 5:
# cylinder
md = gl.MeshData.cylinder(rows=10, cols=20, radius=[1., 2.0], length=5.)
md2 = gl.MeshData.cylinder(rows=10, cols=20, radius=[2., 0.5], length=10.)
colors = np.ones((md.faceCount(), 4), dtype=float)
colors[::2,0] = 0
colors[:,1] = np.linspace(0, 1, colors.shape[0])
md.setFaceColors(colors)
m5 = gl.GLMeshItem(meshdata=md, smooth=True, drawEdges=True, edgeColor=(1,0,0,1), shader='balloon')
colors = np.ones((md.faceCount(), 4), dtype=float)
colors[::2,0] = 0
colors[:,1] = np.linspace(0, 1, colors.shape[0])
md2.setFaceColors(colors)
m6 = gl.GLMeshItem(meshdata=md2, smooth=True, drawEdges=False, shader='balloon')
m6.translate(0,0,7.5)
m6.rotate(0., 0, 1, 1)
#m5.translate(-3,3,0)
w.addItem(m5)
w.addItem(m6)
if __name__ == '__main__':
pg.exec()
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@examples@GLMeshItem.py@.PATH_END.py
|
{
"filename": "Example_CNNs.py",
"repo_name": "xpsi-group/xpsi",
"repo_path": "xpsi_extracted/xpsi-main/xpsi/utilities/Example_CNNs.py",
"type": "Python"
}
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import (resnet18, ResNet18_Weights,
resnet34, ResNet34_Weights,
resnet50, ResNet50_Weights,
resnet101, ResNet101_Weights,
resnet152, ResNet152_Weights,
resnext50_32x4d,
resnext101_32x8d,
resnext101_64x4d)
class C2P1_FC1(nn.Module):
    """
    Two convolutional layers followed by one pooling layer and one fully
    connected layer.

    Args
    ----
    nchannels : int
        Number of energy bins.
    nphases : int
        Number of phase bins.
    out_channels : int, optional
        Number of channels in the first convolutional layer (the second
        layer uses twice as many). Default is 16.
    kernel_size : int, optional
        Kernel size of the convolutional layers. Default is 3.
    pool_kernel_size : int, optional
        Kernel size of the pooling layer. Default is 2.
    pool_stride : int, optional
        Stride of the pooling layer. Default is 2.
    fc_out_features : int, optional
        Number of output features of the fully connected layer. Default is 10.

    Attributes
    ----------
    conv1 : torch.nn.LazyConv2d
        First convolutional layer.
    conv2 : torch.nn.LazyConv2d
        Second convolutional layer.
    pool : torch.nn.MaxPool2d
        Pooling layer.
    fc : torch.nn.LazyLinear
        Fully connected layer.
    """

    def __init__(self, nchannels, nphases,
                 out_channels=16, kernel_size=3,
                 pool_kernel_size=2, pool_stride=2,
                 fc_out_features=10):
        super().__init__()
        # "Same"-style padding for odd kernel sizes.
        pad = kernel_size // 2
        self.conv1 = nn.LazyConv2d(out_channels=out_channels, kernel_size=kernel_size, padding=pad)
        self.conv2 = nn.LazyConv2d(out_channels=2 * out_channels, kernel_size=kernel_size, padding=pad)
        self.pool = nn.MaxPool2d(kernel_size=pool_kernel_size, stride=pool_stride)
        self.fc = nn.LazyLinear(out_features=fc_out_features)
        self.nchannels = nchannels
        self.nphases = nphases

    def forward(self, x):
        # Treat the input as a single-channel (energy x phase) image.
        images = x.view(-1, 1, self.nchannels, self.nphases)
        hidden = F.relu(self.conv1(images))
        hidden = F.relu(self.conv2(hidden))
        hidden = self.pool(hidden)
        flat = torch.flatten(hidden, 1)
        return F.relu(self.fc(flat))
class C2P1_C1P1_FC1(nn.Module):
    """
    Two convolutional layers followed by one pooling layer, then one more
    convolutional layer, another pooling layer, and a fully connected layer.

    Args
    ----
    nchannels : int
        Number of energy bins.
    nphases : int
        Number of phase bins.
    out_channels : int, optional
        Number of channels in the first convolutional layer (the second and
        third layers use 2x and 4x as many). Default is 16.
    kernel_size : int, optional
        Kernel size of the convolutional layers. Default is 3.
    pool_kernel_size : int, optional
        Kernel size of the pooling layers. Default is 2.
    pool_stride : int, optional
        Stride of the pooling layers. Default is 2.
    fc_out_features : int, optional
        Number of output features of the fully connected layer. Default is 10.

    Attributes
    ----------
    conv1 : torch.nn.LazyConv2d
        First convolutional layer.
    conv2 : torch.nn.LazyConv2d
        Second convolutional layer.
    pool1 : torch.nn.MaxPool2d
        First pooling layer.
    conv3 : torch.nn.LazyConv2d
        Third convolutional layer.
    pool2 : torch.nn.MaxPool2d
        Second pooling layer.
    fc : torch.nn.LazyLinear
        Fully connected layer.
    """

    def __init__(self, nchannels, nphases,
                 out_channels=16, kernel_size=3,
                 pool_kernel_size=2, pool_stride=2,
                 fc_out_features=10):
        super().__init__()
        # "Same"-style padding for odd kernel sizes.
        pad = kernel_size // 2
        self.conv1 = nn.LazyConv2d(out_channels=out_channels, kernel_size=kernel_size, padding=pad)
        self.conv2 = nn.LazyConv2d(out_channels=2 * out_channels, kernel_size=kernel_size, padding=pad)
        self.pool1 = nn.MaxPool2d(kernel_size=pool_kernel_size, stride=pool_stride)
        self.conv3 = nn.LazyConv2d(out_channels=4 * out_channels, kernel_size=kernel_size, padding=pad)
        self.pool2 = nn.MaxPool2d(kernel_size=pool_kernel_size, stride=pool_stride)
        self.fc = nn.LazyLinear(out_features=fc_out_features)
        self.nchannels = nchannels
        self.nphases = nphases

    def forward(self, x):
        # Treat the input as a single-channel (energy x phase) image.
        maps = x.view(-1, 1, self.nchannels, self.nphases)
        maps = self.pool1(F.relu(self.conv2(F.relu(self.conv1(maps)))))
        maps = self.pool2(F.relu(self.conv3(maps)))
        return F.relu(self.fc(torch.flatten(maps, 1)))
class embedding_resnet(nn.Module):
    """
    A class that wraps a pre-trained `ResNet/ResNeXt model
    <https://pytorch.org/vision/stable/models.html#general-information-on-pre-trained-weights>`_,
    freezes the backbone, unfreezes the last backbone stage, and replaces the
    classification head with a fresh 100-feature linear layer.

    Args
    ----
    resnet_version (str): The version of ResNet/ResNeXt to use
        (one of 'resnet18', 'resnet34', 'resnet50',
        'resnet101', 'resnet152', 'resnext50',
        'resnext101', 'resnext101_64x4d').

    Raises
    ------
    ValueError: If ``resnet_version`` is not one of the supported names.

    Attributes
    ----------
    resnet: The pre-trained ResNet/ResNeXt model; its ``fc`` attribute is
        replaced by a new ``nn.Linear(..., 100)`` head.
    """

    def __init__(self, resnet_version='resnet18'):
        super().__init__()
        # Select the requested pre-trained backbone (downloads weights on
        # first use).
        # NOTE(review): the resnext branches do not print a confirmation
        # message like the resnet branches do — confirm whether intentional.
        if resnet_version == 'resnet18':
            self.resnet = resnet18(weights=ResNet18_Weights.DEFAULT)
            print('Using resnet18')
        elif resnet_version == 'resnet34':
            self.resnet = resnet34(weights=ResNet34_Weights.DEFAULT)
            print('Using resnet34')
        elif resnet_version == 'resnet50':
            self.resnet = resnet50(weights=ResNet50_Weights.DEFAULT)
            print('Using resnet50')
        elif resnet_version == 'resnet101':
            self.resnet = resnet101(weights=ResNet101_Weights.DEFAULT)
            print('Using resnet101')
        elif resnet_version == 'resnet152':
            self.resnet = resnet152(weights=ResNet152_Weights.DEFAULT)
            print('Using resnet152')
        elif resnet_version == 'resnext50':
            self.resnet = resnext50_32x4d(weights='DEFAULT')
        elif resnet_version == 'resnext101':
            self.resnet = resnext101_32x8d(weights='DEFAULT')
        elif resnet_version == 'resnext101_64x4d':
            self.resnet = resnext101_64x4d(weights='DEFAULT')
        else:
            raise ValueError(f'Invalid resnet version: {resnet_version}')

        # Freeze the backbone layers
        for param in self.resnet.parameters():
            param.requires_grad = False
        # Unfreeze the last few layers for fine-tuning
        for param in self.resnet.layer4.parameters():
            param.requires_grad = True
        for param in self.resnet.fc.parameters():
            param.requires_grad = True
        # Replace the classification head; the new Linear layer is trainable
        # by default, so the unfreeze loop above on the old fc is superseded.
        self.resnet.fc = nn.Linear(self.resnet.fc.in_features, 100)

    def forward(self, x):
        # Convert single-channel input to 3-channel by replication, since the
        # pre-trained backbones expect 3-channel input.
        x = x.unsqueeze(1)
        x = torch.cat([x, x, x], dim=1)
        x = self.resnet(x)
        return x
|
xpsi-groupREPO_NAMExpsiPATH_START.@xpsi_extracted@xpsi-main@xpsi@utilities@Example_CNNs.py@.PATH_END.py
|
{
"filename": "calculate_tori_magnetization.py",
"repo_name": "IAS-Astrophysics/athenak",
"repo_path": "athenak_extracted/athenak-main/vis/python/calculate_tori_magnetization.py",
"type": "Python"
}
|
#! /usr/bin/env python3
"""
Script for calculating average magnetization in an AthenaK GRMHD data dump.
Usage:
[python3] calculate_tori_magnetization.py <input_file> [options]
Example:
~/athenak/vis/python/calculate_tori_magnetization.py basename.prim.00000.bin
<input_file> can be any standard AthenaK .bin data dump that uses GR (Cartesian
Kerr-Schild coordinates) and MHD.
Options include:
--r_max: maximum radial coordinate to consider in the analysis
--rho_min: minimum code density to consider in the analysis
Run "calculate_tori_magnetization.py -h" to see a full description of inputs.
The results will be printed to screen. The include volume- and mass-weighted
averages of plasma sigma and beta^{-1} over the region of interest.
The domain extends from the outer horizon to r <= r_max (default: infinity), and
counts cells with rho >= rho_min (default: 0). Volume weighting weights cells by
dV = sqrt(-g)*dx*dy*dz = dx*dy*dz. Mass weighting weights cells by dm = rho*dV.
Plasma sigma is defined as sigma = b_mu b^mu / rho. Plasma beta^{-1} is defined
as beta^{-1} = b_mu b^mu / (2 p_gas). Radiation is not considered in this
calculation.
"""
# Python standard modules
import argparse
import struct
# Numerical modules
import numpy as np
# Main function
def main(**kwargs):
    """Compute volume- and mass-weighted averages of sigma and beta^-1.

    Keyword arguments (supplied by argparse via ``main(**vars(args))``):
        filename : str
            Name of the AthenaK .bin primitive dump to analyze.
        r_max : float
            Maximum radial coordinate to include in the averages.
        rho_min : float
            Minimum code density to include in the averages.

    Results are printed to screen.
    """

    # Parameters
    variable_names = ('dens', 'eint', 'velx', 'vely', 'velz', 'bcc1', 'bcc2', 'bcc3')

    # Prepare summed values
    vol_sum = 0.0
    mass_sum = 0.0
    sigma_vol_sum = 0.0
    sigma_mass_sum = 0.0
    beta_inv_vol_sum = 0.0
    beta_inv_mass_sum = 0.0

    # Read data
    with open(kwargs['filename'], 'rb') as f:

        # Get file size
        f.seek(0, 2)
        file_size = f.tell()
        f.seek(0, 0)

        # Read header metadata
        line = f.readline().decode('ascii')
        if line != 'Athena binary output version=1.1\n':
            raise RuntimeError('Unrecognized data file format.')
        next(f)
        next(f)
        next(f)
        line = f.readline().decode('ascii')
        if line[:19] != '  size of location=':
            raise RuntimeError('Could not read location size.')
        location_size = int(line[19:])
        line = f.readline().decode('ascii')
        if line[:19] != '  size of variable=':
            raise RuntimeError('Could not read variable size.')
        variable_size = int(line[19:])
        next(f)
        line = f.readline().decode('ascii')
        if line[:12] != '  variables:':
            raise RuntimeError('Could not read variable names.')
        variable_names_base = line[12:].split()
        line = f.readline().decode('ascii')
        if line[:16] != '  header offset=':
            raise RuntimeError('Could not read header offset.')
        header_offset = int(line[16:])

        # Process header metadata
        if location_size not in (4, 8):
            raise RuntimeError('Only 4- and 8-byte integer types supported '
                               'for location data.')
        location_format = 'f' if location_size == 4 else 'd'
        if variable_size not in (4, 8):
            raise RuntimeError('Only 4- and 8-byte integer types supported '
                               'for cell data.')
        variable_format = 'f' if variable_size == 4 else 'd'
        num_variables_base = len(variable_names_base)
        # Map each requested variable name to its column index in the file.
        variable_inds = []
        for variable_name in variable_names:
            if variable_name not in variable_names_base:
                raise RuntimeError('{0} not found.'.format(variable_name))
            variable_ind = 0
            while variable_names_base[variable_ind] != variable_name:
                variable_ind += 1
            variable_inds.append(variable_ind)
        variable_names_sorted = [name for _, name
                                 in sorted(zip(variable_inds, variable_names))]
        variable_inds_sorted = [ind for ind, _
                                in sorted(zip(variable_inds, variable_names))]

        # Read input file metadata
        input_data = {}
        start_of_data = f.tell() + header_offset
        while f.tell() < start_of_data:
            line = f.readline().decode('ascii')
            if line[0] == '#':
                continue
            if line[0] == '<':
                section_name = line[1:-2]
                input_data[section_name] = {}
                continue
            key, val = line.split('=', 1)
            input_data[section_name][key.strip()] = val.split('#', 1)[0].strip()

        # Extract number of ghost cells from input file metadata
        try:
            num_ghost = int(input_data['mesh']['nghost'])
        except:  # noqa: E722
            raise RuntimeError('Unable to find number of ghost cells in input file.')

        # Extract adiabatic index from input file metadata
        try:
            gamma_adi = float(input_data['hydro']['gamma'])
        except:  # noqa: E722
            try:
                gamma_adi = float(input_data['mhd']['gamma'])
            except:  # noqa: E722
                raise RuntimeError('Unable to find adiabatic index in input file.')

        # Extract black hole spin from input file metadata
        try:
            a = float(input_data['coord']['a'])
            a2 = a ** 2
        except:  # noqa: E722
            raise RuntimeError('Unable to find black hole spin in input file.')

        # Prepare lists to hold results
        # (NOTE(review): this dict is rebuilt from scratch for every block in
        # the loop below, so these initial empty lists are never used.)
        quantities = {}
        for name in variable_names_sorted:
            quantities[name] = []

        # Go through blocks
        first_time = True
        while f.tell() < file_size:

            # Read and process grid structure data
            if first_time:
                block_indices = [block_index - num_ghost
                                 for block_index in struct.unpack('@6i', f.read(24))]
                block_nx = block_indices[1] - block_indices[0] + 1
                block_ny = block_indices[3] - block_indices[2] + 1
                block_nz = block_indices[5] - block_indices[4] + 1
                cells_per_block = block_nz * block_ny * block_nx
                block_cell_format = '=' + str(cells_per_block) + variable_format
                variable_data_size = cells_per_block * variable_size
                first_time = False
            else:
                f.seek(24, 1)
            f.seek(16, 1)

            # Read and process coordinate data
            block_lims = struct.unpack('=6' + location_format, f.read(6 * location_size))
            xf, dx = np.linspace(block_lims[0], block_lims[1], block_nx + 1, retstep=True)
            yf, dy = np.linspace(block_lims[2], block_lims[3], block_ny + 1, retstep=True)
            zf, dz = np.linspace(block_lims[4], block_lims[5], block_nz + 1, retstep=True)
            # Cell-center coordinates from the face coordinates.
            x = 0.5 * (xf[:-1] + xf[1:])
            y = 0.5 * (yf[:-1] + yf[1:])
            z = 0.5 * (zf[:-1] + zf[1:])

            # Read cell data
            quantities = {}
            cell_data_start = f.tell()
            for ind, name in zip(variable_inds_sorted, variable_names_sorted):
                f.seek(cell_data_start + ind * variable_data_size, 0)
                quantities[name] = np.array(struct.unpack(block_cell_format,
                                                          f.read(variable_data_size))). \
                    reshape(block_nz, block_ny, block_nx)
            # Skip past the remaining (unread) variables of this block.
            f.seek((num_variables_base - ind - 1) * variable_data_size, 1)

            # Calculate radial coordinate
            rr2 = np.maximum(x[None, None, :] ** 2 + y[None, :, None] ** 2
                             + z[:, None, None] ** 2, 1.0)
            r2 = 0.5 * (rr2 - a2 + np.sqrt((rr2 - a2) ** 2
                                           + 4.0 * a2 * z[:, None, None] ** 2))
            r = np.sqrt(r2)

            # Calculate volume and mass
            # Cells inside the outer horizon, outside r_max, or below rho_min
            # are excluded by setting their volume to NaN (nansum skips them).
            rho = quantities['dens']
            vol = np.full_like(r, dx * dy * dz)
            vol = np.where(r < 1.0 + (1.0 - a2) ** 0.5, np.nan, vol)
            vol = np.where(r > kwargs['r_max'], np.nan, vol)
            vol = np.where(rho < kwargs['rho_min'], np.nan, vol)
            mass = rho * vol

            # Calculate metric
            factor = 2.0 * r2 * r / (r2 ** 2 + a2 * z[:, None, None] ** 2)
            l1 = (r * x[None, None, :] + a * y[None, :, None]) / (r2 + a2)
            l2 = (r * y[None, :, None] - a * x[None, None, :]) / (r2 + a2)
            l3 = z[:, None, None] / r
            g_00 = factor - 1.0
            g_01 = factor * l1
            g_02 = factor * l2
            g_03 = factor * l3
            g_11 = factor * l1 ** 2 + 1.0
            g_12 = factor * l1 * l2
            g_13 = factor * l1 * l3
            g_22 = factor * l2 ** 2 + 1.0
            g_23 = factor * l2 * l3
            g_33 = factor * l3 ** 2 + 1.0
            g00 = -factor - 1.0
            g01 = factor * l1
            g02 = factor * l2
            g03 = factor * l3
            alpha = 1.0 / np.sqrt(-g00)
            beta1 = -g01 / g00
            beta2 = -g02 / g00
            beta3 = -g03 / g00

            # Calculate gas pressure
            pgas = quantities['eint'] * (gamma_adi - 1.0)

            # Calculate velocity
            uu1 = quantities['velx']
            uu2 = quantities['vely']
            uu3 = quantities['velz']
            uu0 = np.sqrt(1.0 + g_11 * uu1 ** 2 + 2.0 * g_12 * uu1 * uu2
                          + 2.0 * g_13 * uu1 * uu3 + g_22 * uu2 ** 2
                          + 2.0 * g_23 * uu2 * uu3 + g_33 * uu3 ** 2)
            u0 = uu0 / alpha
            u1 = uu1 - beta1 * u0
            u2 = uu2 - beta2 * u0
            u3 = uu3 - beta3 * u0
            u_0 = g_00 * u0 + g_01 * u1 + g_02 * u2 + g_03 * u3  # noqa: F841
            u_1 = g_01 * u0 + g_11 * u1 + g_12 * u2 + g_13 * u3
            u_2 = g_02 * u0 + g_12 * u1 + g_22 * u2 + g_23 * u3
            u_3 = g_03 * u0 + g_13 * u1 + g_23 * u2 + g_33 * u3

            # Calculate magnetic field
            bb1 = quantities['bcc1']
            bb2 = quantities['bcc2']
            bb3 = quantities['bcc3']
            b0 = u_1 * bb1 + u_2 * bb2 + u_3 * bb3
            b1 = (bb1 + b0 * u1) / u0
            b2 = (bb2 + b0 * u2) / u0
            b3 = (bb3 + b0 * u3) / u0
            b_0 = g_00 * b0 + g_01 * b1 + g_02 * b2 + g_03 * b3
            b_1 = g_01 * b0 + g_11 * b1 + g_12 * b2 + g_13 * b3
            b_2 = g_02 * b0 + g_12 * b1 + g_22 * b2 + g_23 * b3
            b_3 = g_03 * b0 + g_13 * b1 + g_23 * b2 + g_33 * b3
            pmag = (b_0 * b0 + b_1 * b1 + b_2 * b2 + b_3 * b3) / 2.0

            # Add to summed values
            vol_sum += np.nansum(vol)
            mass_sum += np.nansum(mass)
            sigma_vol_sum += np.nansum(2.0 * pmag / rho * vol)
            sigma_mass_sum += np.nansum(2.0 * pmag / rho * mass)
            beta_inv_vol_sum += np.nansum(pmag / pgas * vol)
            beta_inv_mass_sum += np.nansum(pmag / pgas * mass)

    # Report results
    print('')
    print('<sigma>_vol = ' + repr(sigma_vol_sum / vol_sum))
    print('<sigma>_mass = ' + repr(sigma_mass_sum / mass_sum))
    print('<beta_inv>_vol = ' + repr(beta_inv_vol_sum / vol_sum))
    print('<beta_inv>_mass = ' + repr(beta_inv_mass_sum / mass_sum))
    print('')
# Process inputs and execute main function
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('filename', help='name of primitive file to analyze')
    arg_parser.add_argument('--r_max', type=float, default=np.inf,
                            help='maximum radius to analyze')
    arg_parser.add_argument('--rho_min', type=float, default=0.0,
                            help='minimum density to analyze')
    main(**vars(arg_parser.parse_args()))
|
IAS-AstrophysicsREPO_NAMEathenakPATH_START.@athenak_extracted@athenak-main@vis@python@calculate_tori_magnetization.py@.PATH_END.py
|
{
"filename": "_tickformatstops.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/treemap/marker/colorbar/_tickformatstops.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for the ``tickformatstops`` compound-array property of
    ``treemap.marker.colorbar``; each element is a ``Tickformatstop``."""

    def __init__(
        self,
        plotly_name="tickformatstops",
        parent_name="treemap.marker.colorbar",
        **kwargs,
    ):
        super(TickformatstopsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the data class each array element is built from.
            data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            dtickrange
                range [*min*, *max*], where "min", "max" -
                dtick values which describe some zoom level, it
                is possible to omit "min" or "max" value by
                passing "null"
            enabled
                Determines whether or not this stop is used. If
                `false`, this stop is ignored even within its
                `dtickrange`.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            value
                string - dtickformat for described zoom level,
                the same as "tickformat"
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@treemap@marker@colorbar@_tickformatstops.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/contourcarpet/contours/labelfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the ``family`` font property of
    ``contourcarpet.contours.labelfont``."""

    def __init__(
        self,
        plotly_name="family",
        parent_name="contourcarpet.contours.labelfont",
        **kwargs
    ):
        # Defaults mirror the Plotly schema; callers may override any of them.
        edit_type = kwargs.pop("edit_type", "plot")
        no_blank = kwargs.pop("no_blank", True)
        role = kwargs.pop("role", "style")
        strict = kwargs.pop("strict", True)
        # Explicit two-argument super() kept: this module targets Python 2.
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            no_blank=no_blank,
            role=role,
            strict=strict,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@contourcarpet@contours@labelfont@_family.py@.PATH_END.py
|
{
"filename": "PublicData_10y_ps_wMC.py",
"repo_name": "icecube/skyllh",
"repo_path": "skyllh_extracted/skyllh-master/skyllh/datasets/i3/PublicData_10y_ps_wMC.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Author: Dr. Martin Wolf <mail@martin-wolf.org>
import numpy as np
from skyllh.datasets.i3 import (
PublicData_10y_ps,
)
def create_dataset_collection(
        cfg,
        base_path=None,
        sub_path_fmt=None,
):
    """Defines the dataset collection for IceCube's 10-year
    point-source public data, which is available at
    http://icecube.wisc.edu/data-releases/20210126_PS-IC40-IC86_VII.zip, and
    adds monte-carlo files.

    Parameters
    ----------
    cfg : instance of Config
        The instance of Config holding the local configuration.
    base_path : str | None
        The base path of the data files. The actual path of a data file is
        assumed to be of the structure <base_path>/<sub_path>/<file_name>.
        If None, use the default path ``cfg['repository']['base_path']``.
    sub_path_fmt : str | None
        The sub path format of the data files of the public data sample.
        If None, use the default sub path format
        'icecube_10year_ps'.

    Returns
    -------
    dsc : DatasetCollection
        The dataset collection containing all the seasons as individual
        I3Dataset objects.
    """
    # Start from the public-data collection and extend it with MC file paths.
    dsc = PublicData_10y_ps.create_dataset_collection(
        cfg=cfg,
        base_path=base_path,
        sub_path_fmt=sub_path_fmt)

    dsc.description += """
    This dataset collection has monte-carlo (MC) files defined for each dataset.
    These MC files are not part of the original data release and need to be
    generated by the user.
    """

    (
        IC40,
        IC59,
        IC79,
        IC86_I,
        IC86_II,
        IC86_III,
        IC86_IV,
        IC86_V,
        IC86_VI,
        IC86_VII,
        IC86_II_VII,
    ) = dsc[
        'IC40',
        'IC59',
        'IC79',
        'IC86_I',
        'IC86_II',
        'IC86_III',
        'IC86_IV',
        'IC86_V',
        'IC86_VI',
        'IC86_VII',
        'IC86_II-VII',
    ]

    # Seasons IC86_II through IC86_VII share a single combined MC file.
    IC40.mc_pathfilename_list = 'sim/IC40_MC.npy'
    IC59.mc_pathfilename_list = 'sim/IC59_MC.npy'
    IC79.mc_pathfilename_list = 'sim/IC79_MC.npy'
    IC86_I.mc_pathfilename_list = 'sim/IC86_I_MC.npy'
    IC86_II.mc_pathfilename_list = 'sim/IC86_II-VII_MC.npy'
    IC86_III.mc_pathfilename_list = IC86_II.mc_pathfilename_list
    IC86_IV.mc_pathfilename_list = IC86_II.mc_pathfilename_list
    IC86_V.mc_pathfilename_list = IC86_II.mc_pathfilename_list
    IC86_VI.mc_pathfilename_list = IC86_II.mc_pathfilename_list
    IC86_VII.mc_pathfilename_list = IC86_II.mc_pathfilename_list
    IC86_II_VII.mc_pathfilename_list = IC86_II.mc_pathfilename_list

    def add_time(data):
        """Append an all-zero 'time' field to the MC events."""
        mc = data.mc
        mc.append_field('time', np.repeat(0, len(mc)))

    def add_azimuth_and_zenith(data):
        """Append all-zero 'azi' and 'zen' fields to the MC events."""
        mc = data.mc
        mc.append_field('azi', np.repeat(0, len(mc)))
        mc.append_field('zen', np.repeat(0, len(mc)))

    dsc.add_data_preparation(add_time)
    dsc.add_data_preparation(add_azimuth_and_zenith)

    return dsc
|
icecubeREPO_NAMEskyllhPATH_START.@skyllh_extracted@skyllh-master@skyllh@datasets@i3@PublicData_10y_ps_wMC.py@.PATH_END.py
|
{
"filename": "phot_datalab.py",
"repo_name": "astro-datalab/nsc",
"repo_path": "nsc_extracted/nsc-master/python/nsc/phot_datalab.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# PHOT.PY - SExtractor and DAOPHOT routines
#
from __future__ import print_function
__authors__ = 'David Nidever <dnidever@noao.edu>'
__version__ = '20180823' # yyyymmdd
import os
import sys
import numpy as np
import warnings
from astropy.io import fits
from astropy.wcs import WCS
from astropy.utils.exceptions import AstropyWarning
from astropy.table import Table, Column
import time
import shutil
import subprocess
import os
#import requests
#import urllib.request
import pandas as pd
import logging
#from scipy.signal import convolve2d
from dlnpyutils.utils import *
from scipy.ndimage.filters import convolve
import astropy.stats
import struct
import tempfile
# Silence benign binary-compatibility warnings raised when compiled extensions
# were built against a different numpy version (reported upstream as a bug).
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
# Parse the DAOPHOT PSF profile errors
def parseprofs(lines):
    '''
    Parse the PSF profile-error output of the DAOPHOT PSF program.

    Each input line holds up to five fixed-width columns of 17 characters,
    each column being (ID, SIG, FLAG), e.g.:
      1044  0.010      1039  0.010       304  0.013      1118  0.020       119  0.027

    From psf.f: SIG is the root-mean-square scatter about the best-fitting
    analytic function averaged over the central disk of radius FITRAD,
    expressed as a fraction of the peak amplitude of the analytic model.

    Parameters
    ----------
    lines : list
       The list of string lines from the DAOPHOT PSF program for the profile errors.

    Returns
    -------
    profs : numpy structured array
        Catalog with ID, SIG and FLAG columns for the PSF stars.

    Example
    -------

    .. code-block:: python

        profs = parseprofs(lines)

    '''
    dtype = np.dtype([('ID',int),('SIG',float),('FLAG',np.str_,10)])
    # Allocate the maximum possible size (5 entries per line); trim later
    profs = np.zeros(len(lines)*5, dtype=dtype)
    profs['ID'] = -1
    nfilled = 0
    for raw in lines:
        text = raw.rstrip()
        if text == "":
            continue
        # Walk the five 17-character columns of this line
        for col in range(5):
            chunk = text[col*17:col*17+17]
            idstr = chunk[0:7]
            sigstr = chunk[7:14]
            flagstr = chunk[14:17]
            # Saturated/defective stars have text instead of a SIG value
            if sigstr == " satura" or sigstr == " defect":
                sigstr = 99.99
                flagstr = "saturated"
            if idstr.strip() == "":
                continue
            profs[nfilled]['ID'] = int(idstr)
            profs[nfilled]['SIG'] = float(sigstr)
            profs[nfilled]['FLAG'] = flagstr.strip()
            nfilled += 1
    # Drop the unused trailing entries
    return profs[profs['ID'] > -1]
# Parse the DAOPHOT PSF parameter errors
def parsepars(lines):
    '''
    This parses the PSF parameter errors output from the DAOPHOT PSF program.
    It returns a list (one element per line) where each element contains
    a list of the 3-5 parameters.
    This is an example of lines of the PSF parameter errors:
         Chi    Parameters...
      >>   0.0319   1.79190  1.69498
      >>   0.0382   1.21314  1.26585 -0.00693
      >>   0.0215   1.62418  1.52379 -0.00385
      >>   0.0196   1.66754  1.57059 -0.00304
      >>   0.0543   1.41140  1.30613 -0.00290
      >>   0.0197   1.68487  1.58727  0.68797 -0.00305

    Parameters
    ----------
    lines : list
       The list of string lines from the DAOPHOT PSF program for the parameter errors.

    Returns
    -------
    out : list
       The list of lists containing the individual parameters (as strings).
    chi : list
       The list of chi values, one per parsed line.

    Example
    -------

    .. code-block:: python

        out, chi = parsepars(lines)

    '''
    # Only want lines with ">>"
    # sometimes there is a "Failed to converge." line mixed in
    lines2 = [l for l in lines if ">>" in l]
    if len(lines2)==0:
        print("No lines with >> found")
        return None, None
    out = []
    chi = []
    for i in range(len(lines2)):
        line1 = lines2[i].strip()
        if line1[0:2] == ">>": line1=line1[2:]       # strip leading >>
        line1 = line1.strip()                        # BUGFIX: result was discarded before
        arr = line1.split()                          # split on whitespace
        if len(arr)>0:
            # BUGFIX: chi was appended twice per line, doubling its length
            chi.append(float(arr[0]))
            out.append(arr)
    return out, chi
# Write DAOPHOT apertures files
def aperswrite(filename=None,apertures=None):
    '''
    Create a DAOPHOT apertures file from an array/list of apertures.
    The last two values are taken to be the inner and outer sky apertures.

    Parameters
    ----------
    filename : str
          The filename for the apertures.
    apertures : list or array
          The array of apertures.

    Returns
    -------
    Nothing is returned but the apertures file is created.

    Example
    -------

    .. code-block:: python

        aperswrite("photo.opt",apertures)

    '''
    # Not enough inputs
    if filename is None:
        print("No file name input")
        return
    if apertures is None:
        print("No apertures input")
        return
    nap = len(apertures)
    if nap<3:
        print("Only "+str(nap)+" apertures input. Need at least 3")
        return
    # Write the photometry apertures followed by the sky annulus radii
    with open(filename,'w') as fh:
        for k in range(nap-2):
            # hexadecimal aperture label, 2 digits, first starts with A
            label = hex(160+k+1)[2:].capitalize()
            fh.write("%2s = %7.4f\n" % (label, apertures[k]))
        fh.write("IS = %7.4f\n" % apertures[nap-2])
        fh.write("OS = %7.4f\n" % apertures[nap-1])
# Read DAOPHOT files
def daoread(fil):
    '''
    This program reads in DAOPHOT-style files and returns an astropy table.
    The supported types are .coo, .lst, .ap (in development), and .als.

    Parameters
    ----------
    fil : str
       The filename of the DAOPHOT catalog file.

    Returns
    -------
    cat : astropy table
        The DAOPHOT catalog as an astropy table, or None on failure.

    Example
    -------

    Load an ALLSTAR catalog file:

    .. code-block:: python

        cat = daoread("image1.als")

    '''
    # Not enough inputs
    if fil is None:
        print("No file name input")
        return None
    # Make sure the file exists
    if os.path.exists(fil) is False:
        print(fil+" NOT found")
        return None
    lines = readlines(fil)
    nstars = len(lines)-3
    if nstars == 0:
        # BUGFIX: was `file` (the builtin), which raised a TypeError here
        print("No stars in "+fil)
        return None
    # Check header
    line2 = lines[1]
    nl = int(line2.strip().split(' ')[0])
    # NL  is a code indicating the file type:
    # NL = 3  a group file
    # NL = 2  an aperture photometry file
    # NL = 1  other (output from FIND, PEAK, or NSTAR) or ALLSTAR, DAOGROW
    # NL = 0  a file without a header
    # Check number of columns
    arr1 = lines[3].split()
    if len(arr1)==0: arr1 = lines[4].split()
    ncols = len(arr1)

    # Parsing below is positional: each field is read from a fixed-width
    # slice of the line (lo[j]:hi[j]).

    # NL = 1  coo file
    if (nl==1) & (ncols==7):
        dtype = np.dtype([('ID',int),('X',float),('Y',float),('MAG',float),('SHARP',float),('ROUND',float),('ROUND2',float)])
        cat = np.zeros(nstars,dtype=dtype)
        lengths = np.array([7,9,9,9,9,9,9])
        lo = np.concatenate((np.array([0]), np.cumsum(lengths[0:-1])))
        hi = lo+lengths
        names = cat.dtype.names
        for i in range(nstars):
            line1 = lines[i+3]
            for j in range(len(names)):
                cat[i][names[j]] = np.array(line1[lo[j]:hi[j]],dtype=dtype[names[j]])
    # NL = 1  tot file  (last column is an integer aperture number)
    elif (nl==1) & (ncols==9) & (arr1[-1].isdigit() is True):
        # NL    NX    NY  LOWBAD HIGHBAD  THRESH     AP1  PH/ADU  RNOISE    FRAD
        #  1  2046  4094   117.7 38652.0   13.12    3.00    3.91    1.55    6.00
        #
        #     11  454.570   37.310  13.9710   0.0084  164.683       14.040  -0.0690      6
        #     36  287.280   93.860  14.5110   0.0126  165.018       14.580  -0.0690      6
        dtype = np.dtype([('ID',int),('X',float),('Y',float),('MAG',float),('ERR',float),('SKY',float),('MAGFAP',float),('APCORR',float),('FINALAP',int)])
        cat = np.zeros(nstars,dtype=dtype)
        lengths = np.array([7,9,9,9,9,9,9,9,9])
        lo = np.concatenate((np.array([0]), np.cumsum(lengths[0:-1])))
        hi = lo+lengths
        names = cat.dtype.names
        for i in range(nstars):
            line1 = lines[i+3]
            for j in range(len(names)):
                cat[i][names[j]] = np.array(line1[lo[j]:hi[j]],dtype=dtype[names[j]])
    # NL = 1  als file
    elif (nl==1) & (ncols==9) & (arr1[-1].isdigit() is False):
        dtype = np.dtype([('ID',int),('X',float),('Y',float),('MAG',float),('ERR',float),('SKY',float),('ITER',float),('CHI',float),('SHARP',float)])
        cat = np.zeros(nstars,dtype=dtype)
        lengths = np.array([7,9,9,9,9,9,9,9,9])
        lo = np.concatenate((np.array([0]), np.cumsum(lengths[0:-1])))
        hi = lo+lengths
        names = cat.dtype.names
        for i in range(nstars):
            line1 = lines[i+3]
            for j in range(len(names)):
                cat[i][names[j]] = np.array(line1[lo[j]:hi[j]],dtype=dtype[names[j]])
    # NL = 2  aperture photometry: two data lines (+ blank) per star
    elif nl==2:
        #
        #      1 1434.670   15.590   99.999   99.999   99.999   99.999   99.999
        #      1615.662 20.90  0.00  9.9999   9.9999   9.9999   9.9999   9.9999
        #
        #      2  233.850   18.420   99.999   99.999   99.999   99.999   99.999
        #      1613.601 20.96  0.02  9.9999   9.9999   9.9999   9.9999   9.9999
        #
        # The columns are: ID, X, Y, Mag1, Mag2, etc..
        #                  Sky, St.Dev. of sky, skew of sky, Mag1err, Mag2err, etc.
        ncols = len(lines[4].split())
        naper = ncols-3                       # apertures
        nstars = int((numlines(fil)-3.0)/3.0)  # stars
        dtype = np.dtype([('ID',int),('X',float),('Y',float),('SKY',float),('SKYSIG',float),('SKYSKEW',float),('MAG',float,naper),('ERR',float,naper)])
        cat = np.zeros(nstars,dtype=dtype)
        # fixed-width slices for line 1 (ID, X, Y, Mag1..MagN)
        lengths1 = np.concatenate([np.array([7,9,9]),np.zeros(naper,dtype=int)+9])
        lo1 = np.concatenate((np.array([0]), np.cumsum(lengths1[0:-1])))
        hi1 = lo1+lengths1
        # fixed-width slices for line 2 (Sky, SkySig, SkySkew, Err1..ErrN)
        lengths2 = np.concatenate([np.array([14,6,6]),np.zeros(naper,dtype=int)+9])
        lo2 = np.concatenate((np.array([0]), np.cumsum(lengths2[0:-1])))
        hi2 = lo2+lengths2
        for i in range(nstars):
            # line 1
            # ID, X, Y, Mag1, Mag2, etc..
            line1 = lines[i*3+4]
            cat[i]['ID'] = int(line1[lo1[0]:hi1[0]])
            cat[i]['X'] = float(line1[lo1[1]:hi1[1]])
            cat[i]['Y'] = float(line1[lo1[2]:hi1[2]])
            mag = np.zeros(naper,dtype=float)
            for j in range(naper):
                mag[j] = np.array(line1[lo1[j+3]:hi1[j+3]],dtype=float)
            cat[i]['MAG'] = mag
            # line 2
            # Sky, St.Dev. of sky, skew of sky, Mag1err, Mag2err, etc.
            line2 = lines[i*3+5]
            cat[i]['SKY'] = float(line2[lo2[0]:hi2[0]])
            cat[i]['SKYSIG'] = float(line2[lo2[1]:hi2[1]])
            cat[i]['SKYSKEW'] = float(line2[lo2[2]:hi2[2]])
            err = np.zeros(naper,dtype=float)
            for j in range(naper):
                err[j] = np.array(line2[lo2[j+3]:hi2[j+3]],dtype=float)
            cat[i]['ERR'] = err
    # NL = 3  list
    elif nl==3:
        dtype = np.dtype([('ID',int),('X',float),('Y',float),('MAG',float),('ERR',float),('SKY',float)])
        cat = np.zeros(nstars,dtype=dtype)
        lengths = np.array([7,9,9,9,9,9,9])
        lo = np.concatenate((np.array([0]), np.cumsum(lengths[0:-1])))
        hi = lo+lengths
        names = cat.dtype.names
        for i in range(nstars):
            line1 = lines[i+3]
            for j in range(len(names)):
                cat[i][names[j]] = np.array(line1[lo[j]:hi[j]],dtype=dtype[names[j]])
    else:
        print("Cannot load this file")
        return None
    # Return as astropy Table
    return Table(cat)
# Make meta-data dictionary for an image:
def makemeta(fluxfile=None,header=None):
    '''
    This creates a meta-data dictionary for an exposure that is used by many
    of the photometry programs.  Either the filename or the header must be input.
    Note that sometimes in multi-extension FITS (MEF) files the information needed
    is both in the primary header and the extension header.  In that case it is best
    to combine them into one header and input that to makemeta().  This can easily
    be accomplished like this:
      head0 = fits.getheader("image1.fits",0)
      head = fits.getheader("image1.fits",1)
      head.extend(head0,unique=True)
      meta = makemeta(header=head)

    Parameters
    ----------
    fluxfile : str, optional
        The filename of the FITS image.
    header : str, optional
        The header of the image.

    Returns
    -------
    meta : astropy header
        The meta-data dictionary which is an astropy header with additional
        keyword/value pairs added (INSTCODE, RDNOISE, GAIN, CPFWHM, PIXSCALE).

    Example
    -------

    Create the meta-data dictionary for `image.fits`

    .. code-block:: python

        meta = makemeta("image.fits")

    Create the meta-data dictionary from `head`.

    .. code-block:: python

        meta = makemeta(header=head)

    '''
    # You generally need BOTH the PDU and extension header
    # To get all of this information
    if (fluxfile is None) & (header is None):
        print("No fluxfile or headerinput")
        return
    # Initialize meta using the header
    if fluxfile is not None:
        header = fits.getheader(fluxfile,0)
    meta = header

    #- INSTCODE -
    if "DTINSTRU" in meta.keys():
        if meta["DTINSTRU"] == 'mosaic3':
            meta["INSTCODE"] = 'k4m'
        elif meta["DTINSTRU"] == '90prime':
            meta["INSTCODE"] = 'ksb'
        elif meta["DTINSTRU"] == 'decam':
            meta["INSTCODE"] = 'c4d'
        else:
            print("Cannot determine INSTCODE type")
            return
    else:
        print("No DTINSTRU found in header.  Cannot determine instrument type")
        return
    #- RDNOISE -
    if "RDNOISE" not in meta.keys():
        # Check DECam style rdnoise
        if "RDNOISEA" in meta.keys():
            rdnoisea = meta["RDNOISEA"]
            rdnoiseb = meta["RDNOISEB"]
            rdnoise = (rdnoisea+rdnoiseb)*0.5
            meta["RDNOISE"] = rdnoise
        # Check other names
        if meta.get('RDNOISE') is None:
            for name in ['READNOIS','ENOISE']:
                if name in meta.keys(): meta['RDNOISE']=meta[name]
        # Bok, per-chip table (values indexed by CCDNUM)
        if meta['INSTCODE'] == 'ksb':
            meta['RDNOISE']= [6.625, 7.4, 8.2, 7.1][meta['CCDNUM']-1]
        if meta.get('RDNOISE') is None:
            print('No RDNOISE found')
            return
    #- GAIN -
    if "GAIN" not in meta.keys():
        try:
            # BUGFIX: was x.get['CCDNUM'] (subscripting a method), which always
            # raised TypeError and silently fell through to the average gain.
            gainmap = { 'c4d': lambda x: 0.5*(x.get('GAINA')+x.get('GAINB')),
                        'k4m': lambda x: x.get('GAIN'),
                        'ksb': lambda x: [1.3,1.5,1.4,1.4][x.get('CCDNUM')-1] }  # bok gain in HDU0, use list here
            gain = gainmap[meta["INSTCODE"]](meta)
            meta["GAIN"] = gain
        except Exception:
            # Fall back to instrument-average gains
            gainmap_avg = { 'c4d': 3.9845419, 'k4m': 1.8575, 'ksb': 1.4}
            gain = gainmap_avg[meta["INSTCODE"]]
            meta["GAIN"] = gain
    #- CPFWHM -
    # FWHM values are ONLY in the extension headers
    cpfwhm_map = { 'c4d': 1.5 if meta.get('FWHM') is None else meta.get('FWHM')*0.27,
                   'k4m': 1.5 if meta.get('SEEING1') is None else meta.get('SEEING1'),
                   'ksb': 1.5 if meta.get('SEEING1') is None else meta.get('SEEING1') }
    cpfwhm = cpfwhm_map[meta["INSTCODE"]]
    meta['CPFWHM'] = cpfwhm
    #- PIXSCALE -
    if "PIXSCALE" not in meta.keys():
        pixmap = { 'c4d': 0.27, 'k4m': 0.258, 'ksb': 0.45 }
        try:
            meta["PIXSCALE"] = pixmap[meta["INSTCODE"]]
        except Exception:
            # Derive the pixel scale from the WCS as a last resort
            w = WCS(meta)
            meta["PIXSCALE"] = np.max(np.abs(w.pixel_scale_matrix))

    return meta
# Write SE catalog in DAO format
def sextodao(cat=None,meta=None,outfile=None,format="lst",naxis1=None,naxis2=None,saturate=None,rdnoise=None,gain=None,lowbad=None,thresh=None,logger=None):
    '''
    This writes out a Source Extractor catalog in a DAOPHOT format.

    Parameters
    ----------
    cat : numpy structured arrray or astropy Table format
        The Source Extractor catalog.
    meta : astropy header
        The image meta-data dictionary (naxis1, naxis2, saturate, rdnoise, gain, etc.).  The parameters
        can be input individually (see below).
    outfile : str
         The output filename.
    format : str, (lst, coo, ap, als)
         The output DAOPHOT format (lst, coo, ap, als).
    naxis1 : int, optional
         The X-dimensional size (in pixels) of the image.
    naxis2 : int, optional
         The Y-dimenaional size (in pixels) of the image.
    saturate : float, optional
         The saturation level of the image.
    rdnoise : float, optional
         The read noise of the image (in electrons).
    gain : float, optional
         The gain of the image (electrons/ADU).
    lowbad : float, optional
         The lower limit of the good range of values.
    thresh : float, optional
         The detection threshold.
    logger : logger object, optional
        The Logger to use for logging output.

    Returns
    -------
    Nothing is returned.  The catalog is written to `outfile`.

    Example
    -------

    .. code-block:: python

        sextodao(cat,meta,"cat.coo")

    '''
    if logger is None: logger = basiclogger('phot')   # set up basic logger if necessary
    # Not enough inputs
    if cat is None:
        logger.warning("No catalog input")
        return
    if meta is None:
        logger.warning("No image meta-data dictionary input")
        return
    if outfile is None:
        logger.warning("No outfile given")
        return
    # Delete outfile
    if os.path.exists(outfile): os.remove(outfile)

    # Get meta-data parameters, keyword inputs take priority over "meta"
    if naxis1 is None: naxis1=meta['NAXIS1']
    if naxis2 is None: naxis2=meta['NAXIS2']
    if saturate is None: saturate=meta['SATURATE']
    if rdnoise is None: rdnoise=meta['RDNOISE']
    if gain is None: gain=meta['GAIN']
    if lowbad is None:
        if meta.get('SKYMED') is not None:
            skymed = meta['SKYMED']
            skyrms = meta['SKYRMS']
            # BUGFIX: this was `lowbad = skymed-7.*skyrms > 0.0`, a literal
            # translation of the IDL ">" (maximum) operator which in Python
            # produced a boolean.  Clamp at zero instead.
            lowbad = max(skymed-7.*skyrms, 0.0)
            # BUGFIX: only derive the threshold if the caller didn't supply
            # one (keyword inputs take priority).
            if thresh is None: thresh = skyrms*3.5
        else:
            logger.info("No sky value found in meta.  Using LOWBAD=1.0")
            lowbad = 1.0
    if thresh is None: thresh=20.0

    # Formats: coo, lst, ap, als

    # Header values: this information comes from daophot2.pdf pg.69
    # NL: Originally meant "number of lines" but not anymore
    # NX: size of X-dimension of image in pixels
    # NY: size of Y-dimension of image in pixels
    # LOWBAD: lower good data limit, calculated by FIND
    # HIGHBAD: upper good data limit, specified in option file
    # THRESH: threshold calculated by FIND
    # AP1: radius (pixels) of the first aperture used by PHOTOMETRY
    # PH/ADU: gain in photons/ADU used when running FIND
    # RDNOISE: rdnoise (ADU) used when running FIND
    # FRAD: value of fitting radius

    # Go through the formats
    # "coo" file from FIND
    if format == "coo":

        #NL    NX    NY  LOWBAD HIGHBAD  THRESH     AP1  PH/ADU  RNOISE    FRAD
        # 1  2046  4094  1472.8 38652.0   80.94    0.00    3.91    1.55    3.90
        #
        #      1  1434.67    15.59   -0.045    0.313    0.873    1.218
        #      2   233.85    18.42   -0.018    0.218   -0.781    1.433
        #    ID      X         Y       MAG     SHARP    ROUND    ROUND2
        f = open(outfile,'w')
        # Header
        f.write(" NL    NX    NY  LOWBAD HIGHBAD  THRESH     AP1  PH/ADU  RNOISE    FRAD\n")
        f.write("  3 %5d %5d %7.1f %7.1f %7.2f %7.2f %7.2f %7.2f %7.2f\n" %
                (naxis1,naxis2,lowbad,saturate,thresh,3.0,gain,rdnoise/gain,3.9))
        f.write("\n")
        #f.write("  3  2046  4094  1472.8 38652.0   80.94    3.00    3.91    1.55    3.90\n")
        # Write the data
        for e in cat:
            f.write("%7d %8.2f %8.2f %8.3f %8.3f %8.3f %8.3f\n" %
                    (e["NUMBER"],e["X_IMAGE"],e["Y_IMAGE"],e["MAG_AUTO"],0.6,0.0,0.0))
        f.close()

    # "lst" file from PICKPSF
    elif format == "lst":

        #NL    NX    NY  LOWBAD HIGHBAD  THRESH     AP1  PH/ADU  RNOISE    FRAD
        # 3  2046  4094  1472.8 38652.0   80.94    3.00    3.91    1.55    3.90
        #
        #    318 1519.850  622.960   10.963    0.001    0.315
        #   1199 1036.580 2257.650   11.008    0.001    0.321
        #     ID    X         Y       MAG      ERR      SKY?
        f = open(outfile,'w')
        # Header
        f.write(" NL    NX    NY  LOWBAD HIGHBAD  THRESH     AP1  PH/ADU  RNOISE    FRAD\n")
        f.write("  3 %5d %5d %7.1f %7.1f %7.2f %7.2f %7.2f %7.2f %7.2f\n" %
                (naxis1,naxis2,lowbad,saturate,thresh,3.0,gain,rdnoise/gain,3.9))
        f.write("\n")
        #f.write("  3  2046  4094  1472.8 38652.0   80.94    3.00    3.91    1.55    3.90\n")
        # Write the data
        for e in cat:
            f.write("%7d %8.3f %8.3f %8.3f %8.3f %8.3f\n" %
                    (e["NUMBER"],e["X_IMAGE"]+1,e["Y_IMAGE"]+1,e["MAG_AUTO"],e["MAGERR_AUTO"],0.3))
        f.close()

    # "ap" file from PHOTOMETRY
    elif format == "ap":
        logger.warning(".ap files not supported yet")
        return

    # "als" file from ALLSTAR
    elif format == "als":

        # NL    NX    NY  LOWBAD HIGHBAD  THRESH     AP1  PH/ADU  RNOISE    FRAD
        #  1  2046  4094  1472.8 38652.0   80.94    3.00    3.91    1.55    3.90
        #
        #      7  219.110   30.895   16.934   0.0935 1613.224       4.    0.872    0.040
        #     25 1396.437   62.936   12.588   0.0063 1615.938       4.    1.102   -0.042
        #     ID     X        Y       MAG      ERR     SKY        ITER     CHI     SHARP
        f = open(outfile,'w')
        # Header
        f.write(" NL    NX    NY  LOWBAD HIGHBAD  THRESH     AP1  PH/ADU  RNOISE    FRAD\n")
        f.write("  3 %5d %5d %7.1f %7.1f %7.2f %7.2f %7.2f %7.2f %7.2f\n" %
                (naxis1,naxis2,lowbad,saturate,thresh,3.0,gain,rdnoise/gain,3.9))
        f.write("\n")
        #f.write("  3  2046  4094  1472.8 38652.0   80.94    3.00    3.91    1.55    3.90\n")
        # Write the data
        for e in cat:
            f.write("%7d %8.3f %8.3f %8.3f %8.4f %8.3f %8.0f %8.3f %8.3f\n" %
                    (e["NUMBER"],e["X_IMAGE"]+1,e["Y_IMAGE"]+1,e["MAG_AUTO"],e["MAGERR_AUTO"],1500.0,1,1.0,0.0))
        f.close()

    # Not supported
    else:
        logger.warning(format+" NOT supported")
        return
# Run Source Extractor
#---------------------
#def runsex(fluxfile=None,wtfile=None,maskfile=None,meta=None,outfile=None,configdir=None,logfile=None,logger=None):
def runsex(fluxfile=None,wtfile=None,maskfile=None,meta=None,outfile=None,configdir=None,offset=0,sexiter=1,dthresh=2.0,logfile=None,logger=None): #ktedit:sex2
    '''
    Run Source Extractor on an exposure.  The program is configured to work with files
    created by the NOAO Community Pipeline.

    Parameters
    ----------
    fluxfile : str
       The filename of the flux FITS image.
    wtfile : str
       The filename of the weight (1/variance) FITS image.
    maskfile : str
       The filename of the mask FITS image.
    meta : astropy header
       The meta-data dictionary for the exposure.
    outfile : str
        The output filename of the final catalog.
    configdir : str
        The directory that contains the Source Extractor configuration files.
          default.config, default.conv, default.nnw, default.param
    offset : int
        The value to add to the star id number, default=0
    sexiter : int, optional, default = 1
        The Source Extractor iteration number; used to name the segmentation
        check-image (seg_<sexiter>.fits).
    dthresh : float, optional, default = 2.0
        The detection/analysis threshold (in sigma) written into the config file.
    logfile : str, optional
        The name to use for the logfile.  If this is not input then the name will
        be the base name of `fluxfile` with the suffix "_sex.log".
    logger : logger object, optional
        The Logger to use for logging output.

    Returns
    -------
    cat : astropy Table
        The final Source Extractor catalog.
    maglim : float
        The magnitude limit of the exposure.
    The catalog is written to `outfile` and the output of Source Extractor to `logfile`.

    Example
    -------

    .. code-block:: python

        cat, maglim = runsex("flux.fits","wt.fits","mask.fits",meta,"cat.fits","/data/config/","sex.log")

    '''
    if logger is None: logger=basiclogger('phot')   # set up basic logger if necessary
    logger.info("-- Running SExtractor --")
    logger.info("input file: "+str(fluxfile))
    # Not enough inputs
    if fluxfile is None:
        logger.warning("No fluxfile input")
        return
    if wtfile is None:
        logger.warning("No wtfile input")
        return
    if maskfile is None:
        logger.warning("No maskfile input")
        return
    if meta is None:
        logger.warning("No meta-data dictionary input")
        return
    if outfile is None:
        logger.warning("No outfile input")
        return
    if configdir is None:
        logger.warning("No configdir input")
        return
    # Check that necessary files exist
    for f in [fluxfile,wtfile,maskfile]:
        if os.path.exists(f) is False:
            logger.warning(f+" NOT found")
            return None

    base = os.path.basename(fluxfile)
    base = os.path.splitext(os.path.splitext(base)[0])[0]
    if logfile is None: logfile=base+"_sex.log"

    # Working filenames
    sexbase = base+"_sex"
    sfluxfile = sexbase+".flux.fits"
    swtfile = sexbase+".wt.fits"
    smaskfile = sexbase+".mask.fits"

    if os.path.exists(outfile): os.remove(outfile)
    if os.path.exists(sfluxfile): os.remove(sfluxfile)
    if os.path.exists(swtfile): os.remove(swtfile)
    if os.path.exists(smaskfile): os.remove(smaskfile)
    if os.path.exists(logfile): os.remove(logfile)

    # Load the data
    flux,fhead = fits.getdata(fluxfile,header=True)
    wt,whead = fits.getdata(wtfile,header=True)
    mask,mhead = fits.getdata(maskfile,header=True)

    # 3a) Make subimages for flux, weight, mask

    # Turn the mask from integer to bitmask
    if ((meta["INSTCODE"]=='c4d') & (meta["plver"]>='V3.5.0')) | (meta["INSTCODE"]=='k4m') | (meta["INSTCODE"]=='ksb'):
         #  1 = bad (in static bad pixel mask) -> 1
         #  2 = no value (for stacks)          -> 2
         #  3 = saturated                      -> 4
         #  4 = bleed mask                     -> 8
         #  5 = cosmic ray                     -> 16
         #  6 = low weight                     -> 32
         #  7 = diff detect                    -> 64
         omask = mask.copy()
         mask *= 0
         nonzero = (omask>0)
         mask[nonzero] = 2**((omask-1)[nonzero])    # This takes about 1 sec
    # Fix the DECam Pre-V3.5.0 masks
    if (meta["INSTCODE"]=='c4d') & (meta["plver"]<'V3.5.0'):
      # --CP bit masks, Pre-V3.5.0 (PLVER)
      # Bit   DQ Type  PROCTYPE
      # 1  detector bad pixel          ->  1
      # 2  saturated                   ->  4
      # 4  interpolated                ->  32
      # 16  single exposure cosmic ray ->  16
      # 64  bleed trail                ->  8
      # 128  multi-exposure transient  ->  0 TURN OFF
      # --CP bit masks, V3.5.0 on (after ~10/28/2014), integer masks
      #  1 = bad (in static bad pixel mask)
      #  2 = no value (for stacks)
      #  3 = saturated
      #  4 = bleed mask
      #  5 = cosmic ray
      #  6 = low weight
      #  7 = diff detect
      omask = mask.copy()
      mask *= 0     # re-initialize
      mask += (np.bitwise_and(omask,1)==1) * 1    # bad pixels
      mask += (np.bitwise_and(omask,2)==2) * 4    # saturated
      mask += (np.bitwise_and(omask,4)==4) * 32   # interpolated
      mask += (np.bitwise_and(omask,16)==16) * 16  # cosmic ray
      mask += (np.bitwise_and(omask,64)==64) * 8   # bleed trail

    # Mask out bad pixels in WEIGHT image
    #  set wt=0 for mask>0 pixels
    wt[ (mask>0) | (wt<0) ] = 0   # CP sets bad pixels to wt=0 or sometimes negative

    # Write out the files
    shutil.copy(fluxfile,sfluxfile)
    fits.writeto(swtfile,wt,header=whead,output_verify='warn')

    # 3b) Make SExtractor config files
    # Copy the default files
    shutil.copyfile(configdir+"default.conv","default.conv")
    shutil.copyfile(configdir+"default.nnw","default.nnw")
    shutil.copyfile(configdir+"default.param","default.param")

    # Read in configuration file and modify for this image
    lines = readlines(configdir+'default.config')

    # Gain, saturation, pixscale

    # Things to change
    # SATUR_LEVEL     59000.00         # level (in ADUs) at which arises saturation
    # GAIN            43.52             # detector gain in e-/ADU.
    # SEEING_FWHM     1.46920            # stellar FWHM in arcsec
    # WEIGHT_IMAGE  F4-00507860_01_comb.mask.fits
    # CHECKIMAGE_TYPE SEGMENTATION
    # CHECKIMAGE_NAME segment.fits
    # DETECT_THRESH  1.1 originally, will be changed depending on density/fwhm  #ktedit:sex2
    # ANALYSIS_THRESH  same as DETECT_THRESH                                    #ktedit:sex2

    filter_name = ''
    cnt = 0
    for l in lines:
        # CATALOG_NAME
        m = re.search('^CATALOG_NAME',l)
        if m != None:
            lines[cnt] = "CATALOG_NAME     "+outfile+"         # name of the output catalog\n"
        # FLAG_IMAGE
        m = re.search('^FLAG_IMAGE',l)
        if m != None:
            lines[cnt] = "FLAG_IMAGE     "+smaskfile+"         # filename for an input FLAG-image\n"
        # WEIGHT_IMAGE
        m = re.search('^WEIGHT_IMAGE',l)
        if m != None:
            lines[cnt] = "WEIGHT_IMAGE     "+swtfile+"  # Weight image name\n"
        # SATUR_LEVEL
        m = re.search('^SATUR_LEVEL',l)
        if m != None:
            lines[cnt] = "SATUR_LEVEL     "+str(meta["saturate"])+"         # level (in ADUs) at which arises saturation\n"
        # Gain
        m = re.search('^GAIN',l)
        if m != None:
            lines[cnt] = "GAIN            "+str(meta["gain"])+"            # detector gain in e-/ADU.\n"
        #-----------------------------------------------------------#ktedit:sex2 T
        # Check_image                                               #ktedit:sex2 (for visual analysis purposes)
        m = re.search('^CHECKIMAGE_TYPE',l)
        if m != None:
            lines[cnt] = "CHECKIMAGE_TYPE SEGMENTATION"
        # Check_image name
        m = re.search('^CHECKIMAGE_NAME',l)
        if m != None:
            lines[cnt] = "CHECKIMAGE_NAME seg_"+str(sexiter)+".fits"
        # DETECT_THRESH                                             #ktedit:sex2 (may be changed after first sexiteration)
        m = re.search('^DETECT_THRESH',l)
        if m != None:
            lines[cnt] = "DETECT_THRESH   "+str(dthresh)+"  # <sigmas> or <threshold>,<ZP> in mag.arcsec-2"
        # ANALYSIS_THRESH
        m = re.search('^ANALYSIS_THRESH',l)
        if m != None:
            lines[cnt] = "ANALYSIS_THRESH   "+str(dthresh)+"  # <sigmas> or <threshold>,<ZP> in mag.arcsec-2"
        #-----------------------------------------------------------#ktedit:sex2 B
        # SEEING_FWHM
        m = re.search('^SEEING_FWHM',l)
        if m != None:
            lines[cnt] = "SEEING_FWHM     "+str(meta["cpfwhm"])+"            # stellar FWHM in arcsec\n"
        # PHOT_APERTURES, aperture diameters in pixels
        m = re.search('^PHOT_APERTURES',l)
        if m != None:
            aper_world = np.array([ 0.5, 1.0, 2.0, 3.0, 4.0]) * 2  # radius->diameter, 1, 2, 4, 6, 8"
            aper_pix = aper_world / meta["pixscale"]
            lines[cnt] = "PHOT_APERTURES  "+', '.join(np.array(np.round(aper_pix,2),dtype='str'))+"            # MAG_APER aperture diameter(s) in pixels\n"
        # Filter name
        m = re.search('^FILTER_NAME',l)
        if m != None:
            filter_name = (l.split())[1]
        cnt = cnt+1
    # Add newlines
    # NOTE(review): lines replaced above that already end in "\n" pick up a
    # second newline here; SExtractor ignores the resulting blank lines.
    lines = [line + '\n' for line in lines]
    # Write out the new config file
    if os.path.exists("default.config"):
        os.remove("default.config")
    fo = open('default.config', 'w')
    fo.writelines(lines)
    fo.close()

    # Convolve the mask file with the convolution kernel to "grow" the regions
    # around bad pixels the SE already does to the weight map
    if (filter_name != ''):
        # Load the filter array
        f = open(filter_name,'r')
        linenum = 0
        for line in f:
            if (linenum == 1):
                shape = line.split(' ')[1]
                # Make it two pixels larger
                filter = np.ones(np.array(shape.split('x'),dtype='i')+2,dtype='i')
                #filter = np.zeros(np.array(shape.split('x'),dtype='i'),dtype='f')
            #if (linenum > 1):
            #    linedata = np.array(line.split(' '),dtype='f')
            #    filter[:,linenum-2] = linedata
            linenum += 1
        f.close()
        # Normalize the filter array
        #filter /= np.sum(filter)
        # Convolve with mask
        #filter = np.ones(np.array(shape.split('x'),dtype='i'),dtype='i')
        #mask2 = convolve2d(mask,filter,mode="same",boundary="symm")
        mask2 = convolve(mask,filter,mode="reflect")
        bad = ((mask == 0) & (mask2 > 0))
        newmask = np.copy(mask)
        newmask[bad] = 1     # mask out the neighboring pixels
        # Write new mask
        fits.writeto(smaskfile,newmask,header=mhead,output_verify='warn')

    # 3c) Run SExtractor
    try:
        # Save the SExtractor info to a logfile
        sf = open(logfile,'w')
        retcode = subprocess.call(["sex",sfluxfile,"-c","default.config"],stdout=sf,stderr=subprocess.STDOUT)
        sf.close()
        if retcode < 0:
            logger.error("Child was terminated by signal"+str(-retcode))
        else:
            pass
    except OSError as e:
        logger.error("SExtractor Execution failed:"+str(e))
        logger.error(e)

    # Check that the output file exists
    if os.path.exists(outfile) is True:
        # Load the catalog and keep it in memory for later use
        cat = Table.read(outfile,2)
        # How many sources were detected, final catalog file
        logger.info(str(len(cat))+" sources detected")
        logger.info("Final catalog is "+outfile)
        # Get the magnitude limit, use 90th percentile
        gdcat = (cat["MAG_AUTO"]<50)
        ngdcat = np.sum(gdcat)
        mag = cat["MAG_AUTO"][gdcat]
        mag_sorted = np.sort(mag)
        # BUGFIX: clamp the index; round(0.90*ngdcat) can equal ngdcat
        maglim = mag_sorted[min(int(np.round(0.90*ngdcat)),ngdcat-1)]
        logger.info("Estimated magnitude limit = %6.2f mag" % maglim)
        # Get background value and RMS and add to meta
        plines = readlines(logfile)
        plines2 = grep(plines,'Background')
        arr = plines2[0].split()
        ind1 = grep(arr,'Background:',index=True)
        ind2 = grep(arr,'RMS',index=True)
        # BUGFIX: np.float was removed in NumPy 1.24; use the builtin float
        background = float(arr[ind1[0]+1])
        rms = float(arr[ind2[0]+1])
        meta["SKYMED"] = (background,"Median sky background")
        meta["SKYRMS"] = (rms,"RMS of sky")
        # offset the star id (if this is not the first sextractor run  #ktedit:sex2
        cat['NUMBER'] = cat['NUMBER']+offset
    else:
        cat = None
        maglim = None

    # Delete temporary files
    if os.path.exists(sfluxfile): os.remove(sfluxfile)
    if os.path.exists(smaskfile): os.remove(smaskfile)
    # BUGFIX: was testing smaskfile (already removed above) before removing
    # swtfile, so the weight temp file was often left behind
    if os.path.exists(swtfile): os.remove(swtfile)
    #os.remove("default.conv")

    return cat,maglim
# Determine seeing FWHM using SE catalog
#---------------------------------------
def sexfwhm(cat=None,logger=None):
    '''
    Estimate the seeing FWHM from a Source Extractor catalog.

    Parameters
    ----------
    cat : astropy Table
        The Source Extractor catalog.

    Returns
    -------
    fwhm : float
        The seeing FWHM in arcsec (median FWHM_WORLD of the selected stars).

    Example
    -------

    .. code-block:: python

        fwhm = sexfwhm(cat)

    '''
    if logger is None: logger=basiclogger('phot')   # set up basic logger if necessary
    # Make sure we have the SE catalog
    if cat is None:
        logger.warning("No catalog input")
        return
    # Apply successively looser star selections until at least 10 remain
    bright = (cat['MAG_AUTO']< 50)
    sel = (bright & (cat['MAGERR_AUTO']<0.05) & (cat['CLASS_STAR']>0.8) &
           (cat['FLAGS']==0) & (cat['IMAFLAGS_ISO']==0))
    nsel = np.sum(sel)
    if (nsel<10):
        # Not enough good sources, drop the FLAGS cut
        sel = (bright & (cat['MAGERR_AUTO']<0.05) & (cat['CLASS_STAR']>0.8) &
               (cat['IMAFLAGS_ISO']==0))
        nsel = np.sum(sel)
    if (nsel<10):
        # Still not enough, drop the CLASS_STAR cut as well
        sel = (bright & (cat['MAGERR_AUTO']<0.05) & (cat['IMAFLAGS_ISO']==0))
        nsel = np.sum(sel)
    if (nsel<10):
        # Last resort: only magnitude cuts, with a looser error threshold
        sel = (bright & (cat['MAGERR_AUTO']<0.08))
        nsel = np.sum(sel)
    medfwhm = np.median(cat[sel]['FWHM_WORLD']*3600.)
    logger.info('FWHM = %5.2f arcsec (%d sources)' % (medfwhm, nsel))
    return medfwhm
# Pick PSF candidates using SE catalog
#-------------------------------------
def sexpickpsf(cat=None,fwhm=None,meta=None,outfile=None,nstars=100,logger=None):
    '''
    Pick PSF stars using a Source Extractor catalog and output to a DAOPHOT-style file.

    Parameters
    ----------
    cat : astropy Table
        The Source Extractor catalog.
    fwhm : float
        The seeing FWHM of the exposure (in arcsec).
    meta : astropy dictionary
        The metal-data dictionary for the image.
    outfile : str
        The filaname of the DAOPHOT-style lst file to write the PSF stars to.
    nstars : int, optional, default is 100
        The number of PSF stars to pick.
    logger : logging object
        The logger to use for logging information.

    Returns
    -------
    psfcat : astropy Table
        The table of PSF stars.
    The table of PSF stars is also written to `outfile`.

    Example
    -------

    .. code-block:: python

        psfcat = sexpickpsf(cat,fwhm,meta,"psfstars.lst",nstars=100)

    '''
    if logger is None: logger=basiclogger('phot')   # set up basic logger if necessary
    # Make sure we have the SE catalog
    if cat is None:
        logger.warning("No catalog input")
        return
    # Make sure we have FWHM
    if fwhm is None:
        logger.warning("No FWHM input")
        return
    # Make sure we have meta
    if meta is None:
        logger.warning("No meta-data dictionary input")
        return
    # Make sure we have the output file
    if outfile is None:
        logger.warning("No outfile input")
        return

    # Select good sources
    gdcat1 = ((cat['MAG_AUTO']< 50) & (cat['MAGERR_AUTO']<0.05) & (cat['CLASS_STAR']>0.8))
    ngdcat1 = np.sum(gdcat1)
    # Bright and faint limit, use 5th and 95th percentile
    minmag, maxmag = np.sort(cat[gdcat1]['MAG_AUTO'])[[int(np.round(0.05*ngdcat1)),int(np.round(0.95*ngdcat1))]]
    # Select stars with
    # -good FWHM values
    # -good clas_star values (unless FWHM too large)
    # -good mag range, bright but not too bright
    # -no flags set
    if fwhm<1.8:
        gdcat = ((cat['MAG_AUTO']< 50) & (cat['MAGERR_AUTO']<0.1) & (cat['CLASS_STAR']>0.8) &
                 (cat['FWHM_WORLD']*3600.>0.5*fwhm) & (cat['FWHM_WORLD']*3600.<1.5*fwhm) &
                 (cat['MAG_AUTO']>(minmag+1.0)) & (cat['MAG_AUTO']<(maxmag-0.5)) &
                 (cat['FLAGS']==0) & (cat['IMAFLAGS_ISO']==0))
        ngdcat = np.sum(gdcat)
    # Do not use CLASS_STAR if seeing bad, not as reliable
    else:
        gdcat = ((cat['MAG_AUTO']< 50) & (cat['MAGERR_AUTO']<0.1) &
                 (cat['FWHM_WORLD']*3600.>0.5*fwhm) & (cat['FWHM_WORLD']*3600.<1.5*fwhm) &
                 (cat['MAG_AUTO']>(minmag+1.0)) & (cat['MAG_AUTO']<(maxmag-0.5)) &
                 (cat['FLAGS']==0) & (cat['IMAFLAGS_ISO']==0))
        ngdcat = np.sum(gdcat)
    # No candidate, loosen cuts
    if ngdcat<10:
        logger.info("Too few PSF stars on first try. Loosening cuts")
        # BUGFIX: was 0.2*self.seeing -- `self` does not exist in this
        # module-level function and raised a NameError whenever this
        # fallback was reached.
        gdcat = ((cat['MAG_AUTO']< 50) & (cat['MAGERR_AUTO']<0.15) &
                 (cat['FWHM_WORLD']*3600.>0.2*fwhm) & (cat['FWHM_WORLD']*3600.<1.8*fwhm) &
                 (cat['MAG_AUTO']>(minmag+0.5)) & (cat['MAG_AUTO']<(maxmag-0.5)))
        ngdcat = np.sum(gdcat)
    # No candidates
    if ngdcat==0:
        logger.error("No good PSF stars found")
        # NOTE(review): bare `raise` with no active exception produces a
        # generic RuntimeError; kept as-is to preserve caller behavior.
        raise
    # Candidate PSF stars, use only Nstars, and sort by magnitude
    si = np.argsort(cat[gdcat]['MAG_AUTO'])
    psfcat = cat[gdcat][si]
    if ngdcat>nstars: psfcat=psfcat[0:nstars]
    logger.info(str(len(psfcat))+" PSF stars found")

    # Output them in DAO format
    sextodao(psfcat,meta,outfile,format="lst")
    if os.path.exists(outfile) is False:
        logger.error("Output file "+outfile+" NOT found")
        raise
    return psfcat
# Do we a need separate aperture photometry file?
# Make DAOPHOT option files
#--------------------------
def mkopt(base=None,meta=None,VA=1,LO=7.0,TH=3.5,LS=0.2,HS=1.0,LR=-1.0,HR=1.0,
          WA=-2,AN=-7,EX=5,PE=0.75,PR=5.0,CR=2.5,CE=6.0,MA=50.0,RED=1.0,WA2=0.0,
          fitradius_fwhm=1.0,HI=None,RD=None,GA=None,FW=None,logger=None):
    '''
    Create the DAOPHOT and ALLSTAR option files (.opt and .als.opt) for an exposure.

    Parameters
    ----------
    base : str
        The base name to use for the option files.  The DAOPHOT option file will
        be called `base`.opt and the ALLSTAR option file `base`.als.opt
    meta : astropy dictionary
        The meta-data dictionary for the image.  Must contain 'gain', 'rdnoise',
        'fwhm', 'pixscale' and 'saturate' unless GA/RD/FW/HI are given explicitly.
    VA : int, default = 1
        The variable type of PSF to use.
        -1: Analytic PSF only
         0: Analytic PSF and look-up table of empirical corrections
         1: linear variations across the field
         2: quadratic variations across the field
    LO : float, default = 7.0
        Low good datum (7. works fine on most images).
    TH : float, default = 3.5
        Threshold in sigma above the background (3.5 works fine).
    LS : float, default = 0.2
        Lower sharpness cutoff.
    HS : float, default = 1.0
        High sharpness cutoff.
    LR : float, default = -1.0
        Lower roundness cutoff.
    HR : float, default = 1.0
        High roundness cutoff.
    WA : int, default = -2
        Watch progress for DAOPHOT.  Determines what output is displayed.
    AN : int, default = -7
        Analytic model PSF.
        1: Gaussian (3 parameters)
        2: Moffat function (3 parameters), beta=1.5
        3: Moffat function (3 parameters), beta=2.5
        4: Lorentz function (3 parameters)
        5: Penny function, Gauss+Lorentz (4 parameters), G+L are parallel
        6: Penny function (5 parameters), G and L can be in different directions
        A negative sign in front means to try all functions up to X and pick the best one.
    EX : int, default = 5
        Extra PSF cleaning passes.
    PE : float, default = 0.75
        Percent error due to the uncertainty in the fine-scale structure of the flat field.
    PR : float, default = 5.0
        Profile error due to the incompleteness of the PSF model.
    CR : float, default = 2.5
        Clipping range.  Used to remove outlier pixels.  Parameter "a" in the formula given in
        Stetson 1987, PASP, 99, 191, section III.D.2.d "Resisting bad data".
    CE : float, default = 6.0
        Clipping exponent.  Parameter b in above clipping formula.
    MA : float, default = 50.0
        Maximum group size.
    RED : float, default = 1.0
        Redetermine centroid (0 = no, 1 = yes).
    WA2 : float, default = 0.0
        Watch progress for ALLSTAR.
    fitradius_fwhm : float, default = 1.0
        The fitting radius size in units of the seeing FWHM for the area to be fit.
    HI : float, optional
        High good datum.  Normally set by `saturate` from `meta`.
    RD : float, optional
        The read noise in electrons.  Normally set by `rdnoise` from `meta`.
    GA : float, optional
        The gain in electrons/ADU.  Normally set by `gain` from `meta`.
    FW : float, optional
        The seeing FWHM in pixels.  Normally set by `fwhm`/`pixscale` from `meta`.
    logger : logger object, optional
        The Logger to use for logging output.

    Returns
    -------
    Nothing is returned.  The DAOPHOT option file is written to `base`.opt and the
    ALLSTAR option file to `base`.als.opt.

    Example
    -------

    .. code-block:: python

        mkopt("image",meta)

    '''
    if logger is None: logger=basiclogger('phot') # set up basic logger if necessary
    optfile = base+".opt"
    alsoptfile = base+".als.opt"
    # Get frame-specific parameters from meta if not given explicitly
    if GA is None: GA = meta['gain']
    if RD is None: RD = meta['rdnoise']
    if FW is None: FW = meta['fwhm'] / meta['pixscale']
    if HI is None: HI = meta['saturate']
    # Derived parameters; the upper limits are hard caps that DAOPHOT enforces
    FW = np.min([ FW , 20 ])                  # daophot won't accept anything higher than 20
    RE = RD/GA                                # read noise in ADU
    FI = np.min([ fitradius_fwhm*FW , 51 ])   # fitting radius, capped at 51
    PS = np.min([ (4.0*FW) , 51 ])            # PSF radius, capped at 51
    IS = np.min([ (FI - 1.0) , 35 ])          # inner sky radius, capped at 35
    OS = np.min([ (PS + 1.0) , 100 ])         # outer sky radius, capped at 100
    # Writing the DAOPHOT parameter file
    #-----------------------------------
    # RE : readout noise (ADU)        GA : gain (e/ADU)
    # LO : low good datum             HI : high good datum (saturation, ADU)
    # FW : FWHM                       TH : detection threshold (sigma)
    # LS,HS : sharpness cutoffs       LR,HR : roundness cutoffs
    # WA : watch progress             FI : fitting radius
    # PS : PSF radius                 VA : variable PSF
    # AN : analytic model PSF         EX : extra PSF cleaning passes
    # PE : percent error              PR : profile error
    outarr = [RE,GA,LO,HI,FW,TH,LS,HS,LR,HR,WA,FI,PS,VA,AN,EX,PE,PR]
    anotarr = ['RE','GA','LO','HI','FW','TH','LS','HS','LR','HR','WA','FI','PS','VA','AN','EX','PE','PR']
    # Delete file if it exists
    if os.path.exists(optfile):
        os.remove(optfile)
    # Write opt file
    with open(optfile,'w') as f:
        for name,value in zip(anotarr,outarr):
            # HI (the saturation level) is written as an integer, everything else as a float
            if name == "HI":
                f.write("%2s = %8d\n" % (name, value))
            else:
                f.write("%2s = %8.2f\n" % (name, value))
    # Writing the ALLSTAR parameter file
    #-----------------------------------
    # FI : fitting radius             IS,OS : inner and outer sky annulus
    # RE(D) : redetermine centroid    WA(2) : watch progress
    # PE : percent error              PR : profile error
    # CR : clipping range             CE : clipping exponent
    # MA : maximum group size
    outarr2 = [FI,IS,OS,RED,WA2,PE,PR,CR,CE,MA]
    anotarr2 = ['FI','IS','OS','RE','WA','PE','PR','CR','CE','MA']
    # Delete file if it exists
    if os.path.exists(alsoptfile):
        os.remove(alsoptfile)
    # Write opt file
    with open(alsoptfile,'w') as f:
        for name,value in zip(anotarr2,outarr2):
            f.write("%2s = %8.2f\n" % (name, value))
    logger.info("Created "+optfile+" and "+alsoptfile)
# Make image ready for DAOPHOT
def mkdaoim(fluxfile=None,wtfile=None,maskfile=None,meta=None,outfile=None,logger=None):
    '''
    Construct a FITS image that is prepared for DAOPHOT.

    Bad pixels (as flagged in the mask image) are set to a saturated value so
    DAOPHOT will ignore them, GAIN/RDNOISE keywords are added to the header,
    and the image is converted to a BITPIX that DAOPHOT understands.
    This program was designed for exposures from the NOAO Community Pipeline.

    Parameters
    ----------
    fluxfile : str
        The filename of the flux FITS image.
    wtfile : str
        The filename of the weight (1/variance) FITS image.
    maskfile : str
        The filename of the mask FITS image.
    meta : astropy header
        The meta-data dictionary for the exposure.  Must contain GAIN and RDNOISE.
    outfile : str
        The name of the output FITS file.
    logger : logging object, optional
        The logger to use for logging output.

    Returns
    -------
    Nothing is returned.  The DAOPHOT-ready image is written to `outfile`.

    Example
    -------

    .. code-block:: python

        mkdaoim("flux.fits","wt.fits","mask.fits","image.fits")

    '''
    if logger is None: logger=basiclogger('phot') # set up basic logger if necessary
    # Not enough inputs
    if fluxfile is None:
        logger.warning("No fluxfile input")
        return
    if wtfile is None:
        logger.warning("No wtfile input")
        return
    if maskfile is None:
        logger.warning("No maskfile input")
        return
    if meta is None:
        logger.warning("No meta-data dictionary input")
        return
    if outfile is None:
        logger.warning("No outfile input")
        return
    # Check that necessary files exist
    for f in [fluxfile,wtfile,maskfile]:
        if os.path.exists(f) is False:
            logger.warning(f+" NOT found")
            return None
    # Load the FITS files.  The weight image is read for validation but is not
    # currently used in the output.
    flux,fhead = fits.getdata(fluxfile,header=True)
    wt,whead = fits.getdata(wtfile,header=True)
    mask,mhead = fits.getdata(maskfile,header=True)

    # "Turn off" the difference-image masking so that multi-exposure transients
    # are not treated as bad pixels:
    # --CP bitmasks, Pre-V3.5.0 (PLVER): bit 128 = multi-exposure transient
    # --CP integer masks, V3.5.0 on (after ~10/28/2014): value 7 = diff detect
    #   (integer masks are exclusive values with precedence, not combinable bits)
    # DESDM images use their own bitmask scheme (1=bpm, 2=saturated, 4=interp,
    # 8=low, 16=cray, 32=star, 64=trail, 128=edgebleed, 256=ssxtalk, 512=edge,
    # 1024=streak, 2048=fixed).
    if meta.get("plver") is not None: # CP data
        versnum = meta["plver"].split('.')
        # V3.5.0 and on, integer masks: zero out the "diff detect" (7) values
        if (int(versnum[0][-1])>3) | ((int(versnum[0][-1])==3) & (int(versnum[1])>=5)):
            bdpix = (mask == 7)
            nbdpix = np.sum(bdpix)
            if nbdpix > 0: mask[bdpix]=0
        # Pre-V3.5.0, bitmasks: clear the 128 bit
        else:
            bdpix = ( (mask & 2**7) == 2**7)
            nbdpix = np.sum(bdpix)
            if nbdpix > 0: mask[bdpix]-=128 # clear 128
        logger.info("%d pixels cleared of difference image mask flag" % nbdpix)
    # Set all remaining flagged pixels to a saturated value so DAOPHOT skips them
    bdpix = (mask > 0.0)
    nbdpix = np.sum(bdpix)
    if nbdpix>0: flux[bdpix]=6e4
    logger.info("%d bad pixels masked" % nbdpix)
    # Add the gain and read noise to the header.
    # NOTE: Header.append('GAIN',value) does NOT set the card value (the second
    # positional argument of append() is "useblanks"), so item assignment is
    # used instead to write keyword/value pairs.
    fhead['GAIN'] = meta["GAIN"]
    fhead['RDNOISE'] = meta["RDNOISE"]
    # DAOPHOT can only handle BITPIX=16, 32, -32
    if fhead['BITPIX'] not in [16,32,-32]:
        logger.info("BITPIX="+str(fhead['BITPIX'])+" DAOPHOT can only handle 16,32,-32. Changing to -32")
        flux = np.array(flux,dtype=np.float32)
        fhead['BITPIX'] = -32
    # Write the new image (log only after the write actually succeeds)
    fits.writeto(outfile,flux,fhead,overwrite=True)
    logger.info("Wrote DAOPHOT-ready image to "+outfile)
# DAOPHOT FIND detection
#-----------------------
def daofind(imfile=None,optfile=None,outfile=None,logfile=None,logger=None):
    '''
    Run the DAOPHOT FIND source-detection routine on an image.

    Parameters
    ----------
    imfile : str
        The filename of the DAOPHOT-ready FITS image.
    optfile : str, optional
        The option file for `imfile`.  Defaults to the base name of `imfile`
        with a ".opt" suffix.
    outfile : str, optional
        The output filename of the FIND catalog.  Defaults to the base name
        of `imfile` with a ".coo" suffix.
    logfile : str, optional
        The file that captures the DAOPHOT FIND terminal output.  Defaults to
        the base name of `imfile` with a ".coo.log" suffix.
    logger : logging object
        The logger to use for the logging information.

    Returns
    -------
    cat : astropy table
        The DAOPHOT FIND catalog.  The output catalog and logfile are also
        written to disk.

    Example
    -------

    .. code-block:: python

        cat = daofind("image.fits")

    '''
    if logger is None: logger=basiclogger('phot')   # fall back to a basic logger
    logger.info("-- Running DAOPHOT detection --")
    # An image filename is required
    if imfile is None:
        logger.warning("No image filename input")
        return None
    # Derive default filenames from the image base name
    base = os.path.splitext(os.path.splitext(os.path.basename(imfile))[0])[0]
    if optfile is None: optfile = base+".opt"
    if outfile is None: outfile = base+".coo"
    if logfile is None: logfile = base+".coo.log"
    scriptfile = base+".coo.sh"
    # Clear any leftovers from a previous run
    for fname in (outfile,logfile,scriptfile):
        if os.path.exists(fname): os.remove(fname)
    # Both the image and its option file must exist
    for fname in (imfile,optfile):
        if not os.path.exists(fname):
            logger.warning(fname+" NOT found")
            return None
    # DAOPHOT chokes on long filenames, so run it through short temporary links
    fd,tmpname = tempfile.mkstemp(prefix="tcoo",dir=".")
    os.close(fd)   # only the unique name is needed
    tbase = os.path.basename(tmpname)
    timfile = tbase+".fits"
    toptfile = tbase+".opt"
    toutfile = tbase+".coo"
    os.symlink(imfile,timfile)
    os.symlink(optfile,toptfile)
    # Shell script that drives DAOPHOT interactively via a here-document
    script = ("#!/bin/sh\n"
              f"daophot << END_DAOPHOT >> {logfile}\n"
              "OPTIONS\n"
              f"{toptfile}\n"
              "\n"
              f"ATTACH {timfile}\n"
              "FIND\n"
              "1,1\n"
              f"{toutfile}\n"
              "y\n"
              "EXIT\n"
              "EXIT\n"
              "END_DAOPHOT\n")
    with open(scriptfile,'w') as fh:
        fh.write(script)
    os.chmod(scriptfile,0o775)   # same mode as decimal 509
    # DAOPHOT also reads a default daophot.opt from the working directory
    if not os.path.exists("daophot.opt"):
        shutil.copyfile(base+".opt","daophot.opt")
    # Run the script
    try:
        retcode = subprocess.call(["./"+scriptfile],stderr=subprocess.STDOUT,shell=False)
        if retcode < 0:
            logger.error("Child was terminated by signal"+str(-retcode))
    except OSError as e:
        logger.error("DAOPHOT detection failed:"+str(e))
        logger.error(e)
        raise Exception("DAOPHOT failed")
    # No output file means DAOPHOT failed
    if not os.path.exists(toutfile):
        logger.error("Output file "+outfile+" NOT Found")
        raise Exception("Output not found")
    # Move the output into place and drop the temporary links
    os.rename(toutfile,outfile)
    for fname in (tmpname,timfile,toptfile):
        os.remove(fname)
    # Summarize the useful lines from the DAOPHOT logfile
    if os.path.exists(logfile) is True:
        dlines = readlines(logfile)
        for pattern in ("Sky mode and standard deviation",
                        "Clipped mean and median"):
            hits = grep(dlines,pattern)
            if len(hits)>0:
                logger.info(hits[0].strip())   # clip \n
        # Number of detected sources
        hits = grep(dlines," stars.")
        if len(hits)>0:
            logger.info(hits[0].rstrip().strip())
    # Delete the script
    os.remove(scriptfile)
    # Load and return the catalog
    logger.info("Output file = "+outfile)
    return daoread(outfile)
# DAOPHOT aperture photometry
#----------------------------
def daoaperphot(imfile=None,coofile=None,apertures=None,outfile=None,optfile=None,apersfile=None,logfile=None,logger=None):
    '''
    Run DAOPHOT aperture photometry on an image.

    Parameters
    ----------
    imfile : str
        The filename of the DAOPHOT-ready FITS image.
    coofile : str, optional
        The filename of the catalog of sources for which to obtain aperture photometry.
        By default it is assumed that this is the base name of `imfile` with a ".coo" suffix.
    apertures : list or array, optional
        The list of apertures to use.  The last two are used as the inner and outer sky radius.
        The default apertures are: [3.0, 6.0803, 9.7377, 15.5952, 19.7360, 40.0, 50.0]
    outfile : str, optional
        The output filename of the aperture photometry catalog.  By default this is
        the base name of `imfile` with a ".ap" suffix.
    optfile : str, optional
        The option file for `imfile`.  By default it is assumed that this is
        the base name of `imfile` with a ".opt" suffix.
    apersfile : str, optional
        The file that will contain the apertures used.
    logfile : str, optional
        The name of the logfile to contain the output of the DAOPHOT
        run.  By default this is the base name of `imfile` with a ".ap.log" suffix.
    logger : logging object
        The logger to use for the logging information.

    Returns
    -------
    cat : astropy table
        The DAOPHOT aperture photometry catalog.
    maglim : float
        The magnitude limit of the exposure (None if it could not be parsed
        from the logfile).
    The output catalog and logfile will also be created.

    Example
    -------

    .. code-block:: python

        cat, maglim = daoaperphot("image.fits","image.coo")

    '''
    if logger is None: logger=basiclogger('phot') # set up basic logger if necessary
    logger.info("-- Running DAOPHOT aperture photometry --")
    # Make sure we have the image file name
    if imfile is None:
        logger.warning("No image filename input")
        return None
    # Set up filenames, make sure they don't exist
    base = os.path.basename(imfile)
    base = os.path.splitext(os.path.splitext(base)[0])[0]
    if optfile is None: optfile = base+".opt"
    if coofile is None: coofile = base+".coo"
    if outfile is None: outfile = base+".ap"
    if logfile is None: logfile = base+".ap.log"
    if apersfile is None: apersfile = base+".apers"
    scriptfile = base+".coo.sh"
    for f in [outfile,apersfile,logfile,scriptfile]:
        if os.path.exists(f): os.remove(f)
    # Check that necessary files exist
    for f in [imfile,optfile,coofile]:
        if os.path.exists(f) is False:
            logger.warning(f+" NOT found")
            return None
    # Make temporary short filenames so DAOPHOT can handle them
    tid,tfile = tempfile.mkstemp(prefix="tap",dir=".")
    os.close(tid) # close open file
    tbase = os.path.basename(tfile)
    timfile = tbase+".fits"
    cooext = os.path.splitext(coofile)[1]
    tcoofile = tbase+cooext
    toptfile = tbase+".opt"
    tapersfile = tbase+".apers"
    toutfile = tbase+".ap"
    os.symlink(imfile,timfile)
    os.symlink(optfile,toptfile)
    os.symlink(coofile,tcoofile)
    logger.info("coofile = "+coofile)
    # Make apertures file; the last two values are the inner and outer sky radii
    if apertures is None:
        apertures = [3.000, 6.0803, 9.7377, 15.5952, 19.7360, 40.0000, 50.0000]
    aperswrite(tapersfile,apertures)
    # Lines for the DAOPHOT script (here-document feeds the interactive prompts)
    lines = "#!/bin/sh\n" \
            "daophot << END_DAOPHOT >> "+logfile+"\n" \
            "OPTIONS\n" \
            ""+toptfile+"\n" \
            "\n" \
            "ATTACH "+timfile+"\n" \
            "PHOTOMETRY\n" \
            ""+tapersfile+"\n" \
            " \n" \
            ""+tcoofile+"\n" \
            ""+toutfile+"\n" \
            "EXIT\n" \
            "EXIT\n" \
            "END_DAOPHOT\n"
    # Write the script
    with open(scriptfile,'w') as f:
        f.writelines(lines)
    os.chmod(scriptfile,0o775)
    # Copy option file to daophot.opt (read by default from the working directory)
    if os.path.exists("daophot.opt") is False: shutil.copyfile(base+".opt","daophot.opt")
    # If a PSF file exists, temporarily move it out of the way so DAOPHOT
    # performs plain aperture photometry
    if os.path.exists(base+".psf"):
        logger.info(base+".psf exists. Temporarily moving it out of the way to perform aperture photometry.")
        psftemp = base+".psf.bak"
        if os.path.exists(psftemp): os.remove(psftemp)
        os.rename(base+".psf",psftemp)
        movedpsf = True
    else:
        movedpsf = False
    # Run the script.  NOTE: with shell=True and a list argument only the first
    # element is used as the command, which is what we want here.
    try:
        retcode = subprocess.call(["./"+scriptfile],stderr=subprocess.STDOUT,shell=True)
        if retcode < 0:
            logger.error("Child was terminated by signal"+str(-retcode))
    except OSError as e:
        logger.error("DAOPHOT aperture photometry failed:"+str(e))
        logger.error(e)
        raise Exception("DAOPHOT failed")
    # Check that the output file exists
    maglim = None
    if os.path.exists(toutfile) is True:
        # Move output file to the final filename
        os.rename(toutfile,outfile)
        if apersfile is not None: shutil.copyfile(tapersfile,apersfile)
        # Remove the temporary links
        for f in [tfile,timfile,toptfile,tcoofile,tapersfile]: os.remove(f)
        # Get the estimated magnitude limit from the logfile
        if os.path.exists(logfile):
            plines = readlines(logfile)
            l1 = grep(plines,"Estimated magnitude limit")
            if len(l1)>0:
                l1 = l1[0]
                l1 = l1[0:len(l1)-7] # strip BELL at end \x07\n
                lo = l1.find(":")
                hi = l1.find("+-")
                # np.float was removed in NumPy 1.24; use the builtin float
                maglim = float(l1[lo+1:hi])
                logger.info(l1.strip()) # clip leading/trailing whitespace
    # Failure
    else:
        logger.error("Output file "+outfile+" NOT Found")
        raise Exception("Output not found")
    # Delete the script
    os.remove(scriptfile)
    # Move the PSF file back
    if movedpsf is True: os.rename(psftemp,base+".psf")
    # Return the catalog
    logger.info("Output file = "+outfile)
    return daoread(outfile), maglim
# Pick PSF stars using DAOPHOT
#-----------------------------
def daopickpsf(imfile=None,catfile=None,maglim=None,outfile=None,nstars=100,optfile=None,logfile=None,logger=None):
    '''
    Select candidate PSF stars from a photometry catalog with DAOPHOT PICKPSF.

    Parameters
    ----------
    imfile : str
        The filename of the DAOPHOT-ready FITS image.
    catfile : str
        The catalog file from which to pick PSF stars.  This is normally the .ap file.
    maglim : float
        The magnitude limit for this image.
    outfile : str, optional
        The output filename of the PSF star list.  Defaults to the base name
        of `imfile` with a ".lst" suffix.
    nstars : int, optional, default = 100
        The number of PSF stars to pick.
    optfile : str, optional
        The option file for `imfile`.  Defaults to the base name of `imfile`
        with a ".opt" suffix.
    logfile : str, optional
        The file that captures the DAOPHOT PICKPSF terminal output.  Defaults
        to the base name of `imfile` with a ".lst.log" suffix.
    logger : logging object
        The logger to use for the logging information.

    Returns
    -------
    cat : astropy table
        The list of PSF stars.  The output catalog and logfile are also
        written to disk.

    Example
    -------

    .. code-block:: python

        psfcat = daopickpsf("image.fits","image.coo",19.5,nstars=100)

    '''
    if logger is None: logger=basiclogger('phot')   # fall back to a basic logger
    logger.info("-- Running DAOPHOT PICKPSF -- ")
    # Both an image and a catalog are required
    if imfile is None:
        logger.warning("No image filename input")
        return None
    if catfile is None:
        logger.warning("No catalog filename input")
        return None
    # Derive default filenames from the image base name
    base = os.path.splitext(os.path.splitext(os.path.basename(imfile))[0])[0]
    if optfile is None: optfile = base+".opt"
    if outfile is None: outfile = base+".lst"
    if logfile is None: logfile = base+".lst.log"
    scriptfile = base+".pickpsf.sh"
    # Clear any leftovers from a previous run
    for fname in (outfile,logfile,scriptfile):
        if os.path.exists(fname): os.remove(fname)
    # All input files must exist
    for fname in (imfile,catfile,optfile):
        if not os.path.exists(fname):
            logger.warning(fname+" NOT found")
            return None
    # Short temporary links keep DAOPHOT happy with long filenames
    fd,tmpname = tempfile.mkstemp(prefix="tlst",dir=".")
    os.close(fd)   # only the unique name is needed
    tbase = os.path.basename(tmpname)
    timfile = tbase+".fits"
    toptfile = tbase+".opt"
    tcatfile = tbase+os.path.splitext(catfile)[1]
    toutfile = tbase+".lst"
    os.symlink(imfile,timfile)
    os.symlink(optfile,toptfile)
    os.symlink(catfile,tcatfile)
    # Driver script: feed PICKPSF commands to daophot via a here-document.
    # Candidates are limited to nstars stars brighter than maglim-1.0.
    script = ("#!/bin/sh\n"
              f"daophot << END_DAOPHOT >> {logfile}\n"
              "OPTIONS\n"
              f"{toptfile}\n"
              "\n"
              f"ATTACH {timfile}\n"
              "PICKPSF\n"
              f"{tcatfile}\n"
              f"{nstars},{maglim-1.0}\n"
              f"{toutfile}\n"
              "EXIT\n"
              "EXIT\n"
              "END_DAOPHOT\n")
    with open(scriptfile,'w') as fh:
        fh.write(script)
    os.chmod(scriptfile,0o775)   # same mode as decimal 509
    # DAOPHOT also reads a default daophot.opt from the working directory
    if not os.path.exists("daophot.opt"):
        shutil.copyfile(base+".opt","daophot.opt")
    # Run the script
    try:
        retcode = subprocess.call(["./"+scriptfile],stderr=subprocess.STDOUT,shell=True)
        if retcode < 0:
            logger.error("Child was terminated by signal"+str(-retcode))
    except OSError as e:
        logger.error("DAOPHOT PICKPSF failed:"+str(e))
        logger.error(e)
        raise Exception("DAOPHOT failed")
    # No output file means DAOPHOT failed
    if not os.path.exists(toutfile):
        logger.error("Output file "+outfile+" NOT Found")
        raise Exception("DAOPHOT failed")
    # Move the output into place and drop the temporary links
    os.rename(toutfile,outfile)
    for fname in (tmpname,timfile,toptfile,tcatfile):
        os.remove(fname)
    # Report how many candidates PICKPSF found
    if os.path.exists(logfile):
        hits = grep(readlines(logfile),"suitable candidates were found.")
        if len(hits)>0:
            logger.info(hits[0].strip())   # clip \n
    # Delete the script
    os.remove(scriptfile)
    # Load and return the catalog
    logger.info("Output file = "+outfile)
    return daoread(outfile)
# Run DAOPHOT PSF
#-------------------
def daopsf(imfile=None,listfile=None,apfile=None,optfile=None,neifile=None,outfile=None,logfile=None,verbose=False,logger=None):
    '''
    Run DAOPHOT PSF to create a .psf file.

    Parameters
    ----------
    imfile : str
        The filename of the DAOPHOT-ready FITS image.
    listfile : str
        The filename of the list of PSF stars.
    apfile : str, optional
        The filename of the aperture photometry file.  By default it is assumed
        that this is the base name of `imfile` with a ".ap" suffix.
    optfile : str, optional
        The option file for `imfile`.  By default it is assumed that this is
        the base name of `imfile` with a ".opt" suffix.
    neifile : str, optional
        The output PSF stars and neighbors file.  By default this is the base name
        of `imfile` with a ".nei" suffix.
    outfile : str, optional
        The output PSF filename.  By default this is the base name of `imfile`
        with a ".psf" suffix.  #ktedit:cpsf; changed so it's "<subiter>.psf"
    logfile : str, optional
        The name of the logfile to contain the output of the DAOPHOT PSF
        run.  By default this is the base name of `imfile` with a ".psf.log" suffix.
    verbose : bool, default is False
        Verbose output of the DAOPHOT PSF parameter errors and PSF star profile errors.
    logger : logging object
        The logger to use for the logging information.

    Returns
    -------
    pararr : list
        A list of lists giving the parameters for the various PSF parameter fits.
        None if the logfile could not be parsed.
    parchi : list
        The array of chi values for the various parameter fits.  None if the
        logfile could not be parsed.
    profs : structured numpy array
        The catalog of PSF star profiles giving ID, CHI and FLAG.
    The output catalog and logfile will be created.

    Example
    -------

    .. code-block:: python

        pararr, parchi, profs = daopsf("image.fits","image.lst")

    '''
    if logger is None: logger=basiclogger('phot') # set up basic logger if necessary
    logger.info("-- Running DAOPHOT PSF -- ")
    # Make sure we have the image file name
    if imfile is None:
        logger.warning("No image filename input")
        return None
    # Make sure we have the list file name
    if listfile is None:
        logger.warning("No list filename input")
        return None
    logger.info("Input file = "+imfile) #ktedit:cpsf
    # Set up filenames, make sure they don't exist
    base = os.path.basename(imfile)
    base = os.path.splitext(os.path.splitext(base)[0])[0]
    if optfile is None: optfile = base+".opt"
    if apfile is None: apfile = base+".ap"
    if outfile is None: outfile = base+".psf"
    if logfile is None: logfile = base+".psf.log"
    if neifile is None: neifile = base+".nei"
    scriptfile = base+".psf.sh"
    for f in [outfile,neifile,logfile,scriptfile]:
        if os.path.exists(f): os.remove(f)
    # Check that necessary files exist
    for f in [imfile,listfile,optfile,apfile]:
        if os.path.exists(f) is False:
            logger.warning(f+" NOT found")
            return None
    # Make temporary short filenames so DAOPHOT can handle them
    tid,tfile = tempfile.mkstemp(prefix="tpsf",dir=".")
    os.close(tid) # close open file
    tbase = os.path.basename(tfile)
    timfile = tbase+".fits"
    toptfile = tbase+".opt"
    tapfile = tbase+".ap"
    listext = os.path.splitext(listfile)[1]
    tlistfile = tbase+listext
    toutfile = tbase+".psf"
    tneifile = tbase+".nei"
    os.symlink(imfile,timfile)
    os.symlink(optfile,toptfile)
    os.symlink(listfile,tlistfile)
    os.symlink(apfile,tapfile)
    # Lines for the DAOPHOT script (here-document feeds the interactive prompts)
    lines = "#!/bin/sh\n" \
            "daophot << END_DAOPHOT >> "+logfile+"\n" \
            "OPTIONS\n" \
            ""+toptfile+"\n" \
            "\n" \
            "ATTACH "+timfile+"\n" \
            "PSF\n" \
            ""+tapfile+"\n" \
            ""+tlistfile+"\n" \
            ""+toutfile+"\n" \
            "\n" \
            "EXIT\n" \
            "EXIT\n" \
            "END_DAOPHOT\n"
    # Write the script
    with open(scriptfile,'w') as f:
        f.writelines(lines)
    os.chmod(scriptfile,0o775)
    # Copy option file to daophot.opt (read by default from the working directory)
    if os.path.exists("daophot.opt") is False: shutil.copyfile(base+".opt","daophot.opt")
    # Run the script
    try:
        retcode = subprocess.call(["./"+scriptfile],stderr=subprocess.STDOUT,shell=True)
        if retcode < 0:
            logger.error("Child was terminated by signal"+str(-retcode))
    except OSError as e:
        logger.error("DAOPHOT PSF failed:"+str(e))
        logger.error(e)
        raise Exception("DAOPHOT failed")
    # Initialize the results so a logfile-parsing problem cannot cause a
    # NameError at the return statement below
    pararr = None
    parchi = None
    profs = None
    # Check that the output file exists and is non-empty
    if (os.path.exists(toutfile)) is True and (os.path.getsize(toutfile)!=0):
        # Move output files to the final filenames
        os.rename(toutfile,outfile)
        os.rename(tneifile,neifile)
        # Remove the temporary links
        for f in [tfile,timfile,toptfile,tlistfile,tapfile]: os.remove(f)
        # Get info from the logfile
        if os.path.exists(logfile):
            plines = readlines(logfile)
            l1 = grep(plines,"Chi Parameters",index=True)
            l2 = grep(plines,"Profile errors",index=True)
            l3 = grep(plines,"File with PSF stars and neighbors",index=True)
            # Parameter errors (need both delimiters to slice safely)
            if (len(l1)>0) and (len(l2)>0):
                parlines = plines[l1[0]+1:l2[0]-1]
                pararr, parchi = parsepars(parlines)
                minchi = np.min(parchi)
                logger.info("Chi = "+str(minchi))
            # Profile errors
            if len(l2)>0:
                # slice up to the "File with PSF stars" line if present, else to the end
                pend = l3[0]-1 if len(l3)>0 else len(plines)
                proflines = plines[l2[0]+1:pend]
                if verbose: logger.info(" ".join(proflines))
                profs = parseprofs(proflines)
                logger.info(str(len(profs))+" PSF stars used")
            else:
                logger.error("No DAOPHOT profile errors found in logfile")
                raise Exception("DAOPHOT problem")
    # Failure
    else:
        logger.error("Output file "+outfile+" NOT Found")
        raise Exception("DAOPHOT output not found")
    # Delete the script
    os.remove(scriptfile)
    # Return the parameter and profile error information
    logger.info("Output file = "+outfile)
    return pararr, parchi, profs
# Subtract neighbors of PSF stars
#--------------------------------
def subpsfnei(imfile=None,listfile=None,photfile=None,outfile=None,optfile=None,psffile=None,
              nstfile=None,grpfile=None,logfile=None,logger=None):
    '''
    This subtracts neighbors of PSF stars so that an improved PSF can be made.
    Parameters
    ----------
    imfile : str
        The filename of the DAOPHOT-ready FITS image.
    listfile : str
        The filename of the list of PSF stars.
    photfile : str, optional
        The filename of the photometry file (normally the .nei aperture photometry file).
        By default it is assumed that this is the base name of `imfile` with a ".nei" suffix.
    outfile : str
        The FITS filename for the image with the neighbors subtracted. By default this is
        the base name of `imfile` with a "a.fits" suffix.
    optfile : str, optional
        The option file for `imfile`. By default it is assumed that this is
        the base name of `imfile` with a ".opt" suffix.
    psffile : str, optional
        The name of the PSF file. By default it is assumed that this is the base name of
        `imfile` with a ".psf" suffix.
    nstfile : str, optional
        The name of the output .nst file.
        By default it is assumed that this is the base name of `imfile` with a ".nst" suffix.
    grpfile : str, optional
        The name of the output .grp file that contains information on the groups of stars.
        By default it is assumed that this is the base name of `imfile` with a ".grp" suffix.
    logfile : str, optional
        The name of the logfile to constrain the output of the DAOPHOT FIND
        run. By default this is the base name of `imfile` with a ".subnei.log" suffix.
    logger : logging object
        The logger to use for the loggin information.
    Returns
    -------
    Nothing is returned. The subtracted image and logfile will be created.
    Example
    -------
    .. code-block:: python
        subpsfnei("image.fits","image.lst","image.nei","imagea.fits")
    '''
    if logger is None: logger=basiclogger('phot') # set up basic logger if necessary
    logger.info("-- Subtracting PSF stars neighbors -- ")
    # Make sure we have the image file name
    if imfile is None:
        logger.warning("No image filename input")
        return
    # Make sure we have the list file name
    if listfile is None:
        logger.warning("No list filename input")
        return
    # Make sure we have the subtracted image (output) file name
    if outfile is None:
        logger.warning("No subtracted image file name input")
        return
    # Set up filenames, make sure they don't exist
    base = os.path.basename(imfile)
    base = os.path.splitext(os.path.splitext(base)[0])[0]
    if optfile is None: optfile = base+".opt"
    if photfile is None: photfile = base+".nei"
    # NOTE(review): dead branch -- outfile is None already returned above,
    # so this default can never fire; presumably kept for symmetry with the
    # other routines in this module.
    if outfile is None: outfile = base+"a.fits"
    if logfile is None: logfile = base+".subnei.log"
    if psffile is None: psffile = base+".psf"
    if nstfile is None: nstfile = base+".nst"
    if grpfile is None: grpfile = base+".grp"
    scriptfile = base+".subnei.sh"
    # Remove any stale outputs so we can detect success by their existence
    for f in [outfile,logfile,scriptfile,nstfile,grpfile]:
        if os.path.exists(f): os.remove(f)
    # Check that necessary files exist
    for f in [imfile,listfile,optfile,psffile,photfile]:
        if os.path.exists(f) is False:
            logger.warning(f+" NOT found")
            return
    # Make temporary short filenames to DAOPHOT can handle them
    # (DAOPHOT has a limited filename length, so symlink under short names)
    tid,tfile = tempfile.mkstemp(prefix="tsubnei",dir=".")
    os.close(tid) # close open file
    tbase = os.path.basename(tfile)
    timfile = tbase+".fits"
    toptfile = tbase+".opt"
    tphotfile = tbase+".ap"
    listext = os.path.splitext(listfile)[1]
    tlistfile = tbase+listext
    toutfile = tbase+"a.fits"
    tpsffile = tbase+".psf"
    tnstfile = tbase+".nst"
    tgrpfile = tbase+".grp"
    os.symlink(imfile,timfile)
    os.symlink(optfile,toptfile)
    os.symlink(listfile,tlistfile)
    os.symlink(photfile,tphotfile)
    os.symlink(psffile,tpsffile)
    # Lines for the DAOPHOT script.
    # Command sequence: GROUP clusters the photometry stars (the "5." is
    # presumably the GROUP critical-overlap value -- confirm against the
    # DAOPHOT manual), NSTAR fits the grouped stars with the PSF, and
    # SUBSTAR subtracts the fitted stars except those in the PSF list,
    # producing the neighbor-subtracted image.
    lines = "#!/bin/sh\n" \
            "daophot << END_DAOPHOT >> "+logfile+"\n" \
            "OPTIONS\n" \
            ""+toptfile+"\n" \
            "\n" \
            "ATTACH "+timfile+"\n" \
            "GROUP\n" \
            ""+tphotfile+"\n" \
            ""+tpsffile+"\n" \
            "5.\n" \
            ""+tgrpfile+"\n" \
            "NSTAR\n" \
            ""+tpsffile+"\n" \
            ""+tgrpfile+"\n" \
            ""+tnstfile+"\n" \
            "SUBSTAR\n" \
            ""+tpsffile+"\n" \
            ""+tnstfile+"\n" \
            "y\n" \
            ""+tlistfile+"\n" \
            ""+toutfile+"\n" \
            "\n" \
            "EXIT\n" \
            "END_DAOPHOT\n"
    # Write the script
    f = open(scriptfile,'w')
    f.writelines(lines)
    f.close()
    # 509 == 0o775: make the script executable
    os.chmod(scriptfile,509)
    # Copy option file to daophot.opt
    if os.path.exists("daophot.opt") is False: shutil.copyfile(base+".opt","daophot.opt")
    # Run the script
    try:
        retcode = subprocess.call(["./"+scriptfile],stderr=subprocess.STDOUT,shell=True)
        if retcode < 0:
            logger.error("Child was terminated by signal"+str(-retcode))
        else:
            pass
    except OSError as e:
        logger.error("PSF star neighbor subtracting failed:"+str(e))
        logger.error(e)
        raise Exception("PSF subtraction failed")
    # Check that the output file exists
    if os.path.exists(toutfile):
        # Move output file to the final filename
        os.rename(toutfile,outfile)
        os.rename(tnstfile,nstfile)
        os.rename(tgrpfile,grpfile)
        # Remove the temporary links
        for f in [tfile,timfile,toptfile,tlistfile,tphotfile]: os.remove(f)
    else:
        logger.error("Output file "+outfile+" NOT Found")
        raise Exception("PSF subtraction failed")
    # Delete the script
    os.remove(scriptfile)
    # Print final output filename
    logger.info("Output file = "+outfile)
# Create DAOPHOT PSF
#-------------------
def createpsf(imfile=None,apfile=None,listfile=None,psffile=None,doiter=True,maxiter=5,minstars=6,nsigrej=2,subneighbors=True,
              subfile=None,optfile=None,neifile=None,nstfile=None,grpfile=None,meta=None,logfile=None,verbose=False,logger=None,
              submaxit=5,subminit=2):#ktedit:cpsf
    '''
    Iteratively create a DAOPHOT PSF for an image.
    Parameters
    ----------
    imfile : str
        The filename of the DAOPHOT-ready FITS image.
    apfile : str, optional
        The filename of the photometry file (normally the .ap aperture photometry file).
        By default it is assumed that this is the base name of `imfile` with a ".ap" suffix.
    listfile : str, optional
        The filename that will contain the final list of PSF stars. By default this is the
        base name of `imfile` with a ".lst" suffix.
    psffile : str, optional
        The name of the PSF file. By default it is assumed that this is the base name of
        `imfile` with a ".psf" suffix.
    doiter : bool, default is True
        Iteratively remove bad or suspect PSF stars and refit the PSF.
    maxiter : int, optional, default = 5
        The maximum number of iterations of removing suspect stars.
    minstars : int, optional, default = 6
        The minimum required stars for a PSF.
    nsigrej : float, optional, default = 2
        Reject stars with profile rms scatter higher than 2x the median.
    subneighbors : bool, optional, default = True
        Subtract stars neighboring the PSF stars and then refit the PSF.
    subfile : str, optional
        The FITS filename for the image with the neighbors subtracted. By default this is
        the base name of `imfile` with a "a.fits" suffix.
    optfile : str, optional
        The option file for `imfile`. By default it is assumed that this is
        the base name of `imfile` with a ".opt" suffix.
    neifile : str, optional
        The name of the output .nei file of PSF stars and neighbors. By default is is assumed
        that this is the base name of `imfile` with a ".nei" suffix.
    nstfile : str, optional
        The name of the output .nst file created by NSTAR.
        By default it is assumed that this is the base name of `imfile` with a ".nst" suffix.
    grpfile : str, optional
        The name of the output .grp file that contains information on the groups of stars.
        By default it is assumed that this is the base name of `imfile` with a ".grp" suffix.
    meta : str, optional
        The meta-data dictionary for this image.
    logfile : str, optional
        The name of the logfile to constrain the output of the DAOPHOT FIND
        run. By default this is the base name of `imfile` with a ".subnei.log" suffix.
    verbose : bool, default is False
        Verbose output of the DAOPHOT PSF parameter errors and PSF star profile errors.
    logger : logging object
        The logger to use for the loggin information.
    submaxit : int, optional, default = 5  #ktedit:cpsf
        The maximum number of times to iterate the entire flag & neighbor subtraction process
    subminit : int, optional, default = 2  #ktedit:cpsf
        The minimum number of times to iterate the entire flag & neighbor subtraction process
    Returns
    -------
    #Nothing is returned.  The PSF, subtracted image and logfile are created.
    subiter is returned #ktedit:cpsf; so all the neighbor-subtracted images are saved (for now)
    Example
    -------
    .. code-block:: python
        createpsf("image.fits","image.ap","image.lst","image.psf")
    '''
    if logger is None: logger=basiclogger('phot') # set up basic logger if necessary
    logger.info("-- Creating PSF Iteratively --")
    # Make sure we have the image file name
    if imfile is None:
        logger.warning("No image filename input")
        return
    # Set up filenames, make sure they don't exist
    base = os.path.basename(imfile)
    base = os.path.splitext(os.path.splitext(base)[0])[0]
    if optfile is None: optfile = base+".opt"
    if listfile is None: listfile = base+".lst"
    if apfile is None: apfile = base+".ap"
    if psffile is None: psffile = base+".psf"
    # BUGFIX: this previously assigned to "outfile" instead of "subfile",
    # leaving subfile=None and raising a NameError below whenever the caller
    # supplied their own subfile.
    if subfile is None: subfile = base+"a.fits"
    if logfile is None: logfile = base+".cpsf.log"
    if neifile is None: neifile = base+".nei"
    # BUGFIX: was base+".nsf" -- every other routine (e.g. subpsfnei) uses ".nst"
    if nstfile is None: nstfile = base+".nst"
    if grpfile is None: grpfile = base+".grp"
    # Remove stale outputs so success can be detected by their existence
    for f in [subfile,logfile,psffile,nstfile,grpfile,neifile]:
        if os.path.exists(f): os.remove(f)
    # Check that necessary files exist
    for f in [imfile,optfile,listfile,apfile]:
        if os.path.exists(f) is False:
            logger.warning(f+" NOT found")
            return
    # Working list file
    wlistfile = listfile+"1"
    if os.path.exists(wlistfile): os.remove(wlistfile)
    shutil.copy(listfile,wlistfile)
    # Make copy of original PSF list
    if os.path.exists(listfile+".orig"): os.remove(listfile+".orig")
    shutil.copy(listfile,listfile+".orig")
    #---------------------------------------------------------------- #ktedit:cpsf; start of changed section
    # Iterate entire flag & neighbor subtraction process
    #----------------------------------------------------------------
    subiter = 1
    subendflag = 0
    sublastchi = 99.99
    mean_subchi_last = 0
    # NOTE(review): subdchi_thresh is defined but dchi_thresh (same value) is
    # used in the outer-loop convergence test below -- confirm intent.
    subdchi_thresh = 0.002
    while (subendflag==0):
        logger.info("Flag & neighbor subtraction iter = "+str(subiter))
        #---------------------------------------------------------------- #ktedit:cpsf b
        # Iterate subtraction of flagged PSF sources from psf list           #ktedit:cpsf
        #---------
        if doiter is False: maxiter=1
        iter = 1
        endflag = 0
        lastchi = 99.99
        dchi_thresh = 0.002
        #---------------------------------------------------------------- #ktedit:cpsf t
        # check subiter to make sure psf is being run on the neighbor-subtracted
        # image from the last iteration, if subiter>1
        psfnames=["GAUSSIAN","MOFFAT15","MOFFAT25","MOFFAT35","LORENTZ","PENNY1","PENNY2"]
        if (subiter>1):
            imfile_new = base+str(subiter-1)+"a.fits"  #nei-sub image from last iter.
            os.rename(imfile,"temp_"+imfile)           #move the image to a temporary name
            os.rename(imfile_new,imfile)               #move the subimage from last iter to image's filename
            logger.info(imfile+" moved to temp_"+imfile+", "+imfile_new+" moved to "+imfile)
            # make sure AN != -6 (in the opt file) after the first iteration
            if (subiter==2):
                psfan = readlines(psffile)[0][0:10].strip() #the analytic function chosen for the psf
                lookup_index=psfnames.index(psfan)+1 #the number to make AN based on the analytic function chosen
                logger.info("new AN = "+str(lookup_index))
                opttable=readlines(optfile) #the option file that you need to change the AN value in
                opttable[14]="AN = %8.2f"%(lookup_index)
                writelines(optfile,opttable,overwrite=True)
        #---------------------------------------------------------------- #ktedit:cpsf b
        while (endflag==0):
            logger.info("Iter = "+str(iter))
            # Run DAOPSF
            try:
                pararr, parchi, profs = daopsf(imfile,wlistfile,apfile,logger=logger)
                chi = np.min(parchi)
                mean_chi = np.mean(profs['SIG'])                 #ktedit:cpsf
                logger.info("mean chi = "+str(mean_chi))         #ktedit:cpsf
            except:
                logger.error("Failure in DAOPSF")
                raise
            # Check for bad stars
            nstars = len(profs)
            gdstars = (profs['FLAG'] != 'saturated')
            medsig = np.median(profs['SIG'][gdstars])
            bdstars = (profs['FLAG'] != '') | (profs['SIG']>nsigrej*medsig)
            nbdstars = np.sum(bdstars)
            # Make sure we have enough stars left
            if (nstars-nbdstars < minstars):
                nbdstars = nstars-minstars
                si = np.argsort(profs['SIG'])[::-1]
                bdstars = si[0:nbdstars]   # take the worse ones
            logger.info("  "+str(nbdstars)+" stars with flag or high sig")
            # Delete stars with flags or high SIG values
            if (nbdstars>0) & (nstars>minstars):
                listlines = readlines(wlistfile)
                # Read the list
                lstcat = daoread(wlistfile)
                # Match up with the stars we are deleting
                mid, ind1, ind2 = np.intersect1d(profs[bdstars]['ID'],lstcat['ID'],return_indices=True)
                # Remove the lines from listlines (header is 3 lines, hence +3)
                newlistlines = remove_indices(listlines,ind2+3)
                # Write new list
                writelines(wlistfile,newlistlines,overwrite=True)
                logger.info("  Removing IDs="+str(" ".join(profs[bdstars]['ID'].astype(str))))
                logger.info("  "+str(nbdstars)+" bad stars removed. "+str(nstars-nbdstars)+" PSF stars left")
            # Should we end flagged star subtraction?                        #ktedit:cpsf
            if (iter==maxiter) | (nbdstars==0) | (nstars<=minstars) | (np.abs(lastchi-chi)<dchi_thresh): endflag=1
            iter = iter+1
            lastchi = chi
        #---------------------------------------------------------------- #ktedit:cpsf t
        # copy image fil & last iteration's subtracted file back to their og names
        if (subiter>1):
            os.rename(imfile,imfile_new)        # put last iteration's subfile back
            os.rename("temp_"+imfile,imfile)    # put the imfile back
            logger.info(imfile+" moved to "+imfile_new+", "+"temp_"+imfile+" moved back to "+imfile)
        #---------------------------------------------------------------- #ktedit:cpsf b
        # Subtract PSF star neighbors
        if subneighbors:
            subfile = base+"a.fits"
            #subfile = base+str(subiter)+"a.fits"                            #ktedit:cpsf
            try:
                subpsfnei(imfile,wlistfile,neifile,subfile,psffile=psffile,logger=logger)
            except:
                logger.error("Subtracting neighbors failed.  Keeping original PSF file")
            # Check that the subtracted image exist and rerun DAOPSF
            if os.path.exists(subfile):
                # Final run of DAOPSF
                logger.info("Final DAOPSF run (on subtracted image)")        #ktedit:cpsf
                #---------------------------------------------------------------- #ktedit:cpsf t
                # copy the imfile somewhere temporary, again, and replace with THIS iteration's subfile that was just created
                os.rename(imfile,"temp_"+imfile)
                os.rename(subfile,imfile)
                logger.info(imfile+" once again moved to temp_"+imfile+", "+subfile+" moved to "+imfile)
                #---------------------------------------------------------------- #ktedit:cpsf b
                try:
                    spararr, sparchi, sprofs = daopsf(imfile,wlistfile,apfile,logger=logger)
                    chi = np.min(sparchi)
                    #---------------------------------------------------------------- #ktedit:cpsf t
                    subsigs, profsind, sprofsind = np.intersect1d(profs['ID'],sprofs['ID'],return_indices=True)
                    profsigs=profs['SIG'][profsind]
                    sprofsigs=sprofs['SIG'][sprofsind]
                    diffsigs=np.absolute(profsigs-sprofsigs)
                    mean_diffchi=np.mean(diffsigs)
                    mean_subchi=np.mean(sprofs['SIG'])
                    logger.info("mean (diff in individual chi values) = "+str(mean_diffchi))
                    logger.info("mean subchi, chi, last subchi values= "+str(mean_subchi)+", "+str(mean_chi)+", "+str(mean_subchi_last))
                    logger.info("diff between mean chi and mean subchi values = "+str(abs(mean_chi-mean_subchi)))
                    logger.info("diff between this and last mean subchi values = "+str(abs(mean_subchi_last-mean_subchi)))
                    mean_subchi_last = mean_subchi
                    #---------------------------------------------------------------- #ktedit:cpsf b
                except:
                    logger.error("Failure in DAOPSF")
                    raise
        #---------------------------------------------------------------- #ktedit:cpsf t
        finalsubfile=base+str(subiter)+"a.fits"
        if ((subiter==submaxit) | (np.abs(sublastchi-chi)<dchi_thresh)) & (subiter>=subminit):
            subendflag=1
            finalsubfile=base+"a.fits"
        sublastchi=chi
        os.rename(imfile,finalsubfile)     #copy subfile to a version that marks the iteration
        os.rename("temp_"+imfile,imfile)   #copy the image file back to its name
        logger.info(imfile+" moved back to "+finalsubfile+", temp_"+imfile+" moved back to "+imfile)
        subiter=subiter+1
    #---------------------------------------------------------------- #ktedit:cpsf; end of changed section
    # Put information in meta
    if meta is not None:
        meta['PSFCHI'] = (chi,"Final PSF Chi value")
        meta['PSFSTARS'] = (len(profs),"Number of PSF stars")
    # Copy working list to final list
    if os.path.exists(listfile): os.remove(listfile)
    shutil.move(wlistfile,listfile)
    logger.info("Final list of PSF stars in "+listfile+".  Original list in "+listfile+".orig")
    return subiter   #ktedit:cpsf
# Run ALLSTAR
#-------------
def allstar(imfile=None,psffile=None,apfile=None,subfile=None,outfile=None,optfile=None,meta=None,logfile=None,logger=None):
    '''
    Run DAOPHOT ALLSTAR on an image.
    Parameters
    ----------
    imfile : str
        The filename of the DAOPHOT-ready FITS image.
    psffile : str, optional
        The name of the PSF file. By default it is assumed that this is the base name of
        `imfile` with a ".psf" suffix.
    apfile : str, optional
        The filename of the photometry file (normally the .ap aperture photometry file).
        By default it is assumed that this is the base name of `imfile` with a ".ap" suffix.
    subfile : str, optional
        The FITS filename for the image with all stars subtracted. By default this is
        the base name of `imfile` with a "s.fits" suffix.
    outfile : str, optional
        The file name of the final .als source catalog.
    optfile : str, optional
        The option file for `imfile`. By default it is assumed that this is
        the base name of `imfile` with a ".als.opt" suffix.
    meta : str, optional
        The meta-data dictionary for this image.
    logfile : str, optional
        The name of the logfile to constrain the output of the DAOPHOT FIND
        run. By default this is the base name of `imfile` with a ".subnei.log" suffix.
    logger : logging object
        The logger to use for the loggin information.
    Returns
    -------
    cat : astropy table
        The catalog of ALLSTAR sources.
        The PSF subtracted image and logfile will also be created.
    Example
    -------
    .. code-block:: python
        cat = allstar("image.fits","image.psf")
    '''
    if logger is None: logger=basiclogger('phot') # set up basic logger if necessary
    logger.info("-- Running ALLSTAR --")
    # Make sure we have the image file name
    if imfile is None:
        logger.warning("No image filename input")
        return
    # Set up filenames, make sure they don't exist
    base = os.path.basename(imfile)
    base = os.path.splitext(os.path.splitext(base)[0])[0]
    if psffile is None: psffile = base+".psf"
    if optfile is None: optfile = base+".als.opt"
    if apfile is None: apfile = base+".ap"
    if subfile is None: subfile = base+"s.fits"
    if outfile is None: outfile = base+".als"
    if logfile is None: logfile = base+".als.log"
    scriptfile = base+".als.sh"
    # Remove stale outputs so success can be detected by their existence
    for f in [outfile,subfile,logfile,scriptfile]:
        if os.path.exists(f): os.remove(f)
    # Check that necessary files exist
    for f in [imfile,psffile,apfile,optfile]:
        if os.path.exists(f) is False:
            logger.warning(f+" NOT found")
            return
    # Make temporary short filenames so DAOPHOT can handle them
    # (DAOPHOT has a limited filename length, so symlink under short names)
    tid,tfile = tempfile.mkstemp(prefix="tals",dir=".")
    os.close(tid)   # close open file
    tbase = os.path.basename(tfile)
    timfile = tbase+".fits"
    toptfile = tbase+".als.opt"
    tapfile = tbase+".ap"
    tpsffile = tbase+".psf"
    tsubfile = tbase+"s.fits"
    toutfile = tbase+".als"
    os.symlink(imfile,timfile)
    os.symlink(optfile,toptfile)
    os.symlink(apfile,tapfile)
    os.symlink(psffile,tpsffile)
    # Load the option file lines
    optlines = readlines(optfile)
    # BUGFIX: was a stray debug print() bypassing the module logger
    logger.info("optfile = "+optfile)
    optlines=[line +'\n' for line in optlines]
    # Lines for the DAOPHOT ALLSTAR script: the option lines are fed to
    # ALLSTAR first, then the image, PSF, photometry and output filenames.
    lines = ["#!/bin/sh\n",
             "allstar << END_ALLSTAR >> "+logfile+"\n"]
    lines += optlines
    lines += ["\n",
              timfile+"\n",
              tpsffile+"\n",
              tapfile+"\n",
              toutfile+"\n",
              tsubfile+"\n",
              "EXIT\n",
              "EXIT\n",
              "END_ALLSTAR\n"]
    # Write the script
    f = open(scriptfile,'w')
    f.writelines(lines)
    f.close()
    # 509 == 0o775: make the script executable
    os.chmod(scriptfile,509)
    # Copy option file to daophot.opt
    if os.path.exists("allstar.opt") is False: shutil.copyfile(optfile,"allstar.opt")
    # Run the script
    try:
        retcode = subprocess.call(["./"+scriptfile],stderr=subprocess.STDOUT,shell=False)
        if retcode < 0:
            logger.warning("Child was terminated by signal"+str(-retcode))
        else:
            pass
    except OSError as e:
        logger.warning("ALLSTAR failed:"+str(e))
        logger.warning(e)
        raise Exception("ALLSTAR failed")
    # Check that the output file exists
    if os.path.exists(toutfile) is True:
        # Move output file to the final filename
        os.rename(toutfile,outfile)
        os.rename(tsubfile,subfile)
        # Remove the temporary links
        for f in [tfile,timfile,toptfile,tpsffile,tapfile]: os.remove(f)
        # How many sources converged (the .als file has a 3-line header)
        num = numlines(outfile)-3
        logger.info(str(num)+" stars converged")
        logger.info("Output file = "+outfile)
        logger.info("Subfile = "+subfile)
    # Failure
    else:
        logger.error("Output file "+outfile+" NOT Found")
        raise Exception("ALLSTAR failed")
    # Delete the script
    os.remove(scriptfile)
    # Put information in the header
    if meta is not None:
        meta["NALLSTAR"] = (num,"Number of ALLSTAR converged sources")
    # Return the final catalog
    return daoread(outfile)
# Calculate aperture corrections
#-------------------------------
def daogrow(photfile,aperfile,meta,nfree=3,fixedvals=None,maxerr=0.2,logfile=None,logger=None):
    '''
    Run DAOGROW that calculates curve of growths using aperture photometry.
    Parameters
    ----------
    photfile : str
        The aperture photometry file.
    aperfile : str
        The file containing the apertures used for the aperture photometry.
    meta : astropy header
        The meta-data dictionary for the image.
    nfree : float, optional, default = 3
        The number of parameters to fit. Max is 5.
    fixedvals : float, optional
        The values for the parameters that are fixed. Shoul have 5-nfree elements.
        By default they are [1.03, 0.2, 0.1, 0.6, 0.0].
    maxerr : float, optional, default = 0.2
        The maximum error to allow in DAOGROW.
    logfile : str, optional
        The name of the logfile to constrain the output of the DAOPHOT FIND
        run. By default this is the base name of `imfile` with a ".gro.log" suffix.
    logger : logging object
        The logger to use for the logging information.
    Returns
    -------
    totcat : astropy table
        The aperture corrected photometry file (.tot).
        Also, the .tot and other DAOGROW files are created.
    Example
    -------
    .. code-block:: python
        totcat = daogrow("im101a.ap","photo.opt",meta)
    '''
    if logger is None: logger=basiclogger('phot') # set up basic logger if necessary
    logger.info("-- Running DAOGROW --")
    # Make sure we have photfile
    if photfile is None:
        logger.warning("No photfile input")
        return None
    # Make sure we have aperfile
    if aperfile is None:
        logger.warning("No aperfile input")
        return None
    # Make sure we have the meta-data dictionary
    if meta is None:
        logger.warning("No meta input")
        return None
    # Checked number of elements for fixedvals
    if fixedvals is not None:
        if len(fixedvals) != 5-nfree:
            logger.warning("Fixedvals must have 5-nfree elements."+str(len(fixedvals))+" found.")
            return None
    # Check that necessary files exist
    for f in [photfile,aperfile]:
        if os.path.exists(f) is False:
            # BUGFIX: previously referenced undefined "apfile" (NameError);
            # report the actual missing file.
            logger.warning(f+" NOT found")
            return None
    # Set up filenames, make sure they don't exist
    base = os.path.basename(photfile)
    base = os.path.splitext(os.path.splitext(base)[0])[0]
    if logfile is None: logfile = base+".gro.log"
    outfile = base+".tot"
    scriptfile = base+".gro.sh"
    for f in [logfile,scriptfile,outfile,base+".poi",base+".cur",base+".gro",base+".crl"]:
        if os.path.exists(f): os.remove(f)
    # Make temporary short filenames to DAOPHOT can handle them
    tid,tfile = tempfile.mkstemp(prefix="tcoo",dir=".")
    os.close(tid)   # close open file
    tbase = os.path.basename(tfile)
    tphotfile = tbase+".ap"
    taperfile = tbase+".opt"
    textfile = tbase+".ext"
    tinffile = tbase+".inf"
    toutfile = tbase+".tot"
    os.symlink(photfile,tphotfile)
    os.symlink(aperfile,taperfile)
    # Write the .inf and .ext files
    # Format example:  F1-00507800_01a    11 04 51  1.900   30.000
    dateobs = meta['DATE-OBS']
    timearr = (dateobs.split('T')[1]).split(':')
    if meta.get('airmass') is not None:
        airmass = meta['airmass']
    else:
        airmass = 1.0
    lines = " %-23s %9d %3d %2d %6.3f %9.3f\n" % (tbase,int(timearr[0]),int(timearr[1]),int(float(timearr[2])),airmass,meta['exptime'])
    writelines(tinffile,lines)
    # .ext just has the .ap filename
    writelines(textfile,tphotfile+"\n")
    # The fixed values for the other parameters that are fixed
    if fixedvals is None:
        allfixedvals = [1.03, 0.2, 0.1, 0.6, 0.0]
        fixedvals = allfixedvals[nfree:]
    # Lines for the DAOGROW script
    lines = "#!/bin/sh\n" \
            "daogrow << DONE >> "+logfile+"\n" \
            ""+taperfile+"\n" \
            "\n" \
            ""+tinffile+"\n" \
            ""+textfile+"\n" \
            ""+str(nfree)+"\n" \
            ""+",".join(np.array(fixedvals).astype(str))+"\n" \
            ""+str(maxerr)+"\n" \
            "DONE\n"
    # Write the script
    f = open(scriptfile,'w')
    f.writelines(lines)
    f.close()
    # 509 == 0o775: make the script executable
    os.chmod(scriptfile,509)
    # Run the script
    try:
        retcode = subprocess.call(["./"+scriptfile],stderr=subprocess.STDOUT,shell=False)
        if retcode < 0:
            logger.error("Child was terminated by signal"+str(-retcode))
        else:
            pass
    except OSError as e:
        logger.error("DAOGROW failed:"+str(e))
        logger.error(e)
        raise Exception("DAOGROW failed")
    # Check that the outfile file exists
    if os.path.exists(toutfile) is True:
        # Move output file to the final filename
        os.rename(toutfile,outfile)
        if os.path.exists(tbase+".poi"): os.rename(tbase+".poi",base+".poi")
        if os.path.exists(tbase+".cur"): os.rename(tbase+".cur",base+".cur")
        if os.path.exists(tbase+".gro"): os.rename(tbase+".gro",base+".gro")
        if os.path.exists(tbase+".crl"): os.rename(tbase+".crl",base+".crl")
        # NOTE(review): temp-link cleanup is deliberately(?) commented out
        # below -- confirm whether the temp files should be removed.
        # Remove the temporary links
        ###for f in [tfile,tphotfile,taperfile,tinffile,textfile]: os.remove(f)
    # Failure
    else:
        logger.error("Output file "+outfile+" NOT Found")
        raise Exception("Output not found")
    # Delete the script
    ###os.remove(scriptfile)
    # Load and return the catalog
    logger.info("Output file = "+outfile)
    # Return the .tot catalog
    return daoread(outfile)
# Calculate aperture corrections
#-------------------------------
def apcor(imfile=None,listfile=None,psffile=None,meta=None,optfile=None,alsoptfile=None,logger=None):
    '''
    Calculate the aperture correction for an image.
    Parameters
    ----------
    imfile : str
        The filename of the PSF-neighbor-subtracted image.
    listfile : str
        The list of PSF stars.
    psffile : str
        The name of the PSF file.
    meta : astropy header
        The meta-data dictionary for the image.
    optfile : str
        The DAOPHOT option file for `imfile`.
    alsoptfile : str
        The ALLSTAR option file for `imfile`.
    logger : logging object
        The logger to use for the logging information.
    Returns
    -------
    apcor : float
        The aperture correction in magnitudes.
    Example
    -------
    .. code-block:: python
        apcor = apcor("im101a.fits","im101.lst","im101.psf",meta,"im101.opt")
    '''
    # Fall back to a basic logger when none was supplied
    if logger is None: logger=basiclogger('phot')
    logger.info("-- Calculating aperture correction --")
    # Every one of these inputs is required; bail out on the first missing one
    required = [(imfile,"No image filename input"),
                (listfile,"No listfile input"),
                (psffile,"No psffile input"),
                (optfile,"No optfile input"),
                (alsoptfile,"No alsoptfile input"),
                (meta,"No meta input")]
    for value,message in required:
        if value is None:
            logger.warning(message)
            return
    # The input files must also exist on disk
    for fname in [imfile,listfile,psffile,optfile]:
        if not os.path.exists(fname):
            logger.warning(fname+" NOT found")
            return
    base = os.path.splitext(os.path.splitext(os.path.basename(imfile))[0])[0]
    # Step 1: aperture photometry of the PSF stars on the PSF-neighbor-subtracted image
    logger.info("Getting aperture photometry for PSF stars")
    apertures = [3.0, 3.7965, 4.8046, 6.0803, 7.6947, 9.7377, 12.3232, 15.5952, 19.7360, \
                 24.9762, 31.6077, 40.0000, 50.0000]
    apersfile = base+".apers"
    apcat, maglim = daoaperphot(imfile,listfile,apertures,optfile=optfile,apersfile=apersfile,logger=logger)
    # Step 2: PSF photometry on the same image
    psfcat = allstar(imfile,psffile,base+".ap",optfile=alsoptfile,logger=logger)
    # Step 3: DAOGROW builds the curve of growth (.tot, .cur, .poi files).
    # The .tot and .als magnitudes then give a per-star delta-mag
    # (see mkdel.pro) from which the total aperture correction follows.
    totcat = daogrow(base+".ap",apersfile,meta,logger=logger)
    # The fit occasionally yields all-NaN magnitudes; retry with a
    # different number of free parameters when that happens
    if np.sum(np.isnan(totcat['MAG'])) > 0:
        logger.info("DAOGROW .tot file has NANs. Trying 2 free parameters instead.")
        totcat = daogrow(base+".ap",apersfile,meta,nfree=2,logger=logger)
        if np.sum(np.isnan(totcat['MAG'])) > 0:
            logger.info("DAOGROW .tot file has NANs. Trying 4 free parameters instead.")
            totcat = daogrow(base+".ap",apersfile,meta,nfree=4,logger=logger)
    # Step 4: median offset between the PSF magnitudes and the
    # aperture-corrected total magnitudes, matched by star ID
    totcat = daoread(base+".tot")
    mid, ind1, ind2 = np.intersect1d(psfcat['ID'],totcat['ID'],return_indices=True)
    apcorr = np.median(psfcat[ind1]['MAG']-totcat[ind2]['MAG'])
    logger.info("aperture correction = %7.3f mag" % apcorr)
    return apcorr
|
astro-datalabREPO_NAMEnscPATH_START.@nsc_extracted@nsc-master@python@nsc@phot_datalab.py@.PATH_END.py
|
{
"filename": "image_classification.py",
"repo_name": "astrolabsoftware/fink-science",
"repo_path": "fink-science_extracted/fink-science-master/fink_science/image_classification/image_classification.py",
"type": "Python"
}
|
# Copyright 2021 AstroLab Software
# Author: Roman Le Montagner
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import gzip
import io
from astropy.io import fits
import numpy as np
from fink_science.image_classification.utils import img_normalizer
from skimage.exposure import equalize_adapthist
from skimage.filters import median
from skimage.filters import threshold_triangle
from skimage.measure import label
from skimage.measure import regionprops_table
from skimage.segmentation import chan_vese
def is_neg(img):
    """ Check whether any pixel of an image is negative

    Parameters
    ----------
    img: 2D numpy array
        alert image after extraction from gzip format

    Returns
    -------
    out: bool
        True when at least one value of img is negative, False otherwise

    Examples
    --------
    >>> is_neg([[-1, 1, 1], [3, 5, -1], [0, 4, -3]])
    True
    >>> is_neg([[1, 1, 1], [3, 5, 1], [0, 4, 3]])
    False
    """
    # De Morgan form of "not all pixels are >= 0":
    # at least one pixel fails the >= 0 test.
    nonneg = np.greater_equal(img, 0)
    return bool(np.any(np.logical_not(nonneg)))
def peak_snr(img):
    """ Estimate the noise level of an image as the ratio
    of its peak value to its mean value

    NB: The noise level threshold for the image classification is set to 3.5

    Parameters
    ----------
    img: 2D numpy array
        alert image after extraction from gzip format

    Returns
    -------
    out: float
        a noise level estimation of the image

    Examples
    --------
    >>> peak_snr([[0, 1, 2], [3, 40, 5], [2, 1, 0]])
    6.666666666666667
    >>> peak_snr([[0, 0, 0], [1, 0.5, 1], [1, 1, 1]])
    1.6363636363636362
    """
    arr = np.asarray(img)
    return arr.max() / arr.mean()
def img_labelisation(stamp: bytes, noise_threshold: float = 3.5) -> str:
    """ Perform image classification based on their visual content.

    Two final labels available for images which are not noisy and not corrupted.
    Star label means this image contains only ponctual objects.
    Extend label means this image contains at least one extend object.
    Object size is only based on a perimeter calculation and custom thresholding, false positive
    can occur when ponctual objects is sufficiently large or multiple ponctual object is
    sufficiently close to pass thresholds.

    Parameters
    ----------
    stamp: gzip format file
        an image in fits file compressed into a gzip format file
    noise_threshold: float
        peak-SNR value below which the image is labelled "noisy" (default 3.5)

    Returns
    -------
    out: string
        a string which contains all the labels assigned during the classification process
        All possible returns are:
        - 'corrupted_noisy'
        - 'corrupted_clear'
        - 'safe_noisy'
        - 'safe_clear_star'
        - 'safe_clear_extend'

    Examples
    --------
    >>> df = spark.read.format('parquet').load(ztf_alert_sample).select(['objectId', 'cutoutScience']).toPandas()
    >>> example_byte_array = list(df[df['objectId'] == 'ZTF18acrunkm']['cutoutScience'])[0]['stampData']
    >>> img_labelisation(example_byte_array)
    'safe_clear_star'
    >>> example_byte_array = list(df[df['objectId'] == 'ZTF20aafdzuq']['cutoutScience'])[0]['stampData']
    >>> img_labelisation(example_byte_array)
    'safe_noisy'
    >>> example_byte_array = list(df[df['objectId'] == 'ZTF18aabipja']['cutoutScience'])[0]['stampData']
    >>> img_labelisation(example_byte_array)
    'corrupted_clear'
    >>> example_byte_array = list(df[df['objectId'] == 'ZTF18abuajuu']['cutoutScience'])[0]['stampData']
    >>> img_labelisation(example_byte_array)
    'safe_clear_extend'
    """
    # Decompress the gzip payload and read the FITS image it contains.
    # The [::-1] flips the rows (FITS row order is bottom-up).
    with gzip.open(io.BytesIO(stamp), 'rb') as fits_file:
        with fits.open(io.BytesIO(fits_file.read()), ignore_missing_simple=True) as hdul:
            img = hdul[0].data[::-1]
    label_img = ""
    # detect if image is corrupted or/and is noisy
    if np.any(np.isnan(img)):
        label_img += "corrupted_"
    # shift the image if it contains negative values
    elif is_neg(img):
        img = img + np.abs(np.min(img))
        label_img += "safe_"
    else:
        label_img += "safe_"
    # NOTE(review): for corrupted images peak_snr operates on NaN-containing
    # data, so the comparison is False and they are labelled "clear".
    if peak_snr(img) <= noise_threshold:
        label_img += "noisy"
    else:
        label_img += "clear"
    # if image is not corrupted and not noisy
    if label_img == "safe_clear":
        label_img += "_"
        # define threshold between ponctual object and extend object for the first pass
        star_limit = 30
        # remove background of the image and keep only high value signal
        threshold = threshold_triangle(img)
        # binarize the image with the threshold
        thresh_img = np.where(img < threshold, 0, 1).astype(bool)
        # labeled segmented part and create region
        labeled_img = label(thresh_img, connectivity=1).astype(np.byte)
        # define the properties that we want to compute on the segmented part of the image
        properties = ('label', 'perimeter')
        region_props = regionprops_table(labeled_img, intensity_image=img, properties=properties)
        region_df = pd.DataFrame(region_props)
        object_max_size = list(region_df['perimeter'])
        if len(object_max_size) > 0:
            # get the object of maximal size in the segmented image
            object_max_size = np.max(object_max_size)
            # if the maximal size object is small enough then the image is classed as star
            # else the image go to the second pass
            if object_max_size < star_limit:
                label_img += "star"
            else:
                # image is normalized between -1 and 1
                norm_img = img_normalizer(img, -1, 1)
                # then a median filter is applied to reduce some noise
                norm_img = median(norm_img)
                # and finally the image contrast is enhanced by an histogram equalization method
                norm_img = equalize_adapthist(norm_img, clip_limit=0.01, nbins=512)
                # the enhanced image is then processed by the chan vese algorithm.
                # the image is segmented between high intensity region and low intensity region.
                # source: https://arxiv.org/abs/1107.2782
                cv = chan_vese(norm_img, mu=0, lambda1=1, lambda2=2, tol=1e-9, max_num_iter=600,
                               dt=100, init_level_set="checkerboard").astype(bool)
                # the segmented region is then labeled in order to compute some information
                labeled_img_cv = label(cv, connectivity=1).astype(np.byte)
                # the properties computed are the same as the first part but the area is added. It is just a properties
                # that return the number of pixels of the region.
                properties = ('label', 'area', 'perimeter')
                region_props_cv = regionprops_table(labeled_img_cv, intensity_image=img, properties=properties)
                region_df_chan_vese = pd.DataFrame(region_props_cv)
                # a small filter remove the regions with only one pixels. we assume that one pixel area are just noise.
                zero_filter = region_df_chan_vese[region_df_chan_vese['area'] != 1]
                object_size = list(zero_filter['perimeter'])
                if len(object_size) > 0:
                    object_max_size = np.max(object_size)
                    # a new higher threshold is used because median filtering and histogram equalization tend to
                    # expand the size of ponctual object.
                    if object_max_size < 40:
                        label_img += "star"
                    else:
                        # extend label is given to images that pass all steps.
                        label_img += "extend"
                else:
                    label_img += "errorchanvese"
        else:
            label_img += "errorthreshold"
    return label_img
|
astrolabsoftwareREPO_NAMEfink-sciencePATH_START.@fink-science_extracted@fink-science-master@fink_science@image_classification@image_classification.py@.PATH_END.py
|
{
"filename": "_valueminus.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter/error_y/_valueminus.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ValueminusValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `scatter.error_y.valueminus` property."""

    def __init__(
        self, plotly_name="valueminus", parent_name="scatter.error_y", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "info")
        super(ValueminusValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter@error_y@_valueminus.py@.PATH_END.py
|
{
"filename": "test_scheduler.py",
"repo_name": "panoptes/POCS",
"repo_path": "POCS_extracted/POCS-main/tests/scheduler/test_scheduler.py",
"type": "Python"
}
|
import pytest
import requests
from panoptes.utils import error
from panoptes.utils.config.client import set_config
from panoptes.pocs.scheduler import create_scheduler_from_config
from panoptes.pocs.scheduler import BaseScheduler
from panoptes.pocs.utils.location import create_location_from_config
from panoptes.utils.serializers import to_json
def reset_conf(config_host, config_port):
    """POST a reset request to the config server and check it succeeded."""
    url = f'http://{config_host}:{config_port}/reset-config'
    payload = to_json({'reset': True})
    headers = {'Content-Type': 'application/json'}
    response = requests.post(url, data=payload, headers=headers)
    assert response.ok
def test_bad_scheduler_namespace(config_host, config_port):
    """A bare module name without its full package path should not resolve."""
    set_config('scheduler.type', 'dispatch')
    observer = create_location_from_config()['observer']
    with pytest.raises(error.NotFound):
        create_scheduler_from_config(observer=observer)
    # The fully-qualified module path works.
    set_config('scheduler.type', 'panoptes.pocs.scheduler.dispatch')
    scheduler = create_scheduler_from_config(observer=observer)
    assert isinstance(scheduler, BaseScheduler)
    reset_conf(config_host, config_port)
def test_bad_scheduler_type(config_host, config_port):
    """An unknown scheduler type should raise NotFound."""
    set_config('scheduler.type', 'foobar')
    observer = create_location_from_config()['observer']
    with pytest.raises(error.NotFound):
        create_scheduler_from_config(observer=observer)
    reset_conf(config_host, config_port)
def test_bad_scheduler_fields_file(config_host, config_port):
    """A missing fields file should raise NotFound."""
    set_config('scheduler.fields_file', 'foobar')
    observer = create_location_from_config()['observer']
    with pytest.raises(error.NotFound):
        create_scheduler_from_config(observer=observer)
    reset_conf(config_host, config_port)
def test_no_observer():
    """Scheduler creation still works when no observer is supplied."""
    scheduler = create_scheduler_from_config(observer=None)
    assert isinstance(scheduler, BaseScheduler) is True
def test_no_scheduler_in_config(config_host, config_port):
    """With no scheduler section configured, creation returns None."""
    set_config('scheduler', None)
    observer = create_location_from_config()['observer']
    assert create_scheduler_from_config(observer=observer) is None
    reset_conf(config_host, config_port)
|
panoptesREPO_NAMEPOCSPATH_START.@POCS_extracted@POCS-main@tests@scheduler@test_scheduler.py@.PATH_END.py
|
{
"filename": "test_reflected_occultations.py",
"repo_name": "rodluger/starry",
"repo_path": "starry_extracted/starry-master/tests/greedy/test_reflected_occultations.py",
"type": "Python"
}
|
import numpy as np
import starry
import matplotlib.pyplot as plt
from datetime import datetime
import pytest
from scipy.interpolate import interp1d
from tqdm import tqdm
@pytest.mark.parametrize(
    "xs,ys,zs,source_npts",
    [
        [0, 1, 1, 1],
        [-1, 0, 1, 1],
        [0.5, 1, -0.5, 1],
        [-0.5, -0.5, -0.5, 1],
        [0.5, -0.5, 0.5, 1],
        [1e-08, 0, 1, 1],  # almost noon
        [0, 0, 1, 1],  # exactly noon
        [0, 1, 1, 300],
    ],
)
def test_X(
    xs,
    ys,
    zs,
    source_npts,
    theta=0,
    ro=0.1,
    res=300,
    ydeg=2,
    tol=1e-3,
    plot=False,
):
    """Validate the reflected-light design matrix against a brute-force
    pixel sum over rendered maps for several illumination-source positions.
    """
    # Params
    # NOTE(review): the reassignments below shadow the keyword arguments, so
    # theta/ro/res/ydeg/tol are effectively fixed regardless of what's passed.
    npts = 250
    xo = np.linspace(-1.5, 1.5, npts)
    yo = np.linspace(-0.3, 0.5, npts)
    theta = 0
    ro = 0.1
    res = 300
    ydeg = 2
    tol = 1e-3
    # Instantiate
    map = starry.Map(ydeg=ydeg, reflected=True, source_npts=source_npts)
    # Analytic
    X = map.amp * map.design_matrix(
        xs=xs, ys=ys, zs=zs, theta=theta, xo=xo, yo=yo, ro=ro
    )
    # Numerical: render each spherical-harmonic basis map on a pixel grid.
    (lat, lon), (x, y, z) = map.ops.compute_ortho_grid(res)
    image = np.zeros((map.Ny, res * res))
    image[0] = map.render(theta=theta, xs=xs, ys=ys, zs=zs, res=res).flatten()
    n = 1
    for l in range(1, map.ydeg + 1):
        for m in range(-l, l + 1):
            map.reset()
            map[l, m] = 1
            # Subtract the l=0 image so each column holds only the (l, m) term.
            image[n] = (
                map.render(theta=theta, xs=xs, ys=ys, zs=zs, res=res).flatten()
            ) - image[0]
            n += 1
    X_num = np.zeros_like(X)
    for k in range(len(xo)):
        # Mask out pixels covered by the occultor disk at step k.
        idx = (x - xo[k]) ** 2 + (y - yo[k]) ** 2 > ro ** 2
        for n in range(map.Ny):
            X_num[k, n] = np.nansum(image[n][idx])
    X_num *= 4 / res ** 2
    # Plot
    if plot:
        fig, ax = plt.subplots(
            ydeg + 1, 2 * ydeg + 1, figsize=(9, 6), sharex=True, sharey=True
        )
        for axis in ax.flatten():
            axis.set_xticks([])
            axis.set_yticks([])
            axis.spines["top"].set_visible(False)
            axis.spines["right"].set_visible(False)
            axis.spines["bottom"].set_visible(False)
            axis.spines["left"].set_visible(False)
        n = 0
        for i, l in enumerate(range(ydeg + 1)):
            for j, m in enumerate(range(-l, l + 1)):
                j += ydeg - l
                med = np.median(X_num[:, n])
                ax[i, j].plot(X[:, n] - med, lw=2)
                ax[i, j].plot(X_num[:, n] - med, lw=1)
                n += 1
        fig.savefig(
            "test_X_{}.pdf".format(datetime.now().strftime("%d%m%Y%H%M%S")),
            bbox_inches="tight",
        )
        plt.close()
    # Compare
    diff = (X - X_num).flatten()
    assert np.max(np.abs(diff)) < tol
def test_inference():
    """
    Test inference on a problem with phase curve + occultations in reflected light.

    The orbital parameters here are contrived to ensure there's no null space;
    note the tiny observational uncertainty as well. Given this setup, a posterior
    map draw should look *very* similar to the true map.
    """
    # Orbital/geometric parameters
    npts = 50000
    t = np.linspace(0, 1, npts)
    porb = 0.19
    prot = 0.12
    rorb = 50
    ro = 38.0
    yo = np.sin(2 * np.pi / porb * t + 0.5)
    xo = np.cos(2 * np.pi / porb * t)
    zo = np.sin(2 * np.pi / porb * t)
    # Rescale the occultor position onto an orbit of radius `rorb`.
    amp = rorb / np.sqrt(xo ** 2 + yo ** 2 + zo ** 2)
    xo *= amp
    yo *= amp
    zo *= amp
    theta = 360.0 / prot * t
    xs = np.sin(7 * np.pi * t)
    ys = np.cos(5 * np.pi * t)
    zs = 5
    kwargs = dict(xs=xs, ys=ys, zs=zs, theta=theta, xo=xo, yo=yo, zo=zo, ro=ro)
    # Generate a synthetic dataset
    map = starry.Map(ydeg=10, reflected=True)
    map.load("earth")
    img0 = map.render(projection="rect", illuminate=False)
    flux0 = map.flux(**kwargs)
    err = 1e-9
    np.random.seed(3)
    flux = flux0 + np.random.randn(npts) * err
    # Solve the linear problem & draw a sample
    map.set_data(flux, C=err ** 2)
    map.set_prior(L=1e-4)
    map.solve(**kwargs)
    map.draw()
    img = map.render(projection="rect", illuminate=False)
    # Verify we recovered the map
    assert np.allclose(img, img0, atol=1e-4)
@pytest.mark.parametrize(
    "b,theta,ro",
    [
        [0.25, np.pi / 3, 0.3],
        [-0.25, np.pi / 3, 0.3],
        [0.25, -np.pi / 3, 0.3],
        [-0.25, -np.pi / 3, 0.3],
        [0.25, 2 * np.pi / 3, 0.3],
        [-0.25, 2 * np.pi / 3, 0.3],
        [0.25, 4 * np.pi / 3, 0.3],
        [-0.25, 4 * np.pi / 3, 0.3],
        [0.5, np.pi / 2, 1.0],
        [0.0, 0.0, 0.5],
        [0.5, 0.0, 0.1],
        [1.0 - 1e-3, 0.0, 0.5],
        [-1.0 + 1e-3, 0.0, 0.5],
        [-1.0, 0.0, 0.5],
        [1.0, 0.0, 0.5],
        [0.25, np.pi / 2, 0.5],
    ],
)
def test_lightcurve(b, theta, ro, ydeg=1, ns=1000, nb=50, res=999, plot=False):
    """Compare an analytic occultation light curve in reflected light to a
    brute-force pixel sum evaluated at `nb` points and cubic-interpolated.
    """
    # Array over full occultation, including all singularities
    xo = 0.0
    yo = np.linspace(0, 1 + ro, ns, endpoint=True)
    # Snap the nearest grid point onto each exact singular impact parameter.
    for pt in [ro, 1, 1 - ro, b + ro]:
        if pt >= 0:
            yo[np.argmin(np.abs(yo - pt))] = pt
    if theta == 0:
        xs = 0
        ys = 1
    else:
        xs = 0.5
        ys = -xs / np.tan(theta)
    rxy2 = xs ** 2 + ys ** 2
    # Choose zs so the terminator parameter matches `b` (endpoints special-cased).
    if b == 0:
        zs = 0
    elif b == 1:
        zs = -1
        xs = 0
        ys = 0
    elif b == -1:
        zs = 1
        xs = 0
        ys = 0
    else:
        zs = -np.sign(b) * np.sqrt(rxy2 / (b ** -2 - 1))
    # Compute analytic
    map = starry.Map(ydeg=ydeg, reflected=True)
    map[1:, :] = 1
    flux = map.flux(xs=xs, ys=ys, zs=zs, xo=xo, yo=yo, ro=ro)
    # Compute numerical
    flux_num = np.zeros_like(yo) * np.nan
    computed = np.zeros(ns, dtype=bool)
    (lat, lon), (x, y, z) = map.ops.compute_ortho_grid(res)
    img = map.render(xs=xs, ys=ys, zs=zs, res=res).flatten()
    for i, yoi in tqdm(enumerate(yo), total=len(yo)):
        # Only evaluate the expensive sum on a coarse subset of points.
        if (i == 0) or (i == ns - 1) or (i % (ns // nb) == 0):
            idx = (x - xo) ** 2 + (y - yoi) ** 2 > ro ** 2
            flux_num[i] = np.nansum(img[idx]) * 4 / res ** 2
            computed[i] = True
    # Interpolate over numerical result
    f = interp1d(yo[computed], flux_num[computed], kind="cubic")
    flux_num_interp = f(yo)
    # Plot
    if plot:
        fig = plt.figure()
        plt.plot(yo, flux, "C0-", label="starry", lw=2)
        plt.plot(yo, flux_num, "C1o", label="brute")
        plt.plot(yo, flux_num_interp, "C1-", lw=1)
        plt.legend(loc="best")
        plt.xlabel("impact parameter")
        plt.ylabel("flux")
        fig.savefig(
            "test_lightcurve[{}-{}-{}].pdf".format(b, theta, ro),
            bbox_inches="tight",
        )
        plt.close()
    # Compare with very lax tolerance; we're mostly looking
    # for gross outliers
    diff = np.abs(flux - flux_num_interp)
    assert np.max(diff) < 0.001
@pytest.mark.parametrize(
    "b,theta,bo,ro",
    [
        #
        # Occultor does not touch the terminator
        #
        [0.5, 0.1, 1.2, 0.1],
        [0.5, 0.1, 0.1, 1.2],
        [0.5, 0.1, 0.8, 0.1],
        [0.5, 0.1, 0.9, 0.2],
        [0.5, np.pi + 0.1, 0.8, 0.1],
        [0.5, np.pi + 0.1, 0.9, 0.2],
        [0.5, 0.1, 0.5, 1.25],
        [0.5, np.pi + 0.1, 0.5, 1.25],
        #
        # Occultations involving all three primitive integrals
        #
        [0.4, np.pi / 3, 0.5, 0.7],
        [0.4, 2 * np.pi - np.pi / 3, 0.5, 0.7],
        [0.4, np.pi / 2, 0.5, 0.7],
        [0.4, np.pi / 2, 1.0, 0.2],
        [0.00001, np.pi / 2, 0.5, 0.7],
        [0, np.pi / 2, 0.5, 0.7],
        [0.4, -np.pi / 2, 0.5, 0.7],
        [-0.4, np.pi / 3, 0.5, 0.7],
        [-0.4, 2 * np.pi - np.pi / 3, 0.5, 0.7],
        [-0.4, np.pi / 2, 0.5, 0.7],
        #
        # Occultations involving only P and T
        #
        [0.4, np.pi / 6, 0.3, 0.3],
        [0.4, np.pi + np.pi / 6, 0.1, 0.6],
        [0.4, np.pi + np.pi / 3, 0.1, 0.6],
        [0.4, np.pi / 6, 0.6, 0.5],
        [0.4, -np.pi / 6, 0.6, 0.5],
        [0.4, 0.1, 2.2, 2.0],
        [0.4, -0.1, 2.2, 2.0],
        [0.4, np.pi + np.pi / 6, 0.3, 0.8],
        [0.75, np.pi + 0.1, 4.5, 5.0],
        [-0.95, 0.0, 2.0, 2.5],
        [-0.1, np.pi / 6, 0.6, 0.75],
        [-0.5, np.pi, 0.8, 0.5],
        [-0.1, 0.0, 0.5, 1.0],
        #
        # Occultations involving three points of intersection with the terminator
        #
        [
            0.5488316824842527,
            4.03591586925189,
            0.34988513192814663,
            0.7753986686719786,
        ],
        [
            0.5488316824842527,
            2 * np.pi - 4.03591586925189,
            0.34988513192814663,
            0.7753986686719786,
        ],
        [
            -0.5488316824842527,
            4.03591586925189 - np.pi,
            0.34988513192814663,
            0.7753986686719786,
        ],
        [
            -0.5488316824842527,
            2 * np.pi - (4.03591586925189 - np.pi),
            0.34988513192814663,
            0.7753986686719786,
        ],
        #
        # Occultations involving four points of intersection with the terminator
        #
        [0.5, np.pi, 0.99, 1.5],
        [-0.5, 0.0, 0.99, 1.5],
        #
        # Miscellaneous edge cases
        #
        [0.5, np.pi, 1.0, 1.5],
        [0.5, 2 * np.pi - np.pi / 4, 0.4, 0.4],
        [0.5, 2 * np.pi - np.pi / 4, 0.3, 0.3],
        [-0.25, 4 * np.pi / 3, 0.3, 0.3],
    ],
)
def test_cases(b, theta, bo, ro, ydeg=1, res=999):
    """Spot-check single occultation configurations (one flux value each)
    against a brute-force pixel sum.
    """
    # Array over full occultation, including all singularities
    xo = 0.0
    yo = bo
    if theta == 0:
        xs = 0
        ys = 1
    else:
        xs = 0.5
        ys = -xs / np.tan(theta)
    rxy2 = xs ** 2 + ys ** 2
    # Choose zs so the terminator parameter matches `b` (endpoints special-cased).
    if b == 0:
        zs = 0
    elif b == 1:
        zs = -1
        xs = 0
        ys = 0
    elif b == -1:
        zs = 1
        xs = 0
        ys = 0
    else:
        zs = -np.sign(b) * np.sqrt(rxy2 / (b ** -2 - 1))
    # Compute analytic
    map = starry.Map(ydeg=ydeg, reflected=True)
    map[1:, :] = 1
    flux = map.flux(xs=xs, ys=ys, zs=zs, xo=xo, yo=yo, ro=ro)
    # Compute numerical
    (lat, lon), (x, y, z) = map.ops.compute_ortho_grid(res)
    img = map.render(xs=xs, ys=ys, zs=zs, res=res).flatten()
    idx = (x - xo) ** 2 + (y - yo) ** 2 > ro ** 2
    flux_num = np.nansum(img[idx]) * 4 / res ** 2
    # Compare with very lax tolerance; we're mostly looking
    # for gross outliers
    diff = np.abs(flux - flux_num)
    assert diff < 0.001
def test_theta_poles(res=500, tol=1e-3):
    """Test cases near the poles for theta."""
    # Settings
    ydeg = 10
    zs = -0.25
    xo = 0.0
    yo = 0.35
    ro = 0.25
    n = 5
    # Compare
    map = starry.Map(ydeg, reflected=True)
    map[ydeg, :] = 1
    # theta grid: multiples of pi/2, each perturbed by tiny offsets (1e-15..1e-5).
    x = np.array([0.0, 0.5, 1.0, 1.5, 2.0]).reshape(-1, 1) * np.pi
    dx = np.concatenate(
        (-np.logspace(-15, -5, n)[::-1], [0], np.logspace(-15, -5, n))
    ).reshape(1, -1)
    theta = (x + dx).reshape(-1)
    (lat, lon), (x, y, z) = map.ops.compute_ortho_grid(res)
    err = np.zeros_like(theta)
    for i in range(len(theta)):
        if theta[i] == 0:
            xs = 0
            ys = 1
        else:
            xs = 0.5
            ys = -xs / np.tan(theta[i])
        flux = map.flux(xs=xs, ys=ys, zs=zs, xo=xo, yo=yo, ro=ro)
        img = map.render(xs=xs, ys=ys, zs=zs, res=res).flatten()
        idx = (x - xo) ** 2 + (y - yo) ** 2 > ro ** 2
        flux_num = np.nansum(img[idx]) * 4 / res ** 2
        err[i] = np.max(np.abs(flux - flux_num))
    assert np.all(err < tol)
# BROKEN: Figure out why the root finder fails here.
@pytest.mark.xfail
def test_root_finder():
    """
    Test cases that cause the root finder to fail.
    """
    map = starry.Map(reflected=True)
    # Known-bad inputs; marked xfail until the underlying solver is fixed.
    map.ops._sT.func([-0.358413], [-1.57303], [55.7963], 54.8581, 0.0)
# BROKEN: Figure this out
@pytest.mark.xfail
def test_bad_case():
    """
    Test pathological wrong case identification.
    """
    map = starry.Map(reflected=True)
    # These values lead to a (very) wrong flux
    theta0 = -0.0409517311212404
    b0 = -0.83208413089546
    bo0 = 12.073565287605442
    ro = 12.155639360414618
    # Perturb theta in the vicinity of theta0
    delta = np.linspace(0, 1e-6, 100)
    theta = np.concatenate((theta0 - delta[::-1], theta0 + delta))
    # Compute the flux
    b = b0 * np.ones_like(theta)
    bo = bo0 * np.ones_like(theta)
    sT, *_ = map.ops._sT.func(b, theta, bo, ro, 0.0)
    flux = sT[:, 0]
    # DEBUG
    # plt.plot(theta, flux)
    # plt.show()
    # Check that it's approximately constant over the range
    assert np.allclose(flux, flux[0])
|
rodlugerREPO_NAMEstarryPATH_START.@starry_extracted@starry-master@tests@greedy@test_reflected_occultations.py@.PATH_END.py
|
{
"filename": "typehinting.py",
"repo_name": "ledatelescope/bifrost",
"repo_path": "bifrost_extracted/bifrost-master/python/typehinting.py",
"type": "Python"
}
|
import os
# Build a type hinting helper for libbifrost_generated.py
def build_typehinting(filename):
    """Generate an enum type-hint module from a generated bifrost binding file.

    Scans *filename* for ``BF_<TAG>_NAME = VALUE`` constant definitions,
    groups them by tag, and writes a sibling module (``generated`` replaced
    by ``typehints`` in the name) containing one ``enum.IntEnum`` class per
    tag.  For the ``space``, ``io`` and ``reduce`` tags a short lower-case
    alias is emitted alongside the full constant name.

    Parameters
    ----------
    filename : str
        Path to the generated bindings file to scan.
    """
    enums = {'status': {},
             'space': {},
             'dtype': {},
             'capture': {},
             'io': {},
             'whence': {},
             'reduce': {}}
    with open(filename, 'r') as fh:
        for line in fh:
            if not line.startswith('BF_'):
                continue
            for tag in enums:
                if not line.startswith(f"BF_{tag.upper()}_"):
                    continue
                name, value = line.split('=', 1)
                # str.strip() already trims both ends; the old
                # .strip().rstrip() chain was redundant.
                name = name.strip()
                value = value.strip()
                enums[tag][name] = value
                # Short lower-case aliases for selected tags.
                if tag == 'space':
                    name = name.replace('BF_SPACE_', '')
                    enums[tag][name.lower()] = value
                elif tag == 'io':
                    name = name.replace('BF_IO_', '')
                    enums[tag][name.lower()] = value
                elif tag == 'reduce':
                    name = name.replace('BF_REDUCE_', '')
                    name = name.replace('POWER_', 'pwr')
                    enums[tag][name.lower()] = value
                # Each line belongs to at most one tag.
                break
    outname = filename.replace('generated', 'typehints')
    with open(outname, 'w') as fh:
        fh.write(f"""
\"\"\"
Type hints generated from (unknown)
Do not modify this file.
\"\"\"
import enum
""")
        for tag, members in enums.items():
            fh.write(f"class BF{tag}_enum(enum.IntEnum):\n")
            for key, value in members.items():
                fh.write(f"    {key} = {value}\n")
            fh.write("\n")
|
ledatelescopeREPO_NAMEbifrostPATH_START.@bifrost_extracted@bifrost-master@python@typehinting.py@.PATH_END.py
|
{
"filename": "_text.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/waterfall/_text.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the `waterfall.text` property."""

    def __init__(self, plotly_name="text", parent_name="waterfall", **kwargs):
        # Resolve overridable defaults before delegating to StringValidator.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "info")
        super(TextValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@waterfall@_text.py@.PATH_END.py
|
{
"filename": "_tickvalssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergeo/marker/colorbar/_tickvalssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `scattergeo.marker.colorbar.tickvalssrc` property."""

    def __init__(
        self,
        plotly_name="tickvalssrc",
        parent_name="scattergeo.marker.colorbar",
        **kwargs
    ):
        # Resolve overridable defaults before delegating to SrcValidator.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(TickvalssrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergeo@marker@colorbar@_tickvalssrc.py@.PATH_END.py
|
{
"filename": "_legendwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/image/_legendwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `image.legendwidth` property."""

    def __init__(self, plotly_name="legendwidth", parent_name="image", **kwargs):
        # Resolve overridable defaults before delegating to NumberValidator.
        edit_type = kwargs.pop("edit_type", "style")
        minimum = kwargs.pop("min", 0)
        super(LegendwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@image@_legendwidth.py@.PATH_END.py
|
{
"filename": "InteractiveParameter.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/examples/InteractiveParameter.py",
"type": "Python"
}
|
from functools import wraps
import pyqtgraph as pg
from pyqtgraph.Qt import QtWidgets
from pyqtgraph.parametertree import (
Parameter,
ParameterTree,
RunOptions,
InteractiveFunction,
Interactor,
)
# Demo script: various ways of wiring plain functions into a ParameterTree
# via pyqtgraph's Interactor / InteractiveFunction machinery.
app = pg.mkQApp()


class LAST_RESULT:
    """Just for testing purposes"""

    # Holds the return value of the most recently executed wrapped function.
    value = None


def printResult(func):
    """Decorator: run *func*, remember its result, and show it in a dialog.

    NOTE: the wrapper itself returns None; the result is only stored on
    LAST_RESULT and displayed in the message box.
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        LAST_RESULT.value = func(*args, **kwargs)
        QtWidgets.QMessageBox.information(
            QtWidgets.QApplication.activeWindow(),
            "Function Run!",
            f"Func result: {LAST_RESULT.value}",
        )

    return wrapper


# All interacted functions are attached as children of this group parameter.
host = Parameter.create(name="Interactive Parameter Use", type="group")
interactor = Interactor(parent=host, runOptions=RunOptions.ON_CHANGED)


# Simplest case: defaults become editable parameters, re-run on change.
@interactor.decorate()
@printResult
def easySample(a=5, b=6):
    return a + b


# Parameter types are inferred from the default values (strings here).
@interactor.decorate()
@printResult
def stringParams(a="5", b="6"):
    return a + b


# A required argument can be given its starting value via the decorator.
@interactor.decorate(a=10)
@printResult
def requiredParam(a, b=10):
    return a + b


# `ignores` keeps "a" out of the tree; only "b" becomes interactive.
@interactor.decorate(ignores=["a"])
@printResult
def ignoredAParam(a=10, b=20):
    return a * b


# Run only when the user presses the action button, not on every change.
@interactor.decorate(runOptions=RunOptions.ON_ACTION)
@printResult
def runOnButton(a=10, b=20):
    return a + b


x = 5


@printResult
def accessVarInDifferentScope(x, y=10):
    return x + y


# The closure re-reads module-level `x` on every call.
func_interactive = InteractiveFunction(
    accessVarInDifferentScope, closures={"x": lambda: x}
)
# Value is redeclared, but still bound
x = 10
interactor(func_interactive)


# Options set in this context apply only to functions decorated inside it.
with interactor.optsContext(titleFormat=str.upper):

    @interactor.decorate()
    @printResult
    def capslocknames(a=5):
        return a


# Per-parameter options (a list selector) plus multiple run triggers.
@interactor.decorate(
    runOptions=(RunOptions.ON_CHANGED, RunOptions.ON_ACTION),
    a={"type": "list", "limits": [5, 10, 20]},
)
@printResult
def runOnBtnOrChange_listOpts(a=5):
    return a


# `nest=False` adds the argument directly to `host` without a subgroup.
@interactor.decorate(nest=False)
@printResult
def onlyTheArgumentsAppear(thisIsAFunctionArg=True):
    return thisIsAFunctionArg


tree = ParameterTree()
tree.setParameters(host)
tree.show()

if __name__ == "__main__":
    pg.exec()
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@examples@InteractiveParameter.py@.PATH_END.py
|
{
"filename": "_hoveron.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterternary/_hoveron.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoveronValidator(_plotly_utils.basevalidators.FlaglistValidator):
def __init__(self, plotly_name="hoveron", parent_name="scatterternary", **kwargs):
super(HoveronValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
flags=kwargs.pop("flags", ["points", "fills"]),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterternary@_hoveron.py@.PATH_END.py
|
{
"filename": "test_pyfftw_base.py",
"repo_name": "pyFFTW/pyFFTW",
"repo_path": "pyFFTW_extracted/pyFFTW-master/tests/test_pyfftw_base.py",
"type": "Python"
}
|
# Copyright 2014 Knowledge Economy Developments Ltd
#
# Henry Gomersall
# heng@kedevelopments.co.uk
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from pyfftw import FFTW, _supported_types, _all_types_human_readable
import numpy
import struct
from timeit import Timer
import unittest
try:
import mkl_fft
# mkl_fft monkeypatches numpy.fft
# explicitly import from fftpack or pocketfft instead
try:
# numpy 1.17 replaced fftpack with pocketfft
from numpy.fft import pocketfft as np_fft
except ImportError:
from numpy.fft import fftpack as np_fft
except ImportError:
from numpy import fft as np_fft
def miss(*xs):
    '''Skip test if the precisions in the iterable `xs` are not available.'''
    # Build e.g. "Requires single and double precision."
    readable = [_all_types_human_readable[x] for x in xs]
    msg = 'Requires ' + ' and '.join(readable) + ' precision.'
    skip = any(x not in _supported_types for x in xs)
    return (skip, msg)
def require(self, *xs):
    '''Skip the running test on `self` unless all precisions in `xs` are built.'''
    should_skip, reason = miss(*xs)
    if should_skip:
        self.skipTest(reason)
class FFTWBaseTest(unittest.TestCase):
    """Shared machinery for the FFTW test suites.

    Subclasses override :meth:`setUp` to select dtypes/precision and call
    :meth:`run_validate_fft` to validate FFTW results against numpy's FFT.
    """

    def reference_fftn(self, a, axes):
        """Reference n-dimensional FFT the FFTW output is compared against."""
        return np_fft.fftn(a, axes=axes)

    def __init__(self, *args, **kwargs):
        super(FFTWBaseTest, self).__init__(*args, **kwargs)
        self.make_shapes()
        # Python 2 compatibility shim for the renamed assertion helper.
        if not hasattr(self, 'assertRaisesRegex'):
            self.assertRaisesRegex = self.assertRaisesRegexp

    def setUp(self):
        # Default configuration: single-precision complex forward transform.
        require(self, '32')
        self.input_dtype = numpy.complex64
        self.output_dtype = numpy.complex64
        self.np_fft_comparison = np_fft.fft
        self.direction = 'FFTW_FORWARD'
        return

    def tearDown(self):
        return

    def get_input_dtype_alignment(self):
        """Natural alignment (bytes) of the real part of the input dtype."""
        return self.input_dtype([]).real.dtype.alignment

    def get_output_dtype_alignment(self):
        """Natural alignment (bytes) of the real part of the output dtype."""
        # NOTE(review): this mirrors get_input_dtype_alignment and reads
        # input_dtype rather than output_dtype -- presumably fine because the
        # two dtypes always match in these tests; confirm before changing.
        return self.input_dtype([]).real.dtype.alignment

    def make_shapes(self):
        """Define the input/output array shapes used throughout the tests."""
        self.input_shapes = {
            'small_1d': (16,),
            '1d': (2048,),
            '2d': (256, 2048),
            '3d': (5, 256, 2048)}
        self.output_shapes = {
            'small_1d': (16,),
            '1d': (2048,),
            '2d': (256, 2048),
            '3d': (5, 256, 2048)}

    def create_test_arrays(self, input_shape, output_shape, axes=None):
        """Return a pair of random complex arrays with the given shapes."""
        a = self.input_dtype(numpy.random.randn(*input_shape)
                + 1j*numpy.random.randn(*input_shape))
        b = self.output_dtype(numpy.random.randn(*output_shape)
                + 1j*numpy.random.randn(*output_shape))
        return a, b

    def timer_routine(self, pyfftw_callable, numpy_fft_callable,
            comparison_string='numpy.fft'):
        """Time both callables over N runs and print the comparison."""
        N = 100
        t = Timer(stmt=pyfftw_callable)
        t_numpy_fft = Timer(stmt=numpy_fft_callable)
        t_str = ("%.2f" % (1000.0/N*t.timeit(N))) + ' ms'
        t_numpy_str = ("%.2f" % (1000.0/N*t_numpy_fft.timeit(N))) + ' ms'
        print('One run: ' + t_str +
              ' (versus ' + t_numpy_str + ' for ' + comparison_string +
              ')')

    def run_validate_fft(self, a, b, axes, fft=None, ifft=None,
            force_unaligned_data=False, create_array_copies=True,
            threads=1, flags=('FFTW_ESTIMATE',)):
        ''' Run a validation of the FFTW routines for the passed pair
        of arrays, a and b, and the axes argument.

        a and b are assumed to be the same shape (but not necessarily
        the same layout in memory).

        fft and ifft, if passed, should be instantiated FFTW objects.

        If force_unaligned_data is True, the flag FFTW_UNALIGNED
        will be passed to the fftw routines.

        The threads argument runs the validation with multiple threads.

        flags is passed to the creation of the FFTW object.
        '''
        if create_array_copies:
            # Don't corrupt the original mutable arrays
            a = a.copy()
            b = b.copy()
        a_orig = a.copy()
        flags = list(flags)
        if force_unaligned_data:
            flags.append('FFTW_UNALIGNED')
        # Fixed: compare to None with `is`, not `==` (PEP 8 E711).
        if fft is None:
            fft = FFTW(a, b, axes=axes, direction='FFTW_FORWARD',
                    flags=flags, threads=threads)
        else:
            fft.update_arrays(a, b)
        if ifft is None:
            ifft = FFTW(b, a, axes=axes, direction='FFTW_BACKWARD',
                    flags=flags, threads=threads)
        else:
            ifft.update_arrays(b, a)
        a[:] = a_orig
        # Test the forward FFT by comparing it to the result from numpy.fft
        fft.execute()
        ref_b = self.reference_fftn(a, axes=axes)
        # This is actually quite a poor relative error, but it still
        # sometimes fails. I assume that numpy.fft has different internals
        # to fftw.
        self.assertTrue(numpy.allclose(b, ref_b, rtol=1e-2, atol=1e-3))
        # Test the inverse FFT by comparing the result to the starting
        # value (which is scaled as per FFTW being unnormalised).
        ifft.execute()
        # The scaling is the product of the lengths of the fft along
        # the axes along which the fft is taken.
        scaling = numpy.prod(numpy.array(a.shape)[list(axes)])
        self.assertEqual(ifft.N, scaling)
        self.assertEqual(fft.N, scaling)
        self.assertTrue(numpy.allclose(a/scaling, a_orig, rtol=1e-2, atol=1e-3))
        return fft, ifft
def run_test_suites(test_suites, run_tests=None):
'''From each test case (derived from TestCase) in test_suites,
load and run all the test cases within.
If run_tests is not None, then it should be a dictionary with
keys being the test suite class name, and the values being
a list of test methods to run. Alternatively, the key can
be 'all' in which case all the test suites will be run with
the provided list of test suites.
'''
suite = unittest.TestSuite()
for test_class in test_suites:
tests = unittest.TestLoader().loadTestsFromTestCase(test_class)
if run_tests is not None:
if test_class.__name__ in run_tests:
this_suite_run = set(run_tests[test_class.__name__])
else:
this_suite_run = set()
if 'all' in run_tests:
this_suite_run = this_suite_run.union(run_tests['all'])
_tests = []
for each_test in tests:
if (each_test.id().split('.')[-1] in this_suite_run):
_tests.append(each_test)
tests = _tests
suite.addTests(tests)
unittest.TextTestRunner(verbosity=2).run(suite)
|
pyFFTWREPO_NAMEpyFFTWPATH_START.@pyFFTW_extracted@pyFFTW-master@tests@test_pyfftw_base.py@.PATH_END.py
|
{
"filename": "usage.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py3/IPython/core/usage.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Usage information for the main IPython applications.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
# Copyright (C) 2001-2007 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
import sys
from IPython.core import release
# Long command-line usage message (shown by `ipython --help`).
# Fix: the original text read "defaults to to `$HOME/.ipython`" — a
# duplicated word in user-facing help output.
cl_usage = """\
=========
IPython
=========
Tools for Interactive Computing in Python
=========================================
A Python shell with automatic history (input and output), dynamic object
introspection, easier configuration, command completion, access to the
system shell and more. IPython can also be embedded in running programs.
Usage
ipython [subcommand] [options] [-c cmd | -m mod | file] [--] [arg] ...
If invoked with no options, it executes the file and exits, passing the
remaining arguments to the script, just as if you had specified the same
command with python. You may need to specify `--` before args to be passed
to the script, to prevent IPython from attempting to parse them. If you
specify the option `-i` before the filename, it will enter an interactive
IPython session after running the script, rather than exiting. Files ending
in .py will be treated as normal Python, but files ending in .ipy can
contain special IPython syntax (magic commands, shell expansions, etc.).
Almost all configuration in IPython is available via the command-line. Do
`ipython --help-all` to see all available options. For persistent
configuration, look into your `ipython_config.py` configuration file for
details.
This file is typically installed in the `IPYTHONDIR` directory, and there
is a separate configuration directory for each profile. The default profile
directory will be located in $IPYTHONDIR/profile_default. IPYTHONDIR
defaults to `$HOME/.ipython`. For Windows users, $HOME resolves to
C:\\Users\\YourUserName in most instances.
To initialize a profile with the default configuration file, do::
$> ipython profile create
and start editing `IPYTHONDIR/profile_default/ipython_config.py`
In IPython's documentation, we will refer to this directory as
`IPYTHONDIR`, you can change its default location by creating an
environment variable with this name and setting it to the desired path.
For more information, see the manual available in HTML and PDF in your
installation, or online at https://ipython.org/documentation.html.
"""
# Full interactive-features help text, displayed when the user types `?`
# at the IPython prompt. Runtime string: content must not be altered here.
interactive_usage = """
IPython -- An enhanced Interactive Python
=========================================
IPython offers a fully compatible replacement for the standard Python
interpreter, with convenient shell features, special commands, command
history mechanism and output results caching.
At your system command line, type 'ipython -h' to see the command line
options available. This document only describes interactive features.
GETTING HELP
------------
Within IPython you have various way to access help:
? -> Introduction and overview of IPython's features (this screen).
object? -> Details about 'object'.
object?? -> More detailed, verbose information about 'object'.
%quickref -> Quick reference of all IPython specific syntax and magics.
help -> Access Python's own help system.
If you are in terminal IPython you can quit this screen by pressing `q`.
MAIN FEATURES
-------------
* Access to the standard Python help with object docstrings and the Python
manuals. Simply type 'help' (no quotes) to invoke it.
* Magic commands: type %magic for information on the magic subsystem.
* System command aliases, via the %alias command or the configuration file(s).
* Dynamic object information:
Typing ?word or word? prints detailed information about an object. Certain
long strings (code, etc.) get snipped in the center for brevity.
Typing ??word or word?? gives access to the full information without
snipping long strings. Strings that are longer than the screen are printed
through the less pager.
The ?/?? system gives access to the full source code for any object (if
available), shows function prototypes and other useful information.
If you just want to see an object's docstring, type '%pdoc object' (without
quotes, and without % if you have automagic on).
* Tab completion in the local namespace:
At any time, hitting tab will complete any available python commands or
variable names, and show you a list of the possible completions if there's
no unambiguous one. It will also complete filenames in the current directory.
* Search previous command history in multiple ways:
- Start typing, and then use arrow keys up/down or (Ctrl-p/Ctrl-n) to search
through the history items that match what you've typed so far.
- Hit Ctrl-r: opens a search prompt. Begin typing and the system searches
your history for lines that match what you've typed so far, completing as
much as it can.
- %hist: search history by index.
* Persistent command history across sessions.
* Logging of input with the ability to save and restore a working session.
* System shell with !. Typing !ls will run 'ls' in the current directory.
* The reload command does a 'deep' reload of a module: changes made to the
module since you imported will actually be available without having to exit.
* Verbose and colored exception traceback printouts. See the magic xmode and
xcolor functions for details (just type %magic).
* Input caching system:
IPython offers numbered prompts (In/Out) with input and output caching. All
input is saved and can be retrieved as variables (besides the usual arrow
key recall).
The following GLOBAL variables always exist (so don't overwrite them!):
_i: stores previous input.
_ii: next previous.
_iii: next-next previous.
_ih : a list of all input _ih[n] is the input from line n.
Additionally, global variables named _i<n> are dynamically created (<n>
being the prompt counter), such that _i<n> == _ih[<n>]
For example, what you typed at prompt 14 is available as _i14 and _ih[14].
You can create macros which contain multiple input lines from this history,
for later re-execution, with the %macro function.
The history function %hist allows you to see any part of your input history
by printing a range of the _i variables. Note that inputs which contain
magic functions (%) appear in the history with a prepended comment. This is
because they aren't really valid Python code, so you can't exec them.
* Output caching system:
For output that is returned from actions, a system similar to the input
cache exists but using _ instead of _i. Only actions that produce a result
(NOT assignments, for example) are cached. If you are familiar with
Mathematica, IPython's _ variables behave exactly like Mathematica's %
variables.
The following GLOBAL variables always exist (so don't overwrite them!):
_ (one underscore): previous output.
__ (two underscores): next previous.
___ (three underscores): next-next previous.
Global variables named _<n> are dynamically created (<n> being the prompt
counter), such that the result of output <n> is always available as _<n>.
Finally, a global dictionary named _oh exists with entries for all lines
which generated output.
* Directory history:
Your history of visited directories is kept in the global list _dh, and the
magic %cd command can be used to go to any entry in that list.
* Auto-parentheses and auto-quotes (adapted from Nathan Gray's LazyPython)
1. Auto-parentheses
Callable objects (i.e. functions, methods, etc) can be invoked like
this (notice the commas between the arguments)::
In [1]: callable_ob arg1, arg2, arg3
and the input will be translated to this::
callable_ob(arg1, arg2, arg3)
This feature is off by default (in rare cases it can produce
undesirable side-effects), but you can activate it at the command-line
by starting IPython with `--autocall 1`, set it permanently in your
configuration file, or turn on at runtime with `%autocall 1`.
You can force auto-parentheses by using '/' as the first character
of a line. For example::
In [1]: /globals # becomes 'globals()'
Note that the '/' MUST be the first character on the line! This
won't work::
In [2]: print /globals # syntax error
In most cases the automatic algorithm should work, so you should
rarely need to explicitly invoke /. One notable exception is if you
are trying to call a function with a list of tuples as arguments (the
parenthesis will confuse IPython)::
In [1]: zip (1,2,3),(4,5,6) # won't work
but this will work::
In [2]: /zip (1,2,3),(4,5,6)
------> zip ((1,2,3),(4,5,6))
Out[2]= [(1, 4), (2, 5), (3, 6)]
IPython tells you that it has altered your command line by
displaying the new command line preceded by -->. e.g.::
In [18]: callable list
-------> callable (list)
2. Auto-Quoting
You can force auto-quoting of a function's arguments by using ',' as
the first character of a line. For example::
In [1]: ,my_function /home/me # becomes my_function("/home/me")
If you use ';' instead, the whole argument is quoted as a single
string (while ',' splits on whitespace)::
In [2]: ,my_function a b c # becomes my_function("a","b","c")
In [3]: ;my_function a b c # becomes my_function("a b c")
Note that the ',' MUST be the first character on the line! This
won't work::
In [4]: x = ,my_function /home/me # syntax error
"""
# Abbreviated feature summary, used where the full interactive_usage text
# would be too verbose. Runtime string: content must not be altered here.
interactive_usage_min = """\
An enhanced console for Python.
Some of its features are:
- Tab completion in the local namespace.
- Logging of input, see command-line options.
- System shell escape via ! , eg !ls.
- Magic commands, starting with a % (like %ls, %pwd, %cd, etc.)
- Keeps track of locally defined variables via %who, %whos.
- Show object information with a ? eg ?x or x? (use ?? for more info).
"""
# Text for the %quickref magic. Raw string (r""") so escapes such as \n in
# the examples are shown literally. The trailing line suggests the list of
# available magics is appended by the caller — not visible here, so confirm
# against the %quickref implementation before relying on that.
quick_reference = r"""
IPython -- An enhanced Interactive Python - Quick Reference Card
================================================================
obj?, obj?? : Get help, or more help for object (also works as
?obj, ??obj).
?foo.*abc* : List names in 'foo' containing 'abc' in them.
%magic : Information about IPython's 'magic' % functions.
Magic functions are prefixed by % or %%, and typically take their arguments
without parentheses, quotes or even commas for convenience. Line magics take a
single % and cell magics are prefixed with two %%.
Example magic function calls:
%alias d ls -F : 'd' is now an alias for 'ls -F'
alias d ls -F : Works if 'alias' not a python name
alist = %alias : Get list of aliases to 'alist'
cd /usr/share : Obvious. cd -<tab> to choose from visited dirs.
%cd?? : See help AND source for magic %cd
%timeit x=10 : time the 'x=10' statement with high precision.
%%timeit x=2**100
x**100 : time 'x**100' with a setup of 'x=2**100'; setup code is not
counted. This is an example of a cell magic.
System commands:
!cp a.txt b/ : System command escape, calls os.system()
cp a.txt b/ : after %rehashx, most system commands work without !
cp ${f}.txt $bar : Variable expansion in magics and system commands
files = !ls /usr : Capture system command output
files.s, files.l, files.n: "a b c", ['a','b','c'], 'a\nb\nc'
History:
_i, _ii, _iii : Previous, next previous, next next previous input
_i4, _ih[2:5] : Input history line 4, lines 2-4
exec(_i81) : Execute input history line #81 again
%rep 81 : Edit input history line #81
_, __, ___ : previous, next previous, next next previous output
_dh : Directory history
_oh : Output history
%hist : Command history of current session.
%hist -g foo : Search command history of (almost) all sessions for 'foo'.
%hist -g : Command history of (almost) all sessions.
%hist 1/2-8 : Command history containing lines 2-8 of session 1.
%hist 1/ ~2/ : Command history of session 1 and 2 sessions before current.
%hist ~8/1-~6/5 : Command history from line 1 of 8 sessions ago to
line 5 of 6 sessions ago.
%edit 0/ : Open editor to execute code with history of current session.
Autocall:
f 1,2 : f(1,2) # Off by default, enable with %autocall magic.
/f 1,2 : f(1,2) (forced autoparen)
,f 1 2 : f("1","2")
;f 1 2 : f("1 2")
Remember: TAB completion works in many contexts, not just file names
or python names.
The following magic functions are currently available:
"""
# Banner printed when an interactive IPython session starts: the Python
# version line, the copyright hint, and the IPython version line.
default_banner_parts = [
    "Python %s\n" % sys.version.split("\n")[0],
    "Type 'copyright', 'credits' or 'license' for more information\n",
    "IPython %s -- An enhanced Interactive Python. Type '?' for help.\n"
    % release.version,
]
default_banner = "".join(default_banner_parts)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py3@IPython@core@usage.py@.PATH_END.py
|
{
"filename": "_shadowsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/icicle/insidetextfont/_shadowsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``shadowsrc`` property of ``icicle.insidetextfont``."""

    def __init__(
        self, plotly_name="shadowsrc", parent_name="icicle.insidetextfont", **kwargs
    ):
        # Callers may override edit_type; otherwise default to "none".
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@icicle@insidetextfont@_shadowsrc.py@.PATH_END.py
|
{
"filename": "simscan_ground.ipynb",
"repo_name": "hpc4cmb/toast",
"repo_path": "toast_extracted/toast-main/tutorial/02_Simulated_Scan_Strategies/simscan_ground.ipynb",
"type": "Jupyter Notebook"
}
|
# Ground observing schedules
In this notebook we learn about creating ground observing schedules.
```python
# Load common tools for all lessons
import sys
sys.path.insert(0, "..")
from lesson_tools import (
fake_focalplane
)
# Capture C++ output in the jupyter cells
%reload_ext wurlitzer
```
## First schedule
TOAST `pipelines` include a tool called `toast_ground_schedule.py`, also known as the *opportunistic scheduler*. It builds observing schedules heuristically by maintaining a list of available targets and always scheduling the highest-priority one. `toast_ground_schedule.py` can be used to create site-specific observing schedules subject to a number of constraints. At the minimum, the tool needs the location of the observatory, observing window and at least one target. Here is a minimal example:
```python
! toast_ground_schedule.py \
--site-lat "-22.958064" \
--site-lon "-67.786222" \
--site-alt 5200 \
--site-name Atacama \
--telescope LAT \
--start "2020-01-01 00:00:00" \
--stop "2020-01-01 12:00:00" \
--patch-coord C \
--patch small_patch,1,40,-40,44,-44 \
--out schedule.txt
```
Let's look at the contents of the schedule file.
```python
! cat schedule.txt
```
The rectangular patch definition takes the form `--patch <name>,<priority>,<RA left>,<DEC top>,<RA right>,<DEC bottom>`. No spaces are allowed in the definition. Other patch definition formats will be discussed below.
The start and stop times are given in UTC.
The resulting schedule is a plain ASCII file. The header defines the telescope and each line after that defines a constant elevation scan (CES) with a fixed azimuth range. When a full pass of the target takes longer than allowed observation time, `--ces-max-time`, the CES is broken up into sub passes that use the same observing elevation but adjust the azimuth range. The above schedule includes 10 passes of the target "small_patch" that fit in the given 12-hour observing window. Some passes are split into as many as 4 sub passes, each no longer than 20 minutes (default).
## Second patch
Let's add another patch, this time using the circular patch definition format, set the observing elevation limits and enable Sun avoidance. We'll also increase `ces-max-time` so we get fewer entries in the schedule. The circular patch format is
`--patch <name>,<priority>,<RA>,<DEC>,<radius>`
```python
! toast_ground_schedule.py \
--site-lat "-22.958064" \
--site-lon "-67.786222" \
--site-alt 5200 \
--site-name Atacama \
--telescope LAT \
--start "2020-01-01 00:00:00" \
--stop "2020-01-04 00:00:00" \
--patch-coord C \
--patch small_patch,1,80,-13,10 \
--patch large_patch,1,80,-33,20 \
--el-min 30 \
--el-max 60 \
--ces-max-time 86400 \
--sun-avoidance-angle 20 \
--out schedule.txt \
--debug
! cat schedule.txt
```
Note that we added the `--debug` option to the command line. This produces a helpful diagnostic plot, `patches.png`, that shows the locations of your patches, the Sun, the Moon and their avoidance areas. The plot is shown below. The motion of the Moon is already apparent in this 3-day schedule. The Sun (on the right) is effectively stationary. `--debug` can be expensive, especially if you have lots of patches or request a long observing schedule.
```python
from IPython.display import Image
Image("patches.png")
```
We deliberately chose the locations of the patches so that they compete over the observing time. This allows us to point out some advanced features of the scheduler. If you examine the very end of the observing schedule, you can note that both `small_patch` and `large_patch` were observed 7 times. Given that `large_patch` is twice as wide and only takes twice as long to observe, equal number of observations actually implies that `large_patch` will end up with *half* as many hits per sky pixel.
## Patch priority
The scheduler offers two ways to remedy this issue. First, one can simply increase the priority of the large patch to dedicate more observing time to it. **All things being equal, the number of visits to a given patch is inversely proportional to the `priority` in the patch definition**:
```python
! toast_ground_schedule.py \
--site-lat "-22.958064" \
--site-lon "-67.786222" \
--site-alt 5200 \
--site-name Atacama \
--telescope LAT \
--start "2020-01-01 00:00:00" \
--stop "2020-01-04 00:00:00" \
--patch-coord C \
--patch small_patch,1,80,-13,10 \
--patch large_patch,0.5,80,-33,20 \
--el-min 30 \
--el-max 60 \
--ces-max-time 86400 \
--sun-avoidance-angle 20 \
--out schedule.txt
! cat schedule.txt
```
Now the large patch is observed 9 times and the small patch is observed 4 times.
## Equalize area and time
Typically we do not use the priority field to normalize the depths. Instead, the user can balance the integration depths with two command line arguments: `--equalize-area` and `--equalize-time`.
With `--equalize-area` the scheduler will automatically modulate the user-given priorities with the area of each patch.
With `--equalize-time` the scheduler will balance the actual time spent in each patch rather than the number of visits. There is a difference, because the observing time per pass can vary greatly depending on the patch shape and orientation.
```python
! toast_ground_schedule.py \
--site-lat "-22.958064" \
--site-lon "-67.786222" \
--site-alt 5200 \
--site-name Atacama \
--telescope LAT \
--start "2020-01-01 00:00:00" \
--stop "2020-01-04 00:00:00" \
--patch-coord C \
--patch small_patch,1,80,-13,10 \
--patch large_patch,1,80,-33,20 \
--el-min 30 \
--el-max 60 \
--ces-max-time 86400 \
--sun-avoidance-angle 20 \
--equalize-area \
--equalize-time \
--out schedule.txt
! cat schedule.txt
```
As with the by-hand-modulated priorities, `large_patch` ends up with twice as many visits.
## Binning the schedule
We take an observing schedule from `toast_ground_sim.py` and translate it into a depth map.
First, we need a focalplane. If one does not already exist, TOAST `pipelines` includes a tool for generating mock hexagonal focalplanes:
```python
! toast_fake_focalplane.py --help
```
Now we create a focalplane with 10-degree FOV and a minimum of 20 pixels:
```python
! toast_fake_focalplane.py \
--minpix 20 \
--out focalplane \
--fwhm 30 \
--fov 10 \
--psd_fknee 5e-2 \
--psd_NET 1e-3 \
--psd_alpha 1 \
--psd_fmin 1e-5
```
The actual focalplane ends up having 37 pixels, instead of the minimum of 20. This is because regular packing of the hexagon is quantized. Notice that the final name of the focalplane is `focalplane_37.pkl`. We'll need the name to run the simulation script.
We will use the versatile ground simulation pipeline, `toast_ground_sim.py`, to bin the map. It will be covered in detail in lesson 7 so here we simply write out a parameter file:
```python
%%writefile bin_schedule.par
--sample-rate
10.0
--scan-rate
0.3
--scan-accel
10.0
--nside
64
--focalplane
focalplane_37.pkl
--schedule
schedule.txt
--out
out
--simulate-noise
--freq
100
--no-destripe
--no-binmap
--hits
--wcov
```
Then run the pipeline. Because the pipeline uses `libMadam`, an MPI code, we must submit the job to a compute node.
```python
import subprocess as sp
runcom = "toast_ground_sim.py @bin_schedule.par"
print(runcom, flush=True)
sp.check_call(runcom, stderr=sp.STDOUT, shell=True)
```
Let's examine the resulting hits and depth map. The file naming convention may seem a little awkward but follows from the fact that a single run of `toast_ground_sim.py` may map multiple telescopes, frequencies and time splits.
```python
import matplotlib.pyplot as plt
%matplotlib inline
import healpy
hits = healpy.read_map("out/00000000/100/toast_100_telescope_all_time_all_hmap.fits")
hits[hits == 0] = healpy.UNSEEN
healpy.mollview(hits, unit="hits", title="Total hits")
healpy.graticule(22.5, verbose=False)
```
```python
wcov = healpy.read_map("out/00000000/100/toast_100_telescope_all_time_all_wcov.fits")
wcov *= 1e12 # from K^2 to uK^2
wcov[wcov == 0] = healpy.UNSEEN
healpy.mollview(wcov, unit="$\mu$K$^2$", title="White noise variance", min=1e0, max=1e3)
healpy.graticule(22.5, verbose=False)
```
## Advanced topics in ground scheduling
### Cooler cycle format
It is possible to instruct the scheduler to add regular breaks in the schedule to cycle the cooler or to perform other maintenance activities. The cooler cycle is a pseudo patch that the scheduler considers like other targets when deciding what to observe next. The full syntax is:
`--patch <name>,COOLER,<weight>,<power>,<hold_time_min>,<hold_time_max>,<cycle_time>,<az>,<el>`
All of the time arguments are given in hours. The priority of the patch depends on the time since the last cycle occurred. It is `infinity` until `hold_time_min` has elapsed and then begins to decrease according to a power law set by `power`. Priority at `hold_time_max` is zero.
### Planet scans
The scheduler can target planets just like stationary patches. The SSO (solar system object) format is
`--patch <name>,SSO,<priority>,<radius [deg]>`
All orbiting bodies recognized by `pyEphem` are supported.
### Oscillating patches
The scheduler designs the scans so that the azimuth range is kept fixed and the boresight sweeps the entire patch. This usually implies a certain amount of spillover integration time outside the patch. This can produce an excess of hits at the boundary of two patches. The scheduler offers a way to smear the spillover by systematically shifting the position of the patches in RA and DEC. The arguments to accomplish this are
`--ra-period <period [visits]>`
`--ra-amplitude <amplitude [deg]>`
`--dec-period <period [visits]>`
`--dec-amplitude <amplitude [deg]>`
Patches will systematically shift after each visit, returning to their fiducial positions after each period.
### Horizontal (high cadence) patch definition
Horizontal patch definition specifies the observing elevation and the azimuth range. The scheduler parks the telescope at the given elevation and scans until the constraints (Sun, Moon, cooler hold time) prevent continuing. If possible, scanning is continued by switching between rising and setting scan.
`--patch <name>,HORIZONTAL,<priority>,<az min [deg]>,<az max [deg]>,<el [deg]>,<scan time [min]>`
### Polygon patch definition
Patches do not need to be rectangular or circular. An arbitrary polygon shape can be specified by giving the corner coordinates.
`--patch <name>,<priority>,<RA_0 [deg]>,<DEC_0 [deg]>,...,,<RA_N-1 [deg]>,<DEC_N-1 [deg]>`
### Elevation penalty
Lower observing elevations are subject to higher levels of photon noise from the atmosphere. It is possible to instruct the scheduler to modulate the relative priorities of the available patches based on their elevation.
`--elevation-penalty-limit <elevation [deg]>`
`--elevation-penalty-power <power>`
If the available patch is below `elevation-penalty-limit`, the priority is modulated by $\left(\frac{limit}{elevation}\right)^{power}$. This way low elevation scans are reserved for targets that cannot be observed at higher elevation or when no targets are available higher.
### Block-out
January and February weather in the Atacama is known to be problematic for observing. It is possible to instruct the scheduler to skip certain periods of the calendar year with
`--block-out <start month>/<start day>-<end month>/<end day>`
or with
`--block-out <start year>/<start month>/<start day>-<end year>/<end month>/<end day>`
All fields are integers. The dates are in UTC.
### Gaps
There are two short gap lengths in the scheduler:
`--gap-small <gap [s]>`
`--gap <gap [s]>`
The `gap-small` is applied when a single CES is broken up into sub scans. The regular `gap` is applied between separate observations.
### Pole scheduling
Observing from the Poles is unlike anywhere else on Earth. Patches will not drift across a constant elevation line. Instead, the telescope must be stepped in elevation. The Pole scheduling mode is enabled with
`--pole-mode`
And the step time and size are controlled with
`--pole-el-step <step [deg]>`
`--pole-ces-time <time [s]>`
```python
# --site-lat "-89:59.464" \
# --site-lon "-44:39" \
! toast_ground_schedule.py \
--site-lat "-89.991" \
--site-lon "-44.65" \
--site-alt 2843 \
--site-name South_Pole \
--telescope LAT \
--start "2020-01-01 00:00:00" \
--stop "2020-01-01 12:00:00" \
--patch-coord C \
--patch small_patch,1,40,-40,44,-44 \
--pole-mode \
--pole-el-step 0.25 \
--pole-ces-time 600 \
--out pole_schedule.txt
```
The resulting schedule has each pass of the target split into (0.25$^\circ$, 10min) steps. It takes 16 steps (2:40h) to cover the 4$^\circ\times$4$^\circ$ degree field.
```python
! cat pole_schedule.txt
```
Let's bin this schedule as well. We also demonstrate how parameters in the parameter file may be overridden
```python
runcom = "toast_ground_sim.py @bin_schedule.par --schedule pole_schedule.txt --out out_pole"
print(runcom, flush=True)
sp.check_call(runcom, stderr=sp.STDOUT, shell=True)
```
```python
hits = healpy.read_map("out_pole/00000000/100/toast_100_telescope_all_time_all_hmap.fits")
hits[hits == 0] = healpy.UNSEEN
healpy.mollview(hits, unit="hits", title="Total hits, Pole")
healpy.graticule(22.5, verbose=False)
```
## Exercises
- Go back to the "Second patch" section of the notebook and add a third patch of your choosing. Executing that cell will visualize the patch on the sky.
- In the same section, increase the observation length to one month, this will cause the Moon avoidance region to draw a band around the Equator.
- In "Binning the schedule", try increasing the FOV from 10 degrees to 35, representantive of modern SATs
```python
```
|
hpc4cmbREPO_NAMEtoastPATH_START.@toast_extracted@toast-main@tutorial@02_Simulated_Scan_Strategies@simscan_ground.ipynb@.PATH_END.py
|
{
"filename": "make-pklin.py",
"repo_name": "fastpm/fastpm",
"repo_path": "fastpm_extracted/fastpm-master/python/make-pklin.py",
"type": "Python"
}
|
from nbodykit.lab import *
from nbodykit.cosmology import WMAP9, LinearPower, Cosmology
import numpy

# Planck-like cosmology (no massive neutrinos), normalised to sigma8 = 0.8159.
_h = 0.6774
MYPlanck = Cosmology(
    m_ncdm=[],
    Omega0_b=0.0223 / _h**2,
    Omega0_cdm=0.1188 / _h**2,
    h=_h,
).match(sigma8=0.8159)

# Tabulate the z = 0 linear matter power spectrum on a log-spaced k grid
# and write it as two columns (k, P(k)).
pklin0 = LinearPower(MYPlanck, redshift=0.0)
k = numpy.logspace(-3, 2, 10000, endpoint=True)
numpy.savetxt('myplanck-z0.txt', numpy.column_stack([k, pklin0(k)]))
|
fastpmREPO_NAMEfastpmPATH_START.@fastpm_extracted@fastpm-master@python@make-pklin.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/contours/x/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Eager imports on legacy interpreters, or whenever a static type checker
# is analysing the package (TYPE_CHECKING), so tools see the real names.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._width import WidthValidator
    from ._usecolormap import UsecolormapValidator
    from ._start import StartValidator
    from ._size import SizeValidator
    from ._show import ShowValidator
    from ._project import ProjectValidator
    from ._highlightwidth import HighlightwidthValidator
    from ._highlightcolor import HighlightcolorValidator
    from ._highlight import HighlightValidator
    from ._end import EndValidator
    from ._color import ColorValidator
else:
    # Otherwise defer the submodule imports until first attribute access
    # (module-level __getattr__, PEP 562), keeping package import fast.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._width.WidthValidator",
            "._usecolormap.UsecolormapValidator",
            "._start.StartValidator",
            "._size.SizeValidator",
            "._show.ShowValidator",
            "._project.ProjectValidator",
            "._highlightwidth.HighlightwidthValidator",
            "._highlightcolor.HighlightcolorValidator",
            "._highlight.HighlightValidator",
            "._end.EndValidator",
            "._color.ColorValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@contours@x@__init__.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "spacetelescope/imexam",
"repo_path": "imexam_extracted/imexam-master/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything
# else. See astropy.sphinx.conf for which values are set there.
from configparser import ConfigParser
from datetime import datetime, timezone
import os
import sys

try:
    from sphinx_astropy.conf.v1 import *  # noqa
except ImportError:
    print('ERROR: the documentation requires the sphinx-astropy package to '
          'be installed')
    sys.exit(1)

# Get configuration information from setup.cfg
conf = ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))

# -- General configuration ----------------------------------------------------
# By default, highlight as Python 3.
# highlight_language = 'python3'
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.7'

# Extend astropy intersphinx_mapping with packages we use here
intersphinx_mapping['ginga'] = ('http://ginga.readthedocs.io/en/latest/', None)
# Exclude astropy intersphinx_mapping for unused packages
del intersphinx_mapping['h5py']  # noqa

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')  # noqa

# This is added to the end of RST files - a good place to put
# substitutions to be used globally.
rst_epilog = """.. _imexam: high-level_API.html"""

# Render inheritance diagrams in SVG
#graphviz_output_format = "svg"
#
#graphviz_dot_args = [
#    '-Nfontsize=10',
#    '-Nfontname=Helvetica Neue, Helvetica, Arial, sans-serif',
#    '-Efontsize=10',
#    '-Efontname=Helvetica Neue, Helvetica, Arial, sans-serif',
#    '-Gfontsize=10',
#    '-Gfontname=Helvetica Neue, Helvetica, Arial, sans-serif'
#]

# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['name']
author = setup_cfg['author']
# datetime.utcnow() is deprecated (Python 3.12+) and returns a naive
# datetime; use an explicit timezone-aware UTC "now" instead.
copyright = f'2011-{datetime.now(timezone.utc).year}, {author}'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(project)
package = sys.modules[project]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__

# -- Options for HTML output ------------------------------------------------
# The global astropy configuration uses a custom theme,
# 'bootstrap-astropy', which is installed along with astropy. A different
# theme can be used or the options for this theme can be modified by
# overriding some of the variables set in the global configuration. The
# variables set in the global configuration are listed below, commented
# out.
# Add any paths that contain custom themes here, relative to this
# directory.
# html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation
# for a list of builtin themes. To override the custom theme, set this
# to the name of a builtin theme or the name of a custom theme in
# html_theme_path.
# html_theme = Noneo
html_theme_options = {
    'logotext1': 'im',    # white, semi-bold
    'logotext2': 'exam',  # orange, light
    'logotext3': ':docs'  # white, light
}
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
html_logo = '_static/imexam_logo_trans.png'
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being 16x16
# or 32x32 pixels large.
html_favicon = os.path.join('_static', 'imexam.ico')
# A "Last built" timestamp is inserted at every page bottom, using the
# given strftime format. Set to '' to omit this timestamp.
# html_last_updated_fmt = '%d %b %Y'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release>".
html_title = f'{project} {release}'
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# Static files to copy after template files
html_static_path = ['_static']
html_style = 'imexam.css'
# -- Options for LaTeX output -------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples (source
# start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation', author, 'manual')]
latex_logo = '_static/imexam_logo.pdf'
# -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples (source start file, name,
# description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
# -- Resolving issue number to links in changelog -----------------------------
github_project = setup_cfg['github_project']
github_issues_url = f'https://github.com/{github_project}/issues/'
# -- Turn on nitpicky mode for sphinx (to warn about references not found) ----
nitpicky = False
nitpick_ignore = []
# Some warnings are impossible to suppress, and you can list specific
# references that should be ignored in a nitpick-exceptions file which
# should be inside the docs/ directory. The format of the file should be:
#
# <type> <class>
#
# for example:
#
# py:class astropy.io.votable.tree.Element
# py:class astropy.io.votable.tree.SimpleElement
# py:class astropy.io.votable.tree.SimpleElementWithContent
#
# Uncomment the following lines to enable the exceptions:
nitpick_filename = 'nitpick-exceptions.txt'
if os.path.isfile(nitpick_filename):
    # Use a context manager so the file handle is closed deterministically
    # (the original iterated a bare open() and leaked the handle).
    with open(nitpick_filename) as nitpick_file:
        for line in nitpick_file:
            # Skip blank lines and comment lines.
            if line.strip() == "" or line.startswith("#"):
                continue
            dtype, target = line.split(None, 1)
            nitpick_ignore.append((dtype, target.strip()))
# -- Options for linkcheck output ---------------------------------------------
# Retry transient failures several times before reporting a link as broken.
linkcheck_retry = 5
# URLs the link checker should skip entirely.
linkcheck_ignore = ['http://data.astropy.org',
                    r'https://iraf.net/*',
                    r'https://github\.com/astropy/photutils/(?:issues|pull)/\d+']
linkcheck_timeout = 180
# Do not verify #fragment anchors inside linked pages.
linkcheck_anchors = False
|
spacetelescopeREPO_NAMEimexamPATH_START.@imexam_extracted@imexam-master@docs@conf.py@.PATH_END.py
|
{
"filename": "approx_topk_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/kernel_tests/math_ops/approx_topk_test.py",
"type": "Python"
}
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.math.approx_max_k and tf.math.approx_min_k."""
import itertools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.eager.def_function import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
@test_util.run_all_in_graph_and_eager_modes
class ApproxTopkTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Recall and robustness tests for tf.math.approx_max_k / approx_min_k."""
  def setUp(self):
    test_util.TensorFlowTestCase.setUp(self)
    # Seeded generator so the randomized fixtures are reproducible across runs.
    self._rng = np.random.default_rng(42)
  def compute_recall(self, result_neighbors, ground_truth_neighbors):
    """Computes the recall of an approximate nearest neighbor search.
    Args:
      result_neighbors: int32 numpy array of the shape [num_queries,
        neighbors_per_query] where the values are the indices of the dataset.
      ground_truth_neighbors: int32 numpy array of with shape [num_queries,
        ground_truth_neighbors_per_query] where the values are the indices of
        the dataset.
    Returns:
      The recall.
    """
    self.assertLen(result_neighbors.shape, 2)
    self.assertLen(ground_truth_neighbors.shape, 2)
    self.assertEqual(result_neighbors.shape[0], ground_truth_neighbors.shape[0])
    gt_sets = [set(np.asarray(x)) for x in ground_truth_neighbors]
    def hits_per_q(q, nn_per_q):
      # Number of returned neighbors for query q that appear in its truth set.
      return len(list(x for x in nn_per_q if x.item() in gt_sets[q]))
    hits = sum(
        hits_per_q(q, nn_per_q) for q, nn_per_q in enumerate(result_neighbors))
    return hits / ground_truth_neighbors.size
  @parameterized.parameters(
      itertools.product(
          [dtypes.bfloat16, dtypes.float16, dtypes.float32],
          [1, 10],  # k
          [100, 500],  # row_size
          [1, 10, 128],  # num_rows
          [True, False],  # aggregate_to_topk
      ))
  def test_non_fused_max_k(self, dtype, k, row_size, num_rows,
                           aggregate_to_topk):
    """approx_max_k recall should be >= 0.95 on shuffled-iota rows."""
    # Each row is a permutation of arange, so the exact top-k is well defined.
    row = np.arange(row_size, dtype=np.float32)
    db = np.stack(list(self._rng.permutation(row) for _ in range(num_rows)))
    db_op = constant_op.constant(db, dtype=dtype)
    # Must jit-compile to access the xla kernel.
    @function(jit_compile=True)
    def ann(db, k):
      return nn_ops.approx_max_k(db, k, aggregate_to_topk=aggregate_to_topk)
    _, idx = self.evaluate(ann(db_op, k))
    gt = np.argsort(-db)[:, :k]
    ann_recall = self.compute_recall(idx, gt)
    self.assertGreaterEqual(ann_recall, 0.95)
  @parameterized.parameters(
      itertools.product(
          [dtypes.bfloat16, dtypes.float16, dtypes.float32],
          [1, 10],  # k
          [100, 500],  # row_size
          [1, 10, 128],  # num_rows
          [True, False],  # aggregate_to_topk
      ))
  def test_non_fused_min_k(self, dtype, k, row_size, num_rows,
                           aggregate_to_topk):
    """Same as test_non_fused_max_k but exercising approx_min_k."""
    # Use the new rng api
    row = np.arange(row_size, dtype=np.float32)
    db = np.stack(list(self._rng.permutation(row) for _ in range(num_rows)))
    db_op = constant_op.constant(db, dtype=dtype)
    # Must jit-compile to access the xla kernel.
    @function(jit_compile=True)
    def ann(db, k=10):
      return nn_ops.approx_min_k(db, k, aggregate_to_topk=aggregate_to_topk)
    _, idx = self.evaluate(ann(db_op, k))
    gt = np.argsort(db)[:, :k]
    ann_recall = self.compute_recall(idx, gt)
    self.assertGreaterEqual(ann_recall, 0.95)
  @parameterized.parameters(
      itertools.product(
          [dtypes.float32],  # Use float32 for numerical stability.
          [1, 10],  # k
          [100, 500],  # db_size
          [1, 10, 128],  # qy_size
          [2, 32],  # feature dim
      ))
  # MIPS = Maximal Inner Product Search
  def test_mips(self, dtype, k, db_size, qy_size, feature_dim):
    """approx_max_k over a score matrix implements MIPS with good recall."""
    qy = self._rng.random([qy_size, feature_dim])
    db = self._rng.random([db_size, feature_dim])
    qy_op = constant_op.constant(qy, dtype=dtype)
    db_op = constant_op.constant(db, dtype=dtype)
    # Must jit-compile to access the xla kernel.
    @function(jit_compile=True)
    def ann(qy, db, k):
      scores = math_ops.matmul(qy, db, transpose_b=True)
      return nn_ops.approx_max_k(scores, k)
    _, idx = self.evaluate(ann(qy_op, db_op, k))
    # Negate so argsort (ascending) yields the largest inner products first.
    scores = self.evaluate(-math_ops.matmul(qy_op, db_op, transpose_b=True))
    gt = np.argsort(scores)[:, :k]
    ann_recall = self.compute_recall(idx, gt)
    self.assertGreaterEqual(ann_recall, 0.95)
  @parameterized.parameters(
      itertools.product(
          [dtypes.float32],  # Use float32 for numerical stability.
          [1, 10],  # k
          [100, 500],  # db_size
          [1, 10, 128],  # qy_size
          [2, 32],  # feature dim
      ))
  # L2ANN = Approximate Nearest Neighbor search in the L2 metric space
  def test_l2ann(self, dtype, k, db_size, qy_size, feature_dim):
    """approx_min_k over half-norm-adjusted scores implements L2 ANN."""
    qy = self._rng.random([qy_size, feature_dim])
    db = self._rng.random([db_size, feature_dim])
    # ||q - d||^2 = ||q||^2 - 2 q.d + ||d||^2; the ||q||^2 term is constant
    # per query, so ranking by ||d||^2/2 - q.d matches ranking by L2 distance.
    db_half_norm_sq = np.linalg.norm(db, axis=1)**2 / 2
    qy_op = constant_op.constant(qy, dtype=dtype)
    db_op = constant_op.constant(db, dtype=dtype)
    db_half_norm_sq_op = constant_op.constant(db_half_norm_sq, dtype=dtype)
    # Must jit-compile to access the xla kernel.
    @function(jit_compile=True)
    def ann(qy, db, db_half_norm_sq, k):
      scores = db_half_norm_sq - math_ops.matmul(qy, db, transpose_b=True)
      return nn_ops.approx_min_k(scores, k)
    _, idx = self.evaluate(ann(qy_op, db_op, db_half_norm_sq_op, k))
    scores = self.evaluate(db_half_norm_sq_op -
                           math_ops.matmul(qy_op, db_op, transpose_b=True))
    gt = np.argsort(scores)[:, :k]
    ann_recall = self.compute_recall(idx, gt)
    self.assertGreaterEqual(ann_recall, 0.95)
  def test_highdim(self):
    """Reduction along a non-trailing axis of a rank-4 tensor."""
    db = self._rng.random([2, 10, 200, 3], dtype=np.float32)
    k = 5
    @function(jit_compile=True)
    def ann(db, k):
      return nn_ops.approx_min_k(db, k=k, reduction_dimension=2)
    _, idx = self.evaluate(ann(db, k))
    gt = np.argsort(db, axis=2)[:, :, :k, :]
    # Flatten all non-reduction axes so compute_recall sees 2-D inputs.
    flat_idx = np.reshape(np.transpose(idx, [0, 1, 3, 2]), [2 * 10 * 3, k])
    flat_gt = np.reshape(np.transpose(gt, [0, 1, 3, 2]), [2 * 10 * 3, k])
    ann_recall = self.compute_recall(flat_idx, flat_gt)
    self.assertGreaterEqual(ann_recall, 0.95)
  @parameterized.parameters(
      itertools.product(
          [dtypes.bfloat16, dtypes.float16, dtypes.float32],
          [1, 10],  # k
          [100, 500],  # row_size
          [1, 10, 128],  # num_rows
      ))
  def test_gradients(self, dtype, k, row_size, num_rows):
    """Gradient of approx_max_k scatters output grads to the top-k slots."""
    row = np.arange(row_size, dtype=np.float32)
    db = np.stack(list(self._rng.permutation(row) for _ in range(num_rows)))
    db_op = constant_op.constant(db, dtype=dtype)
    out_grads = self._rng.random([num_rows, k])
    out_grads_op = constant_op.constant(out_grads, dtype=dtype)
    # Must jit-compile to access the xla kernel.
    @function(jit_compile=True)
    def ann_with_grads(db, out_grads):
      with backprop.GradientTape() as tape:
        tape.watch(db)
        val, idx = nn_ops.approx_max_k(db, k)
      result_in_grads = tape.gradient(val, db, out_grads)
      # Build [row, col] index pairs for the returned top-k positions...
      lifted_k_idx = array_ops.reshape(idx, [num_rows, k, 1])
      iota_idx = array_ops.broadcast_to(
          array_ops.reshape(math_ops.range(num_rows), [num_rows, 1, 1]),
          [num_rows, k, 1])
      lifted_idx = array_ops.concat([iota_idx, lifted_k_idx], axis=2)
      k_idx_s = array_ops.reshape(lifted_idx, [num_rows * k, 2])
      k_gra_s = array_ops.reshape(out_grads, [num_rows * k])
      # ...and scatter the output grads there to form the expected input grad.
      expected_in_grads = array_ops.scatter_nd(k_idx_s, k_gra_s,
                                               [num_rows, row_size])
      return [expected_in_grads, result_in_grads]
    expected_in_grads, result_in_grads = self.evaluate(
        ann_with_grads(db_op, out_grads_op))
    self.assertAllClose(expected_in_grads, result_in_grads)
  def test_invalid_input(self):
    """Out-of-range attributes should raise instead of crashing."""
    @function(jit_compile=True)
    def fuzz_jit():
      return nn_ops.approx_max_k(
          [
              183.39395141601562,
              62.6842041015625,
              83.8385238647461,
              204.36642456054688,
          ],
          4774,
          reduction_dimension=0x8282828,
          recall_target=135.9822179933652,
          reduction_input_size_override=6154,
          aggregate_to_topk=True,
      )
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      fuzz_jit()
  def test_b272094281(self):
    """Regression test for b/272094281 (fuzzer-found overflowing inputs)."""
    @function(jit_compile=True)
    def fuzz_jit():
      return nn_ops.approx_max_k(
          [],
          9223372036854775807,
          reduction_dimension=-4294967297 + 0x41,
          reduction_input_size_override=-9223372036854775807,
          aggregate_to_topk=False,
      )
    with self.assertRaises((errors.InvalidArgumentError, ValueError)):
      fuzz_jit()
  @parameterized.parameters(
      itertools.product(
          [dtypes.float16, dtypes.bfloat16, dtypes.float32],
          [1, 10],  # k
          [100, 500],  # row_size
          [1, 10, 128],  # num_rows
      )
  )
  def test_nonjit(self, dtype, k, row_size, num_rows):
    # Support regular topk semantics.
    row = np.arange(row_size, dtype=np.float32)
    db = np.stack(list(self._rng.permutation(row) for _ in range(num_rows)))
    db_tensor = constant_op.constant(db, dtype=dtype)
    _, idx = self.evaluate(nn_ops.approx_max_k(db_tensor, k))
    # Without jit the result should be exact: compare sorted index sets.
    sorted_idx = np.sort(idx)
    expected = np.sort(np.argsort(-db)[:, :k])
    self.assertAllEqual(sorted_idx, expected)
# Run the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@kernel_tests@math_ops@approx_topk_test.py@.PATH_END.py
|
{
"filename": "detectors.py",
"repo_name": "scalessim/scalessim",
"repo_path": "scalessim_extracted/scalessim-main/scalessim/detectors.py",
"type": "Python"
}
|
import configparser
from .detector import nghxrg as ng
from .io import read_ini
import os
class Detector:
    """Detector noise model wrapping the nghxrg HXRGNoise generator.

    Reads baseline noise parameters from an .ini file (``[Defined]``
    section) and produces synthetic noise frames via ``HXRGNoise.mknoise``.
    """

    def __init__(self, ini_file='scalessim/detector/h2rg.ini',
                 pca0_file='scalessim/detector/lmircam_pca0.fits'):
        """Load detector parameters and configure the noise generator.

        Args:
            ini_file: Path to the detector .ini file with a [Defined]
                section of noise parameters. The default preserves the
                previously hard-coded H2RG configuration.
            pca0_file: Path to the FITS file holding the PCA-zero
                "picture frame" noise template.
        """
        config = configparser.ConfigParser()
        config.read(ini_file)
        self.arg_detector = {}
        self.arg_detector.update(read_ini(config['Defined']))
        self.ng_h2rg = ng.HXRGNoise(verbose=False,
                                    wind_mode='WINDOW',
                                    naxis1=2048, naxis2=2048,
                                    pca0_file=pca0_file)

    def make_noise(self, ofile=None, rn=1., ped=1., cpink=1., upink=1., acn=1., pca0_amp=1.):
        """Generate a synthetic noise frame.

        Each multiplier scales the corresponding baseline value read from
        the detector .ini file; the scaled values are stored as attributes.

        Args:
            ofile: Optional output file handed through to ``mknoise``.
            rn: Multiplier for the white CDS readout noise.
            ped: Multiplier for the DC pedestal drift rms.
            cpink: Multiplier for the correlated pink noise.
            upink: Multiplier for the uncorrelated pink noise.
            acn: Multiplier for the correlated ACN.
            pca0_amp: Multiplier for the PCA-zero "picture frame" amplitude.

        Returns:
            Whatever ``HXRGNoise.mknoise(ofile)`` returns.
        """
        self.rd_noise = self.arg_detector['readout_noise_cds'] * rn  # White read noise per integration
        self.pedestal = self.arg_detector['pedestal'] * ped  # DC pedestal drift rms
        self.c_pink = self.arg_detector['c_pink'] * cpink  # Correlated pink noise
        self.u_pink = self.arg_detector['u_pink'] * upink  # Uncorrelated pink noise
        self.acn = self.arg_detector['acn'] * acn  # Correlated ACN
        self.pca0_amp = self.arg_detector['pca0_amp'] * pca0_amp  # Amplitude of PCA zero "picture frame" noise
        # NOTE(review): the scaled values above are stored on self but are not
        # forwarded to mknoise(), which therefore runs with its own defaults.
        # Confirm whether they should be passed as keyword arguments.
        return self.ng_h2rg.mknoise(ofile)
scalessimREPO_NAMEscalessimPATH_START.@scalessim_extracted@scalessim-main@scalessim@detectors.py@.PATH_END.py
|
{
"filename": "redcal_inspect_2458116.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/redcal_inspect/redcal_inspect_2458116.ipynb",
"type": "Jupyter Notebook"
}
|
# Stage 2 Redundant Calibration Nightly Notebook
**Josh Dillon**, Last Revised 7/30/20
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from hera_cal import io, redcal, apply_cal
from hera_qm.metrics_io import load_metric_file
import glob
import os
from copy import deepcopy
import inspect
import h5py
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
```
JD = "2458116"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458116"
```python
print('Looking for data in', data_path, 'on JD', JD)
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.sum.uvh5')))
if len(data_list) == 0:
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.uvh5')))
print('Found {} files.'.format(len(data_list)))
```
Looking for data in /lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458116 on JD 2458116
Found 73 files.
# Load Single File
```python
# Pick middle of the night data file to examine
example_file = data_list[len(data_list)//2]
file_JD = '.'.join([s for s in example_file.split('.') if s.isdigit()])
```
```python
# controls how many redundant baseline groups to plot.
# 2 means the most common ee- and nn-polarized baseline.
n_reds_to_plot = 2
```
```python
# Load omnical gains and determine ex_ants
hc = io.HERACal(example_file.replace('.uvh5', f'.omni.calfits'))
gains, gain_flags, _, _ = hc.read()
ex_ants = [ant for ant in gain_flags if np.all(gain_flags[ant])]
# Load the most common redundant baselines and calibrate
hd = io.HERAData(example_file)
reds = redcal.get_reds(hd.antpos, pols=['ee', 'nn'])
red_bl_map = {bl: red[0] for red in reds for bl in red}
reds = redcal.filter_reds(reds, ex_ants=ex_ants)
reds = sorted(reds, key=len, reverse=True)
data, flags, nsamples = hd.read(
bls=[bl for red in reds[0:n_reds_to_plot] for bl in red])
apply_cal.calibrate_in_place(data, gains, data_flags=flags, cal_flags=gain_flags)
# Load omnical visibility solutions
hdo = io.HERAData(example_file.replace('.uvh5', f'.omni_vis.uvh5'))
omni_data, omni_flags, omni_nsamples = hdo.read(
bls=[red_bl_map[red[0]] for red in reds[0:n_reds_to_plot]])
```
# Inspect Single File
```python
plt.figure(figsize=(8,8))
plt.scatter(np.array(list(hd.antpos.values()))[:,0],
np.array(list(hd.antpos.values()))[:,1], c='w', s=0)
for ant,pos in hd.antpos.items():
bad = ant in [ant[0] for ant in ex_ants]
plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
fill=(~bad), color=['grey','r'][bad]))
plt.text(pos[0],pos[1],str(ant), va='center', ha='center', color='w')
plt.xlabel("Antenna East-West Position (meters)")
plt.ylabel("Antenna North-South Position (meters)")
plt.title('Antenna Positions on {} (Red = Flagged)'.format(file_JD));
plt.axis('equal')
plt.tight_layout()
plt.show()
```

### Figure 1: Array and Flagged Antennas
#### OBSERVER CHECKLIST:
* Check that the array configuration looks reasonable.
* Check that all antennas expected to be flagged are actually flagged, but also that not everything is getting flagged.
```python
# Plot redundant groups
for red in reds[0:n_reds_to_plot]:
blvec = hd.antpos[red[0][1]] - hd.antpos[red[0][0]]
for func, plot, ylabel in zip([np.abs, np.angle], [plt.semilogy, plt.plot], ['Amplitude (Arbitrary Units)', 'Phase (Radians)']):
plt.figure(figsize=(16,4))
for bl in red:
plot(hd.freqs/1e6, func(np.median(data[bl], axis=0)))
plot(hd.freqs/1e6, func(np.median(omni_data[red_bl_map[red[0]]], axis=0)), 'k-', label='Omnical Visibility Solution')
plt.xlabel('Frequency (MHz)')
plt.ylabel(ylabel)
plt.legend(loc='lower right')
plt.title('{}-Polarized, {:f} m East, {:f} m North Visibility on {}'.format(red[0][2], blvec[0], blvec[1], file_JD))
```




### Figure 2: Example redundant baseline groups and omnical visibility solution for a single file.
#### OBSERVER CHECKLIST:
* Check that that there actually is something plotted and the data isn't all flagged somehow.
* Check whether most of the baselines cluster together and that the black line follows the cluster.
* Check whether there are any significant outliers (though it won't be clear as yet which antennas those are attributable to, see below).
# Load Whole Day
```python
# load all redcal metadata into dictionaries
meta_list = [df.replace('.uvh5', f'.redcal_meta.hdf5') for df in data_list]
ee_iters_dict = {}
nn_iters_dict = {}
dlys_dict = {}
flips_dict = {}
times_dict = {}
lsts_dict = {}
histories_dict = {}
ants = set([])
for mf in meta_list:
(fc_meta, omni_meta, freqs, times_dict[mf], lsts_dict[mf],
antpos, histories_dict[mf]) = io.read_redcal_meta(mf)
ee_iters_dict[mf] = omni_meta['iter']["['ee']"]
nn_iters_dict[mf] = omni_meta['iter']["['nn']"]
flips_dict[mf] = fc_meta['polarity_flips']
dlys_dict[mf] = fc_meta['dlys']
ants |= set(fc_meta['dlys'].keys())
ants = sorted(ants)
times = np.hstack(list(times_dict.values()))
lsts = np.hstack(list(lsts_dict.values()))
```
```python
# Load chisq and flagging info from omnical gains
cal_list = [df.replace('.uvh5', f'.omni.calfits') for df in data_list]
ant_flags_dict = {}
chisq_ee_dict = {}
chisq_nn_dict = {}
cspa_med_dict = {}
for cal in cal_list:
hc = io.HERACal(cal)
_, flags, cspa, chisq = hc.read()
ant_flags_dict[cal] = {ant: np.all(flags[ant]) for ant in flags}
chisq_ee_dict[cal] = chisq['Jee']
chisq_nn_dict[cal] = chisq['Jnn']
cspa_med_dict[cal] = {ant: np.nanmedian(cspa[ant], axis=1) for ant in cspa}
all_flagged_dict = {ant: np.all([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
cspa = {ant: np.hstack([np.squeeze(cspa_med_dict[cal][ant]) / \
~ant_flags_dict[cal][ant] for cal in cal_list]) for ant in ants}
```
invalid value encountered in true_divide
divide by zero encountered in true_divide
```python
# save middle-numbered ants with a minimal number of flags
ants_to_save = {}
for pol in ['Jee', 'Jnn']:
min_flags = np.min([np.sum(~np.isfinite(cspa[ant]))
for ant in cspa if ant[1] == pol])
ant_candidates = sorted([ant for ant in cspa if ant[1] == pol and
np.sum(~np.isfinite(cspa[ant])) == min_flags])
Nac = len(ant_candidates)
ants_to_save[pol] = ant_candidates[(Nac // 2 - 1):(Nac // 2 + 1)]
# Reload omnical gains
gain_dict = {}
flag_dict = {}
for cal in cal_list:
hc = io.HERACal(cal)
gains, flags, _, _ = hc.read()
gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
gains = {ant: np.vstack([gain_dict[cal][ant] for cal in gain_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flags = {ant: np.vstack([flag_dict[cal][ant] for cal in flag_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flag_mask = np.all([f for f in flags.values()], axis=0)
```
# Inspect Whole Day
```python
# Plot delays
dlys = {ant: np.hstack([dlys_dict[mf][ant] for mf in dlys_dict]) for ant in ants}
dly_meds = {ant: np.nanmedian(dlys[ant]) for ant in dlys}
plt.figure(figsize=(16,10))
for ant in dlys:
plt.plot(times, (dlys[ant])*1e9)
if np.isfinite(dly_meds[ant]):
plt.text(np.min(times) - 20*np.median(np.diff(times)),
1e9*dly_meds[ant], '{}{}'.format(ant[0], ant[1][-1]),
va='center', ha='right', fontsize=8)
plt.gca().set_xticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1)
for t in plt.gca().get_xticks()]] * 12 / np.pi, 2))
plt.xlabel('LST (Hours)')
plt.ylabel('Delay (ns)')
plt.title('Firstcal Delays');
```
All-NaN slice encountered
FixedFormatter should only be used together with FixedLocator
Text(0.5, 1.0, 'Firstcal Delays')

### Figure 3: Firstcal Delays
Shows solved firstcal delays. These will have an arbitrary tip/tilt and offset.
#### OBSERVER CHECKLIST:
* Look for outliers. All antennas should be within a few hundred ns.
```python
# Plot offset delays
plt.figure(figsize=(16, len(ants)/7.4))
unflagged_dlys = {ant: dlys[ant] for ant in dlys if not all_flagged_dict[ant]}
for n, ant in enumerate(unflagged_dlys):
plt.plot(times, (dlys[ant]-dly_meds[ant])*1e9 + n, label=ant)
plt.text(np.min(times) - 20*np.median(np.diff(times)),
n, '{}{}'.format(ant[0], ant[1][-1]),
va='center', ha='right', fontsize=8)
plt.gca().set_xticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1)
for t in plt.gca().get_xticks()]] * 12 / np.pi, 2))
plt.xlabel('LST (Hours)')
plt.ylabel('Delay with Arbitrary Offset (ns)')
plt.title('Firstcal Delays With Arbitrary Offset');
plt.ylim([-10, len(unflagged_dlys) + 10])
```
FixedFormatter should only be used together with FixedLocator
(-10.0, 92.0)

### Figure 4: Offset Firstcal Delays
Same as Figure 3, but with an arbitrary vertical offset applied to each antenna for legibility.
#### OBSERVER CHECKLIST:
* Look for antennas that exhibit wild swings (> 10 ns) in their delay over time.
```python
# Figure out oc_maxiter
if np.all(['oc_maxiter' in history for history in histories_dict.values()]):
history = list(histories_dict.values())[0]
oc_maxiter = int(history.split('--oc_maxiter')[1].split('--')[0])
else:
oc_maxiter = inspect.signature(redcal.redcal_run).parameters['oc_maxiter'].default
```
```python
# Recast from dictionaries to one big array
ee_iters = np.vstack(np.array(list(ee_iters_dict.values())))
nn_iters = np.vstack(np.array(list(nn_iters_dict.values())))
plt.figure(figsize=(20,12))
my_cmap = deepcopy(matplotlib.cm.get_cmap('viridis'))
my_cmap.set_under('w')
my_cmap.set_over('r')
for sp, iters, t in zip([121, 122], [ee_iters, nn_iters],
['ee-polarized', 'nn-polarized']):
plt.subplot(sp)
plt.imshow(iters, aspect='auto', cmap=my_cmap, vmin=1, vmax=oc_maxiter-1, interpolation='nearest',
extent=[freqs[0]/1e6, freqs[-1]/1e6, times[-1], times[0]])
plt.title('Number of Omnical Iterations: ' + t)
plt.xlabel('Frequency (MHz)')
plt.ylabel('LST (Hours)')
plt.gca().set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1)
for t in plt.gca().get_yticks()]] * 12 / np.pi, 2))
plt.colorbar()
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
You are modifying the state of a globally registered colormap. In future versions, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = copy.copy(mpl.cm.get_cmap("viridis"))
You are modifying the state of a globally registered colormap. In future versions, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = copy.copy(mpl.cm.get_cmap("viridis"))
FixedFormatter should only be used together with FixedLocator

### Figure 5: Number of omnical iterations per polarization
Red indicates that omnical reached the maximum number of integrations. White indicates that omnical didn't run, likely because the data were flagged.
#### OBSERVER CHECKLIST:
* Check that few-to-no data were flagged before omnical (white), and that any white regions match the expected pre-omnical flags.
* Check that few-to-no data hit the maximum number of iterations for omnical (red)
```python
# Make dictionary mapping antenna to the whole night of antenna flips
flips = {ant: np.hstack([flips_dict[mf][ant] for mf in flips_dict]) for ant in ants}
plt.figure(figsize=(16,8))
my_cmap = matplotlib.cm.get_cmap('cool')
for sp, jpol, t in zip([121, 122], ['Jee', 'Jnn'], ['ee-polarized ', 'nn-polarized']):
plt.subplot(sp)
plt.scatter(np.array(list(hd.antpos.values()))[:,0],
np.array(list(hd.antpos.values()))[:,1],
c='w', s=0)
for ant,pos in hd.antpos.items():
flip_frac = np.nanmean(flips[(ant, jpol)])
if np.isfinite(flip_frac):
color=my_cmap(flip_frac)
else:
color='w'
plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
fill=(~bad), color=color, ec='k'))
plt.text(pos[0], pos[1],
'{}:\n{}%'.format(ant, np.round(100*flip_frac,0)),
va='center', ha='center', color='k')
plt.xlabel("Antenna East-West Position (meters)")
plt.ylabel("Antenna North-South Position (meters)")
# count the number of times a self-consistent polarity flip solution was found
all_flips_this_pol = [flips[ant] for ant in flips if ant[1] == jpol]
success = np.round(100*np.mean(np.any(np.isfinite(all_flips_this_pol), axis=0)), 2)
plt.title(t + ' Polarity Flips -- Solution Found {}% of the Time'.format(success))
plt.axis('equal')
plt.tight_layout()
```
Mean of empty slice

### Figure 6: Detection of polarity-flipped antennas
Blue indicates nominal operation, pink indicates polarity flips.
#### OBSERVER CHECKLIST:
* Check that all antennas are either nearly 100% flipped, nearly 0% flipped, or flagged.
* Check that a solution for polarity flips was found a reasonable percentage of the time (ideally more than a few %)
```python
# Grid and plot overall chi^2 for each polarization
ee_chisq = np.vstack(np.array(list(chisq_ee_dict.values())))
nn_chisq = np.vstack(np.array(list(chisq_nn_dict.values())))
plt.figure(figsize=(20,12))
for sp, cs, t in zip([121, 122], [ee_chisq, nn_chisq], ['ee-polarized', 'nn-polarized']):
plt.subplot(sp)
plt.imshow(cs / ~flag_mask, aspect='auto', vmin=0, cmap='inferno', vmax=3, interpolation='nearest',
extent=[freqs[0]/1e6, freqs[-1]/1e6, times[-1], times[0]])
plt.title('Overall $\chi^2$ / DoF: ' + t)
plt.xlabel('Frequency (MHz)')
plt.ylabel('LST (Hours)')
plt.gca().set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1)
for t in plt.gca().get_yticks()]] * 12 / np.pi, 2))
plt.colorbar()
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
divide by zero encountered in true_divide
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 7 Overall $\chi^2$ / DoF
#### OBSERVER CHECKLIST:
* Look for regions of large non-redundancy not directly attributable to RFI.
```python
# plot all chi^2 per antenna, highlight antennas with >1.5 mean chisq per ant (median over frequency)
plt.figure(figsize=(20,10))
for sp, pol, t in zip([121, 122], ['Jee', 'Jnn'], ['ee-polarized', 'nn-polarized']):
plt.subplot(sp)
for ant in sorted([ant for ant in ants if ant[1] == pol],
key=lambda ant: np.nanmean(cspa[ant]), reverse=True):
if not np.all([ant_flags_dict[cal][ant] for cal in cal_list]):
if np.nanmean(cspa[ant]) > 1.5:
plt.plot(times, cspa[ant], '.', label=ant)
else:
plt.plot(times, cspa[ant], '-', color='grey', lw=.25)
plt.ylabel('Normalized Median $\chi^2$ per Antenna (unitless)')
plt.gca().set_xticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1)
for t in plt.gca().get_xticks()]] * 12 / np.pi, 2))
plt.xlabel('LST (Hours)')
plt.title(t + ' Antennas')
plt.legend()
```
FixedFormatter should only be used together with FixedLocator
FixedFormatter should only be used together with FixedLocator

### Figure 8: Normalized $\chi^2$ per antenna
Antennas with chisq per ant (mean over time, median over freq) > 1.5 are colored. All other antennas are shown in grey.
#### OBSERVER CHECKLIST:
* Look for outliers in the chi^2 per antenna distribution
```python
# Plot example gain amplitudes
plt.figure(figsize=(20,12))
for sp, pol in zip([121, 122], ['Jee', 'Jnn']):
plt.subplot(sp)
ant = ants_to_save[pol][1]
plt.title(str(ant) + ' Gain Magnitude')
plt.imshow(np.abs(gains[ant]) / ~flag_mask, aspect='auto', cmap='inferno', interpolation='nearest',
extent=[freqs[0]/1e6, freqs[-1]/1e6, times[-1], times[0]])
plt.clim([0,2])
plt.gca().set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1)
for t in plt.gca().get_yticks()]] * 12 / np.pi, 2))
plt.colorbar()
plt.xlabel('Frequency (MHz)')
plt.ylabel('LST (Hours)')
```
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 9: Example Amplitudes
#### OBSERVER CHECKLIST:
* Look for large discontinuities or fuzziness not attributable to RFI.
```python
# Plot example gain relative phases
plt.figure(figsize=(20,12))
for sp, pol in zip([121, 122], ['Jee', 'Jnn']):
plt.subplot(sp)
ant0, ant1 = ants_to_save[pol]
plt.title('Angle of gains[{}] / gains[{}]'.format(ant0, ant1))
plt.imshow(np.angle(gains[ant0] / gains[ant1]) / ~flag_mask, aspect='auto', cmap='twilight', interpolation='nearest',
extent=[freqs[0]/1e6, freqs[-1]/1e6, times[-1], times[0]])
plt.gca().set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1)
for t in plt.gca().get_yticks()]] * 12 / np.pi, 2))
plt.colorbar()
plt.xlabel('Frequency (MHz)')
plt.ylabel('LST (Hours)')
```
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 10: Example Gain Phases
Relative gain phases of two example antennas.
#### OBSERVER CHECKLIST:
* Check that these gains are relatively stable in time and that there aren't huge phase discontinuities.
# Metadata
```python
print(redcal.version.history_string())
```
------------
This file was produced by the function <module>() in <ipython-input-1-c6de44361328> using:
git_branch: master
git_description: v3.0-733-gd2dd8ccf
git_hash: d2dd8ccf3fe43d5e5eb6a4c28ceaf4a6e3d1fcb7
git_origin: git@github.com:HERA-Team/hera_cal.git
version: 3.0
------------
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@redcal_inspect@redcal_inspect_2458116.ipynb@.PATH_END.py
|
{
"filename": "six.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/pip/vendor/six.py",
"type": "Python"
}
|
"""Utilities for writing code that runs on Python 2 and 3"""
# Copyright (c) 2010-2013 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.3.0"
# True if we are running on Python 3.
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
def _add_doc(func, doc):
"""Add documentation to a function."""
func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot."""
    # __import__("a.b") returns the top-level package "a"; look the full
    # dotted name up in sys.modules to get the innermost module instead.
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value lazily on first access.

    Subclasses implement ``_resolve()``. The resolved object is cached on
    the instance under the same name and the descriptor then removes itself
    from the class, so later lookups hit the cached attribute directly.
    """
    def __init__(self, name):
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        # Cache the resolved object on the instance under the same name.
        setattr(obj, self.name, result)
        # This is a bit ugly, but it avoids running this again.
        delattr(tp, self.name)
        return result
class MovedModule(_LazyDescr):
    """Lazy descriptor for a module renamed between Python 2 and Python 3."""
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                # Unless given explicitly, the Python 3 name equals the
                # exposed attribute name.
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        # Import and return the version-appropriate module.
        return _import_module(self.mod)
class MovedAttribute(_LazyDescr):
    """Lazy descriptor for an attribute that moved between modules
    (and possibly changed name) across Python versions."""
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                # Default the Python 3 module to the exposed name.
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                if old_attr is None:
                    # Neither attribute name given: keep the exposed name.
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        # Import the owning module, then fetch the attribute from it.
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _MovedItems(types.ModuleType):
    """Lazy loading of moved objects.

    Pseudo-module exposed as ``six.moves``; its attributes are populated
    below from ``_moved_attributes`` as lazy descriptors."""
_moved_attributes = [
MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
MovedAttribute("map", "itertools", "builtins", "imap", "map"),
MovedAttribute("reload_module", "__builtin__", "imp", "reload"),
MovedAttribute("reduce", "__builtin__", "functools"),
MovedAttribute("StringIO", "StringIO", "io"),
MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
MovedModule("builtins", "__builtin__"),
MovedModule("configparser", "ConfigParser"),
MovedModule("copyreg", "copy_reg"),
MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
MovedModule("http_cookies", "Cookie", "http.cookies"),
MovedModule("html_entities", "htmlentitydefs", "html.entities"),
MovedModule("html_parser", "HTMLParser", "html.parser"),
MovedModule("http_client", "httplib", "http.client"),
MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
MovedModule("cPickle", "cPickle", "pickle"),
MovedModule("queue", "Queue"),
MovedModule("reprlib", "repr"),
MovedModule("socketserver", "SocketServer"),
MovedModule("tkinter", "Tkinter"),
MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
MovedModule("tkinter_colorchooser", "tkColorChooser",
"tkinter.colorchooser"),
MovedModule("tkinter_commondialog", "tkCommonDialog",
"tkinter.commondialog"),
MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
MovedModule("tkinter_font", "tkFont", "tkinter.font"),
MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
"tkinter.simpledialog"),
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
MovedModule("winreg", "_winreg"),
]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
del attr
moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves")
def add_move(move):
    """Register *move* so it becomes accessible through six.moves."""
    target_name = move.name
    setattr(_MovedItems, target_name, move)
def remove_move(name):
    """Remove item from six.moves."""
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        # The descriptor may already have been resolved and cached on the
        # "moves" instance; drop it from the instance __dict__ instead.
        try:
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
if PY3:
_meth_func = "__func__"
_meth_self = "__self__"
_func_closure = "__closure__"
_func_code = "__code__"
_func_defaults = "__defaults__"
_func_globals = "__globals__"
_iterkeys = "keys"
_itervalues = "values"
_iteritems = "items"
_iterlists = "lists"
else:
_meth_func = "im_func"
_meth_self = "im_self"
_func_closure = "func_closure"
_func_code = "func_code"
_func_defaults = "func_defaults"
_func_globals = "func_globals"
_iterkeys = "iterkeys"
_itervalues = "itervalues"
_iteritems = "iteritems"
_iterlists = "iterlists"
try:
advance_iterator = next
except NameError:
def advance_iterator(it):
return it.next()
next = advance_iterator
try:
callable = callable
except NameError:
def callable(obj):
return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
if PY3:
def get_unbound_function(unbound):
return unbound
Iterator = object
else:
def get_unbound_function(unbound):
return unbound.im_func
class Iterator(object):
def next(self):
return type(self).__next__(self)
callable = callable
_add_doc(get_unbound_function,
"""Get the function out of a possibly unbound function""")
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
def iterkeys(d, **kw):
    """Return an iterator over the keys of a dictionary."""
    # _iterkeys holds the version-specific method name ("keys" on Python 3,
    # "iterkeys" on Python 2), so one call site works on both.
    return iter(getattr(d, _iterkeys)(**kw))
def itervalues(d, **kw):
    """Return an iterator over the values of a dictionary."""
    # Dispatch through the version-specific method name in _itervalues.
    return iter(getattr(d, _itervalues)(**kw))
def iteritems(d, **kw):
    """Return an iterator over the (key, value) pairs of a dictionary."""
    # Dispatch through the version-specific method name in _iteritems.
    return iter(getattr(d, _iteritems)(**kw))
def iterlists(d, **kw):
    """Return an iterator over the (key, [values]) pairs of a dictionary."""
    # Dispatch through the version-specific method name in _iterlists
    # (multi-dict style mappings only).
    return iter(getattr(d, _iterlists)(**kw))
if PY3:
def b(s):
return s.encode("latin-1")
def u(s):
return s
if sys.version_info[1] <= 1:
def int2byte(i):
return bytes((i,))
else:
# This is about 2x faster than the implementation above on 3.2+
int2byte = operator.methodcaller("to_bytes", 1, "big")
import io
StringIO = io.StringIO
BytesIO = io.BytesIO
else:
def b(s):
return s
def u(s):
return unicode(s, "unicode_escape")
int2byte = chr
import StringIO
StringIO = BytesIO = StringIO.StringIO
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise value.with_traceback(tb)
raise value
print_ = getattr(builtins, "print")
del builtins
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("""exec _code_ in _globs_, _locs_""")
exec_("""def reraise(tp, value, tb=None):
raise tp, value, tb
""")
def print_(*args, **kwargs):
"""The new-style print function."""
fp = kwargs.pop("file", sys.stdout)
if fp is None:
return
def write(data):
if not isinstance(data, basestring):
data = str(data)
fp.write(data)
want_unicode = False
sep = kwargs.pop("sep", None)
if sep is not None:
if isinstance(sep, unicode):
want_unicode = True
elif not isinstance(sep, str):
raise TypeError("sep must be None or a string")
end = kwargs.pop("end", None)
if end is not None:
if isinstance(end, unicode):
want_unicode = True
elif not isinstance(end, str):
raise TypeError("end must be None or a string")
if kwargs:
raise TypeError("invalid keyword arguments to print()")
if not want_unicode:
for arg in args:
if isinstance(arg, unicode):
want_unicode = True
break
if want_unicode:
newline = unicode("\n")
space = unicode(" ")
else:
newline = "\n"
space = " "
if sep is None:
sep = space
if end is None:
end = newline
for i, arg in enumerate(args):
if i:
write(sep)
write(arg)
write(end)
_add_doc(reraise, """Reraise an exception.""")
def with_metaclass(meta, base=object):
    """Return a throwaway base class whose metaclass is *meta*.

    Inheriting from the returned class gives the subclass the metaclass
    *meta* on both Python 2 and Python 3 without version-specific syntax.
    """
    temporary_base = meta("NewBase", (base,), {})
    return temporary_base
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@pip@vendor@six.py@.PATH_END.py
|
{
"filename": "mock_backend.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/fft/tests/mock_backend.py",
"type": "Python"
}
|
import numpy as np
import scipy.fft
import threading
class _MockFunction:
def __init__(self, return_value = None):
self.number_calls = threading.local()
self.return_value = return_value
self.last_args = threading.local()
def __call__(self, *args, **kwargs):
if not hasattr(self.number_calls, 'c'):
self.number_calls.c = 0
self.number_calls.c += 1
self.last_args.l = (args, kwargs)
return self.return_value
fft = _MockFunction(np.random.random(10))
fft2 = _MockFunction(np.random.random(10))
fftn = _MockFunction(np.random.random(10))
ifft = _MockFunction(np.random.random(10))
ifft2 = _MockFunction(np.random.random(10))
ifftn = _MockFunction(np.random.random(10))
rfft = _MockFunction(np.random.random(10))
rfft2 = _MockFunction(np.random.random(10))
rfftn = _MockFunction(np.random.random(10))
irfft = _MockFunction(np.random.random(10))
irfft2 = _MockFunction(np.random.random(10))
irfftn = _MockFunction(np.random.random(10))
hfft = _MockFunction(np.random.random(10))
hfft2 = _MockFunction(np.random.random(10))
hfftn = _MockFunction(np.random.random(10))
ihfft = _MockFunction(np.random.random(10))
ihfft2 = _MockFunction(np.random.random(10))
ihfftn = _MockFunction(np.random.random(10))
dct = _MockFunction(np.random.random(10))
idct = _MockFunction(np.random.random(10))
dctn = _MockFunction(np.random.random(10))
idctn = _MockFunction(np.random.random(10))
dst = _MockFunction(np.random.random(10))
idst = _MockFunction(np.random.random(10))
dstn = _MockFunction(np.random.random(10))
idstn = _MockFunction(np.random.random(10))
fht = _MockFunction(np.random.random(10))
ifht = _MockFunction(np.random.random(10))
__ua_domain__ = "numpy.scipy.fft"
_implements = {
scipy.fft.fft: fft,
scipy.fft.fft2: fft2,
scipy.fft.fftn: fftn,
scipy.fft.ifft: ifft,
scipy.fft.ifft2: ifft2,
scipy.fft.ifftn: ifftn,
scipy.fft.rfft: rfft,
scipy.fft.rfft2: rfft2,
scipy.fft.rfftn: rfftn,
scipy.fft.irfft: irfft,
scipy.fft.irfft2: irfft2,
scipy.fft.irfftn: irfftn,
scipy.fft.hfft: hfft,
scipy.fft.hfft2: hfft2,
scipy.fft.hfftn: hfftn,
scipy.fft.ihfft: ihfft,
scipy.fft.ihfft2: ihfft2,
scipy.fft.ihfftn: ihfftn,
scipy.fft.dct: dct,
scipy.fft.idct: idct,
scipy.fft.dctn: dctn,
scipy.fft.idctn: idctn,
scipy.fft.dst: dst,
scipy.fft.idst: idst,
scipy.fft.dstn: dstn,
scipy.fft.idstn: idstn,
scipy.fft.fht: fht,
scipy.fft.ifht: ifht
}
def __ua_function__(method, args, kwargs):
    """uarray protocol hook: dispatch *method* to its mock, else NotImplemented."""
    fn = _implements.get(method)
    if fn is None:
        return NotImplemented
    return fn(*args, **kwargs)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@fft@tests@mock_backend.py@.PATH_END.py
|
{
"filename": "planner.py",
"repo_name": "franpoz/SHERLOCK",
"repo_path": "SHERLOCK_extracted/SHERLOCK-master/sherlockpipe/observation_plan/planner.py",
"type": "Python"
}
|
"""The classes to run the plan task"""
import logging
from multiprocessing import Pool
from astroplan import moon_illumination, Constraint
import os
import shutil
import astroplan
import matplotlib
from astroplan.plots import plot_airmass
from astropy.coordinates import SkyCoord, get_body
import astropy.units as u
from astroplan import EclipsingSystem
import pandas as pd
import numpy as np
from astroplan import (Observer, FixedTarget, AtNightConstraint, AltitudeConstraint)
import matplotlib.pyplot as plt
from astropy.time import Time, TimeDelta
from timezonefinder import TimezoneFinder
from pytz import timezone, utc
class MoonIlluminationSeparationConstraint(Constraint):
    """
    Constrain the distance between the Earth's moon and some targets.

    The required separation scales linearly with the moon illumination:
    ``min_dist`` applies at new moon (illumination 0) and ``max_dist`` at
    full moon (illumination 1).
    """
    def __init__(self, min_dist, max_dist):
        """
        Parameters
        ----------
        min_dist : `~astropy.units.Quantity`
            Minimum moon distance when moon illumination is 0
        max_dist : `~astropy.units.Quantity`
            Maximum moon distance when moon illumination is 1
        """
        self.min_dist = min_dist
        self.max_dist = max_dist
    def compute_constraint(self, times, observer, targets):
        """
        Computes the observability of the moon given a minimum distance to the moon between self.min_dist (for
        illumination = 0) and self.max_dist (for illumination = 1) by interpolating an intermediate distance from those
        two values following a linear regression.
        :param times: the times to compute the constraint for
        :param observer: the observer to compute the constraint for
        :param targets: the list of targets to compute the constraint for
        :return: the positive mask for target being observable for the given times and observer given the constraint is matched
        """
        # removed the location argument here, which causes small <1 deg
        # inaccuracies, but it is needed until astropy PR #5897 is released
        # which should be astropy 1.3.2
        moon = get_body("moon", times)
        # note to future editors - the order matters here
        # moon.separation(targets) is NOT the same as targets.separation(moon)
        # the former calculates the separation in the frame of the moon coord
        # which is GCRS, and that is what we want.
        moon_separation = moon.separation(targets)
        illumination = moon_illumination(times)
        # Linear interpolation between min_dist (dark moon) and max_dist
        # (full moon): the brighter the moon, the larger the required
        # separation before the target counts as observable.
        min_dist = self.min_dist.value + (self.max_dist.value - self.min_dist.value) * illumination
        mask = min_dist <= moon_separation.degree
        return mask
class PlannerInput:
    """Bundle of the per-event data needed by ``Planner.plan_event``.

    Exists so a single picklable argument can be sent to each worker of the
    multiprocessing pool used in ``Planner.create_observation_observables``.
    Attributes mirror the constructor parameters one-to-one; ``i`` is the
    index of this event inside the per-ephemeris arrays (midtransit_times,
    ingress_egress_times, ...).
    """
    def __init__(self, observatory_row, midtransit_time, observer_site, midtransit_times, ingress_egress_times,
                 constraints, moon_for_midtransit_times, moon_dist_midtransit_times, moon_phase_midtransit_times,
                 transits_since_epoch, midtransit_time_low_err, midtransit_time_up_err, low_err_delta,
                 up_err_delta, i, plan_dir, target, min_altitude, transit_fraction, baseline,
                 error_alert) -> None:
        self.observatory_row = observatory_row
        self.midtransit_time = midtransit_time
        self.observer_site = observer_site
        self.midtransit_times = midtransit_times
        self.ingress_egress_times = ingress_egress_times
        self.constraints = constraints
        self.moon_for_midtransit_times = moon_for_midtransit_times
        self.moon_dist_midtransit_times = moon_dist_midtransit_times
        self.moon_phase_midtransit_times = moon_phase_midtransit_times
        self.transits_since_epoch = transits_since_epoch
        self.midtransit_time_low_err = midtransit_time_low_err
        self.midtransit_time_up_err = midtransit_time_up_err
        self.low_err_delta = low_err_delta
        self.up_err_delta = up_err_delta
        self.i = i
        self.plan_dir = plan_dir
        self.target = target
        self.min_altitude = min_altitude
        self.transit_fraction = transit_fraction
        self.baseline = baseline
        self.error_alert = error_alert
class Planner:
"""
Class containing the main method to generate the observation plan events.
"""
@staticmethod
def create_observation_observables(object_id, object_dir, ra, dec, since, name, epoch, epoch_low_err, epoch_up_err,
period, period_low_err, period_up_err, duration,
observatories_file, timezone, latitude, longitude, altitude,
max_days, min_altitude, moon_min_dist, moon_max_dist, transit_fraction, baseline,
error_alert=True, time_unit='jd', cpus=os.cpu_count() - 1):
"""
Computes the observation windows for the given target parameters.
:param object_id: the candidate id
:param object_dir: the candidate directory
:param ra: right ascension of the target
:param dec: declination of the target
:param since: starting plan date
:param name: the name given to the candidate
:param epoch: the candidate epoch
:param epoch_low_err: the candidate epoch's lower error
:param epoch_up_err: the candidate epoch's upper error
:param period: the candidate period
:param period_low_err: the candidate period's lower error
:param period_up_err: the candidate period's upper error
:param duration: the candidate duration
:param observatories_file: the file containing the observatories file (csv format)
:param timezone: the timezone of the observatory (if observatories_file=None)
:param latitude: the latitude of the observatory (if observatories_file=None)
:param longitude: the longitude of the observatory (if observatories_file=None)
:param altitude: the altitude of the observatory (if observatories_file=None)
:param max_days: the maximum number of days to compute the observables
:param min_altitude: the minimum altitude of the target above the horizon
:param moon_min_dist: the minimum moon distance for moon illumination = 0
:param moon_max_dist: the minimum moon distance for moon illumination = 1
:param transit_fraction: the minimum transit observability (0.25 for at least ingress/egress, 0.5 for ingress/egress + midtime, 1 for ingress, egress and midtime).
:param baseline: the required baseline in hours.
:param error_alert: whether to create the alert date to signal imprecise observations
:param time_unit: the unit of the light curve data
:return: the generated data and target folders observatories_df, observables_df, alert_date, plan_dir, images_dir
:return: observatories_df containing the observatories used for the computation
:return: observables_df containing all the observation windows that passed the plan
:return: alert_date in case the plan reached a date where the observation uncertainty was too high
:return: images_dir the directory where images are stored
"""
if observatories_file is not None:
observatories_df = pd.read_csv(observatories_file, comment='#')
else:
observatories_df = pd.DataFrame(columns=['name', 'tz', 'lat', 'long', 'alt'])
observatories_df = observatories_df.append("Obs-1", timezone, latitude, longitude, altitude)
# TODO probably convert epoch to proper JD
primary_eclipse_time = Time(epoch, format=time_unit, scale="tdb")
target = FixedTarget(SkyCoord(str(ra) + ' ' + str(dec), unit=(u.deg, u.deg)))
n_transits = int(max_days // period)
system = EclipsingSystem(primary_eclipse_time=primary_eclipse_time, orbital_period=u.Quantity(period, unit="d"),
duration=u.Quantity(duration, unit="h"), name=name)
observables_df = pd.DataFrame(columns=['observatory', 'timezone', 'start_obs', 'end_obs', 'ingress', 'egress',
'midtime', "midtime_up_err_h", "midtime_low_err_h", 'twilight_evening',
'twilight_morning', 'observable', 'moon_phase', 'moon_dist'])
plan_dir = object_dir + "/plan"
images_dir = plan_dir + "/images"
if not os.path.exists(plan_dir):
os.mkdir(plan_dir)
if os.path.exists(images_dir):
shutil.rmtree(images_dir, ignore_errors=True)
os.mkdir(images_dir)
alert_date = None
planner_inputs = []
for index, observatory_row in observatories_df.iterrows():
if observatory_row['lat'] is None or observatory_row['lat'] == '' or np.isnan(observatory_row['lat']) or \
(observatory_row['lon'] is None or observatory_row['lon'] == '' or np.isnan(observatory_row['lon']) or \
observatory_row['alt'] is None or observatory_row['alt'] == '') or np.isnan(observatory_row['alt']):
observer_site = None
constraints = []
else:
observer_site = Observer(latitude=observatory_row["lat"], longitude=observatory_row["lon"],
elevation=u.Quantity(observatory_row["alt"], unit="m"))
constraints = [AtNightConstraint.twilight_nautical(), AltitudeConstraint(min=min_altitude * u.deg)]
constraints = constraints + [MoonIlluminationSeparationConstraint(min_dist=moon_min_dist * u.deg,
max_dist=moon_max_dist * u.deg)]
midtransit_times = system.next_primary_eclipse_time(since, n_eclipses=n_transits)
ingress_egress_times = system.next_primary_ingress_egress_time(since, n_eclipses=n_transits)
moon_for_midtransit_times = get_body("moon", midtransit_times)
moon_dist_midtransit_times = moon_for_midtransit_times.separation(SkyCoord(ra, dec, unit="deg"))
moon_phase_midtransit_times = np.round(astroplan.moon_illumination(midtransit_times), 2)
transits_since_epoch = np.round((midtransit_times - primary_eclipse_time).jd / period)
midtransit_time_low_err = np.round(
(((transits_since_epoch * period_low_err) ** 2 + epoch_low_err ** 2) ** (1 / 2)) * 24, 2)
midtransit_time_up_err = np.round(
(((transits_since_epoch * period_up_err) ** 2 + epoch_up_err ** 2) ** (1 / 2)) * 24, 2)
low_err_delta = TimeDelta(midtransit_time_low_err * 3600, format='sec')
up_err_delta = TimeDelta(midtransit_time_up_err * 3600, format='sec')
i = 0
for midtransit_time in midtransit_times:
planner_inputs = planner_inputs + [
PlannerInput(observatory_row, midtransit_time, observer_site, midtransit_times,
ingress_egress_times,
constraints, moon_for_midtransit_times, moon_dist_midtransit_times,
moon_phase_midtransit_times,
transits_since_epoch, midtransit_time_low_err, midtransit_time_up_err, low_err_delta,
up_err_delta, i, plan_dir, target, min_altitude, transit_fraction, baseline,
error_alert)]
i = i + 1
with Pool(processes=cpus) as pool:
alert_dates_and_observables = pool.map(Planner.plan_event, planner_inputs)
alert_dates = [item[0] for item in alert_dates_and_observables]
alert_date = alert_dates[0] if len(alert_dates) > 0 else None
for each_alert_date in alert_dates:
if each_alert_date is not None and each_alert_date < alert_date:
alert_date = each_alert_date
observables = [item[1] for item in alert_dates_and_observables]
for observable in observables:
if observable is not None:
observables_df = observables_df.append(observable, ignore_index=True)
observables_df = observables_df.sort_values(["midtime", "observatory"], ascending=True)
observables_df.to_csv(plan_dir + "/observation_plan.csv", index=False)
print("Observation plan created in directory: " + object_dir)
return observatories_df, observables_df, alert_date, plan_dir, images_dir
@staticmethod
def plan_event(planner_input: PlannerInput):
try:
twilight_evening = None
twilight_morning = None
if planner_input.observer_site is not None:
twilight_evening = planner_input.observer_site.twilight_evening_nautical(planner_input.midtransit_time)
twilight_morning = planner_input.observer_site.twilight_morning_nautical(planner_input.midtransit_time)
ingress = planner_input.ingress_egress_times[planner_input.i][0]
egress = planner_input.ingress_egress_times[planner_input.i][1]
lowest_ingress = ingress - planner_input.low_err_delta[planner_input.i]
highest_egress = egress + planner_input.up_err_delta[planner_input.i]
if planner_input.error_alert and (highest_egress - lowest_ingress).jd > 0.33:
return (planner_input.midtransit_time , None)
else:
baseline_low = lowest_ingress - planner_input.baseline * u.hour
baseline_up = highest_egress + planner_input.baseline * u.hour
transit_times = baseline_low + (baseline_up - baseline_low) * np.linspace(0, 1, 100)
if planner_input.observer_site is None:
observable_transit_times = np.full(transit_times.shape, True)
else:
observable_transit_times = astroplan.is_event_observable(planner_input.constraints,
planner_input.observer_site,
planner_input.target,
times=transit_times)[0]
observable_transit_times_true = np.argwhere(observable_transit_times)
observable = len(observable_transit_times_true) / 100
if observable < planner_input.transit_fraction:
return (None, None)
start_obs = transit_times[observable_transit_times_true[0]][0]
end_obs = transit_times[observable_transit_times_true[len(observable_transit_times_true) - 1]][0]
start_plot = baseline_low
end_plot = baseline_up
# TODO check whether twilight evening happens before twilight morning, if not, the check is different
if planner_input.observer_site is not None:
if twilight_evening > start_obs:
start_obs = twilight_evening
if twilight_morning < end_obs:
end_obs = twilight_morning
moon_dist = round(planner_input.moon_dist_midtransit_times[planner_input.i].degree)
moon_phase = planner_input.moon_phase_midtransit_times[planner_input.i]
# TODO get is_event_observable for several parts of the transit (ideally each 5 mins) to get the proper observable percent. Also with baseline
if planner_input.observatory_row["tz"] is not None and not np.isnan(planner_input.observatory_row["tz"]):
observer_timezone = planner_input.observatory_row["tz"]
else:
observer_timezone = Planner.get_offset(planner_input.observatory_row["lat"], planner_input.observatory_row["lon"],
planner_input.midtransit_time.datetime)
observable_dict = {"observatory": planner_input.observatory_row["name"],
"timezone": observer_timezone, "ingress": ingress.isot,
"start_obs": start_obs.isot, "end_obs": end_obs.isot,
"egress": egress.isot, "midtime": planner_input.midtransit_time.isot,
"midtime_up_err_h":
str(int(planner_input.midtransit_time_up_err[planner_input.i] // 1)) + ":" +
str(int(planner_input.midtransit_time_up_err[planner_input.i] % 1 * 60)).zfill(2),
"midtime_low_err_h":
str(int(planner_input.midtransit_time_low_err[planner_input.i] // 1)) + ":" +
str(int(planner_input.midtransit_time_low_err[planner_input.i] % 1 * 60)).zfill(2),
"twilight_evening": twilight_evening.isot if twilight_evening is not None else None,
"twilight_morning": twilight_morning.isot if twilight_morning is not None else None,
"observable": observable, "moon_phase": moon_phase,
"moon_dist": moon_dist}
if planner_input.observer_site is not None:
plot_time = start_plot + (end_plot - start_plot) * np.linspace(0, 1, 100)
plt.tick_params(labelsize=6)
airmass_ax = plot_airmass(planner_input.target, planner_input.observer_site, plot_time, brightness_shading=False,
altitude_yaxis=True)
airmass_ax.axvspan(twilight_morning.plot_date, end_plot.plot_date, color='white')
airmass_ax.axvspan(start_plot.plot_date, twilight_evening.plot_date, color='white')
airmass_ax.axvspan(twilight_evening.plot_date, twilight_morning.plot_date, color='gray')
airmass_ax.axhspan(1. / np.cos(np.radians(90 - planner_input.min_altitude)), 5.0, color='green')
airmass_ax.get_figure().gca().set_title("")
airmass_ax.get_figure().gca().set_xlabel("")
airmass_ax.get_figure().gca().set_ylabel("")
airmass_ax.set_xlabel("")
airmass_ax.set_ylabel("")
xticks = []
xticks_labels = []
xticks.append(start_obs.plot_date)
hour_min_sec_arr = start_obs.isot.split("T")[1].split(":")
xticks_labels.append("T1_" + hour_min_sec_arr[0] + ":" + hour_min_sec_arr[1])
plt.axvline(x=start_obs.plot_date, color="violet")
xticks.append(end_obs.plot_date)
hour_min_sec_arr = end_obs.isot.split("T")[1].split(":")
xticks_labels.append("T1_" + hour_min_sec_arr[0] + ":" + hour_min_sec_arr[1])
plt.axvline(x=end_obs.plot_date, color="violet")
if start_plot < lowest_ingress < end_plot:
xticks.append(lowest_ingress.plot_date)
hour_min_sec_arr = lowest_ingress.isot.split("T")[1].split(":")
xticks_labels.append("T1_" + hour_min_sec_arr[0] + ":" + hour_min_sec_arr[1])
plt.axvline(x=lowest_ingress.plot_date, color="red")
if start_plot < ingress < end_plot:
xticks.append(ingress.plot_date)
hour_min_sec_arr = ingress.isot.split("T")[1].split(":")
xticks_labels.append("T1_" + hour_min_sec_arr[0] + ":" + hour_min_sec_arr[1])
plt.axvline(x=ingress.plot_date, color="orange")
if start_plot < planner_input.midtransit_time < end_plot:
xticks.append(planner_input.midtransit_time.plot_date)
hour_min_sec_arr = planner_input.midtransit_time.isot.split("T")[1].split(":")
xticks_labels.append("T0_" + hour_min_sec_arr[0] + ":" + hour_min_sec_arr[1])
plt.axvline(x=planner_input.midtransit_time.plot_date, color="black")
if start_plot < egress < end_plot:
xticks.append(egress.plot_date)
hour_min_sec_arr = egress.isot.split("T")[1].split(":")
xticks_labels.append("T4_" + hour_min_sec_arr[0] + ":" + hour_min_sec_arr[1])
plt.axvline(x=egress.plot_date, color="orange")
if start_plot < highest_egress < end_plot:
xticks.append(highest_egress.plot_date)
hour_min_sec_arr = highest_egress.isot.split("T")[1].split(":")
xticks_labels.append("T4_" + hour_min_sec_arr[0] + ":" + hour_min_sec_arr[1])
plt.axvline(x=highest_egress.plot_date, color="red")
airmass_ax.xaxis.set_tick_params(labelsize=5)
airmass_ax.set_xticks([])
airmass_ax.set_xticklabels([])
degrees_ax = Planner.get_twin(airmass_ax)
degrees_ax.yaxis.set_tick_params(labelsize=6)
degrees_ax.set_yticks([1., 1.55572383, 2.])
degrees_ax.set_yticklabels([90, 50, 30])
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(1.25, 0.75)
plt.savefig(
planner_input.plan_dir + "/images/" + planner_input.observatory_row["name"] + "_" +
str(planner_input.midtransit_time.isot)[:-4] + ".png",
bbox_inches='tight')
plt.close()
return (None, observable_dict)
except Exception as e:
logging.exception("Error when planning one observation event.")
@staticmethod
def get_twin(ax):
"""
Retrieves a twin Y axis for a given matplotlib axis. This is useful when we have two axes one placed at each side
of the plot.
:param ax: the known matplotlib axis.
:return: the twin axis.
"""
for other_ax in ax.figure.axes:
if other_ax is ax:
continue
if other_ax.bbox.bounds == ax.bbox.bounds:
return other_ax
return None
@staticmethod
def get_offset(lat, lng, datetime):
"""
Returns a location's time zone offset from UTC in minutes.
:param lat: geographical latitude
:param lng: geographical longitude
:param datetime: the UTC time
"""
tf = TimezoneFinder()
tz_target = timezone(tf.certain_timezone_at(lng=lng, lat=lat))
if tz_target is None:
return None
today_target = tz_target.localize(datetime)
today_utc = utc.localize(datetime)
return (today_utc - today_target).total_seconds() / 3600
|
franpozREPO_NAMESHERLOCKPATH_START.@SHERLOCK_extracted@SHERLOCK-master@sherlockpipe@observation_plan@planner.py@.PATH_END.py
|
{
"filename": "test_LSST.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_SimulationAPI/test_ObservationConfig/test_LSST.py",
"type": "Python"
}
|
import unittest
from lenstronomy.SimulationAPI.ObservationConfig.LSST import LSST
from lenstronomy.SimulationAPI.observation_api import Instrument, SingleBand
import lenstronomy.Util.util as util
class TestLSST(unittest.TestCase):
    """Checks that LSST observation configs map correctly onto the generic
    Instrument and SingleBand simulation APIs for every photometric band."""
    def setUp(self):
        """Build one LSST config per band plus the SingleBand each maps to."""
        self.u = LSST(band="u")
        self.g = LSST() # default is g_band
        self.r = LSST(band="r")
        self.i = LSST(band="i")
        self.z = LSST(band="z")
        self.y = LSST(band="Y") # same as band='y'
        kwargs_u_band = self.u.kwargs_single_band()
        kwargs_g_band = self.g.kwargs_single_band()
        kwargs_r_band = self.r.kwargs_single_band()
        kwargs_i_band = self.i.kwargs_single_band()
        kwargs_z_band = self.z.kwargs_single_band()
        kwargs_y_band = self.y.kwargs_single_band()
        self.u_band = SingleBand(**kwargs_u_band)
        self.g_band = SingleBand(**kwargs_g_band)
        self.r_band = SingleBand(**kwargs_r_band)
        self.i_band = SingleBand(**kwargs_i_band)
        self.z_band = SingleBand(**kwargs_z_band)
        self.y_band = SingleBand(**kwargs_y_band)
        # dictionaries mapping LSST kwargs to SingleBand kwargs
        self.camera_settings = {
            "read_noise": "_read_noise",
            "pixel_scale": "pixel_scale",
            "ccd_gain": "ccd_gain",
        }
        self.obs_settings = {
            "exposure_time": "_exposure_time",
            "sky_brightness": "_sky_brightness_",
            "magnitude_zero_point": "_magnitude_zero_point",
            "num_exposures": "_num_exposures",
            "seeing": "_seeing",
            "psf_type": "_psf_type",
        }
        self.instrument = Instrument(**self.g.camera)
    def test_LSST_class(self):
        """Constructor validation: defaults, band aliases, bad inputs, coadd years."""
        default = self.g
        explicit_g = LSST(band="g")
        self.assertEqual(explicit_g.camera, default.camera)
        self.assertEqual(explicit_g.obs, default.obs)
        # Unknown bands and psf types must be rejected.
        with self.assertRaises(ValueError):
            bad_band_1 = LSST(band="9")
        with self.assertRaises(ValueError):
            bad_band_2 = LSST(band="H")
        with self.assertRaises(ValueError):
            bad_psf = LSST(psf_type="blah")
        single_year = LSST(coadd_years=1)
        self.assertEqual(single_year.obs["num_exposures"], 20)
        with self.assertRaises(ValueError):
            bad_coadd_years = LSST(coadd_years=100)
    def test_LSST_camera(self):
        # comparing camera settings in LSST instance with those in Instrument instance
        for config, setting in self.camera_settings.items():
            self.assertEqual(
                self.g.camera[config],
                getattr(self.instrument, setting),
                msg=f"{config} did not match",
            )
    def test_LSST_obs(self):
        # comparing obs settings in LSST instance with those in SingleBand instance
        for config, setting in self.obs_settings.items():
            self.assertEqual(
                self.u.obs[config],
                getattr(self.u_band, setting),
                msg=f"{config} did not match",
            )
            self.assertEqual(
                self.g.obs[config],
                getattr(self.g_band, setting),
                msg=f"{config} did not match",
            )
            self.assertEqual(
                self.r.obs[config],
                getattr(self.r_band, setting),
                msg=f"{config} did not match",
            )
            self.assertEqual(
                self.i.obs[config],
                getattr(self.i_band, setting),
                msg=f"{config} did not match",
            )
            self.assertEqual(
                self.z.obs[config],
                getattr(self.z_band, setting),
                msg=f"{config} did not match",
            )
            self.assertEqual(
                self.y.obs[config],
                getattr(self.y_band, setting),
                msg=f"{config} did not match",
            )
    def test_kwargs_single_band(self):
        """kwargs_single_band must be the union of camera and obs settings."""
        kwargs_g = util.merge_dicts(self.g.camera, self.g.obs)
        self.assertEqual(self.g.kwargs_single_band(), kwargs_g)
if __name__ == "__main__":
    # Allow running this test module directly: `python test_LSST.py`.
    unittest.main()
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_SimulationAPI@test_ObservationConfig@test_LSST.py@.PATH_END.py
|
{
"filename": "test_factor_analysis.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/decomposition/tests/test_factor_analysis.py",
"type": "Python"
}
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from itertools import combinations
import numpy as np
import pytest
from sklearn.decomposition import FactorAnalysis
from sklearn.decomposition._factor_analysis import _ortho_rotation
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils._testing import assert_almost_equal, assert_array_almost_equal
def test_factor_analysis(global_random_seed):
    """End-to-end check of FactorAnalysis: covariance recovery, backend
    agreement, convergence warning, precision/covariance inversion and
    component rotation (varimax against an R reference)."""
    # Test FactorAnalysis ability to recover the data covariance structure
    rng = np.random.RandomState(global_random_seed)
    n_samples, n_features, n_components = 20, 5, 3
    # Some random settings for the generative model
    W = rng.randn(n_components, n_features)
    # latent variable of dim 3, 20 of it
    h = rng.randn(n_samples, n_components)
    # using gamma to model different noise variance
    # per component
    noise = rng.gamma(1, size=n_features) * rng.randn(n_samples, n_features)
    # generate observations
    # wlog, mean is 0
    X = np.dot(h, W) + noise
    # Both SVD backends must fit, score consistently, and have a
    # monotonically increasing log-likelihood trace.
    fas = []
    for method in ["randomized", "lapack"]:
        fa = FactorAnalysis(n_components=n_components, svd_method=method)
        fa.fit(X)
        fas.append(fa)
        X_t = fa.transform(X)
        assert X_t.shape == (n_samples, n_components)
        assert_almost_equal(fa.loglike_[-1], fa.score_samples(X).sum())
        assert_almost_equal(fa.score_samples(X).mean(), fa.score(X))
        diff = np.all(np.diff(fa.loglike_))
        assert diff > 0.0, "Log likelihood dif not increase"
        # Sample Covariance
        scov = np.cov(X, rowvar=0.0, bias=1.0)
        # Model Covariance
        mcov = fa.get_covariance()
        diff = np.sum(np.abs(scov - mcov)) / W.size
        assert diff < 0.2, "Mean absolute difference is %f" % diff
    # A noise_variance_init sized for n_features must be rejected on 2 columns.
    fa = FactorAnalysis(
        n_components=n_components, noise_variance_init=np.ones(n_features)
    )
    with pytest.raises(ValueError):
        fa.fit(X[:, :2])
    # The two SVD backends should agree up to sign.
    def f(x, y):
        return np.abs(getattr(x, y))  # sign will not be equal
    fa1, fa2 = fas
    for attr in ["loglike_", "components_", "noise_variance_"]:
        assert_almost_equal(f(fa1, attr), f(fa2, attr))
    # Truncated iteration should warn about non-convergence.
    fa1.max_iter = 1
    fa1.verbose = True
    with pytest.warns(ConvergenceWarning):
        fa1.fit(X)
    # Test get_covariance and get_precision with n_components == n_features
    # with n_components < n_features and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        fa.n_components = n_components
        fa.fit(X)
        cov = fa.get_covariance()
        precision = fa.get_precision()
        assert_array_almost_equal(np.dot(cov, precision), np.eye(X.shape[1]), 12)
    # test rotation
    n_components = 2
    results, projections = {}, {}
    for method in (None, "varimax", "quartimax"):
        fa_var = FactorAnalysis(n_components=n_components, rotation=method)
        results[method] = fa_var.fit_transform(X)
        projections[method] = fa_var.get_covariance()
    # Rotations change the factors but (approximately) not the covariance.
    for rot1, rot2 in combinations([None, "varimax", "quartimax"], 2):
        assert not np.allclose(results[rot1], results[rot2])
        assert np.allclose(projections[rot1], projections[rot2], atol=3)
    # test against R's psych::principal with rotate="varimax"
    # (i.e., the values below stem from rotating the components in R)
    # R's factor analysis returns quite different values; therefore, we only
    # test the rotation itself
    factors = np.array(
        [
            [0.89421016, -0.35854928, -0.27770122, 0.03773647],
            [-0.45081822, -0.89132754, 0.0932195, -0.01787973],
            [0.99500666, -0.02031465, 0.05426497, -0.11539407],
            [0.96822861, -0.06299656, 0.24411001, 0.07540887],
        ]
    )
    r_solution = np.array(
        [[0.962, 0.052], [-0.141, 0.989], [0.949, -0.300], [0.937, -0.251]]
    )
    rotated = _ortho_rotation(factors[:, :n_components], method="varimax").T
    assert_array_almost_equal(np.abs(rotated), np.abs(r_solution), decimal=3)
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@decomposition@tests@test_factor_analysis.py@.PATH_END.py
|
{
"filename": "crtaf_H_6.py",
"repo_name": "Goobley/DexRT",
"repo_path": "DexRT_extracted/DexRT-main/tests/crtaf_H_6.py",
"type": "Python"
}
|
import crtaf
from crtaf.from_lightweaver import LightweaverAtomConverter
from crtaf.core_types import TemperatureInterpolationRateImpl, CERate, CIRate
from lightweaver.rh_atoms import H_6_atom, CaII_atom, H_4_atom
import numpy as np
import astropy.units as u
import astropy.constants as const
h_coll_temperature_grid = np.array([3e3, 5e3, 7e3, 10e3, 20e3, 30e3, 50e3, 100e3, 1e6, 2e6])
# NOTE(cmo): Using RH implementation as the lower bound in Johnson is defined differently to scipy
# RH implementation based on Handbook of Mathematical Functions, Applied mathematics series 55 (1964) (ed. Abramovitz and Stegun).
def E1(x):
    """First exponential integral E1(x) for x > 0, elementwise.

    Uses the Abramowitz & Stegun approximations 5.1.53 (series, x <= 1)
    and 5.1.56 (rational, x > 1), matching the RH implementation (whose
    lower-bound convention differs from scipy's).

    Parameters
    ----------
    x : array_like
        Strictly positive arguments. (Generalized: plain lists and scalars
        are now accepted, not only ndarrays.)

    Returns
    -------
    numpy.ndarray
        E1 evaluated at each element of x.

    Raises
    ------
    ValueError
        If any element of x is <= 0.
    """
    # Series coefficients for 0 < x <= 1 (A&S 5.1.53).
    a53 = np.array([-0.57721566, 0.99999193, -0.24991055,
                    0.05519968, -0.00976004, 0.00107857])
    # Rational approximation coefficients for x > 1 (A&S 5.1.56).
    a56 = np.array([8.5733287401, 18.0590169730, 8.6347608925, 0.2677737343])
    b56 = np.array([9.5733223454, 25.6329561486, 21.0996530827, 3.9584969228])
    # Accept any array-like input (backwards compatible with ndarrays).
    x = np.asarray(x, dtype=float)
    if np.any(x <= 0.0):
        raise ValueError("x < 0")
    result = np.zeros_like(x)
    small = x <= 1.0
    # 0 < x <= 1: -ln(x) plus a 5th-order polynomial in Horner form.
    xs = x[small]
    result[small] = -np.log(xs) + a53[0] + xs * (
        a53[1] + xs * (a53[2] + xs * (a53[3] + xs * (a53[4] + xs * a53[5]))))
    # x > 1: exp(-x) * P4(x) / (x * Q4(x)), both polynomials in Horner form.
    xl = x[~small]
    num = a56[3] / xl + a56[2] + xl * (a56[1] + xl * (a56[0] + xl))
    den = b56[3] + xl * (b56[2] + xl * (b56[1] + xl * (b56[0] + xl)))
    result[~small] = np.exp(-xl) * num / den
    return result
def E2(x):
    """Second exponential integral via the recurrence E2(x) = e^{-x} - x*E1(x)."""
    decay = np.exp(-x)
    return decay - x * E1(x)
def expn(n, i):
    """Dispatch to E1/E2; only exponential-integral orders 1 and 2 exist."""
    integrals = {1: E1, 2: E2}
    if n not in integrals:
        raise ValueError("Invalid exp integral order requested")
    return integrals[n](i)
def fn_f(x):
    """Combination exp(-x)/x - 2*E1(x) + E2(x) used by the ionisation rate."""
    leading = np.exp(-x) / x
    return leading - 2.0 * E1(x) + E2(x)
def g0(n):
    """Gaunt-factor fit coefficient g0(n); n = 1 and 2 are special-cased."""
    special = {1: 1.133, 2: 1.0785}
    if n in special:
        return special[n]
    return 0.9935 + (0.2328 - 0.1296 / n) / n
def g1(n):
    """Gaunt-factor fit coefficient g1(n) (1/x term); n = 1, 2 special-cased."""
    special = {1: -0.4059, 2: -0.2319}
    if n in special:
        return special[n]
    return -(0.6282 - (0.5598 - 0.5299 / n) / n) / n
def g2(n):
    """Gaunt-factor fit coefficient g2(n) (1/x^2 term); n = 1, 2 special-cased."""
    special = {1: 0.07014, 2: 0.02947}
    if n in special:
        return special[n]
    return (0.3887 - (1.181 - 1.4700 / n) / n) / (n * n)
def Johnson_CE(i, j, Eij, Te):
    """Collisional excitation rate coefficient between hydrogen principal
    quantum numbers i -> j (i < j), appearing to follow Johnson (1972) as
    implemented in RH's make_h.c.

    :param i: lower principal quantum number
    :param j: upper principal quantum number
    :param Eij: transition energy [J]
    :param Te: electron temperature(s) [K]; scalar or array
    :return: rate coefficient on the same grid as Te
    """
    # y = E_ij / kT
    y = Eij / (const.k_B.value * Te)
    C1 = 32.0 / (3.0 * np.sqrt(3.0) * np.pi)
    # Level-dependent fit parameters r_i and b_i.
    if i == 1:
        ri = 0.45
        bi = -0.603
    else:
        ri = 1.94 * i**(-1.57)
        bi = (4.0 + (-18.63 + (36.24 - 28.09 / i) / i) / i) / i
    # x = 1 - (i/j)^2
    xr = 1.0 - i**2 / j**2
    # NOTE(cmo): rij is calculated incorrectly in rh's make_h.c for higher order
    # CE terms in a series, due to the incorrect *= accumulation
    rij = ri * xr
    z = rij + y
    # Oscillator strength f_ij from the g0/g1/g2 Gaunt-factor fits.
    fij = C1 * i / (j * xr)**3 * (g0(i) + (g1(i) + g2(i) / xr) / xr)
    Aij = 2.0 * i**2 / xr * fij
    Bij = 4.0 * i**4 / (j**3 * xr**2) * (1.0 + 4.0 / (3.0 * xr) + bi / xr**2)
    # Thermal-velocity times cross-section prefactor.
    ce = np.sqrt((8.0 * const.k_B.value * Te) / (np.pi * const.m_e.value)) * (2.0 * np.pi * const.a0.value**2 * y**2 * i**2) / xr
    # Exponential-integral terms of the thermally averaged collision strength.
    t1 = Aij * ((0.5 + 1.0 / y) * expn(1, y) - (0.5 + 1.0 / z) * expn(1, z))
    t2 = (Bij - Aij * np.log(2*i**2 / xr)) * (expn(2, y) / y - expn(2, z) / z)
    ce *= (t1 + t2)
    # NOTE(review): final exp(y)/sqrt(Te) scaling presumably matches RH's
    # tabulated-CE convention -- confirm units against the consumer.
    ce *= np.exp(y) / np.sqrt(Te)
    return ce
def Johnson_CI(i, Eij, Te):
    """Collisional ionisation rate coefficient from hydrogen level i,
    appearing to follow Johnson (1972) as implemented in RH's make_h.c.

    :param i: principal quantum number of the ionising level
    :param Eij: ionisation energy [J]
    :param Te: electron temperature(s) [K]; scalar or array
    :return: rate coefficient on the same grid as Te
    """
    # y = E_ij / kT
    y = Eij / (const.k_B.value * Te)
    C1 = 32.0 / (3.0 * np.sqrt(3.0) * np.pi)
    # Level-dependent fit parameters r_i and b_i (same fits as Johnson_CE).
    if i == 1:
        ri = 0.45
        bi = -0.603
    else:
        ri = 1.94 * i**(-1.57)
        bi = (4.0 + (-18.63 + (36.24 - 28.09 / i) / i) / i) / i
    z = ri + y
    # A_n and B_n ionisation fit coefficients.
    An = C1 * i * (g0(i) / 3.0 + g1(i) / 4.0 + g2(i) / 5.0)
    Bn = 2.0 * i**2 / 3.0 * (5.0 + bi)
    # Thermal-velocity times cross-section prefactor.
    ci = np.sqrt((8.0 * const.k_B.value * Te) / (np.pi * const.m_e.value)) * (2.0 * np.pi * const.a0.value**2 * y**2 * i**2)
    # Exponential-integral terms of the thermally averaged cross-section.
    t1 = An * (E1(y) / y - E1(z) / z)
    t2 = (Bn - An * np.log(2.0 * i**2)) * (fn_f(y) - fn_f(z))
    ci *= (t1 + t2)
    # NOTE(review): same exp(y)/sqrt(Te) tabulation convention as Johnson_CE.
    ci *= np.exp(y) / np.sqrt(Te)
    return ci
def make_atom():
    """Build a simplified 6-level hydrogen model atom: widen each line's
    wavelength grid, then replace the CE/CI collisional rate tables with
    Johnson (1972) values tabulated on h_coll_temperature_grid."""
    conv = LightweaverAtomConverter()
    model = conv.convert(H_6_atom())
    # Widen the core/wing quadrature of every line's wavelength grid.
    for l in model.lines:
        l.wavelength_grid.q_core *= 4
        l.wavelength_grid.q_wing *= 5
    visitor = crtaf.AtomicSimplificationVisitor(crtaf.default_visitors())
    model_simplified = model.simplify_visit(visitor)
    # Re-tabulate collisional excitation/ionisation rates on our own T grid.
    for coll_trans in model_simplified.collisions:
        for coll in coll_trans.data:
            if isinstance(coll, (CERate, CIRate)):
                coll.temperature = h_coll_temperature_grid << u.K
                rate_unit = coll.data.unit
                # Transition energy in Joules; assumes transition[0] is the
                # upper level and transition[1] the lower -- TODO confirm.
                Eij = (model_simplified.levels[coll_trans.transition[0]].energy_eV - model_simplified.levels[coll_trans.transition[1]].energy_eV).to(u.J)
                # Effective principal quantum number from g = 2 n^2.
                n = np.sqrt(model_simplified.levels[coll_trans.transition[1]].g / 2)
                if isinstance(coll, CERate):
                    nn = np.sqrt(model_simplified.levels[coll_trans.transition[0]].g / 2)
                    coll.data = Johnson_CE(n, nn, Eij.value, h_coll_temperature_grid) << rate_unit
                elif isinstance(coll, CIRate):
                    coll.data = Johnson_CI(n, Eij.value, h_coll_temperature_grid) << rate_unit
    # new_lines = []
    # for l in model_simplified.lines:
    #     if l.lambda0 < 1000.0 * u.nm:
    #         new_lines.append(l)
    # model_simplified.lines = new_lines
    return model_simplified
def make_H_4():
    """Build a simplified 4-level hydrogen model atom with widened line grids."""
    converter = LightweaverAtomConverter()
    atom_model = converter.convert(H_4_atom())
    # Widen the core/wing quadrature of every line's wavelength grid.
    for line in atom_model.lines:
        line.wavelength_grid.q_core *= 3
        line.wavelength_grid.q_wing *= 2
    simplifier = crtaf.AtomicSimplificationVisitor(crtaf.default_visitors())
    return atom_model.simplify_visit(simplifier)
def make_CaII():
    """Build a simplified CaII model atom: widen line grids, append a sentinel
    wavelength to the last line, and extend cold collision tables to 500 K."""
    converter = LightweaverAtomConverter()
    atom_model = converter.convert(CaII_atom())
    for line in atom_model.lines:
        line.wavelength_grid.q_core *= 3
        line.wavelength_grid.q_wing *= 2
    simplifier = crtaf.AtomicSimplificationVisitor(crtaf.default_visitors())
    simplified = atom_model.simplify_visit(simplifier)
    # Append a -1 nm sentinel to the final line's grid and keep it sorted.
    last_grid = simplified.lines[-1].wavelength_grid
    last_grid.wavelengths = np.sort(np.concatenate((last_grid.wavelengths, [-1.0 * u.nm])))
    # NOTE(cmo): To prevent explosion due to rates in the Snow KHI model
    # TODO(cmo): Grab the rates from source/RADYN
    for trans in simplified.collisions:
        for coll in trans.data:
            if isinstance(coll, TemperatureInterpolationRateImpl) and coll.temperature[0] > (1000.0 * u.K):
                coll.temperature = np.concatenate(([500.0 * u.K], coll.temperature))
                coll.data = np.concatenate(([0.0 * coll.data.unit], coll.data))
    return simplified
if __name__ == "__main__":
    # Build each model atom and dump it to YAML (same files, same order).
    outputs = (
        (make_CaII, "test_CaII.yaml"),
        (make_H_4, "H_4.yaml"),
        (make_atom, "H_6.yaml"),
    )
    for build, filename in outputs:
        with open(filename, "w") as f:
            f.write(build().yaml_dumps())
|
GoobleyREPO_NAMEDexRTPATH_START.@DexRT_extracted@DexRT-main@tests@crtaf_H_6.py@.PATH_END.py
|
{
"filename": "test_mstats_extras.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/stats/tests/test_mstats_extras.py",
"type": "Python"
}
|
import numpy as np
import numpy.ma as ma
import scipy.stats.mstats as ms
from numpy.testing import (assert_equal, assert_almost_equal, assert_,
assert_allclose)
def test_compare_medians_ms():
    """compare_medians_ms: fully separated samples and a reference p-value."""
    sample = np.arange(7)
    shifted = sample + 10
    # Completely disjoint samples -> p-value of 0.
    assert_almost_equal(ms.compare_medians_ms(sample, shifted), 0)
    other = np.linspace(0, 1, num=10)
    assert_almost_equal(ms.compare_medians_ms(sample, other), 0.017116406778)
def test_hdmedian():
    """Harrell-Davis median on masked 1-D and 2-D arrays."""
    # 1-D array, unmasked then partially masked.
    arr = ma.arange(11)
    assert_allclose(ms.hdmedian(arr), 5, rtol=1e-14)
    arr.mask = ma.make_mask(arr)
    arr.mask[:7] = False
    assert_allclose(ms.hdmedian(arr), 3, rtol=1e-14)
    # Check that `var` keyword returns a value. TODO: check whether returned
    # value is actually correct.
    assert_(ms.hdmedian(arr, var=True).size == 2)
    # 2-D array, unmasked then partially masked.
    arr2 = ma.arange(22).reshape((11, 2))
    assert_allclose(ms.hdmedian(arr2, axis=0), [10, 11])
    arr2.mask = ma.make_mask(arr2)
    arr2.mask[:7, :] = False
    assert_allclose(ms.hdmedian(arr2, axis=0), [6, 7])
def test_rsh():
    """Smoke test for ms.rsh: output shape/size only."""
    np.random.seed(132345)
    sample = np.random.randn(100)
    # Just a sanity check that the code runs and output shape is correct.
    # TODO: check that implementation is correct.
    full = ms.rsh(sample)
    assert_(full.shape == sample.shape)
    # Check points keyword
    at_points = ms.rsh(sample, points=[0, 1.])
    assert_(at_points.size == 2)
def test_mjci():
    """Maritz-Jarrett standard errors against reference values."""
    sample = ma.array([77, 87, 88, 114, 151, 210, 219, 246, 253, 262,
                       296, 299, 306, 376, 428, 515, 666, 1310, 2611])
    assert_almost_equal(ms.mjci(sample), [55.76819, 45.84028, 198.87875], 5)
def test_trimmed_mean_ci():
    """Trimmed mean and its confidence interval against reference values."""
    sample = ma.array([545, 555, 558, 572, 575, 576, 578, 580,
                       594, 605, 635, 651, 653, 661, 666])
    assert_almost_equal(ms.trimmed_mean(sample, 0.2), 596.2, 1)
    interval = np.round(ms.trimmed_mean_ci(sample, (0.2, 0.2)), 1)
    assert_equal(interval, [561.8, 630.6])
def test_idealfourths():
    """Ideal fourths on 1-D/2-D data plus the degenerate two-point case."""
    sample = np.arange(100)
    assert_almost_equal(np.asarray(ms.idealfourths(sample)),
                        [24.416667, 74.583333], 6)
    sample_2d = sample.repeat(3).reshape(-1, 3)
    assert_almost_equal(ms.idealfourths(sample_2d, axis=0),
                        [[24.416667, 24.416667, 24.416667],
                         [74.583333, 74.583333, 74.583333]], 6)
    assert_almost_equal(ms.idealfourths(sample_2d, axis=1),
                        sample.repeat(2).reshape(-1, 2))
    # Too few data points: both fourths come back NaN.
    degenerate = ms.idealfourths([0, 0])
    assert_(np.isnan(degenerate).all())
class TestQuantiles:
    """Tests for the Harrell-Davis quantile estimators and their standard errors."""
    # 100 fixed uniform(0, 1) draws shared by all tests below.
    data = [0.706560797,0.727229578,0.990399276,0.927065621,0.158953014,
        0.887764025,0.239407086,0.349638551,0.972791145,0.149789972,
        0.936947700,0.132359948,0.046041972,0.641675031,0.945530547,
        0.224218684,0.771450991,0.820257774,0.336458052,0.589113496,
        0.509736129,0.696838829,0.491323573,0.622767425,0.775189248,
        0.641461450,0.118455200,0.773029450,0.319280007,0.752229111,
        0.047841438,0.466295911,0.583850781,0.840581845,0.550086491,
        0.466470062,0.504765074,0.226855960,0.362641207,0.891620942,
        0.127898691,0.490094097,0.044882048,0.041441695,0.317976349,
        0.504135618,0.567353033,0.434617473,0.636243375,0.231803616,
        0.230154113,0.160011327,0.819464108,0.854706985,0.438809221,
        0.487427267,0.786907310,0.408367937,0.405534192,0.250444460,
        0.995309248,0.144389588,0.739947527,0.953543606,0.680051621,
        0.388382017,0.863530727,0.006514031,0.118007779,0.924024803,
        0.384236354,0.893687694,0.626534881,0.473051932,0.750134705,
        0.241843555,0.432947602,0.689538104,0.136934797,0.150206859,
        0.474335206,0.907775349,0.525869295,0.189184225,0.854284286,
        0.831089744,0.251637345,0.587038213,0.254475554,0.237781276,
        0.827928620,0.480283781,0.594514455,0.213641488,0.024194386,
        0.536668589,0.699497811,0.892804071,0.093835427,0.731107772]
    def test_hdquantiles(self):
        """hdquantiles: extremes, quartiles, axis handling and `var` keyword."""
        data = self.data
        # Probabilities 0 and 1 reproduce the sample min/max.
        assert_almost_equal(ms.hdquantiles(data,[0., 1.]),
                            [0.006514031, 0.995309248])
        hdq = ms.hdquantiles(data,[0.25, 0.5, 0.75])
        assert_almost_equal(hdq, [0.253210762, 0.512847491, 0.762232442,])
        # Per-column results along axis=0 must match the 1-D computation.
        data = np.array(data).reshape(10,10)
        hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0)
        assert_almost_equal(hdq[:,0], ms.hdquantiles(data[:,0],[0.25,0.5,0.75]))
        assert_almost_equal(hdq[:,-1], ms.hdquantiles(data[:,-1],[0.25,0.5,0.75]))
        # Same check with the variance estimate attached.
        hdq = ms.hdquantiles(data,[0.25,0.5,0.75],axis=0,var=True)
        assert_almost_equal(hdq[...,0],
                            ms.hdquantiles(data[:,0],[0.25,0.5,0.75],var=True))
        assert_almost_equal(hdq[...,-1],
                            ms.hdquantiles(data[:,-1],[0.25,0.5,0.75], var=True))
    def test_hdquantiles_sd(self):
        # Standard deviation is a jackknife estimator, so we can check if
        # the efficient version (hdquantiles_sd) matches a rudimentary,
        # but clear version here.
        hd_std_errs = ms.hdquantiles_sd(self.data)
        # jacknnife standard error, Introduction to the Bootstrap Eq. 11.5
        n = len(self.data)
        jdata = np.broadcast_to(self.data, (n, n))
        jselector = np.logical_not(np.eye(n))  # leave out one sample each row
        jdata = jdata[jselector].reshape(n, n-1)
        jdist = ms.hdquantiles(jdata, axis=1)
        jdist_mean = np.mean(jdist, axis=0)
        jstd = ((n-1)/n * np.sum((jdist - jdist_mean)**2, axis=0))**.5
        assert_almost_equal(hd_std_errs, jstd)
        # Test actual values for good measure
        assert_almost_equal(hd_std_errs, [0.0379258, 0.0380656, 0.0380013])
        # Two-point degenerate case.
        two_data_points = ms.hdquantiles_sd([1, 2])
        assert_almost_equal(two_data_points, [0.5, 0.5, 0.5])
    def test_mquantiles_cimj(self):
        # Only test that code runs, implementation not checked for correctness
        ci_lower, ci_upper = ms.mquantiles_cimj(self.data)
        assert_(ci_lower.size == ci_upper.size == 3)
def test_median_cihs():
    """Hettmansperger-Sheather median CI against R references.

    R (EnvStats): library(EnvStats); options(digits=8)
    eqnpar(x, p=0.5, ci.method="interpolate", approx.conf.level=0.95, ci=TRUE)
    on the same 20 values drawn below.
    """
    rng = np.random.default_rng(8824288259505800535)
    sample = rng.random(size=20)
    assert_allclose(ms.median_cihs(sample), (0.38663198, 0.88431272))
    # SciPy's 90% CI upper limit doesn't match that of EnvStats eqnpar. SciPy
    # doesn't look wrong, and it agrees with a different reference,
    # `median_confint_hs` from `hoehleatsu/quantileCI`:
    #   devtools::install_github("hoehleatsu/quantileCI")
    #   quantileCI::median_confint_hs(x=x, conf.level=0.90, interpolate=TRUE)
    assert_allclose(ms.median_cihs(sample, 0.1), (0.48319773366, 0.88094268050))
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@stats@tests@test_mstats_extras.py@.PATH_END.py
|
{
"filename": "model.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/L0_model_config/autofill_noplatform/python/output_wrong_property/model.py",
"type": "Python"
}
|
# Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
class TritonPythonModel:
    """Negative-test fixture: the auto-complete config declares an output with
    a malformed property key so Triton's config autofill rejects it (this model
    lives under autofill_noplatform/.../output_wrong_property)."""
    @staticmethod
    def auto_complete_config(auto_complete_model_config):
        input0 = {"name": "INPUT0", "data_type": "TYPE_FP32", "dims": [4]}
        input1 = {"name": "INPUT1", "data_type": "TYPE_FP32", "dims": [4]}
        output0 = {"name": "OUTPUT0", "data_type": "TYPE_FP32", "dims": [4]}
        output1 = {
            "name": "OUTPUT1",
            "data_type": "TYPE_FP32",
            "dims": [4],
            # NOTE: the stray ':' in this key is the deliberate "wrong
            # property" under test -- do not fix it.
            "is_shape_tensor:": True,
        }
        auto_complete_model_config.set_max_batch_size(0)
        auto_complete_model_config.add_input(input0)
        auto_complete_model_config.add_input(input1)
        auto_complete_model_config.add_output(output0)
        auto_complete_model_config.add_output(output1)
        return auto_complete_model_config
    def execute(self, requests):
        # No-op placeholder; this fixture only exercises config validation.
        pass
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@L0_model_config@autofill_noplatform@python@output_wrong_property@model.py@.PATH_END.py
|
{
"filename": "modules.py",
"repo_name": "alibaba/TinyNeuralNetwork",
"repo_path": "TinyNeuralNetwork_extracted/TinyNeuralNetwork-main/tinynn/llm_quant/modules.py",
"type": "Python"
}
|
import sys
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from transformers.models.llama.modeling_llama import LlamaAttention
from tinynn.llm_quant.llama import LlamaAttentionFused
from tinynn.util.util import get_logger
from .util import _init_patch_easyquant, get_submodule_with_parent_from_name
# Module-level logger.
log = get_logger(__name__, 'INFO')
# Whether the native easyquant kernels are available; flipped to False below
# if importing them fails.
SPEEDUP = True
try:
    if sys.platform == "win32":
        # Presumably patches the DLL search path before importing easyquant
        # on Windows -- confirm against util._init_patch_easyquant.
        _init_patch_easyquant()
    from easyquant import (
        decompress_int4,
        decompress_int8,
        quantize_per_token,
        gemm,
        dequantize_bias_per_token,
        dequantize_per_token,
    )
except (ImportError, OSError):
    log.warning('easyquant is not installed, the inference performance may be degraded')
    SPEEDUP = False
def compress_int(data_tensor, bit_width, per_channel=True, per_token=False):
    """Symmetrically quantize `data_tensor` to signed `bit_width`-bit values.

    Exactly one granularity applies: per-channel (rows; weight style),
    per-token (leading dims; activation style) or per-tensor.
    Returns (scale, quantized int8 tensor).
    """
    # use [-127, 127] as 8-bit quant range
    q_max = 2 ** (bit_width - 1) - 1
    q_min = -q_max
    assert (per_channel and per_token) is False
    if per_channel:
        # for weight, use w_max/quant_max as scale, and convert weight to
        # int8 to save memory.
        row_max = data_tensor.abs().max(dim=-1).values.float()
        scale = 2 * (row_max / (2 ** bit_width - 1))
        quantized = torch.round(data_tensor.float() / scale[:, None])
        quantized = torch.clamp(quantized, q_min, q_max).to(torch.int8)
        return scale, quantized
    if per_token:
        # per-token quantization: one scale per row/token.
        token_max = data_tensor.abs().max(dim=-1).values.float()
        scale = token_max / q_max
        rank = len(data_tensor.shape)
        if rank == 3:
            scale = scale[:, :, None]
        elif rank == 2:
            scale = scale[:, None]
        else:
            assert False
        quantized = torch.round(data_tensor.float() / scale.float())
        quantized = torch.clamp(quantized, q_min, q_max).to(torch.int8)
        return scale, quantized
    # per_tensor quantization: a single scalar scale.
    scale = data_tensor.abs().max().float() / q_max
    quantized = torch.round(data_tensor.float() / scale.float())
    quantized = torch.clamp(quantized, q_min, q_max).to(torch.int8)
    return scale, quantized
class QLinear(nn.Module):
    """Quantized drop-in replacement for nn.Linear.

    quant_mode:
      * 'weight4' / 'weight8': weight-only quantization; weights are stored
        int8 (nibble-packed for 'weight4') and dequantized on the fly.
      * 'dynamic': weights stay int8 and activations are quantized at runtime
        (int8 GEMM via easyquant when SPEEDUP, otherwise simulated).
    """
    def __init__(self, fc: nn.Linear, quant_mode: str):
        super().__init__()
        assert quant_mode in ("weight4", "weight8", "dynamic")
        if quant_mode == 'weight4':
            weight_bit_width = 4
        else:
            weight_bit_width = 8
        self.weight_bit_width = weight_bit_width
        self.quant_mod = quant_mode
        self.in_features = fc.in_features
        self.out_features = fc.out_features
        bias = None if fc.bias is None else fc.bias.data
        # compress weight by given bit, use per-channel and [-127,127]/[-7,7] to clamp
        scale, weight_q = compress_int(fc.weight.data, weight_bit_width)
        # Pad the reduction dim to a multiple of 4 for the int8 GEMM kernel.
        if self.in_features % 4 != 0 and quant_mode == 'dynamic':
            weight_q = F.pad(weight_q, (0, 4 - self.in_features % 4))
        if self.weight_bit_width == 4:
            # Pack two 4-bit values per int8 byte (high nibble first).
            weight_shape = weight_q.shape
            assert len(weight_shape) == 2
            assert weight_shape[1] % 2 == 0
            pre_packed = weight_q.view(weight_shape[0], weight_shape[1] // 2, 2)
            weight_q = ((pre_packed[..., 0] & 0b00001111) << 4) | (pre_packed[..., 1] & 0b00001111)
        self.weight = nn.Parameter(weight_q, requires_grad=False)
        self.weight_scale = nn.Parameter(scale, requires_grad=False)
        self.bias = nn.Parameter(bias, requires_grad=False) if bias is not None else None
        # Drop the original fp32 weights immediately to free memory.
        fc.weight = None
        fc.bias = None
    def forward(self, input: Tensor) -> Tensor:
        input_device = input.device
        input_dtype = input.dtype
        input_shape = input.shape
        # NOTE(review): 'static' can never occur given the __init__ assert;
        # this branch is dead code kept as a guard.
        if self.quant_mod == 'static':
            assert False, f'{self.quant_mod} not supported'
        else:
            # Weight-only modes: reconstruct an fp16 weight matrix first.
            if self.quant_mod == 'weight4':
                if SPEEDUP:
                    weight_fp = torch.empty(
                        (self.out_features, self.in_features), dtype=torch.float16, device=input.device
                    )
                    decompress_int4(weight_fp, self.weight, self.weight_scale)
                else:
                    # Unpack nibbles via shifts (sign-extending the high one).
                    weight_fp = (
                        torch.stack((self.weight >> 4, self.weight << 4 >> 4), -1)
                        .view(self.out_features, self.in_features)
                        .to(dtype=torch.float32)
                        * self.weight_scale[:, None]
                    ).to(dtype=torch.half)
            elif self.quant_mod == 'weight8':
                if SPEEDUP:
                    weight_fp = torch.empty_like(self.weight.data, dtype=input_dtype, device=input_device)
                    decompress_int8(weight_fp, self.weight, self.weight_scale)
                else:
                    weight_fp = (self.weight.to(dtype=torch.float32) * self.weight_scale[:, None]).to(dtype=torch.half)
            # NOTE(review): weight_fp is only bound on the weight4/weight8
            # branches; that is fine because the 'dynamic' path below never
            # reaches the final F.linear that uses it.
            if 'dynamic' in self.quant_mod:
                if SPEEDUP:
                    # the real dynamic quantization process, first quantize input to int8, then do int8Gemm calculation,
                    # and finally dequantize the output to float
                    input_viewed = input.view(-1, input_shape[-1])
                    # pad self.weight to 4x
                    padding_num = 4 - self.in_features % 4 if self.in_features % 4 != 0 else 0
                    # init easyquant kernels' output
                    input_q = torch.empty(
                        (input_viewed.shape[0], input_viewed.shape[1] + padding_num),
                        dtype=torch.int8,
                        device=input_device,
                    )
                    # NOTE(review): 'token' is never a substring of the allowed
                    # modes, so scale_shape is always 1 here -- confirm this is
                    # the intended (per-tensor) scale for quantize_per_token.
                    scale_shape = input_viewed.shape[0] if 'token' in self.quant_mod else 1
                    input_scale = torch.zeros(scale_shape, device=input_device)
                    out_q = torch.empty(
                        (int(input_viewed.shape[0]), self.out_features), dtype=torch.int32, device=input_device
                    )
                    output = torch.empty_like(out_q, dtype=torch.float16, device=input_device)
                    # use easyquant kernels to accelerate computation
                    quantize_per_token(input_q, input_viewed, input_scale)
                    gemm(out_q, input_q, self.weight)
                    if self.bias is not None:
                        dequantize_bias_per_token(output, out_q, input_scale, self.weight_scale, self.bias)
                    else:
                        dequantize_per_token(output, out_q, input_scale, self.weight_scale)
                    output = output.view(input_shape[:-1] + (output.shape[-1],))
                else:
                    # simulate quantization
                    input_scale, input_q = compress_int(input, 8, per_channel=False, per_token=True)
                    # Slice away the zero padding added in __init__ (negative
                    # stop index trims the padded tail columns).
                    if self.in_features % 4 != 0:
                        output = F.linear(
                            input_q.float(), self.weight[:, : self.in_features % 4 - 4].float(), self.bias
                        )
                    else:
                        output = F.linear(input_q.float(), self.weight.float(), self.bias)
                    # NOTE(review): the bias was added inside F.linear above and
                    # is therefore rescaled here together with the product --
                    # verify against the easyquant dequantize_bias path.
                    output = (output.float() * (self.weight_scale * input_scale.view(-1, 1))).half()
            else:
                input_fq = input
                output = F.linear(input_fq, weight_fp, self.bias)
        return output
class TDQLinear_noinit(QLinear):
    """Fast dynamic-quantization path of QLinear: assumes the easyquant
    kernels are available and skips all fallback branches in QLinear.forward.
    NOTE(review): self.bias, if present, is never applied on this path --
    verify this is intended for the models it is used with."""
    def forward(self, input: Tensor) -> Tensor:
        # Assumes input is (batch, seq, in_features) -- TODO confirm; the
        # view below flattens batch and sequence into rows.
        input_shape = input.shape
        bs, seq, _ = input_shape
        input_device = input.device
        input_viewed = input.view(-1, self.in_features)
        # pad self.weight to 4x
        padding_num = 4 - self.in_features % 4 if self.in_features % 4 != 0 else 0
        # Pre-allocated output buffers for the easyquant kernels.
        input_q = torch.empty(
            (input_viewed.shape[0], self.in_features + padding_num), dtype=torch.int8, device=input_device
        )
        input_scale = torch.empty(bs * seq, device=input_device)
        out_q = torch.empty((bs * seq, self.out_features), dtype=torch.int32, device=input_device)
        output = torch.empty_like(out_q, dtype=torch.float16, device=input_device)
        # Quantize per token, int8 GEMM, then dequantize (bias-free variant).
        quantize_per_token(input_q, input_viewed, input_scale)
        gemm(out_q, input_q, self.weight)
        dequantize_per_token(output, out_q, input_scale, self.weight_scale)
        output = output.view(input_shape[:-1] + (output.shape[-1],))
        return output
@torch.no_grad()
def fuse_atten(model: nn.Module):
    """fuse qkv linear, fuse scaled_dot_product_attention if torch>=1.13"""
    # Collect targets first so we do not mutate the module tree mid-iteration.
    targets = [(name, mod) for name, mod in model.named_modules()
               if isinstance(mod, LlamaAttention)]
    for name, attention in targets:
        _, parent, attr_name = get_submodule_with_parent_from_name(model, name)
        setattr(parent, attr_name, LlamaAttentionFused(attention))
@torch.no_grad()
def quant_fc(model: nn.Module, quant_mod='weight8', fuse_qkv=False):
    """Replace every fully-connected layer of an LLM with a quantized one,
    in place.

    Args:
        model: the given LLM model.
        quant_mod: working quantization mode; 'weight8' by default, with
            'weight4' and 'dynamic_token' also supported. The dynamic mode
            uses the easyquant lib for an accelerated Int8 GEMM.
        fuse_qkv: whether to fuse the attention qkv linears for speed; the
            scaled-dot-product-attention is also fused on PyTorch >= 1.13.
    """
    model.cpu()
    log.info(f'use quant mod {quant_mod} speedup={SPEEDUP}')
    if fuse_qkv:
        fuse_atten(model)
        log.info('qkv has been fused')
    # The no-init fast path only applies with easyquant + dynamic mode.
    use_fast_dynamic = (quant_mod == 'dynamic' and SPEEDUP)
    for name, mod in model.named_modules():
        # Never quantize the output head.
        if 'lm_head' in name:
            continue
        if not isinstance(mod, nn.Linear):
            continue
        _, parent, attr_name = get_submodule_with_parent_from_name(model, name)
        fc_cls = TDQLinear_noinit if use_fast_dynamic else QLinear
        setattr(parent, attr_name, fc_cls(mod, quant_mod))
|
alibabaREPO_NAMETinyNeuralNetworkPATH_START.@TinyNeuralNetwork_extracted@TinyNeuralNetwork-main@tinynn@llm_quant@modules.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "astrolabsoftware/fink-science",
"repo_path": "fink-science_extracted/fink-science-master/fink_science/anomaly_detection/README.md",
"type": "Markdown"
}
|
# Anomaly detection module
This module adds a new column, **anomaly_score** (lower values mean more anomalous observations). It uses two pre-trained IsolationForests (one per passband) and computes the score as the mean of their predictions.
|
astrolabsoftwareREPO_NAMEfink-sciencePATH_START.@fink-science_extracted@fink-science-master@fink_science@anomaly_detection@README.md@.PATH_END.py
|
{
"filename": "afd.py",
"repo_name": "bolverk/huji-rich",
"repo_path": "huji-rich_extracted/huji-rich-master/analytic/afd.py",
"type": "Python"
}
|
import numpy
import cmath
def sgn(x):
    """Signum of x computed as x/|x|; returns 0 at x == 0."""
    return 0 if x == 0 else x / abs(x)
def single_mode(x, t, k, omega, v, g0, dx, xi):
    """Analytic response of a single Fourier mode (wavenumber k, frequency
    omega) under first-order upwind advection at speed v, grid spacing dx
    and Courant number xi, with forcing amplitude g0."""
    phase = cmath.exp(1j*k*x)
    per_step = 1 - (1 - cmath.exp(-1j*k*dx))*xi
    numerator = g0*dx*xi*(cmath.exp(1j*omega*t) - per_step**(t*v/xi/dx))
    denominator = v*(-1 + cmath.exp(1j*dx*xi*omega/v) + xi - xi*cmath.exp(-1j*k*dx))
    return phase*numerator/denominator
def primitives_to_conserved(hydro_data):
    """
    Converts primitive variables to variables that are conserved along streamlines

    The linearised characteristic variables are the Riemann invariants
    J+- = dv +- dp/(rho*c) and the entropy ds = dp/p - g*drho/rho, where
    c = sqrt(g*p/rho) is the ambient sound speed.

    BUG FIX: the previous version used g*p/rho (i.e. c**2, missing the sqrt)
    in the Riemann invariants, which was inconsistent with the inverse
    transform conserved_to_primitives -- the round trip only held when c == 1.
    """
    import math
    g = hydro_data['adiabatic index']
    ambient = hydro_data['ambient']
    # Ambient sound speed c = sqrt(g*p/rho); previously the sqrt was missing.
    sound_speed = math.sqrt(g*ambient['pressure']/ambient['density'])
    acoustic_impedance = sound_speed*ambient['density']
    res = {}
    res['positive riemann invariant'] = [dv + dp/acoustic_impedance
                                         for dp, dv in zip(hydro_data['pert']['pressure'],
                                                           hydro_data['pert']['velocity'])]
    res['negative riemann invariant'] = [dv - dp/acoustic_impedance
                                         for dp, dv in zip(hydro_data['pert']['pressure'],
                                                           hydro_data['pert']['velocity'])]
    res['entropy'] = [dp/ambient['pressure'] - g*dd/ambient['density']
                      for dd, dp in zip(hydro_data['pert']['density'],
                                        hydro_data['pert']['pressure'])]
    return res
def conserved_to_primitives(initial, conserved):
    """Invert the characteristic decomposition: recover velocity, pressure
    and density perturbations from the Riemann invariants and entropy."""
    import math
    g = initial['adiabatic index']
    ambient = initial['ambient']
    sound_speed = math.sqrt(g*ambient['pressure']/ambient['density'])
    jp_list = conserved['positive riemann invariant']
    jm_list = conserved['negative riemann invariant']
    res = {}
    # dv is the mean of the invariants, dp their (scaled) half-difference.
    res['velocity'] = [0.5*(jp + jm) for jp, jm in zip(jp_list, jm_list)]
    res['pressure'] = [0.5*sound_speed*ambient['density']*(jp - jm)
                       for jp, jm in zip(jp_list, jm_list)]
    # Density follows from pressure and the entropy perturbation.
    res['density'] = [(ambient['density']/g)*(dp/ambient['pressure'] - ds)
                      for dp, ds in zip(res['pressure'], conserved['entropy'])]
    return res
def to_k_space(x_list, y_list):
    """FFT y_list and return the matching angular wavenumbers.

    Assumes a uniformly spaced grid (spacing taken from the first two points).
    """
    import numpy
    spacing = x_list[1] - x_list[0]
    spectrum = numpy.fft.fft(y_list)
    wavenumbers = 2*numpy.pi*numpy.fft.fftfreq(len(y_list), d=spacing)
    return wavenumbers, spectrum
def apply_filter(x_list, y_list, filter_func):
    """Multiply the spectrum of y_list by filter_func(k) and transform back.

    Returns the (complex) inverse FFT of the filtered spectrum."""
    k_list, fy_list = to_k_space(x_list, y_list)
    filtered = [filter_func(k)*fy for k, fy in zip(k_list, fy_list)]
    return numpy.fft.ifft(filtered)
def afd_advance_1(x_list, y_list, v, t, cfl=0.3):
    """Advect y_list by velocity v for time t with the first-order
    upwind scheme, applied spectrally; returns the real part."""
    import cmath
    dx = x_list[1] - x_list[0]
    upwind = sgn(v)                      # upwind direction of the stencil
    n_steps = t*abs(v)/(cfl*dx)          # (possibly fractional) step count
    def filter_func(k):
        amplification = 1 - cfl*(1.0 - cmath.exp(-1j*k*dx*upwind))
        return amplification**n_steps
    return [z.real for z in apply_filter(x_list, y_list, filter_func)]
def afd_advance_2(x_list, y_list, v, t, cfl=0.3):
    """Advect y_list by velocity v for time t with the second-order
    (Lax-Wendroff-like) scheme, applied spectrally; returns the real part."""
    import cmath
    dx = x_list[1] - x_list[0]
    n_steps = t*v/(cfl*dx)               # (possibly fractional) step count
    def filter_func(k):
        amplification = (0.25*(4 - cfl**2 + cfl**2*cmath.cos(2*k*dx)) -
                         1j*cfl*cmath.sin(k*dx))
        return amplification**n_steps
    return [z.real for z in apply_filter(x_list, y_list, filter_func)]
def exact_advance(x_list, y_list, v, t):
    """Advect y_list exactly: shift every mode's phase by exp(-i*k*v*t)."""
    import cmath
    dx = x_list[1] - x_list[0]  # unused by the filter; kept from the original
    shift = v*t
    def filter_func(k):
        return cmath.exp(-1j*k*shift)
    return [z.real for z in apply_filter(x_list, y_list, filter_func)]
def calc_propagation_speeds(initial):
    """Characteristic speeds for the three fields conserved along
    streamlines: u +/- c for the Riemann invariants, u for entropy."""
    import math
    amb = initial['ambient']
    c0 = math.sqrt(initial['adiabatic index']*amb['pressure']/amb['density'])
    u0 = amb['velocity']
    return {'positive riemann invariant': u0 + c0,
            'negative riemann invariant': u0 - c0,
            'entropy': u0}
def time_advance(initial, time, scheme):
    """Advance each conserved field with `scheme(x, y, speed, time)` at its
    own characteristic speed, then convert back to primitive variables."""
    conserved = primitives_to_conserved(initial)
    speeds = calc_propagation_speeds(initial)
    evolved = {field: scheme(initial['grid'], values, speeds[field], time)
               for field, values in conserved.items()}
    return conserved_to_primitives(initial, evolved)
def exact_time_advance(initial, time):
    """Evolve `initial` to `time` with the exact spectral advection scheme."""
    return time_advance(initial, time, exact_advance)
def first_order_time_advance(initial, time, cfl=0.3):
    """Evolve `initial` to `time` with the first-order AFD scheme at `cfl`."""
    scheme = lambda xs, ys, v, t: afd_advance_1(xs, ys, v, t, cfl)
    return time_advance(initial, time, scheme)
def second_order_time_advance(initial, time, cfl=0.3):
    """Evolve `initial` to `time` with the second-order AFD scheme at `cfl`."""
    scheme = lambda xs, ys, v, t: afd_advance_2(xs, ys, v, t, cfl)
    return time_advance(initial, time, scheme)
|
bolverkREPO_NAMEhuji-richPATH_START.@huji-rich_extracted@huji-rich-master@analytic@afd.py@.PATH_END.py
|
{
"filename": "coldgas.py",
"repo_name": "ICRAR/shark",
"repo_path": "shark_extracted/shark-master/standard_plots/coldgas.py",
"type": "Python"
}
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2018
# Copyright by UWA (in the framework of the ICRAR)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import functools
import numpy as np
import common
import utilities_statistics as us
##################################
# Constants
# Stellar-mass bins: log10(M*/Msun) edges from 6 to 12 in 0.5 dex steps;
# xmf holds the bin centres used as the x axis of the mass relations.
mlow = 6.0
mupp = 12.0
dm = 0.5
mbins = np.arange(mlow, mupp, dm)
xmf = mbins + dm/2.0
# SFR bins: log10(SFR) edges from -1.5 to 2.5 in 0.2 dex steps; xsf centres.
slow = -1.5
supp = 2.5
ds = 0.2
sbins = np.arange(slow, supp, ds)
xsf = sbins + ds/2.0
# sSFR bins: log10(sSFR/yr^-1) edges from -13 to -8 in 0.5 dex steps;
# xssfr holds the bin centres.
sflow = -13.0
sfupp = -8.0
dsf = 0.5
sfbins = np.arange(sflow, sfupp, dsf)
xssfr = sfbins + dsf/2.0
def add_observations_to_plot(obsdir, fname, ax, marker, label, color='k', err_absolute=False):
    """Overlay one observational dataset from the Gas/ folder on `ax`
    as error bars, using the first four columns of the file."""
    full_name = '%s/Gas/%s' % (obsdir, fname)
    x, y, yerr_down, yerr_up = common.load_observation(obsdir, full_name, (0, 1, 2, 3))
    common.errorbars(ax, x, y, yerr_down, yerr_up, color, marker,
                     label=label, err_absolute=err_absolute)
def prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit):
    # Thin wrapper around the shared axis setup; the legend-anchor
    # coordinates below are only consumed by the disabled redshift label.
    common.prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
    xleg = xmax - 0.2 * (xmax-xmin)
    yleg = ymax - 0.1 * (ymax-ymin)
    #ax.text(xleg, yleg, 'z=0')
def prepare_data(index, redshift, hdf5_data):
    """Compute the cold-gas scaling relations for one snapshot.

    Parameters
    ----------
    index : position of the snapshot in the redshift list (unused here).
    redshift : snapshot redshift; sets the main-sequence sSFR threshold.
    hdf5_data : tuple of per-galaxy arrays read by common.read_data, in the
        order declared by `fields` in main().

    Returns a long tuple of binned medians (us.wmedians) and stacked means
    (us.stacking) consumed by the plotting routines.
    """
    # Binned-median and stacking helpers over the module-level bin edges.
    bin_it = functools.partial(us.wmedians, xbins=xmf)
    bin_it_ssfr = functools.partial(us.wmedians, xbins=xssfr)
    stack = functools.partial(us.stacking, xbins=xmf)
    stack_sfr = functools.partial(us.stacking, xbins=xsf)
    stack_ssfr = functools.partial(us.stacking, xbins=xssfr)
    # Unpack data
    (h0, _, typeg, mdisk, mbulge, _, mbh, mHI, mH2, mgas,
     mHI_bulge, mH2_bulge, mgas_bulge, mvir, sfrd,
     sfrb, id_halo_tree) = hdf5_data
    # Total SFR; the /h0/1e9 rescaling presumably converts Msun/Gyr/h to
    # Msun/yr -- TODO confirm the units written by Shark.
    sfr = (sfrd + sfrb)/h0/1e9
    XH = 0.72  # hydrogen mass fraction used to convert neutral gas masses
    h0log = np.log10(float(h0))
    n_typeg = len(typeg)
    morpho_type = np.zeros(shape = (n_typeg))          # 1 = ETG (baryonic B/T)
    morpho_type_stellar = np.zeros(shape = (n_typeg))  # 1 = ETG (stellar B/T)
    ssfr = np.zeros(shape = (n_typeg))                 # log10(sSFR/yr^-1)
    main_seq = np.zeros(shape = (n_typeg))             # 1 = above the MS cut
    type_g_conf = np.zeros(shape = (n_typeg))          # type with cen/sat confusion
    mhalo_conf = np.zeros(shape = (n_typeg))           # halo mass with confusion
    #we follow Campbell et al. (2015) who shows that the purity of satellites is constant with halo mass at a level of ~65%
    #while for centrals it goes from 90% at 1e12, 85% at 1e13, 80% at 1e14
    mhalo_bins=[0,1e12,1e13,1e14,1e16]
    # Optional Monte-Carlo reshuffling of central/satellite labels to mimic
    # group-finder misclassification; disabled by default.
    CalculateHaloConfusion = False
    if(CalculateHaloConfusion == True):
        sat_pur = 0.65
        cen_put = [0.875, 0.875, 0.825, 0.7, 0.7]
        for j in range(0,len(mhalo_bins)-1):
            #select central galaxies
            ind = np.where((typeg == 0) & (mvir/h0 >= mhalo_bins[j]) & (mvir/h0 < mhalo_bins[j+1]))
            n_cen = len(typeg[ind])
            typein = np.zeros(shape = n_cen)
            #we now need to randomly select a fraction of centrals and reassigned them as satellites
            typein[:] = 0
            IDS = range(0,n_cen)
            selected = np.random.choice(IDS, size=int(np.floor((1 - cen_put[j+1])*n_cen)))
            typein[selected] = 1
            type_g_conf[ind] = typein
            #select satellites galaxies
            ind = np.where((typeg > 0) & (mvir/h0 >= mhalo_bins[j]) & (mvir/h0 < mhalo_bins[j+1]))
            n_sat = len(typeg[ind])
            typein = np.zeros(shape = n_sat)
            #we now need to randomly select a fraction of satellites and reassigned them as centrals
            typein[:] = 1
            IDS = range(0,n_sat)
            selected = np.random.choice(IDS, size=int(np.floor((1-sat_pur)*n_sat)))
            typein[selected] = 0
            type_g_conf[ind] = typein
        #now we rank order the masses of halos and assign them to the new "centrals"
        #first take all central galaxies with a stellar mass >1e9Msun:
        ind = np.where((type_g_conf == 0) & ((mdisk + mbulge)/h0 > 1e9))
        n_most_mass_cens = len(type_g_conf[ind])
        ids_cen = id_halo_tree[ind]
        mstar_tot_group = np.zeros(shape = n_most_mass_cens)
        for i,g in enumerate(ids_cen):
            ind = np.where(id_halo_tree == g)
            mstar_tot_group[i] = sum(mdisk[ind] + mbulge[ind])
        #now select the n_most_mass_cens most massive halos
        ind = np.where(typeg == 0)
        mvirin = mvir[ind]
        id_halo_tree_in = id_halo_tree[ind]
        mass_sorted_ids = np.argsort(1.0/mvirin)  # descending halo mass
        mhalo_most_massive = mvirin[mass_sorted_ids[0:n_most_mass_cens]]/h0
        ids_most_massive = id_halo_tree_in[mass_sorted_ids[0:n_most_mass_cens]]
        print(mhalo_most_massive, min(mhalo_most_massive), max(mhalo_most_massive))
        #sort total stellar mass of groups
        smass_sorted_ids = np.argsort(1.0/mstar_tot_group)  # descending
        ids_sm_sorted = ids_cen[smass_sorted_ids]
        for i,g in enumerate(ids_sm_sorted):
            ind = np.where(id_halo_tree == g)
            mhalo_conf[ind] = mhalo_most_massive[i]
    # NOTE(review): this unconditionally overwrites mhalo_conf with the true
    # host-halo mass, so the abundance-matched masses computed above (when
    # CalculateHaloConfusion is True) are discarded -- confirm intended.
    mhalo_conf = mvir/h0
    # Specific SFR; galaxies with no mass or no SFR keep the floor of -15.
    ssfr[:] = -15
    ind = np.where((mdisk + mbulge > 0) & (sfr > 0))
    ssfr[ind] = np.log10(sfr[ind]) - np.log10((mdisk[ind] + mbulge[ind])/h0)
    # Main-sequence selection: redshift-dependent sSFR threshold.
    ind = np.where(ssfr + 9 > -1 + 0.5 * redshift)
    main_seq[ind] = 1
    ind = np.where((mbulge + mgas_bulge)/h0 > 1e12)
    print("median B/T massive gals", np.median(mbulge[ind]/(mbulge[ind] + mdisk[ind])))
    print("median SSFR massive gals", np.median(ssfr[ind]))
    print("median mBH massive gals", np.median(mbh[ind]))
    # Early-type galaxies criterion of Khochfar et al. (2011).
    ind = np.where((mbulge + mgas_bulge)/(mdisk + mbulge + mgas + mgas_bulge) > 0.5)
    morpho_type[ind] = 1.0
    # Early-type galaxies criterion based on stellar mass alone.
    ind = np.where((mbulge)/(mdisk + mbulge) > 0.5)
    morpho_type_stellar[ind] = 1.0
    # Row 0: log10 stellar mass; row 1: log10 gas-to-stellar-mass ratio.
    mh2_gals = np.zeros(shape = (2, n_typeg))
    mh1_gals = np.zeros(shape = (2, n_typeg))
    mgas_gals = np.zeros(shape = (2, n_typeg))
    mh2_gals_ltg = np.zeros(shape = (2, n_typeg))
    mh1_gals_ltg = np.zeros(shape = (2, n_typeg))
    mgas_gals_ltg = np.zeros(shape = (2, n_typeg))
    mh2_gals_etg = np.zeros(shape = (2, n_typeg))
    mh1_gals_etg = np.zeros(shape = (2, n_typeg))
    mgas_gals_etg = np.zeros(shape = (2, n_typeg))
    # Axis 0: 0 = true cen/sat labels, 1 = confusion labels.
    # Axis 1: halo-mass bin, with slot 0 holding "all satellites".
    mh1_relation_satellites_halos_stack = np.zeros(shape = (2, 5, 2, len(mbins)))
    mh1_relation_ssfr_satellites_halos_stack = np.zeros(shape = (2, 5, 2, len(sfbins)))
    mh1_relation_satellites_halos = np.zeros(shape = (2, 3, len(mbins)))
    mh1_relation_ssfr_satellites_halos = np.zeros(shape = (2, 3, len(sfbins)))
    #Gas scaling relation from stacking
    ind = np.where((mdisk + mbulge > 0) & (mHI_bulge + mHI > 0) & (main_seq == 1) & (sfr > 0))
    #print(mHI_bulge[ind] + mHI[ind])
    mh1_mstar_stack = stack(x=np.log10((mdisk[ind] + mbulge[ind])/h0), y=(mHI_bulge[ind] + mHI[ind])/h0)
    mh1_sfr_stack = stack_sfr(x=np.log10(sfr[ind]), y=(mHI_bulge[ind] + mHI[ind])/h0)
    fh1_mstar_stack = stack(x=np.log10((mdisk[ind] + mbulge[ind])/h0), y=(mHI_bulge[ind] + mHI[ind])/(mdisk[ind] + mbulge[ind]))
    fh1_sfr_stack = stack_sfr(x=np.log10(sfr[ind]), y=(mHI_bulge[ind] + mHI[ind])/(mdisk[ind] + mbulge[ind]))
    #print("HI stacking stellar mass at z=", redshift)
    #for a,b,c in zip(xmf, mh1_mstar_stack, fh1_mstar_stack):
    #    print(a,b,c)
    #
    #print("HI stacking SFR at z=", redshift)
    #for a,b,c in zip(xsf, mh1_sfr_stack, fh1_sfr_stack):
    #    print(a,b,c)
    #Gas scaling relations based on morphological criterion calculated using total baryon mass of disk and bulge
    ind = np.where((mdisk + mbulge > 0) & (mgas + mgas_bulge > 0) & (mH2 + mH2_bulge > 0) & (morpho_type == 0))
    # Data we'll use later
    mass = mdisk[ind] + mbulge[ind]
    mgas_gals_ltg[0,ind] = mh1_gals_ltg[0,ind] = mh2_gals_ltg[0,ind] = np.log10(mass) - h0log
    mgas_gals_ltg[1,ind] = np.log10(XH * (mgas[ind] + mgas_bulge[ind]) / mass)
    mh1_gals_ltg[1,ind] = np.log10(XH * (mHI[ind] + mHI_bulge[ind]) / mass)
    mh2_gals_ltg[1,ind] = np.log10(XH * (mH2[ind] + mH2_bulge[ind]) / (mass))
    mgas_relation_ltg = bin_it(x=mgas_gals_ltg[0, ind], y=mgas_gals_ltg[1, ind])
    mh1_relation_ltg = bin_it(x=mh1_gals_ltg[0, ind], y=mh1_gals_ltg[1, ind])
    mh2_relation_ltg = bin_it(x=mh2_gals_ltg[0, ind], y=mh2_gals_ltg[1, ind])
    ind = np.where((mdisk + mbulge > 0) & (mgas + mgas_bulge > 0) & (mH2 + mH2_bulge > 0) & (morpho_type == 1))
    # Data we'll use later
    mass = mdisk[ind] + mbulge[ind]
    mgas_gals_etg[0,ind] = mh1_gals_etg[0,ind] = mh2_gals_etg[0,ind] = np.log10(mass) - h0log
    mgas_gals_etg[1,ind] = np.log10(XH * (mgas[ind] + mgas_bulge[ind]) / mass)
    mh1_gals_etg[1,ind] = np.log10(XH * (mHI[ind] + mHI_bulge[ind]) / mass)
    mh2_gals_etg[1,ind] = np.log10(XH * (mH2[ind] + mH2_bulge[ind]) / (mass))
    mgas_relation_etg = bin_it(x=mgas_gals_etg[0, ind], y=mgas_gals_etg[1, ind])
    mh1_relation_etg = bin_it(x=mh1_gals_etg[0, ind], y=mh1_gals_etg[1, ind])
    mh2_relation_etg = bin_it(x=mh2_gals_etg[0, ind], y=mh2_gals_etg[1, ind])
    #Gas scaling relations based on morphological criterion calculated using stellar mass of disk and bulge
    ind = np.where((mdisk + mbulge > 0) & (mgas + mgas_bulge > 0) & (mH2 + mH2_bulge > 0) & (morpho_type_stellar == 0))
    # Data we'll use later
    mass = mdisk[ind] + mbulge[ind]
    mgas_gals_ltg[0,ind] = mh1_gals_ltg[0,ind] = mh2_gals_ltg[0,ind] = np.log10(mass) - h0log
    mgas_gals_ltg[1,ind] = np.log10(XH * (mgas[ind] + mgas_bulge[ind]) / mass)
    mh1_gals_ltg[1,ind] = np.log10(XH * (mHI[ind] + mHI_bulge[ind]) / mass)
    mh2_gals_ltg[1,ind] = np.log10(XH * (mH2[ind] + mH2_bulge[ind]) / (mass))
    mgas_ms_relation_ltg = bin_it(x=mgas_gals_ltg[0, ind], y=mgas_gals_ltg[1, ind])
    mh1_ms_relation_ltg = bin_it(x=mh1_gals_ltg[0, ind], y=mh1_gals_ltg[1, ind])
    mh2_ms_relation_ltg = bin_it(x=mh2_gals_ltg[0, ind], y=mh2_gals_ltg[1, ind])
    ind = np.where((mdisk + mbulge > 0) & (mgas + mgas_bulge > 0) & (mH2 + mH2_bulge > 0) & (morpho_type_stellar == 1))
    # Data we'll use later
    mass = mdisk[ind] + mbulge[ind]
    mgas_gals_etg[0,ind] = mh1_gals_etg[0,ind] = mh2_gals_etg[0,ind] = np.log10(mass) - h0log
    mgas_gals_etg[1,ind] = np.log10(XH * (mgas[ind] + mgas_bulge[ind]) / mass)
    mh1_gals_etg[1,ind] = np.log10(XH * (mHI[ind] + mHI_bulge[ind]) / mass)
    mh2_gals_etg[1,ind] = np.log10(XH * (mH2[ind] + mH2_bulge[ind]) / (mass))
    mgas_ms_relation_etg = bin_it(x=mgas_gals_etg[0, ind], y=mgas_gals_etg[1, ind])
    mh1_ms_relation_etg = bin_it(x=mh1_gals_etg[0, ind], y=mh1_gals_etg[1, ind])
    mh2_ms_relation_etg = bin_it(x=mh2_gals_etg[0, ind], y=mh2_gals_etg[1, ind])
    # Constrains
    ind = np.where((mdisk + mbulge > 0) & (mgas + mgas_bulge > 0) & (mH2 + mH2_bulge > 0))
    # Data we'll use later
    mass = mdisk[ind] + mbulge[ind]
    mgas_gals[0,ind] = mh1_gals[0,ind] = mh2_gals[0,ind] = np.log10(mass) - h0log
    mgas_gals[1,ind] = np.log10(XH * (mgas[ind] + mgas_bulge[ind]) / mass)
    mh1_gals[1,ind] = np.log10(XH * (mHI[ind] + mHI_bulge[ind]) / mass)
    mh2_gals[1,ind] = np.log10(XH * (mH2[ind] + mH2_bulge[ind]) / (mass))
    # Binned relations
    mgas_relation = bin_it(x=mgas_gals[0, ind], y=mgas_gals[1, ind])
    mh1_relation = bin_it(x=mh1_gals[0, ind], y=mh1_gals[1, ind])
    mh2_relation = bin_it(x=mh2_gals[0, ind], y=mh2_gals[1, ind])
    mhr_relation = bin_it(x=np.log10(mdisk[ind]+mbulge[ind]) - h0log,
                          y=np.log10((mH2[ind] + mH2_bulge[ind]) / (mHI[ind] + mHI_bulge[ind])))
    # Satellite HI fractions per host-halo-mass bin, for the true labels
    # (axis-0 index 0) and the confusion labels (axis-0 index 1);
    # halo-bin slot 0 is filled with the full satellite sample below.
    for i in range(0,len(mhalo_bins)-1):
        ind = np.where((mdisk + mbulge > 0) & (mvir/h0 >= mhalo_bins[i]) & (mvir/h0 < mhalo_bins[i+1]) & (typeg > 0) & ((mdisk + mbulge)/h0 > 1e9))
        mh1_relation_satellites_halos_stack[0,i+1,:] = stack(x=np.log10((mdisk[ind] + mbulge[ind])/h0), y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
        mh1_relation_ssfr_satellites_halos_stack[0,i+1,:] = stack_ssfr(x=ssfr[ind], y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
        ind = np.where((mdisk + mbulge > 0) & (mhalo_conf >= mhalo_bins[i]) & (mhalo_conf < mhalo_bins[i+1]) & (type_g_conf > 0) & ((mdisk + mbulge)/h0 > 1e9))
        mh1_relation_satellites_halos_stack[1,i+1,:] = stack(x=np.log10((mdisk[ind] + mbulge[ind])/h0), y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
        mh1_relation_ssfr_satellites_halos_stack[1,i+1,:] = stack_ssfr(x=ssfr[ind], y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    ind = np.where((mdisk + mbulge > 0) & (typeg > 0) & ((mdisk + mbulge)/h0 > 1e9))
    mh1_relation_satellites_halos_stack[0,0,:] = stack(x=np.log10((mdisk[ind] + mbulge[ind])/h0), y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    mh1_relation_ssfr_satellites_halos_stack[0,0,:] = stack_ssfr(x=ssfr[ind], y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    mh1_relation_satellites_halos[0,:] = bin_it(x=np.log10((mdisk[ind] + mbulge[ind])/h0), y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    mh1_relation_ssfr_satellites_halos[0,:] = bin_it_ssfr(x=ssfr[ind], y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    ind = np.where((mdisk + mbulge > 0) & (type_g_conf > 0) & ((mdisk + mbulge)/h0 > 1e9))
    mh1_relation_satellites_halos_stack[1,0,:] = stack(x=np.log10((mdisk[ind] + mbulge[ind])/h0), y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    mh1_relation_ssfr_satellites_halos_stack[1,0,:] = stack_ssfr(x=ssfr[ind], y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    mh1_relation_satellites_halos[1,:] = bin_it(x=np.log10((mdisk[ind] + mbulge[ind])/h0), y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    mh1_relation_ssfr_satellites_halos[1,:] = bin_it_ssfr(x=ssfr[ind], y=(mHI[ind]+mHI_bulge[ind])/(mdisk[ind] + mbulge[ind]))
    # Central/satellite splits of the total-gas and H2/HI relations.
    ind = np.where((mdisk+mbulge > 0) & (typeg == 0) & ((mdisk + mbulge)/h0 > 1e9))
    mass_central = mdisk[ind] + mbulge[ind]
    mgas_relation_cen = bin_it(x=np.log10(mass_central) - h0log,
                               y=np.log10(XH * (mgas[ind] + mgas_bulge[ind]) / (mdisk[ind] + mbulge[ind] + XH*mgas[ind] + XH*mgas_bulge[ind])))
    mhr_relation_cen = bin_it(x=np.log10(mass_central) - h0log,
                              y=np.log10((mH2[ind] + mH2_bulge[ind]) / (mHI[ind] + mHI_bulge[ind])))
    ind = np.where((mdisk+mbulge > 0) & (typeg > 0))
    mass_sat = np.log10(mdisk[ind] + mbulge[ind]) - h0log
    mgas_relation_sat = bin_it(x=mass_sat,
                               y=np.log10(XH * (mgas[ind] + mgas_bulge[ind]) / (mdisk[ind] + mbulge[ind] + XH*mgas[ind] + XH*mgas_bulge[ind])))
    mhr_relation_sat = bin_it(x=mass_sat,
                              y=np.log10((mH2[ind] + mH2_bulge[ind]) / (mHI[ind] + mHI_bulge[ind])))
    return (mgas_relation, mgas_relation_cen, mgas_relation_sat,
            mh2_gals, mh1_gals, mgas_gals,
            mh2_relation, mh1_relation, mhr_relation, mhr_relation_cen, mhr_relation_sat,
            mgas_relation_ltg, mh2_relation_ltg, mh1_relation_ltg, mgas_relation_etg, mh2_relation_etg,
            mh1_relation_etg, mgas_ms_relation_ltg, mh2_ms_relation_ltg, mh1_ms_relation_ltg,
            mgas_ms_relation_etg, mh2_ms_relation_etg, mh1_ms_relation_etg, mh1_relation_satellites_halos,
            mh1_relation_ssfr_satellites_halos, mh1_relation_satellites_halos_stack, mh1_relation_ssfr_satellites_halos_stack)
def plot_cold_gas_fraction(plt, output_dir, obs_dir, mgas_relation, mgas_relation_cen, mgas_relation_sat):
    """Plot the cold-gas-to-stellar-mass ratio versus stellar mass for all
    galaxies, centrals and satellites, with GASS+COLDGASS data overlaid."""
    fig = plt.figure(figsize=(5,4.5))
    ax = fig.add_subplot(111)
    plt.subplots_adjust(bottom=0.15, left=0.15)
    xtit = "$\\rm log_{10} (\\rm M_{\\star}/M_{\odot})$"
    ytit = "$\\rm log_{10}(M_{\\rm cold}/M_{\\star})$"
    prepare_ax(ax, 8, 12, -3, 0.1, xtit, ytit)

    def _draw(relation, color, style=None, label=None, scatter=False):
        # Plot the median of a binned relation, skipping empty bins;
        # optionally add the scatter as error bars.
        sel = np.where(relation[0,:] != 0)
        xvals = xmf[sel]
        med = relation[0,sel]
        if style is None:
            ax.errorbar(xvals, med[0], color=color, label=label)
        else:
            ax.errorbar(xvals, med[0], color=color, linestyle=style, label=label)
        if scatter:
            ax.errorbar(xvals, med[0], yerr=[relation[1,sel][0], relation[2,sel][0]],
                        ls='None', mfc='None', ecolor = color, mec=color,
                        marker='+', markersize=2)

    _draw(mgas_relation, 'k', label="all galaxies", scatter=True)
    _draw(mgas_relation_cen, 'b', style='dotted', label="centrals")
    _draw(mgas_relation_sat, 'r', style='dashed', label="satelites")
    # GASS+COLDGASS observations with two treatments of non-detections.
    add_observations_to_plot(obs_dir, 'NeutralGasRatio_NonDetEQUpperLimits.dat', ax, 'v', "GASS+COLDGASS", color='grey')
    add_observations_to_plot(obs_dir, 'NeutralGasRatio_NonDetEQZero.dat', ax, '^', "GASS+COLDGASS", color='grey')
    common.prepare_legend(ax, ['k','b','r','grey','grey'])
    common.savefig(output_dir, fig, "cold_gas_fraction.pdf")
def plot_HI_stacking(plt, output_dir, obs_dir, mh1_relation_satellites_halos, mh1_relation_ssfr_satellites_halos):
    """Plot stacked satellite HI-to-stellar-mass ratios split by host-halo
    mass, versus stellar mass and versus sSFR, plus the same relations as
    differences relative to the full satellite sample, comparing the true
    central/satellite labels with the confusion-corrected ones and
    overlaying the Brown et al. (2017) stacking measurements.

    NOTE(review): the indexing [0,i,1,:] implies these arguments are the
    4-d *stacked* arrays from prepare_data (shape (2, 5, 2, nbins)), not the
    similarly named 3-d median arrays -- confirm at the call site.
    """
    # Figure 1: HI fraction vs stellar mass, one panel per halo-mass bin.
    fig = plt.figure(figsize=(16,4.5))
    subp = [151, 152, 153, 154, 155]
    xtit="$\\rm log_{10} (\\rm M_{\\star}/M_{\odot})$"
    ytit="$\\rm log_{10}(M_{\\rm HI}/M_{\\star})$"
    labels = ['all satellites', '$M_{\\rm halo}<10^{12}\\rm M_{\\odot}$', '$10^{12}\\rm M_{\\odot}<M_{\\rm halo}<10^{13}\\rm M_{\\odot}$', '$10^{13}\\rm M_{\\odot}<M_{\\rm halo}<10^{14}\\rm M_{\\odot}$', '$M_{\\rm halo}>10^{14}\\rm M_{\\odot}$']

    def plot_obs_brown17(ax, bin_mass=0, mass = True, lab=True, delta=False, color='Salmon'):
        # Overlay the Brown+17 stacked HI data for one halo-mass bin, either
        # vs stellar mass (mass=True) or vs sSFR; with delta=True the values
        # are plotted relative to the full sample (binm == 0) with errors
        # propagated in quadrature.
        if(mass == True):
            x, y, ydown, yup, binm = common.load_observation(obs_dir, 'Gas/HIMstar_Brown17.dat', (0, 1, 2, 3, 4))
        else:
            x, y, ydown, yup, binm = common.load_observation(obs_dir, 'Gas/HISSFR_Brown17.dat', (0, 1, 2, 3, 4))
        #define the relation for the whole sample
        ind = np.where(binm == 0)
        xin_all = x[ind]
        yin_all = y[ind]
        yerr_dn_all = abs(y[ind] - ydown[ind])
        yerr_up_all = abs(y[ind] - yup[ind])
        ind = np.where(binm == bin_mass)
        xin = x[ind]
        if(delta == False):
            yin = y[ind]
            yerr_dn = abs(yin - ydown[ind])
            yerr_up = abs(yin - yup[ind])
        else:
            nbins = len(binm[ind])
            yin = y[ind] - yin_all[0:nbins]
            yerr_dn = np.sqrt((y[ind] - ydown[ind])**2 + yerr_dn_all[0:nbins]**2)
            yerr_up = np.sqrt((y[ind] - yup[ind])**2 + yerr_up_all[0:nbins]**2)
        ax.errorbar(xin, yin, yerr=[yerr_dn, yerr_up], ls='None', mfc=color, fillstyle='full', ecolor = color, mec=color,marker='o',markersize=7, label="Brown+17" if lab else None)

    for i, sp in enumerate(subp):
        ax = fig.add_subplot(sp)
        plt.subplots_adjust(bottom=0.15, left=0.15)
        ytitle = ""
        if(i == 0):
            ytitle = ytit
        prepare_ax(ax, 9, 11.5, -4, 1, xtit, ytitle)
        # Model prediction with the true labels (solid)...
        ind = np.where(mh1_relation_satellites_halos[0,i,1,:] != 0)
        xplot = mh1_relation_satellites_halos[0,i,0,ind]
        yplot = mh1_relation_satellites_halos[0,i,1,ind]
        ax.plot(xplot[0],yplot[0],color='r', linestyle='solid', label=labels[i])
        # ...and with satellite/central confusion applied (dashed).
        ind = np.where(mh1_relation_satellites_halos[1,i,1,:] != 0)
        xplot = mh1_relation_satellites_halos[1,i,0,ind]
        yplot = mh1_relation_satellites_halos[1,i,1,ind]
        ax.plot(xplot[0],yplot[0],color='r', linestyle='dashed', label='with sat/cen confusion')
        if(i == 0):
            plot_obs_brown17(ax, bin_mass= i, mass=True, lab = True)
        else:
            plot_obs_brown17(ax, bin_mass= i, mass=True, lab = False)
        common.prepare_legend(ax, ['r','r','Salmon'])
    plt.tight_layout()
    common.savefig(output_dir, fig, "HI_stacking_satellites.pdf")
    # Figure 2: the same five panels as a function of specific SFR.
    fig = plt.figure(figsize=(16,4.5))
    xtit="$\\rm log_{10} (\\rm sSFR/yr^{-1})$"
    for i, sp in enumerate(subp):
        ax = fig.add_subplot(sp)
        plt.subplots_adjust(bottom=0.15, left=0.15)
        ytitle = ""
        if(i == 0):
            ytitle = ytit
        prepare_ax(ax, -13, -8, -4, 1, xtit, ytitle)
        ind = np.where(mh1_relation_ssfr_satellites_halos[0,i,1,:] != 0)
        xplot = mh1_relation_ssfr_satellites_halos[0,i,0,ind]
        yplot = mh1_relation_ssfr_satellites_halos[0,i,1,ind]
        ax.plot(xplot[0],yplot[0],color='r', linestyle='solid', label=labels[i])
        ind = np.where(mh1_relation_ssfr_satellites_halos[1,i,1,:] != 0)
        xplot = mh1_relation_ssfr_satellites_halos[1,i,0,ind]
        yplot = mh1_relation_ssfr_satellites_halos[1,i,1,ind]
        ax.plot(xplot[0],yplot[0],color='r', linestyle='dashed', label='with sat/cen confusion')
        if(i == 0):
            plot_obs_brown17(ax, bin_mass= i, mass=False, lab = True)
        else:
            plot_obs_brown17(ax, bin_mass= i, mass=False, lab = False)
        common.prepare_legend(ax, ['r','r','Salmon'])
    plt.tight_layout()
    common.savefig(output_dir, fig, "HI_stacking_satellites_ssfr.pdf")

    def plot_lagos18_HIstacking_relativediff(ax, mass_bin=1, mass=True, color='k'):
        # Overlay the Lagos+18 Shark-variation relative-difference curves;
        # the hard-coded index ranges select the rows of one halo-mass bin.
        # NOTE(review): mass_bin == 0 would leave i_i/i_f undefined; callers
        # below only pass mass_bin >= 1.
        if(mass == False):
            m, reldiff = common.load_observation(obs_dir, 'Models/SharkVariations/HIStackingSatellite_SSFR_RelativeDiff.dat', (0, 1))
            if(mass_bin == 1):
                i_i = 0
                i_f = 9
            elif(mass_bin == 2):
                i_i = 9
                i_f = 19
            elif(mass_bin == 3):
                i_i = 19
                i_f = 29
            elif(mass_bin == 4):
                i_i=29
                i_f=39
        else:
            m, reldiff = common.load_observation(obs_dir, 'Models/SharkVariations/HIStackingSatellite_Mstar_RelativeDiff.dat', (0, 1))
            if(mass_bin == 1):
                i_i = 0
                i_f = 4
            elif(mass_bin == 2):
                i_i = 4
                i_f = 9
            elif(mass_bin == 3):
                i_i = 9
                i_f = 15
            elif(mass_bin == 4):
                i_i=15
                i_f=21
        ax.plot(m[i_i:i_f], reldiff[i_i:i_f], linestyle='dashed', color=color)

    # Figure 3: difference relative to all satellites, vs stellar mass.
    fig = plt.figure(figsize=(5,4.5))
    xtit="$\\rm log_{10} (\\rm M_{\\star}/M_{\odot})$"
    ytit="$\\rm \\Delta\\,log_{10}\\langle M_{\\rm HI}/M_{\\star}\\rangle$"
    colors = ['k','blue', 'darkgreen', 'orange', 'red']
    ax = fig.add_subplot(111)
    plt.subplots_adjust(bottom=0.15, left=0.15)
    common.prepare_ax(ax, 9, 12, -1, 0.7, xtit, ytit, locators=(0.5, 0.5, 0.2, 0.2))
    for i, lab in enumerate(labels):
        # Skip i == 0: the reference ("all satellites") would be identically 0.
        if(i >= 1):
            plot_obs_brown17(ax, bin_mass= i, mass=True, lab = False, delta=True, color=colors[i])
            plot_lagos18_HIstacking_relativediff(ax, mass_bin=i, mass=True, color=colors[i])
            ind = np.where((mh1_relation_satellites_halos[0,i,1,:] != 0) & (np.isinf(mh1_relation_satellites_halos[0,i,1,:]) == False))
            xplot = mh1_relation_satellites_halos[0,i,0,ind]
            yplot = mh1_relation_satellites_halos[0,i,1,ind] - mh1_relation_satellites_halos[0,0,1,ind]
            ax.plot(xplot[0],yplot[0],color=colors[i], linestyle='solid', label=labels[i])
            #ind = np.where((mh1_relation_satellites_halos[1,i,1,:] != 0) & (np.isinf(mh1_relation_satellites_halos[1,i,1,:]) == False))
            #xplot = mh1_relation_satellites_halos[1,i,0,ind]
            #yplot = mh1_relation_satellites_halos[1,i,1,ind] - mh1_relation_satellites_halos[1,0,1,ind]
            #ax.plot(xplot[0],yplot[0],color=colors[i], linestyle='dashed', label='with sat/cen confusion')
    common.prepare_legend(ax, ['blue', 'darkgreen', 'orange', 'red'], loc='best')
    plt.tight_layout()
    common.savefig(output_dir, fig, "HI_stacking_satellites_RelativeDiff.pdf")
    # Figure 4: difference relative to all satellites, vs sSFR.
    fig = plt.figure(figsize=(5,4.5))
    ytit="$\\rm \\Delta\\,log_{10}\\langle M_{\\rm HI}/M_{\\star}\\rangle$"
    xtit="$\\rm log_{10} (\\rm sSFR/yr^{-1})$"
    ax = fig.add_subplot(111)
    plt.subplots_adjust(bottom=0.15, left=0.15)
    common.prepare_ax(ax, -12, -9, -1, 1, xtit, ytit, locators=(0.5, 0.5, 0.2, 0.2))
    for i, lab in enumerate(labels):
        if(i >= 1):
            plot_obs_brown17(ax, bin_mass= i, mass=False, lab = False, delta=True, color=colors[i])
            plot_lagos18_HIstacking_relativediff(ax, mass_bin=i, mass=False, color=colors[i])
            ind = np.where((mh1_relation_ssfr_satellites_halos[0,i,1,:] != 0) & (np.isinf(mh1_relation_ssfr_satellites_halos[0,i,1,:]) == False))
            xplot = mh1_relation_ssfr_satellites_halos[0,i,0,ind]
            yplot = mh1_relation_ssfr_satellites_halos[0,i,1,ind] - mh1_relation_ssfr_satellites_halos[0,0,1,ind]
            ax.plot(xplot[0],yplot[0],color=colors[i], linestyle='solid')
            #ind = np.where((mh1_relation_satellites_halos[1,i,1,:] != 0) & (np.isinf(mh1_relation_satellites_halos[1,i,1,:]) == False))
            #xplot = mh1_relation_satellites_halos[1,i,0,ind]
            #yplot = mh1_relation_satellites_halos[1,i,1,ind] - mh1_relation_satellites_halos[1,0,1,ind]
            #ax.plot(xplot[0],yplot[0],color=colors[i], linestyle='dashed', label='with sat/cen confusion')
    #common.prepare_legend(ax, ['blue', 'darkgreen', 'orange', 'red'], loc='best')
    plt.tight_layout()
    common.savefig(output_dir, fig, "HI_stacking_satellites_ssfr_RelativeDiff.pdf")
def plot_molecular_gas_fraction(plt, output_dir, obs_dir, mgas_gals, mgas_relation, mh1_gals, mh1_relation, mh2_gals, mh2_relation,
                                mgas_relation_ltg, mh2_relation_ltg, mh1_relation_ltg, mgas_relation_etg, mh2_relation_etg, mh1_relation_etg,
                                mgas_ms_relation_ltg, mh2_ms_relation_ltg, mh1_ms_relation_ltg, mgas_ms_relation_etg, mh2_ms_relation_etg,
                                mh1_ms_relation_etg):
    """Six-panel figure of the HI+H2, HI and H2 gas fractions versus stellar
    mass: left column shows all galaxies (density contours plus the median),
    right column the LTG/ETG split for both morphology definitions, with
    xGASS/xCOLDGASS, Parkash+18, Brown+15 and Calette+18 data overlaid."""
    xmin, xmax, ymin, ymax = 9, 12, -3, 1
    fig = plt.figure(figsize=(11,11))
    # First subplot: total neutral gas fraction, all galaxies.
    ax = fig.add_subplot(321)
    plt.subplots_adjust(left=0.15)
    xtit="$\\rm log_{10} (\\rm M_{\\star}/M_{\odot})$"
    ytit="$\\rm log_{10}(M_{\\rm HI+H_2}/M_{\\star})$"
    prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
    #Predicted relation for all galaxies
    ind = np.where((mgas_gals[0,:] > 0) & (mgas_gals[1,:] != 0) )
    xdata = mgas_gals[0,ind]
    ydata = mgas_gals[1,ind]
    us.density_contour(ax, xdata[0], ydata[0], 30, 30) #, **contour_kwargs)

    # NOTE: both helpers close over `ax`, which is rebound before each panel,
    # so they always draw on the current subplot.
    def plot_mrelation(mrelation, color, label=None, linestyle=None):
        # Median relation without scatter; empty bins (median == 0) skipped.
        ind = np.where(mrelation[0,:] != 0)
        xplot = xmf[ind]
        yplot = mrelation[0,ind]
        linestyle = linestyle or ''
        ax.plot(xplot,yplot[0], color=color, label=label, linestyle=linestyle)
    def plot_mrelation_fill(mrelation, color, colorfill, label=None, linestyle=None):
        # Median relation with a shaded band from rows 1 and 2 (the scatter
        # returned by us.wmedians -- presumably percentiles; confirm there).
        ind = np.where(mrelation[0,:] != 0)
        xplot = xmf[ind]
        yplot = mrelation[0,ind]
        errdn = mrelation[1,ind]
        errup = mrelation[2,ind]
        ax.plot(xplot,yplot[0], color=color, label=label, linestyle=linestyle)
        ax.fill_between(xplot,yplot[0],yplot[0]-errdn[0], facecolor=colorfill, alpha=0.2,interpolate=True)
        ax.fill_between(xplot,yplot[0],yplot[0]+errup[0], facecolor=colorfill, alpha=0.2,interpolate=True)

    plot_mrelation(mgas_relation, 'k', linestyle='solid', label="Shark all galaxies")
    #Baldry (Chabrier IMF), ['Baldry+2012, z<0.06']
    add_observations_to_plot(obs_dir, 'NeutralGasRatio_NonDetEQZero.dat', ax, '^', "xCOLDGAS+xGASS(0)", color='grey')
    add_observations_to_plot(obs_dir, 'NeutralGasRatio_NonDetEQUpperLimits.dat', ax, 'v', "xCOLDGAS+xGASS(UL)")
    common.prepare_legend(ax, ['k','k','k'])
    # Second subplot: neutral gas fraction split by morphology.
    ax = fig.add_subplot(322)
    xmin, xmax, ymin, ymax = 9, 12, -4.5, 1
    prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
    plot_mrelation_fill(mgas_relation_ltg, 'b', 'b',label="Shark LTGs $(\\rm B/T)_{\\rm bar}$",linestyle='solid')
    plot_mrelation_fill(mgas_relation_etg, 'r', 'r',label="Shark ETGs $(\\rm B/T)_{\\rm bar}$",linestyle='solid')
    plot_mrelation(mgas_ms_relation_ltg, 'b',label="Shark LTGs $(\\rm B/T)_{\star}$",linestyle='dotted')
    plot_mrelation(mgas_ms_relation_etg, 'r',label="Shark ETGs $(\\rm B/T)_{\star}$",linestyle='dotted')
    # Legend
    common.prepare_legend(ax, ['b','r','b','r','k'],loc=3)
    # Third subplot: HI fraction, all galaxies.
    ax = fig.add_subplot(323)
    plt.subplots_adjust(left=0.15)
    xtit="$\\rm log_{10} (\\rm M_{\\star}/M_{\odot})$"
    ytit="$\\rm log_{10}(M_{\\rm HI}/M_{\\star})$"
    xmin, xmax, ymin, ymax = 9, 12, -3, 1
    prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
    #Predicted relation
    ind = np.where((mh1_gals[0,:] > 0) & (mh1_gals[1,:] != 0) )
    xdata = mh1_gals[0,ind]
    ydata = mh1_gals[1,ind]
    us.density_contour(ax, xdata[0], ydata[0], 30, 30) #, **contour_kwargs)
    plot_mrelation(mh1_relation, 'k', linestyle='solid')
    #Baldry (Chabrier IMF), ['Baldry+2012, z<0.06']
    add_observations_to_plot(obs_dir, 'HIGasRatio_NonDetEQZero.dat', ax, '^', "xGASS(0)", color='grey')
    add_observations_to_plot(obs_dir, 'HIGasRatio_NonDetEQUpperLimits.dat', ax, 'v', "xGASS(UL)")
    # Parkash+18: columns are stellar mass and HI mass, hence the y-x ratios.
    x, y, yerr_down, yerr_up = common.load_observation(obs_dir, 'Gas/Parkash18.dat', (0, 1, 2, 3))
    ax.errorbar(x,y-x,yerr=[(y-x) - (yerr_down-x),(yerr_up-x) - (y-x)], ls='None', mfc='r', fillstyle='full', ecolor = 'r', mec='r',marker='s',markersize=7, label="Parkash+18")
    # Brown+15: linear mass ratios with symmetric errors, converted to dex.
    m, mrat, merr = common.load_observation(obs_dir, 'Gas/RHI-Mstars_Brown15.dat', [0,1,2])
    errdn = np.log10(mrat) - np.log10(mrat - merr)
    errup = np.log10(mrat + merr) - np.log10(mrat)
    ax.errorbar(m,np.log10(mrat),yerr=[errdn,errup], ls='None', mfc='Salmon', fillstyle='full', ecolor = 'Salmon', mec='Salmon',marker='o',markersize=7, label="Brown+15")
    # Legend
    common.prepare_legend(ax, ['k','k','r','Salmon'], loc=1)
    # Fourth subplot: HI fraction split by morphology.
    ax = fig.add_subplot(324)
    xmin, xmax, ymin, ymax = 9, 12, -4.5, 1
    prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
    plot_mrelation_fill(mh1_relation_ltg, 'b', 'b',linestyle='solid')
    plot_mrelation_fill(mh1_relation_etg, 'r', 'r',linestyle='solid')
    plot_mrelation(mh1_ms_relation_ltg, 'b',linestyle='dotted')
    plot_mrelation(mh1_ms_relation_etg, 'r',linestyle='dotted')
    add_observations_to_plot(obs_dir, 'RHI-Mstars_Callette18-LTGs.dat', ax, 's', "Calette+18 LTGs", color='grey', err_absolute=True)
    add_observations_to_plot(obs_dir, 'RHI-Mstars_Callette18-ETGs.dat', ax, 'o', "Calette+18 ETGs", color='grey', err_absolute=True)
    # Legend
    common.prepare_legend(ax, ['grey','grey','grey'],loc=1)
    # Fifth subplot: H2 fraction, all galaxies.
    ax = fig.add_subplot(325)
    plt.subplots_adjust(left=0.15)
    xtit="$\\rm log_{10} (\\rm M_{\\star}/M_{\odot})$"
    ytit="$\\rm log_{10}(M_{\\rm H_2}/M_{\\star})$"
    xmin, xmax, ymin, ymax = 9, 12, -3, 1
    prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
    #Predicted relation
    ind = np.where((mh2_gals[0,:] > 0) & (mh2_gals[1,:] != 0) )
    xdata = mh2_gals[0,ind]
    ydata = mh2_gals[1,ind]
    us.density_contour(ax, xdata[0], ydata[0], 30, 30) #, **contour_kwargs)
    plot_mrelation(mh2_relation, 'k', linestyle='solid')
    #Baldry (Chabrier IMF), ['Baldry+2012, z<0.06']
    add_observations_to_plot(obs_dir, 'MolecularGasRatio_NonDetEQZero.dat', ax, '^', "xCOLDGASS(0)", color='grey')
    add_observations_to_plot(obs_dir, 'MolecularGasRatio_NonDetEQUpperLimits.dat', ax, 'v', "xCOLDGASS(UL)")
    common.prepare_legend(ax, ['k','k','k'], loc = 1)
    # Sixth subplot: H2 fraction split by morphology.
    ax = fig.add_subplot(326)
    xmin, xmax, ymin, ymax = 9, 12, -4.5, 1
    prepare_ax(ax, xmin, xmax, ymin, ymax, xtit, ytit)
    plot_mrelation_fill(mh2_relation_ltg, 'b', 'b',linestyle='solid')
    plot_mrelation_fill(mh2_relation_etg, 'r', 'r',linestyle='solid')
    plot_mrelation(mh2_ms_relation_ltg, 'b',linestyle='dotted')
    plot_mrelation(mh2_ms_relation_etg, 'r',linestyle='dotted')
    add_observations_to_plot(obs_dir, 'RH2-Mstars_Callette18-LTGs.dat', ax, 's', "Calette+18 LTGs",color='grey', err_absolute=True)
    add_observations_to_plot(obs_dir, 'RH2-Mstars_Callette18-ETGs.dat', ax, 'o', "Calette+18 ETGs",color='grey', err_absolute=True)
    # Legend
    common.prepare_legend(ax, ['grey','grey','grey'],loc=1)
    common.savefig(output_dir, fig, "molecular_gas_fraction.pdf")
def plot_h1h2_gas_fraction(plt, output_dir, mhr_relation, mhr_relation_cen, mhr_relation_sat):
    """Plot the molecular-to-atomic gas ratio log10(MH2/MHI) vs stellar mass.

    Shows the median relation with asymmetric errors for all galaxies, and
    the medians for centrals and satellites separately, then saves the
    figure as HIH2_gas_fraction.pdf in output_dir.

    Parameters
    ----------
    plt : module
        matplotlib.pyplot, as returned by common.load_matplotlib().
    output_dir : str
        Directory where the PDF is written.
    mhr_relation, mhr_relation_cen, mhr_relation_sat : np.ndarray
        Arrays of shape (3, len(xmf)) with [median, err_down, err_up] of
        log10(MH2/MHI) binned in stellar mass for all/central/satellite
        galaxies. Bins with value 0 are treated as unpopulated.
    """
    fig = plt.figure(figsize=(5, 4.5))
    ax = fig.add_subplot(111)
    # Raw strings avoid the invalid "\o" escape sequence (SyntaxWarning in
    # Python >= 3.12); the rendered LaTeX text is unchanged.
    xtit = r"$\rm log_{10} (\rm M_{\star}/M_{\odot})$"
    ytit = r"$\rm log_{10}(M_{\rm H_2}/M_{\rm HI})$"
    prepare_ax(ax, 8, 12, -3, 1.0, xtit, ytit)

    # All galaxies: median plus asymmetric error bars.
    ind = np.where(mhr_relation[0, :] != 0)
    xplot = xmf[ind]
    yplot = mhr_relation[0, ind]
    errdn = mhr_relation[1, ind]
    errup = mhr_relation[2, ind]
    ax.errorbar(xplot, yplot[0], color='k', label="all galaxies")
    ax.errorbar(xplot, yplot[0], yerr=[errdn[0], errup[0]], ls='None', mfc='None', ecolor='k', mec='k', marker='+', markersize=2)

    # Centrals only (median, no error bars).
    ind = np.where(mhr_relation_cen[0, :] != 0)
    xplot = xmf[ind]
    yplot = mhr_relation_cen[0, ind]
    ax.errorbar(xplot, yplot[0], color='b', linestyle='dotted', label="centrals")

    # Satellites only (median, no error bars). Note: label typo fixed.
    ind = np.where(mhr_relation_sat[0, :] != 0)
    xplot = xmf[ind]
    yplot = mhr_relation_sat[0, ind]
    ax.errorbar(xplot, yplot[0], color='r', linestyle='dashed', label="satellites")

    common.prepare_legend(ax, ['k', 'b', 'r', 'grey', 'grey'])
    common.savefig(output_dir, fig, "HIH2_gas_fraction.pdf")
def main(model_dir, output_dir, redshift_table, subvols, obs_dir):
    """Produce all cold-gas fraction plots for the selected snapshots.

    Reads the shark HDF5 galaxy catalogues for z=0 and z~0.38, bins the gas
    properties via prepare_data(), and writes the figures (only for the z=0
    snapshot) to output_dir.

    Parameters
    ----------
    model_dir : str
        Root directory of the shark model output.
    output_dir : str
        Directory where the PDF figures are written.
    redshift_table :
        Mapping from redshift to snapshot number (indexable by a list of redshifts).
    subvols :
        Subvolumes of the simulation to read.
    obs_dir : str
        Directory holding the observational data files used by the plots.
    """
    # Redshifts to analyse; only the first (z=0) is plotted below.
    zlist = [0, 0.381963715160695]

    plt = common.load_matplotlib()
    # Galaxy properties required by prepare_data().
    fields = {'galaxies': ('type', 'mstars_disk', 'mstars_bulge',
                           'rstar_disk', 'm_bh', 'matom_disk', 'mmol_disk', 'mgas_disk',
                           'matom_bulge', 'mmol_bulge', 'mgas_bulge', 'mvir_hosthalo',
                           'sfr_disk', 'sfr_burst','id_halo_tree')}

    for index, snapshot in enumerate(redshift_table[zlist]):
        hdf5_data = common.read_data(model_dir, snapshot, fields, subvols)
        # prepare_data returns all the binned relations consumed by the
        # plotting routines below.
        (mgas_relation, mgas_relation_cen, mgas_relation_sat,
         mh2_gals, mh1_gals, mgas_gals,
         mh2_relation, mh1_relation, mhr_relation, mhr_relation_cen, mhr_relation_sat,
         mgas_relation_ltg, mh2_relation_ltg, mh1_relation_ltg,
         mgas_relation_etg, mh2_relation_etg, mh1_relation_etg,
         mgas_ms_relation_ltg, mh2_ms_relation_ltg, mh1_ms_relation_ltg,
         mgas_ms_relation_etg, mh2_ms_relation_etg, mh1_ms_relation_etg,
         mh1_relation_satellites_halos, mh1_relation_ssfr_satellites_halos,
         mh1_relation_satellites_halos_stack, mh1_relation_ssfr_satellites_halos_stack) = prepare_data(index, zlist[index], hdf5_data)

        # Only the z=0 snapshot is plotted; the other redshift is read but
        # not visualised here.
        if(index == 0):
            plot_cold_gas_fraction(plt, output_dir, obs_dir, mgas_relation, mgas_relation_cen, mgas_relation_sat)
            plot_HI_stacking(plt, output_dir, obs_dir, mh1_relation_satellites_halos_stack, mh1_relation_ssfr_satellites_halos_stack)
            plot_molecular_gas_fraction(plt, output_dir, obs_dir, mgas_gals, mgas_relation, mh1_gals, mh1_relation, mh2_gals, mh2_relation, mgas_relation_ltg,
                mh2_relation_ltg, mh1_relation_ltg, mgas_relation_etg, mh2_relation_etg, mh1_relation_etg, mgas_ms_relation_ltg, mh2_ms_relation_ltg,
                mh1_ms_relation_ltg, mgas_ms_relation_etg, mh2_ms_relation_etg, mh1_ms_relation_etg)
            plot_h1h2_gas_fraction(plt, output_dir, mhr_relation, mhr_relation_cen, mhr_relation_sat)
if __name__ == '__main__':
    # Entry point: common.parse_args() supplies (model_dir, output_dir,
    # redshift_table, subvols, obs_dir) from the standard shark CLI.
    main(*common.parse_args())
|
ICRARREPO_NAMEsharkPATH_START.@shark_extracted@shark-master@standard_plots@coldgas.py@.PATH_END.py
|
{
"filename": "readme.md",
"repo_name": "AarhusCosmology/connect_public",
"repo_path": "connect_public_extracted/connect_public-main/data/lhc_samples/readme.md",
"type": "Markdown"
}
|
This is where Latin hypercube seeds are stored in
order to reuse them instead of computing new ones
|
AarhusCosmologyREPO_NAMEconnect_publicPATH_START.@connect_public_extracted@connect_public-main@data@lhc_samples@readme.md@.PATH_END.py
|
{
"filename": "transforms.ipynb",
"repo_name": "dfm/tinygp",
"repo_path": "tinygp_extracted/tinygp-main/docs/tutorials/transforms.ipynb",
"type": "Jupyter Notebook"
}
|
```python
try:
import tinygp
except ImportError:
%pip install -q tinygp
try:
import flax
except ImportError:
%pip install -q flax
try:
import optax
except ImportError:
%pip install -q optax
```
(transforms)=
# Kernel Transforms
`tinygp` is designed to make it easy to implement new kernels (see {ref}`kernels` for an example), but a particular set of customizations that `tinygp` supports with a high-level interface are coordinate transforms.
The basic idea here is that you may want to pass your input coordinates through a linear or non-linear transformation before evaluating one of the standard kernels in that transformed space.
This is particularly useful for multivariate inputs where, for example, you may want to capture the different units, or prior covariances between dimensions.
(transforms-dkl)=
## Example: Deep kernel learning
The [Deep Kernel Learning](https://arxiv.org/abs/1511.02222) model is an example of a more complicated kernel transform, and since `tinygp` integrates well with libraries like `flax` (see {ref}`modeling`) the implementation of such a model is fairly straightforward.
To demonstrate, let's start by sampling a simulated dataset from a step function, a model that a GP would typically struggle to model:
```python
import matplotlib.pyplot as plt
import numpy as np
random = np.random.default_rng(567)
noise = 0.1
x = np.sort(random.uniform(-1, 1, 100))
y = 2 * (x > 0) - 1 + random.normal(0.0, noise, len(x))
t = np.linspace(-1.5, 1.5, 500)
plt.plot(t, 2 * (t > 0) - 1, "k", lw=1, label="truth")
plt.plot(x, y, ".k", label="data")
plt.xlim(-1.5, 1.5)
plt.ylim(-1.3, 1.3)
plt.xlabel("x")
plt.ylabel("y")
_ = plt.legend()
```
Then we will fit this model using a model similar to the one described in {ref}`modeling-flax`, except our kernel will include a custom {class}`tinygp.kernels.Transform` that will pass the input coordinates through a (small) neural network before passing them into a {class}`tinygp.kernels.Matern32` kernel.
Otherwise, the model and optimization procedure are similar to the ones used in {ref}`modeling-flax`.
We compare the performance of the Deep Matern-3/2 kernel (a {class}`tinygp.kernels.Matern32` kernel, with custom neural network transform) to the performance of the same kernel without the transform. The untransformed model doesn't have the capacity to capture our simulated step function, but our transformed model does. In our transformed model, the hyperparameters of our kernel now include the weights of our neural network transform, and we learn those simultaneously with the length scale and amplitude of the `Matern32` kernel.
```python
import flax.linen as nn
import jax
import jax.numpy as jnp
import optax
from flax.linen.initializers import zeros
from tinygp import GaussianProcess, kernels, transforms
```
```python
class Matern32Loss(nn.Module):
@nn.compact
def __call__(self, x, y, t):
# Set up a typical Matern-3/2 kernel
log_sigma = self.param("log_sigma", zeros, ())
log_rho = self.param("log_rho", zeros, ())
log_jitter = self.param("log_jitter", zeros, ())
base_kernel = jnp.exp(2 * log_sigma) * kernels.Matern32(jnp.exp(log_rho))
# Evaluate and return the GP negative log likelihood as usual
gp = GaussianProcess(
base_kernel, x[:, None], diag=noise**2 + jnp.exp(2 * log_jitter)
)
log_prob, gp_cond = gp.condition(y, t[:, None])
return -log_prob, (gp_cond.loc, gp_cond.variance)
```
```python
class Transformer(nn.Module):
"""A small neural network used to non-linearly transform the input data"""
@nn.compact
def __call__(self, x):
x = nn.Dense(features=15)(x)
x = nn.relu(x)
x = nn.Dense(features=10)(x)
x = nn.relu(x)
x = nn.Dense(features=1)(x)
return x
class DeepLoss(nn.Module):
@nn.compact
def __call__(self, x, y, t):
# Set up a typical Matern-3/2 kernel
log_sigma = self.param("log_sigma", zeros, ())
log_rho = self.param("log_rho", zeros, ())
log_jitter = self.param("log_jitter", zeros, ())
base_kernel = jnp.exp(2 * log_sigma) * kernels.Matern32(jnp.exp(log_rho))
# Define a custom transform to pass the input coordinates through our
# `Transformer` network from above
# Note: with recent version of flax, you can't directly vmap modules,
# but we can get around that by explicitly constructing the init and
# apply functions. Ref:
# https://flax.readthedocs.io/en/latest/advanced_topics/lift.html
transform = Transformer()
transform_params = self.param("transform", transform.init, x[:1])
apply_fn = lambda x: transform.apply(transform_params, x)
kernel = transforms.Transform(apply_fn, base_kernel)
# Evaluate and return the GP negative log likelihood as usual with the
# transformed features
gp = GaussianProcess(
kernel, x[:, None], diag=noise**2 + jnp.exp(2 * log_jitter)
)
log_prob, gp_cond = gp.condition(y, t[:, None])
# We return the loss, the conditional mean and variance, and the
# transformed input parameters
return (
-log_prob,
(gp_cond.loc, gp_cond.variance),
(transform(x[:, None]), transform(t[:, None])),
)
# Define and train the model
def loss_func(model):
def loss(params):
return model.apply(params, x, y, t)[0]
return loss
models_list, params_list = [], []
loss_vals = {}
# Plot the results and compare to the true model
fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(9, 3))
for it, (model_name, model) in enumerate(
zip(
["Deep", "Matern32"],
[DeepLoss(), Matern32Loss()],
)
):
loss_vals[it] = []
params = model.init(jax.random.PRNGKey(1234), x, y, t)
tx = optax.sgd(learning_rate=1e-4)
opt_state = tx.init(params)
loss = loss_func(model)
loss_grad_fn = jax.jit(jax.value_and_grad(loss))
for i in range(1000):
loss_val, grads = loss_grad_fn(params)
updates, opt_state = tx.update(grads, opt_state)
params = optax.apply_updates(params, updates)
loss_vals[it].append(loss_val)
mu, var = model.apply(params, x, y, t)[1]
ax[it].plot(t, 2 * (t > 0) - 1, "k", lw=1, label="truth")
ax[it].plot(x, y, ".k", label="data")
ax[it].plot(t, mu)
ax[it].fill_between(
t, mu + np.sqrt(var), mu - np.sqrt(var), alpha=0.5, label="model"
)
ax[it].set_xlim(-1.5, 1.5)
ax[it].set_ylim(-1.3, 1.3)
ax[it].set_xlabel("x")
ax[it].set_ylabel("y")
ax[it].set_title(model_name)
_ = ax[it].legend()
models_list.append(model)
params_list.append(params)
```
The untransformed `Matern32` model suffers from over-smoothing at the discontinuity, and poor extrapolation performance.
The `Deep` model extrapolates well and captures the discontinuity reliably.
We can compare the training loss (negative log likelihood) traces for these two models:
```python
fig = plt.plot()
plt.plot(loss_vals[0], label="Deep")
plt.plot(loss_vals[1], label="Matern32")
plt.ylabel("Loss")
plt.xlabel("Training Iterations")
_ = plt.legend()
```
To inspect what the transformed model is doing under the hood, we can plot the functional form of the transformation, as well as the transformed values of our input coordinates:
```python
x_transform, t_transform = models_list[0].apply(params_list[0], x, y, t)[2]
fig = plt.figure()
plt.plot(t, t_transform, "k")
plt.xlim(-1.5, 1.5)
plt.ylim(-1.3, 1.3)
plt.xlabel("input data; x")
plt.ylabel("transformed data; x'")
fig, ax = plt.subplots(ncols=2, sharey=True, figsize=(9, 3))
for it, (fig_title, feature_input, x_label) in enumerate(
zip(["Input Data", "Transformed Data"], [x, x_transform], ["x", "x'"])
):
ax[it].plot(feature_input, y, ".k")
ax[it].set_xlim(-1.5, 1.5)
ax[it].set_ylim(-1.3, 1.3)
ax[it].set_title(fig_title)
ax[it].set_xlabel(x_label)
ax[it].set_ylabel("y")
```
The neural network transforms the input feature into a step function like data (as shown in the figures above) before feeding to the base kernel, making it better suited than the baseline model for this data.
```python
```
|
dfmREPO_NAMEtinygpPATH_START.@tinygp_extracted@tinygp-main@docs@tutorials@transforms.ipynb@.PATH_END.py
|
{
"filename": "about_pylbo.md",
"repo_name": "n-claes/legolas",
"repo_path": "legolas_extracted/legolas-master/docs/pylbo/about_pylbo.md",
"type": "Markdown"
}
|
---
title: The Python package Pylbo
layout: single
classes: wide
sidebar:
nav: "leftcontents"
last_modified_at: 2021-07-27
---
When Legolas finishes solving the eigenvalue problem it writes all information to a specially
tailored datfile. First a header is written, containing information such as the geometry, chosen equilibrium,
parameters, unit normalisations and names of the equilibrium variables. The header is followed by the actual data
(eigenvalues, equilibrium arrays etc.), supplemented with eigenfunctions and matrix data if those are requested as well.
As all of this data is written out in a binary format for efficient data-storage, it is not so straightforward to
actually read its information since you'll have to keep track of which variables are in there and in which order
they are stored.
Since this is far from user-friendly we developed the post-processing Python package, `Pylbo`, short for
"**Py**thon for **L**egolas **B**inary **O**utput". Pylbo enables you to easily load in Legolas datfiles
and access all information stored within. We even developed special classes to do post-processing, for more information
and examples we refer to [this page](../using_pylbo). Maybe at some point in the future Pylbo will move to its own dedicated
repository and be included as a submodule in the legolas repository, but for now it is included in the
[`post_processing`](https://github.com/n-claes/legolas/tree/master/post_processing) folder.
Note that if you do Legolas runs at high resolution and save the eigenfunctions, the files can easily be a few
gigabytes in size. It may be useful to know that Pylbo _never_ loads the file into memory. Instead we keep track
of the various offsets of the datablocks, which are in turn used to read in data on a query-basis.
Doing it like this means a huge boost in performance and decreases memory usage significantly, and implies that you can
easily do analysis on larger-than-RAM datasets.
For example, say you did a huge resolution run where you saved the matrices and eigenfunctions, which resulted in a
10 Gb datfile. Loading this into Pylbo will be instantaneous, since the only thing that Pylbo "calculates" are the
data offsets. Whenever you query data, Pylbo will seek the corresponding offset in the binary stream
(which is a fast operation) and read only that specific datachunk into memory.
To put some numbers on it, you can load a series of datfiles each a few Gb in size simultaneously, and Pylbo will use
a few Mb of memory, tops.
<i class="fas fa-lightbulb" aria-hidden="true"></i>
**Note**: all interactive API's (plotting eigenfunctions, continua, etc.) in the Pylbo framework are meant to do a
fast visual inspection of the data generated by Legolas, and the resulting figures are not assumed to be publication-worthy.
Make use of the easy data access provided by Pylbo instead to create some nice figures.
{: .notice--success}
|
n-claesREPO_NAMElegolasPATH_START.@legolas_extracted@legolas-master@docs@pylbo@about_pylbo.md@.PATH_END.py
|
{
"filename": "test_join.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/indexes/timedeltas/test_join.py",
"type": "Python"
}
|
import numpy as np
from pandas import (
DataFrame,
Index,
Timedelta,
timedelta_range,
)
import pandas._testing as tm
class TestJoin:
def test_append_join_nondatetimeindex(self):
rng = timedelta_range("1 days", periods=10)
idx = Index(["a", "b", "c", "d"])
result = rng.append(idx)
assert isinstance(result[0], Timedelta)
# it works
rng.join(idx, how="outer")
def test_join_self(self, join_type):
index = timedelta_range("1 day", periods=10)
joined = index.join(index, how=join_type)
tm.assert_index_equal(index, joined)
def test_does_not_convert_mixed_integer(self):
df = DataFrame(np.ones((5, 5)), columns=timedelta_range("1 day", periods=5))
cols = df.columns.join(df.index, how="outer")
joined = cols.join(df.columns)
assert cols.dtype == np.dtype("O")
assert cols.dtype == joined.dtype
tm.assert_index_equal(cols, joined)
def test_join_preserves_freq(self):
# GH#32157
tdi = timedelta_range("1 day", periods=10)
result = tdi[:5].join(tdi[5:], how="outer")
assert result.freq == tdi.freq
tm.assert_index_equal(result, tdi)
result = tdi[:5].join(tdi[6:], how="outer")
assert result.freq is None
expected = tdi.delete(5)
tm.assert_index_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@indexes@timedeltas@test_join.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "juliotux/astropop",
"repo_path": "astropop_extracted/astropop-main/astropop/astrometry/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Process astrometry calibration using several algorithms."""
from .astrometrynet import AstrometryNetUnsolvedField, \
AstrometrySolver, \
AstrometricSolution, \
solve_astrometry_image, \
solve_astrometry_xy, \
solve_astrometry_hdu, \
solve_astrometry_framedata, \
create_xyls
from .manual_wcs import wcs_from_coords
from .coords_utils import guess_coordinates
# Public names re-exported by ``from astropop.astrometry import *``.
__all__ = ['guess_coordinates', 'wcs_from_coords', 'AstrometrySolver',
           'AstrometricSolution', 'solve_astrometry_hdu',
           'solve_astrometry_xy', 'solve_astrometry_image',
           'solve_astrometry_framedata', 'create_xyls',
           'AstrometryNetUnsolvedField']
|
juliotuxREPO_NAMEastropopPATH_START.@astropop_extracted@astropop-main@astropop@astrometry@__init__.py@.PATH_END.py
|
{
"filename": "convergeT_parker.py",
"repo_name": "dlinssen/sunbather",
"repo_path": "sunbather_extracted/sunbather-main/src/convergeT_parker.py",
"type": "Python"
}
|
#sunbather imports
import tools
import solveT
#other imports
import pandas as pd
import numpy as np
import multiprocessing
from shutil import copyfile
import time
import os
import re
import argparse
import traceback
def find_close_model(parentfolder, T, Mdot, tolT=2000, tolMdot=1.0):
    """
    Searches a parent folder of 1D Parker profile simulations for a finished
    model close to the requested (T, Mdot), so a new simulation can start
    from its converged temperature structure.

    Parameters
    ----------
    parentfolder : str
        Parent folder containing sunbather simulations in folders named with
        the parker_*T0*_*Mdot* format.
    T : numeric
        Target isothermal temperature in units of K.
    Mdot : numeric
        log of the target mass-loss rate in units of g s-1.
    tolT : numeric, optional
        Maximum T0 difference with the target temperature, by default 2000 K
    tolMdot : numeric, optional
        Maximum log10(Mdot) difference with the target mass-loss rate, by default 1 dex

    Returns
    -------
    clconv : list
        [T0, Mdot] of the closest finished model, or [None, None] if none
        were found within the tolerances.
    """
    name_re = re.compile(r'parker_\d+_\d+\.\d{3}$')  # expected folder name format

    # Collect [T0, Mdot] of every finished run (one with a converged.out file).
    finished = []
    for entry in os.listdir(parentfolder):
        full = os.path.join(parentfolder, entry)
        if not (name_re.match(entry) and os.path.isdir(full)):
            continue
        if os.path.isfile(os.path.join(full, 'converged.out')):
            _, T0_str, Mdot_str = entry.split('_')
            finished.append([int(T0_str), float(Mdot_str)])

    target = [int(T), float(Mdot)]
    if target in finished:  # never report the requested model as its own neighbour
        finished.remove(target)

    if not finished:  # no candidates at all -> fall back to the default start
        return [None, None]

    # Distance metric: 1 dex in Mdot is weighted the same as 2000 K in T0.
    def dist(model):
        return (model[0] - target[0])**2 + (2000 * (model[1] - target[1]))**2

    clconv = min(finished, key=dist)
    if (np.abs(clconv[0] - target[0]) > tolT) or (np.abs(clconv[1] - target[1]) > tolMdot):
        clconv = [None, None]

    return clconv
def run_s(plname, Mdot, T, itno, fc, dir, SEDname, overwrite, startT, pdir, zdict=None, altmax=8, save_sp=[], constantT=False, maxit=16):
    """
    Solves for a nonisothermal temperature profile of a single isothermal Parker wind (density and velocity) profile.

    Parameters
    ----------
    plname : str
        Planet name (must have parameters stored in $SUNBATHER_PROJECT_PATH/planets.txt).
    Mdot : str or numeric
        log of the mass-loss rate in units of g s-1.
    T : str or int
        Isothermal temperature of the Parker wind profile in units of K.
    itno : int
        Iteration number to start from (can only be different from 1
        if this same model has been ran before, and then also
        overwrite = True needs to be set). If value is 0, will automatically
        look for the highest iteration number to start from.
    fc : numeric
        H/C convergence factor, see Linssen et al. (2024). A sensible value is 1.1.
    dir : str
        Directory as $SUNBATHER_PROJECT_PATH/sims/1D/planetname/*dir*/
        where the temperature profile will be solved. A folder named
        parker_*T*_*Mdot*/ will be made there.
    SEDname : str
        Name of SED file to use. If SEDname='real', we use the name as
        given in the planets.txt file, but if SEDname is something else,
        we advise to use a separate dir folder for this.
    overwrite : bool
        Whether to overwrite if this simulation already exists.
    startT : str
        Either 'constant', 'free' or 'nearby'. Sets the initial
        temperature profile guessed/used for the first iteration.
        'constant' sets it equal to the parker wind isothermal value.
        'free' lets Cloudy solve it, so you will get the radiative equilibrium structure.
        'nearby' looks in the dir folder for previously solved
        Parker wind profiles and starts from a converged one. Then, if no converged
        ones are available, uses 'free' instead.
    pdir : str
        Directory as $SUNBATHER_PROJECT_PATH/parker_profiles/planetname/*pdir*/
        where we take the isothermal parker wind density and velocity profiles from.
        Different folders may exist there for a given planet, to separate for example profiles
        with different assumptions such as stellar SED/semi-major axis/composition.
    zdict : dict, optional
        Dictionary with the scale factors of all elements relative
        to the default solar composition. Can be easily created with tools.get_zdict().
        Default is None, which results in a solar composition.
    altmax : int, optional
        Maximum altitude of the simulation in units of planet radius, by default 8
    save_sp : list, optional
        A list of atomic/ionic species to let Cloudy save the number density profiles
        for. Those are needed when doing radiative transfer to produce
        transmission spectra. For example, to be able to make
        metastable helium spectra, 'He' needs to be in the save_sp list. By default [].
    constantT : bool, optional
        If True, instead of solving for a nonisothermal temperature profile,
        the Parker wind profile is ran at the isothermal value. By default False.
    maxit : int, optional
        Maximum number of iterations, by default 16.
    """
    #NOTE(review): the mutable default save_sp=[] is only read in this
    #function, never mutated, so it is harmless here; save_sp=None would
    #still be more robust.
    Mdot = "%.3f" % float(Mdot) #enforce this format to get standard file names.
    T = str(T)
    #set up the planet object
    planet = tools.Planet(plname)
    if SEDname != 'real':
        planet.set_var(SEDname=SEDname)
    #set up the folder structure
    pathTstruc = tools.projectpath+'/sims/1D/'+planet.name+'/'+dir+'/'
    path = pathTstruc+'parker_'+T+'_'+Mdot+'/'
    #check if this parker profile exists in the given pdir
    try:
        pprof = tools.read_parker(planet.name, T, Mdot, pdir)
    except FileNotFoundError:
        print("This parker profile does not exist:", tools.projectpath+'/parker_profiles/'+planet.name+'/'+pdir+'/pprof_'+planet.name+'_T='+str(T)+'_M='+Mdot+'.txt')
        return #quit the run_s function but not the code
    #check for overwriting
    if os.path.isdir(path): #the simulation exists already
        if not overwrite:
            print("Simulation already exists and overwrite = False:", plname, dir, Mdot, T)
            return #this quits the function but if we're running a grid, it doesn't quit the whole Python code
    else:
        os.mkdir(path[:-1]) #make the folder
    #get profiles and parameters we need for the input file
    alt = pprof.alt.values
    hden = tools.rho_to_hden(pprof.rho.values, abundances=tools.get_abundances(zdict))
    dlaw = tools.alt_array_to_Cloudy(alt, hden, altmax, planet.R, 1000, log=True)
    #scale the SED flux from its 1 AU normalisation to the simulation's outer boundary
    nuFnu_1AU_linear, Ryd = tools.get_SED_norm_1AU(planet.SEDname)
    nuFnu_a_log = np.log10(nuFnu_1AU_linear / ((planet.a - altmax*planet.R)/tools.AU)**2)
    #provenance comments written into the Cloudy input file
    comments = '# plname='+planet.name+'\n# parker_T='+str(T)+'\n# parker_Mdot='+str(Mdot)+'\n# parker_dir='+pdir+'\n# altmax='+str(altmax)
    if constantT: #this will run the profile at the isothermal T value instead of converging a nonisothermal profile
        if save_sp == []:
            tools.write_Cloudy_in(path+'constantT', title=planet.name+' 1D Parker with T='+str(T)+' and log(Mdot)='+str(Mdot),
                                    flux_scaling=[nuFnu_a_log, Ryd], SED=planet.SEDname, dlaw=dlaw, double_tau=True,
                                    overwrite=overwrite, cosmic_rays=True, zdict=zdict, comments=comments, constantT=T)
        else:
            tools.write_Cloudy_in(path+'constantT', title=planet.name+' 1D Parker with T='+str(T)+' and log(Mdot)='+str(Mdot),
                                    flux_scaling=[nuFnu_a_log, Ryd], SED=planet.SEDname, dlaw=dlaw, double_tau=True,
                                    overwrite=overwrite, cosmic_rays=True, zdict=zdict, comments=comments, constantT=T,
                                    outfiles=['.den', '.en'], denspecies=save_sp, selected_den_levels=True)
        tools.run_Cloudy('constantT', folder=path) #run the Cloudy simulation
        return
    #if we got to here, we are not doing a constantT simulation, so we set up the convergence scheme files
    #write Cloudy template input file - each iteration will add their current temperature structure to this template
    tools.write_Cloudy_in(path+'template', title=planet.name+' 1D Parker with T='+str(T)+' and log(Mdot)='+str(Mdot),
                            flux_scaling=[nuFnu_a_log, Ryd], SED=planet.SEDname, dlaw=dlaw, double_tau=True,
                            overwrite=overwrite, cosmic_rays=True, zdict=zdict, comments=comments)
    if itno == 0: #this means we resume from the highest found previously ran iteration
        pattern = r'iteration(\d+)\.out' #search pattern: iteration followed by an integer
        max_iteration = -1 #set an impossible number
        for filename in os.listdir(path): #loop through all files/folder in the path
            if os.path.isfile(os.path.join(path, filename)): #if it is a file (not a folder)
                if re.search(pattern, filename): #if it matches the pattern
                    iteration_number = int(re.search(pattern, filename).group(1)) #extract the iteration number
                    if iteration_number > max_iteration: #update highest found iteration number
                        max_iteration = iteration_number
        if max_iteration == -1: #this means no files were found
            print(f"This folder does not contain any iteration files {path}, so I cannot resume from the highest one. Will instead start at itno = 1.")
            itno = 1
        else:
            print(f"Found the highest iteration {path}iteration{max_iteration}, will resume at that same itno.")
            itno = max_iteration
    if itno == 1:
        #get starting temperature structure
        clconv = find_close_model(pathTstruc, T, Mdot) #find if there are any nearby models we can start from
        if startT == 'constant': #then we start with the isothermal value
            tools.copyadd_Cloudy_in(path+'template', path+'iteration1', constantT=T)
        elif clconv == [None, None] or startT == 'free': #then we start in free (=radiative eq.) mode
            copyfile(path+'template.in', path+'iteration1.in')
        elif startT == 'nearby': #then clconv cannot be [None, None] and we start from a previous converged T(r)
            print(f"Model {path} starting from previously converged temperature profile: T0 = {clconv[0]}, Mdot = {clconv[1]}")
            prev_conv_T = pd.read_table(pathTstruc+'parker_'+str(clconv[0])+'_'+"{:.3f}".format(clconv[1])+'/converged.txt', delimiter=' ')
            Cltlaw = tools.alt_array_to_Cloudy(prev_conv_T.R * planet.R, prev_conv_T.Te, altmax, planet.R, 1000)
            tools.copyadd_Cloudy_in(path+'template', path+'iteration1', tlaw=Cltlaw)
    #with everything in order, run the actual temperature convergence scheme
    solveT.run_loop(path, itno, fc, save_sp, maxit)
def catch_errors_run_s(*args):
    """Wrapper around run_s() that catches and reports any exception.

    Used as the multiprocessing worker target so that one failing model
    prints its traceback without killing the rest of the grid.

    Parameters
    ----------
    *args
        Positional arguments forwarded verbatim to run_s().
    """
    try:
        run_s(*args)
    except Exception:  # deliberately broad: keep the worker pool alive
        traceback.print_exc()
def run_g(plname, cores, Mdot_l, Mdot_u, Mdot_s, T_l, T_u, T_s, fc, dir, SEDname, overwrite, startT, pdir, zdict, altmax, save_sp, constantT, maxit):
    """
    Solves for a nonisothermal temperature profile of a grid of isothermal Parker wind models,
    by executing the run_s() function in parallel.

    Parameters
    ----------
    plname : str
        Planet name (must have parameters stored in $SUNBATHER_PROJECT_PATH/planets.txt).
    cores : int
        Number of parallel processes to spawn (i.e., number of CPU cores).
    Mdot_l, Mdot_u, Mdot_s : str or numeric
        Lower bound, upper bound and step size of the log10(mass-loss rate)
        grid in units of g s-1.
    T_l, T_u, T_s : str or numeric
        Lower bound, upper bound and step size of the temperature grid in units of K.
    fc : numeric
        H/C convergence factor, see Linssen et al. (2024). A sensible value is 1.1.
    dir : str
        Directory as $SUNBATHER_PROJECT_PATH/sims/1D/planetname/*dir*/
        where the temperature profiles will be solved.
    SEDname : str
        Name of SED file to use, or 'real' to take the one from the planets.txt file.
    overwrite : bool
        Whether to overwrite if a simulation already exists.
    startT : str
        Initial temperature structure: 'constant', 'free' or 'nearby' (see run_s()).
    pdir : str
        Directory as $SUNBATHER_PROJECT_PATH/parker_profiles/planetname/*pdir*/
        holding the isothermal Parker wind density and velocity profiles.
    zdict : dict
        Dictionary with the scale factors of all elements relative to solar
        (see tools.get_zdict()). None results in a solar composition.
    altmax : int
        Maximum altitude of the simulation in units of planet radius.
    save_sp : list
        Atomic/ionic species for which Cloudy saves the number density profiles.
    constantT : bool
        If True, run the profiles at the isothermal value instead of solving
        for a nonisothermal temperature profile.
    maxit : int
        Maximum number of iterations per model.
    """
    # Build the (Mdot, T) grid up front; the +1e-6 makes the upper bounds
    # inclusive despite floating-point rounding in np.arange. itno is fixed
    # to 1 because grid runs always start from scratch.
    pars = [(plname, Mdot, T, 1, fc, dir, SEDname, overwrite, startT, pdir,
             zdict, altmax, save_sp, constantT, maxit)
            for Mdot in np.arange(float(Mdot_l), float(Mdot_u)+1e-6, float(Mdot_s))
            for T in np.arange(int(T_l), int(T_u)+1e-6, int(T_s)).astype(int)]

    # catch_errors_run_s ensures one failing model cannot kill the pool.
    p = multiprocessing.Pool(cores)
    try:
        p.starmap(catch_errors_run_s, pars)
    finally:
        # Previously the worker processes leaked if starmap raised (e.g. on
        # KeyboardInterrupt); always close the pool and reap the workers.
        p.close()
        p.join()
if __name__ == '__main__':
class OneOrThreeAction(argparse.Action):
    """argparse action that accepts exactly one or exactly three values.

    A single value specifies one model; three values specify a grid as
    lowest, highest, stepsize.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        n_given = len(values)
        if n_given != 1 and n_given != 3:
            parser.error("Exactly one or three values are required.")
        setattr(namespace, self.dest, values)
class AddDictAction(argparse.Action):
    """argparse action collecting repeated 'key=value' pairs into a dict.

    Values are parsed as floats, e.g. '-z He=10 C=0.5' yields
    {'He': 10.0, 'C': 0.5}. Repeated use of the flag extends the same
    dictionary.
    """

    def __call__(self, parser, namespace, values, option_string=None):
        # Lazily create the target dict on first use.
        current = getattr(namespace, self.dest, None)
        if current is None:
            current = {}
            setattr(namespace, self.dest, current)
        for pair in values:
            key, raw = pair.split('=')
            current[key] = float(raw)
t0 = time.time()
parser = argparse.ArgumentParser(description="Runs the temperature convergence for 1D Parker profile(s).")
parser.add_argument("-plname", required=True, help="planet name (must be in planets.txt)")
parser.add_argument("-dir", required=True, type=str, help="folder where the temperature structures are solved. e.g. Tstruc_fH_0.9 or Tstruc_z_100_3xEUV etc.")
parser.add_argument("-pdir", required=True, type=str, help="parker profile folder/dir to use, e.g. fH_0.9 or z_100.")
parser.add_argument("-Mdot", required=True, type=float, nargs='+', action=OneOrThreeAction, help="log10(mass-loss rate), or three values specifying a grid of " \
"mass-loss rates: lowest, highest, stepsize. -Mdot will be rounded to three decimal places.")
parser.add_argument("-T", required=True, type=int, nargs='+', action=OneOrThreeAction, help="temperature, or three values specifying a grid of temperatures: lowest, highest, stepsize.")
parser.add_argument("-cores", type=int, default=1, help="number of parallel runs [default=1]")
parser.add_argument("-fc", type=float, default=1.1, help="convergence factor (heat/cool should be below this value) [default=1.1]")
parser.add_argument("-startT", choices=["nearby", "free", "constant"], default="nearby", help="initial T structure, either 'constant', 'free' or 'nearby' [default=nearby]")
parser.add_argument("-itno", type=int, default=1, help="starting iteration number (itno != 1 only works with -overwrite). As a special use, you can pass " \
"-itno 0 which will automatically find the highest previously ran iteration number [default=1]")
parser.add_argument("-maxit", type=int, default=20, help="maximum number of iterations [default = 20]")
parser.add_argument("-SEDname", type=str, default='real', help="name of SED to use. Must be in Cloudy's data/SED/ folder [default=SEDname set in planet.txt file]")
parser.add_argument("-overwrite", action='store_true', help="overwrite existing simulation if passed [default=False]")
parser.add_argument("-z", type=float, default=1., help="metallicity (=scale factor relative to solar for all elements except H and He) [default=1.]")
parser.add_argument("-zelem", action = AddDictAction, nargs='+', default = {}, help="abundance scale factor for specific elements, e.g. -zelem Fe=10 -zelem He=0.01. " \
"Can also be used to toggle elements off, e.g. -zelem Ca=0. Combines with -z argument. Using this " \
"command results in running p_winds in an an iterative scheme where Cloudy updates the mu parameter.")
parser.add_argument("-altmax", type=int, default=8, help="maximum altitude of the simulation in units of Rp. [default=8]")
parser.add_argument("-save_sp", type=str, nargs='+', default=['all'], help="atomic or ionic species to save densities for (needed for radiative transfer). " \
"You can add multiple as e.g. -save_sp He Ca+ Fe3+ Passing 'all' includes all species that weren't turned off. In that case, you can "\
"set the maximum degree of ionization with the -save_sp_max_ion flag. default=[] i.e. none.")
parser.add_argument("-save_sp_max_ion", type=int, default=6, help="only used when you set -save_sp all This command sets the maximum degree of ionization "\
"that will be saved. [default=6] but using lower values saves significant file size if high ions are not needed. The maximum number is 12, "\
"but such highly ionized species only occur at very high XUV flux, such as in young systems.")
parser.add_argument("-constantT", action='store_true', help="run the profile at the isothermal temperature instead of converging upon the temperature structure. [default=False]")
args = parser.parse_args()
zdict = tools.get_zdict(z=args.z, zelem=args.zelem)
if 'all' in args.save_sp:
args.save_sp = tools.get_specieslist(exclude_elements=[sp for sp,zval in zdict.items() if zval == 0.], max_ion=args.save_sp_max_ion)
#set up the folder structure if it doesn't exist yet
if not os.path.isdir(tools.projectpath+'/sims/'):
os.mkdir(tools.projectpath+'/sims')
if not os.path.isdir(tools.projectpath+'/sims/1D/'):
os.mkdir(tools.projectpath+'/sims/1D')
if not os.path.isdir(tools.projectpath+'/sims/1D/'+args.plname+'/'):
os.mkdir(tools.projectpath+'/sims/1D/'+args.plname)
if not os.path.isdir(tools.projectpath+'/sims/1D/'+args.plname+'/'+args.dir+'/'):
os.mkdir(tools.projectpath+'/sims/1D/'+args.plname+'/'+args.dir)
if (len(args.T) == 1 and len(args.Mdot) == 1): #then we run a single model
run_s(args.plname, args.Mdot[0], str(args.T[0]), args.itno, args.fc, args.dir, args.SEDname, args.overwrite, args.startT, args.pdir, zdict, args.altmax, args.save_sp, args.constantT, args.maxit)
elif (len(args.T) == 3 and len(args.Mdot) == 3): #then we run a grid over both parameters
run_g(args.plname, args.cores, args.Mdot[0], args.Mdot[1], args.Mdot[2], args.T[0], args.T[1], args.T[2], args.fc, args.dir, args.SEDname, args.overwrite, args.startT, args.pdir, zdict, args.altmax, args.save_sp, args.constantT, args.maxit)
elif (len(args.T) == 3 and len(args.Mdot) == 1): #then we run a grid over only T
run_g(args.plname, args.cores, args.Mdot[0], args.Mdot[0], args.Mdot[0], args.T[0], args.T[1], args.T[2], args.fc, args.dir, args.SEDname, args.overwrite, args.startT, args.pdir, zdict, args.altmax, args.save_sp, args.constantT, args.maxit)
elif (len(args.T) == 1 and len(args.Mdot) == 3): #then we run a grid over only Mdot
run_g(args.plname, args.cores, args.Mdot[0], args.Mdot[1], args.Mdot[2], args.T[0], args.T[0], args.T[0], args.fc, args.dir, args.SEDname, args.overwrite, args.startT, args.pdir, zdict, args.altmax, args.save_sp, args.constantT, args.maxit)
print("\nCalculations took", int(time.time()-t0) // 3600, "hours, ", (int(time.time()-t0)%3600) // 60, "minutes and ", (int(time.time()-t0)%60), "seconds.\n")
|
dlinssenREPO_NAMEsunbatherPATH_START.@sunbather_extracted@sunbather-main@src@convergeT_parker.py@.PATH_END.py
|
{
"filename": "test_vec_check_nan.py",
"repo_name": "DLR-RM/stable-baselines3",
"repo_path": "stable-baselines3_extracted/stable-baselines3-master/tests/test_vec_check_nan.py",
"type": "Python"
}
|
import gymnasium as gym
import numpy as np
import pytest
from gymnasium import spaces
from stable_baselines3.common.vec_env import DummyVecEnv, VecCheckNan
class NanAndInfEnv(gym.Env):
    """Custom environment that deliberately emits NaN and inf observations."""

    metadata = {"render_modes": ["human"]}

    def __init__(self):
        super().__init__()
        # Unbounded scalar action and observation spaces.
        self.action_space = spaces.Box(low=-np.inf, high=np.inf, shape=(1,), dtype=np.float64)
        self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(1,), dtype=np.float64)

    @staticmethod
    def step(action):
        # All-positive actions produce a NaN observation, all-negative
        # produce inf, anything else produces a valid zero observation.
        act = np.asarray(action)
        if np.all(act > 0):
            observation = float("NaN")
        elif np.all(act < 0):
            observation = float("inf")
        else:
            observation = 0
        return [observation], 0.0, False, False, {}

    @staticmethod
    def reset(seed=None):
        # Always restart from a valid zero observation.
        return [0.0], {}

    def render(self):
        pass
def test_check_nan():
    """Exercise the VecCheckNan wrapper on an env that emits NaN/inf."""
    venv = VecCheckNan(DummyVecEnv([NanAndInfEnv]), raise_exception=True)
    # A zero action yields a valid observation.
    venv.step([[0]])
    # NaN/inf actions, and actions that make the env emit NaN/inf
    # observations, must all raise.
    for invalid_action in ([[float("NaN")]], [[float("inf")]], [[-1]], [[1]]):
        with pytest.raises(ValueError):
            venv.step(invalid_action)
    venv.step(np.array([[0, 1], [0, 1]]))
    venv.reset()
|
DLR-RMREPO_NAMEstable-baselines3PATH_START.@stable-baselines3_extracted@stable-baselines3-master@tests@test_vec_check_nan.py@.PATH_END.py
|
{
"filename": "demo_skyview.py",
"repo_name": "leejjoon/pywcsgrid2",
"repo_path": "pywcsgrid2_extracted/pywcsgrid2-main/examples/demo_skyview.py",
"type": "Python"
}
|
# Demo: display an X-ray image and a 21 cm radio image of the same sky region
# side by side, sharing the X-ray WCS, and overlay radio contours on both.
from astropy.io import fits as pyfits
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mpl_toolkits.axes_grid1.axes_grid import ImageGrid
#from pywcsgrid2.axes_wcs import GridHelperWcs, AxesWcs
import pywcsgrid2

# read in the first image
xray_name="pspc_skyview.fits"
f_xray = pyfits.open(xray_name)
header_xray = f_xray[0].header

# the second image
radio_name="radio_21cm.fits"
f_radio = pyfits.open(radio_name)
header_radio = f_radio[0].header

# grid helper: both axes use the X-ray WCS for their coordinate grid
grid_helper = pywcsgrid2.GridHelper(wcs=header_xray)

# AxesGrid to display two images side-by-side
fig = plt.figure(1, (6,3.5))
grid = ImageGrid(fig, (0.15, 0.15, 0.8, 0.75), nrows_ncols=(1, 2),
                 axes_pad=0.1, share_all=True,
                 cbar_mode="each", cbar_location="top", cbar_pad=0,
                 axes_class=(pywcsgrid2.Axes, dict(grid_helper=grid_helper)))

ax1 = grid[0]

# use imshow for a simple image display.
im = ax1.imshow(f_xray[0].data, origin="lower", vmin=0., cmap=cm.gray_r,
                interpolation="nearest")
im.set_clim(4.e-05, 0.00018)

# colorbar for the X-ray panel: tick labels in units of 1e-5
ticklocs = [6, 9, 12, 15]
cax1 = grid.cbar_axes[0]
cbar1 = cax1.colorbar(im)
cax1.toggle_label(True)
cax1.axis["bottom"].toggle(all=False)
cax1.axis["top"].toggle(ticks=True)
cax1.set_xticks([t*1.e-5 for t in ticklocs])
cax1.set_xticklabels(["$%d$" % t for t in ticklocs])
#cax1.xaxis.get_major_formatter().set_offset_string(r"$\times 10^{-5}$")
# annotate the scale factor by hand instead of using the offset formatter
cax1.annotate(r"$\times 10^{-5}$",
              xy=(1,1), xycoords="axes fraction",
              xytext=(0, 15), textcoords="offset points",
              va="bottom", ha="right", size="small")

ax2 = grid[1]
d = f_radio[0].data
# The second image have a different wcs. While imshow works, it will
# interpolate the second image into the image coordinate of the first
# image. You may use pcolormesh when the pixel size of the second
# image is larger than that of the first image. Or you may use
# imshow_affine.
#im2 = ax2[header_radio].pcolormesh(d, cmap=cm.gray_r)
im2 = ax2[header_radio].imshow_affine(d,
                                      cmap=cm.gray_r, origin="lower")

# colorbar for the radio panel
cax2 = grid.cbar_axes[1]
cax2.colorbar(im2)
cax2.toggle_label(True)
cax2.axis["bottom"].toggle(all=False)
cax2.axis["top"].toggle(ticks=True)

# draw contour. The data points of the contour lines are created in
# the image coordinate of the second image and then are transformed to
# the image coordinate of the first image.
cont = ax2[header_radio].contour(d, colors="k")

# draw contour of the second image in the first axes.
cont2 = ax1[header_radio].contour(d, colors="k")

# panel labels
ax1.add_inner_title("X-ray", loc=2)
ax2.add_inner_title("Radio", loc=2)

ax1.locator_params("both", nbins=2) # since ax1 and ax2 shares a
                                    # grid_helper, it affects not only
                                    # ax1 but also ax2.

plt.show()
|
leejjoonREPO_NAMEpywcsgrid2PATH_START.@pywcsgrid2_extracted@pywcsgrid2-main@examples@demo_skyview.py@.PATH_END.py
|
{
"filename": "sunburst.py",
"repo_name": "1313e/CMasher",
"repo_path": "CMasher_extracted/CMasher-master/src/cmasher/colormaps/sunburst/sunburst.py",
"type": "Python"
}
|
import matplotlib as mpl
from matplotlib.colors import ListedColormap
# All declaration
__all__ = ["cmap"]
# Author declaration
__author__ = "Ellert van der Velden (@1313e)"
# Package declaration
__package__ = "cmasher"
# %% GLOBALS AND DEFINITIONS
# Type of this colormap
cm_type = "sequential"
# RGB-values of this colormap
cm_data = [
[0.00000000, 0.00000000, 0.00000000],
[0.00028691, 0.00020835, 0.00028279],
[0.00102421, 0.00070903, 0.00101021],
[0.00218033, 0.00144242, 0.00214845],
[0.00375280, 0.00237790, 0.00368891],
[0.00574727, 0.00349371, 0.00562841],
[0.00817359, 0.00477242, 0.00796563],
[0.01104432, 0.00619914, 0.01069976],
[0.01437378, 0.00776073, 0.01382970],
[0.01817764, 0.00944524, 0.01735364],
[0.02247277, 0.01124162, 0.02126897],
[0.02727694, 0.01313949, 0.02557207],
[0.03260869, 0.01512908, 0.03025819],
[0.03848721, 0.01720107, 0.03532137],
[0.04472223, 0.01934661, 0.04074862],
[0.05095008, 0.02155723, 0.04620189],
[0.05718085, 0.02382484, 0.05156892],
[0.06341877, 0.02614168, 0.05685075],
[0.06966727, 0.02850036, 0.06204782],
[0.07592916, 0.03089381, 0.06716019],
[0.08220666, 0.03331529, 0.07218757],
[0.08850155, 0.03575837, 0.07712945],
[0.09481532, 0.03821687, 0.08198520],
[0.10114895, 0.04068063, 0.08675399],
[0.10750319, 0.04306161, 0.09143498],
[0.11387855, 0.04536332, 0.09602729],
[0.12027537, 0.04758808, 0.10053004],
[0.12669388, 0.04973801, 0.10494242],
[0.13313410, 0.05181515, 0.10926361],
[0.13959587, 0.05382147, 0.11349284],
[0.14607903, 0.05575879, 0.11762946],
[0.15258333, 0.05762879, 0.12167284],
[0.15910850, 0.05943303, 0.12562246],
[0.16565413, 0.06117310, 0.12947786],
[0.17221981, 0.06285040, 0.13323866],
[0.17880518, 0.06446624, 0.13690456],
[0.18540980, 0.06602187, 0.14047531],
[0.19203321, 0.06751848, 0.14395075],
[0.19867499, 0.06895715, 0.14733079],
[0.20533472, 0.07033887, 0.15061537],
[0.21201197, 0.07166460, 0.15380450],
[0.21870632, 0.07293518, 0.15689824],
[0.22541736, 0.07415142, 0.15989669],
[0.23214472, 0.07531401, 0.16279996],
[0.23888802, 0.07642364, 0.16560823],
[0.24564687, 0.07748088, 0.16832171],
[0.25242097, 0.07848626, 0.17094058],
[0.25920996, 0.07944023, 0.17346508],
[0.26601352, 0.08034324, 0.17589547],
[0.27283134, 0.08119562, 0.17823199],
[0.27966317, 0.08199764, 0.18047489],
[0.28650868, 0.08274959, 0.18262446],
[0.29336760, 0.08345167, 0.18468096],
[0.30023971, 0.08410396, 0.18664460],
[0.30712474, 0.08470663, 0.18851568],
[0.31402240, 0.08525975, 0.19029445],
[0.32093251, 0.08576327, 0.19198110],
[0.32785482, 0.08621717, 0.19357587],
[0.33478905, 0.08662148, 0.19507899],
[0.34173503, 0.08697601, 0.19649062],
[0.34869254, 0.08728060, 0.19781092],
[0.35566125, 0.08753522, 0.19904011],
[0.36264104, 0.08773953, 0.20017823],
[0.36963165, 0.08789334, 0.20122542],
[0.37663272, 0.08799656, 0.20218186],
[0.38364424, 0.08804859, 0.20304740],
[0.39066574, 0.08804944, 0.20382227],
[0.39769703, 0.08799872, 0.20450641],
[0.40473792, 0.08789596, 0.20509971],
[0.41178790, 0.08774121, 0.20560237],
[0.41884704, 0.08753353, 0.20601388],
[0.42591463, 0.08727325, 0.20633459],
[0.43299069, 0.08695948, 0.20656394],
[0.44007455, 0.08659242, 0.20670212],
[0.44716616, 0.08617128, 0.20674851],
[0.45426479, 0.08569637, 0.20670331],
[0.46137042, 0.08516677, 0.20656566],
[0.46848219, 0.08458313, 0.20633582],
[0.47560004, 0.08394454, 0.20601280],
[0.48272316, 0.08325159, 0.20559662],
[0.48985104, 0.08250434, 0.20508677],
[0.49698340, 0.08170242, 0.20448225],
[0.50411927, 0.08084690, 0.20378304],
[0.51125803, 0.07993830, 0.20298844],
[0.51839929, 0.07897664, 0.20209721],
[0.52554202, 0.07796358, 0.20110904],
[0.53268538, 0.07690049, 0.20002312],
[0.53982852, 0.07578902, 0.19883855],
[0.54697049, 0.07463129, 0.19755431],
[0.55411028, 0.07342990, 0.19616934],
[0.56124678, 0.07218810, 0.19468248],
[0.56837880, 0.07090985, 0.19309253],
[0.57550502, 0.06959997, 0.19139818],
[0.58262400, 0.06826431, 0.18959809],
[0.58973418, 0.06690989, 0.18769083],
[0.59683382, 0.06554515, 0.18567490],
[0.60392106, 0.06418012, 0.18354875],
[0.61099403, 0.06282598, 0.18131023],
[0.61805061, 0.06149625, 0.17895730],
[0.62508803, 0.06020822, 0.17648890],
[0.63210426, 0.05897851, 0.17390136],
[0.63909578, 0.05783082, 0.17119418],
[0.64606007, 0.05678752, 0.16836327],
[0.65299326, 0.05587785, 0.16540731],
[0.65989160, 0.05513269, 0.16232365],
[0.66675096, 0.05458598, 0.15910942],
[0.67356680, 0.05427454, 0.15576179],
[0.68033403, 0.05423761, 0.15227799],
[0.68704706, 0.05451589, 0.14865546],
[0.69369969, 0.05515040, 0.14489185],
[0.70028509, 0.05618108, 0.14098519],
[0.70679624, 0.05764355, 0.13693176],
[0.71322465, 0.05957213, 0.13273203],
[0.71956187, 0.06199294, 0.12838347],
[0.72579832, 0.06492701, 0.12388673],
[0.73192387, 0.06838759, 0.11924309],
[0.73792785, 0.07238015, 0.11445523],
[0.74379911, 0.07690258, 0.10952793],
[0.74952631, 0.08194530, 0.10446780],
[0.75509807, 0.08749192, 0.09928513],
[0.76050344, 0.09351949, 0.09399345],
[0.76573234, 0.09999923, 0.08860931],
[0.77077595, 0.10689714, 0.08315390],
[0.77562724, 0.11417469, 0.07765262],
[0.78028137, 0.12178994, 0.07213493],
[0.78473594, 0.12969861, 0.06663478],
[0.78899120, 0.13785534, 0.06119075],
[0.79304987, 0.14621526, 0.05584590],
[0.79691698, 0.15473527, 0.05064835],
[0.80059949, 0.16337512, 0.04565234],
[0.80410578, 0.17209842, 0.04091877],
[0.80744502, 0.18087354, 0.03656330],
[0.81062721, 0.18967261, 0.03284897],
[0.81366202, 0.19847328, 0.02978095],
[0.81655911, 0.20725703, 0.02735425],
[0.81932773, 0.21600901, 0.02556368],
[0.82197656, 0.22471783, 0.02440445],
[0.82451354, 0.23337504, 0.02387282],
[0.82694588, 0.24197470, 0.02396658],
[0.82928000, 0.25051291, 0.02468537],
[0.83152234, 0.25898625, 0.02603161],
[0.83367755, 0.26739445, 0.02800850],
[0.83575119, 0.27573587, 0.03062270],
[0.83774693, 0.28401176, 0.03388176],
[0.83966871, 0.29222281, 0.03779577],
[0.84152000, 0.30037020, 0.04231855],
[0.84330390, 0.30845547, 0.04718171],
[0.84502314, 0.31648042, 0.05232334],
[0.84668012, 0.32444703, 0.05769850],
[0.84827700, 0.33235739, 0.06327080],
[0.84981598, 0.34021329, 0.06901096],
[0.85129899, 0.34801660, 0.07489554],
[0.85272715, 0.35576999, 0.08090629],
[0.85410285, 0.36347441, 0.08702799],
[0.85542653, 0.37113285, 0.09324952],
[0.85670046, 0.37874607, 0.09956104],
[0.85792511, 0.38631664, 0.10595570],
[0.85910167, 0.39384615, 0.11242769],
[0.86023184, 0.40133560, 0.11897200],
[0.86131603, 0.40878710, 0.12558544],
[0.86235527, 0.41620202, 0.13226519],
[0.86335049, 0.42358173, 0.13900904],
[0.86430261, 0.43092748, 0.14581530],
[0.86521249, 0.43824051, 0.15268270],
[0.86608094, 0.44552198, 0.15961030],
[0.86690878, 0.45277298, 0.16659744],
[0.86769678, 0.45999455, 0.17364368],
[0.86844571, 0.46718767, 0.18074877],
[0.86915633, 0.47435325, 0.18791261],
[0.86982940, 0.48149217, 0.19513520],
[0.87046566, 0.48860521, 0.20241667],
[0.87106589, 0.49569313, 0.20975721],
[0.87163086, 0.50275663, 0.21715708],
[0.87216162, 0.50979614, 0.22461634],
[0.87265881, 0.51681240, 0.23213553],
[0.87312317, 0.52380600, 0.23971510],
[0.87355555, 0.53077744, 0.24735548],
[0.87395712, 0.53772697, 0.25505684],
[0.87432861, 0.54465512, 0.26281981],
[0.87467085, 0.55156232, 0.27064498],
[0.87498503, 0.55844876, 0.27853263],
[0.87527217, 0.56531471, 0.28648326],
[0.87553313, 0.57216055, 0.29449756],
[0.87576930, 0.57898630, 0.30257577],
[0.87598171, 0.58579221, 0.31071851],
[0.87617147, 0.59257844, 0.31892638],
[0.87634020, 0.59934489, 0.32719953],
[0.87648888, 0.60609181, 0.33553878],
[0.87661914, 0.61281908, 0.34394439],
[0.87673240, 0.61952670, 0.35241687],
[0.87683016, 0.62621463, 0.36095669],
[0.87691421, 0.63288268, 0.36956410],
[0.87698607, 0.63953083, 0.37823972],
[0.87704779, 0.64615877, 0.38698363],
[0.87710104, 0.65276640, 0.39579639],
[0.87714801, 0.65935338, 0.40467811],
[0.87719069, 0.66591948, 0.41362916],
[0.87723137, 0.67246435, 0.42264965],
[0.87727233, 0.67898764, 0.43173978],
[0.87731605, 0.68548896, 0.44089961],
[0.87736509, 0.69196788, 0.45012917],
[0.87742214, 0.69842394, 0.45942844],
[0.87749005, 0.70485663, 0.46879727],
[0.87757175, 0.71126545, 0.47823549],
[0.87767038, 0.71764981, 0.48774277],
[0.87778914, 0.72400915, 0.49731878],
[0.87793145, 0.73034282, 0.50696296],
[0.87810081, 0.73665020, 0.51667477],
[0.87830092, 0.74293060, 0.52645341],
[0.87853556, 0.74918334, 0.53629808],
[0.87880873, 0.75540769, 0.54620771],
[0.87912449, 0.76160293, 0.55618122],
[0.87948712, 0.76776830, 0.56621720],
[0.87990092, 0.77390307, 0.57631429],
[0.88037047, 0.78000643, 0.58647070],
[0.88090027, 0.78607767, 0.59668473],
[0.88149514, 0.79211598, 0.60695418],
[0.88215974, 0.79812065, 0.61727700],
[0.88289909, 0.80409090, 0.62765056],
[0.88371798, 0.81002606, 0.63807240],
[0.88462153, 0.81592540, 0.64853946],
[0.88561459, 0.82178829, 0.65904886],
[0.88670229, 0.82761408, 0.66959711],
[0.88788952, 0.83340224, 0.68018083],
[0.88918122, 0.83915225, 0.69079625],
[0.89058234, 0.84486362, 0.70143930],
[0.89209744, 0.85053601, 0.71210615],
[0.89373153, 0.85616903, 0.72279183],
[0.89548875, 0.86176252, 0.73349245],
[0.89737373, 0.86731625, 0.74420272],
[0.89939058, 0.87283016, 0.75491787],
[0.90154313, 0.87830429, 0.76563309],
[0.90383561, 0.88373862, 0.77634217],
[0.90627132, 0.88913338, 0.78704028],
[0.90885368, 0.89448881, 0.79772179],
[0.91158625, 0.89980515, 0.80838000],
[0.91447204, 0.90508277, 0.81900898],
[0.91751403, 0.91032207, 0.82960244],
[0.92071527, 0.91552347, 0.84015333],
[0.92407894, 0.92068737, 0.85065379],
[0.92760832, 0.92581419, 0.86109531],
[0.93130674, 0.93090430, 0.87146916],
[0.93517804, 0.93595804, 0.88176475],
[0.93922654, 0.94097572, 0.89196965],
[0.94345707, 0.94595767, 0.90206897],
[0.94787482, 0.95090438, 0.91204440],
[0.95248483, 0.95581688, 0.92187319],
[0.95729065, 0.96069726, 0.93152703],
[0.96229171, 0.96554987, 0.94097172],
[0.96747854, 0.97038293, 0.95016887],
[0.97282603, 0.97521057, 0.95908244],
[0.97828739, 0.98005380, 0.96769236],
[0.98379547, 0.98493815, 0.97601254],
[0.98927857, 0.98988597, 0.98410494],
[0.99468526, 0.99490795, 0.99206668],
[1.00000000, 1.00000000, 1.00000000],
]
# Create ListedColormap object for this colormap
# The lookup table must contain exactly 256 RGB entries.
assert len(cm_data) == 256
cmap = ListedColormap(cm_data, name="cmr.sunburst")
cmap_r = cmap.reversed()

# Register (reversed) cmap in MPL under "cmr.sunburst" / "cmr.sunburst_r"
mpl.colormaps.register(cmap=cmap)
mpl.colormaps.register(cmap=cmap_r)
|
1313eREPO_NAMECMasherPATH_START.@CMasher_extracted@CMasher-master@src@cmasher@colormaps@sunburst@sunburst.py@.PATH_END.py
|
{
"filename": "muse_line_measuring.py",
"repo_name": "Vital-Fernandez/lime",
"repo_path": "lime_extracted/lime-master/examples/benchmark/muse_line_measuring.py",
"type": "Python"
}
|
from pathlib import Path
from mpdaf.obj import Cube
from astropy.wcs import WCS
import numpy as np
import lime
lime.theme.set_style('dark')
def read_muse_cube(file_address):
    """Open a MUSE cube with mpdaf and reconstruct its wavelength array.

    Returns a tuple ``(wave_array, cube_obj, header)`` where ``wave_array``
    is built from the CRVAL3/CD3_3/NAXIS3 header cards (units are whatever
    the header uses -- presumably Angstroms; TODO confirm).
    """
    cube_obj = Cube(filename=str(file_address))
    header = cube_obj.data_header

    dw = header['CD3_3']        # wavelength step per spectral pixel
    w_min = header['CRVAL3']    # wavelength at the reference pixel
    nPixels = header['NAXIS3']  # number of spectral pixels
    w_max = w_min + dw * nPixels
    # endpoint=False yields exactly nPixels samples spaced by dw.
    # NOTE(review): this assumes CRPIX3 == 1 -- verify against the cube header.
    wave_array = np.linspace(w_min, w_max, nPixels, endpoint=False)

    return wave_array, cube_obj, header
# Inputs
cfg_file = 'muse.toml'             # lime configuration for this target
mask_file = 'CGCG007_masks.fits'   # spatial masks for the galaxy
cube_file = '../benchmark/CGCG007.fits'

# Outputs
log_file = 'log_CGCG007.fits'

# Load configuration (flux normalization and redshift for CGCG007)
cfg = lime.load_cfg(cfg_file)
norm_flux = cfg['sample_data']['norm_flux']
z_obj = cfg['sample_data']['redshift']

# Load cube
wave_array, cube, hdr = read_muse_cube(cube_file)
flux_cube = cube.data.data * norm_flux
err_cube = np.sqrt(cube.var.data) * norm_flux  # 1-sigma uncertainty from the variance plane
mask_pixel_cube = np.isnan(flux_cube)          # flag NaN spaxels so lime ignores them
wcs = WCS(hdr)

# Create MUSE lime.Cube object
cgcg007 = lime.Cube(wave_array, flux_cube, err_cube, redshift=z_obj, norm_flux=norm_flux,
                    wcs=wcs, pixel_mask=mask_pixel_cube)

# Show the data and the masks
cgcg007.check.cube('H1_6563A', masks_file=mask_file)

# Perform the measurements (disabled; uncomment to run the fits)
# cgcg007.fit.spatial_mask(mask_file, line_detection=True, output_address=log_file, fit_conf=cfg)
|
Vital-FernandezREPO_NAMElimePATH_START.@lime_extracted@lime-master@examples@benchmark@muse_line_measuring.py@.PATH_END.py
|
{
"filename": "query.py",
"repo_name": "simonsobs/nextline-schedule",
"repo_path": "nextline-schedule_extracted/nextline-schedule-main/src/nextline_schedule/schema/scheduler/query.py",
"type": "Python"
}
|
import strawberry
from strawberry.types import Info
from nextline_schedule.scheduler import Scheduler
def query_scheduler_api_url(info: Info) -> str:
    """Resolve the URL of the external scheduler API."""
    sched = info.context['schedule']['scheduler']
    assert isinstance(sched, Scheduler)
    return sched._api_url
def query_scheduler_length_minutes(info: Info) -> int:
    """Resolve the scheduled run length in minutes."""
    sched = info.context['schedule']['scheduler']
    assert isinstance(sched, Scheduler)
    return sched._length_minutes
def query_scheduler_policy(info: Info) -> str:
    """Resolve the scheduler's policy name."""
    sched = info.context['schedule']['scheduler']
    assert isinstance(sched, Scheduler)
    return sched._policy
@strawberry.type
class QueryScheduleScheduler:
    # GraphQL type exposing the scheduler's read-only settings; each field
    # delegates to one of the query_scheduler_* resolver functions above.
    api_url: str = strawberry.field(resolver=query_scheduler_api_url)
    length_minutes: int = strawberry.field(resolver=query_scheduler_length_minutes)
    policy: str = strawberry.field(resolver=query_scheduler_policy)
|
simonsobsREPO_NAMEnextline-schedulePATH_START.@nextline-schedule_extracted@nextline-schedule-main@src@nextline_schedule@schema@scheduler@query.py@.PATH_END.py
|
{
"filename": "icf-2d.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/functional/Strength/ICF/icf-2d.py",
"type": "Python"
}
|
#-------------------------------------------------------------------------------
# A mock ICF kind 'o problem.
#-------------------------------------------------------------------------------
from math import *
from Spheral import *
from SpheralTestUtilities import *
from SpheralGnuPlotUtilities import *
from SpheralVisitDump import dumpPhysicsState
from findLastRestart import findLastRestart
from GzipFileNodeGenerator import *
# Load the mpi module if we're parallel.
import loadmpi
mpi, rank, procs = loadmpi.loadmpi()
title("2-D ICF test problem")
#-------------------------------------------------------------------------------
# Generic problem parameters
#-------------------------------------------------------------------------------
commandLine(NodeListConstructor = AsphNodeList2d,
rhoAir = 1.4,
rhoDrive = 0.1,
rhoShell = 1.0,
PAir = 1.0,
PDrive = 100.0,
PShell = 1.0,
gammaAir = 1.4,
gammaDrive = 1.4,
gammaShell = 1.6,
mu = 1.0,
Qconstructor = MonaghanGingoldViscosity2d,
#Qconstructor = TensorMonaghanGingoldViscosity2d,
Cl = 1.0,
Cq = 0.75,
Qlimiter = False,
balsaraCorrection = False,
epsilon2 = 1e-2,
hmin = 1e-5,
hmax = 0.5,
hminratio = 0.1,
nPerh = 2.01,
cfl = 0.5,
XSPH = True,
epsilonTensile = 0.0,
nTensile = 8,
goalTime = 0.06,
dtSample = 0.01,
dt = 0.0001,
dtMin = 1.0e-6,
dtMax = 0.1,
dtGrowth = 2.0,
maxSteps = None,
statsStep = 10,
smoothIters = 0,
HEvolution = Hydro2d.HEvolutionType.IdealH,
sumForMassDensity = Hydro2d.MassDensityType.RigorousSumDensity,
compatibleEnergy = True,
restoreCycle = None,
restartStep = 1000,
dataDirBase = "icf-2d",
graphics = True,
)
epsAir = PAir/((gammaAir - 1.0)*rhoAir)
epsDrive = PDrive/((gammaDrive - 1.0)*rhoDrive)
epsShell = PShell/((gammaShell - 1.0)*rhoShell)
dataDir = dataDirBase
restartDir = dataDir + "/restarts"
visitDir = dataDir + "/visit"
restartBaseName = restartDir + "/icf-2d"
#-------------------------------------------------------------------------------
# Check if the necessary output directories exist. If not, create them.
#-------------------------------------------------------------------------------
import os, sys
if mpi.rank == 0:
if not os.path.exists(restartDir):
os.makedirs(restartDir)
if not os.path.exists(visitDir):
os.makedirs(visitDir)
mpi.barrier()
#-------------------------------------------------------------------------------
# If we're restarting, find the set of most recent restart files.
#-------------------------------------------------------------------------------
restoreCycle = findLastRestart(restartBaseName)
#-------------------------------------------------------------------------------
# Material properties.
#-------------------------------------------------------------------------------
eosAir = GammaLawGasCGS2d(gammaAir, mu)
eosDrive = GammaLawGasCGS2d(gammaDrive, mu)
eosShell = GammaLawGasCGS2d(gammaShell, mu)
#-------------------------------------------------------------------------------
# Interpolation kernels.
#-------------------------------------------------------------------------------
WT = TableKernel2d(BSplineKernel2d(), 1000)
WTPi = TableKernel2d(BSplineKernel2d(), 1000)
output("WT")
output("WTPi")
kernelExtent = WT.kernelExtent()
#-------------------------------------------------------------------------------
# Make the NodeLists.
#-------------------------------------------------------------------------------
# Each material gets its own equation of state. Previously all three
# NodeLists were constructed with eosAir, leaving eosDrive and eosShell
# entirely unused -- the per-material EOS objects built above strongly
# suggest that was a copy-paste bug.
nodesAir = NodeListConstructor("air", eosAir, WT, WTPi)
nodesDrive = NodeListConstructor("drive", eosDrive, WT, WTPi)
nodesShell = NodeListConstructor("shell", eosShell, WT, WTPi)
nodeSet = [nodesAir, nodesDrive, nodesShell]
for nodes in nodeSet:
nodes.XSPH = XSPH
nodes.hmin = hmin
nodes.hmax = hmax
nodes.hminratio = hminratio
nodes.nodesPerSmoothingScale = nPerh
nodes.epsilonTensile = epsilonTensile
nodes.nTensile = nTensile
output("nodes.name()")
output(" nodes.hmin")
output(" nodes.hmax")
output(" nodes.hminratio")
output(" nodes.nodesPerSmoothingScale")
output(" nodes.epsilonTensile")
output(" nodes.nTensile")
output(" nodes.XSPH")
#-------------------------------------------------------------------------------
# Construct the neighbor objects.
#-------------------------------------------------------------------------------
_cache = []
for nodes in nodeSet:
neighbor = TreeNeighbor2d(nodes,
kernelExtent = kernelExtent)
nodes.registerNeighbor(neighbor)
_cache.append(neighbor)
#-------------------------------------------------------------------------------
# Set the node properties.
#-------------------------------------------------------------------------------
outerNodeFlags = IntField2d("outer node flags", nodesDrive)
if restoreCycle is None:
filename = "icf-10-20-8x90.gz"
generatorAir = GzipFileNodeGeneratorRZto2D(filename, "Inner", nPerh,
SPH = (type(nodesAir) == SphNodeList2d))
generatorDrive = GzipFileNodeGeneratorRZto2D(filename, "Driver", nPerh,
SPH = (type(nodesDrive) == SphNodeList2d),
extraFields = ["Driver"])
generatorShell = GzipFileNodeGeneratorRZto2D(filename, "Shell", nPerh,
SPH = (type(nodesShell) == SphNodeList2d))
# Get the outer node flags.
nodesDrive.numInternalNodes = generatorDrive.numLocalNodes()
for i in range(generatorDrive.numLocalNodes()):
outerNodeFlags[i] = int(generatorDrive.outerNodes[i] + 0.1)
from ParMETISDistributeNodes import distributeNodes2d
distributeNodes2d((nodesAir, generatorAir),
(nodesDrive, generatorDrive),
(nodesShell, generatorShell))
for nodes in nodeSet:
output("nodes.name()")
output(" mpi.reduce(nodes.numInternalNodes, mpi.MIN)")
output(" mpi.reduce(nodes.numInternalNodes, mpi.MAX)")
output(" mpi.reduce(nodes.numInternalNodes, mpi.SUM)")
# Set node specific thermal energies per material. Previously every
# material was initialized with epsAir, leaving the epsDrive/epsShell
# values computed at the top of the script unused -- almost certainly a
# copy-paste bug; the drive region should start at its own (hot) energy.
nodesAir.specificThermalEnergy(ScalarField2d("tmp", nodesAir, epsAir))
nodesDrive.specificThermalEnergy(ScalarField2d("tmp", nodesDrive, epsDrive))
nodesShell.specificThermalEnergy(ScalarField2d("tmp", nodesShell, epsShell))
#-------------------------------------------------------------------------------
# Construct a DataBase.
#-------------------------------------------------------------------------------
db = DataBase2d()
for nodes in nodeSet:
db.appendNodeList(nodes)
output("db")
output("db.numNodeLists")
output("db.numFluidNodeLists")
#-------------------------------------------------------------------------------
# Construct the artificial viscosity.
#-------------------------------------------------------------------------------
q = Qconstructor(Cl, Cq)
q.epsilon2 = epsilon2
q.limiter = Qlimiter
q.balsaraShearCorrection = balsaraCorrection
output("q")
output("q.Cl")
output("q.Cq")
output("q.epsilon2")
output("q.limiter")
output("q.balsaraShearCorrection")
#-------------------------------------------------------------------------------
# Construct the hydro physics object.
#-------------------------------------------------------------------------------
hydro = Hydro2d(WT, WTPi, q, compatibleEnergy)
hydro.cfl = cfl
hydro.HEvolution = HEvolution
hydro.sumForMassDensity = sumForMassDensity
hydro.HsmoothMin = hmin
hydro.HsmoothMax = hmax
hydro.HratioMin = hminratio
output("hydro")
output("hydro.cfl")
output("hydro.HEvolution")
output("hydro.sumForMassDensity")
output("hydro.HsmoothMin")
output("hydro.HsmoothMax")
output("hydro.compatibleEnergyEvolution")
output("hydro.kernel()")
output("hydro.PiKernel()")
output("hydro.HratioMin")
output("hydro.valid()")
#-------------------------------------------------------------------------------
# Create boundary conditions.
#-------------------------------------------------------------------------------
xPlane0 = Plane2d(Vector2d(0.0, 0.0), Vector2d(1.0, 0.0))
yPlane0 = Plane2d(Vector2d(0.0, 0.0), Vector2d(0.0, 1.0))
xbc0 = ReflectingBoundary2d(xPlane0)
ybc0 = ReflectingBoundary2d(yPlane0)
hydro.appendBoundary(xbc0)
hydro.appendBoundary(ybc0)
output("hydro.haveBoundary(xbc0)")
output("hydro.haveBoundary(ybc0)")
#-------------------------------------------------------------------------------
# Construct an integrator, and add the physics packages.
#-------------------------------------------------------------------------------
integrator = SynchronousRK2Integrator2d(db)
integrator.appendPhysicsPackage(hydro)
integrator.lastDt = dt
integrator.dtMin = dtMin
integrator.dtMax = dtMax
integrator.dtGrowth = dtGrowth
output("integrator")
output("integrator.havePhysicsPackage(hydro)")
output("integrator.valid()")
output("integrator.lastDt")
output("integrator.dtMin")
output("integrator.dtMax")
output("integrator.dtGrowth")
#-------------------------------------------------------------------------------
# Make the problem controller.
#-------------------------------------------------------------------------------
control = SpheralController(integrator, WT,
statsStep = statsStep,
restartStep = restartStep,
restartBaseName = restartBaseName)
output("control")
# Smooth the initial conditions.
if restoreCycle is not None:
control.loadRestartFile(restoreCycle)
else:
control.iterateIdealH()
control.smoothState(smoothIters)
control.dropRestartFile()
dumpPhysicsState(integrator,
"icf-2d",
visitDir)
#-------------------------------------------------------------------------------
# Advance to the end time.
#-------------------------------------------------------------------------------
hstats(nodeSet)
while control.time() < goalTime:
nextGoalTime = min(control.time() + dtSample, goalTime)
control.advance(nextGoalTime, maxSteps)
control.dropRestartFile()
dumpPhysicsState(integrator,
"icf-2d",
visitDir)
#-------------------------------------------------------------------------------
# Plot the results.
#-------------------------------------------------------------------------------
if graphics:
# Plot the elongation (h1/h2) for the H tensors.
import Gnuplot
rPlot = plotNodePositions2d(db, colorNodeLists=True, colorDomains=False)
# Plot the final state.
rhoPlot, vrPlot, epsPlot, PPlot, HPlot = plotRadialState(db)
del HPlot
Hinverse = db.fluidHinverse
hr = db.newFluidScalarFieldList()
ht = db.newFluidScalarFieldList()
for Hfield, hrfield, htfield in zip(Hinverse.fields(),
hr.fields(),
ht.fields()):
n = Hfield.numElements()
assert hrfield.numElements() == n
assert htfield.numElements() == n
positions = Hfield.nodeList().positions()
for i in range(n):
runit = positions[i].unitVector()
tunit = Vector2d(-(positions[i].y), positions[i].x).unitVector()
hrfield[i] = (Hfield[i]*runit).magnitude()
htfield[i] = (Hfield[i]*tunit).magnitude()
hrPlot = plotFieldList(hr, xFunction="%s.magnitude()", plotStyle="points", winTitle="h_r")
htPlot = plotFieldList(ht, xFunction="%s.magnitude()", plotStyle="points", winTitle="h_t")
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@functional@Strength@ICF@icf-2d.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/funnel/marker/colorbar/tickfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``family`` property of
    ``funnel.marker.colorbar.tickfont``."""

    def __init__(
        self,
        plotly_name="family",
        parent_name="funnel.marker.colorbar.tickfont",
        **kwargs
    ):
        # Fill in the validator defaults only where the caller has not
        # overridden them, then forward everything to the base validator.
        for option, default in (
            ("edit_type", "colorbars"),
            ("no_blank", True),
            ("role", "style"),
            ("strict", True),
        ):
            kwargs.setdefault(option, default)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@funnel@marker@colorbar@tickfont@_family.py@.PATH_END.py
|
{
"filename": "concatenate_fit_gyro_model_output.py",
"repo_name": "lgbouma/gyro-interp",
"repo_path": "gyro-interp_extracted/gyro-interp-main/drivers/deprecated/concatenate_fit_gyro_model_output.py",
"type": "Python"
}
|
"""
Concatenate the results from parallel_fit_gyro_model.py
Write the output to one CSV file.
(which will be plotted by plot_fit_gyro_model.py)
"""
from itertools import product
from gyrointerp.fitting import get_chi_sq_red
from gyrointerp.paths import RESULTSDIR, LOCALDIR
import os
from glob import glob
import numpy as np, pandas as pd
from datetime import datetime
# Identifier of the model whose per-simulation chi^2 CSVs we concatenate.
modelid = "fitgyro_v06_zeroB_zeroA_N750k"
input_dir = os.path.join(LOCALDIR, "gyrointerp", modelid)

csvpaths = glob(os.path.join(input_dir, "A_*csv"))
N = len(csvpaths)
print(N)

outdir = os.path.join(RESULTSDIR, "fit_gyro_model")
outpath = os.path.join(outdir, f'{modelid}_concatenated_chi_squared_results.csv')

# BUGFIX: open the output exactly once, inside a context manager.  The old
# code opened the handle on the first iteration and closed/reopened it on
# every subsequent one ("to avoid stale pointers"), and raised NameError at
# the final close() whenever csvpaths was empty.  A periodic flush() gives
# the same durability without the reopen dance.
with open(outpath, "a") as write_handle:
    for ix, csvpath in enumerate(csvpaths):
        if ix % 10000 == 0:
            now = datetime.now().isoformat()
            print(f"{now}: {ix}/{N}...")
            write_handle.flush()

        with open(csvpath, "r") as f:
            in_lines = f.readlines()

        # Each per-simulation CSV is expected to be a header plus exactly one
        # data row; keep only the data row.
        if len(in_lines) == 2:
            write_handle.write(in_lines[-1])
        else:
            print(f"ERR! {csvpath} did not have exactly two lines")

print(f"Wrote {outpath}")
|
lgboumaREPO_NAMEgyro-interpPATH_START.@gyro-interp_extracted@gyro-interp-main@drivers@deprecated@concatenate_fit_gyro_model_output.py@.PATH_END.py
|
{
"filename": "prior.py",
"repo_name": "aneeshnaik/spam",
"repo_path": "spam_extracted/spam-master/fit/prior.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created: 2018
Author: A. P. Naik
Description: File containing prior function 'lnprior', to be fed to emcee
sampler. Also various supplementary functions relating halo scaling relations
from Moster et al, 2013 (arxiv:1205.5807), and Dutton & Maccio, 2014
(arxiv:1402.7073).
"""
import numpy as np
from scipy.constants import G
from scipy.constants import parsec as pc
# physical constants and cosmology; all SI units.
delta = 93.6               # overdensity factor used in the virial mass-velocity relation in lnprior()
h = 0.7                    # dimensionless Hubble parameter
H0 = h*100*1000/(1e+6*pc)  # Hubble constant: 100*h km/s/Mpc converted to 1/s
Msun = 1.989e+30           # solar mass [kg]
def SHM(M_halo):
    """
    Stellar mass / halo mass function from Moster et al. 2013
    (arxiv:1205.5807): map a halo virial mass to a total stellar mass.

    Parameters
    ----------
    M_halo : float or 1D numpy.ndarray
        Halo virial mass. UNITS: kg

    Returns
    -------
    M_star : float or 1D numpy.ndarray, same shape as M_halo
        Total stellar mass. UNITS: kg
    """
    # best-fit parameters of the double power law (Moster et al. 2013)
    M1 = 10**(11.59)*Msun
    N = 0.0351
    beta = 1.376
    gamma = 0.608

    # double power law in the mass ratio X = M_halo / M1
    X = M_halo/M1
    return 2*N*M_halo/(X**(-beta) + X**gamma)
def err_SHM(M_halo):
    """
    Error on the stellar mass / halo mass function of Moster et al. 2013
    (arxiv:1205.5807): for a given halo mass, return the (approximate)
    uncertainty on log10(stellar mass).

    Parameters
    ----------
    M_halo : float or 1D numpy.ndarray
        Halo virial mass. UNITS: kg

    Returns
    -------
    err_M_star : float or 1D numpy.ndarray, same shape as M_halo
        Error on log10(stellar mass)
    """
    # best-fit parameters and their 1-sigma uncertainties (Moster et al. 2013)
    M1 = 10**(11.59)*Msun
    N = 0.0351
    beta = 1.376
    gamma = 0.608
    sig_M1 = M1*np.log(10)*0.236
    sig_N = 0.0058
    sig_beta = 0.153
    sig_gamma = 0.059

    Ms = SHM(M_halo)

    # NOTE(review): these are simple fractional-error terms (sig_p * Ms / p),
    # not full partial derivatives of the Moster relation -- presumably an
    # intentional approximation carried over from the original analysis.
    err_N = sig_N*Ms/N
    err_M1 = sig_M1*Ms/M1
    err_beta = sig_beta*Ms/beta
    err_gamma = sig_gamma*Ms/gamma

    # combine in quadrature, then convert the absolute mass error into an
    # error on log10(M_star): d(log10 Ms) = dMs / (Ms ln 10)
    total = np.sqrt(err_N**2 + err_M1**2 + err_beta**2 + err_gamma**2)
    return total / (Ms*np.log(10))
def CMR(M_halo):
    """
    Concentration / halo mass relation from Dutton & Maccio 2014
    (arxiv:1402.7073): for a given halo mass, return log10(concentration).

    Parameters
    ----------
    M_halo : float or 1D numpy.ndarray
        Halo virial mass. UNITS: kg

    Returns
    -------
    logc : float or 1D numpy.ndarray, same shape as M_halo
        log10(halo concentration)
    """
    # power-law fit coefficients from Dutton & Maccio 2014
    intercept = 1.025
    slope = -0.097
    # the relation is expressed in terms of M_halo in units of 1e12 Msun/h
    return intercept + slope*np.log10(M_halo/(1e+12*Msun/h))
def lnprior(theta, theta_dict, galaxy, **kwargs):
    """
    For given fit parameters (contained in 'theta') returns log-prior. If
    'baryon_bound' is switched on, huge negative value is returned for
    parameter values for which baryon fraction is super-cosmic. If
    'scaling_priors' is switched on, then scaling relations from Moster et al,
    2013 (arxiv:1205.5807), and Dutton & Maccio, 2014 (arxiv:1402.7073), are
    used as priors. Otherwise flat priors with bounds from Katz et al., 2016
    (arxiv:1605.05971) for all priors except fR0 and sigma_g. For fR0, bounds
    are 1e-9 and 2e-6. For sigma_g, bounds are 0 and twice the maximum
    observational error for galaxy in question.

    Parameters
    ----------
    theta : numpy.ndarray, shape (ndim,)
        Parameter values for which to calculate prior.
    theta_dict : dict
        Keys are names of free parameters, and values are indices. Indices are
        used, for example, in the stored Markov chains.
    galaxy : spam.data.SPARCGalaxy
        Instance of class spam.data.SPARCGalaxy, containing galaxy to be fit.
    **kwargs :
        Same as kwargs for spam.fit.GalaxyFit constructor. See documentation
        therein. Additionally, prior_bounds_lower and prior_bounds_upper, which
        give the bounds of the priors on all parameters, set in GalaxyFit.

    Returns
    -------
    lnprior : float
        log-prior lnP(theta).
    """
    # get parameter bounds
    lb = kwargs['prior_bounds_lower']
    ub = kwargs['prior_bounds_upper']
    if not theta.shape == lb.shape == ub.shape:
        raise ValueError("Theta does not have same shape as theta bounds")

    # check if parameters are within bounds, otherwise -inf prior
    if (theta < ub).all() and (theta > lb).all():

        # determine mass-to-light ratio
        if kwargs['upsilon'] == 'fixed':
            ML = 0.5
        else:
            ML = 10**theta[theta_dict['ML_disc']]

        # calculate stellar and halo masses
        V_vir = 10**theta[theta_dict['V_vir']]
        c_vir = 10**theta[theta_dict['c_vir']]
        M_halo = (V_vir**3)/(np.sqrt(delta/2)*G*H0)
        M_star = ML*1e+9*galaxy.luminosity_tot*Msun

        # reject if M_baryon/M_DM > 0.2, if switch is on; a large finite
        # penalty is used rather than -inf
        if kwargs['baryon_bound']:
            M_gas = (4/3)*galaxy.HI_mass
            if (M_star+M_gas)/M_halo > 0.2:
                return -1e+20

        # implement SHM and CMR scaling relation priors if switch is on
        if kwargs['scaling_priors']:
            # SHM: Gaussian in log10(M_star) about the Moster et al. relation
            y = np.log10(M_star)
            mu = np.log10(SHM(M_halo))
            sig = err_SHM(M_halo)
            sig += 0.2  # f(R) broadening
            g1 = ((y-mu)/sig)**2

            # CMR: Gaussian in log10(c) about the Dutton & Maccio relation
            if kwargs['halo_type'] == 'DC14':  # convert DC14 c_vir to NFW
                X = np.log10(M_star/M_halo)
                exponent = 3.4*(X+4.5)
                c_NFW = c_vir/(1+1e-5*np.exp(exponent))  # Katz typo corrected
                y = np.log10(c_NFW)
            else:
                y = np.log10(c_vir)
            mu = CMR(M_halo)
            sig = 0.11  # from Dutton et al.
            sig += 0.1  # f(R) broadening
            g2 = ((y-mu)/sig)**2

            # BUGFIX: the log of a product of two independent Gaussian priors
            # is -0.5*(g1 + g2); the previous code computed -0.5*g1*g2, which
            # e.g. nullified the CMR prior entirely whenever the SHM term was
            # a perfect match (g1 == 0).
            lnp = -0.5*(g1 + g2)
            return lnp
        else:
            return 0
    else:
        return -np.inf
|
aneeshnaikREPO_NAMEspamPATH_START.@spam_extracted@spam-master@fit@prior.py@.PATH_END.py
|
{
"filename": "run.py",
"repo_name": "ratt-ru/Stimela-classic",
"repo_path": "Stimela-classic_extracted/Stimela-classic-master/stimela/cargo/cab/casa47_gaincal/src/run.py",
"type": "Python"
}
|
import os
import sys
import logging
import Crasa.Crasa as crasa
from casacore.tables import table
import numpy
import glob
import yaml
import shutil
# Container environment: paths injected by the Stimela framework.
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]

# Load the cab definition describing the CASA task to run.
with open(CONFIG, "r") as _std:
    cab = yaml.safe_load(_std)

junk = cab["junk"]

# Build the task keyword arguments, skipping unset parameters.
args = {}
for param in cab['parameters']:
    name = param['name']
    value = param['value']
    if value is None:
        continue
    args[name] = value

task = crasa.CasaTask(cab["binary"], **args)
try:
    task.run()
finally:
    # Always clean up junk files/directories, even when the task fails.
    for item in junk:
        for dest in [OUTPUT, MSDIR]:  # these are the only writable volumes in the container
            items = glob.glob("{dest}/{item}".format(**locals()))
            for f in items:
                if os.path.isfile(f):
                    os.remove(f)
                elif os.path.isdir(f):
                    shutil.rmtree(f)
                # Leave other types

# Sanity-check the produced gain table.
gtab = args["caltable"]
if not os.path.exists(gtab):
    raise RuntimeError("The gaintable was not created. Please refer to CASA {0:s} logfile for further details".format(cab["binary"]))

tab = table(gtab)
field_ids = numpy.unique(tab.getcol("FIELD_ID"))
tab.close()

tab = table(gtab+"::FIELD")
field_names = tab.getcol("NAME")
tab.close()

field_in = args["field"].split(",")
# BUGFIX: map() is lazy in Python 3, so `set(map(int, ...))` used to raise
# ValueError *outside* this try block whenever fields were given by name.
# List comprehensions evaluate eagerly, so the exception is raised (and
# caught) here as intended.
try:
    ids = [int(f) for f in field_in]
except ValueError:
    ids = [field_names.index(f) for f in field_in]

if not set(ids).intersection(field_ids):
    raise RuntimeError("None of the fields has solutions after the calibration. Please refer to the CASA {} logfile for further details".format(cab["binary"]))
|
ratt-ruREPO_NAMEStimela-classicPATH_START.@Stimela-classic_extracted@Stimela-classic-master@stimela@cargo@cab@casa47_gaincal@src@run.py@.PATH_END.py
|
{
"filename": "quality_control_framework.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/modules/quality_control/src/quality_control_framework.py",
"type": "Python"
}
|
import ast
import configparser as cp
from modules.Utils.kpf_parse import HeaderParse
import modules.quality_control.src.quality_control as qc
from modules.quality_control.src.quality_control import execute_all_QCs
# Pipeline dependencies
from kpfpipe.logger import *
from kpfpipe.primitives.level0 import KPF0_Primitive
from keckdrpframework.models.arguments import Arguments
# Global read-only variables
DEFAULT_CFG_PATH = 'modules/quality_control/configs/default.cfg'
class QualityControlFramework(KPF0_Primitive):

    """
    Description:
        Performs quality control on a FITS file.  Includes logic for
        automatically determining the data level.

    Arguments:
        data_type (str): Type of data (e.g., KPF).
        data_level_str (str): L0, 2D, L1, L2 are possible choices.
        fits_object (KPF object): L0/2D/L1/L2 KPF object
        actual_dir (str): Prefix of actual directory outside container that maps to /data (e.g., /data/kpf)
    """

    def __init__(self, action, context):

        KPF0_Primitive.__init__(self, action, context)

        # Positional arguments handed over by the framework recipe.
        self.data_type = self.action.args[0]
        self.data_level_str = self.action.args[1]
        self.kpf_object = self.action.args[2]
        self.qc_list_flag = self.action.args[3]

        # Fall back to the default config when the framework context does not
        # define one for this module.  (Was a bare `except:`; narrowed so
        # KeyboardInterrupt/SystemExit are no longer swallowed.)
        try:
            self.module_config_path = context.config_path['quality_control']
            print("--->",self.__class__.__name__,": self.module_config_path =",self.module_config_path)
        except Exception:
            self.module_config_path = DEFAULT_CFG_PATH

        print("{} class: self.module_config_path = {}".format(self.__class__.__name__,self.module_config_path))

        print("Starting logger...")
        self.logger = start_logger(self.__class__.__name__, self.module_config_path)

        if self.logger is not None:
            print("--->self.logger is not None...")
        else:
            print("--->self.logger is None...")

        self.logger.info('Started {}'.format(self.__class__.__name__))
        self.logger.debug('module_config_path = {}'.format(self.module_config_path))

        # Read module configuration; a missing/unreadable file is fatal.
        module_config_obj = cp.ConfigParser()
        res = module_config_obj.read(self.module_config_path)
        if res == []:
            raise IOError('failed to read {}'.format(self.module_config_path))

        module_param_cfg = module_config_obj['PARAM']

        debug_level_cfg_str = module_param_cfg.get('debug_level')
        self.debug_level_cfg = ast.literal_eval(debug_level_cfg_str)

        self.logger.info('Type of self.debug_level_cfg = {}'.format(type(self.debug_level_cfg)))

    def _perform(self):

        """
        Run all applicable QC tests on the stored KPF object.

        Returns exitcode:
            0 = Normal
        """

        quality_control_exit_code = 0

        # Execute appropriate QC tests
        self.kpf_object = execute_all_QCs(self.kpf_object, self.data_level_str, logger=self.logger)

        # Optionally list QC metrics.
        if self.qc_list_flag == 1:
            # NOTE(review): `qc_obj` is not defined anywhere in this module, so
            # this line raises NameError whenever qc_list_flag == 1.  It
            # presumably should call into the imported `qc` module's
            # QC-definitions object -- confirm against that module's API.
            qc_obj.qcdefinitions.list_qc_metrics()

        # Finish.
        self.logger.info('Finished {}'.format(self.__class__.__name__))

        return Arguments([quality_control_exit_code, self.kpf_object])
|
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@KPF-Pipeline-master@modules@quality_control@src@quality_control_framework.py@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "spicy-oil/hfs_fit",
"repo_path": "hfs_fit_extracted/hfs_fit-master/tests/conftest.py",
"type": "Python"
}
|
spicy-oilREPO_NAMEhfs_fitPATH_START.@hfs_fit_extracted@hfs_fit-master@tests@conftest.py@.PATH_END.py
|
|
{
"filename": "README.md",
"repo_name": "lesgourg/class_public",
"repo_path": "class_public_extracted/class_public-master/external/HyRec2020/README.md",
"type": "Markdown"
}
|
# HYREC-2
By Yacine Ali-Haimoud and Chris Hirata (2010-17)
with contributions from Nanoom Lee (2020).
Version July 2020.
See readme.pdf for detailed info.
|
lesgourgREPO_NAMEclass_publicPATH_START.@class_public_extracted@class_public-master@external@HyRec2020@README.md@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/window/moments/conftest.py",
"type": "Python"
}
|
import itertools
import numpy as np
import pytest
from pandas import (
DataFrame,
Series,
notna,
)
def create_series():
    """Return the fixed set of small Series covering the moment-test edge
    cases: empty, all-NaN, constant, monotone, and NaN-interleaved."""
    nan = np.nan
    return [
        Series(dtype=np.float64, name="a"),
        Series([nan] * 5),
        Series([1.0] * 5),
        Series(list(range(5, 0, -1))),
        Series(list(range(5))),
        Series([nan, 1.0, nan, 1.0, 1.0]),
        Series([nan, 1.0, nan, 2.0, 3.0]),
        Series([nan, 1.0, nan, 3.0, 2.0]),
    ]
def create_dataframes():
    """Return DataFrame fixtures: two duplicate-column frames plus one
    single-column frame built from each Series fixture."""
    frames = [
        DataFrame(columns=["a", "a"]),
        DataFrame(np.arange(15).reshape((5, 3)), columns=["a", "a", 99]),
    ]
    frames.extend(DataFrame(s) for s in create_series())
    return frames
def is_constant(x):
    """Return True when *x* (Series or DataFrame) holds exactly one distinct
    non-NaN value."""
    flat = x.values.ravel("K")
    distinct = set(flat[notna(flat)])
    return len(distinct) == 1
@pytest.fixture(
    params=(
        obj
        for obj in itertools.chain(create_series(), create_dataframes())
        if is_constant(obj)
    ),
)
def consistent_data(request):
    """Only the fixtures whose non-NaN values are all identical (as judged
    by is_constant)."""
    return request.param
@pytest.fixture(params=create_series())
def series_data(request):
    """One Series fixture from create_series() per parametrized run."""
    return request.param
@pytest.fixture(params=itertools.chain(create_series(), create_dataframes()))
def all_data(request):
    """
    Parametrized over every Series and DataFrame fixture.

    Test:
        - Empty Series / DataFrame
        - All NaN
        - All consistent value
        - Monotonically decreasing
        - Monotonically increasing
        - Monotonically consistent with NaNs
        - Monotonically increasing with NaNs
        - Monotonically decreasing with NaNs
    """
    return request.param
@pytest.fixture(params=[0, 2])
def min_periods(request):
    """Candidate ``min_periods`` values for the rolling-moment tests."""
    return request.param
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@window@moments@conftest.py@.PATH_END.py
|
{
"filename": "fishermatrix.py",
"repo_name": "janosch314/GWFish",
"repo_path": "GWFish_extracted/GWFish-main/GWFish/modules/fishermatrix.py",
"type": "Python"
}
|
import numpy as np
import GWFish.modules.waveforms as wf
import GWFish.modules.detection as det
import GWFish.modules.auxiliary as aux
import GWFish.modules.fft as fft
import copy
import pandas as pd
from typing import Optional, Union
from tqdm import tqdm
import logging
from pathlib import Path
def invertSVD(matrix):
    """Pseudo-invert a matrix via a truncated SVD.

    The matrix is first rescaled by the outer product of the square roots of
    its diagonal (giving it a unit diagonal) to improve conditioning;
    singular values below a fixed threshold are discarded, and the result is
    scaled back.  Returns ``(inverse, S)``, where ``S`` holds the singular
    values of the rescaled matrix.
    """
    thresh = 1e-10

    # Rescale to unit diagonal: matrix_norm = M / (sqrt(diag) outer sqrt(diag)).
    dm = np.sqrt(np.diag(matrix))
    normalizer = np.outer(dm, dm)
    matrix_norm = matrix / normalizer

    U, S, Vh = np.linalg.svd(matrix_norm)

    # Keep only the well-conditioned singular values.
    kVal = sum(S > thresh)
    logging.debug(f'Inverting a matrix keeping {kVal}/{len(S)} singular values')

    truncated_inverse = U[:, 0:kVal] @ np.diag(1. / S[0:kVal]) @ Vh[0:kVal, :]

    # Undo the rescaling on the inverse.
    return truncated_inverse / normalizer, S
def fft_derivs_at_detectors(deriv_list, frequency_vector):
    """
    A wrapper for fft_lal_timeseries: FFT each time-domain derivative and
    stack the results into a (frequency, derivative) array restricted to the
    band spanned by *frequency_vector*.

    Parameters
    ----------
    deriv_list : list
        Time-domain derivatives (LAL timeseries-like objects) to transform.
    frequency_vector : numpy.ndarray, shape (n_freq, 1)
        Column vector of frequencies; assumed uniformly spaced.
    """
    # Frequency resolution of the (uniform) grid.
    delta_f = frequency_vector[1,0] - frequency_vector[0,0]
    ffd_deriv_list = []
    for deriv in deriv_list:
        ffd_deriv_list.append(fft.fft_lal_timeseries(deriv, delta_f, f_start=0.).data.data)

    # Because f_start = 0 Hz, we need to mask some frequencies: keep only the
    # bins between the first and last entry of frequency_vector (inclusive).
    idx_f_low = int(frequency_vector[0,0]/delta_f)
    idx_f_high = int(frequency_vector[-1,0]/delta_f)

    return np.vstack(ffd_deriv_list).T[idx_f_low:idx_f_high+1,:]
class Derivative:
    """
    Standard GWFish waveform derivative class, based on finite differencing in frequency domain.
    Calculates derivatives with respect to geocent_time, merger phase, and distance analytically.
    Derivatives of other parameters are calculated numerically.
    eps: 1e-5, this follows the simple "cube root of numerical precision" recommendation, which is 1e-16 for double
    """
    def __init__(self, waveform, parameters, detector, eps=1e-5, waveform_class=wf.Waveform):
        # waveform: approximant name; parameters: dict-like source parameters;
        # detector: det.Detector supplying the frequency grid; eps: relative
        # finite-difference step; waveform_class: waveform wrapper to use.
        self.waveform = waveform
        self.detector = detector
        self.eps = eps
        self.waveform_class = waveform_class
        self.data_params = {'frequencyvector': detector.frequencyvector, 'f_ref': 50.}
        self.waveform_object = waveform_class(waveform, parameters, self.data_params)
        # Lazily-computed caches; the property getters below fill them on
        # first access (assigning None here goes through the setters).
        self.waveform_at_parameters = None
        self.projection_at_parameters = None

        # For central parameters and their epsilon-neighbourhood
        self.local_params = parameters.copy()
        self.pv_set1 = parameters.copy()
        self.pv_set2 = parameters.copy()

        # Coalescence time, reused when shifting geocent_time during
        # numerical differentiation.
        self.tc = self.local_params['geocent_time']

    @property
    def waveform_at_parameters(self):
        """
        Return a waveform at the point in parameter space determined by the parameters argument.
        Returns tuple, (wave, t_of_f).  Computed once and cached.
        """
        if self._waveform_at_parameters is None:
            wave = self.waveform_object()
            t_of_f = self.waveform_object.t_of_f
            self._waveform_at_parameters = (wave, t_of_f)
        return self._waveform_at_parameters

    @waveform_at_parameters.setter
    def waveform_at_parameters(self, new_waveform_data):
        self._waveform_at_parameters = new_waveform_data

    @property
    def projection_at_parameters(self):
        # Detector-projected strain at the central parameters; computed once
        # and cached.
        if self._projection_at_parameters is None:
            self._projection_at_parameters = det.projection(self.local_params, self.detector,
                                                            self.waveform_at_parameters[0], # wave
                                                            self.waveform_at_parameters[1]) # t(f)
        return self._projection_at_parameters

    @projection_at_parameters.setter
    def projection_at_parameters(self, new_projection_data):
        self._projection_at_parameters = new_projection_data

    def with_respect_to(self, target_parameter):
        """
        Return a derivative with respect to target_parameter at the point in
        parameter space determined by the argument parameters.

        Distance, time and phase derivatives are analytic; every other
        parameter is differenced numerically with a central difference.
        """
        if target_parameter == 'luminosity_distance':
            # amplitude scales as 1/d_L, so dh/dd_L = -h/d_L
            derivative = -1. / self.local_params[target_parameter] * self.projection_at_parameters
        elif target_parameter == 'geocent_time':
            # a time shift is a phase factor exp(2*pi*i*f*t) in frequency domain
            derivative = 2j * np.pi * self.detector.frequencyvector * self.projection_at_parameters
        elif target_parameter == 'phase':
            # phase enters as exp(-i*phase) (hence the -i factor); see the
            # matching convention in the waveform module
            derivative = -1j * self.projection_at_parameters
        else:
            pv = self.local_params[target_parameter]
            # mass-like parameters get a tighter relative step
            if target_parameter in ['chirp_mass', 'chirp_mass_source', 'mass_1', 'mass_2', 'mass_1_source', 'mass_2_source']:
                dp = 1e-8 * pv
            else:
                dp = np.maximum(self.eps, self.eps * pv)

            self.pv_set1 = self.local_params.copy()
            self.pv_set2 = self.local_params.copy()

            # central difference: evaluate at pv -/+ dp/2
            self.pv_set1[target_parameter] = pv - dp / 2.
            self.pv_set2[target_parameter] = pv + dp / 2.

            if target_parameter in ['ra', 'dec', 'psi']:  # these parameters do not influence the waveform
                # only the detector projection changes, so reuse the cached wave
                signal1 = det.projection(self.pv_set1, self.detector,
                                         self.waveform_at_parameters[0],
                                         self.waveform_at_parameters[1])
                signal2 = det.projection(self.pv_set2, self.detector,
                                         self.waveform_at_parameters[0],
                                         self.waveform_at_parameters[1])

                derivative = (signal2 - signal1) / dp
            else:
                # to improve precision of numerical differentiation: generate
                # the shifted waveforms at geocent_time = 0 and restore the
                # time-shift phase factor analytically afterwards
                self.pv_set1['geocent_time'] = 0.
                self.pv_set2['geocent_time'] = 0.

                waveform_obj1 = self.waveform_class(self.waveform, self.pv_set1, self.data_params)
                wave1 = waveform_obj1()
                t_of_f1 = waveform_obj1.t_of_f

                waveform_obj2 = self.waveform_class(self.waveform, self.pv_set2, self.data_params)
                wave2 = waveform_obj2()
                t_of_f2 = waveform_obj2.t_of_f

                self.pv_set1['geocent_time'] = self.tc
                self.pv_set2['geocent_time'] = self.tc

                signal1 = det.projection(self.pv_set1, self.detector, wave1, t_of_f1 + self.tc)
                signal2 = det.projection(self.pv_set2, self.detector, wave2, t_of_f2 + self.tc)

                derivative = np.exp(2j * np.pi * self.detector.frequencyvector \
                                    * self.tc) * (signal2 - signal1) / dp

        # restore the central parameters in the shared waveform object
        self.waveform_object.update_gw_params(self.local_params)

        return derivative

    def __call__(self, target_parameter):
        # convenience alias for with_respect_to()
        return self.with_respect_to(target_parameter)
class FisherMatrix:
    """Lazily-evaluated Fisher information matrix for a single detector.

    Entry (i, j) is the noise-weighted scalar product of the waveform
    derivatives with respect to fisher_parameters[i] and [j]; the matrix is
    computed on first access of ``fm`` and cached.
    """

    def __init__(self, waveform, parameters, fisher_parameters, detector, eps=1e-5, waveform_class=wf.Waveform):
        self.fisher_parameters = fisher_parameters
        self.detector = detector
        self.derivative = Derivative(waveform, parameters, detector, eps=eps, waveform_class=waveform_class)
        self.nd = len(fisher_parameters)
        # goes through the setter below: marks the cache as empty
        self.fm = None

    def update_fm(self):
        """Fill the (nd, nd) matrix of derivative scalar products."""
        self._fm = np.zeros((self.nd, self.nd))
        for p1 in np.arange(self.nd):
            deriv1_p = self.fisher_parameters[p1]
            deriv1 = self.derivative(deriv1_p)
            self._fm[p1, p1] = np.sum(aux.scalar_product(deriv1, deriv1, self.detector), axis=0)
            # only the upper triangle is computed; symmetry fills the rest
            for p2 in np.arange(p1+1, self.nd):
                deriv2_p = self.fisher_parameters[p2]
                deriv2 = self.derivative(deriv2_p)
                self._fm[p1, p2] = np.sum(aux.scalar_product(deriv1, deriv2, self.detector), axis=0)
                self._fm[p2, p1] = self._fm[p1, p2]

    @property
    def fm(self):
        # compute once, then serve the cached matrix
        if self._fm is None:
            self.update_fm()
        return self._fm

    @fm.setter
    def fm(self, hardcode_fm):
        self._fm = hardcode_fm

    def __call__(self):
        return self.fm
def sky_localization_area(
    network_fisher_inverse: np.ndarray,
    declination_angle: np.ndarray,
    right_ascension_index: int,
    declination_index: int,
) -> float:
    """
    Compute the 1-sigma sky localization ellipse area starting
    from the full network Fisher matrix inverse and the inclination.
    """
    # Covariance sub-block for the (ra, dec) pair.
    var_ra = network_fisher_inverse[right_ascension_index, right_ascension_index]
    var_dec = network_fisher_inverse[declination_index, declination_index]
    cov = network_fisher_inverse[right_ascension_index, declination_index]

    # Ellipse area: pi * sqrt(det of the 2x2 covariance), with the cos(dec)
    # factor accounting for the sky metric.
    ellipse_determinant = var_ra * var_dec - cov ** 2
    return np.pi * np.abs(np.cos(declination_angle)) * np.sqrt(ellipse_determinant)
def sky_localization_percentile_factor(
    percentile: float=90.) -> float:
    r"""Conversion factor $C_{X\%}$ to go from the sky localization area provided
    by GWFish (one sigma, in steradians) to the X% contour, in degrees squared.

    $$ \Delta \Omega_{X\%} = C_{X\%} \Delta \Omega_{\text{GWFish output}} $$

    :param percentile: Percentile of the sky localization area.
    :return: Conversion factor $C_{X\%}$
    """
    # -2 ln(1 - p) is the chi-squared quantile with 2 degrees of freedom;
    # the second factor converts steradians to square degrees.
    missing_fraction = 1 - percentile / 100.
    steradian_to_deg2 = (180 / np.pi) ** 2
    return -2 * np.log(missing_fraction) * steradian_to_deg2
def compute_detector_fisher(
    detector: det.Detector,
    signal_parameter_values: Union[pd.DataFrame, dict[str, float]],
    fisher_parameters: Optional[list[str]] = None,
    waveform_model: str = wf.DEFAULT_WAVEFORM_MODEL,
    waveform_class: type(wf.Waveform) = wf.LALFD_Waveform,
    use_duty_cycle: bool = False,
    redefine_tf_vectors: bool = False,
    long_wavelength: bool = True,
) -> tuple[np.ndarray, float]:
    """Compute the Fisher matrix and SNR for a single detector.

    Example usage:

    ```
    >>> from GWFish.modules.detection import Detector
    >>> detector = Detector('ET')
    >>> params = {
    ...     'mass_1': 10.,
    ...     'mass_2': 10.,
    ...     'luminosity_distance': 1000.,
    ...     'theta_jn': 0.,
    ...     'ra': 0.,
    ...     'dec': 0.,
    ...     'phase': 0.,
    ...     'psi': 0.,
    ...     'geocent_time': 1e9,
    ... }
    >>> fisher, detector_SNR_square = compute_detector_fisher(detector, params)
    >>> print(fisher.shape)
    (9, 9)
    >>> print(f'{np.sqrt(detector_SNR_square):.0f}')
    260
    ```

    :param detector: The detector to compute the Fisher matrix for
    :param signal_parameter_values: The parameter values for the signal. They can be a dictionary of parameter names and values, or a single-row pandas DataFrame with the parameter names as columns.
    :param fisher_parameters: The parameters to compute the Fisher matrix for. If None, all parameters are used.
    :param waveform_model: The waveform model to use (see [choosing an approximant](../how-to/choosing_an_approximant.md));
    :param waveform_class: The waveform class to use (see [choosing an approximant](../how-to/choosing_an_approximant.md));
    :param use_duty_cycle: Whether to use the detector duty cycle (i.e. stochastically set the SNR to zero some of the time); defaults to `False`
    :param redefine_tf_vectors: Whether to redefine the time-frequency vectors in order to correctly model signals with small frequency evolution. Defaults to `False`.
    :return: The Fisher matrix, and the square of the detector SNR.
    """
    # Generate the frequency-domain waveform on this detector's grid.
    data_params = {
        'frequencyvector': detector.frequencyvector,
        'f_ref': 50.
    }
    waveform_obj = waveform_class(waveform_model, signal_parameter_values, data_params)
    wave = waveform_obj()
    t_of_f = waveform_obj.t_of_f

    # Project onto the detector; optionally let the projection redefine the
    # time/frequency grids for slowly-evolving signals.
    if redefine_tf_vectors:
        signal, timevector, frequencyvector = det.projection(signal_parameter_values, detector, wave, t_of_f, redefine_tf_vectors=True, long_wavelength_approx = long_wavelength)
    else:
        signal = det.projection(signal_parameter_values, detector, wave, t_of_f, long_wavelength_approx = long_wavelength)
        frequencyvector = detector.frequencyvector[:, 0]

    # Total SNR^2 is the sum over the detector components (e.g. arms).
    component_SNRs = det.SNR(detector, signal, use_duty_cycle, frequencyvector=frequencyvector)
    detector_SNR_square = np.sum(component_SNRs ** 2)

    # Default to differentiating with respect to every provided parameter.
    if fisher_parameters is None:
        if isinstance(signal_parameter_values, dict):
            fisher_parameters = list(signal_parameter_values.keys())
        else:
            fisher_parameters = signal_parameter_values.columns

    return FisherMatrix(waveform_model, signal_parameter_values, fisher_parameters, detector, waveform_class=waveform_class).fm, detector_SNR_square
def compute_network_errors(
    network: det.Network,
    parameter_values: pd.DataFrame,
    fisher_parameters: Optional[list[str]] = None,
    waveform_model: str = wf.DEFAULT_WAVEFORM_MODEL,
    waveform_class = wf.LALFD_Waveform,
    use_duty_cycle: bool = False,
    redefine_tf_vectors: bool = False,
    save_matrices: bool = False,
    save_matrices_path: Union[Path, str] = Path('.'),
    matrix_naming_postfix: str = '',
    long_wavelength: bool = True,
) -> tuple[np.ndarray, np.ndarray, Optional[np.ndarray]]:
    """
    Compute Fisher matrix errors for a network whose
    SNR and Fisher matrices have already been calculated.

    Will only return output for the `n_above_thr` signals
    for which the network SNR is above `network.detection_SNR[1]`.

    :param network: detector network to use
    :param parameter_values: dataframe with parameters for one or more signals
    :param fisher_parameters: list of parameters to use for the Fisher matrix analysis - if `None` (default), all waveform parameters are used
    :param waveform_model: waveform model to use - refer to [choosing an approximant](../how-to/choosing_an_approximant.md)
    :param waveform_class: waveform class to use - refer to [choosing an approximant](../how-to/choosing_an_approximant.md)
    :param redefine_tf_vectors: Whether to redefine the time-frequency vectors in order to correctly model signals with small frequency evolution. Defaults to `False`.
    :param use_duty_cycle: Whether to use the detector duty cycle (i.e. stochastically set the SNR to zero some of the time); defaults to `False`
    :param save_matrices: Whether to save the Fisher matrices and their inverses to disk; defaults to `False`
    :param save_matrices_path: Path (expressed with Pathlib or through a string) where to save the Fisher matrices and their inverses to disk; defaults to `Path('.')` (the current folder)
    :param matrix_naming_postfix: string to be appended to the names of the Fisher matrices and their inverses: they will look like `fisher_matrices_postfix.npy` and `inv_fisher_matrices_postfix.npy`
    :return:
        - `detected`: array with shape `(n_above_thr,)` - array of indices for the detected signals.
        - `network_snr`: array with shape `(n_signals,)` - Network SNR for all signals.
        - `parameter_errors`: array with shape `(n_signals, n_parameters)` - One-sigma Fisher errors for the parameters.
        - `sky_localization`: array with shape `(n_signals,)` or `None` - One-sigma sky localization area in steradians, returned if the signals have both right ascension and declination, or `None` otherwise.
    """
    if fisher_parameters is None:
        fisher_parameters = list(parameter_values.keys())
    else:
        # BUGFIX: work on a copy, so the remove() calls below no longer
        # mutate the caller's list in place.
        fisher_parameters = list(fisher_parameters)
    # these columns are inputs, not parameters we differentiate over
    if 'max_frequency_cutoff' in fisher_parameters:
        fisher_parameters.remove('max_frequency_cutoff')
    if 'redshift' in fisher_parameters:
        fisher_parameters.remove('redshift')

    n_params = len(fisher_parameters)
    n_signals = len(parameter_values)
    assert n_params > 0
    assert n_signals > 0

    if isinstance(save_matrices_path, str):
        save_matrices_path = Path(save_matrices_path)
    if save_matrices:
        save_matrices_path.mkdir(parents=True, exist_ok=True)
        fisher_matrices = np.zeros((n_signals, n_params, n_params))
        inv_fisher_matrices = np.zeros((n_signals, n_params, n_params))

    # Sky-localization areas can only be computed when both sky angles are
    # among the Fisher parameters.
    signals_havesky = False
    if ("ra" in fisher_parameters) and ("dec" in fisher_parameters):
        signals_havesky = True
        i_ra = fisher_parameters.index("ra")
        i_dec = fisher_parameters.index("dec")

    detector_snr_thr, network_snr_thr = network.detection_SNR

    parameter_errors = np.zeros((n_signals, n_params))
    if signals_havesky:
        sky_localization = np.zeros((n_signals,))
    network_snr = np.zeros((n_signals,))

    for k in tqdm(range(n_signals)):
        network_fisher_matrix = np.zeros((n_params, n_params))

        network_snr_square = 0.
        signal_parameter_values = parameter_values.iloc[k]

        # Accumulate Fisher information and SNR^2 over the detectors; only
        # detectors individually above threshold contribute information.
        for detector in network.detectors:
            detector_fisher, detector_snr_square = compute_detector_fisher(detector, signal_parameter_values, fisher_parameters, waveform_model, waveform_class, use_duty_cycle, long_wavelength = long_wavelength)
            network_snr_square += detector_snr_square
            if np.sqrt(detector_snr_square) > detector_snr_thr:
                network_fisher_matrix += detector_fisher

        network_fisher_inverse, _ = invertSVD(network_fisher_matrix)
        if save_matrices:
            fisher_matrices[k, :, :] = network_fisher_matrix
            inv_fisher_matrices[k, :, :] = network_fisher_inverse

        # one-sigma errors are the square roots of the covariance diagonal
        parameter_errors[k, :] = np.sqrt(np.diagonal(network_fisher_inverse))

        network_snr[k] = np.sqrt(network_snr_square)

        if signals_havesky:
            sky_localization[k] = sky_localization_area(
                network_fisher_inverse, parameter_values["dec"].iloc[k], i_ra, i_dec
            )
    detected, = np.where(network_snr > network_snr_thr)

    if save_matrices:
        if matrix_naming_postfix != '':
            if not matrix_naming_postfix.startswith('_'):
                matrix_naming_postfix = f'_{matrix_naming_postfix}'
        # only the detected signals' matrices are written to disk
        fisher_matrices = fisher_matrices[detected, :, :]
        inv_fisher_matrices = inv_fisher_matrices[detected, :, :]
        np.save(save_matrices_path / f"fisher_matrices{matrix_naming_postfix}.npy", fisher_matrices)
        np.save(save_matrices_path / f"inv_fisher_matrices{matrix_naming_postfix}.npy", inv_fisher_matrices)

    if signals_havesky:
        return (
            detected,
            network_snr,
            parameter_errors,
            sky_localization,
        )

    return detected, network_snr, parameter_errors, None
def errors_file_name(
    network: det.Network, sub_network_ids: list[int], population_name: str
) -> str:
    """Build the output-file stem for a sub-network's error table."""
    detector_names = (network.detectors[k].name for k in sub_network_ids)
    sub_network = "_".join(detector_names)
    snr_threshold = network.detection_SNR[1]
    return f"Errors_{sub_network}_{population_name}_SNR{snr_threshold:.0f}"
def output_to_txt_file(
    parameter_values: pd.DataFrame,
    network_snr: np.ndarray,
    parameter_errors: np.ndarray,
    sky_localization: Optional[np.ndarray],
    fisher_parameters: list[str],
    filename: Union[str, Path],
    decimal_output_format: str = '%.3E'
) -> None:
    """Write SNRs, parameter values and Fisher errors to a text file.

    One whitespace-separated row per signal, with a one-line header of
    column names.  A final ``err_sky_location`` column is appended only
    when ``sky_localization`` is given.  The file is ``filename`` with
    its suffix forced to ``.txt``.
    """
    if isinstance(filename, str):
        filename = Path(filename)
    error_columns = ["err_" + name for name in fisher_parameters]
    header = (
        "network_SNR "
        + " ".join(parameter_values.keys())
        + " "
        + " ".join(error_columns)
    )
    save_data = np.c_[network_snr, parameter_values, parameter_errors]
    if sky_localization is not None:
        header += " err_sky_location"
        save_data = np.c_[save_data, sky_localization]
    # first column (the SNR) is printed via %s, the rest in the
    # requested decimal format
    n_numeric_columns = save_data.shape[1] - 1
    row_format = "%s " + " ".join([decimal_output_format] * n_numeric_columns)
    np.savetxt(
        filename.with_suffix(".txt"),
        save_data,
        delimiter=" ",
        header=header,
        comments="",
        fmt=row_format,
    )
def analyze_and_save_to_txt(
    network: det.Network,
    parameter_values: pd.DataFrame,
    fisher_parameters: list[str],
    sub_network_ids_list: list[list[int]],
    population_name: str,
    save_path: Optional[Union[Path, str]] = None,
    save_matrices: bool = False,
    decimal_output_format: str = '%.3E',
    **kwargs
) -> None:
    """Run the Fisher-matrix error analysis for each requested sub-network
    and save one text file of results per sub-network.

    ``save_path`` defaults to the current working directory; extra keyword
    arguments are forwarded to ``compute_network_errors``.
    """
    if save_path is None:
        save_path = Path().resolve()
    if isinstance(save_path, str):
        save_path = Path(save_path)
    for sub_network_ids in sub_network_ids_list:
        partial_network = network.partial(sub_network_ids)
        filename = errors_file_name(
            network=network,
            sub_network_ids=sub_network_ids,
            population_name=population_name,
        )
        # saved Fisher matrices (if any) are tagged with the same stem as
        # the text file, minus the leading "Errors" token
        detected, network_snr, errors, sky_localization = compute_network_errors(
            network=partial_network,
            parameter_values=parameter_values,
            fisher_parameters=fisher_parameters,
            save_matrices=save_matrices,
            save_matrices_path=save_path,
            matrix_naming_postfix='_'.join(filename.split('_')[1:]),
            **kwargs,
        )
        # only signals above the network SNR threshold are written out
        output_to_txt_file(
            parameter_values=parameter_values.iloc[detected],
            network_snr=network_snr[detected],
            parameter_errors=errors[detected, :],
            sky_localization=(
                sky_localization[detected] if sky_localization is not None else None
            ),
            fisher_parameters=fisher_parameters,
            filename=save_path/filename,
            decimal_output_format=decimal_output_format,
        )
|
janosch314REPO_NAMEGWFishPATH_START.@GWFish_extracted@GWFish-main@GWFish@modules@fishermatrix.py@.PATH_END.py
|
{
"filename": "checkpolconvertfringe.py",
"repo_name": "marti-vidal-i/PolConvert",
"repo_path": "PolConvert_extracted/PolConvert-main/PP/checkpolconvertfringe.py",
"type": "Python"
}
|
#!/usr/bin/python
#
# Copyright (c) Ivan Marti-Vidal 2015-2023, University of Valencia (Spain)
# and Geoffrey Crew 2015-2023, Massachusetts Institute of Technology
#
# Script to open and assess the POLCONVERT.FRINGE_* binary files.
# Code cribbed from TOP/task_polconvert.py around line 2550 or so.
# As usual with stupid python-numpy-pylab crap...things get rather out of
# control rather quickly. However this ends up as a bit of a mini-fourfit.
#
'''
checkpolconvertfringe.py -- a program to check POLCONVERT.FRINGE binary files
'''
import argparse
import glob
import numpy as np
import os
import re
import struct as stk
import subprocess
import sys
import warnings
# pylab is "deprecated" so we've converted to matplotlib.* and played with
# it a bit further to try for a make for a more useful postmortem tool.
try:
import matplotlib.pyplot as pl
except:
try:
import pylab as pl
except Exception as ex:
print('at least one of matplotlib or pylab must be available')
raise(ex)
def formatDescription(o):
    '''
    Build the header and footer legend text for the fringe plot from
    o.description, plus the output file names.  Returns the tuple
    (header, footer, saved-graphic-name, text-file-name).  Note that
    o.description['snrs'] is reversed in place so the SNRs print in
    LL,LR,RL,RR order like the AMPs.
    '''
    desc = o.description
    header = 'Start:  %s\nFinish: %s' % (desc['time0'], desc['timex'])
    quad = "LL %8.2f LR %8.2f RL %8.2f RR %8.2f"
    footer = desc['antennas'] + ("AMPs: " + quad) % desc['amps']
    # in-place reversal is deliberate: 'snrs' arrives in RR,RL,LR,LL order
    desc['snrs'].reverse()
    footer += ("\nSNRs: " + quad) % tuple(desc['snrs'])
    saved = '%s.%s' % (o.name, o.ext)
    pname = '%s.txt' % o.name
    return header, footer, saved, pname
def antennaBlock(legend, antprow, antdict):
    '''
    Render the antenna index->name map as rows of antprow entries,
    each row prefixed with `legend` and terminated with a newline.
    With antprow == 0 a per-row count is chosen automatically (two
    roughly equal rows when there are more than ten antennas).
    '''
    if antprow == 0:
        antprow = (len(antdict) + 1) // 2 if len(antdict) > 10 else len(antdict)
    cells = [" %d:%s" % (key, antdict[key]) for key in antdict]
    # pad with blank cells so the count is a whole number of rows
    missing = (antprow - len(cells) % antprow) % antprow
    cells.extend([' ' * 5] * missing)
    for row in range(1 + len(cells) // antprow):
        try:
            cells[row * antprow] = legend + cells[row * antprow]
            cells[row * antprow + antprow - 1] += '\n'
        except:
            # the loop deliberately overruns by one row; ignore it
            pass
    return ''.join(cells)
def findAntennaNames(o):
    '''
    Assuming we can locate the PolConvert log, the antennas show up
    in lines such as these:
    TELESCOPE AA AT X: 2225061.285 ; Y: -5440061.738 ; Z: -2481681.151
    TELESCOPE BR AT X: -2112065.351 ; Y: -3705356.500 ; Z: 4726813.606
    TELESCOPE FD AT X: -1324009.452 ; Y: -5332181.950 ; Z: 3231962.351
    ...
    and a simple grep should suffice to complete the mapping. Apparently
    subprocess.run() is recommended if it suffices, now.

    Returns the two-letter names for antennas o.ant1 and o.ant2 ('??'
    when unknown) and stores the formatted index->name map in
    o.description['antennas'].
    '''
    pclog = "%s/PolConvert.log" % o.dir
    # NOTE(review): this early return leaves o.description['antennas']
    # unset, which formatDescription() would later KeyError on -- confirm
    # whether a missing log plus -f plotting can actually co-occur
    if not os.path.exists(pclog): return '??','??'
    if o.verb: print(' found',pclog)
    cmd = 'grep ^TELESCOPE....AT.X: %s' % pclog
    if o.verb: print(' running',cmd.split(' ')[0:2],'...\n')
    antennas = dict()
    try: # CompletedProcess tells the tale
        cpro = subprocess.run(cmd.split(' '), capture_output=True)
        if cpro.returncode == 0:
            # antenna indices are 1-based; the name is at columns 10:12
            for aa,liner in enumerate(cpro.stdout.decode().split('\n')):
                if len(liner) > 10: antennas[aa+1] = liner[10:12]
    except Exception as ex:
        if o.verb: print('Unable to dig out TELESCOPE names',str(ex))
        antennas[o.ant1] = antennas[o.ant2] = '??'
    o.description['antennas'] = antennaBlock(o.antlegend, o.antprow, antennas)
    if o.verb: print(' ',o.description['antennas'])
    return antennas[o.ant1],antennas[o.ant2]
def getAntennaNames(o):
    '''
    Parse the -a antenna-index pair once, up front, and resolve the
    corresponding station names (stored as o.ant1/o.ant2 and
    o.antenna1/o.antenna2) for later labelling.
    '''
    try:
        first, second = o.ants.split(',')
        o.ant1, o.ant2 = int(first), int(second)
    except:
        raise Exception('This is not an antenna-index pair: ' + o.ants)
    o.antenna1, o.antenna2 = findAntennaNames(o)
def dtype0(fringedata, frfile, quiet):
    '''
    Record layout for the fringe files with PANG? but no UVDIST, as
    written by DiFX 2.6 through 2.8.1 (PolConvert through 2.0.3).
    Reads the leading channel count from the open binary file and
    returns (numpy dtype, nchPlot).  The PANG? fields are junk in
    these versions.
    '''
    if not quiet:
        print('Reading', os.path.basename(fringedata), '...', end=' ')
    head = frfile.read(4)
    nchPlot = stk.unpack("i", head[:4])[0]
    if not quiet:
        print('no UVDIST')
    record = np.dtype([
        ("JDT", np.float64),
        ("ANT1", np.int32),
        ("ANT2", np.int32),
        ("PANG1", np.float64),
        ("PANG2", np.float64),
        ("MATRICES", np.complex64, 12 * nchPlot),
    ])
    return record, nchPlot
def dtype1(fringedata, frfile, quiet):
    '''
    Record layout for the fringe files with PANG? and UVDIST, written
    by DiFX 2.8.2 (PolConvert 2.0.5) and later.  Reads the channel
    count and the Parang flag from the open binary file and returns
    (numpy dtype, nchPlot).
    '''
    if not quiet:
        print('Reading', os.path.basename(fringedata), '...', end=' ')
    head = frfile.read(5)
    nchPlot, isParang = stk.unpack("i?", head)
    if not quiet:
        print('with Parang?', isParang, 'w/UVDIST')
    record = np.dtype([
        ("FILE", np.int32),
        ("JDT", np.float64),
        ("ANT1", np.int32),
        ("ANT2", np.int32),
        ("PANG1", np.float64),
        ("PANG2", np.float64),
        ("UVDIST", np.float64),
        ("MATRICES", np.complex64, 12 * nchPlot),
    ])
    return record, nchPlot
def deducePCvers(pcdir, verb):
    '''
    Deduce the fringe-file format version by grepping VERSION out of
    PolConvert.log in pcdir.  Returns '1' (UVDIST present, PolConvert
    2.0.5 and later) or '0' (earlier formats).  Raises if the log is
    missing or the grep goes wrong; returns None if grep finds nothing.
    '''
    pclog = pcdir + '/PolConvert.log'
    if not os.path.exists(pclog):
        # bug fix: the original message ran the filename into "to examine"
        raise Exception('No file ' + pclog + ' to examine.')
    cmd = 'grep VERSION %s' % pclog
    if verb: print(' running',cmd.split(' ')[0:2],'... PolConvert.log')
    try: # CompletedProcess tells the tale
        cpro = subprocess.run(cmd.split(' '), capture_output=True)
        if cpro.returncode == 0:
            versio = re.sub(r'.*VERSION ','', cpro.stdout.decode().split('\n')[0])
            # NOTE(review): lexicographic compare; correct for 2.x.y with
            # single-digit components but would misorder e.g. '2.0.10'
            if versio >= '2.0.5': vers = '1'
            else: vers = '0'
            if verb: print(' version string from log:', versio,'using "-V ',vers,'"\n')
            return vers
    except Exception as ex:
        print("Unable to work out a good choice for -V argument; try -V help")
        raise(ex)
def examineFRINGE_IF(pli, o):
    '''
    pli is the index of the file, so .../POLCONVERT.FRINGE_IF?? is
    expected to hold some binary data this task will try to unpack;
    it then reports on what it holds.  Options in o affect what is
    done with the data.  The FRINGE data file holds records by time
    for antenna pairs (including only baselines to the "plotAnt") and
    for each a matrix of unconverted, converted and the conversion
    matrices.  Returns prepPlot()'s plot-prep list for the
    o.ant1--o.ant2 baseline, or raises.
    '''
    if o.withIF: ifs = 'IF'
    else: ifs = ''
    fringedata = "%s/POLCONVERT.FRINGE/POLCONVERT.FRINGE_%s%i" % (
        o.dir,ifs,pli)
    o.thisIF = pli
    frfile = open(fringedata,"rb")
    # choose the record layout appropriate to the PolConvert version
    if o.pcvers == '': o.pcvers = deducePCvers(o.dir, o.verb)
    if o.pcvers == '0': dtype,nchPlot = dtype0(fringedata,frfile,o.quiet)
    elif o.pcvers == '1': dtype,nchPlot = dtype1(fringedata,frfile,o.quiet)
    else: raise Exception('Unsupported fringe version ' + o.pcvers)
    o.nchPlot = int(nchPlot)
    try:
        fringe = np.fromfile(frfile,dtype=dtype)
        frfile.close()
    except Exception as ex:
        raise Exception('Unable to read fringe',str(ex))
    if o.verb and not o.quiet: print(' ',os.path.basename(fringedata),
        'has ',len(fringe),'time-baseline samples and',o.nchPlot,'channels')
    x = len(fringe)-1
    if o.pcvers == '1':
        file0 = fringe[0]['FILE']
        fileX = fringe[x]['FILE']
    else:
        file0 = fileX = '--'
    # first and last record timestamps label the scan
    o.description['time0'] = 'JDT %f s = %s'%jdt(fringe[0]['JDT'])
    o.description['timex'] = 'JDT %f s = %s'%jdt(fringe[x]['JDT'])
    if not o.quiet:
        print(' [%04d] File:'%0,file0, o.description['time0'])
        print(' [%04d] File:'%x,fileX, o.description['timex'])
    ant1set = set(list(fringe[:]["ANT1"]))
    ant2set = set(list(fringe[:]["ANT2"]))
    if o.verb: print(' ANT1: ', ant1set, ', ANT2: ',ant2set)
    maxUVDIST = ''
    if o.pcvers == '1' and o.verb and not o.quiet:
        maxUVDIST = (
            ' max UVDIST %f'%np.max(fringe[:]["UVDIST"]) + '(units unknown)')
        print(' PANG1: %.2f'%np.rad2deg(np.min(fringe[:]["PANG1"])),
            '.. %.2f'%np.rad2deg(np.max(fringe[:]["PANG1"])),
            ' PANG2: %.2f'%np.rad2deg(np.min(fringe[:]["PANG2"])),
            '.. %.2f'%np.rad2deg(np.max(fringe[:]["PANG2"])),
            ' (deg);\n', maxUVDIST)
    if o.ant1 in ant1set and o.ant2 in ant2set:
        if o.verb and not o.quiet:
            print(' Prepping data on baseline', o.ant1, '(', o.antenna1, ')',
                'to', o.ant2, '(', o.antenna2, ') for plot')
        # baseline may be recorded in either antenna order
        AntEntry1 = np.logical_and(
            fringe[:]["ANT1"] == o.ant1,fringe[:]["ANT2"] == o.ant2)
        AntEntry2 = np.logical_and(
            fringe[:]["ANT2"] == o.ant1,fringe[:]["ANT1"] == o.ant2)
        AntEntry = np.logical_or(AntEntry1,AntEntry2)
        if np.sum(AntEntry)>0:
            # this is the polconverted data (columns 4..7 of the 12)
            cal12 = [ (fringe[AntEntry]["MATRICES"])[:,i::12]
                for i in range(4,8)]
            # this is the number of delay rate channels for the baseline:
            # typically this is (time-baseline samples) / (number antennas)
            o.rchan = np.shape(cal12[0])[0]
            return prepPlot(cal12, pli, o)
        else:
            # bug fix: formerly referenced undefined ant1/ant2 (NameError)
            raise Exception("No data on %d--%d baseline" % (o.ant1,o.ant2))
    else:
        # bug fix: formerly referenced undefined ant1/ant2 (NameError)
        print(o.ant1 in ant1set, o.ant2 in ant2set)
        raise Exception("The antenna pair %s has no data?" % o.ants)
def jdt(jdts):
    '''
    Convert a time in seconds since the MJD epoch (17 Nov 1858) into
    the pair (seconds, ISO-8601 timestamp string).
    '''
    import datetime
    epoch = datetime.datetime(1858, 11, 17)
    stamp = epoch + datetime.timedelta(seconds=jdts)
    return (jdts, stamp.isoformat())
def prepPlot(cal, plif, o):
    '''
    Transform the four polconverted visibility products into delay /
    delay-rate (fringe) space with a 2D FFT, fftshift-ed so zero
    frequency sits at the image center.  Locate the common RR+LL
    amplitude peak, stash the per-product amplitudes there in
    o.description["amps"], and return what the combined plot needs:
    [RR, RL, LR, LL images, overall max, peak index, IF number, peak amps].
    '''
    # double FFT of the "cal" data; fftshift swaps half-spaces on both axes
    shifted = [np.fft.fftshift(np.fft.fft2(product)) for product in cal]
    RRVis, RLVis, LRVis, LLVis = shifted
    amps = [np.abs(vis) for vis in shifted]
    RR, RL, LR, LL = amps
    # the parallel-hand sum locates the fringe peak
    RMAX = np.unravel_index(np.argmax(RR + LL), np.shape(RRVis))
    MAXVis = np.array([vis[RMAX] for vis in shifted])
    MAXl = np.array([a[RMAX] for a in amps])
    MAX = max(MAXl)
    o.description["amps"] = (LL[RMAX], LR[RMAX], RL[RMAX], RR[RMAX])
    print(" IF%d peaks at %s < +/-[%d,%d] with (RR,RL,LR,LL) Vis:" %
        (o.thisIF, repr(RMAX), int(o.rchan), int(o.nchPlot)))
    if o.verb: print(' ', MAXVis)
    print(' ', MAXl, '; overall max |Vis|: %f\n'%float(MAX))
    # provide the data actually needed for a combined plot
    return [RR, RL, LR, LL, float(MAX), RMAX, plif, MAXl]
def scaleAlias(scale):
    '''Map the alternate scale name 'loge' onto the canonical 'elog'.'''
    return 'elog' if scale == 'loge' else scale
def setScaling(scale):
    '''
    Map the -s scale name ('elog'/'loge', 'log10', 'linear', 'sqrt')
    to the forward scaling function applied to the |Vis| images.
    Raises for an unknown name.
    '''
    if scale == 'loge': scale = 'elog'
    table = {'elog': np.log, 'log10': np.log10,
             'linear': (lambda x: x), 'sqrt': np.sqrt}
    if scale not in table:
        raise Exception("scale option %s not defined (set)" % (scale))
    return table[scale]
def invScaling(scale):
    '''
    Map the -s scale name to the inverse of setScaling()'s function so
    the colorbar range can be labelled in unscaled |Vis| units.
    Raises for an unknown name.
    '''
    if scale == 'loge': scale = 'elog'
    table = {'elog': np.exp,
             'log10': (lambda x: np.power(10.0, x)),
             'linear': (lambda x: x),
             'sqrt': np.square}
    if scale not in table:
        raise Exception("scale option %s not defined (inv)" % (scale))
    return table[scale]
def plotMinimum(scale, samdev, count, sigma):
    '''
    Choose a sensible scaled-image minimum.  Linear and sqrt scalings
    can safely use zero; the log scalings use sigma times the log of
    the noise floor (sample deviation reduced by sqrt(count)), since
    log(0) is not usable.
    '''
    if scale == 'loge': scale = 'elog'
    if scale in ('linear', 'sqrt'):
        return 0.0
    noise_floor = samdev / np.sqrt(count)
    if scale == 'elog':
        return float(np.log(noise_floor) * sigma)
    if scale == 'log10':
        return float(np.log10(noise_floor) * sigma)
    raise Exception("scale option %s not defined (min)" % (scale))
def avePeakPositions(plotdata):
    '''
    Average the (rate, delay) peak indices over all IFs -- for a real
    fringe every IF should peak at (nearly) the same place.  Returns a
    descriptive string used in verbose output.
    '''
    total = None
    for entry in plotdata:
        total = entry[5] if total is None else np.add(total, entry[5])
    mean = np.divide(total, len(plotdata))
    return "(delay %.1f, delay rate %.1f)" % (float(mean[1]), float(mean[0]))
def sampleDevFromPlotdata(plotdata, ylim, xlim):
    '''
    Estimate the noise level as the median of sample deviations taken
    from regions of every product image away from the central peak.
    (The sampled regions are fixed by the ylim/xlim indices below;
    taking the median over many patches makes a few hot patches
    harmless.)
    '''
    deviations = list()
    for entry in plotdata:
        for image in entry[0:4]:
            deviations.append(np.std(image[1:ylim, 1:xlim].flatten()))
            deviations.append(np.std(image[1:ylim, xlim:-1].flatten()))
            deviations.append(np.std(image[ylim - 1, xlim:-1].flatten()))
            deviations.append(np.std(image[ylim - 1, 1:xlim].flatten()))
    return np.median(np.array(deviations))
def padSlice(mn, cen, mx, pnd, xtra):
    '''
    Index games for the windowing: shift the [mn,mx) slice and compute
    np.pad widths so that peak index pnd (offset by the xtra padding)
    lands at the window center cen.
    Returns (newmin, newmax, (pad_before, pad_after)).
    '''
    shifted = pnd + xtra
    before = max(cen - shifted, 0)
    after = max(shifted - cen, 0)
    return mn + after, mx + after, (before + xtra, after + xtra)
def computeSNRs(vizzy, maximum, count, samdev, nsigma, scale, fwdscalor):
    '''
    Return (SNRs, minimum, maximum): the estimated SNRs for the 4
    product images in vizzy plus the scaled minimum and maximum for
    the image arrays.  The maximum is the scaled total max/count; the
    minimum is zero or a sigma-scaled noise floor (see plotMinimum).
    Since we started from abs(vis) -- roughly Rayleigh distributed --
    the supplied sample deviation underestimates the true one by
    sqrt(2-pi/2) ~ 0.655136377562, which the SNR estimate corrects for.
    '''
    maximum = fwdscalor(maximum/count)
    minimum = plotMinimum(scale, samdev, count, nsigma)
    invscalor = invScaling(scale)
    # bug fix: np.array(range(4)) was an *integer* array, silently
    # truncating the stored SNRs to whole numbers; use a float array
    SNRs = np.zeros(4)
    for ii,vis in enumerate(vizzy):
        # recover unscaled max on this visibility product
        maxvis = float(invscalor(np.max(vis)))
        # SNR of the combined data, with the Rayleigh correction applied
        SNRs[ii] = ((maxvis / samdev) *
            float(np.sqrt(count)) * 0.655136377562)
    return SNRs, minimum, maximum
def parseFringeRequest(fringe, verb):
    '''
    Parse the -f fringe request "npix[,xtra[,sigma]]" and return the
    tuple (npix, xtra, sigma).  npix is forced odd (plots look nicer
    when a fringe is centered); xtra (padding) defaults to npix//2 --
    nonzero padding is needed when npix exceeds the available data --
    and sigma (noise-floor multiplier) defaults to 1.0.
    Bug fix: this formerly ignored its arguments and read the
    module-global o.fringe / o.verb; it now uses what it is passed
    (callers already pass o.fringe and o.verb, so behavior at the
    existing call site is unchanged).
    '''
    # pad with commas so split always yields at least four pieces
    npix, xtra, sigma, junk = (fringe + ',,,').split(',', maxsplit=3)
    npix = 2 * int(int(npix) / 2.0) + 1
    if xtra == '':
        xtra = npix // 2
    if sigma == '': sigma = 1.0
    xtra = int(xtra)
    sigma = float(sigma)
    if verb: print(' npix,xtra,sigma: ', npix, xtra, sigma)
    return npix, xtra, sigma
def combinePlotdata(plotdata, o):
    '''
    Should have been given list of plotdata tuples (per IF). Combine
    them and make a 2x2 image plot centered around the peaks +/- npix,
    which we do by padding with np.pad and then slicing out npix around
    the new center. We also add xtra padding so that if there is not
    much data, we still get some approximation of the original npix.
    Returns (vizzy, [minimum, maximum], aspect ratio, SNRs, AMPs), the
    things to be plotted.
    '''
    npix,xtra,sigma = parseFringeRequest(o.fringe, o.verb)
    # window geometry: centers after padding, then a symmetric window
    xcen = int((o.nchPlot+2*xtra)/2)
    ycen = int((o.rchan+2*xtra)/2)
    wind = min(npix, xcen, ycen)
    xmin, xmax = (xcen - wind, xcen + wind + 1)
    ymin, ymax = (ycen - wind, ycen + wind + 1)
    # these should all be the same if it is a real fringe
    truecenter = avePeakPositions(plotdata)
    # sample median of the original np.abs(visibilities)
    samdev = sampleDevFromPlotdata(plotdata,
        min(npix,ycen)//3, min(npix,xcen)//3)
    if o.verb: print((' %s plot %dx%d on %d peaks at %s') % (
        o.scale, 2*wind+1,2*wind+1, len(plotdata), truecenter))
    count = 0
    AMPs = np.zeros(4)
    for pd in plotdata:    # RR,RL,LR,LL, 4:MX, 5:RMAX, 6:IF, 7:MAXl
        # note that y indices precede x indices
        pndy,pndx = pd[5]
        thismax = pd[4]
        AMPs = np.add(AMPs, pd[7])
        # if are multiple peaks, this is definitely not a droid we want
        if not (type(pndx) is np.int64 and type(pndy) is np.int64 and
            thismax > 0.0): # there better be a peak somewhere
            print(' No single max from',pd[6],'so we shall ignore it')
            continue
        # accumulate a maximum value
        if count == 0: maximum = thismax
        else: maximum += thismax
        # pad the sides so that a slice window puts the peak at the center
        thisxmin,thisxmax,xpadding = padSlice(xmin,xcen,xmax,int(pndx),xtra)
        thisymin,thisymax,ypadding = padSlice(ymin,ycen,ymax,int(pndy),xtra)
        window = np.s_[thisymin:thisymax,thisxmin:thisxmax]
        pad_width = ( ypadding, xpadding )
        vis = list()
        # finally generate the centered, sliced images; padding is filled
        # with a small fraction of the noise estimate
        for vi in range(4):
            vis.append(np.pad(pd[vi], pad_width, mode='constant',
                constant_values=samdev/10.0)[window])
            if count > 0:
                vizzy[vi] = np.add(vizzy[vi], vis[vi])
        if count == 0: vizzy = vis
        count += 1
    if count == 0:
        raise Exception("Nothing to plot?")
    # average and scale
    AMPs = np.divide(AMPs, float(count))
    scalor = setScaling(o.scale)
    for vi in range(4): vizzy[vi] = scalor(np.divide(vizzy[vi], float(count)))
    # compute SNRs and scaled image max,min
    SNRs, minimum, maximum = computeSNRs(
        vizzy, maximum, count, samdev, sigma, o.scale, scalor)
    #   vizzy, scalor(maximum/count), count, samdev, sigma, o.scale, scalor)
    o.description['snrs'] = list(SNRs)
    invscalor = invScaling(o.scale)
    print(' SNRs on',o.ants,'(%s && %s)'%(o.antenna1,o.antenna2),
        SNRs,'\n %s|Vis| data e [%.2f..%.2f] +/- %.3f (std.dev)'%(
        o.scale, invscalor(minimum), invscalor(maximum), samdev))
    # return plot products; all should have same ratio, so use first
    ratio = vizzy[0].shape[1] / vizzy[0].shape[0]
    return vizzy, [minimum, maximum], ratio, SNRs, AMPs
def plotProcessing(plotdata, o):
    '''
    Combine the plotdata tuples into abs(visibility), the mx val.
    Note that we have reversed the order of visibilities to be
    the canonical alphabetical one. The 'constrained' layout will
    prevent the axis labels from getting buried, but it then doesn't
    leave much control over placement of other things.
    Produces the 2x2 fringe plot file and returns 0 on success.
    '''
    vis, vxn, ratio, SNRs, AMPs = combinePlotdata(plotdata, o)
    # panel labels; indexing below (ndx) reverses rows/cols so the
    # display order is LL,LR / RL,RR alphabetical
    lab = [ 'RR','RL','LR','LL' ]
    scalor = invScaling(o.scale)
    cbrange = '..'.join(list(map(lambda x:"%.2f"%x, scalor(vxn))))
    pl.ioff()
    # lengendary--blank lines in tile opens space for header block
    # space for footer is created by padding (above) the colorbar
    fig, axs = pl.subplots(2, 2, figsize=(8.5,11), layout='constrained',
        subplot_kw={'xticks':[], 'yticks':[]})
    fig.suptitle(('Averaged Fringes (IFs: %s)' % ','.join(o.ifused)) +
        '\nJob: ' + o.job + ' Vis(' + o.antenna1 + ' && ' + o.antenna2 + ')'
        + "\n\n\n", fontsize=14) # open up space for header
    props = dict(boxstyle='round', facecolor='snow', alpha=1.0)
    header,footer,saved,pname = formatDescription(o)
    fig.text(0.5, 0.930, header, fontsize=10, fontfamily='monospace',
        ha='center', va='center', wrap=True, bbox=props)
    fig.text(0.51, 0.130, footer, fontsize=10, fontfamily='monospace',
        ha='center', va='center', wrap=True, bbox=props)
    # subplots
    for row in range(2):
        for col in range(2):
            ndx = 2*(1-row)+(1-col)
            if o.verb:
                print(' Vis[',ndx, lab[ndx],'] range',cbrange,
                    '% 7.2f Amp %.1f'% (SNRs[ndx], AMPs[ndx]))
            ax = axs[row, col]
            ax.set_title(lab[ndx] + ' Vis., SNR %5.1f Amp %.1f' %
                (SNRs[ndx], AMPs[ndx]))
            ax.set_xlabel('delay\n')
            ax.set_ylabel('delay rate')
            im = ax.imshow(vis[ndx], vmin=vxn[0], vmax=vxn[1],
                interpolation='nearest', cmap=pl.cm.viridis, origin='lower')
    # one shared colorbar for all four panels (built from the last image,
    # which is valid since all panels share vmin/vmax)
    cbar = fig.colorbar(im, ax=axs,
        label='('+o.scale+'-scaled) |Vis(LL,LR,RL,RR)|',
        location='bottom', shrink=0.60, pad=0.12)
    # common colorbar, with updated labels for scaling: replace('-','-'),
    # but not all cbar implementations have working text.get_text(), &c.
    try:
        ttt = ['%.0f'%scalor(float(text.get_text().replace('\u2212','\u002d')))
            for text in cbar.ax.get_xticklabels()]
        warnings.filterwarnings(action='ignore', category=UserWarning)
        cbar.ax.set_xticklabels(ttt)
    except:
        print('warning: plot colorbar axes not corrected for scaling')
    fig.savefig(saved)
    plotCoda(header, footer, saved, pname, o)
    return 0
def plotCoda(header, footer, saved, pname, o):
    '''
    Wrap up after plotting: optionally publish a text summary (-P)
    alongside the graphic, report the output file names, and launch
    the requested viewer (-g) in the background.
    '''
    if o.publish:
        summary = '\n'.join([
            'Job: ' + o.job,
            'Stamp: ' + o.stamp,
            'Ants: ' + str(o.ant1) + ' v ' + str(o.ant2),
            header,
            footer]) + '\n'
        with open(pname, 'w') as fp:
            fp.write(summary)
        print("  text: '%s'" % pname)
    print("  plot: '%s'" % saved)
    if o.viewer != '':
        print('  ' + o.viewer + ' ....' + o.ext + ' launched')
        os.system('%s %s.%s &' % (o.viewer, o.name, o.ext))
def parseJobStamp(o):
    '''
    It is somewhat convenient to parse the dirname for correlation
    job number as well as timestamp (for plot labels). Do that now.
    And this is a good place to check stupid stuff.
    Sets o.description, o.job, o.stamp and the default plot basename.
    '''
    if not os.path.exists(o.dir):
        raise Exception("Directory %s does not exist" % o.dir)
    if not os.path.exists(o.dir + '/POLCONVERT.FRINGE'):
        raise Exception("No POLCONVERT.FRINGE subdir to %s" % o.dir)
    o.description = {}
    getAntennaNames(o)
    # default the plot basename from antenna names, or indices when
    # the names could not be resolved
    if o.name == '':
        if o.antenna1+o.antenna2 == '????':
            o.name = 'checkFringe-%d-%d' % (o.ant1,o.ant2)
        else:
            o.name = 'checkFringe-%s-%s' % (o.antenna1,o.antenna2)
    if o.publish:
        o.name = o.dir + '/' + o.name
    if o.verb: print('plotting to', o.name)
    try:
        # production dirs are named $job.polconvert-$timestamp
        parts = o.dir.split('.polconvert-')
        o.job = parts[0]
        o.stamp = parts[1]
    except Exception as ex:
        print(str(ex))
        o.job = ''
        o.stamp = ''
def parseIFarg(o):
    '''
    Convert the IF input option to a list of IFs to examine.
    We also make sure that the named IFs have data to plot.
    Also sets o.withIF to record which fringe-file naming scheme
    (with or without the "IF" infix) is in use.
    '''
    iflist = list()
    targetdir = "%s/POLCONVERT.FRINGE" % o.dir
    dirdir = os.path.dirname(o.dir)
    if len(dirdir) > 0: dirdir = '\n ' + dirdir + '/'
    if o.verb: print('Locating fringes in:%s\n %s' %
        (dirdir, os.path.basename(o.dir)))
    # POLCONVERT.FRINGE_* initially, later POLCONVERT.FRINGE__IF*
    o.withIF = None
    for frng in sorted(glob.glob("%s/*FRINGE_IF*" % targetdir)):
        o.withIF = True
        # NOTE(review): frng[-2:] assumes two-digit IF numbering in the
        # file names -- confirm for runs with more than 99 IFs
        iflist.append(frng[-2:])
        if o.verb: print(' ',os.path.basename(frng),'as IF',iflist[-1])
    if o.withIF is None:
        for frng in sorted(glob.glob("%s/*FRINGE_*" % targetdir)):
            iflist.append(frng[-2:])
            if o.verb: print(' ',os.path.basename(frng),'as IF',iflist[-1])
        o.withIF = False
    # if no selection provide full list
    if o.IF == '': return iflist
    # else make a cut to those requested
    ifcull = list()
    for iffy in o.IF.split(','):
        if iffy in iflist: ifcull.append(iffy)
    if o.verb: print(' limiting actions to these IFs:', ','.join(ifcull),'\n')
    if len(ifcull) == 0: print('No IFs match: -I',o.IF,'choose wisely.')
    return ifcull
def getVersion():
    '''
    Report the PolConvert version if the pcvers module is importable,
    else report that it is not available.
    (Bug fix: removed an unreachable second fallback return.)
    '''
    try:
        import pcvers
        # NOTE(review): this returns the module object itself; the caller
        # concatenates the result with a str, which would fail if pcvers
        # ever imports -- confirm the intended attribute (e.g. a version
        # string inside pcvers)
        return pcvers
    except:
        return 'not available'
def parseOptions():
    '''
    While converting data, PolConvert writes out binary data
    which it uses to either support solving for the XY phase
    or merely to plot fringes. This program examines those
    binary files and reports on what it finds.
    '''
    # the docstring doubles as the argparse description
    des = parseOptions.__doc__
    epi = 'In the typical case, you may have run PolConvert, '
    epi += 'something did not work, and you wish to verify that '
    epi += 'binary fringe files written by PolConvert, are ok (or not). '
    epi += 'For this you need at least the -d *polconvert* argument '
    epi += 'which will located the PolConvert.log and the binary '
    epi += 'fringe files. The remaining arguments controls what '
    epi += '(exactly) is done with those files. '
    epi += 'Use -f "example" for sample invocations.'
    use = '%(prog)s [options]\n\nVersion ' + getVersion()
    parser = argparse.ArgumentParser(epilog=epi, description=des, usage=use)
    major = parser.add_argument_group('Major Options')
    minor = parser.add_argument_group('Minor Options')
    picky = parser.add_argument_group('Picky Options')
    # major: where the data lives and what analysis to run
    major.add_argument('-d', '--dir', dest='dir',
        default='.', metavar='DIR', help='(Mandatory) Path to '
        'the polconvert output directory. In production processing, '
        'that is $job.polconvert-$timestamp')
    major.add_argument('-I', '--IF', dest='IF',
        default='', metavar="IF", help='This controls the IFs '
        'that will be considered. If unset, all IFs in the '
        'directory are examined. You may also supply a comma-sep '
        'list of IF numbers to process.')
    major.add_argument('-a', '--antennas', dest='ants',
        default='1,2', metavar='ANT1,ANT2', help='Indicies for the'
        ' pair of antennas to use for subsequent checking. Ideally,'
        ' the first one is a linear station (ALMA) and the second'
        ' is a short baseline to a good station.')
    major.add_argument('-f', '--fringe', dest='fringe',
        default='', help='String to configure fringing checks.'
        ' Use "help" as an argument for more information; reminder:'
        ' npix,pads,sigma')
    major.add_argument('-P', '--publish', dest='publish',
        default=False, action='store_true', help='place results in'
        ' the -d directory (a graphic and a text file).')
    #
    # minor: verbosity and presentation choices
    minor.add_argument('-v', '--verbose', dest='verb',
        default=False, action='store_true',
        help='be chatty about the work')
    minor.add_argument('-q', '--quiet', dest='quiet',
        default=False, action='store_true',
        help='this is useful if you are playing with the plots'
        ' and no longer need to see the fringe file details.')
    minor.add_argument('-n', '--name', dest='name',
        default='', help='Basename for any plot generated. If no name'
        ' is supplied, one will be created for you based on the baseline.')
    minor.add_argument('-V', '--pcvers', dest='pcvers',
        default='', help='Fringe file version: 1 = 2.0.5 and later'
        ' (with UVDIST), 0 = 2.0.3 and earlier (without UVDIST); or "help"'
        ' to print out a more complete explanation.')
    minor.add_argument('-s', '--scale', dest='scale',
        default='log10', help='One of "elog" (or "loge"),'
        ' "log10" (the default), "linear", "sqrt". Use "help" '
        ' for more information.')
    minor.add_argument('-g', '--viewer', dest='viewer',
        default='', help='Name of graphic display tool, e.g.'
        ' eog, okular.... The default is "" to just make a PNG'
        ' file (see -n) and to not display it.')
    minor.add_argument('-e', '--extension', dest='ext',
        default='png', metavar='EXT', help='Graphics extension for'
        ' the file produced: png (default), pdf, ... (Cf. Matplotlib).')
    #
    # picky: numpy printing and legend layout details
    picky.add_argument('-p', '--precision', dest='prec', type=int,
        default=3, metavar=int,
        help='Precision for numpy printing if verbosity active')
    picky.add_argument('-t', '--threshold', dest='thres', type=int,
        default=20, metavar=int,
        help='Threshold for numpy printing if verbosity active')
    picky.add_argument('-w', '--linewidth', dest='width', type=int,
        default=78, metavar=int,
        help='Linewidth for numpy printing if verbosity active')
    # NOTE(review): the -L help text looks wrong -- the option supplies
    # the legend string itself, not a line count
    picky.add_argument('-L', '--antenna-legend', dest='antlegend',
        default='Ant. Map:', help='Number of lines in antenna map legend.')
    picky.add_argument('-M', '--antenna-per-row', dest='antprow',
        default='0', type=int,
        help='Number of antennas per row in map legend.')
    return parser.parse_args()
def pcvershelp():
    '''Help text printed for "-V help".'''
    return '''
    The fringe file is binary packed for numpy to read it
    The early versions had parallactic angles (not implemented)
    and as of 2.0.5 (targetted for DiFX 2.8.2), UVDIST was added.
    Use -V 0 for the earlier format and -V 1 for the later one.
    The default is to examine the PolConvert.log and make a choice.
    '''
def fringehelp():
    '''Help text printed for "-f help".'''
    return '''
    Normally polconvert generates plots of before and after the
    polconversion...with a zoom into "npix" around the peak. An
    issue is that if fringes are weak, the peak is not enough to
    work with. If the 'fringe' argument is not empty, it is parsed
    first to supply npix. Then ALL the IFs mentioned in the -I
    argument are combined into an average image, and the result is
    plotted for a window around npix. The image may need to be
    padded and you can also scale it. Use -f "more" to find out
    about that.
    '''
def fringemore():
    '''Additional help text printed for "-f more".'''
    return '''
    After npix, a comma and a second argument indicates the amount
    of padding you want. If the delay or delay-rate is at the edge,
    you will need to pad in order to combine the images. (If you
    do not supply a pad, the code will try and may fail...) A
    third argument affects the image range...use -s "help" for more
    about that.
    '''
def scalehelp():
    '''Help text printed for "-s help".'''
    return '''
    For some of the scalings, a zero minimum is safe to use. For
    log-scaled plots, however, you'll need to set the minimum. The
    final fringe argument specifies the number of sigma to multiply
    the noise floor by for the minimum. The default is 1.0, but
    you can do as you like.
    '''
def fringeexam():
    '''Sample invocations printed for "-f example".'''
    return '''
    To generate a plot from the fringe data in pdir for the 1,8 antenna
    baseline pair, put the result in pdir and display the result with eog:
      checkpolconvertfringe.py -d pdir -a 1,8 -g eog -f 50 -P
    A more verbose version with padding of 25 pixels and a 0.1-sigma
    noise floor:
      checkpolconvertfringe.py -d pdir -a 1,8 -g eog -f 50,25,0.1 -P
    '''
def somehelp(o):
    '''
    Print any requested help text and return True if some was printed
    (the caller then exits); False means proceed with normal work.
    '''
    if o.pcvers == 'help':
        print(pcvershelp())
        return True
    if o.fringe == 'help':
        print(fringehelp())
        return True
    if o.fringe == 'more':
        # bug fix: this branch formerly printed fringehelp() a second
        # time; fringemore() was defined but never called
        print(fringemore())
        return True
    if o.fringe == 'example':
        print(fringeexam())
        return True
    if o.scale == 'help':
        print(scalehelp())
        return True
    return False
#
# enter here to do the work
#
if __name__ == '__main__':
    # refuse to run under python2
    if sys.version_info.major < 3:
        print('Sorry, this code works in Python3 only.')
        sys.exit(1)
    o = parseOptions()
    if somehelp(o): sys.exit(0)
    # numpy print options only matter for the verbose diagnostics
    if o.verb:
        print('\nprinting with %d precision, %d elements, %d width' % (
            o.prec, o.thres, o.width))
        np.set_printoptions(
            precision=o.prec, threshold=o.thres, linewidth=o.width)
    errors = 0
    plotdata = list()
    parseJobStamp(o)
    o.ifused = parseIFarg(o)
    print("\nOpening POLCONVERT.FRINGE files\n")
    # examine each requested IF; a bad file counts as an error but does
    # not stop the others from being read
    for pli in o.ifused:
        try:
            plotdata.append(examineFRINGE_IF(int(pli), o))
        except Exception as ex:
            print("Unable to read IF %d successfully"%int(pli))
            print("Exception was:\n",str(ex))
            errors += 1
    print("Have plotting data for %d fringes"%len(plotdata))
    # only attempt the combined plot when -f was given
    if (o.fringe != ''):
        try:
            errors += plotProcessing(plotdata, o);
        except Exception as ex:
            print("Unable to make a plot")
            print("Exception was:\n",str(ex))
            errors += 1
    # exit status is the error count
    if errors > 0:
        print('\nall done with',errors,'errors')
        sys.exit(errors)
    else:
        print('\nall done with no errors')
        sys.exit(0)
#
# eof vim: set nospell:
#
|
marti-vidal-iREPO_NAMEPolConvertPATH_START.@PolConvert_extracted@PolConvert-main@PP@checkpolconvertfringe.py@.PATH_END.py
|
{
"filename": "acor.py",
"repo_name": "dfm/acor",
"repo_path": "acor_extracted/acor-main/acor/acor.py",
"type": "Python"
}
|
__all__ = ["acor", "function"]
import numpy as np
from . import _acor
def acor(data, maxlag=10):
    """Estimate the autocorrelation time of a time series.

    Parameters
    ----------
    data : numpy.ndarray (N,) or (M, N)
        The time series.
    maxlag : int, optional
        N must be greater than ``maxlag`` times the estimated
        autocorrelation time.

    Returns
    -------
    tau : float
        An estimate of the autocorrelation time.
    mean : float
        The sample mean of ``data``.
    sigma : float
        An estimate of the standard deviation of the sample mean.
    """
    # Copy into a fresh ndarray before handing off to the C extension.
    samples = np.array(data)
    return _acor.acor(samples, maxlag)
def function(data, maxt=None):
    """Calculate the normalized autocorrelation function of a 1D time series.

    Parameters
    ----------
    data : numpy.ndarray (N,)
        The time series.
    maxt : int, optional
        Number of lags to compute; defaults to the full series length.

    Returns
    -------
    rho : numpy.ndarray (maxt,)
        The autocorrelation function, normalized so ``rho[0] == 1``.
    """
    series = np.atleast_1d(data)
    assert len(np.shape(series)) == 1, \
        "The autocorrelation function can only by computed " \
        + "on a 1D time series."
    n_lags = len(series) if maxt is None else maxt
    rho = np.zeros(n_lags, dtype=float)
    # The C extension fills ``rho`` in place from a float copy of the data.
    _acor.function(np.array(series, dtype=float), rho)
    return rho / rho[0]
|
dfmREPO_NAMEacorPATH_START.@acor_extracted@acor-main@acor@acor.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/linalg/tests/__init__.py",
"type": "Python"
}
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@linalg@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "phase_space2.py",
"repo_name": "eugeneg88/fixed_points_brown",
"repo_path": "fixed_points_brown_extracted/fixed_points_brown-main/phase_space2.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 12 11:58:42 2024
@author: evgeni
"""
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
#%%
def ecc(jz, eps):
    """Fixed-point eccentricity for vertical angular momentum jz and eps_SA.

    Returns 0 when no real fixed-point eccentricity exists (|jz| too large).
    """
    y = 9 * eps * jz / 8
    # Squared dimensionless angular momentum at the fixed point.
    j_fix = np.fabs(jz) * (5 / 3 * (1 + y) / (1 - y)) ** 0.5
    e_squared = 1 - j_fix
    if e_squared < 0:
        return 0
    return e_squared ** 0.5
def incc(jz, eps):
    """Fixed-point inclination in degrees corresponding to ecc(jz, eps)."""
    cos_inc = jz / (1 - ecc(jz, eps) ** 2) ** 0.5
    return np.arccos(cos_inc) * 180 / np.pi
# Use LaTeX text rendering and a larger default font for all figures below.
plt.rc('text', usetex=True)
matplotlib.rcParams.update({'font.size': 20})
# Define the Hamiltonian function
def hamiltonian(ee, x, H, eps_sa):
    """Doubly-averaged quadrupole Hamiltonian plus the eps_SA correction term.

    Parameters: eccentricity ``ee``, argument of pericentre ``x`` (rad),
    vertical angular momentum ``H`` (= jz), and the single-averaging
    parameter ``eps_sa``.
    """
    G = (1 - ee ** 2) ** 0.5
    sin_sq = np.sin(x) ** 2
    # Standard secular (double-averaged) quadrupole term.
    quad_term = 6 * G ** 2 - 3 * H ** 2 + 15 * (1 - G ** 2 - H ** 2 / G ** 2 + H ** 2) * sin_sq
    # Katz-style single-averaging correction.
    katz_term = (-8 + 5 * sin_sq) * G ** 2 + 5 * sin_sq * (H ** 2 / G ** 2 - 1 - H ** 2)
    return quad_term - 27 * H * eps_sa / 8 * katz_term
def plot_energy_levels(eps, inc_deg):
    """Contour-plot the Hamiltonian in the (omega/pi, eccentricity) plane.

    eps : single-averaging parameter eps_SA; inc_deg : inclination in degrees
    (sets jz = cos(inc_deg)).  Draws onto the current matplotlib axes.
    """
    # Ranges: omega in [0, pi]; eccentricity limited by angular-momentum
    # conservation, e_max = sqrt(1 - jz**2).
    x_min, x_max = 0, np.pi
    H = np.cos(inc_deg*np.pi/180)
    G_min, G_max = H, 1
    e_min, e_max = 0, (1-G_min**2)**0.5
    # Generate a grid of omega and eccentricity values
    num_points = 300
    x_vals = np.linspace(x_min, x_max, num_points)
    e_vals = np.linspace(e_min, e_max, num_points)
    G_vals = np.linspace(G_min, G_max, num_points)
    x_grid, G_grid = np.meshgrid(x_vals, G_vals)
    x_grid, e_grid = np.meshgrid(x_vals, e_vals)
    # Calculate the Hamiltonian values at each point on the grid
    H_values = hamiltonian(e_grid, x_grid, H, eps)
    # Fraction of the energy range used for the copper contour levels;
    # scales with the fixed-point eccentricity so librating islands show up.
    p = 0.2 + 0.7*ecc(H,eps)
    plt.figure(1)
    plt.contourf(x_grid/np.pi, e_grid, H_values, 50, cmap='viridis')
    plt.colorbar(label='$\mathcal{H}$')
    plt.contour(x_grid/np.pi, e_grid, H_values, cmap='copper', levels=np.linspace(np.min(H_values)*p+np.max(H_values)*(1-p) ,np.max(H_values)+0.01,21))
    # Red contour: the separatrix energy, evaluated near e=0, omega=0.
    plt.contour(x_grid/np.pi, e_grid, H_values, colors='r', levels=[hamiltonian(0.001,0,H, eps)], linewidths=3)
    # Mark the fixed point at omega = pi/2, e = e_fix.
    plt.scatter(1/2, ecc(H,eps))
    plt.title(r'$\epsilon_{\rm SA}=$' + str(eps) + '\quad' +r'$j_{\rm z}=$' + "%.2f" % H)
# 2x3 grid of phase-space portraits: top row eps_SA = 0, bottom row 0.2,
# for inclinations 120/135/150 degrees.
plt.figure(figsize=(15,10))
plt.subplot(231)
plot_energy_levels(eps=0., inc_deg=120)
plt.ylabel('eccentricity')
plt.subplot(232)
plot_energy_levels(eps=0., inc_deg=135)
plt.subplot(233)
plot_energy_levels(eps=0., inc_deg=150)
plt.subplot(234)
plt.ylabel('eccentricity')
plot_energy_levels(eps=0.2, inc_deg=120)
plt.xlabel(r'$\omega / \pi$')
plt.subplot(235)
plot_energy_levels(eps=0.2, inc_deg=135)
plt.xlabel(r'$\omega / \pi$')
plt.subplot(236)
plot_energy_levels(eps=0.2, inc_deg=150)
plt.xlabel(r'$\omega / \pi$')
plt.subplots_adjust(left=0.05, bottom=0.1, right=0.97, top=0.94, wspace=0.16, hspace=0.18)
#%%
# Grid of eps_SA values and conversions between eps_SA and the Hill-radius
# ratio a1/r_H (rh) and back (hr).
eps_lin=np.linspace(-0.0,0.3,100)
rh = lambda eps: 3**(1/3) * eps **(2/3)
hr = lambda f: f**1.5 / 3 **0.5
plt.rc('text', usetex=True)
matplotlib.rcParams.update({'font.size': 20})
def plot_efix(inc0):
    """Plot e_fix versus eps_SA for each inclination (degrees) in ``inc0``.

    inc0 : iterable of inclinations in degrees (iterated below, so a scalar
    will raise TypeError).  Uses the module-level ``eps_lin`` grid.
    """
    fig, ax = plt.subplots(1)
    for i in range(0, len(inc0)):
        # jz = cos(inclination); curve is e_fix(eps_SA) at fixed jz.
        ax.plot(eps_lin, [ecc( np.cos(inc0[i]*np.pi/180), x) for x in eps_lin], linewidth=3, label=str(inc0[i]) + r'$^\circ$')
    plt.xlabel(r'$\epsilon_{\rm SA}$')
    plt.ylabel(r'$e_{\rm fix}$')
    plt.legend()
plot_efix([40,45, 50, 60, 143.7])
#plot_efix([135,140, 150, 160])
plt.subplots_adjust(left=0.15, bottom=0.15, right=0.94, top=0.88, wspace=0.16, hspace=0.18)
#%%
# NOTE(review): plot_efix iterates len(inc0), so passing the scalars below
# raises TypeError -- these cells look like stale notebook residue; confirm.
plt.subplot(121)
plot_efix( inc0=30)
plt.subplot(122)
plot_efix(inc0=45)
#%%
import matplotlib.pyplot as plt
import numpy as np
# Define the range of epsilon where f(epsilon) > 0.5
epsilon_upper_limit = ((0.5 / 3**(1/3))**(3/2))
# Define primary x-axis values (linear) within the range
x_primary = np.linspace(0, epsilon_upper_limit, 100)
# Define secondary x-axis values using the provided function
def transform_secondary(x):
    """Map eps_SA to the Hill-radius ratio: 3**(1/3) * x**(2/3)."""
    coeff = 3 ** (1 / 3)
    return coeff * x ** (2 / 3)
# Define the inverse function
def inverse_transform_secondary(x):
    """Inverse of transform_secondary: (x / 3**(1/3))**(3/2)."""
    scaled = x / (3 ** (1 / 3))
    return scaled ** (3 / 2)
# Demo: a plot with a secondary (nonlinearly scaled) x-axis.
# Create y-axis values (you can define this based on your specific function/data)
y = np.sin(x_primary)
# Create figure and primary axes
fig, ax1 = plt.subplots()
# Create secondary axes
ax2 = ax1.twiny()
# Plot the data on the primary axes
ax1.plot(x_primary, y, label='Primary X-axis', color='blue')
# Set the transformation for the secondary axes
ax2.set_xscale('function', functions=(transform_secondary, inverse_transform_secondary))
# Set the tick positions and labels for the secondary axes
# Generate ticks for secondary axis
x_secondary = np.linspace(0, epsilon_upper_limit, 10) # Adjusted range for x_secondary
x_secondary_transformed = transform_secondary(x_secondary)
ax2.set_xticks(x_secondary_transformed)
ax2.set_xticklabels([f'{val:.2f}' for val in x_secondary])
# Set labels and title
ax1.set_xlabel('Primary X-axis')
ax2.set_xlabel('Secondary X-axis')
ax1.set_ylabel('Y-axis')
plt.title('Plot with Two X-axes')
# Add legend
ax1.legend()
# Show plot
plt.show()
#%%
import rebound
# Build a Sun + Jupiter system in units where M_sun = 1.
sim = rebound.Simulation()
# Add the Sun
sim.add(m=1.0) # Sun's mass is set to 1.0
# Add Jupiter
jupiter_mass = 1.0/1047.56 # Jupiter's mass relative to the Sun
a_j = 5.203
eps=0.1
# Initial inclination 60 deg; jz = cos(i) (e0 term commented out since e0
# is not yet defined here).
inc0 = np.pi / 180 * 60
HH = np.cos(inc0) #* (1 - e0**2)**0.5
# Fixed-point eccentricity for this (jz, eps_SA) pair.
e0 = ecc(HH, eps)
sim.add(m=jupiter_mass, a=a_j) # Jupiter
# Masses of the Galilean satellites relative to Jupiter (currently unused:
# the sim.add calls for the moons are commented out).
mass_io = 8.93e-5 * jupiter_mass
mass_europa = 4.8e-5 * jupiter_mass
mass_ganymede = 1.48e-4 * jupiter_mass
mass_callisto = 1.08e-4 * jupiter_mass
#sim.add(primary=sim.particles[4], m=mass_io, a=0.00282, e=0.0041, inc=0.050, Omega=-0.235, omega=0.613, M=1.523) # Io
#sim.add(primary=sim.particles[4], m=mass_europa, a=0.00449, e=0.0094, inc=0.471, Omega=0.001, omega=0.008, M=2.488) # Europa
#sim.add(primary=sim.particles[4], m=mass_ganymede, a=0.00754, e=0.0013, inc=0.204, Omega=-0.067, omega=0.050, M=3.699) # Ganymede
#sim.add(primary=sim.particles[4], m=mass_callisto, a=0.01258, e=0.0074, inc=0.205, Omega=-0.016, omega=0.202, M=4.760) # Callisto
def get_a1(eps, m1, m2, m3, a2, e2):
    """Inner semi-major axis that yields the requested eps_SA.

    m1, m2 : inner-binary masses; m3 : outer perturber mass;
    a2, e2 : outer-orbit semi-major axis and eccentricity.
    """
    mass_factor = m3 ** 2 / (m1 + m2) * (m1 + m2 + m3)
    # Outer-orbit semi-minor axis sets the length scale.
    b_out = a2 * (1 - e2 ** 2) ** 0.5
    return mass_factor ** (-1 / 3) * b_out * eps ** (2 / 3)
def get_eps(m1, m2, m3, a1, a2, e2):
    """Single-averaging parameter eps_SA for inner semi-major axis ``a1``.

    Exact inverse of ``get_a1``: with k = m3**2/(m1+m2)*(m1+m2+m3),
    eps = (a1/b2)**(3/2) * sqrt(k).

    Bug fix: the original expression divided by (m1+m2+m3) instead of
    multiplying by its square root, so get_eps(get_a1(eps)) != eps.
    """
    b2 = a2 * (1 - e2 ** 2) ** 0.5
    k = m3 ** 2 / (m1 + m2) * (m1 + m2 + m3)
    return (a1 / b2) ** 1.5 * k ** 0.5
# Add a (nearly massless) test satellite at the semi-major axis giving
# eps_SA = eps, then integrate and record its orbital elements.
m_test=3e-12
a1 = get_a1(eps, m_test, jupiter_mass, 1, a_j, 0)
# Add Pasiphae with JPL orbital elements
sim.add(m=3.0e-12, a=a1, e=e0, inc=np.arccos(HH/(1-(e0)**2)**0.5), Omega=np.pi / 4, omega= np.pi / 180 *90, M=np.pi / 180 *1, primary=sim.particles[1])
# Set the integrator: ias15 is a high-accuracy adaptive integrator.
sim.integrator = "ias15"
# Integrate the system and sample the elements at each output time.
times = np.linspace(0, 4000, 4000)
args_jupiter = np.zeros(len(times))
args_pasiphae = np.zeros(len(times))
nodes_jupiter = np.zeros(len(times))
nodes_pasiphae = np.zeros(len(times))
ecc_jupiter = np.zeros(len(times))
ecc_pasiphae = np.zeros(len(times))
for i, t in enumerate(times):
    sim.integrate(t)
    args_jupiter[i] = sim.particles[1].calculate_orbit(primary=sim.particles[0]).omega # Argument of pericentre of Jupiter
    args_pasiphae[i] = sim.particles[2].calculate_orbit(primary=sim.particles[1]).omega # Argument of pericentre of the satellite
    nodes_jupiter[i] = sim.particles[1].calculate_orbit(primary=sim.particles[0]).Omega # Ascending node of Jupiter
    nodes_pasiphae[i] = sim.particles[2].calculate_orbit(primary=sim.particles[1]).Omega # Ascending node of the satellite
    ecc_jupiter[i] = sim.particles[1].calculate_orbit(primary=sim.particles[0]).e # Eccentricity of Jupiter
    ecc_pasiphae[i] = sim.particles[2].calculate_orbit(primary = sim.particles[1]).e
# Overlay the N-body track on the analytic phase-space portrait.
plot_energy_levels(eps, inc0 * 180 / np.pi)
plt.scatter(args_pasiphae/np.pi, ecc_pasiphae, color='grey', alpha=0.1)
plt.xlim([0,1])
plt.ylabel('eccentricity')
plt.xlabel(r'$\omega / \pi$')
plt.subplots_adjust(left=0.15, bottom=0.15, right=0.94, top=0.88, wspace=0.16, hspace=0.18)
#%%
# Time series of the satellite's argument of pericentre and eccentricity.
# NOTE(review): both curves carry the label r'$\omega_p$' -- the second one
# presumably should be the eccentricity label; confirm.
plt.figure(2)
plt.plot(times, args_pasiphae/np.pi, label=r'$\omega_p$')
plt.plot(times, ecc_pasiphae, label=r'$\omega_p$')
#%%
# Energy-gap diagnostics at the fixed point vs. the separatrix.
eps=0.16
inc0 = np.pi / 180 * 60
HH = np.cos(inc0) #* (1 - e0**2)**0.5
e0 = ecc(HH, eps)
E_fix = hamiltonian(e0, np.pi/2, HH, eps)
E_sep = hamiltonian(0.001, 0, HH, eps)
# Estimated fluctuation amplitude in e and the induced energy fluctuation.
de = 1*16/9*0.6**0.5*e0*np.fabs(HH)
dH_de = 18*e0**2 + 5 * HH**2 * 2*e0 / (1-e0**2)**2
fluc = dH_de * de
print (E_fix - E_sep, E_sep, e0, HH, 1200*eps**3* e0**2*HH)
#%%
import rebound
def get_a1(eps, m1, m2, m3, a2, e2):
    """Inner semi-major axis giving eps_SA = eps (duplicate of the helper above)."""
    b2_outer = a2 * (1 - e2 ** 2) ** 0.5
    k_mass = m3 ** 2 / (m1 + m2) * (m1 + m2 + m3)
    return k_mass ** (-1 / 3) * b2_outer * eps ** (2 / 3)
def run_system(eps, HH, planet):
    """Integrate one test-satellite system and classify its secular behaviour.

    Returns 0 (circulating), 1 (sin(omega) dropped below -0.2 at some point,
    i.e. omega circulated/left the librating island), or 2 (unstable:
    eccentricity reached >= 1.1).
    NOTE(review): indentation reconstructed from a whitespace-mangled source;
    block structure is a best-effort reading -- confirm against the original.
    """
    sim = rebound.Simulation()
    # Add the Sun
    sim.add(m=1.0) # Sun's mass is set to 1.0
    if planet == 'jupiter':
        jupiter_mass = 1.0/1047.56 # Jupiter's mass relative to the Sun
        a_j = 5.203
        # Fixed-point eccentricity, capped so the initial orbit stays bound.
        e0 = min(ecc(HH, eps), 0.999)
        sim.add(m=jupiter_mass, a=a_j) # Jupiter
    m_test=3e-12
    a1 = get_a1(eps, m_test, jupiter_mass, 1, a_j, 0)
    # Debug trace for two specific eps values.
    if eps == 0.175 or eps == 0.185:
        print (eps, e0, HH)
    sim.add(m=3.0e-12, a=a1, e=e0, inc=np.arccos(HH/(1-(e0)**2)**0.5), Omega=np.pi / 4, omega= np.pi / 180 *90, M=np.pi / 180 *1, primary=sim.particles[1])
    # ias15: high-accuracy adaptive integrator.
    sim.integrator = "ias15"
    # Integrate for ~10 secular timescales of the inner orbit.
    p_out = sim.particles[1].calculate_orbit(primary=sim.particles[0]).P
    times = np.linspace(0, 10 * p_out / eps, 5000)
    stat = 0
    for i, t in enumerate(times):
        sim.integrate(t)
        if sim.particles[2].calculate_orbit(primary=sim.particles[1]).e>=1.1:
            stat = 2
            return stat
        elif np.sin(sim.particles[2].calculate_orbit(primary=sim.particles[1]).omega) <= -0.2:
            stat = 1
    # Final instability check after the loop completes.
    if sim.particles[2].calculate_orbit(primary=sim.particles[1]).e>=1.1:
        stat = 2
    return stat
# Smoke-test a single system.
run_system(0.26, -0.1, 'jupiter')
#%%
import itertools
import multiprocessing # create a process pool that uses all cpus
# Parameter grid for the (eps_SA, jz) scan; results filled by pool.map below.
epsilons_rp22 = np.linspace(0.171, 0.179, 305)
incs_rp22 = np.linspace(0.43, 0.48, 330)
res_rp22 =np.zeros([len(epsilons_rp22), len(incs_rp22)])
paramlist = list(itertools.product(epsilons_rp22, incs_rp22))
#A function which will process a tuple of parameters
def func(params):
    """Unpack an (eps_SA, jz) pair and run the Jupiter-satellite system."""
    eps_value, jz_value = params
    return run_system(eps_value, jz_value, 'jupiter')
# NOTE(review): creating a multiprocessing.Pool at module level without an
# `if __name__ == "__main__":` guard breaks on spawn-based platforms
# (Windows/macOS default) -- confirm this only ever runs on Linux/fork.
pool = multiprocessing.Pool()
#Distribute the parameter sets evenly across the cores
res_rp22 = pool.map(func,paramlist)
#%%
np.save('res_pro2.npy', res_rp22)
#%%
# Quick-look map of the scan results over the (jz, eps_SA) grid.
plt.pcolor(incs_rp22, epsilons_rp22, np.array(res_rp22).reshape([len(epsilons_rp22), len(incs_rp22)]), cmap='Set1')#'nipy_spectral')
#for i in range(0, len(epsilons_all)):
#    for j in range(0, len(incs_all)):
#        res_all[i][j] = run_system(epsilons_all[i], incs_all[j], 'jupiter')
#%%
#incs_p = np.linspace(40,85, 46)
#epsilons_p = np.linspace(0.03,0.3, 55)
# NOTE(review): res_rr / incs_rr / epsilons_rr are not defined anywhere in
# this file -- stale notebook state; these cells fail as written.
res22 = np.array(res_rr)
res24 = res22.reshape([len(epsilons_rr), len(incs_rr)])
#%%
# NOTE(review): res_allzz / res22zzz / *_allzzz are likewise undefined.
res22zz = np.array(res_allzz)
res24zzz = res22zzz.reshape([len(epsilons_allzzz), len(incs_allzzz)])
#%%
# Final labelled stability map (circulating / librating / unstable regions).
plt.rc('text', usetex=True)
plt.figure(figsize=(10,6))
plt.pcolor(incs_all, epsilons_all, res24, cmap='Blues_r')#'nipy_spectral')
#plt.plot(incs, [0.03 + 0.4*np.sin((ii-40)*np.pi/180) for ii in incs], color='k', linewidth=3)
plt.text(0.55, 0.036, 'Circulating', rotation=-65, color='white', size=36, font="serif", usetex=False)
#plt.text(145, 0.15, 'Librating', color='white')
#plt.text(120, 0.28, 'Unstable', color = 'black')
#plt.text(42, 0.15, 'Circulating', color='white')
plt.text(-0.6, 0.15, 'Librating', rotation=0, color='white', size=40, font="serif", usetex=False)
plt.text(0.2, 0.24, 'Unstable', color = 'black', size=40, font='serif', usetex=False)
plt.ylabel(r'$\epsilon_{\rm SA}$')
plt.xlabel(r'$j_z$')
plt.subplots_adjust(left=0.1, bottom=0.12, right=0.97, top=0.96, wspace=0.16, hspace=0.18)
#plt.colorbar()
#%%
# Overlay an N-body track on the analytic portrait (same as earlier cell).
plot_energy_levels(eps, inc0 * 180 / np.pi)
plt.scatter(args_pasiphae/np.pi, ecc_pasiphae, color='grey', alpha=0.1)
plt.xlim([0,1])
plt.ylabel('eccentricity')
plt.xlabel(r'$\omega / \pi$')
plt.subplots_adjust(left=0.15, bottom=0.15, right=0.94, top=0.88, wspace=0.16, hspace=0.18)
#%%
plt.plot(times, args_pasiphae)
#%%
# Font-inspection scratch cells (interactive debugging aids).
import matplotlib.pyplot as plt
print(plt.rcParams["font.sans-serif"][0])
print(plt.rcParams["font.monospace"][0])
plt.show()
#%%
from tkinter import *
from tkinter import font
root = Tk()
list_fonts = list(font.families())
for i in list_fonts:
    print(i)
root.mainloop()
#%%
def inc_in(eps, jz):
    """Initial inclination (degrees) corresponding to jz at the fixed point.

    NOTE(review): the denominator factor ``8 * eps * jz / 8`` looks like it
    may be a typo for ``9 * eps * jz / 8`` (cf. ``ecc``) -- confirm.
    """
    ratio = (1 - 9 * eps * jz / 8) / (1 + 8 * eps * jz / 8)
    cos_inc = (3 / 5) ** 0.25 * jz ** 0.5 * ratio ** 0.5
    return np.arccos(cos_inc) * 180 / np.pi
|
eugeneg88REPO_NAMEfixed_points_brownPATH_START.@fixed_points_brown_extracted@fixed_points_brown-main@phase_space2.py@.PATH_END.py
|
{
"filename": "simplify.py",
"repo_name": "maayane/catsHTM",
"repo_path": "catsHTM_extracted/catsHTM-master/build/lib/catsHTM/simplify.py",
"type": "Python"
}
|
import numpy as np
# Example deeply-nested list used to exercise the flattening helpers below.
ID=[[[[[[3833.0]]], [[[3913.0]]], [[[3961.0]]]], [[[[4553.0]]], [[[4665.0]]], [[[4745.0]]]]]]
print('the nested list is',ID)
def simplify_list(val):
    """Strip redundant single-element list nesting from ``val``.

    ``[[[5]]]`` becomes ``5``; a list with more than one element (or any
    non-list value) is returned unchanged.

    Bug fix: the original recursed into ``val[0]`` on an empty list and
    raised IndexError; an empty list is now returned unchanged.
    """
    while isinstance(val, list) and len(val) == 1:
        val = val[0]
    return val
'''IDx=[]
for i in ID:
a=simplify_list(i)
if isinstance(a, (list, tuple, np.ndarray))==False:
IDx.append(simplify_list(i))
else:
for j in a:
IDx.append(simplify_list(j))
print('IDx is',IDx)
'''
def simplify2(x):
    """Flatten ``x`` by one level: splice container elements, keep scalars."""
    flattened = []
    for element in x:
        if isinstance(element, (list, tuple, np.ndarray)):
            # One level of container: splice its items in order.
            flattened.extend(element)
        else:
            flattened.append(element)
    return flattened
def simplify3(x):
    """Repeatedly flatten ``x`` one level until its first element is a scalar.

    Fixes: removed a leftover debug ``print`` of each intermediate pass, and
    an empty input no longer raises IndexError (it is returned unchanged).
    """
    if len(x) == 0 or not isinstance(x[0], (list, tuple, np.ndarray)):
        return x
    return simplify3(simplify2(x))
print ('simplify3(IDx) is',simplify3(ID))
'''
IDy=[]
for i in IDx:
if isinstance(i, (list, tuple, np.ndarray)) == True:
for j in i:
IDy.append(j)
else:
IDy.append(i)
IDz=[]
for i in IDy:
if isinstance(i, (list, tuple, np.ndarray)) == True:
for j in i:
IDz.append(j)
else:
IDz.append(i)
IDw=[]
for i in IDz:
if isinstance(i, (list, tuple, np.ndarray)) == True:
for j in i:
IDw.append(j)
else:
IDw.append(i)
IDh=[]
for i in IDw:
if isinstance(i, (list, tuple, np.ndarray)) == True:
for j in i:
IDh.append(j)
else:
IDh.append(i)
'''
print('the list without brakets is',simplify3(ID))
|
maayaneREPO_NAMEcatsHTMPATH_START.@catsHTM_extracted@catsHTM-master@build@lib@catsHTM@simplify.py@.PATH_END.py
|
{
"filename": "spacy_embeddings.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/embeddings/spacy_embeddings.py",
"type": "Python"
}
|
import importlib.util
from typing import Any, Dict, List, Optional
from langchain_core.embeddings import Embeddings
from pydantic import BaseModel, ConfigDict, model_validator
class SpacyEmbeddings(BaseModel, Embeddings):
    """Embeddings by spaCy models.

    Attributes:
        model_name (str): Name of a spaCy model.
        nlp (Any): The spaCy model loaded into memory.

    Methods:
        embed_documents(texts: List[str]) -> List[List[float]]:
            Generates embeddings for a list of documents.
        embed_query(text: str) -> List[float]:
            Generates an embedding for a single piece of text.
    """

    model_name: str = "en_core_web_sm"
    nlp: Optional[Any] = None

    model_config = ConfigDict(extra="forbid", protected_namespaces=())

    @model_validator(mode="before")
    @classmethod
    def validate_environment(cls, values: Dict) -> Any:
        """
        Validates that the spaCy package and the model are installed.

        Args:
            values (Dict): The values provided to the class constructor.

        Returns:
            The validated values.

        Raises:
            ValueError: If the spaCy package or the
            model are not installed.
        """
        if values.get("model_name") is None:
            values["model_name"] = "en_core_web_sm"

        model_name = values.get("model_name")

        # Check if the spaCy package is installed
        if importlib.util.find_spec("spacy") is None:
            raise ValueError(
                "SpaCy package not found. "
                "Please install it with `pip install spacy`."
            )
        try:
            # Try to load the spaCy model
            import spacy

            values["nlp"] = spacy.load(model_name)  # type: ignore[arg-type]
        except OSError:
            # If the model is not found, raise a ValueError.
            # (Fix: the original message concatenation was missing spaces,
            # producing "...download en_core_web_sm`or provide...".)
            raise ValueError(
                f"SpaCy model '{model_name}' not found. "
                f"Please install it with "
                f"`python -m spacy download {model_name}` "
                "or provide a valid spaCy model name."
            )
        return values  # Return the validated values

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Generates embeddings for a list of documents.

        Args:
            texts (List[str]): The documents to generate embeddings for.

        Returns:
            A list of embeddings, one for each document.
        """
        return [self.nlp(text).vector.tolist() for text in texts]  # type: ignore[misc]

    def embed_query(self, text: str) -> List[float]:
        """
        Generates an embedding for a single piece of text.

        Args:
            text (str): The text to generate an embedding for.

        Returns:
            The embedding for the text.
        """
        return self.nlp(text).vector.tolist()  # type: ignore[misc]

    async def aembed_documents(self, texts: List[str]) -> List[List[float]]:
        """
        Asynchronously generates embeddings for a list of documents.
        This method is not implemented and raises a NotImplementedError.

        Args:
            texts (List[str]): The documents to generate embeddings for.

        Raises:
            NotImplementedError: This method is not implemented.
        """
        raise NotImplementedError("Asynchronous embedding generation is not supported.")

    async def aembed_query(self, text: str) -> List[float]:
        """
        Asynchronously generates an embedding for a single piece of text.
        This method is not implemented and raises a NotImplementedError.

        Args:
            text (str): The text to generate an embedding for.

        Raises:
            NotImplementedError: This method is not implemented.
        """
        raise NotImplementedError("Asynchronous embedding generation is not supported.")
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@embeddings@spacy_embeddings.py@.PATH_END.py
|
{
"filename": "jedi_typing.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/jedi/py2/jedi/evaluate/jedi_typing.py",
"type": "Python"
}
|
"""
This module is not intended to be used in jedi, rather it will be fed to the
jedi-parser to replace classes in the typing module
"""
try:
from collections import abc
except ImportError:
# python 2
import collections as abc
def factory(typing_name, indextypes):
    """Return a stand-in class for the given ``typing`` name.

    ``indextypes`` holds the type parameters (as callables); the generated
    classes produce instances of those parameter types when iterated or
    indexed, so jedi can infer element types of typing generics.
    """
    class Iterable(abc.Iterable):
        def __iter__(self):
            # Endless stream of the element type -- jedi only inspects the
            # first yielded value, so the infinite loop is never a problem.
            while True:
                yield indextypes[0]()

    class Iterator(Iterable, abc.Iterator):
        def next(self):
            """ needed for python 2 """
            return self.__next__()

        def __next__(self):
            return indextypes[0]()

    class Sequence(abc.Sequence):
        def __getitem__(self, index):
            # Element type is the first (and only) type parameter.
            return indextypes[0]()

    class MutableSequence(Sequence, abc.MutableSequence):
        pass

    class List(MutableSequence, list):
        pass

    class Tuple(Sequence, tuple):
        def __getitem__(self, index):
            if indextypes[1] == Ellipsis:
                # https://www.python.org/dev/peps/pep-0484/#the-typing-module
                # Tuple[int, ...] means a tuple of ints of indetermined length
                return indextypes[0]()
            else:
                # Heterogeneous tuple: each position has its own type.
                return indextypes[index]()

    class AbstractSet(Iterable, abc.Set):
        pass

    class MutableSet(AbstractSet, abc.MutableSet):
        pass

    class KeysView(Iterable, abc.KeysView):
        pass

    class ValuesView(abc.ValuesView):
        def __iter__(self):
            # Values have the second type parameter.
            while True:
                yield indextypes[1]()

    class ItemsView(abc.ItemsView):
        def __iter__(self):
            while True:
                yield (indextypes[0](), indextypes[1]())

    class Mapping(Iterable, abc.Mapping):
        def __getitem__(self, item):
            return indextypes[1]()

        def keys(self):
            return KeysView()

        def values(self):
            return ValuesView()

        def items(self):
            return ItemsView()

    class MutableMapping(Mapping, abc.MutableMapping):
        pass

    class Dict(MutableMapping, dict):
        pass

    class DefaultDict(MutableMapping, dict):
        pass

    # Lookup table mapping typing names to the stand-in classes above.
    dct = {
        "Sequence": Sequence,
        "MutableSequence": MutableSequence,
        "List": List,
        "Iterable": Iterable,
        "Iterator": Iterator,
        "AbstractSet": AbstractSet,
        "MutableSet": MutableSet,
        "Mapping": Mapping,
        "MutableMapping": MutableMapping,
        "Tuple": Tuple,
        "KeysView": KeysView,
        "ItemsView": ItemsView,
        "ValuesView": ValuesView,
        "Dict": Dict,
        "DefaultDict": DefaultDict,
    }
    return dct[typing_name]
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@jedi@py2@jedi@evaluate@jedi_typing.py@.PATH_END.py
|
{
"filename": "window.py",
"repo_name": "nickhand/pyRSD",
"repo_path": "pyRSD_extracted/pyRSD-master/pyRSD/rsd/window.py",
"type": "Python"
}
|
import math
from fractions import Fraction
import numpy as np
import scipy.interpolate as interp
from pyRSD.rsd.tools import RSDSpline as spline
from pyRSD import pygcl
_epsilon = np.finfo(float).eps
def G(p):
    """
    Return the function G(p), as defined in Wilson et al 2015.
    See also: WA Al-Salam 1953

    The value is returned as an exact rational numerator/denominator pair:
    numerator (2p-1)!! (the 2**p prefactor of the original folded into the
    half-integer product) and denominator p!.

    Fixes: the loop variable shadowed the parameter ``p`` (it produced the
    right answer only because the final loop value equals the input), and
    the product was accumulated in floats; both numerator and denominator
    are now exact integers.

    Parameters
    ----------
    p : int
        non-negative order

    Returns
    -------
    numer, denom : int
        the numerator and denominator
    """
    numer = 1
    for k in range(1, p + 1):
        # (2k - 1)/2 per factor, times the overall 2**p, gives (2p-1)!!
        numer *= 2 * k - 1
    return numer, math.factorial(p)
def get_coefficients(ell, ellprime, as_string=False):
    """
    Return the window convolution coefficients

    Parameters
    ----------
    ell : int
        the multipole number of the spectra we are convolving
    ellprime : int
        the multipole number of the spectra that is leaking
        power via the convolution
    as_string : bool, optional
        if True, return human-readable "coeff Lq" strings instead
        of the numeric (qvals, coeffs) pair

    Returns
    -------
    qvals, coeffs : list of int, list of float
        window multipole orders and their coefficients, highest q first
        (or a list of str when ``as_string`` is True)
    """
    # Fix: removed a dead ``p = 0`` assignment (p is rebound by the loop).
    coeffs = []
    qvals = []
    ret_str = []
    for p in range(0, min(ell, ellprime)+1):

        numer = []
        denom = []

        # numerator of product of G(x)
        for r in [G(ell-p), G(p), G(ellprime-p)]:
            numer.append(r[0])
            denom.append(r[1])

        # divide by this
        a,b = G(ell+ellprime-p)
        numer.append(b)
        denom.append(a)
        numer.append((2*(ell+ellprime) - 4*p + 1))
        denom.append((2*(ell+ellprime) - 2*p + 1))

        # the order of the leaked window multipole
        q = ell+ellprime-2*p
        numer.append((2*ell+1))
        denom.append((2*q+1))

        numer = Fraction(np.prod(numer))
        denom = Fraction(np.prod(denom))
        if not as_string:
            coeffs.append(float(numer/denom))
            qvals.append(q)
        else:
            ret_str.append("%s L%d" %(numer/denom, q))

    if not as_string:
        return qvals[::-1], coeffs[::-1]
    else:
        return ret_str[::-1]
class WindowConvolution(object):
    """
    Compute the window-convolved configuration space multipoles

    This class takes the ell = 0, 2, 4 (,6) unconvolved power multipoles and
    the window multipoles as input and computes the convolved power multipoles

    Parameters
    ----------
    s : array_like, (Ns,)
        the separation vector
    W : array_like, (Ns, Nl)
        the even-ell configuration space window function multipoles,
        where Nl must be >= 5; the first column is the ell=0, second
        is ell=2, etc
    max_ellprime : int, optional
        the maximum value of ``ellprime`` to include when performing
        the linear combination of higher-order multipoles leaking
        into a multipole of order ``ell``
    max_ell : int, optional
        maximum multipole number we want to convolve

    Reference
    ----------
    See Wilson et al, MNRAS Volume 464, Issue 3, p.3121-3130, 2017
    """
    def __init__(self, s, W, max_ellprime=4, max_ell=4):

        # the values of the separation where window is defined
        self.s = s
        self.smin = s.min()
        self.smax = s.max()

        # the array of window multipoles
        self.W = W

        # ell and ell prime values
        self.max_ellprime = max_ellprime
        self.max_ell = max_ell

        # initialize the kernel splines
        self._setup_kernels()

    def _setup_kernels(self):
        """
        Initialize the splines used to compute the convolution
        kernels for each ell from the discretely-measured
        window multipoles
        """
        self.splines = {}
        # One kernel column per contributing ellprime.
        kern = np.zeros((len(self.s), self.max_ellprime//2+1))
        W = self.W

        # ell is the multipole number of the convolved spectra
        for i, ell in enumerate(range(0, self.max_ell+1, 2)):

            # ellprime specifies power leakage from other multipoles into ell
            for j, ellprime in enumerate(range(0, self.max_ellprime+1, 2)):

                # the coefficients
                qvals, coeffs = get_coefficients(ell, ellprime)
                # window column index for each q (columns hold even ells only)
                qinds = [q//2 for q in qvals]

                # this term is the sum of coefficients times the window multipoles
                kern[:,j] = np.einsum('...i,i...', W[:,qinds], np.array(coeffs))

            # store a spline representation
            self.splines[ell] = [spline(self.s, k) for k in kern.T]

    def _get_kernel(self, ell, r):
        """
        Return the appropriate kernel, evaluated at separations ``r``
        """
        splines = self.splines[ell]
        toret = np.zeros((len(r), len(splines)))
        # Only evaluate splines inside the measured window range.
        idx = (r>=self.smin)&(r<=self.smax)
        for i, s in enumerate(splines):
            toret[idx,i] = s(r[idx])

            # set the kernel to one out of bounds (diagonal term only, so
            # the convolution reduces to the identity there)
            if i == ell//2:
                toret[~idx,i] = 1.0

        return toret

    def __call__(self, ells, r, xi, order='F'):
        """
        Perform the linear combination of configuration-space multipoles
        with the kernel of window multipoles

        Parameters
        ----------
        ells : list of int
            the list of multipole numbers that we are convolving
        r : array_like
            the desired separation vector where the configuration-space multipoles
            are defined
        xi : array_like, shape: (len(r), len(ells))
            the configuration-space multipoles
        order : 'F', 'C'
            memory-order of return array; 'C' is organized by rows, 'F' by columns

        Returns
        -------
        xi_conv : array_like
            the convolved xi arrays, given by a linear combination of ``xi`` and
            the window function multipoles
        """
        # convolved xi
        conv_xi = np.zeros((len(r), len(ells)), order=order)

        # convolve each ell
        for i, ell in enumerate(ells):

            # convolution kernel
            kern = self._get_kernel(ell, r)

            # check shapes
            if kern.shape[1] != xi.shape[1]:
                npoles = self.max_ellprime//2+1
                # need at least a shape of npoles
                if xi.shape[1] > npoles:
                    xi = xi[...,:npoles]
                else:
                    raise ValueError(("shape mismatch between kernel and number of xi multipoles; "
                                      "please provide the first %d even multipoles" %npoles))

            # row-wise dot product of multipoles with the kernel columns
            conv_xi[:,i] = np.einsum('ij,ij->i', xi, kern)

        return conv_xi
def convolve_multipoles(k, Pell, ells, convolver, qbias=0.7, dry_run=False, legacy=True):
    """
    Convolve the input ell = 0, 2, 4 power multipoles, specified by `Pell`,
    with the specified window function.

    Parameters
    ----------
    k : array_like, (Nk,)
        the array of wavenumbers where `Pell` is defined -- to avoid convolution
        errors, `k` should probably extend to higher values than the desired `k_out`
    Pell : array_like, (Nk, Nell)
        the ell = 0, 2, 4 power multipoles, defined at `k`
    ells : array_like, (Nell,)
        the ell values
    convolver : WindowConvolution
        performs the linear combination with the window multipoles
    qbias : float, optional
        FFTLog bias exponent (non-legacy branch only)
    dry_run : bool, optional
        if True, skip the convolution (transform round-trip only)
    legacy : bool, optional
        if True, use direct trapezoid-rule pk<->xi transforms; otherwise FFTLog

    Returns
    -------
    k_out, Pell_conv : array_like
        output wavenumbers and the window-convolved multipoles
    """
    if not legacy:
        Nell = len(ells); Nk = len(k)

        # FFT the input power multipoles to configuration space
        xi = np.empty((Nk, Nell), order='F') # column-continuous
        rr = np.empty(Nk)
        for i, ell in enumerate(ells):
            pygcl.ComputeXiLM_fftlog(int(ell), 2, k, Pell[:,i], rr, xi[:,i], qbias)
            xi[:,i] *= (-1)**(ell//2)

        # convolve
        if dry_run:
            xi_conv = xi.copy()
        else:
            xi_conv = convolver(ells, rr, xi, order='F')

        # FFTLog back to Fourier space
        Pell_conv = np.empty((Nk, Nell), order='F')
        kk = np.empty(Nk)
        for i, ell in enumerate(ells):
            pygcl.ComputeXiLM_fftlog(int(ell), 2, rr, xi_conv[:,i], kk, Pell_conv[:,i], -qbias)
            Pell_conv[:,i] *= (-1)**(ell//2) * (2*np.pi)**3

        return kk, Pell_conv
    else:
        shape = Pell.shape
        Nell = len(ells)
        if Nell != shape[-1]:
            raise ValueError("shape mismatch between multipole numbers and number of multipoles provided")
        if not all(ell in [0,2,4,6] for ell in ells):
            raise ValueError("valid `ell` values are [0,2,4,6]")

        # separation is the first window column
        s = convolver.s

        # format the k_out: one column per multipole
        k_out = k
        if np.ndim(k_out) == 1:
            k_out = np.repeat(k_out[:,None], Nell, axis=1)
        if k_out.shape[-1] != len(ells):
            raise ValueError("input `k_out` must have %d columns for ell=%s multipoles" %(Nell, str(ells)))

        # make the hires version to avoid wiggles when convolving
        if len(k) < 500:
            k_hires = np.logspace(np.log10(k.min()), np.log10(k.max()), 500)
            poles_hires = []
            for i in range(Nell):
                tck = interp.splrep(k, Pell[:,i], k=3, s=0)
                poles_hires.append(interp.splev(k_hires, tck))
            Pell = np.vstack(poles_hires).T
            k = k_hires.copy()

        # FT the power multipoles to configuration space
        xi = np.empty((len(s), Nell))
        for i, ell in enumerate(ells):
            xi[:,i] = pygcl.pk_to_xi(int(ell), k, Pell[:,i], s, smoothing=0., method=pygcl.IntegrationMethods.TRAPZ)

        # convolve the config space multipole
        if dry_run:
            xi_conv = xi.copy()
        else:
            xi_conv = convolver(ells, s, xi, order='F')

        # FT back to get convolved power pole
        toret = np.empty((len(k_out), Nell))
        for i, ell in enumerate(ells):
            toret[:,i] = pygcl.xi_to_pk(int(ell), s, xi_conv[:,i], k_out[:,i], smoothing=0., method=pygcl.IntegrationMethods.TRAPZ)

        return k_out[:,0], toret
|
nickhandREPO_NAMEpyRSDPATH_START.@pyRSD_extracted@pyRSD-master@pyRSD@rsd@window.py@.PATH_END.py
|
{
"filename": "submit_img_jobs.py",
"repo_name": "Swift-BAT/NITRATES",
"repo_path": "NITRATES_extracted/NITRATES-main/nitrates/submission_scripts/submit_img_jobs.py",
"type": "Python"
}
|
import os
import numpy as np
import time
import argparse
import logging
def cli():
    """Parse command-line options for submitting image jobs.

    Fix: the ``--name`` option's help text was a copy-paste of
    ``--workdir``'s ("directory to work in"); it actually sets the job
    name passed to ``qsub -N``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--ssh", help="Do we need to ssh in to submit?", action="store_true"
    )
    parser.add_argument(
        "--Njobs", type=int, help="Number of jobs to submit", default=16
    )
    parser.add_argument(
        "--dt0",
        type=float,
        help="Time relative to trigger time to start at",
        default=-16.0,
    )
    parser.add_argument(
        "--dt1",
        type=float,
        help="Time relative to trigger time to end at",
        default=16.0,
    )
    parser.add_argument(
        "--dbfname", type=str, help="Name to save the database to", default="none"
    )
    parser.add_argument(
        "--workdir", type=str, help="directory to work in", default=None
    )
    parser.add_argument(
        "--name", type=str, help="job name passed to qsub -N", default="SigImgs"
    )
    parser.add_argument(
        "--queue", type=str, help="what queue to submit this to", default="Open"
    )
    parser.add_argument(
        "--pbs_fname",
        type=str,
        help="file name of the pbs script to submit",
        default="/storage/work/jjd330/local/bat_data/BatML/submission_scripts/sub_sigimgs.pbs",
    )
    args = parser.parse_args()
    return args
def main(args):
    """Build and run the qsub submission commands, one per time slice.

    In local mode each job is submitted immediately via ``os.system``.
    In ``--ssh`` mode the per-job qsub commands are first concatenated and a
    single ``ssh`` invocation submits them all at the end.

    Parameters
    ----------
    args : argparse.Namespace
        Options produced by :func:`cli`.
    """
    if args.ssh:
        # Everything is wrapped into one remote shell invocation at the end.
        ssh_cmd = 'ssh aci-b.aci.ics.psu.edu "'

    # Common qsub prefix; per-job variables are passed through -v.
    base_sub_cmd = "qsub %s -A %s -N %s -v " % (
        args.pbs_fname,
        args.queue,
        args.name,
    )

    njobs = args.Njobs
    workdir = os.getcwd() if args.workdir is None else args.workdir

    cmd = ""
    # njobs contiguous time slices covering [dt0, dt1].
    dts = np.linspace(args.dt0, args.dt1, njobs + 1)

    for i in range(njobs):
        cmd_ = "workdir=%s,dt0=%.3f,dt1=%.3f,dbfname=%s" % (
            workdir,
            dts[i],
            dts[i + 1],
            args.dbfname,
        )
        if args.ssh:
            # Accumulate only; submission happens once at the end over ssh.
            # NOTE(review): commands are joined with " | " — presumably a
            # separator the remote side expects; confirm it is intentional.
            cmd += base_sub_cmd + cmd_
            if i < (njobs - 1):
                cmd += " | "
        else:
            # Bug fix: previously os.system() also ran unconditionally inside
            # the loop, executing the incomplete accumulated command locally
            # on every iteration in ssh mode. Per-job submission now happens
            # only in local mode.
            cmd = base_sub_cmd + cmd_
            logging.info("Trying to submit: ")
            logging.info(cmd)
            try:
                os.system(cmd)
            except Exception as E:
                logging.error(E)
                logging.error("Messed up with ")
                logging.error(cmd)
            time.sleep(1.0)

    if args.ssh:
        cmd = ssh_cmd + cmd + '"'
        logging.info("Full cmd to run:")
        logging.info(cmd)
        try:
            os.system(cmd)
        except Exception as E:
            logging.error(E)
            logging.error("Messed up with ")
            logging.error(cmd)
if __name__ == "__main__":
logging.basicConfig(
filename="submit_jobs_log.log",
level=logging.DEBUG,
format="%(asctime)s-" "%(levelname)s- %(message)s",
)
args = cli()
main(args)
|
Swift-BATREPO_NAMENITRATESPATH_START.@NITRATES_extracted@NITRATES-main@nitrates@submission_scripts@submit_img_jobs.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "scikit-optimize/scikit-optimize",
"repo_path": "scikit-optimize_extracted/scikit-optimize-master/skopt/learning/gaussian_process/tests/__init__.py",
"type": "Python"
}
|
scikit-optimizeREPO_NAMEscikit-optimizePATH_START.@scikit-optimize_extracted@scikit-optimize-master@skopt@learning@gaussian_process@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "__main__.py",
"repo_name": "lwa-project/lsl",
"repo_path": "lsl_extracted/lsl-main/lsl/version/__main__.py",
"type": "Python"
}
|
# Entry point for `python -m lsl.version`: print the installed package
# version, e.g. "lsl 1.2.3".
from lsl.version import version
print(f"lsl {version}")
|
lwa-projectREPO_NAMElslPATH_START.@lsl_extracted@lsl-main@lsl@version@__main__.py@.PATH_END.py
|
{
"filename": "_legendrank.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/_legendrank.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LegendrankValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``scattercarpet.legendrank`` number property."""

    def __init__(self, plotly_name="legendrank", parent_name="scattercarpet", **kwargs):
        # Default edit type is "style" unless the caller supplies one.
        edit_type = kwargs.pop("edit_type", "style")
        super(LegendrankValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattercarpet@_legendrank.py@.PATH_END.py
|
{
"filename": "Eigenvalues.py",
"repo_name": "Pranab-JD/LeXInt",
"repo_path": "LeXInt_extracted/LeXInt-main/Python/Eigenvalues.py",
"type": "Python"
}
|
"""
Created on Thu Aug 8 20:22 2022
@author: Pranab JD
Description: -
Functions to determine the largest eigenvalue of a
matrix/related matrix.
Gershgorin's disks can be used only if the matrix is
explicitly available. For matrix-free implementation,
choose power iterations.
"""
import sys
import numpy as np
sys.path.insert(1, "./LeXInt/Python/")
from Jacobian import Jacobian
def Gershgorin(A):
    """Estimate the extremal eigenvalues of A via Gershgorin's disks.

    Parameters
    ----------
    A : N x N matrix

    Returns
    -------
    eig_real : Largest real eigenvalue (negative magnitude)
    eig_imag : Largest imaginary eigenvalue
    """
    ###? Split 'A' into its Hermitian and skew-Hermitian parts
    hermitian_part = 0.5 * (A + A.T.conj())
    skew_part = 0.5 * (A - A.T.conj())

    # Gershgorin bound: maximum absolute row sum of each part; the real
    # bound has to be NEGATIVE.
    row_sums_herm = np.sum(np.abs(hermitian_part), 1)
    row_sums_skew = np.sum(np.abs(skew_part), 1)

    return -np.max(row_sums_herm), np.max(row_sums_skew)
def Power_iteration(u, RHS_function):
    """Estimate the largest eigenvalue of the Jacobian by power iteration.

    Parameters
    ----------
    u : Input state variable(s)
    RHS_function : RHS function

    Returns
    -------
    largest_eigen_value : Largest eigenvalue (within 2% accuracy)
    ii + 1 : Number of iterations performed
    """

    tol = 0.02                          #? 2% tolerance
    niters = 1000                       #? Max. number of iterations
    eigenvalue_ii_1 = 0                 #? Eigenvalue at ii-1

    vector = np.ones(np.shape(u))       #? Initial estimate of eigenvector
    rhs_u = RHS_function(u)             #? RHS evaluated at 'u'

    for ii in range(niters):

        ###? Compute new eigenvector
        eigenvector = Jacobian(RHS_function, u, vector, rhs_u)

        ###? Norm of eigenvector = eigenvalue
        eigenvalue = np.linalg.norm(eigenvector)

        ###? Normalize eigenvector to eigenvalue; new estimate of eigenvector
        vector = eigenvector/eigenvalue

        ###? Check convergence for eigenvalues (eigenvalues converge faster than eigenvectors)
        if (abs(eigenvalue - eigenvalue_ii_1) <= (tol*eigenvalue + tol)):
            largest_eigen_value = eigenvalue
            break

        ###? This value becomes the previous one
        eigenvalue_ii_1 = eigenvalue

    else:
        ###? Bug fix: previously 'largest_eigen_value' was unbound (NameError)
        ###? if the loop exhausted without converging. Fall back to the last
        ###? iterate, which is the best available estimate.
        largest_eigen_value = eigenvalue

    return largest_eigen_value, ii + 1
|
Pranab-JDREPO_NAMELeXIntPATH_START.@LeXInt_extracted@LeXInt-main@Python@Eigenvalues.py@.PATH_END.py
|
{
"filename": "survey.py",
"repo_name": "LSSTDESC/surveycodex",
"repo_path": "surveycodex_extracted/surveycodex-main/surveycodex/survey.py",
"type": "Python"
}
|
import math
from dataclasses import dataclass, field
from typing import Dict, List
import astropy.units as u
import yaml
from astropy.units import Quantity
from surveycodex.filter import Filter
@dataclass(frozen=True)
class Survey:
    """A dataclass for storing the parameters of a survey"""

    name: str
    "The survey name"
    description: str
    "The survey description with telescope/instrument information"
    _filters: Dict[str, Filter]
    "A private dictionary containing the survey filters"
    pixel_scale: Quantity
    "The pixel scale of the survey"
    mirror_diameter: Quantity
    "The mirror diameter"
    gain: Quantity
    "The gain in electron/ADU"
    obscuration: Quantity
    "The total obscuration created by the instrument pieces"
    zeropoint_airmass: Quantity
    "The zeropoint airmass"
    # init=False fields are computed in __post_init__ and are therefore not
    # part of the constructor signature (see from_yaml's positional call).
    available_filters: List[str] = field(init=False)
    "The list of survey filters"
    effective_area: Quantity = field(init=False)
    "The survey instrument effective area on the sky computed from the obscuration"
    references: Dict[str, Dict[str, str]]
    "Dictionary of references for each parameter specified in surveycodex"

    @classmethod
    def from_yaml(cls, yaml_file: str):
        """Constructor for the Survey class

        Parameters
        ----------
        yaml_file: pathlike
            Filepath to YAML file containing the survey info

        Returns
        -------
        Survey
            A `Survey` instance filled with the information as attributes
        """
        with open(yaml_file) as f:
            data = yaml.safe_load(f)
        filters = Survey._construct_filter_dict(data)
        # Attach astropy units to the raw YAML numbers.
        pixel_scale = data["pixel_scale"] * u.arcsec
        mirror_diameter = data["mirror_diameter"] * u.m
        gain = data["gain"] * u.electron / u.adu
        obscuration = data["obscuration"] * u.dimensionless_unscaled
        zeropoint_airmass = data["zeropoint_airmass"] * u.dimensionless_unscaled
        # Positional arguments follow the declared field order, skipping the
        # init=False fields.
        return cls(
            data["name"],
            data["description"],
            filters,
            pixel_scale,
            mirror_diameter,
            gain,
            obscuration,
            zeropoint_airmass,
            data["references"],
        )

    def __str__(self):
        # Boxed header with the survey name, followed by one line per
        # public parameter.
        n = len(self.name)
        survey_repr = "-" * (n + 4) + "\n"
        survey_repr += f"| {self.name} |"
        survey_repr += f" {self.description}\n"
        survey_repr += "-" * (n + 4) + "\n"
        printed_params = [
            f"    {key:<20} = {val}"
            for key, val in self.__dict__.items()
            if key not in ("name", "description", "_filters", "references")
        ]
        survey_repr += "\n".join(printed_params)
        return survey_repr

    def __repr__(self):
        return f"Survey {self.name}"

    @staticmethod
    def _construct_filter_dict(survey_dict):
        """Create a custom dictionary for the survey filters

        Parameters
        ----------
        survey_dict: dict
            Dictionnary of the survey parameters, including the definition of the filters

        Returns
        -------
        dict
            Dictionary of the survey Filter instances
        """
        return {
            fname: Filter.from_dict(fdict)
            for fname, fdict in survey_dict["filters"].items()
        }

    def __post_init__(self):
        """Set attributes computed after class is constructed"""
        # object.__setattr__ is required because the dataclass is frozen:
        # normal attribute assignment would raise FrozenInstanceError.
        available_filters = list(self._filters.keys())
        object.__setattr__(self, "available_filters", available_filters)
        # Effective collecting area = geometric mirror area scaled down by
        # the fractional obscuration.
        total_area = math.pi * (self.mirror_diameter * 0.5) ** 2
        effective_area = total_area * (1 - self.obscuration)
        object.__setattr__(self, "effective_area", effective_area)

    def get_filter(self, filter_name):
        """Getter method to retrieve a Filter object

        Parameters
        ----------
        filter_name : str
            Name of a filter chosen among the `available_filters` attribute

        Returns
        -------
        Filter
            Corresponding `Filter` dataclass

        Raises
        ------
        ValueError
            The requested filter does not exist or is not available in surveycodex
        """
        if filter_name not in self.available_filters:
            raise ValueError(
                "Please check the filter name. "
                f"The available filters for {self.name} "
                f"are {self.available_filters}"
            )
        return self._filters[filter_name]
|
LSSTDESCREPO_NAMEsurveycodexPATH_START.@surveycodex_extracted@surveycodex-main@surveycodex@survey.py@.PATH_END.py
|
{
"filename": "_ticklabelstep.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/volume/colorbar/_ticklabelstep.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabelstepValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the ``volume.colorbar.ticklabelstep`` integer property."""

    def __init__(
        self, plotly_name="ticklabelstep", parent_name="volume.colorbar", **kwargs
    ):
        # Defaults: edit_type "calc" and a minimum allowed value of 1,
        # both overridable by the caller.
        edit_type = kwargs.pop("edit_type", "calc")
        minimum = kwargs.pop("min", 1)
        super(TicklabelstepValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@volume@colorbar@_ticklabelstep.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "ma-xu/pointMLP-pytorch",
"repo_path": "pointMLP-pytorch_extracted/pointMLP-pytorch-main/README.md",
"type": "Markdown"
}
|
# Rethinking Network Design and Local Geometry in Point Cloud: A Simple Residual MLP Framework (ICLR 2022)
[](https://paperswithcode.com/sota/3d-point-cloud-classification-on-modelnet40?p=rethinking-network-design-and-local-geometry-1)
[](https://paperswithcode.com/sota/3d-point-cloud-classification-on-scanobjectnn?p=rethinking-network-design-and-local-geometry-1)
[](https://github.com/ma-xu/pointMLP-pytorch)
<div align="left">
<a><img src="images/smile.png" height="70px" ></a>
<a><img src="images/neu.png" height="70px" ></a>
<a><img src="images/columbia.png" height="70px" ></a>
</div>
[open review](https://openreview.net/forum?id=3Pbra-_u76D) | [arXiv](https://arxiv.org/abs/2202.07123) | Primary contact: [Xu Ma](mailto:ma.xu1@northeastern.edu)
<div align="center">
<img src="images/overview.png" width="650px" height="300px">
</div>
Overview of one stage in PointMLP. Given an input point cloud, PointMLP progressively extracts local features using residual point MLP blocks. In each stage, we first transform the local point using a geometric affine module, and then local points are extracted before and after aggregation, respectively. By repeating multiple stages, PointMLP progressively enlarges the receptive field and models entire point cloud geometric information.
## BibTeX
@article{ma2022rethinking,
title={Rethinking network design and local geometry in point cloud: A simple residual MLP framework},
author={Ma, Xu and Qin, Can and You, Haoxuan and Ran, Haoxi and Fu, Yun},
journal={arXiv preprint arXiv:2202.07123},
year={2022}
}
## Model Zoo
**Questions on ModelNet40 classification results (a common issue for ModelNet40 dataset in the community)**
The performance on ModelNet40 of almost all methods are not stable, see (https://github.com/CVMI-Lab/PAConv/issues/9#issuecomment-873371422).<br>
If you run the same codes for several times, you will get different results (even with fixed seed).<br>
The best way to reproduce the results is to test with a pretrained model for ModelNet40. <br>
Also, the randomness of ModelNet40 is our motivation to experiment on ScanObjectNN, and to report the mean/std results of several runs.
------
The codes/models/logs for submission version (without bug fixed) can be found here [commit:d2b8dbaa](http://github.com/13952522076/pointMLP-pytorch/tree/d2b8dbaa06eb6176b222dcf2ad248f8438582026).
On ModelNet40, fixed pointMLP achieves a result of **91.5% mAcc** and **94.1% OA** without voting, logs and pretrained models can be found [[here]](https://web.northeastern.edu/smilelab/xuma/pointMLP/checkpoints/fixstd/modelnet40/pointMLP-20220209053148-404/).
On ScanObjectNN, fixed pointMLP achieves a result of **84.4% mAcc** and **86.1% OA** without voting, logs and pretrained models can be found [[here]](https://web.northeastern.edu/smilelab/xuma/pointMLP/checkpoints/fixstd/scanobjectnn/pointMLP-20220204021453/). Fixed pointMLP-elite achieves a result of **81.7% mAcc** and **84.1% OA** without voting, logs and pretrained models can be found [[here]](https://web.northeastern.edu/smilelab/xuma/pointMLP/checkpoints/fixstd/scanobjectnn/model313Elite-20220220015842-2956/).
Stay tuned. More elite versions and voting results will be uploaded.
## News & Updates:
- [x] **Apr/24/2024**: University server is down. Update the ScanobjectNN dataset link.
- [x] fix the incomplete utils in partseg by Mar/10, caused by an erroneously uploaded folder.
- [x] upload test code for ModelNet40
- [x] update std bug (unstable testing in previous version)
- [x] paper/codes release
:point_right::point_right::point_right:**NOTE:** The codes/models/logs for submission version (without bug fixed) can be found here [commit:d2b8dbaa](http://github.com/13952522076/pointMLP-pytorch/tree/d2b8dbaa06eb6176b222dcf2ad248f8438582026).
## Install
```bash
# step 1. clone this repo
git clone https://github.com/ma-xu/pointMLP-pytorch.git
cd pointMLP-pytorch
# step 2. create a conda virtual environment and activate it
conda env create
conda activate pointmlp
```
```bash
# Optional solution for step 2: install libs step by step
conda create -n pointmlp python=3.7 -y
conda activate pointmlp
conda install pytorch==1.10.1 torchvision==0.11.2 cudatoolkit=10.2 -c pytorch -y
# if you are using Ampere GPUs (e.g., A100 and 30X0), please install compatible Pytorch and CUDA versions, like:
# pip install torch==1.8.1+cu111 torchvision==0.9.1+cu111 torchaudio==0.8.1 -f https://download.pytorch.org/whl/torch_stable.html
pip install cycler einops h5py pyyaml==5.4.1 scikit-learn==0.24.2 scipy tqdm matplotlib==3.4.2
pip install pointnet2_ops_lib/.
```
## Usage
### Classification ModelNet40
**Train**: The dataset will be automatically downloaded, run following command to train.
By default, it will create a folder named "checkpoints/{modelName}-{msg}-{randomseed}", which includes args.txt, best_checkpoint.pth, last_checkpoint.pth, log.txt, out.txt.
```bash
cd classification_ModelNet40
# train pointMLP
python main.py --model pointMLP
# train pointMLP-elite
python main.py --model pointMLPElite
# please add other parameters as you wish.
```
To conduct voting testing, run
```bash
# please modify the msg accordingly
python voting.py --model pointMLP --msg demo
```
### Classification ScanObjectNN
The dataset will be automatically downloaded
- Train pointMLP/pointMLPElite
```bash
cd classification_ScanObjectNN
# train pointMLP
python main.py --model pointMLP
# train pointMLP-elite
python main.py --model pointMLPElite
# please add other parameters as you wish.
```
By default, it will create a folder named "checkpoints/{modelName}-{msg}-{randomseed}", which includes args.txt, best_checkpoint.pth, last_checkpoint.pth, log.txt, out.txt.
### Part segmentation
- Make data folder and download the dataset
```bash
cd part_segmentation
mkdir data
cd data
wget https://shapenet.cs.stanford.edu/media/shapenetcore_partanno_segmentation_benchmark_v0_normal.zip --no-check-certificate
unzip shapenetcore_partanno_segmentation_benchmark_v0_normal.zip
```
- Train pointMLP
```bash
# train pointMLP
python main.py --model pointMLP
# please add other parameters as you wish.
```
## Acknowledgment
Our implementation is mainly based on the following codebases. We gratefully thank the authors for their wonderful works.
[CurveNet](https://github.com/tiangexiang/CurveNet),
[PAConv](https://github.com/CVMI-Lab/PAConv),
[GDANet](https://github.com/mutianxu/GDANet),
[Pointnet2_PyTorch](https://github.com/erikwijmans/Pointnet2_PyTorch)
## LICENSE
PointMLP is under the Apache-2.0 license.
|
ma-xuREPO_NAMEpointMLP-pytorchPATH_START.@pointMLP-pytorch_extracted@pointMLP-pytorch-main@README.md@.PATH_END.py
|
{
"filename": "tinygp_multidimensional_quasiperiodicsquaredexponential_activity.py",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/pyorbit/models/tinygp_multidimensional_quasiperiodicsquaredexponential_activity.py",
"type": "Python"
}
|
from pyorbit.subroutines.common import *
from pyorbit.models.abstract_model import *
from pyorbit.keywords_definitions import *
from scipy.linalg import cho_factor, cho_solve, lapack, LinAlgError
from scipy import matrix, spatial
import sys
__all__ = ['TinyGP_Multidimensional_QuasiPeriodicSquaredExponentialActivity']
# Optional-dependency guard: jax/tinygp are only needed for this GP model,
# so the whole definition block is wrapped in try/except and silently
# skipped when they are not installed.
try:
    import jax
    # Force double precision: GP likelihood evaluations are numerically
    # fragile in float32.
    jax.config.update("jax_enable_x64", True)

    import jax.numpy as jnp
    from tinygp import kernels, GaussianProcess
    #from tinygp.helpers import JAXArray

    if sys.version_info[1] < 10:
        # NOTE(review): this raise is swallowed by the outer `except: pass`,
        # so on Python < 3.10 the classes below are silently not defined.
        raise Warning("You should be using Python 3.10 - tinygp may not work")

    class LatentKernel_Multi_QPSE(kernels.Kernel):
        """A custom kernel based on Rajpaul et al. (2015)

        Args:
            kernel: The kernel function describing the latent process. This can be any other
                ``tinygp`` kernel.
            coeff_prim: The primal coefficients for each class. This can be thought of as how
                much the latent process itself projects into the observations for that class.
                This should be an array with an entry for each class of observation.
            coeff_deriv: The derivative coefficients for each class. This should have the same
                shape as ``coeff_prim``.
        """

        # The class-level annotations use the `X | Y` union syntax, which is
        # a SyntaxError-free runtime failure path on older interpreters; the
        # inner try keeps the class importable there.
        try:
            kernel_QP : kernels.Kernel
            kernel_SE : kernels.Kernel
            coeff_QP_prim: jax.Array | float
            coeff_QP_deriv: jax.Array | float
            coeff_SE_prim: jax.Array | float
            coeff_SE_deriv: jax.Array | float
        except:
            pass

        def __init__(self, kernel_QP, kernel_SE, coeff_QP_prim, coeff_QP_deriv, coeff_SE_prim, coeff_SE_deriv):
            self.kernel_QP = kernel_QP
            self.kernel_SE = kernel_SE
            # Broadcast so scalar and per-class coefficient inputs are both
            # accepted.
            self.coeff_QP_prim, self.coeff_QP_deriv = jnp.broadcast_arrays(
                jnp.asarray(coeff_QP_prim), jnp.asarray(coeff_QP_deriv)
            )
            self.coeff_SE_prim, self.coeff_SE_deriv = jnp.broadcast_arrays(
                jnp.asarray(coeff_SE_prim), jnp.asarray(coeff_SE_deriv)
            )

        def evaluate(self, X1, X2):
            # Inputs are (time, dataset-label) pairs; the label selects the
            # per-dataset amplitude coefficients below.
            t1, label1 = X1
            t2, label2 = X2

            # Differentiate the kernel function: the first derivative wrt x1
            QP_Kp = jax.grad(self.kernel_QP.evaluate, argnums=0)
            SE_Kp = jax.grad(self.kernel_SE.evaluate, argnums=0)

            # ... and the second derivative
            QP_Kpp = jax.grad(QP_Kp, argnums=1)
            SE_Kpp = jax.grad(SE_Kp, argnums=1)

            # Evaluate the kernel matrix and all of its relevant derivatives
            QP_K = self.kernel_QP.evaluate(t1, t2)
            QP_d2K_dx1dx2 = QP_Kpp(t1, t2)
            SE_K = self.kernel_SE.evaluate(t1, t2)
            SE_d2K_dx1dx2 = SE_Kpp(t1, t2)

            # For stationary kernels, these are related just by a minus sign, but we'll
            # evaluate them both separately for generality's sake
            QP_dK_dx2 = jax.grad(self.kernel_QP.evaluate, argnums=1)(t1, t2)
            QP_dK_dx1 = QP_Kp(t1, t2)
            SE_dK_dx2 = jax.grad(self.kernel_SE.evaluate, argnums=1)(t1, t2)
            SE_dK_dx1 = SE_Kp(t1, t2)

            # Extract the coefficients
            a1 = self.coeff_QP_prim[label1]
            a2 = self.coeff_QP_prim[label2]
            b1 = self.coeff_QP_deriv[label1]
            b2 = self.coeff_QP_deriv[label2]
            c1 = self.coeff_SE_prim[label1]
            c2 = self.coeff_SE_prim[label2]
            d1 = self.coeff_SE_deriv[label1]
            d2 = self.coeff_SE_deriv[label2]

            # Construct the matrix element
            return (
                a1 * a2 * QP_K
                + a1 * b2 * QP_dK_dx2
                + b1 * a2 * QP_dK_dx1
                + b1 * b2 * QP_d2K_dx1dx2
                + c1 * c2 * SE_K
                + c1 * d2 * SE_dK_dx2
                + d1 * c2 * SE_dK_dx1
                + d1 * d2 * SE_d2K_dx1dx2
            )

    def _build_tinygp_multidimensional_QPSE(params):
        # Quasi-periodic part: squared-exponential decay times a periodic
        # ExpSineSquared term; SE part: a single squared-exponential on Pcyc.
        base_kernel_QP = kernels.ExpSquared(scale=jnp.abs(params["Pdec"])) \
            * kernels.ExpSineSquared(
                scale=jnp.abs(params["Prot"]),
                gamma=jnp.abs(params["gamma"]))
        base_kernel_SE = kernels.ExpSquared(scale=jnp.abs(params["Pcyc"]))

        kernel = LatentKernel_Multi_QPSE(base_kernel_QP, base_kernel_SE,
                                         params['coeff_QP_prime'], params['coeff_QP_deriv'],
                                         params['coeff_SE_prime'], params['coeff_SE_deriv'])
        return GaussianProcess(
            kernel, params['X'], diag=jnp.abs(params['diag']), mean=0.0
        )

    @jax.jit
    def _loss_tinygp_MultiQPSE(params):
        # JIT-compiled GP log-likelihood of the residuals in params['y'].
        gp = _build_tinygp_multidimensional_QPSE(params)
        return gp.log_probability(params['y'])

except:
    pass
class TinyGP_Multidimensional_QuasiPeriodicSquaredExponentialActivity(AbstractModel):
    ''' Three parameters out of four are the same for all the datasets, since they are related to
    the properties of the physical process rather than the observed effects on a dataset
    From Grunblatt+2015, Affer+2016
    - theta: is usually related to the rotation period of the star( or one of its harmonics);
    - lambda: is the correlation decay timescale, and it can be related to the lifetime of the active regions.
    - omega: is the length scale of the periodic component, and can be linked to the size evolution of the active regions;
    - h: represents the amplitude of the correlations '''

    default_common = 'activity'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.model_class = 'gp_multidimensional_quasiperiodicsquaredexponential_activity'
        self.internal_likelihood = True
        self.delayed_lnlk_computation = True

        # Hyperparameters shared by all datasets (physical-process properties)
        self.list_pams_common = OrderedSet([
            'Prot',  # Rotational period of the star
            'Pdec',  # Decay timescale of activity
            'Oamp',  # Granulation of activity
            'Pcyc',  # Timescale of the magnetic cycle
        ])

        # Per-dataset amplitudes of the latent process and its derivative
        self.list_pams_dataset = OrderedSet([
            'rot_amp',  # Amplitude of the covariance matrix
            'con_amp',  # Amplitude of the first derivative of the covariance matrix
            'cyc_amp',  # Amplitude of the covariance matrix
            'cyc_der'   # Amplitude of the first derivative of the covariance matrix
        ])

        self.internal_parameter_values = None
        self._dist_t1 = None
        self._dist_t2 = None
        self.dataset_ordering = {}
        self.inds_cache = None

        self._dataset_x0 = []
        self._dataset_label = []
        self._dataset_e2 = []
        self._dataset_names = {}

        self._dataset_nindex = []

        self.use_derivative_dict = {}

        self.internal_coeff_QP_prime = []
        self.internal_coeff_QP_deriv = []
        self.internal_coeff_SE_prime = []
        self.internal_coeff_SE_deriv = []

        self._dataset_ej2 = []
        self._dataset_res = []

        # Fix: _added_datasets was previously initialized twice.
        self._added_datasets = 0
        self._n_cov_matrix = 0

        self.pi2 = np.pi * np.pi

    def initialize_model(self, mc, **kwargs):
        """Select the hyperparameter-prior conditions and optionally replace
        Prot/Pdec with the shared stellar rotation period / activity decay."""

        if kwargs.get('hyperparameters_condition', False):
            self.hyper_condition = self._hypercond_01
        else:
            self.hyper_condition = self._hypercond_00

        if kwargs.get('rotation_decay_condition', False):
            self.rotdec_condition = self._hypercond_02
        else:
            self.rotdec_condition = self._hypercond_00

        if kwargs.get('halfrotation_decay_condition', False):
            self.halfrotdec_condition = self._hypercond_03
        else:
            self.halfrotdec_condition = self._hypercond_00

        # Inherit the rotation-period flag from the first associated
        # 'activity' common model, then let explicit keywords override it.
        for common_ref in self.common_ref:
            if mc.common_models[common_ref].model_class == 'activity':
                self.use_stellar_rotation_period = getattr(mc.common_models[common_ref], 'use_stellar_rotation_period', False)
                break

        for keyword in keywords_stellar_rotation:
            self.use_stellar_rotation_period = kwargs.get(keyword, self.use_stellar_rotation_period)

        if self.use_stellar_rotation_period:
            self.list_pams_common.update(['rotation_period'])
            self.list_pams_common.discard('Prot')

        # Same logic for the activity-decay timescale.
        for common_ref in self.common_ref:
            if mc.common_models[common_ref].model_class == 'activity':
                self.use_stellar_activity_decay = getattr(mc.common_models[common_ref], 'use_stellar_activity_decay', False)
                break

        for keyword in keywords_stellar_activity_decay:
            self.use_stellar_activity_decay = kwargs.get(keyword, self.use_stellar_activity_decay)

        if self.use_stellar_activity_decay:
            self.list_pams_common.update(['activity_decay'])
            self.list_pams_common.discard('Pdec')

    def initialize_model_dataset(self, mc, dataset, **kwargs):
        """ when reloading the .p files, the object is not reinitialized, so we have to skip the
        incremental addition of datasets if they are already present  """
        if dataset.name_ref in self._dataset_names:
            return

        self._dataset_nindex.append([self._n_cov_matrix,
                                     self._n_cov_matrix + dataset.n])

        self._dataset_x0 = np.append(self._dataset_x0, dataset.x0)
        self._dataset_label = np.append(self._dataset_label, np.zeros_like(dataset.x0, dtype=int) + self._added_datasets)
        self._dataset_e2 = np.append(self._dataset_e2, dataset.e ** 2)

        self._dataset_names[dataset.name_ref] = self._added_datasets
        self._n_cov_matrix += dataset.n
        self._added_datasets += 1

        self._dataset_ej2 = self._dataset_e2 * 1.
        self._dataset_res = self._dataset_e2 * 0.

        self.internal_coeff_QP_prime = np.empty(self._added_datasets)
        self.internal_coeff_QP_deriv = np.empty(self._added_datasets)
        self.internal_coeff_SE_prime = np.empty(self._added_datasets)
        self.internal_coeff_SE_deriv = np.empty(self._added_datasets)

        self._X = (self._dataset_x0, self._dataset_label.astype(int))

        if 'derivative' in kwargs:
            use_derivative = kwargs['derivative'].get(dataset.name_ref, False)
        elif dataset.name_ref in kwargs:
            use_derivative = kwargs[dataset.name_ref].get('derivative', False)
        else:
            # Activity indicators trace the latent process directly, so they
            # get no derivative term by default; everything else does.
            if dataset.kind == 'H-alpha' or \
                    dataset.kind == 'S_index' or \
                    dataset.kind == 'Ca_HK' or \
                    dataset.kind == 'FWHM':
                use_derivative = False
            else:
                use_derivative = True

        if 'derivative_quasiperiodic' in kwargs:
            use_derivative_QP = kwargs['derivative_quasiperiodic'].get(dataset.name_ref, False)
        elif dataset.name_ref in kwargs:
            use_derivative_QP = kwargs[dataset.name_ref].get('derivative_quasiperiodic', False)
        else:
            use_derivative_QP = True

        if 'derivative_squaredexponential' in kwargs:
            use_derivative_SE = kwargs['derivative_squaredexponential'].get(dataset.name_ref, False)
        elif dataset.name_ref in kwargs:
            use_derivative_SE = kwargs[dataset.name_ref].get('derivative_squaredexponential', False)
        else:
            use_derivative_SE = True

        # Bug fix: the two branches below used to *assign*
        # self.fix_list[dataset.name_ref], so when both applied the
        # 'rot_amp' fix was silently overwritten by the 'cyc_der' one.
        # Merge the fixed parameters instead of replacing the dict.
        fixed_pams = {}
        if not use_derivative or not use_derivative_QP:
            fixed_pams['rot_amp'] = [0., 0.]
        if not use_derivative or not use_derivative_SE:
            fixed_pams['cyc_der'] = [0., 0.]
        if fixed_pams:
            self.fix_list.setdefault(dataset.name_ref, {}).update(fixed_pams)

        return

    def add_internal_dataset(self, parameter_values, dataset):
        """Store per-dataset residuals, jitter-inflated variances, and
        amplitude coefficients ahead of the delayed likelihood computation."""

        if self.use_stellar_rotation_period:
            parameter_values['Prot'] = parameter_values['rotation_period']

        if self.use_stellar_activity_decay:
            parameter_values['Pdec'] = parameter_values['activity_decay']

        self.internal_parameter_values = parameter_values

        d_ind = self._dataset_names[dataset.name_ref]
        d_nstart, d_nend = self._dataset_nindex[d_ind]

        self._dataset_ej2[d_nstart:d_nend] = self._dataset_e2[d_nstart:d_nend] + dataset.jitter**2.0
        self._dataset_res[d_nstart:d_nend] = dataset.residuals

        self.internal_coeff_QP_prime[d_ind] = parameter_values['con_amp']
        self.internal_coeff_QP_deriv[d_ind] = parameter_values['rot_amp']
        self.internal_coeff_SE_prime[d_ind] = parameter_values['cyc_amp']
        self.internal_coeff_SE_deriv[d_ind] = parameter_values['cyc_der']

    def lnlk_compute(self):
        """Return the GP log-likelihood, or -inf when the hyperparameter
        conditions selected in initialize_model are violated."""
        if not self.hyper_condition(self.internal_parameter_values):
            return -np.inf
        if not self.rotdec_condition(self.internal_parameter_values):
            return -np.inf
        if not self.halfrotdec_condition(self.internal_parameter_values):
            return -np.inf

        theta_dict = dict(
            gamma=1. / (2.*self.internal_parameter_values['Oamp'] ** 2),
            Pdec=self.internal_parameter_values['Pdec'],
            Prot=self.internal_parameter_values['Prot'],
            Pcyc=self.internal_parameter_values['Pcyc'],
            diag=self._dataset_ej2,
            X=self._X,
            y=self._dataset_res,
            coeff_QP_prime=self.internal_coeff_QP_prime,
            coeff_QP_deriv=self.internal_coeff_QP_deriv,
            coeff_SE_prime=self.internal_coeff_SE_prime,
            coeff_SE_deriv=self.internal_coeff_SE_deriv
        )

        return _loss_tinygp_MultiQPSE(theta_dict)

    def sample_predict(self, dataset, x0_input=None, return_covariance=False, return_variance=False):
        """Predict the GP mean (and optionally the standard deviation) for
        one dataset, either at the training epochs or at x0_input.

        NOTE(review): return_covariance is accepted for interface
        compatibility but currently ignored — confirm against other models.
        """

        dataset_index = self._dataset_names[dataset.name_ref]

        if x0_input is None:
            l_nstart, l_nend = self._dataset_nindex[dataset_index]
            X_input = self._X
        else:
            # Replicate the prediction epochs for every dataset label so the
            # multidimensional kernel can be evaluated, then slice out the
            # portion belonging to this dataset.
            l_nstart, l_nend = len(x0_input)*dataset_index, len(x0_input)*(dataset_index+1)

            temp_input = []
            temp_label = []

            for ii in range(0, self._added_datasets):
                temp_input = np.append(temp_input, x0_input)
                temp_label = np.append(temp_label, np.zeros_like(x0_input, dtype=int) + ii)

            X_input = (temp_input, temp_label.astype(int))

        theta_dict = dict(
            gamma=1. / (2.*self.internal_parameter_values['Oamp'] ** 2),
            Pdec=self.internal_parameter_values['Pdec'],
            Prot=self.internal_parameter_values['Prot'],
            Pcyc=self.internal_parameter_values['Pcyc'],
            diag=self._dataset_ej2,
            X=self._X,
            y=self._dataset_res,
            coeff_QP_prime=self.internal_coeff_QP_prime,
            coeff_QP_deriv=self.internal_coeff_QP_deriv,
            coeff_SE_prime=self.internal_coeff_SE_prime,
            coeff_SE_deriv=self.internal_coeff_SE_deriv,
            x0_predict=X_input
        )

        gp = _build_tinygp_multidimensional_QPSE(theta_dict)
        _, cond_gp = gp.condition(theta_dict['y'], theta_dict['x0_predict'])

        mu_full = cond_gp.loc  # or cond_gp.mean?
        mu = mu_full[l_nstart:l_nend]
        std = np.sqrt(cond_gp.variance)[l_nstart:l_nend]

        if return_variance:
            return mu, std
        else:
            return mu

    @staticmethod
    def _hypercond_00(parameter_values):
        # No condition: always accept the hyperparameters.
        return True

    @staticmethod
    def _hypercond_01(parameter_values):
        # Condition from Rajpaul 2017, Rajpaul+2021
        # Taking into account that Pdec^2 = 2*lambda_2^2
        return parameter_values['Pdec']**2 > (3. / 2. / np.pi) * parameter_values['Oamp']**2 * parameter_values['Prot']**2

    @staticmethod
    def _hypercond_02(parameter_values):
        # Condition on Rotation period and decay timescale
        return parameter_values['Pdec'] > 2. * parameter_values['Prot']

    @staticmethod
    def _hypercond_03(parameter_values):
        # Condition on Rotation period and decay timescale
        return parameter_values['Pdec'] > 0.5 * parameter_values['Prot']
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@pyorbit@models@tinygp_multidimensional_quasiperiodicsquaredexponential_activity.py@.PATH_END.py
|
{
"filename": "commands.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/redis/py3/redis/commands/bf/commands.py",
"type": "Python"
}
|
from redis.client import NEVER_DECODE
from redis.utils import deprecated_function
BF_RESERVE = "BF.RESERVE"
BF_ADD = "BF.ADD"
BF_MADD = "BF.MADD"
BF_INSERT = "BF.INSERT"
BF_EXISTS = "BF.EXISTS"
BF_MEXISTS = "BF.MEXISTS"
BF_SCANDUMP = "BF.SCANDUMP"
BF_LOADCHUNK = "BF.LOADCHUNK"
BF_INFO = "BF.INFO"
BF_CARD = "BF.CARD"
CF_RESERVE = "CF.RESERVE"
CF_ADD = "CF.ADD"
CF_ADDNX = "CF.ADDNX"
CF_INSERT = "CF.INSERT"
CF_INSERTNX = "CF.INSERTNX"
CF_EXISTS = "CF.EXISTS"
CF_MEXISTS = "CF.MEXISTS"
CF_DEL = "CF.DEL"
CF_COUNT = "CF.COUNT"
CF_SCANDUMP = "CF.SCANDUMP"
CF_LOADCHUNK = "CF.LOADCHUNK"
CF_INFO = "CF.INFO"
CMS_INITBYDIM = "CMS.INITBYDIM"
CMS_INITBYPROB = "CMS.INITBYPROB"
CMS_INCRBY = "CMS.INCRBY"
CMS_QUERY = "CMS.QUERY"
CMS_MERGE = "CMS.MERGE"
CMS_INFO = "CMS.INFO"
TOPK_RESERVE = "TOPK.RESERVE"
TOPK_ADD = "TOPK.ADD"
TOPK_INCRBY = "TOPK.INCRBY"
TOPK_QUERY = "TOPK.QUERY"
TOPK_COUNT = "TOPK.COUNT"
TOPK_LIST = "TOPK.LIST"
TOPK_INFO = "TOPK.INFO"
TDIGEST_CREATE = "TDIGEST.CREATE"
TDIGEST_RESET = "TDIGEST.RESET"
TDIGEST_ADD = "TDIGEST.ADD"
TDIGEST_MERGE = "TDIGEST.MERGE"
TDIGEST_CDF = "TDIGEST.CDF"
TDIGEST_QUANTILE = "TDIGEST.QUANTILE"
TDIGEST_MIN = "TDIGEST.MIN"
TDIGEST_MAX = "TDIGEST.MAX"
TDIGEST_INFO = "TDIGEST.INFO"
TDIGEST_TRIMMED_MEAN = "TDIGEST.TRIMMED_MEAN"
TDIGEST_RANK = "TDIGEST.RANK"
TDIGEST_REVRANK = "TDIGEST.REVRANK"
TDIGEST_BYRANK = "TDIGEST.BYRANK"
TDIGEST_BYREVRANK = "TDIGEST.BYREVRANK"
class BFCommands:
    """Bloom Filter (``BF.*``) commands."""

    def create(self, key, errorRate, capacity, expansion=None, noScale=None):
        """
        Create a new Bloom Filter `key` with false-positive probability
        `errorRate` and an expected `capacity` of inserted entries.
        The default expansion value is 2, and the filter auto-scales by default.

        For more information see `BF.RESERVE <https://redis.io/commands/bf.reserve>`_.
        """  # noqa
        pieces = [key, errorRate, capacity]
        self.append_expansion(pieces, expansion)
        self.append_no_scale(pieces, noScale)
        return self.execute_command(BF_RESERVE, *pieces)

    reserve = create

    def add(self, key, item):
        """
        Add a single `item` to a Bloom Filter `key`.

        For more information see `BF.ADD <https://redis.io/commands/bf.add>`_.
        """  # noqa
        return self.execute_command(BF_ADD, key, item)

    def madd(self, key, *items):
        """
        Add several `items` to a Bloom Filter `key` in a single call.

        For more information see `BF.MADD <https://redis.io/commands/bf.madd>`_.
        """  # noqa
        return self.execute_command(BF_MADD, key, *items)

    def insert(
        self,
        key,
        items,
        capacity=None,
        error=None,
        noCreate=None,
        expansion=None,
        noScale=None,
    ):
        """
        Add several `items` to a Bloom Filter `key`, creating the filter on
        demand. Unless `noCreate` is set and `key` is missing, a new filter
        is created with the requested `error` rate and `capacity`.

        For more information see `BF.INSERT <https://redis.io/commands/bf.insert>`_.
        """  # noqa
        pieces = [key]
        self.append_capacity(pieces, capacity)
        self.append_error(pieces, error)
        self.append_expansion(pieces, expansion)
        self.append_no_create(pieces, noCreate)
        self.append_no_scale(pieces, noScale)
        self.append_items(pieces, items)
        return self.execute_command(BF_INSERT, *pieces)

    def exists(self, key, item):
        """
        Check whether `item` may exist in Bloom Filter `key`.

        For more information see `BF.EXISTS <https://redis.io/commands/bf.exists>`_.
        """  # noqa
        return self.execute_command(BF_EXISTS, key, item)

    def mexists(self, key, *items):
        """
        Check whether each of `items` may exist in Bloom Filter `key`.

        For more information see `BF.MEXISTS <https://redis.io/commands/bf.mexists>`_.
        """  # noqa
        return self.execute_command(BF_MEXISTS, key, *items)

    def scandump(self, key, iter):
        """
        Begin an incremental save of the bloom filter `key`.

        Useful for large bloom filters that cannot use the normal SAVE and
        RESTORE model. Call first with `iter` set to 0; successive calls
        return (iter, data) pairs until (0, NULL) indicates completion.

        For more information see `BF.SCANDUMP <https://redis.io/commands/bf.scandump>`_.
        """  # noqa
        # The dump chunks are raw bytes; tell the client never to decode them.
        return self.execute_command(BF_SCANDUMP, key, iter, **{NEVER_DECODE: []})

    def loadchunk(self, key, iter, data):
        """
        Restore a filter previously saved with SCANDUMP (see that command
        for example usage).

        Overwrites any bloom filter stored under `key`; the filter must not
        be modified between invocations.

        For more information see `BF.LOADCHUNK <https://redis.io/commands/bf.loadchunk>`_.
        """  # noqa
        return self.execute_command(BF_LOADCHUNK, key, iter, data)

    def info(self, key):
        """
        Return capacity, size, number of filters, number of inserted items,
        and expansion rate of `key`.

        For more information see `BF.INFO <https://redis.io/commands/bf.info>`_.
        """  # noqa
        return self.execute_command(BF_INFO, key)

    def card(self, key):
        """
        Return the cardinality of a Bloom filter - the number of items added
        and detected as unique (items that set at least one bit in at least
        one sub-filter).

        For more information see `BF.CARD <https://redis.io/commands/bf.card>`_.
        """  # noqa
        return self.execute_command(BF_CARD, key)
class CFCommands:
    """Cuckoo Filter (``CF.*``) commands."""

    def create(
        self, key, capacity, expansion=None, bucket_size=None, max_iterations=None
    ):
        """
        Create a new Cuckoo Filter `key` with an initial `capacity` of items.

        For more information see `CF.RESERVE <https://redis.io/commands/cf.reserve>`_.
        """  # noqa
        params = [key, capacity]
        self.append_expansion(params, expansion)
        self.append_bucket_size(params, bucket_size)
        self.append_max_iterations(params, max_iterations)
        return self.execute_command(CF_RESERVE, *params)

    reserve = create

    def add(self, key, item):
        """
        Add an `item` to a Cuckoo Filter `key`.

        For more information see `CF.ADD <https://redis.io/commands/cf.add>`_.
        """  # noqa
        return self.execute_command(CF_ADD, key, item)

    def addnx(self, key, item):
        """
        Add an `item` to a Cuckoo Filter `key` only if item does not yet exist.
        Command might be slower that `add`.

        For more information see `CF.ADDNX <https://redis.io/commands/cf.addnx>`_.
        """  # noqa
        return self.execute_command(CF_ADDNX, key, item)

    def insert(self, key, items, capacity=None, nocreate=None):
        """
        Add multiple `items` to a Cuckoo Filter `key`, allowing the filter
        to be created with a custom `capacity` if it does not yet exist.
        `items` must be provided as a list.

        For more information see `CF.INSERT <https://redis.io/commands/cf.insert>`_.
        """  # noqa
        params = [key]
        self.append_capacity(params, capacity)
        self.append_no_create(params, nocreate)
        self.append_items(params, items)
        return self.execute_command(CF_INSERT, *params)

    def insertnx(self, key, items, capacity=None, nocreate=None):
        """
        Add multiple `items` to a Cuckoo Filter `key` only if they do not exist yet,
        allowing the filter to be created with a custom `capacity` if it does not yet exist.
        `items` must be provided as a list.

        For more information see `CF.INSERTNX <https://redis.io/commands/cf.insertnx>`_.
        """  # noqa
        params = [key]
        self.append_capacity(params, capacity)
        self.append_no_create(params, nocreate)
        self.append_items(params, items)
        return self.execute_command(CF_INSERTNX, *params)

    def exists(self, key, item):
        """
        Check whether an `item` exists in Cuckoo Filter `key`.

        For more information see `CF.EXISTS <https://redis.io/commands/cf.exists>`_.
        """  # noqa
        return self.execute_command(CF_EXISTS, key, item)

    def mexists(self, key, *items):
        """
        Check whether `items` exist in Cuckoo Filter `key`.

        For more information see `CF.MEXISTS <https://redis.io/commands/cf.mexists>`_.
        """  # noqa
        return self.execute_command(CF_MEXISTS, key, *items)

    def delete(self, key, item):
        """
        Delete `item` from `key`.

        For more information see `CF.DEL <https://redis.io/commands/cf.del>`_.
        """  # noqa
        return self.execute_command(CF_DEL, key, item)

    def count(self, key, item):
        """
        Return the number of times an `item` may be in the `key`.

        For more information see `CF.COUNT <https://redis.io/commands/cf.count>`_.
        """  # noqa
        return self.execute_command(CF_COUNT, key, item)

    def scandump(self, key, iter):
        """
        Begin an incremental save of the Cuckoo filter `key`.

        This is useful for large Cuckoo filters which cannot fit into the
        normal SAVE and RESTORE model.
        The first time this command is called, the value of `iter` should be 0.
        This command will return successive (iter, data) pairs until
        (0, NULL) to indicate completion.

        For more information see `CF.SCANDUMP <https://redis.io/commands/cf.scandump>`_.
        """  # noqa
        # Match BFCommands.scandump: the dump chunks are raw bytes, so the
        # client must never attempt to decode the response (otherwise a
        # UnicodeDecodeError can occur when decode_responses=True).
        params = [key, iter]
        options = {NEVER_DECODE: []}
        return self.execute_command(CF_SCANDUMP, *params, **options)

    def loadchunk(self, key, iter, data):
        """
        Restore a filter previously saved using SCANDUMP. See the SCANDUMP command for example usage.

        This command will overwrite any Cuckoo filter stored under key.
        Ensure that the Cuckoo filter will not be modified between invocations.

        For more information see `CF.LOADCHUNK <https://redis.io/commands/cf.loadchunk>`_.
        """  # noqa
        return self.execute_command(CF_LOADCHUNK, key, iter, data)

    def info(self, key):
        """
        Return size, number of buckets, number of filter, number of items inserted,
        number of items deleted, bucket size, expansion rate, and max iteration.

        For more information see `CF.INFO <https://redis.io/commands/cf.info>`_.
        """  # noqa
        return self.execute_command(CF_INFO, key)
class TOPKCommands:
    """Top-K sketch (``TOPK.*``) commands."""

    def reserve(self, key, k, width, depth, decay):
        """
        Create a new Top-K sketch `key` that tracks the `k` most frequent
        items, using a sketch of dimensions `width` x `depth` and counter
        `decay` rate.

        For more information see `TOPK.RESERVE <https://redis.io/commands/topk.reserve>`_.
        """  # noqa
        return self.execute_command(TOPK_RESERVE, key, k, width, depth, decay)

    def add(self, key, *items):
        """
        Add one `item` or more to a Top-K Filter `key`.

        For more information see `TOPK.ADD <https://redis.io/commands/topk.add>`_.
        """  # noqa
        return self.execute_command(TOPK_ADD, key, *items)

    def incrby(self, key, items, increments):
        """
        Add/increase `items` to a Top-K Sketch `key` by ''increments''.
        Both `items` and `increments` are lists.

        For more information see `TOPK.INCRBY <https://redis.io/commands/topk.incrby>`_.

        Example:

        >>> topkincrby('A', ['foo'], [1])
        """  # noqa
        params = [key]
        self.append_items_and_increments(params, items, increments)
        return self.execute_command(TOPK_INCRBY, *params)

    def query(self, key, *items):
        """
        Check whether one `item` or more is a Top-K item at `key`.

        For more information see `TOPK.QUERY <https://redis.io/commands/topk.query>`_.
        """  # noqa
        return self.execute_command(TOPK_QUERY, key, *items)

    @deprecated_function(version="4.4.0", reason="deprecated since redisbloom 2.4.0")
    def count(self, key, *items):
        """
        Return count for one `item` or more from `key`.

        For more information see `TOPK.COUNT <https://redis.io/commands/topk.count>`_.
        """  # noqa
        return self.execute_command(TOPK_COUNT, key, *items)

    def list(self, key, withcount=False):
        """
        Return full list of items in Top-K list of `key`.
        If `withcount` set to True, return full list of items
        with probabilistic count in Top-K list of `key`.

        For more information see `TOPK.LIST <https://redis.io/commands/topk.list>`_.
        """  # noqa
        params = [key]
        if withcount:
            params.append("WITHCOUNT")
        return self.execute_command(TOPK_LIST, *params)

    def info(self, key):
        """
        Return k, width, depth and decay values of `key`.

        For more information see `TOPK.INFO <https://redis.io/commands/topk.info>`_.
        """  # noqa
        return self.execute_command(TOPK_INFO, key)
class TDigestCommands:
    """t-digest sketch (``TDIGEST.*``) commands."""

    def create(self, key, compression=100):
        """
        Allocate the memory and initialize the t-digest.

        For more information see `TDIGEST.CREATE <https://redis.io/commands/tdigest.create>`_.
        """  # noqa
        return self.execute_command(TDIGEST_CREATE, key, "COMPRESSION", compression)

    def reset(self, key):
        """
        Reset the sketch `key` to zero - empty out the sketch and re-initialize it.

        For more information see `TDIGEST.RESET <https://redis.io/commands/tdigest.reset>`_.
        """  # noqa
        return self.execute_command(TDIGEST_RESET, key)

    def add(self, key, values):
        """
        Adds one or more observations to a t-digest sketch `key`.

        For more information see `TDIGEST.ADD <https://redis.io/commands/tdigest.add>`_.
        """  # noqa
        return self.execute_command(TDIGEST_ADD, key, *values)

    def merge(self, destination_key, num_keys, *keys, compression=None, override=False):
        """
        Merges all of the values from `keys` to 'destination-key' sketch.
        It is mandatory to provide the `num_keys` before passing the input keys and
        the other (optional) arguments.
        If `destination_key` already exists its values are merged with the input keys.
        If you wish to override the destination key contents use the `OVERRIDE` parameter.

        For more information see `TDIGEST.MERGE <https://redis.io/commands/tdigest.merge>`_.
        """  # noqa
        params = [destination_key, num_keys, *keys]
        if compression is not None:
            params.extend(["COMPRESSION", compression])
        if override:
            params.append("OVERRIDE")
        return self.execute_command(TDIGEST_MERGE, *params)

    def min(self, key):
        """
        Return minimum value from the sketch `key`. Will return DBL_MAX if the sketch is empty.

        For more information see `TDIGEST.MIN <https://redis.io/commands/tdigest.min>`_.
        """  # noqa
        return self.execute_command(TDIGEST_MIN, key)

    def max(self, key):
        """
        Return maximum value from the sketch `key`. Will return DBL_MIN if the sketch is empty.

        For more information see `TDIGEST.MAX <https://redis.io/commands/tdigest.max>`_.
        """  # noqa
        return self.execute_command(TDIGEST_MAX, key)

    def quantile(self, key, quantile, *quantiles):
        """
        Returns estimates of one or more cutoffs such that a specified fraction of the
        observations added to this t-digest would be less than or equal to each of the
        specified cutoffs. (Multiple quantiles can be returned with one call)

        For more information see `TDIGEST.QUANTILE <https://redis.io/commands/tdigest.quantile>`_.
        """  # noqa
        return self.execute_command(TDIGEST_QUANTILE, key, quantile, *quantiles)

    def cdf(self, key, value, *values):
        """
        Return double fraction of all points added which are <= value.

        For more information see `TDIGEST.CDF <https://redis.io/commands/tdigest.cdf>`_.
        """  # noqa
        return self.execute_command(TDIGEST_CDF, key, value, *values)

    def info(self, key):
        """
        Return Compression, Capacity, Merged Nodes, Unmerged Nodes, Merged Weight, Unmerged Weight
        and Total Compressions.

        For more information see `TDIGEST.INFO <https://redis.io/commands/tdigest.info>`_.
        """  # noqa
        return self.execute_command(TDIGEST_INFO, key)

    def trimmed_mean(self, key, low_cut_quantile, high_cut_quantile):
        """
        Return mean value from the sketch, excluding observation values outside
        the low and high cutoff quantiles.

        For more information see `TDIGEST.TRIMMED_MEAN <https://redis.io/commands/tdigest.trimmed_mean>`_.
        """  # noqa
        return self.execute_command(
            TDIGEST_TRIMMED_MEAN, key, low_cut_quantile, high_cut_quantile
        )

    def rank(self, key, value, *values):
        """
        Retrieve the estimated rank of value (the number of observations in the sketch
        that are smaller than value + half the number of observations that are equal to value).

        For more information see `TDIGEST.RANK <https://redis.io/commands/tdigest.rank>`_.
        """  # noqa
        return self.execute_command(TDIGEST_RANK, key, value, *values)

    def revrank(self, key, value, *values):
        """
        Retrieve the estimated rank of value (the number of observations in the sketch
        that are larger than value + half the number of observations that are equal to value).

        For more information see `TDIGEST.REVRANK <https://redis.io/commands/tdigest.revrank>`_.
        """  # noqa
        return self.execute_command(TDIGEST_REVRANK, key, value, *values)

    def byrank(self, key, rank, *ranks):
        """
        Retrieve an estimation of the value with the given rank.

        For more information see `TDIGEST.BYRANK <https://redis.io/commands/tdigest.byrank>`_.
        """  # noqa
        return self.execute_command(TDIGEST_BYRANK, key, rank, *ranks)

    def byrevrank(self, key, rank, *ranks):
        """
        Retrieve an estimation of the value with the given reverse rank.

        For more information see `TDIGEST.BYREVRANK <https://redis.io/commands/tdigest.byrevrank>`_.
        """  # noqa
        return self.execute_command(TDIGEST_BYREVRANK, key, rank, *ranks)
class CMSCommands:
    """Count-Min Sketch (``CMS.*``) commands."""

    def initbydim(self, key, width, depth):
        """
        Initialize a Count-Min Sketch `key` to dimensions (`width`, `depth`) specified by user.

        For more information see `CMS.INITBYDIM <https://redis.io/commands/cms.initbydim>`_.
        """  # noqa
        return self.execute_command(CMS_INITBYDIM, key, width, depth)

    def initbyprob(self, key, error, probability):
        """
        Initialize a Count-Min Sketch `key` to characteristics (`error`, `probability`) specified by user.

        For more information see `CMS.INITBYPROB <https://redis.io/commands/cms.initbyprob>`_.
        """  # noqa
        return self.execute_command(CMS_INITBYPROB, key, error, probability)

    def incrby(self, key, items, increments):
        """
        Add/increase `items` to a Count-Min Sketch `key` by ''increments''.
        Both `items` and `increments` are lists.

        For more information see `CMS.INCRBY <https://redis.io/commands/cms.incrby>`_.

        Example:

        >>> cmsincrby('A', ['foo'], [1])
        """  # noqa
        params = [key]
        self.append_items_and_increments(params, items, increments)
        return self.execute_command(CMS_INCRBY, *params)

    def query(self, key, *items):
        """
        Return count for an `item` from `key`. Multiple items can be queried with one call.

        For more information see `CMS.QUERY <https://redis.io/commands/cms.query>`_.
        """  # noqa
        return self.execute_command(CMS_QUERY, key, *items)

    def merge(self, destKey, numKeys, srcKeys, weights=None):
        """
        Merge `numKeys` of sketches into `destKey`. Sketches specified in `srcKeys`.
        All sketches must have identical width and depth.
        `Weights` can be used to multiply certain sketches. Default weight is 1.
        Both `srcKeys` and `weights` are lists.

        For more information see `CMS.MERGE <https://redis.io/commands/cms.merge>`_.
        """  # noqa
        # `weights` defaults to None rather than a shared mutable [] default;
        # an omitted/None value behaves exactly like an empty list.
        params = [destKey, numKeys]
        params += srcKeys
        self.append_weights(params, weights if weights is not None else [])
        return self.execute_command(CMS_MERGE, *params)

    def info(self, key):
        """
        Return width, depth and total count of the sketch.

        For more information see `CMS.INFO <https://redis.io/commands/cms.info>`_.
        """  # noqa
        return self.execute_command(CMS_INFO, key)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@redis@py3@redis@commands@bf@commands.py@.PATH_END.py
|
{
"filename": "cls_instrument_library.py",
"repo_name": "pysat/pysat",
"repo_path": "pysat_extracted/pysat-main/pysat/tests/classes/cls_instrument_library.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Full license can be found in License.md
# Full author list can be found in .zenodo.json file
# DOI:10.5281/zenodo.1199703
#
# DISTRIBUTION STATEMENT A: Approved for public release. Distribution is
# unlimited.
# ----------------------------------------------------------------------------
"""Standardized class and functions to test instruments for pysat libraries.
Note
----
Not directly called by pytest, but imported as part of test_instruments.py.
Can be imported directly for external instrument libraries of pysat instruments.
Examples
--------
::
# Import custom instrument library
import mypackage
# Import the test classes from pysat.
from pysat.tests.classes.cls_instrument_library import InstLibTests
InstLibTests.initialize_test_package(InstLibTests,
inst_loc=mypackage.instruments,
user_info=user_info)
class TestInstruments(InstLibTests):
'''Create a testable object from standard library.
Note
-----
In your docstring be sure to use double quotes instead of single quotes.
'''
"""
import datetime as dt
from importlib import import_module
import logging
import numpy as np
import sys
import tempfile
import warnings
import pandas as pds
import pytest
import xarray as xr
import pysat
from pysat.utils import generate_instrument_list
from pysat.utils import testing
def initialize_test_inst_and_date(inst_dict):
    """Initialize the instrument object to test and date.

    Parameters
    ----------
    inst_dict : dict
        Dictionary containing specific instrument info, generated by
        generate_instrument_list.  May optionally include a 'kwargs' entry
        with extra keyword arguments for the Instrument constructor.

    Returns
    -------
    test_inst : pysat.Instrument
        instrument object to be tested
    date : dt.datetime
        test date from module

    """
    # 'kwargs' is an optional entry; fall back to an empty dict.
    kwargs = inst_dict.get('kwargs', {})

    test_inst = pysat.Instrument(inst_module=inst_dict['inst_module'],
                                 tag=inst_dict['tag'],
                                 inst_id=inst_dict['inst_id'],
                                 temporary_file_list=True, update_files=True,
                                 **kwargs)

    # Pull the per-inst_id/tag test date declared by the instrument module.
    test_dates = inst_dict['inst_module']._test_dates
    date = test_dates[inst_dict['inst_id']][inst_dict['tag']]
    return test_inst, date
def load_and_set_strict_time_flag(test_inst, date, raise_error=False,
                                  clean_off=True, set_end_date=False):
    """Load data and set the strict time flag if needed for other tests.

    Parameters
    ----------
    test_inst : pysat.Instrument
        Test instrument
    date : dt.datetime
        Date for loading data
    raise_error : bool
        Raise the load error if it is not the strict time flag error
        (default=False)
    clean_off : bool
        Turn off the clean method when re-loading data and testing the
        strict time flag (default=True)
    set_end_date : bool
        If True, load with setting the end date. If False, load single day.
        (default=False)

    """
    kwargs = {}

    if set_end_date:
        # Load a multi-day span ending two days after `date`.
        kwargs['end_date'] = date + dt.timedelta(days=2)

    try:
        test_inst.load(date=date, **kwargs)
    except Exception as err:
        # Catch all potential input errors, and only ensure that the one caused
        # by the strict time flag is prevented from occurring on future load
        # calls.
        # NOTE(review): `find(...) > 0` assumes the strict-time error message
        # never *starts* with 'Loaded data' (which would yield index 0 and be
        # treated as a different error) -- confirm against pysat's message.
        if str(err).find('Loaded data') > 0:
            # Change the flags that may have caused the error to be raised, to
            # see if it the strict time flag
            test_inst.strict_time_flag = False

            if clean_off:
                # Turn the clean method off
                orig_clean_level = str(test_inst.clean_level)
                test_inst.clean_level = 'none'

            # Evaluate the warning raised by the re-load with the strict
            # time flag disabled.
            with warnings.catch_warnings(record=True) as war:
                test_inst.load(date=date, **kwargs)

            assert len(war) >= 1
            categories = [war[j].category for j in range(len(war))]
            assert UserWarning in categories

            if clean_off:
                # Reset the clean level
                test_inst.clean_level = orig_clean_level
        elif raise_error:
            raise err

    return
class InstLibTests(object):
    """Provide standardized tests for pysat instrument libraries.

    Note
    ----
    Uses class level setup and teardown so that all tests use the same
    temporary directory. We do not want to generate a new tempdir for each
    test, as the load tests need to be the same as the download tests.

    Not directly run by pytest, but inherited through test_instruments.py

    Users will need to run `initialize_test_package` before setting up the
    test class.

    See Also
    --------
    `pysat.tests.test_instruments`

    """

    # Define standard attributes to check.
    # Needs to be defined here for backwards compatibility.
    # Attributes that every instrument module must define.
    module_attrs = ['platform', 'name', 'tags', 'inst_ids',
                    'load', 'list_files', 'download']
    # Attributes that every instantiated Instrument must carry.
    inst_attrs = ['tag', 'inst_id', 'acknowledgements', 'references',
                  'inst_module']
    # Module-level functions that must be callable when present.
    inst_callable = ['load', 'list_files', 'download', 'clean',
                     'default']
    # Expected types for the attributes listed above.
    attr_types = {'platform': str, 'name': str, 'tags': dict,
                  'inst_ids': dict, 'tag': str, 'inst_id': str,
                  'acknowledgements': str, 'references': str}
def setup_class(self):
"""Initialize the testing setup once before all tests are run."""
# Use a temporary directory so that the user's setup is not altered.
# TODO(#974): Remove if/else when support for Python 3.9 is dropped.
if sys.version_info.minor >= 10:
self.tempdir = tempfile.TemporaryDirectory(
ignore_cleanup_errors=True)
else:
self.tempdir = tempfile.TemporaryDirectory()
self.saved_path = pysat.params['data_dirs']
pysat.params._set_data_dirs(path=self.tempdir.name, store=False)
return
    def teardown_class(self):
        """Clean up downloaded files and parameters from tests."""

        # Restore the user's original data directory configuration.
        pysat.params._set_data_dirs(self.saved_path, store=False)

        # Remove the temporary directory. In Windows, this occasionally fails
        # by raising a wide variety of different error messages. Python 3.10+
        # can handle this, but lower Python versions cannot.
        # TODO(#974): Remove try/except when support for Python 3.9 is dropped.
        try:
            self.tempdir.cleanup()
        except Exception:
            # Best-effort cleanup; the OS will reclaim the tempdir eventually.
            pass

        del self.saved_path, self.tempdir
        return
def setup_method(self):
"""Initialize parameters before each method."""
self.test_inst = None
self.date = None
self.module = None
return
def teardown_method(self):
"""Clean up any instruments that were initialized."""
del self.test_inst, self.date, self.module
return
    def initialize_test_package(self, inst_loc, user_info=None):
        """Generate custom instrument lists for each category of tests.

        Parameters
        ----------
        inst_loc : python subpackage
            The location of the instrument subpackage to test, e.g.,
            `pysat.instruments`
        user_info : dict or NoneType
            Nested dictionary with user and password info for instrument module
            name.  If None, no user or password is assumed. (default=None)
            EX: user_info = {'jro_isr': {'user': 'myname', 'password': 'email'}}

        Returns
        -------
        instruments : dict
            A dictionary containing the lists of instruments from a given
            package for each category of tests.  The categories are:
            "names" : A list of all instrument modules by name.
            "download" : Instrument objects with full download support.
            "no_download" : Instrument objects without download support.

        """
        # Attach location of package to test object for later reference.
        self.inst_loc = inst_loc

        # Find all instruments for testing from user-specified location.
        instruments = generate_instrument_list(inst_loc=inst_loc,
                                               user_info=user_info)

        # Find all methods in the standard test class.
        method_list = [func for func in dir(self)
                       if callable(getattr(self, func))]

        # Search tests for iteration via pytestmark, update w/ instrument list.
        for method in method_list:
            if hasattr(getattr(self, method), 'pytestmark'):
                # Get list of names of pytestmarks.
                n_args = len(getattr(self, method).pytestmark)
                mark_names = [getattr(self, method).pytestmark[j].name
                              for j in range(0, n_args)]

                # Add instruments from your library.
                # NOTE: the elif chain makes the categories mutually
                # exclusive; each method is parametrized by the first
                # matching mark only, in this priority order.
                if 'all_inst' in mark_names:
                    mark = pytest.mark.parametrize("inst_name",
                                                   instruments['names'])
                    getattr(self, method).pytestmark.append(mark)
                elif 'new_tests' in mark_names:
                    # Prioritize new test marks if present
                    mark = pytest.mark.parametrize("inst_dict",
                                                   instruments['new_tests'])
                    getattr(self, method).pytestmark.append(mark)
                elif 'load_options' in mark_names:
                    # Prioritize load_options mark if present
                    mark = pytest.mark.parametrize("inst_dict",
                                                   instruments['load_options'])
                    getattr(self, method).pytestmark.append(mark)
                elif 'download' in mark_names:
                    mark = pytest.mark.parametrize("inst_dict",
                                                   instruments['download'])
                    getattr(self, method).pytestmark.append(mark)
                elif 'no_download' in mark_names:
                    mark = pytest.mark.parametrize("inst_dict",
                                                   instruments['no_download'])
                    getattr(self, method).pytestmark.append(mark)

        return instruments
    @pytest.mark.all_inst
    def test_modules_standard(self, inst_name):
        """Test that modules are importable and have standard properties.

        Parameters
        ----------
        inst_name : str
            Name of instrument module. Set automatically from
            instruments['names'] when `initialize_test_package` is run.

        """
        # Ensure that each module is at minimum importable
        self.module = import_module(''.join(('.', inst_name)),
                                    package=self.inst_loc.__name__)

        # Check for presence of basic instrument module attributes
        for mattr in self.module_attrs:
            testing.assert_hasattr(self.module, mattr)
            if mattr in self.attr_types.keys():
                testing.assert_isinstance(getattr(self.module, mattr),
                                          self.attr_types[mattr])

        # Check for presence of required instrument attributes
        for inst_id in self.module.inst_ids.keys():
            for tag in self.module.inst_ids[inst_id]:
                # Every declared inst_id/tag pair must instantiate cleanly.
                self.test_inst = pysat.Instrument(inst_module=self.module,
                                                  tag=tag, inst_id=inst_id)

                # Test to see that the class parameters were passed in
                testing.assert_isinstance(self.test_inst, pysat.Instrument)
                assert self.test_inst.platform == self.module.platform
                assert self.test_inst.name == self.module.name
                assert self.test_inst.inst_id == inst_id
                assert self.test_inst.tag == tag
                assert self.test_inst.inst_module is not None

                # Test the required class attributes
                for iattr in self.inst_attrs:
                    testing.assert_hasattr(self.test_inst, iattr)
                    if iattr in self.attr_types:
                        testing.assert_isinstance(getattr(self.test_inst,
                                                          iattr),
                                                  self.attr_types[iattr])
        return
@pytest.mark.all_inst
def test_standard_function_presence(self, inst_name):
"""Test that each function is callable, all required functions exist.
Parameters
----------
inst_name : str
Name of instrument module. Set automatically from
instruments['names'] when `initialize_test_package` is run.
"""
self.module = import_module(''.join(('.', inst_name)),
package=self.inst_loc.__name__)
# Test for presence of all standard module functions
for mcall in self.inst_callable:
if hasattr(self.module, mcall):
# If present, must be a callable function
assert callable(getattr(self.module, mcall))
else:
# If absent, must not be a required function
assert mcall not in self.module_attrs
return
@pytest.mark.all_inst
def test_instrument_test_dates(self, inst_name):
"""Test that module has structured test dates correctly.
Parameters
----------
inst_name : str
Name of instrument module. Set automatically from
instruments['names'] when `initialize_test_package` is run.
"""
self.module = import_module(''.join(('.', inst_name)),
package=self.inst_loc.__name__)
info = self.module._test_dates
for inst_id in info.keys():
for tag in info[inst_id].keys():
testing.assert_isinstance(info[inst_id][tag], dt.datetime)
return
    @pytest.mark.first
    @pytest.mark.download
    def test_download(self, inst_dict):
        """Test that instruments are downloadable.

        Parameters
        ----------
        inst_dict : dict
            Dictionary containing info to instantiate a specific instrument.
            Set automatically from instruments['download'] when
            `initialize_test_package` is run.

        """
        self.test_inst, self.date = initialize_test_inst_and_date(inst_dict)

        # Check for username.
        if 'user_info' in inst_dict.keys():
            dl_dict = inst_dict['user_info']
        else:
            dl_dict = {}

        # Ask to download two consecutive days
        self.test_inst.download(start=self.date,
                                stop=self.date + dt.timedelta(days=2),
                                **dl_dict)

        # At least one file must have been acquired for the test period.
        assert len(self.test_inst.files.files) > 0
        return
    @pytest.mark.second
    @pytest.mark.load_options
    @pytest.mark.parametrize("clean_level", ['none', 'dirty', 'dusty', 'clean'])
    def test_load(self, clean_level, inst_dict):
        """Test that instruments load at each cleaning level.

        Parameters
        ----------
        clean_level : str
            Cleanliness level for loaded instrument data.
        inst_dict : dict
            Dictionary containing info to instantiate a specific instrument.
            Set automatically from instruments['download'] when
            `initialize_test_package` is run.

        """
        self.test_inst, self.date = initialize_test_inst_and_date(inst_dict)
        if len(self.test_inst.files.files) > 0:
            # Set the clean level
            self.test_inst.clean_level = clean_level
            # Seed the instrument with placeholder data so the load can be
            # verified to replace it.
            target = 'Fake Data to be cleared'
            self.test_inst.data = [target]

            # Make sure the strict time flag doesn't interfere with
            # the load tests, and re-run with desired clean level
            load_and_set_strict_time_flag(self.test_inst, self.date,
                                          raise_error=True, clean_off=False)

            # Make sure fake data is cleared
            assert target not in self.test_inst.data

            # If cleaning not used, something should be in the file
            # Not used for clean levels since cleaning may remove all data
            if clean_level == "none":
                assert not self.test_inst.empty
        else:
            pytest.skip("Download data not available")

        return
    @pytest.mark.second
    @pytest.mark.load_options
    def test_load_empty(self, inst_dict):
        """Test that instruments load empty objects if no data is available.

        Parameters
        ----------
        inst_dict : dict
            Dictionary containing info to instantiate a specific instrument.
            Set automatically from instruments['download'] when
            `initialize_test_package` is run.

        """
        # Get the instrument information and update the date to be in the future
        self.test_inst, self.date = initialize_test_inst_and_date(inst_dict)
        self.date = dt.datetime(dt.datetime.now(dt.timezone.utc).year + 100,
                                1, 1)

        # Make sure the strict time flag doesn't interfere with the load test
        load_and_set_strict_time_flag(self.test_inst, self.date,
                                      raise_error=True)

        # Check the empty status
        assert self.test_inst.empty, "Data was loaded for a far-future time"
        assert self.test_inst.meta == pysat.Meta(), "Meta data is not empty"

        if self.test_inst.pandas_format:
            assert all(self.test_inst.data == pds.DataFrame()), "Data not empty"
        else:
            # For xarray output, both the dims and the data variables must
            # match an empty Dataset.
            assert self.test_inst.data.dims == xr.Dataset().dims, \
                "Dims not empty"
            assert self.test_inst.data.data_vars == xr.Dataset().data_vars, \
                "Data variables not empty"
        return
    # TODO(#1172): remove mark.new_tests at v3.3.0
    @pytest.mark.second
    @pytest.mark.load_options
    @pytest.mark.new_tests
    def test_load_multiple_days(self, inst_dict):
        """Test that instruments load multiple days when requested.

        Parameters
        ----------
        inst_dict : dict
            Dictionary containing info to instantiate a specific instrument.
            Set automatically from instruments['download'] when
            `initialize_test_package` is run.

        """
        self.test_inst, self.date = initialize_test_inst_and_date(inst_dict)
        if len(self.test_inst.files.files) > 0:
            # Only historical data supports a multi-day span.
            if self.date < self.test_inst.today():
                # Make sure the strict time flag doesn't interfere with
                # the load tests, and re-run with desired clean level
                self.test_inst.clean_level = 'none'
                load_and_set_strict_time_flag(self.test_inst, self.date,
                                              raise_error=True, clean_off=True,
                                              set_end_date=True)

                # Make sure more than one day has been loaded
                assert hasattr(self.test_inst.index, 'day'), \
                    "No data to load for {:}-{:}".format(
                        self.date, self.date + dt.timedelta(days=2))
                assert len(np.unique(self.test_inst.index.day)) > 1
            else:
                pytest.skip("".join(["Can't download multiple days of real-",
                                     "time or forecast data"]))
        else:
            pytest.skip("Download data not available")

        return
@pytest.mark.second
@pytest.mark.load_options
@pytest.mark.parametrize("clean_level", ['dirty', 'dusty', 'clean'])
def test_clean_warn(self, clean_level, inst_dict, caplog):
    """Test that appropriate warnings and errors are raised when cleaning.

    Parameters
    ----------
    clean_level : str
        Cleanliness level for loaded instrument data; one of 'dirty',
        'dusty', or 'clean' (set by the parametrize decorator).
    inst_dict : dict
        Dictionary containing info to instantiate a specific instrument.
        Set automatically from instruments['download'] when
        `initialize_test_package` is run.
    caplog : pytest.LogCaptureFixture
        Fixture used to capture logging output from the cleaning routines.

    """
    # Not all Instruments have warning messages to test, only run tests
    # when the desired test attribute is defined
    if hasattr(inst_dict['inst_module'], '_clean_warn'):
        try:
            # `_clean_warn` is a nested dict keyed by inst_id then tag
            clean_warn = inst_dict['inst_module']._clean_warn[
                inst_dict['inst_id']][inst_dict['tag']]
        except KeyError:
            # Combo does not exist for this instrument, skip test.
            pytest.skip("".join(["No clean warnings for Instrument ",
                                 repr(inst_dict['inst_module']), " level ",
                                 clean_level]))

        # Cleaning warnings may vary by clean level, test the warning
        # messages at the current clean level, specified by `clean_level`
        if clean_level in clean_warn.keys():
            # Only need to test if there are clean warnings for this level
            self.test_inst, self.date = initialize_test_inst_and_date(
                inst_dict)
            clean_warnings = clean_warn[clean_level]

            # Make sure the strict time flag doesn't interfere with
            # the cleaning tests
            load_and_set_strict_time_flag(self.test_inst, self.date)

            # Cycle through each of the potential cleaning messages
            # for this Instrument module, inst ID, tag, and clean level.
            # Each entry specifies how the message is delivered ('logger',
            # 'warning', or 'error'), its severity, the expected message
            # text, and the clean level expected after loading.
            for (clean_method, clean_method_level, clean_method_msg,
                 final_level) in clean_warnings:
                if len(self.test_inst.files.files) > 0:
                    # Set the clean level
                    self.test_inst.clean_level = clean_level
                    # Seed recognizable fake data so we can verify that
                    # the load replaces it
                    target = 'Fake Data to be cleared'
                    self.test_inst.data = [target]

                    if clean_method == 'logger':
                        # A logging message is expected
                        with caplog.at_level(
                                getattr(logging, clean_method_level),
                                logger='pysat'):
                            self.test_inst.load(date=self.date)

                        # Test the returned message
                        out_msg = caplog.text
                        assert out_msg.find(clean_method_msg) >= 0, \
                            "{:s} not in output: {:s}".format(
                                clean_method_msg, out_msg)
                    elif clean_method == 'warning':
                        # A warning message is expected
                        with warnings.catch_warnings(record=True) as war:
                            self.test_inst.load(date=self.date)

                        # Test the warning output
                        testing.eval_warnings(war, [clean_method_msg],
                                              clean_method_level)
                    elif clean_method == 'error':
                        # An error message is expected, evaluate error
                        # and the error message
                        testing.eval_bad_input(
                            self.test_inst.load, clean_method_level,
                            clean_method_msg,
                            input_kwargs={'date': self.date})
                    else:
                        # Unsupported entry in `_clean_warn`; fail loudly
                        raise AttributeError(
                            'unknown type of warning: {:}'.format(
                                clean_method))

                    # Test to see if the clean flag has the expected value
                    # afterwards
                    assert self.test_inst.clean_level == final_level, \
                        "Clean level should now be {:s}, not {:s}".format(
                            final_level, self.test_inst.clean_level)

                    # Make sure fake data is cleared
                    assert target not in self.test_inst.data
                else:
                    pytest.skip("".join(["Can't test clean warnings for ",
                                         "Instrument ",
                                         repr(inst_dict['inst_module']),
                                         " level ", clean_level,
                                         " (no downloaded files)"]))
        else:
            pytest.skip("".join(["No clean warnings for Instrument ",
                                 repr(inst_dict['inst_module']), " level ",
                                 clean_level]))
    else:
        pytest.skip("No clean warnings for Instrument {:s}".format(
            repr(inst_dict['inst_module'])))
    return
# TODO(#1172): remove mark.new_tests at v3.3.0
@pytest.mark.second
@pytest.mark.load_options
@pytest.mark.new_tests
@pytest.mark.parametrize('pad', [{'days': 1}, dt.timedelta(days=1)])
def test_load_w_pad(self, pad, inst_dict):
    """Test that instruments load with a pad specified different ways.

    Parameters
    ----------
    pad : dict or dt.timedelta
        Valid pad value for initializing an instrument (set by the
        parametrize decorator).
    inst_dict : dict
        Dictionary containing info to instantiate a specific instrument.
        Set automatically from instruments['download'] when
        `initialize_test_package` is run.

    """
    # Skip for Python 3.6, keeping information that will allow adding
    # or skipping particular instruments.
    # TODO(#1136): Remove skip once memory management is improved
    if sys.version_info.minor < 7:
        pytest.skip("skipping 3.6 for {:} ({:} =? {:})".format(
            inst_dict, inst_dict['inst_module'].__name__.find(
                'pysat_testing'), len(inst_dict['inst_module'].__name__)
            - len('pysat_testing')))

    # Update the Instrument dict with the desired pad
    if 'kwargs' in inst_dict.keys():
        inst_dict['kwargs']['pad'] = pad
    else:
        inst_dict['kwargs'] = {'pad': pad}

    # Assign the expected string representation of the pad for the
    # Instrument.__str__ check below
    if isinstance(pad, dict):
        pad_repr = repr(pds.DateOffset(days=1))
    elif isinstance(pad, dt.timedelta):
        pad_repr = "1 day, 0:00:00"
    else:
        pad_repr = repr(pad)

    self.test_inst, self.date = initialize_test_inst_and_date(inst_dict)
    if len(self.test_inst.files.files) > 0:
        # Make sure the strict time flag doesn't interfere with
        # the load tests.
        # BUG FIX: this previously assigned `self.test_inst.clean`, which
        # silently created a new attribute; the Instrument attribute used
        # throughout these tests (see `test_load_multiple_days`) is
        # `clean_level`.
        self.test_inst.clean_level = 'none'
        load_and_set_strict_time_flag(self.test_inst, self.date,
                                      raise_error=True, clean_off=True)

        if self.test_inst.empty:
            # This will be empty if this is a forecast file that doesn't
            # include the load date; re-load without padding to confirm
            # data exists at all for this date
            self.test_inst.pad = None
            load_and_set_strict_time_flag(self.test_inst, self.date,
                                          raise_error=True, clean_off=True)
            assert not self.test_inst.empty, \
                "No data on {:}".format(self.date)
            assert self.test_inst.index.max() < self.date, \
                "Padding should have left data and didn't"
        else:
            # Padding was successful, evaluate the data index length:
            # the padded data should still span less than a full day
            assert (self.test_inst.index[-1]
                    - self.test_inst.index[0]).total_seconds() < 86400.0

            # Evaluate the recorded pad in the string representation
            inst_str = self.test_inst.__str__()
            assert inst_str.find(
                'Data Padding: {:s}'.format(pad_repr)) > 0, "".join([
                    "bad pad value: ", pad_repr, " not in ", inst_str])
    else:
        pytest.skip("Download data not available")
    return
@pytest.mark.download
def test_remote_file_list(self, inst_dict):
    """Test if optional list_remote_files routine exists and is callable.

    Parameters
    ----------
    inst_dict : dict
        Dictionary containing info to instantiate a specific instrument.
        Set automatically from instruments['download'] when
        `initialize_test_package` is run.

    """
    self.test_inst, self.date = initialize_test_inst_and_date(inst_dict)
    module_name = '_'.join((self.test_inst.platform, self.test_inst.name))
    inst_module = getattr(self.inst_loc, module_name)

    # Skip early when the instrument module offers no remote listing
    if not hasattr(inst_module, 'list_remote_files'):
        pytest.skip("remote_file_list not available")

    assert callable(self.test_inst.remote_file_list)

    # Forward any username/password information required for the listing
    dl_dict = inst_dict['user_info'] if 'user_info' in inst_dict.keys() else {}

    remote_files = self.test_inst.remote_file_list(start=self.date,
                                                   stop=self.date, **dl_dict)

    # If test date is correctly chosen, files should exist
    assert len(remote_files) > 0
    return
@pytest.mark.no_download
def test_download_warning(self, inst_dict):
    """Test that instruments without download support have a warning.

    Parameters
    ----------
    inst_dict : dict
        Dictionary containing info to instantiate a specific instrument.
        Set automatically from instruments['no_download'] when
        `initialize_test_package` is run.

    """
    self.test_inst, self.date = initialize_test_inst_and_date(inst_dict)

    # Attempting a download should emit at least one UserWarning
    with warnings.catch_warnings(record=True) as caught:
        self.test_inst.download(self.date, self.date)

    assert len(caught) >= 1
    assert UserWarning in [item.category for item in caught]
    return
|
pysatREPO_NAMEpysatPATH_START.@pysat_extracted@pysat-main@pysat@tests@classes@cls_instrument_library.py@.PATH_END.py
|
{
"filename": "viz.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/examples/air/viz.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
from collections import namedtuple
import numpy as np
from PIL import Image, ImageDraw
def bounding_box(z_where, x_size):
    """Return the attention window as ((x, y), w, h) in image coordinates.

    This doesn't take into account interpolation, but it's close
    enough to be usable. `x_size` is the (square) image side length and
    origin is the top-left corner.
    """
    scale = z_where.s
    box_w = x_size / scale
    box_h = x_size / scale
    shift_x = -z_where.x / scale * x_size / 2.0
    shift_y = -z_where.y / scale * x_size / 2.0
    corner_x = (x_size - box_w) / 2 + shift_x
    corner_y = (x_size - box_h) / 2 + shift_y
    return (corner_x, corner_y), box_w, box_h
def arr2img(arr):
    """Convert a 2-D float array with values in [0, 1] to a greyscale PIL image.

    BUG FIX: `ndarray.tostring()` was deprecated and then removed in
    NumPy 1.24; `tobytes()` is the identical replacement.
    """
    # NOTE(review): PIL's frombuffer size is (width, height) while
    # arr.shape is (rows, cols); this is only correct for the square
    # images used here — confirm if non-square inputs are ever passed.
    raw = (arr * 255).astype(np.uint8).tobytes()
    return Image.frombuffer("L", arr.shape, raw, "raw", "L", 0, 1)
def img2arr(img):
    """Convert an RGB PIL image to a channels-first uint8 array for visdom."""
    # assumes color (3-channel) image
    flat = np.array(img.getdata(), np.uint8)
    hwc = flat.reshape(img.size + (3,))
    return hwc.transpose((2, 0, 1))
def colors(k):
    """Cycle through red, green, blue for the k-th bounding box."""
    palette = ((255, 0, 0), (0, 255, 0), (0, 0, 255))
    return palette[k % 3]
def draw_one(imgarr, z_arr):
    """Render one image with its bounding boxes and object count overlaid.

    `imgarr` is a 2-D tensor with values in [0, 1]; `z_arr` is a list of
    z_obj latents (one per inference step). Returns a CHW uint8 array.
    """
    # Note that this clipping makes the visualisation somewhat
    # misleading, as it incorrectly suggests objects occlude one
    # another.
    clipped = np.clip(imgarr.detach().cpu().numpy(), 0, 1)
    img = arr2img(clipped).convert("RGB")
    draw = ImageDraw.Draw(img)
    for k, z in enumerate(z_arr):
        # It would be better to use z_pres to change the opacity of
        # the bounding boxes, but I couldn't make that work with PIL.
        # Instead this darkens the color, and skips boxes altogether
        # when z_pres==0.
        if z.pres > 0:
            (x, y), w, h = bounding_box(z, imgarr.size(0))
            color = tuple(map(lambda c: int(c * z.pres), colors(k)))
            draw.rectangle([x, y, x + w, y + h], outline=color)
    # Non-integer z_pres values indicate a relaxed (continuous) model run;
    # show one decimal place in that case, otherwise a whole number
    is_relaxed = any(z.pres != math.floor(z.pres) for z in z_arr)
    fmtstr = "{:.1f}" if is_relaxed else "{:.0f}"
    # Overlay the (possibly fractional) inferred object count, top-left
    draw.text((0, 0), fmtstr.format(sum(z.pres for z in z_arr)), fill="white")
    return img2arr(img)
def draw_many(imgarrs, z_arr):
    """Annotate a batch of images.

    `imgarrs` is expected to be a (n, w, h) tensor and `z_arr` a list of
    length n; returns a list of n annotated CHW arrays.
    """
    return [
        draw_one(single_img, single_z)
        for single_img, single_z in zip(imgarrs.cpu(), z_arr)
    ]
# A single latent step: scale, x/y offsets, and presence indicator.
z_obj = namedtuple("z", "s,x,y,pres")


def tensor_to_objs(latents):
    """Map a tensor of latents (as produced by latents_to_tensor) to a
    nested list of z_obj named tuples, one inner list per image."""
    return [[z_obj(*step) for step in img_steps] for img_steps in latents]
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@examples@air@viz.py@.PATH_END.py
|
{
"filename": "check_contents_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/third_party/xla/build_tools/lint/check_contents_test.py",
"type": "Python"
}
|
# Copyright 2023 The OpenXLA Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from absl.testing import absltest
from xla.build_tools import test_utils
from xla.build_tools.lint import check_contents
from xla.build_tools.lint import diff_parser
class CheckDiffsTest(absltest.TestCase):
  """Exercises check_contents.check_diffs on the checked-in testdata diffs."""

  @classmethod
  def setUpClass(cls):
    super().setUpClass()
    testdata = test_utils.xla_src_root() / "build_tools" / "lint" / "testdata"
    cls.bad_cc_hunks = diff_parser.parse_hunks(
        (testdata / "bad_cc.diff").read_text())
    cls.important_cc_hunks = diff_parser.parse_hunks(
        (testdata / "important_cc.diff").read_text())

  def test_check_good_diff(self):
    # With "OK" suppressions honored, the bad diff reports nothing.
    found = check_contents.check_diffs(
        self.bad_cc_hunks,
        prohibited_regex="Make_Unique",
        suppression_regex="OK",
    )
    self.assertEmpty(found, 0)

  def test_check_suppressed_diff_without_suppressions(self):
    # Without a suppression regex every Make_Unique occurrence is reported.
    found = check_contents.check_diffs(
        self.bad_cc_hunks, prohibited_regex="Make_Unique")

    expected = [
        check_contents.RegexLocation(
            path="src/dir/bad.cc",
            line_number=3,
            line_contents="using Make_Unique = std::make_unique; // OK",
            matched_text="Make_Unique",
        ),
        check_contents.RegexLocation(
            path="src/dir/bad.cc",
            line_number=6,
            line_contents="  return Make_Unique<int>(a + b); // OK. Fixed now!",
            matched_text="Make_Unique",
        ),
    ]
    self.assertEqual(found, expected)

  def test_check_suppressed_diff_with_path_regexes(self):
    # Restricting to src/important.* keeps one hunk with no findings.
    important_only = check_contents.filter_hunks_by_path(
        self.bad_cc_hunks,
        path_regexes=["src/important\\..*"],
        path_regex_exclusions=[],
    )
    self.assertLen(important_only, 1)

    self.assertEmpty(
        check_contents.check_diffs(
            important_only, prohibited_regex="Make_Unique"))

  def test_check_suppressed_diff_with_exclusions(self):
    # Excluding src/dir/* drops the offending file entirely.
    remaining = check_contents.filter_hunks_by_path(
        self.bad_cc_hunks,
        path_regexes=[],
        path_regex_exclusions=["src/dir/.*"],
    )
    self.assertLen(remaining, 1)

    self.assertEmpty(
        check_contents.check_diffs(remaining, prohibited_regex="Make_Unique"))

  def test_check_suppressed_diff_with_suppression(self):
    unfiltered = check_contents.filter_hunks_by_path(
        self.bad_cc_hunks, path_regexes=[], path_regex_exclusions=[])
    # filtering without path_regex(_exclusions) is a noop
    self.assertEqual(self.bad_cc_hunks, unfiltered)

    self.assertEmpty(
        check_contents.check_diffs(
            unfiltered,
            prohibited_regex="Make_Unique",
            suppression_regex="OK"))
# Run the checks above with absl's test runner when invoked directly.
if __name__ == "__main__":
  absltest.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@third_party@xla@build_tools@lint@check_contents_test.py@.PATH_END.py
|
{
"filename": "_base_meta.py",
"repo_name": "light-curve/light-curve-python",
"repo_path": "light-curve-python_extracted/light-curve-python-master/light-curve/light_curve/light_curve_py/features/_base_meta.py",
"type": "Python"
}
|
from abc import abstractmethod
from dataclasses import dataclass, field
from typing import Collection, Union
from light_curve.light_curve_ext import Extractor as _RustExtractor
from light_curve.light_curve_ext import _FeatureEvaluator as _RustBaseFeature
from ..dataclass_field import dataclass_field
from ._base import BaseSingleBandFeature
from .extractor import Extractor, _PyExtractor
@dataclass
class BaseMetaSingleBandFeature(BaseSingleBandFeature):
    """Base class for "meta" features that first transform a light curve and
    then delegate extraction to a wrapped collection of features.

    Subclasses implement `transform` to preprocess (t, m, sigma); the wrapped
    `features` are evaluated on the transformed arrays via an `Extractor`.
    """

    # Wrapped features (pure-Python or Rust implementations); keyword-only
    # with an empty default so subclasses may still declare positional fields.
    features: Collection[Union[BaseSingleBandFeature, _RustBaseFeature]] = dataclass_field(
        default_factory=list, kw_only=True
    )
    # Built from `features` in __post_init__; excluded from the generated
    # __init__ signature.
    extractor: Union[_RustExtractor, _PyExtractor] = field(init=False)

    def __post_init__(self):
        super().__post_init__()
        # Extractor picks the Rust or Python implementation as appropriate
        self.extractor = Extractor(*self.features)

    @abstractmethod
    def transform(self, *, t, m, sigma):
        """Must return temporarily sorted arrays (t, m, sigma)"""
        pass

    def _eval_single_band(self, t, m, sigma=None):
        # Meta-features always evaluate through the extractor; direct
        # single-band evaluation is intentionally unsupported.
        raise NotImplementedError("_eval_single_band is missed for BaseMetaFeature")

    def _eval_and_fill_single_band(self, *, t, m, sigma, fill_value):
        # Apply the subclass transform, then delegate to the wrapped features
        t, m, sigma = self.transform(t=t, m=m, sigma=sigma)
        return self.extractor._eval_and_fill_single_band(t=t, m=m, sigma=sigma, fill_value=fill_value)

    @property
    def size_single_band(self):
        """Total number of values produced by the wrapped features."""
        # The Rust extractor exposes `size`; the Python one `size_single_band`
        if isinstance(self.extractor, _RustExtractor):
            return self.extractor.size
        return self.extractor.size_single_band
|
light-curveREPO_NAMElight-curve-pythonPATH_START.@light-curve-python_extracted@light-curve-python-master@light-curve@light_curve@light_curve_py@features@_base_meta.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "rohinkumar/correlcalc",
"repo_path": "correlcalc_extracted/correlcalc-master/correlcalc/main.py",
"type": "Python"
}
|
__author__ = 'Rohin Kumar Y'
# from fileios import *
# msg = 'Enter Absolute Path to file: '
# f_name = raw_input(msg).strip()
# Ran multiple tests for most methods in the package... need to clean up
# path = file_data_and_path(f_name)
# if path != None:
# print 'Path:',path
# import tpcf
# from fileios import *
# from comovdist import *
from datprep import *
from tpcf import *
import matplotlib.pyplot as plt
from genrand import *
from runtimeupdate import *
# from metrics import *
# t1 = checkdatfile('./testfile.dat')
# print t1
#
# t2=inputfiles('data')
# print t2
# %matplotlib osx
from datvis import *
# dat=checkinfile('./testfile.dat','data')
# print dat
from metrics import *
from tpcf import *
# dat=readinfile('./testfile.dat','data')
# dat=datprep('./testfile.dat','data','lcdm')
from antpcf import *
# bins=np.arange(0.002,0.082,0.002)
# tpcf('./testfile.dat',bins,randfile='./testfile.dat',method='ls')
# tpcf('./testfile.dat',bins,mask='/Users/rohin/Documents/ipy_notebooks/galsurveystudy/masks/boss_geometry_2011_06_10.ply',cosmology='lc',method='ls')
# bins=np.arange(0,0.201,0.01)
# tpcf(dat,dat,3,bins,flatdistsq,'ls')
# antpcf(dat,dat,bins,flatdistsq,flatdistsq,'ls')
# z=np.array(dat['Z'])
# ra=np.array(dat['ra'])
# dec=np.array(dat['Dec'])
# hpix=healpixmap(ra,dec)
# print(hpix)
# hu.mollview(hpix,rot=180)
# zr=randz(z,2*len(dat['Z']))
# print(zr)
# spinner.text='Generating Random RA, DEC...'
# spinner.start()
# ra,dec=randang('/Users/rohin/Downloads/mask.dr72safe0.ply',2*len(dat['Z']))
# spinner.stop()
# print(ra,dec)
# plt.hist(z, 10, normed=True, alpha=0.5, label='hist')
# x_grid=np.linspace(min(z),max(z),2*len(dat['Z']))#check nb file
# kdepdf=kde(z,x_grid,bandwidth=1e-3)#check nb file#,bandwidth=0.01
# plt.plot(x_grid, kdepdf, color='r', alpha=0.5, lw=3, label='kde')
# plt.show()
# bins=np.arange(0,0.1,0.01)
# print(autocorr(dat,bins,flatdistsq))
# print(crosscorr(dat,dat,bins,flatdistsq))
# print(closedistsq(dat[1],dat[2]))
# mask=readmaskfile('/Users/rohin/Downloads/mask.dr72safe0.ply')
#
# z=dat['Z']
# print(z)
# np.vectorize(Ez)
# ez=Ez(z)
# print ez
# np.vectorize(DC_LCDM)
# s=DC_LCDM(z)
# s=comov(z,'lcdm')
# print s
# For 2pCF for LCDM model
from correlcalc import *

# Redshift-space bins in dimensionless comoving distance (s = D_C * H0 / c)
bins = np.arange(0.002, 0.082, 0.002)
# Two-point correlation of the SDSS DR7 full sample within the safe0 mask;
# randfact=2 generates twice as many randoms as data points
correldr72 = tpcf('/Users/rohin/Downloads/DR7-Full.ascii', bins, mask='/Users/rohin/Documents/ipy_notebooks/galsurveystudy/masks/window.dr72safe0.ply', randfact=2)
import matplotlib.pyplot as plt

# Convert to Mpc/h — presumably scaling by c/H0 = 3000 Mpc/h; verify units
binMpc = 3000*bins
# Linear, semi-log, and log-log views of the correlation function
plt.plot(binMpc[1:], correldr72[0], 'ro-')
plt.show()
plt.yscale('log')
plt.plot(binMpc[4:], correldr72[0][3:], 'ro-')
plt.show()
plt.yscale('log')
plt.xscale('log')
plt.plot(binMpc[3:], correldr72[0][2:], 'ro-')
plt.show()

# For anisotropic 2pCF using delta Z and Z delta theta as in arXiv: 1312.0003
from antpcf import *
bins=np.arange(0.01, 0.201, 0.01)
atpcf('./testfile.dat', bins, randfile='./testfile.dat', estimator='ls', permetric='apzdth', parmetric='apdz')
|
rohinkumarREPO_NAMEcorrelcalcPATH_START.@correlcalc_extracted@correlcalc-master@correlcalc@main.py@.PATH_END.py
|
{
"filename": "SentencePieceTokenizerOptionsT.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_support/metadata_schema_py_generated/SentencePieceTokenizerOptionsT.md",
"type": "Markdown"
}
|
page_type: reference
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_support.metadata_schema_py_generated.SentencePieceTokenizerOptionsT" />
<meta itemprop="path" content="Stable" />
<meta itemprop="property" content="InitFromBuf"/>
<meta itemprop="property" content="InitFromObj"/>
<meta itemprop="property" content="InitFromPackedBuf"/>
<meta itemprop="property" content="Pack"/>
<meta itemprop="property" content="__init__"/>
</div>
# tflite_support.metadata_schema_py_generated.SentencePieceTokenizerOptionsT
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L1484-L1553">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_support.metadata_schema_py_generated.SentencePieceTokenizerOptionsT()
</code></pre>
<!-- Placeholder for "Used in" -->
## Methods
<h3 id="InitFromBuf"><code>InitFromBuf</code></h3>
<a target="_blank" class="external" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L1491-L1495">View source</a>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>@classmethod</code>
<code>InitFromBuf(
buf, pos
)
</code></pre>
<h3 id="InitFromObj"><code>InitFromObj</code></h3>
<a target="_blank" class="external" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L1502-L1506">View source</a>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>@classmethod</code>
<code>InitFromObj(
sentencePieceTokenizerOptions
)
</code></pre>
<h3 id="InitFromPackedBuf"><code>InitFromPackedBuf</code></h3>
<a target="_blank" class="external" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L1497-L1500">View source</a>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>@classmethod</code>
<code>InitFromPackedBuf(
buf, pos=0
)
</code></pre>
<h3 id="Pack"><code>Pack</code></h3>
<a target="_blank" class="external" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/metadata/metadata_schema_py_generated.py#L1530-L1553">View source</a>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>Pack(
builder
)
</code></pre>
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_support@metadata_schema_py_generated@SentencePieceTokenizerOptionsT.md@.PATH_END.py
|
{
"filename": "proportion_test.py",
"repo_name": "pyro-ppl/numpyro",
"repo_path": "numpyro_extracted/numpyro-master/examples/proportion_test.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
"""
Example: Proportion Test
========================
You are managing a business and want to test if calling your customers will
increase their chance of making a purchase. You get 100,000 customer records and call
roughly half of them and record if they make a purchase in the next three months.
You do the same for the half that did not get called. After three months, the data is in -
did calling help?
This example answers this question by estimating a logistic regression model where the
covariates are whether the customer got called and their gender. We place a multivariate
normal prior on the regression coefficients. We report the 95% highest posterior
density interval for the effect of making a call.
"""
import argparse
import os
from jax import random
import jax.numpy as jnp
from jax.scipy.special import expit
import numpyro
from numpyro.diagnostics import hpdi
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
def make_dataset(rng_key) -> tuple[jnp.ndarray, jnp.ndarray]:
    """
    Make simulated dataset where potential customers who get a
    sales calls have ~2% higher chance of making another purchase.

    :param rng_key: JAX PRNG key; split into independent subkeys so each
        sampling site is reproducible.
    :return: tuple of (design_matrix, made_purchase) where the design matrix
        columns are [intercept, got_called, is_female].
    """
    # Independent subkeys for the three Bernoulli draws below
    key1, key2, key3 = random.split(rng_key, 3)

    num_calls = 51342
    num_no_calls = 48658

    # True purchase rates: 8.4% with a call vs 6.1% without (~2.3% lift)
    made_purchase_got_called = dist.Bernoulli(0.084).sample(
        key1, sample_shape=(num_calls,)
    )
    made_purchase_no_calls = dist.Bernoulli(0.061).sample(
        key2, sample_shape=(num_no_calls,)
    )

    # Called customers first, then the uncalled — matches `got_called` below
    made_purchase = jnp.concatenate([made_purchase_got_called, made_purchase_no_calls])

    # Gender is assigned at random, independent of the outcome
    is_female = dist.Bernoulli(0.5).sample(
        key3, sample_shape=(num_calls + num_no_calls,)
    )
    got_called = jnp.concatenate([jnp.ones(num_calls), jnp.zeros(num_no_calls)])
    design_matrix = jnp.hstack(
        [
            jnp.ones((num_no_calls + num_calls, 1)),
            got_called.reshape(-1, 1),
            is_female.reshape(-1, 1),
        ]
    )

    return design_matrix, made_purchase
def model(design_matrix: jnp.ndarray, outcome: jnp.ndarray = None) -> None:
    """
    Model definition: Log odds of making a purchase is a linear combination
    of covariates. Specify a Normal prior over regression coefficients.

    :param design_matrix: Covariates. All categorical variables have been one-hot
        encoded.
    :param outcome: Binary response variable. In this case, whether or not the
        customer made a purchase. When None (prior/predictive mode) the
        observation site is sampled instead of conditioned.
    """
    # Standard multivariate normal prior, one coefficient per covariate
    beta = numpyro.sample(
        "coefficients",
        dist.MultivariateNormal(
            loc=0.0, covariance_matrix=jnp.eye(design_matrix.shape[1])
        ),
    )
    logits = design_matrix.dot(beta)
    # Each customer is conditionally independent given the coefficients
    with numpyro.plate("data", design_matrix.shape[0]):
        numpyro.sample("obs", dist.Bernoulli(logits=logits), obs=outcome)
def print_results(coef: jnp.ndarray, interval_size: float = 0.95) -> None:
    """
    Print the confidence interval for the effect size with interval_size
    probability mass.
    """
    # Convert log-odds to purchase probabilities with and without a call
    prob_without_call = expit(coef[:, 0])
    prob_with_call = expit(coef[:, 0] + coef[:, 1])

    # Highest-posterior-density intervals for the call lift and gender effect
    impact_on_probability = hpdi(
        prob_with_call - prob_without_call, prob=interval_size
    )
    effect_of_gender = hpdi(coef[:, 2], prob=interval_size)

    print(
        f"There is a {interval_size * 100}% probability that calling customers "
        "increases the chance they'll make a purchase by "
        f"{(100 * impact_on_probability[0]):.2} to {(100 * impact_on_probability[1]):.2} percentage points."
    )
    print(
        f"There is a {interval_size * 100}% probability the effect of gender on the log odds of conversion "
        f"lies in the interval ({effect_of_gender[0]:.2}, {effect_of_gender[1]:.2f})."
        " Since this interval contains 0, we can conclude gender does not impact the conversion rate."
    )
def run_inference(
    design_matrix: jnp.ndarray,
    outcome: jnp.ndarray,
    rng_key: jnp.ndarray,
    num_warmup: int,
    num_samples: int,
    num_chains: int,
    interval_size: float = 0.95,
) -> None:
    """
    Estimate the effect size by running NUTS on `model` and printing HPDI
    summaries of the posterior coefficients.

    :param design_matrix: covariates, columns [intercept, got_called, is_female]
    :param outcome: binary purchase indicator per customer
    :param rng_key: JAX PRNG key for the sampler
    :param num_warmup: number of warmup (adaptation) iterations
    :param num_samples: number of posterior samples per chain
    :param num_chains: number of MCMC chains
    :param interval_size: probability mass of the reported HPDI
    """
    kernel = NUTS(model)
    mcmc = MCMC(
        kernel,
        num_warmup=num_warmup,
        num_samples=num_samples,
        num_chains=num_chains,
        # Progress bars break the Sphinx doc build, so disable them there
        progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True,
    )
    mcmc.run(rng_key, design_matrix, outcome)

    # 0th column is intercept (not getting called)
    # 1st column is effect of getting called
    # 2nd column is effect of gender (should be none since assigned at random)
    coef = mcmc.get_samples()["coefficients"]
    print_results(coef, interval_size)
def main(args):
    """Simulate the dataset and run inference with the CLI settings."""
    key, _ = random.split(random.PRNGKey(3))
    design_matrix, response = make_dataset(key)
    run_inference(
        design_matrix,
        response,
        key,
        args.num_warmup,
        args.num_samples,
        args.num_chains,
        args.interval_size,
    )
if __name__ == "__main__":
    # Fail fast if the installed numpyro doesn't match the version this
    # example was written against
    assert numpyro.__version__.startswith("0.16.1")
    parser = argparse.ArgumentParser(description="Testing whether ")
    parser.add_argument("-n", "--num-samples", nargs="?", default=500, type=int)
    parser.add_argument("--num-warmup", nargs="?", default=1500, type=int)
    parser.add_argument("--num-chains", nargs="?", default=1, type=int)
    parser.add_argument("--interval-size", nargs="?", default=0.95, type=float)
    parser.add_argument("--device", default="cpu", type=str, help='use "cpu" or "gpu".')
    args = parser.parse_args()

    numpyro.set_platform(args.device)
    # One host device per chain so chains can run in parallel
    numpyro.set_host_device_count(args.num_chains)

    main(args)
|
pyro-pplREPO_NAMEnumpyroPATH_START.@numpyro_extracted@numpyro-master@examples@proportion_test.py@.PATH_END.py
|
{
"filename": "makePerfectForecastDataset.py",
"repo_name": "cmbant/CosmoMC",
"repo_path": "CosmoMC_extracted/CosmoMC-master/python/makePerfectForecastDataset.py",
"type": "Python"
}
|
# take CAMB file (e.g. test_lensedCls.dat) and produce dataset with given noise for testing
# Use in cosmomc .ini file using e.g.
# cmb_dataset[MyForecast]=data/MyForecast/test_lensedCls_exactsim.dataset
from __future__ import absolute_import
import shutil
import os
import numpy as np
from getdist import IniFile
from CMBlikes import lastTopComment, DatasetLikelihood, ClsArray
def make_forecast_cmb_dataset(input_cl_file, output_root, output_dir=None, noise_muK_arcmin_T=None,
                              noise_muK_arcmin_P=None, NoiseVar=None, ENoiseFac=2, fwhm_arcmin=None,
                              lmin=2, lmax=None, fsky=1, fields_use=None,
                              lens_recon_noise=None, cl_data_cols=''):
    """
    Make a simulated .dataset and associated files with 'data' set at the input fiducial model.

    :param input_cl_file: input fiducial CL
    :param output_root: root name for output files, e.g. 'my_sim1'
    :param output_dir: output directory
    :param noise_muK_arcmin_T: temperature noise in muK-arcmin
    :param noise_muK_arcmin_P: polarization noise in muK-arcmin
    :param NoiseVar: effective isotropic noise variance for the temperature (N_L=NoiseVar with no beam)
    :param ENoiseFac: factor by which polarization noise variance is higher (usually 2, for Planck about 4
       as only half the detectors polarized)
    :param fwhm_arcmin: beam fwhm in arcminutes
    :param lmin: l_min
    :param lmax: l_max
    :param fsky: sky fraction
    :param fields_use: optional list of fields to restrict to (e.g. 'T E')
    :param lens_recon_noise: optional array, starting at L=0, for the PP lensing reconstruction noise, in [L(L+1)]^2C_L^phi/2pi units
    :param cl_data_cols: if not specified in file header, order of columns in input CL file (e.g. 'TT TE EE BB PP')
    :return: None; writes <output_root>.dataset, <output_root>.dat and the noise file into output_dir
    """
    # BUG FIX: `lens_recon_noise` is typically a numpy array; the original
    # truthiness test (`use_lensing = lens_recon_noise`) raised an ambiguous
    # truth-value ValueError for arrays of length > 1.
    use_lensing = lens_recon_noise is not None
    use_CMB = noise_muK_arcmin_T or NoiseVar is not None

    ini = IniFile()
    dataset = ini.params

    # Column order may come from the file's last top comment (e.g. '#L TT TE ...')
    if not cl_data_cols:
        cl_data_cols = lastTopComment(input_cl_file)
        if not cl_data_cols:
            raise Exception('input CL file must specify names of columns (TT TE EE..)')
    else:
        dataset['cl_hat_order'] = cl_data_cols

    if use_CMB:
        if NoiseVar is None:
            if noise_muK_arcmin_T is None:
                raise ValueError('Must specify noise')
            NoiseVar = (noise_muK_arcmin_T * np.pi / 180 / 60.) ** 2
            if noise_muK_arcmin_P is not None:
                ENoiseFac = (noise_muK_arcmin_P / noise_muK_arcmin_T) ** 2
        elif noise_muK_arcmin_T is not None or noise_muK_arcmin_P is not None:
            raise ValueError('Specify either noise_muK_arcmin or NoiseVar')
        if not fields_use:
            # BUG FIX: the original tests read `if 'TT' or 'TE' in ...`, which
            # is always True (the bare string 'TT' is truthy); each column
            # name must be tested for membership individually.
            fields_use = ''
            if 'TT' in cl_data_cols or 'TE' in cl_data_cols:
                fields_use = 'T'
            if 'EE' in cl_data_cols or 'TE' in cl_data_cols:
                fields_use += ' E'
            if 'BB' in cl_data_cols:
                fields_use += ' B'
            if 'PP' in cl_data_cols and use_lensing:
                fields_use += ' P'
    else:
        fields_use = fields_use or 'P'

    if output_dir is None:
        output_dir = os.path.join(os.path.dirname(__file__), '..', 'data', output_root)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataset['fields_use'] = fields_use

    if use_CMB:
        fwhm = fwhm_arcmin / 60
        # Convert FWHM (degrees) to the Gaussian beam sigma^2 in l-space
        xlc = 180 * np.sqrt(8. * np.log(2.)) / np.pi
        sigma2 = (fwhm / xlc) ** 2
        noise_cols = 'TT EE BB'
        if use_lensing:
            noise_cols += ' PP'
    elif use_lensing:
        noise_cols = 'PP'
    else:
        # Nothing to simulate; previously this fell through to a NameError
        raise ValueError('Must specify CMB noise and/or lens_recon_noise')
    noise_file = output_root + '_Noise.dat'
    with open(os.path.join(output_dir, noise_file), 'w') as f:
        f.write('#L %s\n' % noise_cols)
        for l in range(lmin, lmax + 1):
            noises = []
            if use_CMB:
                # BUG FIX: the white-noise spectrum is only defined when CMB
                # noise was requested; computing it unconditionally raised a
                # NameError (NoiseVar/sigma2 unset) for lensing-only datasets.
                # Beam-deconvolved white noise in l(l+1)C_l/2pi units.
                NoiseCl = l * (l + 1.) / 2 / np.pi * NoiseVar * np.exp(l * (l + 1) * sigma2)
                noises += [NoiseCl, ENoiseFac * NoiseCl, ENoiseFac * NoiseCl]
            if use_lensing:
                noises += [lens_recon_noise[l]]
            f.write("%d " % l + " ".join("%E" % elem for elem in noises) + "\n")

    # (sic) 'fksy' matches the key name the CMBlikes reader expects
    dataset['fullsky_exact_fksy'] = fsky
    dataset['dataset_format'] = 'CMBLike2'
    dataset['like_approx'] = 'exact'

    dataset['cl_lmin'] = lmin
    dataset['cl_lmax'] = lmax

    dataset['binned'] = False

    dataset['cl_hat_includes_noise'] = False

    shutil.copy(input_cl_file, os.path.join(output_dir, output_root + '.dat'))
    dataset['cl_hat_file'] = output_root + '.dat'
    # BUG FIX: key previously had a stray trailing space ('cl_noise_file ')
    dataset['cl_noise_file'] = noise_file

    ini.saveFile(os.path.join(output_dir, output_root + '.dataset'))
if __name__ == "__main__":
    import tempfile

    # Edit parameters you want to change here
    lensedTotClFileRoot = os.path.join(os.path.dirname(__file__), '..', 'data',
                                       'base_plikHM_TT_lowTEB.minimum.theory_cl')
    # these numbers are Planck-like
    # Noise var is N_l in muK^2 for white noise
    # note NoiseVar = (muKArcmin * np.pi / 180 / 60.) ** 2
    # Pol noise var = ENoiseFac * NoiseVar
    # 2 normally, but for Planck only half detectors are polarized
    output_dir = tempfile.gettempdir()
    output_root = 'test_sim'
    make_forecast_cmb_dataset(input_cl_file=lensedTotClFileRoot, output_root=output_root,
                              output_dir=output_dir, lmin=2, lmax=2500,
                              fwhm_arcmin=5, fsky=0.7, NoiseVar=4e-5, ENoiseFac=4)
    print('Made ' + os.path.join(output_dir, output_root + '.dataset'))
    # The rest is just a test on files produced above
    like = DatasetLikelihood(os.path.join(output_dir, output_root + '.dataset'))
    cls = ClsArray(lensedTotClFileRoot)
    # Perturb two fiducial spectra slightly so chi2 is non-zero
    # (presumably [0,0] is TT and [1,1] is EE — confirm index convention)
    cls.cls_array[0, 0] *= 1.004
    cls.cls_array[1, 1] *= 0.991
    import time

    start = time.time()
    chi2 = like.chi_squared(cls)
    end = time.time() - start
    print('Test chi2 = %s' % chi2)
    print('Time: %s' % end)
    # Regression guard: expected chi-squared for the fixed perturbation above
    if not np.allclose(49.055, chi2, rtol=1e-5): raise Exception('likelihood test failed')
|
cmbantREPO_NAMECosmoMCPATH_START.@CosmoMC_extracted@CosmoMC-master@python@makePerfectForecastDataset.py@.PATH_END.py
|
{
"filename": "m3dis_class.py",
"repo_name": "TSFitPy-developers/TSFitPy",
"repo_path": "TSFitPy_extracted/TSFitPy-main/scripts/m3dis_class.py",
"type": "Python"
}
|
from __future__ import annotations
import logging
import shutil
import subprocess
import numpy as np
import os
import tempfile
from scipy.interpolate import LinearNDInterpolator, interp1d
from . import marcs_class
from .solar_abundances import periodic_table, solar_abundances
from .solar_isotopes import solar_isotopes
from .synthetic_code_class import SyntheticSpectrumGenerator
class M3disCall(SyntheticSpectrumGenerator):
    """
    Wrapper around the M3D/DISPATCH ("m3dis") radiative-transfer code.

    Builds the m3dis input namelists (atmosphere, model atom, line list,
    composition), interpolates 1D MARCS model atmospheres to the requested
    stellar parameters, launches ``dispatch.x`` as a subprocess and reads the
    synthesised spectrum back through the m3dis python module.
    """

    def __init__(self, m3dis_path: str, interpol_path: str, line_list_paths: str, marcs_grid_path: str,
                 marcs_grid_list: str, model_atom_path: str, departure_file_path: str,
                 aux_file_length_dict: dict,
                 marcs_value_keys: list, marcs_values: dict, marcs_models: dict, model_temperatures: np.ndarray,
                 model_logs: np.ndarray, model_mets: np.ndarray, m3dis_python_module, night_mode: bool=False, n_nu=None,
                 hash_table_size=None, mpi_cores=None, iterations_max=None, convlim=None, snap=None,
                 dims=None, nx=None, ny=None, nz=None):
        """
        Instantiate a class for generating synthetic stellar spectra using m3dis.

        :param m3dis_path: Path where the m3dis binary 'dispatch.x' can be found.
        :param interpol_path: Path where the compiled interpol_modeles.f binary can be found.
        :param line_list_paths: Path(s) where line lists for synthetic spectra can be found. Specify as either a string, or a list of strings.
        :param marcs_grid_path: Path where a grid of MARCS .mod files can be found. These contain the model atmospheres we use.
        :param model_atom_path: Path to the model atom paths
        :param departure_file_path: Path to the NLTE departure file paths
        """
        super().__init__(m3dis_path, interpol_path, line_list_paths, marcs_grid_path,
                         marcs_grid_list, model_atom_path,
                         marcs_value_keys, marcs_values, marcs_models, model_temperatures,
                         model_logs, model_mets, night_mode)
        # The parent class stores the code location as self.code_path; keep an alias.
        self.m3dis_path = self.code_path
        self.mpi_cores: int = mpi_cores
        self.departure_file_path = departure_file_path
        self.aux_file_length_dict = aux_file_length_dict
        # python module used to read back m3dis output (see synthesize_spectra)
        self.m3dis_python_module = m3dis_python_module
        # m3dis run-control parameters, forwarded verbatim into the generated namelists
        self.n_nu = n_nu
        self.hash_table_size = hash_table_size
        self.iterations_max = iterations_max
        self.convlim = convlim
        # 3D snapshot selection and cube geometry (only used for 3D atmospheres)
        self.snap = snap
        self.dims = dims
        self.nx = nx
        self.ny = ny
        self.nz = nz
        self.skip_linelist = False
        self.save_spectra = True
        self.use_precomputed_depart = True
        self.atmosphere_path_3d_model = None
        self.atmos_format_3d = None
        # if using just MARCS model instead of interpolating the same one 8 times
        self.use_marcs_directly = False

    def configure(self, lambda_min: float=None, lambda_max:float=None, lambda_delta: float=None,
                  metallicity: float=None, log_g: float=None, t_eff: float=None, stellar_mass: float=None,
                  turbulent_velocity: float=None, free_abundances=None, free_isotopes=None,
                  sphere=None, alpha=None, s_process=None, r_process=None,
                  line_list_paths=None, line_list_files=None,
                  verbose=None, temp_directory=None, nlte_flag: bool = None, atmosphere_dimension=None,
                  mpi_cores:int=None,
                  windows_flag=None, segment_file=None, line_mask_file=None, depart_bin_file=None,
                  depart_aux_file=None, model_atom_file=None, atmosphere_path_3d_model=None, atmos_format_3d=None):
        """
        Set the stellar parameters of the synthetic spectra to generate. This can be called as often as needed
        to generate many synthetic spectra with one class instance. All arguments are optional; any which are not
        supplied will remain unchanged.

        :param lambda_min:
            Short wavelength limit of the synthetic spectra we generate. Unit: A.
        :param lambda_max:
            Long wavelength limit of the synthetic spectra we generate. Unit: A.
        :param lambda_delta:
            Wavelength step of the synthetic spectra we generate. Unit: A.
        :param metallicity:
            Metallicity of the star we're synthesizing.
        :param t_eff:
            Effective temperature of the star we're synthesizing.
        :param log_g:
            Log(gravity) of the star we're synthesizing.
        :param stellar_mass:
            Mass of the star we're synthesizing (solar masses).
        :param turbulent_velocity:
            Micro turbulence velocity in km/s
        :param free_abundances:
            List of elemental abundances to use in stellar model, given as [X/H].
        :param sphere:
            Select whether to use a spherical model (True) or a plane-parallel model (False).
        :param alpha:
            Alpha enhancement to use in stellar model.
        :param s_process:
            S-Process element enhancement to use in stellar model.
        :param r_process:
            R-Process element enhancement to use in stellar model.
        :param line_list_paths:
            List of paths where we should search for line lists.
        :param line_list_files:
            List of line list files to use. If not specified, we use all files in `line_list_paths`
        :param verbose:
            Let m3dis print debugging information to terminal?
        :return:
            None
        """
        if lambda_min is not None:
            self.lambda_min = lambda_min
        if lambda_max is not None:
            self.lambda_max = lambda_max
        if lambda_delta is not None:
            self.lambda_delta = lambda_delta
        if metallicity is not None:
            self.metallicity = metallicity
        if t_eff is not None:
            self.t_eff = t_eff
        if log_g is not None:
            self.log_g = log_g
        if stellar_mass is not None:
            self.stellar_mass = stellar_mass
        if turbulent_velocity is not None:
            self.turbulent_velocity = turbulent_velocity
        if free_abundances is not None:
            self.free_abundances = free_abundances  # [X/H]
        if free_isotopes is not None:
            self.free_isotopes = free_isotopes
        if sphere is not None:
            self.sphere = sphere
        if alpha is not None:
            self.alpha = alpha
        if s_process is not None:
            self.s_process = s_process
        if r_process is not None:
            self.r_process = r_process
        if line_list_paths is not None:
            # accept a single path as well as a list/tuple of paths
            if not isinstance(line_list_paths, (list, tuple)):
                line_list_paths = [line_list_paths]
            self.line_list_paths = line_list_paths
        if line_list_files is not None:
            self.line_list_files = line_list_files
        if verbose is not None:
            self.verbose = verbose
        if temp_directory is not None:
            self.tmp_dir = temp_directory
        if nlte_flag is not None:
            self.nlte_flag = nlte_flag
        if atmosphere_dimension is not None:
            self.atmosphere_dimension = atmosphere_dimension
            # 3D model cubes carry their own velocity field, so vmic is meaningless
            if self.atmosphere_dimension == "3D":
                self.turbulent_velocity = None
        if mpi_cores is not None:
            self.mpi_cores = mpi_cores
        # NOTE(review): model_atom_file is handled twice (here and again below);
        # one of the two checks is redundant. line_mask_file is accepted but
        # never stored — presumably unused by m3dis; confirm before removing.
        if model_atom_file is not None:
            self.model_atom_file = model_atom_file
        if windows_flag is not None:
            self.windows_flag = windows_flag
        if depart_bin_file is not None:
            self.depart_bin_file = depart_bin_file
        if depart_aux_file is not None:
            self.depart_aux_file = depart_aux_file
        if model_atom_file is not None:
            self.model_atom_file = model_atom_file
        if segment_file is not None:
            self.segment_file = segment_file
        if atmosphere_path_3d_model is not None:
            self.atmosphere_path_3d_model = atmosphere_path_3d_model
        if atmos_format_3d is not None:
            self.atmos_format_3d = atmos_format_3d

    def run_m3dis(self, input_in, stderr, stdout):
        """
        Launch ``dispatch.x`` with the given namelist text and wait for completion.

        :param input_in: Full text of the m3dis input namelists.
        :param stderr: Destination for the subprocess' stderr (e.g. subprocess.STDOUT).
        :param stdout: Destination for the subprocess' stdout (None inherits the terminal).
        :return: Tuple of (Popen object, captured stderr bytes or None).
        """
        # Write the input data to a temporary file
        # TODO: check this solution because temp direction might mess up something
        with tempfile.NamedTemporaryFile(delete=False) as temp:
            temp.write(bytes(input_in, "utf-8"))
            temp_file_name = temp.name
        #print(input_in)
        # check if file exists
        #if not os.path.exists("./dispatch.x"):
        #    print("File does not exist")
        #else:
        #    print("File exists")
        # Create a copy of the current environment variables
        env = os.environ.copy()
        # Set OMP_NUM_THREADS for the subprocess
        env['OMP_NUM_THREADS'] = str(self.mpi_cores)
        # Now, you can use temp_file_name as an argument to dispatch.x
        # (relies on the caller having chdir'ed into the m3dis directory first)
        pr1 = subprocess.Popen(
            [
                "./dispatch.x",
                temp_file_name,
            ],
            stdin=subprocess.PIPE,
            stdout=stdout,
            stderr=stderr,
            env=env,
        )
        # pr1.stdin.write(bytes(input_in, "utf-8"))
        stdout_bytes, stderr_bytes = pr1.communicate()
        # Don't forget to remove the temporary file at some point
        os.unlink(temp_file_name)
        return pr1, stderr_bytes

    def write_abund_file(self):
        """
        Write the elemental abundance table consumed by m3dis into the temp dir.

        :return: Path to the written abundance file.
        """
        # file path is temp_dir + abund
        file_path = os.path.join(self.tmp_dir, "abund_to_use")
        # open file
        with open(file_path, "w") as file:
            # write the number of elements
            # write the elements and their abundances
            for element in periodic_table:
                if element != "":
                    if element == "H" or element == "He":
                        abundance_to_write = solar_abundances[element]
                    else:
                        if element in self.free_abundances:
                            # Here abundance is passed as [X/H], so we need to add the solar abundance to convert to A(X)
                            # A(X)_star = A(X)_solar + [X/H]
                            abundance_to_write = self.free_abundances[element] + solar_abundances[element]
                        else:
                            # If the element is not in the free abundances, we assume it has the solar scaled abundance
                            # A(X)_star = A(X)_solar + [Fe/H]
                            abundance_to_write = solar_abundances[element] + self.metallicity
                        if self.atmosphere_dimension == "3D" or self.use_marcs_directly:
                            # if 3D, we need to subtract the metallicity from the abundance, because it auto scales (adds it) in M3D with FeH already
                            abundance_to_write = abundance_to_write - self.metallicity
                    file.write(f"{element:<4} {abundance_to_write:>6.3f}\n")
                    logging.debug(f"{element:<4} {abundance_to_write:>6.3f}")
        return file_path

    def write_isotope_file(self):
        """
        Write the isotope ratio table consumed by m3dis into the temp dir.

        Parses isotope masses from the NIST atomic weights file and combines them
        with the configured (or solar) isotope fractions. The file is only
        written once per temp dir; an existing file is reused.

        :return: Path to the isotope file.
        """
        atomic_weights_path = "scripts/atomicweights.dat"
        # check if file exists
        if not os.path.exists(atomic_weights_path):
            # add ../ to the path (we may be running from a subdirectory)
            atomic_weights_path = os.path.join("../", atomic_weights_path)
        m3d_isotopes_file_path = os.path.join(self.tmp_dir, "isotopes")
        # check if file exists
        if not os.path.exists(m3d_isotopes_file_path):
            if self.free_isotopes is None:
                self.free_isotopes = solar_isotopes
            elements_atomic_mass_number = self.free_isotopes.keys()
            # elements now consists of e.g. '3.006'. we want to convert 3
            elements_atomic_number = [int(float(element.split(".")[0])) for element in elements_atomic_mass_number]
            # count the number of each element, such that we have e.g. 3: 2, 4: 1, 5: 1
            elements_count = {element: elements_atomic_number.count(element) for element in elements_atomic_number}
            # remove duplicates
            elements_atomic_number_unique = set(elements_atomic_number)
            separator = "_"  # separator between sections in the file from NIST
            atomic_weights = {}
            with open(atomic_weights_path, "r") as file:
                # state machine over the NIST file: sections are delimited by
                # lines starting with `separator`; only sections for elements we
                # actually need are parsed.
                skip_section = True
                current_element_atomic_number = 0
                for line in file:
                    if line[0] != separator and skip_section:
                        continue
                    elif line[0] == separator and skip_section:
                        skip_section = False
                        continue
                    elif line[0] != separator and not skip_section and current_element_atomic_number == 0:
                        current_element_atomic_number_to_test = int(line.split()[0])
                        if current_element_atomic_number_to_test not in elements_atomic_number_unique:
                            skip_section = True
                            continue
                        current_element_atomic_number = current_element_atomic_number_to_test
                        atomic_weights[current_element_atomic_number] = {}
                        # remove any spaces and anything after (
                        atomic_weights[current_element_atomic_number][int(line[8:12].replace(" ", ""))] = \
                            line[13:32].replace(" ", "").split("(")[0]
                    elif line[0] != separator and not skip_section and current_element_atomic_number != 0:
                        # NOTE(review): the chained `atomic_weight =` target below is a
                        # leftover; only the dict entry is ever read afterwards.
                        atomic_weights[current_element_atomic_number][int(line[8:12].replace(" ", ""))] = atomic_weight = \
                            line[13:32].replace(" ", "").split("(")[0]
                    elif line[0] == separator and not skip_section and current_element_atomic_number != 0:
                        current_element_atomic_number = 0
            """
            format:
            Li     2
               6   6.0151   0.0759
               7   7.0160   0.9241
            """
            # open file
            with open(m3d_isotopes_file_path, "w") as file:
                # write element, then number of isotopes. next lines are isotope mass and abundance
                current_element_atomic_number = 0
                for element, isotope in self.free_isotopes.items():
                    element_atomic_number = int(float(element.split(".")[0]))
                    element_mass_number = int(float(element.split(".")[1]))
                    if current_element_atomic_number != element_atomic_number:
                        # elements now consists of e.g. '3.006'. we want to convert 3 to and 6
                        current_element_atomic_number = element_atomic_number
                        file.write(
                            f"{periodic_table[element_atomic_number]:<5}{elements_count[element_atomic_number]:>2}\n")
                    file.write(
                        f"{int(element_mass_number):>4} {float(atomic_weights[element_atomic_number][element_mass_number]):>12.8f} {isotope:>12.8f}\n")
        return m3d_isotopes_file_path

    def call_m3dis(self, skip_linelist=False, use_precomputed_depart=False):
        """
        Assemble the full m3dis namelist configuration and run ``dispatch.x``.

        :param skip_linelist: If True, omit the linelist/spectrum namelists (e.g. NLTE departure-only runs).
        :param use_precomputed_depart: If True, point m3dis at previously computed departure coefficients.
        :return: Dict, empty on success or containing an "errors" entry on failure.
        """
        abund_file_path = self.write_abund_file()
        isotope_file_path = self.write_isotope_file()
        # get all files from self.line_list_paths[0]
        self.line_list_files = os.listdir(self.line_list_paths[0])
        if self.atmosphere_dimension == "1D":
            # NOTE(review): this first assignment is dead — it is unconditionally
            # overwritten by the 'Text' variant two lines below.
            atmo_param = f"atmos_format='Marcs' vmic={round(self.turbulent_velocity, 5)}"
            self.dims = 1
            #atmos_path = "./input_multi3d/atmos/p5777_g+4.4_m0.0_t01_st_z+0.00_a+0.00_c+0.00_n+0.00_o+0.00_r+0.00_s+0.00.mod"
            atmo_param = f"atmos_format='Text' vmic={round(self.turbulent_velocity, 5)}"
            atmos_path = f"{os.path.join(self.tmp_dir, self.marcs_model_name)}"
            if self.use_marcs_directly:
                # grid point hit exactly: feed the raw MARCS model instead of the interpolated one
                atmo_param = f"atmos_format='Marcs' vmic={round(self.turbulent_velocity, 5)}"
                atmos_path = os.path.join(self.marcs_grid_path, self.marcs_model_name)
            # convert to m3dis format
            #atmos_path = self.convert_interpolated_atmo_to_m3dis(atmos_path)
        elif self.atmosphere_dimension == "3D":
            # check if teff is about 4600, logg is about 1.39 and feh is about -2.55, within tolerance of 0.01
            # if so, use the 3D model
            if np.isclose(self.t_eff, 4665, atol=100) and np.isclose(self.log_g, 1.64, atol=0.25) and np.isclose(self.metallicity, -2.51, atol=0.3):
                # hard-coded Stagger snapshot for this one star — TODO confirm this
                # machine-specific path is still wanted
                atmo_param = f"atmos_format='Stagger' snap={self.snap} FeH={self.metallicity} dims={self.dims} nx={self.nx} ny={self.ny} nz={self.nz}"
                atmos_path = "/mnt/beegfs/gemini/groups/bergemann/users/shared-storage/bergemann-data/Stagger_remo/hd1225623/2013-04-10_nlam48/t46g16m2503"
            else:
                if self.atmosphere_path_3d_model is not None:
                    atmos_path = self.atmosphere_path_3d_model
                    if self.atmos_format_3d.lower() == "multi" or self.atmos_format_3d.lower() == "muram":
                        atmo_param = f"atmos_format='Multi'"
                    elif self.atmos_format_3d.lower() == "stagger":
                        atmo_param = f"atmos_format='Stagger' snap={self.snap} nx={self.nx} ny={self.ny} nz={self.nz}"
                    elif self.atmos_format_3d.lower() == "must":
                        atmo_param = f"atmos_format='MUST'"
                    else:
                        raise ValueError(f"Atmosphere format {self.atmos_format_3d} not recognized")
                else:
                    raise ValueError("3D atmospheres not implemented yet")
                    # NOTE(review): everything below this raise is unreachable
                    # leftover debug code (hard-coded developer paths).
                    atmo_param = "atmos_format='MUST'"
                    atmo_param = "&atmos_params dims=10 atmos_format='Multi' atmos_file='/Users/storm/PycharmProjects/3d_nlte_stuff/m3dis_l/m3dis/experiments/Multi3D/input_multi3d/atmos/t5777g44m0005_20.5x5x230'/"
                    atmo_param = f"atmos_format='Multi'"
                    atmos_path = "/Users/storm/PycharmProjects/3d_nlte_stuff/m3dis_l/m3dis/experiments/Multi3D/input_multi3d/atmos/t5777g44m0005_20.5x5x230"
            # &atmos_params dims=1 atmos_format='MUST' atmos_file='input_multi3d/atmos/m3dis_sun_magg22_10x10x280_1' /
            # multi:
            #atmos_format='MUST' dims=23 atmos_file='/shared-storage/bergemann/m3dis/experiments/Multi3D/input_multi3d/atmos/299/magg2022_150x300/m3dis_sun_magg22_80x80x299_1'
            # might need these two as well? use_density=T use_ne=F
            # stagger:
            # atmos_format="Stagger" snap=20 dims=23 FeH=0.0 nx=30 ny=30 nz=230 atmos_file='./input_multi3d/atmos/t5777g44m00/v05/t5777g44m0005'/
            # muram:
            # atmos_format='Multi' dims=23 atmos_file='./input_multi3d/atmos/muram/mDIS_MARCS_v0.5.1_box_MURaM_HDSun' /
        else:
            raise ValueError("Atmosphere dimension must be either 1D or 3D: m3dis_class.py")
        if self.nlte_flag:
            atom_path = self.model_atom_path
            atom_files = list(self.model_atom_file.keys())
            # m3dis can only treat one element in NLTE per run
            atom_file_element = atom_files[0]
            if len(atom_files) > 1:
                print(f"Only one atom file is allowed for NLTE: m3dis, using the first one {atom_file_element}")
            if use_precomputed_depart:
                precomputed_depart = f"precomputed_depart='{os.path.join(self.tmp_dir, '../precomputed_depart', '')}'"
            else:
                precomputed_depart = ""
            atom_params = (f"&atom_params atom_file='{os.path.join(atom_path, self.model_atom_file[atom_file_element])}' "
                           f"convlim={self.convlim} use_atom_abnd=F exclude_trace_cont=T exclude_from_line_list=T "
                           f"{precomputed_depart}/\n")
            # exclude the NLTE element's lines from the LTE background linelist
            linelist_param_extra = f"exclude_elements='{atom_file_element}'"
        else:
            atom_params = ""
            linelist_param_extra = ""
        if skip_linelist:
            linelist_parameters = ""
        else:
            linelist_parameters = (f"&linelist_params linelist_file='{os.path.join(self.line_list_paths[0], self.line_list_files[0])}' {linelist_param_extra}/\n\
&spectrum_params daa={self.lambda_delta} aa_blue={self.lambda_min} aa_red={self.lambda_max} /\n")
        # NOTE(review): the whole branch below is disabled (`if False:`) —
        # machine-specific background-opacity (absmet) file selection by [Fe/H].
        if False:
            # check if feh is almost 0
            absmet_file_global_path = "/mnt/beegfs/gemini/groups/bergemann/users/storm/data/absmet/"
            absmet_file_global_path = "/Users/storm/PhD_2022-2025/m3dis_useful_stuff/absmet_files/"
            if np.abs(self.metallicity) < 0.01:
                # /mnt/beegfs/gemini/groups/bergemann/users/storm/data/absmet/OPACITIES/M+0.00a+0.00c+0.00n+0.00o+0.00r+0.00s+0.00
                absmet_file = f"OPACITIES/M+0.00a+0.00c+0.00n+0.00o+0.00r+0.00s+0.00/metals_noMnCrCoNi.x01"
                absmet_file = f"absmet_file='{absmet_file_global_path}/{absmet_file}' absmet_big_end=F"
            # check if feh is almost -1 or -0.5
            elif np.abs(self.metallicity + 1) < 0.01 or np.abs(self.metallicity + 0.5) < 0.01:
                absmet_file = f"m-1.00a+0.40c+0.00n+0.00o+0.40r+0.00s+0.00/metals.x01"
                absmet_file = f"absmet_file='{absmet_file_global_path}/{absmet_file}' absmet_big_end=T"
            # check if feh is almost -2
            elif np.abs(self.metallicity + 2) < 0.01:
                absmet_file = f"OPACITIES/M-2.00a+0.40c+0.00n+0.00o+0.40r+0.00s+0.00/metals_noMnCrCoNi.x01"
                absmet_file = f"absmet_file='{absmet_file_global_path}/{absmet_file}' absmet_big_end=F"
            # check if feh is almost -3
            elif np.abs(self.metallicity + 3) < 0.01:
                absmet_file = f"OPACITIES/M-2.00a+0.40c+0.00n+0.00o+0.40r+0.00s+0.00/metals_noMnCrCoNi.x01"
                absmet_file = f"absmet_file='{absmet_file_global_path}/{absmet_file}' absmet_big_end=F"
            # check if feh is almost -4
            elif np.abs(self.metallicity + 4) < 0.01:
                absmet_file = f"OPACITIES/M-4.00a+0.40c+0.00n+0.00o+0.40r+0.00s+0.00/metals_noMnCrCoNi.x01"
                absmet_file = f"absmet_file='{absmet_file_global_path}/{absmet_file}' absmet_big_end=F"
            # &composition_params absmet_file='/u/nisto/data/absmet//m1/metals.x01' absmet_big_end=T /
            # absmet_file = f"absmet_file='{os.path.join(self.departure_file_path, '')}' absmet_big_end=T"
        else:
            absmet_file = ""
        # 0.010018 0.052035 0.124619 0.222841 0.340008 0.468138 0.598497 0.722203 0.830825 0.916958 0.974726 1.000000
        # turbospectrum angles
        output = {}
        # Assemble the complete namelist text; continuation lines start at column
        # zero on purpose so no stray indentation leaks into the namelists.
        config_m3dis = (f"! -- Parameters defining the run -----------------------------------------------\n\
&io_params datadir='{self.tmp_dir}' gb_step=100.0 do_trace=F /\n\
&timer_params sec_per_report=1e8 /\n\
&atmos_params dims={self.dims} save_atmos=T atmos_file='{atmos_path}' {atmo_param}/\n{atom_params}\
&m3d_params decouple_continuum=T verbose=2 n_nu={self.n_nu} maxiter={self.iterations_max} short_scheme='set_a2' long_scheme='custom' custom_mu='0.010 0.052 0.124 0.223 0.340 0.468 0.598 0.722 0.831 0.917 0.975 1.000'/\n\
{linelist_parameters}\
&composition_params isotope_file='{isotope_file_path}' abund_file='{abund_file_path}' {absmet_file}/\n\
&task_list_params hash_table_size={self.hash_table_size} /\n")
        #TODO absmet files?
        if self.verbose:
            print(config_m3dis)
        if self.verbose:
            stdout = None
            stderr = subprocess.STDOUT
        else:
            stdout = open("/dev/null", "w")
            stderr = subprocess.STDOUT
        cwd = os.getcwd()
        try:  # chdir is NECESSARY, m3dis cannot run from other directories sadly
            os.chdir(os.path.join(self.m3dis_path, ""))  #
            #print(os.getcwd())
            pr1, stderr_bytes = self.run_m3dis(config_m3dis, stderr, stdout)
        except subprocess.CalledProcessError:
            output["errors"] = "babsma failed with CalledProcessError"
            return output
        finally:
            os.chdir(cwd)
        if stderr_bytes is None:
            stderr_bytes = b""
        if pr1.returncode != 0:
            output["errors"] = f"m3dis failed with return code {pr1.returncode} {stderr_bytes.decode('utf-8')}"
            return output
        # Return output
        # output["return_code"] = pr.returncode
        # output["output_file"] = os_path.join(
        #    self.tmp_dir, "spectrum_{:08d}.spec".format(self.counter_spectra)
        # )
        return output

    def interpolate_m3dis_atmosphere(self, marcs_models_to_load):
        """
        Interpolate the given MARCS models to this object's Teff/logg/[Fe/H].

        Loads every model, linearly interpolates the depth-dependent structure
        across whichever of (teff, logg, feh) actually vary in the set, and
        resamples the result onto an equidistant depth grid.

        :param marcs_models_to_load: Filenames of the bracketing MARCS models.
        :return: Tuple of arrays (tau500, temperature, pe, vmic, density, depth).
        """
        modelAtmGrid = {'teff': [], 'logg': [], 'feh': [], 'vturb': [], 'file': [], 'structure': [],
                        'structure_keys': [], 'mass': []}  # data
        all_marcs_models = []
        marcs_path = self.marcs_grid_path
        for marcs_model in marcs_models_to_load:
            one_model = marcs_class.MARCSModel(os.path.join(marcs_path, marcs_model))
            all_marcs_models.append(one_model)
            modelAtmGrid['teff'].append(one_model.teff)
            modelAtmGrid['logg'].append(one_model.logg)
            modelAtmGrid['feh'].append(one_model.metallicity)
            modelAtmGrid['vturb'].append(one_model.vmicro)
            modelAtmGrid['file'].append(one_model.file)
            modelAtmGrid['structure'].append(np.vstack((one_model.lgTau5, one_model.temperature, one_model.pe,
                                                        np.full(one_model.depth.shape, one_model.vmicro),
                                                        one_model.density, one_model.depth)))
            modelAtmGrid['structure_keys'].append(['tau500', 'temp', 'pe', 'vmic', 'density', 'depth'])
            modelAtmGrid['mass'].append(one_model.mass)
        interpolate_variables = ['teff', 'logg', 'feh']  # , 'vmic'
        # convert all to numpy arrays
        for k in modelAtmGrid:
            modelAtmGrid[k] = np.asarray(modelAtmGrid[k])
        points = []
        norm_coord = {}
        for k in interpolate_variables:
            points.append(modelAtmGrid[k])  # / max(modelAtmGrid[k]) )
            norm_coord.update({k: max(modelAtmGrid[k])})
        points = np.array(points).T
        values = np.array(modelAtmGrid['structure'])
        interpolate_point = [self.t_eff, self.log_g, self.metallicity]
        # drop duplicate grid points (same model listed twice)
        points, unique_indices = np.unique(points, axis=0, return_index=True)
        values = values[unique_indices]
        indices_to_delete = []
        # drop any coordinate that is constant across all models — an ND
        # interpolator cannot handle degenerate dimensions
        for i in range(len(interpolate_variables)):
            # get the column
            column = points[:, i]
            # check if all elements are the same
            if np.all(column == column[0]):
                indices_to_delete.append(i)
                #interpolate_point_new.pop(i)
                #interpolate_variables_new.pop(i)
                ## also remove ith column from points
                #points_new = np.delete(points_new, i, axis=1)
        # remove the indices
        points = np.delete(points, indices_to_delete, axis=1)
        interpolate_point = np.delete(interpolate_point, indices_to_delete)
        interpolate_variables = np.delete(interpolate_variables, indices_to_delete)
        if len(interpolate_point) > 1:
            interp_f = LinearNDInterpolator(points, values)
            tau500_new, temp_new, pe_new, vmic_new, density_new, depth_new = interp_f(interpolate_point)[0]
        elif len(interpolate_point) == 1:
            # linear interpolation
            # flatten points
            points = points.flatten()
            # take the first element of the array
            interpolate_point = interpolate_point[0]
            interp_f = interp1d(points, values, axis=0, kind='linear')
            tau500_new, temp_new, pe_new, vmic_new, density_new, depth_new = interp_f(interpolate_point)
        else:
            # only one model, so return that
            tau500_new, temp_new, pe_new, vmic_new, density_new, depth_new = values[0]
        # check if nan (requested point fell outside the convex hull of the grid)
        if np.any(np.isnan(tau500_new)):
            print("NAN in model atmosphere")
        density_new, depth_new, pe_new, tau500_new, temp_new, vmic_new = self.convert_atmo_to_equidistant_one(
            density_new, depth_new, pe_new, tau500_new, temp_new, vmic_new)
        return tau500_new, temp_new, pe_new, vmic_new, density_new, depth_new

    def convert_atmo_to_equidistant_one(self, density, depth, pe, tau500, temp, vmic, depth_points=256):
        """
        Resample all atmosphere columns onto an equidistant depth grid.

        :param depth_points: Number of output depth points (default 256).
        :return: Tuple (density, depth, pe, tau500, temp, vmic) on the new grid.
        """
        # interpolate all variables to equidistant depth grid
        depth_min = np.min(depth)
        depth_max = np.max(depth)
        if depth_points is None:
            depth_points = np.size(depth)
        depth_new_equi = np.linspace(depth_min, depth_max, depth_points)
        tau500 = np.interp(depth_new_equi, depth, tau500)
        temp = np.interp(depth_new_equi, depth, temp)
        pe = np.interp(depth_new_equi, depth, pe)
        vmic = np.interp(depth_new_equi, depth, vmic)
        density = np.interp(depth_new_equi, depth, density)
        depth = depth_new_equi
        return density, depth, pe, tau500, temp, vmic

    def calculate_atmosphere(self):
        """Dispatch atmosphere preparation to the 1D or 3D implementation."""
        if self.atmosphere_dimension == "1D":
            self.calculate_atmosphere_1d()
        elif self.atmosphere_dimension == "3D":
            self.calculate_atmosphere_3d()
        else:
            raise ValueError("Atmosphere dimension must be either 1D or 3D: m3dis_class.py")

    def calculate_atmosphere_3d(self):
        """No-op: 3D model cubes are used as-is, so nothing is prepared here."""
        return

    def calculate_atmosphere_1d(self):
        """
        Prepare a 1D model atmosphere for the current stellar parameters.

        Interpolates MARCS models in Teff/logg/[Fe/H], and additionally in
        microturbulence when vmic is not one of the grid values (0, 1, 2, 5 km/s).
        The result is written as a text atmosphere into the temp dir, or — when
        the requested parameters hit a grid point exactly — the raw MARCS model
        is flagged for direct use (``use_marcs_directly``).
        """
        possible_turbulence = [0.0, 1.0, 2.0, 5.0]
        flag_dont_interp_microturb = False
        for i in range(len(possible_turbulence)):
            if self.turbulent_velocity == possible_turbulence[i]:
                flag_dont_interp_microturb = True
        if self.log_g < 3:
            # giants: vmic interpolation is skipped and 2 km/s is used instead
            flag_dont_interp_microturb = True
        logging.debug(
            f"flag_dont_interp_microturb: {flag_dont_interp_microturb} {self.turbulent_velocity} {self.t_eff} {self.log_g}")
        self.use_marcs_directly = False
        #TODO: when checking if model already exists, there is a certain precision loss
        # NOTE(review): in the three interpolation branches below,
        # `atmosphere_properties` is only assigned inside the
        # `if not os.path.exists(...)` blocks; if the interpolated model is
        # already cached on disk, the final assignment at the end of this
        # method raises NameError — confirm and fix upstream.
        if not flag_dont_interp_microturb and self.turbulent_velocity < 2.0 and (
                self.turbulent_velocity > 1.0 or (self.turbulent_velocity < 1.0 and self.t_eff < 3900.)):
            self.marcs_model_name = "atmos.marcs_tef{:.1f}_g{:.2f}_z{:.2f}_tur{:.2f}".format(self.t_eff, self.log_g,
                                                                                             self.metallicity,
                                                                                             self.turbulent_velocity)
            # check if the model exists
            interp_model_name = os.path.join(self.tmp_dir, self.marcs_model_name)
            if not os.path.exists(interp_model_name):
                # Bracket the microturbulence to figure out what two values to generate the models to interpolate between using Andy's code
                turbulence_low = 0.0
                microturbulence = self.turbulent_velocity
                for i in range(len(possible_turbulence)):
                    if self.turbulent_velocity > possible_turbulence[i]:
                        turbulence_low = possible_turbulence[i]
                        place = i
                turbulence_high = possible_turbulence[place + 1]
                self.turbulent_velocity = turbulence_low
                marcs_model_list_low = self._generate_model_atmosphere()
                if marcs_model_list_low["errors"] is not None:
                    raise ValueError(f"{marcs_model_list_low['errors']}")
                tau500_low, temp_low, pe_low, vmic_low, density_low, depth_low = self.interpolate_m3dis_atmosphere(marcs_model_list_low["marcs_model_list"])
                self.turbulent_velocity = turbulence_high
                marcs_model_list_high = self._generate_model_atmosphere()
                if marcs_model_list_high["errors"] is not None:
                    raise ValueError(f"{marcs_model_list_high['errors']}")
                tau500_high, temp_high, pe_high, vmic_high, density_high, depth_high = self.interpolate_m3dis_atmosphere(marcs_model_list_high["marcs_model_list"])
                atmosphere_properties = marcs_model_list_high
                self.turbulent_velocity = microturbulence
                # interpolate and find a model atmosphere for the microturbulence
                fxhigh = (microturbulence - turbulence_low) / (turbulence_high - turbulence_low)
                fxlow = 1.0 - fxhigh
                tau500_interp = tau500_low * fxlow + tau500_high * fxhigh
                temp_interp = temp_low * fxlow + temp_high * fxhigh
                pe_interp = pe_low * fxlow + pe_high * fxhigh
                vmic_interp = vmic_low * fxlow + vmic_high * fxhigh
                density_interp = density_low * fxlow + density_high * fxhigh
                depth_interp = depth_low * fxlow + depth_high * fxhigh
                density_interp, depth_interp, pe_interp, tau500_interp, temp_interp, vmic_interp = (
                    self.convert_atmo_to_equidistant_one(density_interp, depth_interp, pe_interp, tau500_interp, temp_interp, vmic_interp))
                # print(interp_model_name)
                interp_model_name = os.path.join(self.tmp_dir, self.marcs_model_name)
                self.save_m3dis_model(interp_model_name, depth_interp, temp_interp, pe_interp, density_interp, vmic_interp)
        elif not flag_dont_interp_microturb and self.turbulent_velocity > 2.0:  # not enough models to interp if higher than 2
            microturbulence = self.turbulent_velocity  # just use 2.0 for the model if between 2 and 3
            self.turbulent_velocity = 2.0
            self.marcs_model_name = "atmos.marcs_tef{:.1f}_g{:.2f}_z{:.2f}_tur{:.2f}".format(self.t_eff, self.log_g,
                                                                                             self.metallicity,
                                                                                             self.turbulent_velocity)
            # check if the model exists
            interp_model_name = os.path.join(self.tmp_dir, self.marcs_model_name)
            if not os.path.exists(interp_model_name):
                marcs_model_list = self._generate_model_atmosphere()
                atmosphere_properties = marcs_model_list
                if marcs_model_list["errors"] is not None:
                    raise ValueError(f"{marcs_model_list['errors']}")
                tau500, temp, pe, vmic, density, depth = self.interpolate_m3dis_atmosphere(marcs_model_list["marcs_model_list"])
                interp_model_name = os.path.join(self.tmp_dir, self.marcs_model_name)
                self.save_m3dis_model(interp_model_name, depth, temp, pe, density, vmic)
            self.turbulent_velocity = microturbulence
        elif not flag_dont_interp_microturb and self.turbulent_velocity < 1.0 and self.t_eff >= 3900.:  # not enough models to interp if lower than 1 and t_eff > 3900
            microturbulence = self.turbulent_velocity
            self.turbulent_velocity = 1.0
            self.marcs_model_name = "atmos.marcs_tef{:.1f}_g{:.2f}_z{:.2f}_tur{:.2f}".format(self.t_eff, self.log_g,
                                                                                             self.metallicity,
                                                                                             self.turbulent_velocity)
            # check if the model exists
            interp_model_name = os.path.join(self.tmp_dir, self.marcs_model_name)
            if not os.path.exists(interp_model_name):
                marcs_model_list = self._generate_model_atmosphere()
                atmosphere_properties = marcs_model_list
                if marcs_model_list["errors"] is not None:
                    raise ValueError(f"{marcs_model_list['errors']}")
                tau500, temp, pe, vmic, density, depth = self.interpolate_m3dis_atmosphere(marcs_model_list["marcs_model_list"])
                interp_model_name = os.path.join(self.tmp_dir, self.marcs_model_name)
                self.save_m3dis_model(interp_model_name, depth, temp, pe, density, vmic)
            self.turbulent_velocity = microturbulence
        elif flag_dont_interp_microturb:
            if self.log_g < 3:
                # force 2 km/s for giants, restore the requested value afterwards
                microturbulence = self.turbulent_velocity
                self.turbulent_velocity = 2.0
            marcs_model_list = self._generate_model_atmosphere()
            atmosphere_properties = marcs_model_list
            if marcs_model_list["errors"] is not None:
                raise ValueError(f"{marcs_model_list['errors']}")
            if np.size(np.unique(marcs_model_list["marcs_model_list"])) == 1:
                # then can just use MARCS model directly
                self.marcs_model_name = marcs_model_list["marcs_model_list"][0]
                #interp_model_name = self.marcs_model_name
                #self.marcs_model_name = interp_model_name
                self.use_marcs_directly = True
            else:
                self.marcs_model_name = "atmos.marcs_tef{:.1f}_g{:.2f}_z{:.2f}_tur{:.2f}".format(self.t_eff, self.log_g,
                                                                                                 self.metallicity,
                                                                                                 self.turbulent_velocity)
                # check if the model exists
                interp_model_name = os.path.join(self.tmp_dir, self.marcs_model_name)
                if not os.path.exists(interp_model_name):
                    tau500, temp, pe, vmic, density, depth = self.interpolate_m3dis_atmosphere(marcs_model_list["marcs_model_list"])
                    interp_model_name = os.path.join(self.tmp_dir, self.marcs_model_name)
                    self.save_m3dis_model(interp_model_name, depth, temp, pe, density, vmic)
            if self.log_g < 3:
                self.turbulent_velocity = microturbulence
        else:
            print("Unexpected error?")
        self.atmosphere_properties = atmosphere_properties

    def save_m3dis_model(self, interp_model_name, depth_interp, temp_interp, pe_interp, density_interp, vmic_interp):
        """
        Write an interpolated atmosphere as an m3dis 'Text' format file.

        NOTE(review): the header labels the fourth column "pg" but the value
        written is the interpolated density — confirm which quantity m3dis
        expects here.
        """
        with open(interp_model_name, "w") as file:
            # first is name of file
            file.write(f"{self.marcs_model_name}\n")
            # next is number of points as integer
            depth_points = np.size(depth_interp)
            file.write(f"{depth_points}\n")
            # next is the format
            file.write("* depth     temp       pe          pg      vmic\n")
            for i in range(len(depth_interp)):
                file.write(
                    f"{depth_interp[i]:>13.6e} {temp_interp[i]:>8.1f} {pe_interp[i]:>12.4E} {density_interp[i]:>12.4E} {vmic_interp[i]:>5.3f}\n")

    def synthesize_spectra(self) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Run the full pipeline: atmosphere preparation, m3dis, spectrum readback.

        :return: Tuple (wavelength, normalised_flux, flux); all three are None
            if any stage failed.
        """
        wavelength, normalised_flux, flux = None, None, None
        try:
            logging.debug("Running m3dis and atmosphere")
            logging.debug("Cleaning temp directory")
            # clean temp directory
            save_file_dir = os.path.join(self.tmp_dir, "save")
            if os.path.exists(save_file_dir):
                # just in case it fails, so that it doesn't reuse the old files
                shutil.rmtree(save_file_dir)
            self.calculate_atmosphere()
            logging.debug("Running m3dis")
            output = self.call_m3dis(skip_linelist=self.skip_linelist, use_precomputed_depart=self.use_precomputed_depart)
            if "errors" in output:
                if not self.night_mode:
                    print(output["errors"], "m3dis failed")
            else:
                if self.save_spectra:
                    try:
                        completed_run = self.m3dis_python_module.m3dis.read(
                            self.tmp_dir
                        )
                        wavelength, _ = completed_run.get_xx(completed_run.lam)
                        flux = completed_run.get_yy(norm=False)
                        normalised_flux = completed_run.get_yy(norm=True)
                        # convert flux from per-frequency to per-wavelength units
                        flux = flux * 2.99792458e10 / (wavelength**2) * 1e8
                    except FileNotFoundError as e:
                        if not self.night_mode:
                            print(f"m3dis, cannot find {e}")
        except (FileNotFoundError, ValueError, TypeError) as error:
            if not self.night_mode:
                print(f"Interpolation failed? {error}")
        return wavelength, normalised_flux, flux
if __name__ == '__main__':
    # Standalone smoke test: interpolate 1D MARCS atmospheres for a metal-poor
    # giant at several metallicities, without running radiative transfer.
    from scripts.synthetic_code_class import fetch_marcs_grid

    teff, logg, feh, vmic = 5777, 4.44, 0.0, 1.0
    grid_path = "/Users/storm/docker_common_folder/TSFitPy/input_files/model_atmospheres/1D/"
    grid_list = grid_path + "model_atmosphere_list.txt"
    temp_dir = "./temppp_dirrr/"
    os.makedirs(temp_dir, exist_ok=True)
    (model_temperatures, model_logs, model_mets,
     marcs_value_keys, marcs_models, marcs_values) = fetch_marcs_grid(
        grid_list, M3disCall.marcs_parameters_to_ignore)
    # None for every argument that only matters when actually running m3dis;
    # atmosphere interpolation does not need the code paths or run controls.
    unused_kwargs = dict.fromkeys([
        "m3dis_path", "interpol_path", "line_list_paths", "model_atom_path",
        "departure_file_path", "aux_file_length_dict", "m3dis_python_module",
        "n_nu", "hash_table_size", "mpi_cores", "iterations_max", "convlim",
        "snap", "dims", "nx", "ny", "nz",
    ])
    m3dis_caller = M3disCall(
        marcs_grid_path=grid_path,
        marcs_grid_list=grid_list,
        model_temperatures=model_temperatures,
        model_logs=model_logs,
        model_mets=model_mets,
        marcs_value_keys=marcs_value_keys,
        marcs_models=marcs_models,
        marcs_values=marcs_values,
        **unused_kwargs,
    )
    # Metal-poor giant test case
    teff = 4665
    logg = 1.64
    vmic = 2.0
    for feh in [-2.5, -2.3, -2.1, -1.9, -2.7, -2.9, -3.1]:
        m3dis_caller.configure(t_eff=teff, log_g=logg, metallicity=feh, turbulent_velocity=vmic,
                               lambda_delta=None, lambda_min=None, lambda_max=None,
                               free_abundances=None, temp_directory=temp_dir, nlte_flag=False,
                               verbose=None,
                               atmosphere_dimension="1D", windows_flag=None,
                               segment_file=None, line_mask_file=None,
                               depart_bin_file=None, depart_aux_file=None,
                               model_atom_file=None)
        m3dis_caller.calculate_atmosphere()
|
TSFitPy-developersREPO_NAMETSFitPyPATH_START.@TSFitPy_extracted@TSFitPy-main@scripts@m3dis_class.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/unit_tests/tools/powerbi/__init__.py",
"type": "Python"
}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@unit_tests@tools@powerbi@__init__.py@.PATH_END.py
|
|
{
"filename": "socket_server.py",
"repo_name": "mhammond/pywin32",
"repo_path": "pywin32_extracted/pywin32-main/win32/Demos/security/sspi/socket_server.py",
"type": "Python"
}
|
"""A sample socket server and client using SSPI authentication and encryption.
You must run with either 'client' or 'server' as arguments. A server must be
running before a client can connect.
To use with Kerberos you should include in the client options
--target-spn=username, where 'username' is the user under which the server is
being run.
Running either the client or server as a different user can be informative.
A command-line such as the following may be useful:
`runas /user:{user} {fqp}\\python.exe {fqp}\\socket_server.py --wait client|server`
{fqp} should specify the relevant fully-qualified path names.
To use 'runas' with Kerberos, the client program will need to
specify --target-spn with the username under which the *server* is running.
See the SSPI documentation for more details.
"""
import http.client # sorry, this demo needs 2.3+
import optparse
import socketserver
import struct
import traceback
import sspi
import win32api
import win32security
options = None # set to optparse object.
def GetUserName():
    """Best-effort lookup of the name of the user running this process."""
    try:
        name = win32api.GetUserName()
    except win32api.error:
        # Non-local users (presumably without permission to log in locally)
        # can hit 'access denied' here, so fall back to the fully-qualified
        # name.  A side-effect of these permission-denied errors is a lack of
        # Python codecs, so printing the Unicode value fails — return the
        # repr() instead and avoid codecs completely.
        name = repr(win32api.GetUserNameEx(win32api.NameSamCompatible))
    return name
# Send a simple "message" over a socket - send the number of bytes first,
# then the string. Ditto for receive.
def _send_msg(s, m):
s.send(struct.pack("i", len(m)))
s.send(m)
def _get_msg(s):
size_data = s.recv(struct.calcsize("i"))
if not size_data:
return None
cb = struct.unpack("i", size_data)[0]
return s.recv(cb)
class SSPISocketServer(socketserver.TCPServer):
    """TCP server that authenticates each connection with SSPI and then
    receives encrypted messages from the client."""

    def __init__(self, *args, **kw):
        socketserver.TCPServer.__init__(self, *args, **kw)
        # One ServerAuth object is reused across connections; it is reset()
        # at the start of every request in verify_request().
        self.sa = sspi.ServerAuth(options.package)

    def verify_request(self, sock, ca):
        """Run the SSPI handshake; return True only if the client authorizes."""
        # Do the sspi auth dance
        self.sa.reset()
        while 1:
            data = _get_msg(sock)
            if data is None:
                return False
            try:
                err, sec_buffer = self.sa.authorize(data)
            except sspi.error as details:
                print("FAILED to authorize client:", details)
                return False

            # err == 0 signals that the security context is complete.
            if err == 0:
                break
            _send_msg(sock, sec_buffer[0].Buffer)
        return True

    def process_request(self, request, client_address):
        """Handle one authenticated connection: impersonate the client and
        decrypt length-prefixed (data, key) message pairs until EOF."""
        # An example using the connection once it is established.
        print("The server is running as user", GetUserName())
        self.sa.ctxt.ImpersonateSecurityContext()
        try:
            print("Having conversation with client as user", GetUserName())
            while 1:
                # we need to grab 2 bits of data - the encrypted data, and the
                # 'key'
                data = _get_msg(request)
                key = _get_msg(request)
                if data is None or key is None:
                    break
                data = self.sa.decrypt(data, key)
                print("Client sent:", repr(data))
        finally:
            # Always drop the impersonation token, even on error.
            self.sa.ctxt.RevertSecurityContext()
        self.close_request(request)
        print("The server is back to user", GetUserName())
def serve():
    """Start the SSPI test server on the configured port and block forever."""
    server = SSPISocketServer(("localhost", options.port), None)
    print("Running test server...")
    server.serve_forever()
def sspi_client():
    """Connect to the local test server, perform the SSPI handshake, then
    send a few encrypted words and close."""
    c = http.client.HTTPConnection("localhost", options.port)
    c.connect()
    # Do the auth dance.
    ca = sspi.ClientAuth(options.package, targetspn=options.target_spn)
    data = None
    while 1:
        err, out_buf = ca.authorize(data)
        _send_msg(c.sock, out_buf[0].Buffer)
        # err == 0 means the security context is complete on our side.
        if err == 0:
            break
        data = _get_msg(c.sock)
    print("Auth dance complete - sending a few encryted messages")
    # Assume out data is sensitive - encrypt the message.
    # NOTE(review): encrypt() is handed str tokens here; on Python 3 the sspi
    # layer may require bytes — confirm against the pywin32 sspi module.
    for data in "Hello from the client".split():
        blob, key = ca.encrypt(data)
        # Server expects pairs: encrypted blob followed by its signature/key.
        _send_msg(c.sock, blob)
        _send_msg(c.sock, key)
    c.sock.close()
    print("Client completed.")
if __name__ == "__main__":
    # Command-line entry point: parse options, then run as client or server.
    parser = optparse.OptionParser("%prog [options] client|server", description=__doc__)
    parser.add_option(
        "",
        "--package",
        action="store",
        default="NTLM",
        help="The SSPI package to use (eg, Kerberos) - default is NTLM",
    )
    parser.add_option(
        "",
        "--target-spn",
        action="store",
        help="""The target security provider name to use. The
                string contents are security-package specific. For
                example, 'Kerberos' or 'Negotiate' require the server
                principal name (SPN) (ie, the username) of the remote
                process. For NTLM this must be blank.""",
    )
    parser.add_option(
        "",
        "--port",
        action="store",
        default="8181",
        help="The port number to use (default=8181)",
    )
    parser.add_option(
        "",
        "--wait",
        action="store_true",
        help="""Cause the program to wait for input just before
                terminating. Useful when using via runas to see
                any error messages before termination.
                """,
    )
    options, args = parser.parse_args()
    # The port is stored as a string default; validate and convert here.
    try:
        options.port = int(options.port)
    except (ValueError, TypeError):
        parser.error("--port must be an integer")

    try:
        try:
            if not args:
                args = [""]
            if args[0] == "client":
                sspi_client()
            elif args[0] == "server":
                serve()
            else:
                parser.error(
                    "You must supply 'client' or 'server' - use --help for details"
                )
        except KeyboardInterrupt:
            pass
        except SystemExit:
            pass
        except:
            traceback.print_exc()
    finally:
        # --wait keeps the console open (useful under `runas`).
        if options.wait:
            input("Press enter to continue")
|
mhammondREPO_NAMEpywin32PATH_START.@pywin32_extracted@pywin32-main@win32@Demos@security@sspi@socket_server.py@.PATH_END.py
|
{
"filename": "gen_ddp_n8.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/py/LSS/DESI_ke/gen_ddp_n8.py",
"type": "Python"
}
|
import os
import sys
import fitsio
import argparse
import runtime
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack
from scipy.spatial import KDTree
from delta8_limits import delta8_tier, d8_limits
from findfile import findfile, fetch_fields, overwrite_check, gather_cat, write_desitable
from config import Configuration
from bitmask import lumfn_mask, consv_mask
from delta8_limits import d8_limits
from runtime import calc_runtime
# Parse options, resolve input/output paths and load the DDP catalogue.
parser = argparse.ArgumentParser(description='Generate DDP1 N8 for all gold galaxies.')
parser.add_argument('--log', help='Create a log file of stdout.', action='store_true')
parser.add_argument('-d', '--dryrun', help='Dryrun.', action='store_true')
parser.add_argument('-s', '--survey', help='Select survey', default='gama')
parser.add_argument('--realz', help='Realization', default=0, type=int)
parser.add_argument('--prefix', help='randoms filename prefix', default='randoms_ddp1')
parser.add_argument('--nooverwrite', help='Do not overwrite outputs if on disk', action='store_true')

args = parser.parse_args()

log = args.log
realz = args.realz
dryrun = args.dryrun
prefix = args.prefix
survey = args.survey.lower()

fields = fetch_fields(survey)

fpath = findfile(ftype='ddp', dryrun=dryrun, survey=survey)
opath = findfile(ftype='ddp_n8', dryrun=dryrun, survey=survey)

if log:
    # Redirect all subsequent prints into the log file.
    logfile = findfile(ftype='ddp_n8', dryrun=False, survey=survey, log=True)
    print(f'Logging to {logfile}')
    sys.stdout = open(logfile, 'w')

if args.nooverwrite:
    overwrite_check(opath)

# Read ddp cat.
dat = Table.read(fpath)
print('Reading: {} with length {}'.format(fpath, len(dat)))

# DDP1 density is required later to convert N8 counts into overdensities.
assert 'DDP1_DENS' in dat.meta

points = np.c_[dat['CARTESIAN_X'], dat['CARTESIAN_Y'], dat['CARTESIAN_Z']]
# copy=True makes the array contiguous/owned before handing it to KDTree.
points = np.array(points, copy=True)

kd_tree_all = KDTree(points)
# ---- Find closest matching random to inherit fill factor ----
# Read randoms bound_dist.
rpaths = [findfile(ftype='randoms_bd', dryrun=dryrun, field=ff, survey=survey, prefix=prefix) for ff in fields]

for rpath in rpaths:
    print('Reading: {}'.format(rpath))

rand = gather_cat(rpaths)

print('Retrieved galaxies for {}'.format(np.unique(dat['FIELD'].data)))
print('Retrieved randoms for {}'.format(np.unique(rand['FIELD'].data)))

# Record which random files were used in the output metadata.
for i, rpath in enumerate(rpaths):
    dat.meta['RPATH_{}'.format(i)] = rpath

rpoints = np.c_[rand['CARTESIAN_X'], rand['CARTESIAN_Y'], rand['CARTESIAN_Z']]
rpoints = np.array(rpoints, copy=True)

print('Creating big rand. tree.')

big_tree = KDTree(rpoints)

print('Querying tree for closest rand.')

dd, ii = big_tree.query([x for x in points], k=1)

# Find closest random for bound_dist and fill factor.
# These randoms are split by field.
dat['RANDSEP'] = dd
dat['RANDMATCH'] = rand['RANDID'][ii]
dat['BOUND_DIST'] = rand['BOUND_DIST'][ii]
dat['FILLFACTOR'] = rand['FILLFACTOR'][ii]

# Flag poorly-filled galaxies out of the d8 luminosity function.
dat['IN_D8LUMFN'] += (dat['FILLFACTOR'].data < 0.8) * lumfn_mask.FILLFACTOR
dat['FILLFACTOR_VMAX'] = -99.

print('Solving vol. avg. fill factor for z limits: {} to {}'.format(dat['ZMAX'].data.min(), dat['ZMAX'].data.max()))

# Bin galaxies by ZMAX in steps of 2.5e-3 in redshift.
_idxs = np.digitize(dat['ZMAX'].data, bins=np.arange(0.0, 1.0, 2.5e-3))

volavg_fillfrac = 0.0

for i, _idx in enumerate(np.unique(_idxs)):
    zmax = dat['ZMAX'][_idxs == _idx].max()
    sub_rand = rand[rand['Z'] <= zmax]
    # Fraction of randoms below zmax with good fill factor.
    isin = (sub_rand['FILLFACTOR'] > 0.8)
    if np.count_nonzero(isin):
        volavg_fillfrac = np.mean(isin)
    else:
        # Carry forward the previous bin's value when no randoms qualify.
        print('Warning: assuming previous vol. avg. fillfactor of {:.6f} for {:.6f}'.format(volavg_fillfrac, zmax))
    dat['FILLFACTOR_VMAX'][_idxs == _idx] = volavg_fillfrac

if not dryrun:
    match_sep = 6.5
    # Typically, bounded by 1.6
    # assert np.all(dat['RANDSEP'].data < match_sep), 'Failed to find matching random with < 5 Mpc/h separation.'
    if not np.all(dat['RANDSEP'].data < match_sep):
        # Note: DESI randoms are less dense, larger expected separation.
        # NOTE(review): message quotes >3 Mpc/h but the threshold above is
        # 6.5 — confirm which value is intended.
        print('WARNING: poor random match, with maximum comoving random separation >3Mpc/h.')
        poor_match = dat['RANDSEP'].data > match_sep
        print(dat[poor_match])
# ---- Calculate DDPX_N8 for each gama gold galaxy. ----
for idx in range(3):
    # Calculate DDP1/2/3 N8 for all gold galaxies.
    ddp_idx = idx + 1
    dat['DDP{:d}_N8'.format(ddp_idx)] = -99
    for field in fields:
        print('Building tree for DDP {} and field {}'.format(ddp_idx, field))
        in_field = dat['FIELD'] == field
        dat_field = dat[in_field]
        # DDP members of this tier within the field.
        ddp = dat_field[dat_field['DDP'][:,idx] == 1]
        points_ddp = np.c_[ddp['CARTESIAN_X'], ddp['CARTESIAN_Y'], ddp['CARTESIAN_Z']]
        points_ddp = np.array(points_ddp, copy=True)
        kd_tree_ddp = KDTree(points_ddp)
        print('Querying tree for DDP {}'.format(ddp_idx))
        # Count DDP neighbours within an 8 Mpc/h sphere of every galaxy.
        indexes_ddp = kd_tree_all.query_ball_tree(kd_tree_ddp, r=8.)
        counts = np.array([len(idx) for idx in indexes_ddp])
        dat['DDP{:d}_N8'.format(ddp_idx)][in_field] = counts[in_field]

## Derived.
# Volume of the r = 8 Mpc/h counting sphere.
dat.meta['VOL8'] = (4./3.)*np.pi*(8.**3.)

# Overdensity: observed counts over (expected counts x fill factor) minus one.
dat['DDP1_DELTA8'] = ((dat['DDP1_N8'] / (dat.meta['VOL8'] * dat.meta['DDP1_DENS']) / dat['FILLFACTOR'])) - 1.

##
outwith = (dat['ZSURV'] > dat.meta['DDP1_ZMIN']) & (dat['ZSURV'] < dat.meta['DDP1_ZMAX'])
outwith = ~outwith

if not dryrun:
    # Insufficient randoms in a dryrun.
    outwith = outwith | (dat['FILLFACTOR'] < 0.8)

# Sentinel for galaxies outside the DDP1 redshift range / poorly filled.
dat['DDP1_DELTA8'][outwith] = -99.
dat['DDP1_DELTA8_TIER'] = delta8_tier(dat['DDP1_DELTA8'])
dat.pprint()

# TODO: Check
if 'ddp1' not in prefix:
    dat['DDP2_DELTA8'] = ((dat['DDP2_N8'] / (dat.meta['VOL8'] * dat.meta['DDP2_DENS']) / dat['FILLFACTOR'])) - 1.
    dat['DDP3_DELTA8'] = ((dat['DDP3_N8'] / (dat.meta['VOL8'] * dat.meta['DDP3_DENS']) / dat['FILLFACTOR'])) - 1.

for x in dat.meta.keys():
    print('{}\t\t{}'.format(x.ljust(20), dat.meta[x]))

print('Writing {}'.format(opath))

write_desitable(opath, dat)

# ---- Generate ddp_n8_d0 files for LF(d8) files, limited to DDP1 (and redshift range) ----
dat = dat[(dat['ZSURV'] > dat.meta['DDP1_ZMIN']) & (dat['ZSURV'] < dat.meta['DDP1_ZMAX'])]
dat['DDP1_DELTA8_TIER'] = delta8_tier(dat['DDP1_DELTA8'])

utiers = np.unique(dat['DDP1_DELTA8_TIER'].data)

# Drop the -99 sentinel tier before the completeness check below.
if -99 in utiers:
    utiers = utiers.tolist()
    utiers.remove(-99)
    utiers = np.array(utiers)

for ii, xx in enumerate(d8_limits):
    dat.meta['D8{}LIMS'.format(ii)] = str(xx)

if not np.all(np.isin(np.arange(9), utiers)):
    print('WARNING: MISSING d8 TIERS ({})'.format(utiers))
else:
    print(utiers)
    print('Delta8 spans {:.4f} to {:.4f} over {} tiers.'.format(dat['DDP1_DELTA8'].min(), dat['DDP1_DELTA8'].max(), utiers))

# Write one per-field catalogue for every overdensity tier.
for tier in np.arange(len(d8_limits)):
    print()
    print('---- d{} ----'.format(tier))
    isin = (dat['DDP1_DELTA8_TIER'].data == tier)
    to_write = dat[isin]
    dat.meta['DDP1_D{}_NGAL'.format(tier)] = len(to_write)
    assert 'AREA' in dat.meta.keys()
    assert 'AREA' in to_write.meta.keys()
    print('Available fields in tier: {}'.format(np.unique(dat['FIELD'].data)))
    for field in fields:
        isin = to_write['FIELD'] == field
        to_write_field = to_write[isin]
        opath_field = findfile('ddp_n8_d0', dryrun=dryrun, field=field, utier=tier, survey=survey, realz=realz)
        print('Writing {} galaxies from field {} to {}.'.format(len(to_write_field), np.unique(to_write_field['FIELD'].data), opath_field))
        # Total AREA is split evenly across the survey fields.
        to_write_field.meta['AREA'] = to_write.meta['AREA'] / len(fields)
        write_desitable(opath_field, to_write_field)

print('\n\nDone.\n\n')

if log:
    sys.stdout.close()
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@py@LSS@DESI_ke@gen_ddp_n8.py@.PATH_END.py
|
{
"filename": "_a.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/carpet/_a.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class AValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``a`` data array of carpet traces."""

    def __init__(self, plotly_name="a", parent_name="carpet", **kwargs):
        # Pull the defaults out of kwargs first so explicit callers win.
        edit_type = kwargs.pop("edit_type", "calc")
        role = kwargs.pop("role", "data")
        super(AValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@carpet@_a.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "carronj/plancklens",
"repo_path": "plancklens_extracted/plancklens-master/plancklens/helpers/__init__.py",
"type": "Python"
}
|
carronjREPO_NAMEplancklensPATH_START.@plancklens_extracted@plancklens-master@plancklens@helpers@__init__.py@.PATH_END.py
|
|
{
"filename": "dither.py",
"repo_name": "spacetelescope/hstaxe",
"repo_path": "hstaxe_extracted/hstaxe-main/hstaxe/axesrc/dither.py",
"type": "Python"
}
|
"""
See LICENSE.txt
"""
import os
import numpy as np
import logging
from astropy.io import fits
from stsci.imagestats import ImageStats
import stsci.convolve as convolve
from stsci.image.numcombine import numCombine
from drizzlepac.drizCR import quickDeriv
from drizzlepac import minmed
from drizzlepac import adrizzle
from drizzlepac.astrodrizzle import ablot
from hstaxe.axeerror import aXeError
# make sure there is a logger
_log = logging.getLogger(__name__)
class Drizzle:
    """Class to wrap the drizzle command."""

    def __init__(self):
        _log.info("No init in class")

    def run(self, data,
            outdata,
            in_mask,
            outweig,
            coeffs,
            wt_scl,
            drizzle_params,
            img_nx,
            img_ny):
        """
        Drizzle using drizzlepac.astrodrizzle.

        Parameters
        ----------
        data: str
            The name of the input image file or association table which is to be "drizzled"
        in_mask: str
            input mask for blocking pixels
        outdata: str
            The name for the output data image desired by the user
        outweig: str
            The name for the output weight image
        coeffs: str
            distortion coefficients
        wt_scl: str
            Weighting factor for input image. If wt_scl=exptime then wt_scl
            will be set equal to the exposure time found in the image header.
            This is the standard operation and is recommended. It is also
            possible to give wt_scl=expsq for weighting by the square of
            exposure time. The latter is optimal for read-noise dominated
            images.
        drizzle_params: dict
            drizzle parameters (units, pixfrac, scale, kernel)
        img_nx: int
            output size in x
        img_ny: int
            output size in y
        """
        # Check for file names that are too long for the drizzle task;
        # this is a FITS header (80 char) limitation.
        # BUGFIX: the first message previously read "(>80 chars)for drizzle
        # task" (missing space); both checks are now one loop with a
        # consistent message.
        for fname in (data, outdata):
            if len(fname) > 80:
                err_msg = ("File name '{0:s}' is too long (>80 chars) "
                           "for drizzle task".format(fname))
                raise aXeError(err_msg)

        ret = adrizzle.drizzle(data, outdata,
                               outweig=outweig,
                               in_mask=in_mask,
                               wt_scl=wt_scl,
                               coeffs=coeffs,
                               outnx=img_nx,
                               outny=img_ny,
                               in_un=drizzle_params['IN_UN'],
                               out_un=drizzle_params['OUT_UN'],
                               pixfrac=drizzle_params['PFRAC'],
                               scale=drizzle_params['PSCALE'],
                               kernel=drizzle_params['KERNEL'], Stdout=1)

        # Forward the captured drizzle output to the logger.
        for line in ret:
            _log.info(line)
class MedianCombine:
    """Class to median-combine individual drizzles."""

    def __init__(self,
                 contributors,
                 drizzle_params,
                 ext_names):
        """Initialize the class.

        Parameters
        ----------
        contributors : list
            objects exposing ``ext_names`` (file-name dict) and ``info``
            (header-value dict) for each input image
        drizzle_params : dict
            combine_* parameters plus the RDNOISE value
        ext_names : dict
            output file names; 'MED' is the median image to write
        """
        # store the parameters
        self.combine_maskpt = drizzle_params['combine_maskpt']
        self.combine_type = drizzle_params['combine_type']
        self.combine_nsigma1 = drizzle_params['combine_nsigma1']
        self.combine_nsigma2 = drizzle_params['combine_nsigma2']
        self.combine_nlow = drizzle_params['combine_nlow']
        self.combine_nhigh = drizzle_params['combine_nhigh']
        self.combine_lthresh = drizzle_params['combine_lthresh']
        self.combine_hthresh = drizzle_params['combine_hthresh']
        self.combine_grow = drizzle_params['combine_grow']

        self.rdnoise = drizzle_params['RDNOISE']

        self.ext_names = ext_names

        # store the name of the median image
        self.median_image = ext_names['MED']

        self.input_data = self._get_inputs(contributors)

    def _get_inputs(self, contributors):
        """
        Extract the inputs for the median combine.

        Returns a dict of per-image name/exptime/readnoise/sky lists plus
        the total exposure time.
        """
        # generate an empty dictionary
        input_data = {}

        # go over all contributing objects
        sci_imgs = []
        wht_imgs = []
        exp_vals = []
        rdn_vals = []
        sky_vals = []
        exp_tot = 0.0
        for one_contrib in contributors:
            # put the image names to the list
            sci_imgs.append(one_contrib.ext_names['SING_SCI'])
            wht_imgs.append(one_contrib.ext_names['SING_WHT'])

            # put image properties to the list
            exp_vals.append(one_contrib.info['EXPTIME'])
            rdn_vals.append(self.rdnoise)
            if 'SKY_CPS' in one_contrib.info:
                sky_vals.append(one_contrib.info['SKY_CPS'])
            else:
                # BUGFIX: the message previously formatted
                # input_data['sci_imgs'], a key only assigned further down,
                # so a missing sky value raised KeyError instead of the
                # intended error; name the offending image instead.
                err_msg = ("Sky value missing for image: {0:s}!"
                           .format(one_contrib.ext_names['SING_SCI']))
                raise aXeError(err_msg)

            # compose the total exposure time
            exp_tot += one_contrib.info['EXPTIME']

        # put the images to the dictionary
        input_data['sci_imgs'] = sci_imgs
        input_data['wht_imgs'] = wht_imgs

        # put the values to the dictionary
        input_data['exp_vals'] = exp_vals
        input_data['rdn_vals'] = rdn_vals
        input_data['sky_vals'] = sky_vals
        input_data['exp_tot'] = exp_tot

        # return the dictionary
        return input_data

    def run(self):
        """
        Run the median combine step.

        The code was either directly stolen from the corresponding
        pydrizzle version or done after this version. Necessary
        adjustments to the slitless data were applied.
        """
        sci_data = []

        for one_image in self.input_data['sci_imgs']:
            if os.access(one_image, os.F_OK):
                in_fits = fits.open(one_image, 'readonly')
                sci_data.append(in_fits[0].data)
                in_fits.close()

        wht_data = []
        for one_image in self.input_data['wht_imgs']:
            if os.access(one_image, os.F_OK):
                in_fits = fits.open(one_image, 'readonly')
                wht_data.append(in_fits[0].data)
                in_fits.close()
            else:
                _log.info("{0:s} not found/created by drizzle"
                          "...skipping it.".format(one_image))

        if len(sci_data) != len(wht_data):
            _log.info("The number of single_sci images created by "
                      "drizzle does not match the number of single_wht"
                      " files created!")
            raise aXeError("drizzle error")

        weight_mask_list = []

        # added the except so that if the image area contains only
        # zeros then the zero value is returned which is better for later
        # processing
        # we dont understand why the original lower=1e-8 value was
        # supplied unless it was for the case of spectral in the normal
        # field of view see #1110
        for wht_arr in wht_data:
            try:
                tmp_mean_value = self.combine_maskpt * ImageStats(wht_arr, lower=1e-8, lsig=None, usig=None, fields="mean", nclip=0).mean
            except (ValueError, AttributeError):
                tmp_mean_value = 0.
                _log.info("tmp_mean_value set to 0 because no good "
                          "pixels found; {0:s}".format(self.ext_names["MEF"]))
            # BUGFIX: narrowed the bare `except:` so that KeyboardInterrupt
            # and SystemExit are no longer swallowed here.
            except Exception:
                tmp_mean_value = 0.
                _log.info("tmp_mean_value set to 0; possible uncaught "
                          "exception in dither.py; {0:s}"
                          .format(self.ext_names["MEF"]))

            # mask pixels whose weight falls below the scaled mean
            weight_mask = np.zeros(wht_arr.shape, dtype=np.uint8)
            np.putmask(weight_mask, np.less(wht_arr, tmp_mean_value), 1)
            weight_mask_list.append(weight_mask)

        if len(sci_data) < 2:
            # median of < 2 images is meaningless; fall back to 'minimum'
            _log.info('\nNumber of images to flatten: %i!' % len(sci_data))
            _log.info('Set combine type to "minimum"!')
            self.combine_type = 'minimum'

        if (self.combine_type == "minmed"):
            # Create the combined array object using the minmed algorithm
            result = minmed(sci_data,  # list of input data to be combined.
                            wht_data,  # list of input data weight images to be combined.
                            self.input_data['rdn_vals'],  # list of readnoise values to use for the input images.
                            self.input_data['exp_vals'],  # list of exposure times to use for the input images.
                            self.input_data['sky_vals'],  # list of image background values to use for the input images
                            weightMaskList=weight_mask_list,  # list of input data weight masks to use for pixel rejection.
                            combine_grow=self.combine_grow,  # Radius (pixels) for neighbor rejection
                            combine_nsigma1=self.combine_nsigma1,  # Significance for accepting minimum instead of median
                            combine_nsigma2=self.combine_nsigma2  # Significance for accepting minimum instead of median
                            )
        else:
            # Create the combined array object using the numcombine task
            result = numCombine(sci_data,
                                numarrayMaskList=weight_mask_list,
                                combinationType=self.combine_type,
                                nlow=self.combine_nlow,
                                nhigh=self.combine_nhigh,
                                upper=self.combine_hthresh,
                                lower=self.combine_lthresh
                                )

        hdu = fits.PrimaryHDU(result.combArrObj)
        hdulist = fits.HDUList([hdu])
        hdulist[0].header['EXPTIME'] = (self.input_data['exp_tot'],
                                        'total exposure time')
        hdulist.writeto(self.median_image)

        # delete the various arrays
        # NOTE: the per-item `del` only unbinds the loop name; the list-level
        # `del` is what actually releases the references.
        for one_item in sci_data:
            del one_item
        del sci_data
        for one_item in wht_data:
            del one_item
        del wht_data
        for one_item in weight_mask_list:
            del one_item
        del weight_mask_list
class Blot:
    """
    Class to wrap the blot command (drizzled image back to a distorted frame).
    """

    def __init__(self):
        pass

    def run(self, in_data, out_data, out_nx, out_ny,
            drizzle_params):
        """Blot `in_data`, writing the result to `out_data`.

        BUGFIX: the previous call passed keyword arguments
        (out_nx/out_ny/interp) that `_a_blot_image` does not accept, and
        referenced the undefined name `interpol`, so this method always
        raised.  out_nx/out_ny are kept for interface compatibility but are
        unused by the underlying do_blot wrapper.
        """
        # NOTE(review): `_a_blot_image` needs a reference flt image for the
        # target WCS, but this interface provides none; `in_data` is used as
        # its own reference here — confirm the intended source image.
        self._a_blot_image(in_data,
                           in_data,
                           out_data,
                           sinscl=drizzle_params['blot_sinscl'],
                           interp=drizzle_params.get('blot_interp', 'poly5'))

    def _a_blot_image(self,
                      image_to_blot,
                      flt_image,
                      blotted_output,
                      sinscl=1.0,
                      interp='poly5'):
        """
        Blot one image.

        Thats just a simple wrapper around the task blot in astrodrizzle.

        BUGFIX: the defaults were previously `sinscl=sinscl, interp=interp`,
        self-references that raised NameError the moment the class body was
        executed; they are literal values now.

        Parameters
        ----------
        image_to_blot: str
            the input image name, either the grism or direct drizzled image
        flt_image: str
            image providing the target (distorted) WCS and header
        blotted_output: str
            the name of the output blotted image
        sinscl: float
            scale for sinc interpolation
        interp: str
            interpolation method passed to do_blot
        """
        try:
            blot_header = fits.getheader(image_to_blot)
            # NOTE(review): HSTWCS is never imported in this module
            # (expected from stwcs.wcsutil) — confirm the missing import.
            blot_wcs = HSTWCS(image_to_blot)  # assume simple
            image_data = fits.getdata(image_to_blot)
            flt_header = fits.getheader(flt_image)
            flt_wcs = HSTWCS(flt_image)
        except Exception:
            # BUGFIX: previously `return IOError(...)` handed the exception
            # object back to the caller instead of raising it.
            raise IOError("File type of fits image is not "
                          "supported {0:s}".format(image_to_blot))

        # outimage is just the data array
        outimage = ablot.do_blot(image_data.astype(np.float32),
                                 blot_wcs,
                                 flt_wcs,
                                 1.,
                                 interp=interp,
                                 # BUGFIX: was the hard-coded literal 1.,
                                 # which silently ignored the parameter.
                                 sinscl=sinscl,
                                 coeffs=True,
                                 wcsmap=None,
                                 stepsize=10)

        try:
            newimage = fits.PrimaryHDU()
            newimage.data = outimage
            newimage.header = flt_header
            newimage.header.update(flt_wcs.to_header())
            newimage.verify('silentfix')
            newimage.writeto(blotted_output)
        except Exception:
            raise IOError("Problem writing fits image {0:s}".format(blotted_output))
class Deriv:
    """
    Class for the deriv-command.

    Computes the "absolute derivative" of an image: for every pixel, the
    maximum absolute difference against its four direct neighbours
    (port of Multidrizzle's deriv step).
    """

    def __init__(self):
        """Initializes the class."""
        pass

    def _absoluteSubtract(self, array, tmpArray, outArray):
        """
        Subtract the absolute value of two images.

        Accumulates |array - tmpArray| into outArray via element-wise max
        and returns a zeroed scratch array plus the updated maximum.
        """
        # subtract shifted image from input image
        tmpArray = array - tmpArray
        # take the absolute value of tmpArray
        tmpArray = np.fabs(tmpArray)
        # save maximum value of outArray or tmpArray and save in outArray
        outArray = np.maximum(tmpArray, outArray)
        # zero out tmpArray before reuse
        tmpArray = tmpArray * 0.

        return (tmpArray, outArray)

    def _qderiv(self, array):
        """
        Take the absolute derivative of an image in memory.

        Returns a float32 array of the same shape; border rows/columns that
        are never reached by the +/-1 shifts stay zero.
        """
        # Create 2 empty arrays in memory of the same dimensions as 'array'
        tmpArray = np.zeros(array.shape, dtype=np.float64)
        outArray = np.zeros(array.shape, dtype=np.float64)

        # Get the length of an array side
        (naxis1, naxis2) = array.shape

        # Main derivative loop:
        # Shift images +/- 1 in Y.
        for y in range(-1, 2, 2):
            if y == -1:
                # shift input image 1 pixel right
                tmpArray[0:(naxis1-1), 1:(naxis2-1)] = array[0:(naxis1-1),
                                                             0:(naxis2-2)]
            else:
                # shift input image 1 pixel left
                tmpArray[0:(naxis1-1), 0:(naxis2-2)] = array[0:(naxis1-1),
                                                             1:(naxis2-1)]

            # subtract the arrays
            (tmpArray, outArray) = self._absoluteSubtract(array,
                                                          tmpArray,
                                                          outArray)

        # Shift images +/- 1 in X.
        for x in range(-1, 2, 2):
            if x == -1:
                # shift input image 1 pixel right
                tmpArray[1:(naxis1-1), 0:(naxis2-1)] = array[0:(naxis1-2),
                                                             0:(naxis2-1)]
            else:
                # shift input image 1 pixel left
                tmpArray[0:(naxis1-2), 0:(naxis2-1)] = array[1:(naxis1-1),
                                                             0:(naxis2-1)]

            # subtract the arrays
            (tmpArray, outArray) = self._absoluteSubtract(array,
                                                          tmpArray,
                                                          outArray)

        # delete the tmp-array
        del tmpArray

        # return the result
        return outArray.astype(np.float32)

    def run(self, in_name, out_name):
        """Code stolen from Multidrizzle.deriv().

        Reads `in_name`, writes the absolute-derivative image to `out_name`.
        """
        # store the names
        self.in_name = in_name
        self.out_name = out_name

        # make sure the input image exists
        if not os.path.isfile(self.in_name):
            # complain and out if not
            err_msg = "Image missing: %s!" % self.in_name
            raise aXeError(err_msg)

        # delete output name if existing
        if os.path.isfile(self.out_name):
            os.unlink(self.out_name)

        # BUGFIX: the name was previously passed as a second positional
        # argument with no %-placeholder in the message, which makes
        # logging's lazy formatting fail; use a proper lazy %s instead.
        _log.info("Running quickDeriv on %s", self.in_name)

        # OPEN THE INPUT IMAGE IN READ ONLY MODE
        img = fits.open(self.in_name, mode='readonly', memmap=0)

        # calling qderiv with the assumption that the
        # input file is a simple FITS file.
        absderiv = quickDeriv.qderiv(img["PRIMARY"].data)

        # WRITE THE OUTPUT IMAGE TO A FITS FILE
        outfile = fits.open(self.out_name, 'append')
        outhdu = fits.PrimaryHDU(data=absderiv)
        outfile.append(outhdu)

        # CLOSE THE IMAGE FILES
        outfile.close()
        img.close()
        del outfile
        del img
class CRIdent:
def __init__(self, drizzle_params):
"""Initializes the class. """
self.driz_cr_scale = (float(drizzle_params['driz_cr_scale'].split()[0]),
float(drizzle_params['driz_cr_scale'].split()[1]))
self.driz_cr_snr = (float(drizzle_params['driz_cr_snr'].split()[0]),
float(drizzle_params['driz_cr_snr'].split()[1]))
self.driz_cr_grow = int(drizzle_params['driz_cr_grow'])
self.driz_cr_ctegrow = 0
# store the readout noise
self.rdnoise = drizzle_params['RDNOISE']
def _identify_crr(self, in_img, blot_img, blotder_img, exptime, sky_val):
"""Identify cosmic rays and other deviant pixels.
The code was taken from muldidrizzle.DrizCR. Small adjustments and
re-factoring was done.
"""
# create an empty file
__crMask = np.zeros(in_img.shape, dtype=np.uint8)
# Part 1 of computation:
# flag the central pixels
# Create a temp array mask
__t1 = np.absolute(in_img - blot_img)
__ta = np.sqrt(np.absolute(blot_img * exptime
+ sky_val * exptime) +
self.rdnoise*self.rdnoise)
__t2 = self.driz_cr_scale[0] * blotder_img + self.driz_cr_snr[0] * __ta / exptime
__tmp1 = np.logical_not(np.greater(__t1, __t2))
# mop up
del __ta
del __t1
del __t2
# Create a convolution kernel that is 3 x 3 of 1's
__kernel = np.ones((3, 3), dtype=np.uint8)
# Create an output tmp file the same size as the input temp mask array
__tmp2 = np.zeros(__tmp1.shape, dtype=np.int16)
# Convolve the mask with the kernel
convolve.convolve2d(__tmp1,
__kernel,
output=__tmp2,
fft=0,
mode='nearest',
cval=0)
del __kernel
del __tmp1
# Part 2 of computation
# flag the neighboring pixels
# Create the CR Mask
__xt1 = np.absolute(in_img - blot_img)
__xta = np.sqrt(np.absolute(blot_img * exptime +
sky_val * exptime) +
self.rdnoise*self.rdnoise)
__xt2 = self.driz_cr_scale[1] * blotder_img + self.driz_cr_snr[1] * __xta / exptime
# It is necessary to use a bitwise 'and' to create the mask with numarray objects.
__crMask = np.logical_not(np.greater(__xt1, __xt2) & np.less(__tmp2,9) )
del __xta
del __xt1
del __xt2
del __tmp2
# Part 3 of computation - flag additional cte 'radial'
# and 'tail' pixels surrounding CR pixels as CRs
# In both the 'radial' and 'length' kernels below, 0->good and
# 1->bad, so that upon
# convolving the kernels with __crMask, the convolution
# output will have low->bad and high->good
# from which 2 new arrays are created having 0->bad and 1->good.
# These 2 new arrays are then 'anded'
# to create a new __crMask.
# recast __crMask to int for manipulations below;
# will recast to Bool at end
__crMask_orig_bool = __crMask.copy()
__crMask = __crMask_orig_bool.astype(np.int8)
# make radial convolution kernel and convolve it with original __crMask
# kernel for radial masking of CR pixel
cr_grow_kernel = np.ones((self.driz_cr_grow, self.driz_cr_grow))
cr_grow_kernel_conv = __crMask.copy() # for output of convolution
convolve.convolve2d(__crMask,
cr_grow_kernel,
output=cr_grow_kernel_conv)
# make tail convolution kernel and convolve it with original __crMask
cr_ctegrow_kernel = np.zeros((2*self.driz_cr_ctegrow+1,
2*self.driz_cr_ctegrow+1)) # kernel for tail masking of CR pixel
cr_ctegrow_kernel_conv = __crMask.copy() # for output convolution
# which pixels are masked by tail kernel depends on sign of
# ctedir (i.e., readout direction):
ctedir = 0
if (ctedir == 1): # HRC: amp C or D ; WFC: chip = sci,1 ; WFPC2
cr_ctegrow_kernel[0:ctegrow, ctegrow] = 1 # 'positive' direction
if (ctedir == -1): # HRC: amp A or B ; WFC: chip = sci,2
cr_ctegrow_kernel[ctegrow+1:2*ctegrow+1, ctegrow ] = 1 #'negative' direction
if (ctedir == 0): # NICMOS: no cte tail correction
pass
# do the convolution
convolve.convolve2d(__crMask, cr_ctegrow_kernel, output = cr_ctegrow_kernel_conv)
# select high pixels from both convolution outputs; then 'and' them to create new __crMask
where_cr_grow_kernel_conv = np.where(cr_grow_kernel_conv < self.driz_cr_grow*self.driz_cr_grow,0,1 ) # radial
where_cr_ctegrow_kernel_conv = np.where(cr_ctegrow_kernel_conv < self.driz_cr_ctegrow, 0, 1 ) # length
__crMask = np.logical_and(where_cr_ctegrow_kernel_conv, where_cr_grow_kernel_conv) # combine masks
__crMask = __crMask.astype(np.uint8) # cast back to Bool
del __crMask_orig_bool
del cr_grow_kernel
del cr_grow_kernel_conv
del cr_ctegrow_kernel
del cr_ctegrow_kernel_conv
del where_cr_grow_kernel_conv
del where_cr_ctegrow_kernel_conv
# get back the result
return __crMask
def _createcrmaskfile(self, crName = None, crmask = None, header = None, in_imag=None):
    """
    Write the generated cosmic-ray mask to a FITS file.

    Parameters
    ----------
    crName : str
        Output file name; an existing file of that name is overwritten.
    crmask : ndarray
        Cosmic-ray mask; nonzero/True marks a CR-flagged pixel.
    header : FITS header or None
        Header copied into the primary HDU after stripping
        extension-specific keywords; if None a minimal header is used.
    in_imag : ndarray
        Input science image; retained for interface compatibility
        (its shape previously sized a scratch array that is no longer
        needed).
    """
    # encode the mask as 0/1 uint8 (the dead np.zeros pre-allocation
    # that was immediately overwritten has been removed)
    _cr_file = np.where(crmask, 1, 0).astype(np.uint8)

    # remove the output file if it already exists
    if os.path.isfile(crName):
        os.unlink(crName)

    # create the output file
    fitsobj = fits.HDUList()
    if header is not None:
        # strip keywords that are invalid in a primary HDU;
        # guard each deletion so a missing keyword cannot raise KeyError
        for key in ('NAXIS1', 'NAXIS2', 'XTENSION', 'EXTNAME', 'EXTVER'):
            if key in header:
                del header[key]
        if 'NEXTEND' in header:
            header['NEXTEND'] = 0
        hdu = fits.PrimaryHDU(data=_cr_file, header=header)
        # PCOUNT/GCOUNT only belong in extension HDUs
        for key in ('PCOUNT', 'GCOUNT'):
            if key in hdu.header:
                del hdu.header[key]
    else:
        hdu = fits.PrimaryHDU(data=_cr_file)
    fitsobj.append(hdu)
    fitsobj.writeto(crName)

    # close the fits image and release the buffers
    fitsobj.close()
    del fitsobj
    del _cr_file
def run(self, in_image, blot_image, blotder_image, exptime, sky_val, crr_image):
    """
    Identify cosmic rays in an input image and write the CR mask file.

    Parameters
    ----------
    in_image : str
        File name of the input (science) image.
    blot_image : str
        File name of the blotted median image.
    blotder_image : str
        File name of the blotted-image derivative.
    exptime : float
        Exposure time forwarded to the CR identification.
    sky_val : float
        Sky background value forwarded to the CR identification.
    crr_image : str
        Output file name for the cosmic-ray mask.
    """
    # context managers guarantee all three files are closed even if
    # the identification or mask-writing step raises
    with fits.open(in_image, 'readonly') as inImage, \
         fits.open(blot_image, 'readonly') as blotImage, \
         fits.open(blotder_image, 'readonly') as blotDerImage:
        # identify the CR's
        crr_data = self._identify_crr(inImage[0].data,
                                      blotImage[0].data,
                                      blotDerImage[0].data,
                                      exptime, sky_val)
        # save the mask, reusing the input image's header and shape
        self._createcrmaskfile(crr_image,
                               crr_data,
                               inImage[0].header,
                               inImage[0].data)
        # delete the array
        del crr_data
|
spacetelescopeREPO_NAMEhstaxePATH_START.@hstaxe_extracted@hstaxe-main@hstaxe@axesrc@dither.py@.PATH_END.py
|
{
"filename": "_textinfo.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/funnelarea/_textinfo.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextinfoValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Flaglist validator for the ``textinfo`` property of ``funnelarea`` traces."""

    def __init__(self, plotly_name="textinfo", parent_name="funnelarea", **kwargs):
        # Pull the defaults out of kwargs first so explicit caller
        # overrides take precedence over the hard-coded values.
        edit_type = kwargs.pop("edit_type", "calc")
        extras = kwargs.pop("extras", ["none"])
        flags = kwargs.pop("flags", ["label", "text", "value", "percent"])
        super(TextinfoValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            extras=extras,
            flags=flags,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@funnelarea@_textinfo.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.