{
"filename": "classifier.py",
"repo_name": "loostrum/darc",
"repo_path": "darc_extracted/darc-master/darc/processor_tools/classifier.py",
"type": "Python"
}
#!/usr/bin/env python3
import os
import socket
from argparse import Namespace
from time import sleep
from queue import Empty
import multiprocessing as mp
import threading
import yaml
import numpy as np
import h5py
from darc.definitions import CONFIG_FILE
from darc.logger import get_queue_logger
# silence the tensorflow logger
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
class Classifier(mp.Process):
"""
Classify candidates from HDF5 files produced by Extractor
"""
def __init__(self, log_queue, input_queue, conn, config_file=CONFIG_FILE, obs_name=''):
"""
:param Queue log_queue: Queue to use for logging
:param Queue input_queue: Input queue for triggers
:param Connection conn: Pipe connection to send output to
:param str config_file: Path to config file
:param str obs_name: Observation name to use in log messages
"""
super(Classifier, self).__init__()
module_name = type(self).__module__.split('.')[-1]
self.logger = get_queue_logger(module_name, log_queue)
self.input_queue = input_queue
self.conn = conn
self.obs_name = obs_name
# load config
self.config_file = config_file
self.config = self._load_config()
# create stop event
self.stop_event = mp.Event()
self.input_empty = False
self.model_freqtime = None
self.model_dmtime = None
        self.data_freq_time = None
        self.data_dm_time = None
self.nfreq_data = None
self.ndm_data = None
self.ntime_data = None
self.candidates_to_visualize = []
self.tf = None
def _load_tensorflow(self):
"""
Load tensorflow into local namespace
"""
# import tensorflow here as apparently it isn't fork-safe, and results
# in a "Could not retrieve CUDA device count" error when
# this Process is forked from another Process
import tensorflow
self.tf = tensorflow
# set GPU visible to classifier
os.environ['CUDA_VISIBLE_DEVICES'] = str(self.config.gpu)
# set memory growth parameter to avoid allocating all GPU memory
# only one GPU is visible, so always selecting first GPU is fine
# this is only available on tensorflow >= 2.0
if int(self.tf.__version__[0]) >= 2:
gpu = self.tf.config.experimental.list_physical_devices('GPU')[0]
self.tf.config.experimental.set_memory_growth(gpu, True)
# also silence the logger even more
self.tf.get_logger().setLevel('ERROR')
else:
# for TF 1.X, create a session with the required growth parameter
tf_config = self.tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
self.tf.Session(config=tf_config)
def run(self):
"""
Main loop
"""
self.logger.info(f"{self.obs_name}Starting classifier thread")
        # initialize tensorflow and models
self._load_tensorflow()
self._init_models()
do_stop = False
while not self.stop_event.is_set():
# read file paths from input queue
try:
fname = self.input_queue.get(timeout=.1)
except Empty:
self.input_empty = True
if do_stop:
# run stop in a thread, so processing can continue
self.logger.debug(f"{self.obs_name}Running stop")
thread = threading.Thread(target=self.stop)
thread.daemon = True
thread.start()
# then set do_stop to false, so it is not run a second time
do_stop = False
continue
else:
self.input_empty = False
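                # 'stop' is a sentinel: the parent process puts this string on
                # the input queue to signal that no more candidates will arrive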
if fname == 'stop':
do_stop = True
else:
# do classification
self._classify(fname)
self.logger.info(f"{self.obs_name}Stopping classifier thread")
# send list of candidates to visualize to parent process
self.conn.send(self.candidates_to_visualize)
self.conn.close()
def stop(self):
"""
Stop this thread
"""
# wait until the input queue is empty
if not self.input_empty:
self.logger.debug(f"{self.obs_name}Classifier waiting to finish processing")
while not self.input_empty:
sleep(1)
# then stop
self.stop_event.set()
def _load_config(self):
"""
Load configuration
"""
with open(self.config_file, 'r') as f:
config = yaml.load(f, Loader=yaml.SafeLoader)['processor']['classifier']
# set config, expanding strings
kwargs = {'home': os.path.expanduser('~'), 'hostname': socket.gethostname()}
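        # hypothetical example: a config value like '{home}/keras_models' is
        # expanded by str.format below to e.g. '/home/user/keras_models'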
for key, value in config.items():
if isinstance(value, str):
config[key] = value.format(**kwargs)
# replace any -1 by infinity
elif value == -1:
config[key] = np.inf
# return as Namespace so the keys can be accessed as attributes
return Namespace(**config)
def _init_models(self):
"""
Load the keras models
"""
        # initialise analysis tools
self.model_freqtime = self.tf.keras.models.load_model(os.path.join(self.config.model_dir,
self.config.model_freqtime))
self.model_dmtime = self.tf.keras.models.load_model(os.path.join(self.config.model_dir,
self.config.model_dmtime))
# The model's first prediction takes longer
# pre-empt this by classifying an array of zeros before looking at real data
self.model_freqtime.predict(np.zeros([1, self.config.nfreq, self.config.ntime, 1]))
self.model_dmtime.predict(np.zeros([1, self.config.ndm, self.config.ntime, 1]))
def _classify(self, fname):
"""
Classify a candidate
:param str fname: Path to HDF5 file containing candidate data and metadata
"""
# load data
with h5py.File(fname, 'r') as f:
self.data_freq_time = f['data_freq_time'][:]
self.data_dm_time = f['data_dm_time'][:]
self.nfreq_data = f.attrs['nfreq']
self.ntime_data = f.attrs['ntime']
self.ndm_data = f.attrs['ndm']
# prepare data: verify shape and scale as needed
# returns False if something failed
if not self._prepare_data():
return
# classify
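        # the models are assumed to return per-class scores; [0, 1] selects the
        # probability of the second class (real candidate) for the single sample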
prob_freqtime = self.model_freqtime.predict(self.data_freq_time)[0, 1]
prob_dmtime = self.model_dmtime.predict(self.data_dm_time)[0, 1]
self.logger.debug(f"{self.obs_name}Probabilities: freqtime={prob_freqtime:.2f}, dmtime={prob_dmtime:.2f}, "
f"fname={os.path.basename(fname)}")
# append the probabilities to the file
with h5py.File(fname, 'a') as f:
f.attrs.create('prob_freqtime', data=prob_freqtime)
f.attrs.create('prob_dmtime', data=prob_dmtime)
# if the probabilities are above threshold, store the file path
if (prob_freqtime > self.config.thresh_freqtime) and (prob_dmtime > self.config.thresh_dmtime):
self.candidates_to_visualize.append(fname)
def _prepare_data(self):
"""
        Verify data shape and downsample as needed
:return: success (bool)
"""
# verify shapes and downsample if needed
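        # the downsampling below uses a reshape-and-average trick: an axis of
        # length target * factor is reshaped to (target, factor) and averaged
        # over the factor axis, e.g. 64 channels -> (32, 2) -> 32 channels.
        # NB: the divmod quotient is named 'modulo' here, but it is the
        # downsampling factor; the remainder is used only as a sanity check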
# frequency axis
if self.nfreq_data != self.config.nfreq:
modulo, remainder = divmod(self.nfreq_data, self.config.nfreq)
if remainder != 0:
self.logger.error(f"{self.obs_name}Data nfreq {self.nfreq_data} must be multiple of "
f"model nfreq {self.config.nfreq}")
return False
# reshape the frequency axis
self.logger.debug(f"{self.obs_name}Reshaping freq from {self.nfreq_data} to {self.config.nfreq}")
self.data_freq_time = self.data_freq_time.reshape(self.config.nfreq, modulo, -1).mean(axis=1)
# dm axis
if self.ndm_data != self.config.ndm:
modulo, remainder = divmod(self.ndm_data, self.config.ndm)
if remainder != 0:
self.logger.error(f"{self.obs_name}Data ndm {self.ndm_data} must be multiple of "
f"model ndm {self.config.ndm}")
return False
# reshape the dm axis
self.logger.debug(f"{self.obs_name}Reshaping dm from {self.ndm_data} to {self.config.ndm}")
            self.data_dm_time = self.data_dm_time.reshape(self.config.ndm, modulo, -1).mean(axis=1)
# time axis
if self.ntime_data != self.config.ntime:
modulo, remainder = divmod(self.ntime_data, self.config.ntime)
if remainder != 0:
self.logger.error(f"{self.obs_name}Data ntime {self.ntime_data} must be multiple of "
f"model ntime {self.config.ntime}")
return False
# reshape the time axis of both data_freq_time and data_dm_time
self.logger.debug(f"{self.obs_name}Reshaping time from {self.ntime_data} to {self.config.ntime}")
self.data_freq_time = self.data_freq_time.reshape(self.config.nfreq,
self.config.ntime, modulo).mean(axis=2)
self.data_dm_time = self.data_dm_time.reshape(self.config.ndm,
self.config.ntime, modulo).mean(axis=2)
# scale data and add required axis for classifier
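        # [None, ..., None] inserts the batch and channel axes, turning a
        # (nfreq, ntime) array into the (1, nfreq, ntime, 1) shape Keras expects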
self.data_freq_time -= np.median(self.data_freq_time)
# silence the potential runtime warning due to divide-by-zero
with np.errstate(invalid='ignore'):
self.data_freq_time /= np.std(self.data_freq_time)
self.data_freq_time[np.isnan(self.data_freq_time)] = 0.
self.data_freq_time = self.data_freq_time[None, ..., None]
self.data_dm_time -= np.median(self.data_dm_time)
self.data_dm_time /= np.std(self.data_dm_time)
self.data_dm_time = self.data_dm_time[None, ..., None]
return True
{
"filename": "style_guide.md",
"repo_name": "spacetelescope/mirage",
"repo_path": "mirage_extracted/mirage-master/style_guide/style_guide.md",
"type": "Markdown"
}
Python Code Style Guide for `mirage`
===================================
This document serves as a style guide for all `mirage` software development. Any requested contribution to the `mirage` code repository should be checked against this guide, and any violation of the guide should be fixed before the code is committed to
the `master` branch. Please refer to the accompanying [`example.py`](https://github.com/spacetelescope/mirage/blob/master/style_guide/example.py) script for an example that abides by this style guide.
Prerequisite Reading
--------------------
It is assumed that the reader of this style guide has read and is familiar with the following:
- The [PEP8 Style Guide for Python Code](https://www.python.org/dev/peps/pep-0008/)
- The [PEP257 Docstring Conventions Style Guide](https://www.python.org/dev/peps/pep-0257/)
- The [`numpydoc` docstring convention](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt)
Workflow
--------
All software development for the `mirage` project should follow a continuous integration workflow. Before committing any code changes, use `flake8` to check the code against `PEP8` standards. Also check that your code is conforming to this style guide.
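For example, the same check can be run from Python using `flake8`'s legacy API (a minimal sketch; the path is illustrative):

```python
from flake8.api import legacy as flake8

# collect PEP8 violations for a file before committing
style_guide = flake8.get_style_guide()
report = style_guide.check_files(['mirage/example.py'])
if report.total_errors > 0:
    print('Fix the reported PEP8 violations before committing.')
```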
Version Numbers and Tags
------------------------
Any changes pushed to the `master` branch should be tagged with a version number. The version number convention is `x.y.z`, where
- `x` = The main version number. Increase when making incompatible API changes.
- `y` = The feature number. Increase when a change contains a new feature, with or without bug fixes.
- `z` = The hotfix number. Increase when a change contains only bug fixes.
Currently, the version number is set in `setup.py`. Updating the version number should be one of the last things you change prior to merging a
feature branch into `master`. The branch's author and reviewer should agree on the version number prior to merging.
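As an illustration (a hypothetical sketch, not the actual `mirage` `setup.py`), the version number could be defined as:

```python
# setup.py (hypothetical excerpt)
from setuptools import setup

VERSION = '1.2.3'  # x.y.z = main.feature.hotfix

setup(name='mirage',
      version=VERSION)
```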
Security
--------
The following items should never be committed in the `mirage` source code or GitHub issues/pull requests:
- Account credentials of any kind (e.g. database usernames and passwords)
- Internal directory structures or filepaths
- Machine names
- Proprietary data
`mirage`-specific Code Standards
------------------------------
`mirage` code shall adhere to the `PEP8` conventions save for the following exceptions:
- Lines of code need not be restricted to 79 characters. However, it is encouraged to break up obnoxiously long lines into several lines if it benefits the overall readability of the code.
Additionally, the code shall adhere to the following special guidelines:
- Function and class definitions should be placed in alphabetical order in the module
- It is encouraged to annotate variables and functions using the [`typing`](https://docs.python.org/3/library/typing.html) module (see [PEP 483](https://www.python.org/dev/peps/pep-0483/), [PEP 484](https://www.python.org/dev/peps/pep-0484/), and [PEP 526](https://www.python.org/dev/peps/pep-0526/)).
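For example, a minimal sketch of an annotated function (the names are illustrative, not part of `mirage`):

```python
from typing import List

def mean_flux(fluxes: List[float], scale: float = 1.0) -> float:
    """Return the mean of a list of flux values, multiplied by a scale factor."""
    total: float = sum(fluxes)
    return scale * total / len(fluxes)
```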
`mirage`-Specific Documentation Standards
---------------------------------------
`mirage` code shall adhere to the `PEP257` and `numpydoc` conventions. The following are further recommendations:
- Each module should have, at minimum, a description and `Authors` and `Use` sections.
- Each function/method should have, at minimum, a description, a `Parameters` section (if necessary), and a `Returns` section (if necessary).
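For example, a function docstring following `numpydoc` conventions might look like this (an illustrative sketch):

```python
def redshift_to_velocity(z: float) -> float:
    """Convert a redshift to a recession velocity using v = c * z.

    Parameters
    ----------
    z : float
        Redshift of the source.

    Returns
    -------
    float
        Recession velocity in km/s.
    """
    c = 299792.458  # speed of light in km/s
    return c * z
```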
Acknowledgements
----------------
This style guide as well as [`example.py`](https://github.com/spacetelescope/mirage/blob/master/style_guide/example.py) were adapted from those used by the [`jwql` project](https://github.com/spacetelescope/jwql).
{
"filename": "debug.py",
"repo_name": "spacetelescope/specview",
"repo_path": "specview_extracted/specview-master/proto/specviewer/qt_signals/debug.py",
"type": "Python"
}
import logging
import sys
def msg_debug(msg, cls=None):
    """Log a debug message prefixed with the calling function's name,
    and with the class name if `cls` is given.
    """
    # sys._getframe(1) is the caller's frame; f_code.co_name is its function name
    if cls is None:
        logging.debug('{}: {}'.format(sys._getframe(1).f_code.co_name,
                                      msg))
    else:
        logging.debug('{}.{}: {}'.format(cls.__name__,
                                         sys._getframe(1).f_code.co_name,
                                         msg))
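
# minimal usage sketch (illustrative; not part of the original module)
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)

    def example():
        # logs 'example: hello', since msg_debug prefixes the caller's name
        msg_debug('hello')

    example()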
{
"filename": "kpno_meta.py",
"repo_name": "HETDEX/elixer",
"repo_path": "elixer_extracted/elixer-main/elixer/kpno_meta.py",
"type": "Python"
}
KPNO_META_DICT = {
'km1203_5336.fits': {'RA_min':180.309799,'RA_max':181.386272,'Dec_min':53.224441,'Dec_max':53.920689,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1203_5336.fits'},
'km1439_5224.fits': {'RA_min':219.099152,'RA_max':220.144887,'Dec_min':52.088465,'Dec_max':52.781057,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1439_5224.fits'},
'km1426_5258.fits': {'RA_min':215.985701,'RA_max':217.049636,'Dec_min':52.654059,'Dec_max':53.351037,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1426_5258.fits'},
'km1418_5302.fits': {'RA_min':214.112870,'RA_max':215.172104,'Dec_min':52.653867,'Dec_max':53.345928,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1418_5302.fits'},
'km1252_5150.fits': {'RA_min':192.585599,'RA_max':193.626930,'Dec_min':51.520297,'Dec_max':52.209108,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1252_5150.fits'},
'km1154_5445.fits': {'RA_min':177.921689,'RA_max':179.034197,'Dec_min':54.360000,'Dec_max':55.048447,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1154_5445.fits'},
'km1259_5258.fits': {'RA_min':194.281229,'RA_max':195.378617,'Dec_min':52.623970,'Dec_max':53.340757,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1259_5258.fits'},
'km1318_5150.fits': {'RA_min':199.001192,'RA_max':200.028976,'Dec_min':51.518708,'Dec_max':52.205889,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1318_5150.fits'},
'km1150_5513.fits': {'RA_min':177.003982,'RA_max':178.122370,'Dec_min':54.908288,'Dec_max':55.602895,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1150_5513.fits'},
'km1358_5228.fits': {'RA_min':208.953394,'RA_max':209.994569,'Dec_min':52.082023,'Dec_max':52.771050,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1358_5228.fits'},
'km1157_5150.fits': {'RA_min':178.820733,'RA_max':179.878675,'Dec_min':51.520181,'Dec_max':52.215282,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1157_5150.fits'},
'km1130_5514.fits': {'RA_min':172.041184,'RA_max':173.155379,'Dec_min':54.898514,'Dec_max':55.568653,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1130_5514.fits'},
'km1337_5258.fits': {'RA_min':203.720381,'RA_max':204.779461,'Dec_min':52.675265,'Dec_max':53.364125,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1337_5258.fits'},
'km1306_5228.fits': {'RA_min':196.035199,'RA_max':197.076815,'Dec_min':52.052312,'Dec_max':52.741695,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1306_5228.fits'},
'km1237_5224.fits': {'RA_min':188.692098,'RA_max':189.734727,'Dec_min':52.074372,'Dec_max':52.767315,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1237_5224.fits'},
'km1149_5228.fits': {'RA_min':176.710942,'RA_max':177.753269,'Dec_min':52.063231,'Dec_max':52.754047,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1149_5228.fits'},
'km1106_5303.fits': {'RA_min':166.061975,'RA_max':167.120800,'Dec_min':52.646289,'Dec_max':53.339466,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1106_5303.fits'},
'km1234_5332.fits': {'RA_min':187.908166,'RA_max':188.977406,'Dec_min':53.226554,'Dec_max':53.913570,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1234_5332.fits'},
'km1305_5332.fits': {'RA_min':195.583548,'RA_max':196.661922,'Dec_min':53.214820,'Dec_max':53.911779,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1305_5332.fits'},
'km1134_5224.fits': {'RA_min':173.031888,'RA_max':174.072996,'Dec_min':52.074356,'Dec_max':52.761467,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1134_5224.fits'},
'km1217_5410.fits': {'RA_min':183.640780,'RA_max':184.737586,'Dec_min':53.779815,'Dec_max':54.474395,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1217_5410.fits'},
'km1059_5302.fits': {'RA_min':164.178806,'RA_max':165.235628,'Dec_min':52.645700,'Dec_max':53.336789,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1059_5302.fits'},
'km1111_5440.fits': {'RA_min':167.141501,'RA_max':168.241189,'Dec_min':54.352147,'Dec_max':55.039563,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1111_5440.fits'},
'km1533_5153.fits': {'RA_min':232.887648,'RA_max':233.915347,'Dec_min':51.504561,'Dec_max':52.192800,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1533_5153.fits'},
'km1212_5149.fits': {'RA_min':182.490557,'RA_max':183.519167,'Dec_min':51.514161,'Dec_max':52.203790,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1212_5149.fits'},
'km1144_5302.fits': {'RA_min':175.501501,'RA_max':176.557171,'Dec_min':52.650812,'Dec_max':53.338933,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1144_5302.fits'},
'km1102_5302.fits': {'RA_min':165.115393,'RA_max':166.170156,'Dec_min':52.646661,'Dec_max':53.335802,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1102_5302.fits'},
'km1152_5332.fits': {'RA_min':177.464316,'RA_max':178.534198,'Dec_min':53.222843,'Dec_max':53.912967,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1152_5332.fits'},
'km1057_5228.fits': {'RA_min':163.837019,'RA_max':164.891740,'Dec_min':52.070657,'Dec_max':52.772857,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1057_5228.fits'},
'km1325_5224.fits': {'RA_min':200.634038,'RA_max':201.687357,'Dec_min':52.052184,'Dec_max':52.739735,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1325_5224.fits'},
'km1336_5155.fits': {'RA_min':203.564834,'RA_max':204.599119,'Dec_min':51.529069,'Dec_max':52.218759,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1336_5155.fits'},
'km1059_5410.fits': {'RA_min':164.283551,'RA_max':165.382534,'Dec_min':53.786897,'Dec_max':54.474632,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1059_5410.fits'},
'km1110_5302.fits': {'RA_min':166.989013,'RA_max':168.054124,'Dec_min':52.647055,'Dec_max':53.334142,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1110_5302.fits'},
'km1111_5410.fits': {'RA_min':167.197050,'RA_max':168.280664,'Dec_min':53.783472,'Dec_max':54.474114,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1111_5410.fits'},
'km1340_5154.fits': {'RA_min':204.494345,'RA_max':205.525893,'Dec_min':51.521204,'Dec_max':52.213473,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1340_5154.fits'},
'km1152_5257.fits': {'RA_min':177.317770,'RA_max':178.391969,'Dec_min':52.629659,'Dec_max':53.327956,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1152_5257.fits'},
'km1115_5406.fits': {'RA_min':168.166540,'RA_max':169.254789,'Dec_min':53.783122,'Dec_max':54.476020,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1115_5406.fits'},
'km1158_5444.fits': {'RA_min':178.879997,'RA_max':179.979746,'Dec_min':54.345502,'Dec_max':55.038332,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1158_5444.fits'},
'km1452_5258.fits': {'RA_min':222.576910,'RA_max':223.646302,'Dec_min':52.646503,'Dec_max':53.341471,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1452_5258.fits'},
'km1507_5302.fits': {'RA_min':226.307567,'RA_max':227.364483,'Dec_min':52.648484,'Dec_max':53.340246,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1507_5302.fits'},
'km1441_5258.fits': {'RA_min':219.733956,'RA_max':220.789581,'Dec_min':52.652278,'Dec_max':53.343803,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1441_5258.fits'},
'km1128_5150.fits': {'RA_min':171.490952,'RA_max':172.519904,'Dec_min':51.515372,'Dec_max':52.202834,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1128_5150.fits'},
'km1103_5406.fits': {'RA_min':165.269322,'RA_max':166.346893,'Dec_min':53.783523,'Dec_max':54.475663,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1103_5406.fits'},
'km1229_5228.fits': {'RA_min':186.850001,'RA_max':187.915809,'Dec_min':52.074494,'Dec_max':52.769807,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1229_5228.fits'},
'km1134_5449.fits': {'RA_min':172.894425,'RA_max':174.089574,'Dec_min':54.380091,'Dec_max':55.123341,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1134_5449.fits'},
'km1156_5336.fits': {'RA_min':178.437153,'RA_max':179.511079,'Dec_min':53.221398,'Dec_max':53.913878,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1156_5336.fits'},
'km1143_5155.fits': {'RA_min':175.205877,'RA_max':176.236938,'Dec_min':51.545128,'Dec_max':52.236295,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1143_5155.fits'},
'km1105_5224.fits': {'RA_min':165.665545,'RA_max':166.713316,'Dec_min':52.067186,'Dec_max':52.763677,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1105_5224.fits'},
'km1155_5258.fits': {'RA_min':178.280083,'RA_max':179.359174,'Dec_min':52.626317,'Dec_max':53.333036,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1155_5258.fits'},
'km1402_5224.fits': {'RA_min':209.868637,'RA_max':210.923112,'Dec_min':52.062219,'Dec_max':52.771919,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1402_5224.fits'},
'km1523_5154.fits': {'RA_min':230.168962,'RA_max':231.197440,'Dec_min':51.522411,'Dec_max':52.209681,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1523_5154.fits'},
'km1442_5224.fits': {'RA_min':220.017894,'RA_max':221.058807,'Dec_min':52.090395,'Dec_max':52.777308,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1442_5224.fits'},
'km1209_5410.fits': {'RA_min':181.744378,'RA_max':182.832044,'Dec_min':53.778385,'Dec_max':54.470884,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1209_5410.fits'},
'km1048_5150.fits': {'RA_min':161.388923,'RA_max':162.415446,'Dec_min':51.507796,'Dec_max':52.196960,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1048_5150.fits'},
'km1129_5259.fits': {'RA_min':171.729384,'RA_max':172.784917,'Dec_min':52.678064,'Dec_max':53.367230,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1129_5259.fits'},
'km1230_5332.fits': {'RA_min':186.973599,'RA_max':188.063177,'Dec_min':53.222958,'Dec_max':53.917440,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1230_5332.fits'},
'km1230_5150.fits': {'RA_min':187.070608,'RA_max':188.101360,'Dec_min':51.518004,'Dec_max':52.211318,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1230_5150.fits'},
'km1307_5258.fits': {'RA_min':196.188139,'RA_max':197.246863,'Dec_min':52.653643,'Dec_max':53.348899,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1307_5258.fits'},
'km1032_5224.fits': {'RA_min':157.397484,'RA_max':158.453272,'Dec_min':52.088020,'Dec_max':52.789214,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1032_5224.fits'},
'km1136_5302.fits': {'RA_min':173.595262,'RA_max':174.649752,'Dec_min':52.645250,'Dec_max':53.332831,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1136_5302.fits'},
'km1256_5150.fits': {'RA_min':193.494551,'RA_max':194.525510,'Dec_min':51.517057,'Dec_max':52.210808,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1256_5150.fits'},
'km1436_5336.fits': {'RA_min':218.478323,'RA_max':219.547903,'Dec_min':53.220946,'Dec_max':53.908414,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1436_5336.fits'},
'km1507_5332.fits': {'RA_min':226.124264,'RA_max':227.201738,'Dec_min':53.221327,'Dec_max':53.915858,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1507_5332.fits'},
'km1132_5150.fits': {'RA_min':172.403251,'RA_max':173.433672,'Dec_min':51.513511,'Dec_max':52.206497,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1132_5150.fits'},
'km1110_5336.fits': {'RA_min':166.962957,'RA_max':168.034471,'Dec_min':53.216600,'Dec_max':53.909277,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1110_5336.fits'},
'km1156_5228.fits': {'RA_min':178.561178,'RA_max':179.607228,'Dec_min':52.078717,'Dec_max':52.772871,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1156_5228.fits'},
'km1326_5410.fits': {'RA_min':201.021641,'RA_max':202.107203,'Dec_min':53.784701,'Dec_max':54.477209,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1326_5410.fits'},
'km1345_5258.fits': {'RA_min':205.615499,'RA_max':206.674473,'Dec_min':52.662305,'Dec_max':53.352731,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1345_5258.fits'},
'km1033_5258.fits': {'RA_min':157.591558,'RA_max':158.649322,'Dec_min':52.655724,'Dec_max':53.347287,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1033_5258.fits'},
'km1123_5406.fits': {'RA_min':170.090691,'RA_max':171.182050,'Dec_min':53.781624,'Dec_max':54.511731,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1123_5406.fits'},
'km1343_5332.fits': {'RA_min':205.143474,'RA_max':206.213352,'Dec_min':53.232718,'Dec_max':53.921163,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1343_5332.fits'},
'km1420_5228.fits': {'RA_min':214.468931,'RA_max':215.515532,'Dec_min':52.078187,'Dec_max':52.773373,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1420_5228.fits'},
'km1329_5302.fits': {'RA_min':201.828847,'RA_max':202.905831,'Dec_min':52.629856,'Dec_max':53.325385,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1329_5302.fits'},
'km1336_5154.fits': {'RA_min':203.488747,'RA_max':204.553419,'Dec_min':51.482118,'Dec_max':52.172401,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1336_5154.fits'},
'km1351_5150.fits': {'RA_min':207.243144,'RA_max':208.282914,'Dec_min':51.530534,'Dec_max':52.210668,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1351_5150.fits'},
'km1133_5258.fits': {'RA_min':172.656000,'RA_max':173.716084,'Dec_min':52.642703,'Dec_max':53.335278,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1133_5258.fits'},
'km1238_5336.fits': {'RA_min':188.906085,'RA_max':189.980702,'Dec_min':53.222675,'Dec_max':53.917810,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1238_5336.fits'},
'km1129_5331.fits': {'RA_min':171.741844,'RA_max':172.824311,'Dec_min':53.217912,'Dec_max':53.911518,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1129_5331.fits'},
'km1146_5516.fits': {'RA_min':176.013592,'RA_max':177.127732,'Dec_min':54.901422,'Dec_max':55.568115,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1146_5516.fits'},
'km1515_5150.fits': {'RA_min':228.317550,'RA_max':229.346115,'Dec_min':51.522805,'Dec_max':52.211129,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1515_5150.fits'},
'km1136_5150.fits': {'RA_min':173.355607,'RA_max':174.431599,'Dec_min':51.535170,'Dec_max':52.224750,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1136_5150.fits'},
'km1146_5150.fits': {'RA_min':176.063907,'RA_max':177.092077,'Dec_min':51.534806,'Dec_max':52.223393,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1146_5150.fits'},
'km1442_5150.fits': {'RA_min':220.074862,'RA_max':221.103479,'Dec_min':51.525129,'Dec_max':52.212583,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1442_5150.fits'},
'km1257_5332.fits': {'RA_min':193.686046,'RA_max':194.759960,'Dec_min':53.218607,'Dec_max':53.911899,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1257_5332.fits'},
'km1219_5154.fits': {'RA_min':184.315142,'RA_max':185.367403,'Dec_min':51.523202,'Dec_max':52.210606,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1219_5154.fits'},
'km1207_5258.fits': {'RA_min':181.096040,'RA_max':182.183756,'Dec_min':52.622717,'Dec_max':53.334956,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1207_5258.fits'},
'km1526_5336.fits': {'RA_min':230.904185,'RA_max':232.051981,'Dec_min':53.223809,'Dec_max':53.915269,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1526_5336.fits'},
'km1244_5227.fits': {'RA_min':190.614998,'RA_max':191.707038,'Dec_min':52.059242,'Dec_max':52.747963,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1244_5227.fits'},
'km1044_5302.fits': {'RA_min':160.409206,'RA_max':161.464079,'Dec_min':52.646908,'Dec_max':53.333920,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1044_5302.fits'},
'km1311_5154.fits': {'RA_min':197.166681,'RA_max':198.194856,'Dec_min':51.520262,'Dec_max':52.208752,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1311_5154.fits'},
'km1043_5228.fits': {'RA_min':160.156422,'RA_max':161.211864,'Dec_min':52.075611,'Dec_max':52.766579,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1043_5228.fits'},
'km1127_5228.fits': {'RA_min':171.189371,'RA_max':172.233124,'Dec_min':52.063693,'Dec_max':52.762752,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1127_5228.fits'},
'km1112_5224.fits': {'RA_min':167.506703,'RA_max':168.549627,'Dec_min':52.066097,'Dec_max':52.758731,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1112_5224.fits'},
'km1238_5150.fits': {'RA_min':188.912976,'RA_max':189.940865,'Dec_min':51.522375,'Dec_max':52.212386,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1238_5150.fits'},
'km1234_5150.fits': {'RA_min':187.994266,'RA_max':189.022854,'Dec_min':51.518722,'Dec_max':52.209466,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1234_5150.fits'},
'km1351_5224.fits': {'RA_min':207.077322,'RA_max':208.149927,'Dec_min':52.083816,'Dec_max':52.810884,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1351_5224.fits'},
'km1126_5444.fits': {'RA_min':171.061269,'RA_max':172.162165,'Dec_min':54.351616,'Dec_max':55.041563,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1126_5444.fits'},
'km1114_5518.fits': {'RA_min':168.062546,'RA_max':169.214297,'Dec_min':54.916271,'Dec_max':55.619938,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1114_5518.fits'},
'km1213_5406.fits': {'RA_min':182.679142,'RA_max':183.759025,'Dec_min':53.783394,'Dec_max':54.457653,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1213_5406.fits'},
'km1226_5332.fits': {'RA_min':186.045395,'RA_max':187.114402,'Dec_min':53.225866,'Dec_max':53.914867,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1226_5332.fits'},
'km1403_5302.fits': {'RA_min':210.341271,'RA_max':211.396828,'Dec_min':52.660360,'Dec_max':53.348534,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1403_5302.fits'},
'km1044_5154.fits': {'RA_min':160.474613,'RA_max':161.503915,'Dec_min':51.505780,'Dec_max':52.196790,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1044_5154.fits'},
'km1118_5519.fits': {'RA_min':169.065906,'RA_max':170.181070,'Dec_min':54.925472,'Dec_max':55.613386,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1118_5519.fits'},
'km1142_5513.fits': {'RA_min':175.020158,'RA_max':176.144477,'Dec_min':54.909372,'Dec_max':55.605503,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1142_5513.fits'},
'km1526_5154.fits': {'RA_min':231.079642,'RA_max':232.108304,'Dec_min':51.521334,'Dec_max':52.211552,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1526_5154.fits'},
'km1446_5150.fits': {'RA_min':220.980813,'RA_max':222.015528,'Dec_min':51.519006,'Dec_max':52.215177,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1446_5150.fits'},
'km1530_5256.fits': {'RA_min':231.948848,'RA_max':233.019638,'Dec_min':52.630020,'Dec_max':53.320798,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1530_5256.fits'},
'km1114_5332.fits': {'RA_min':167.911539,'RA_max':168.983267,'Dec_min':53.226588,'Dec_max':53.916284,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1114_5332.fits'},
'km1138_5438.fits': {'RA_min':173.988291,'RA_max':175.085647,'Dec_min':54.315144,'Dec_max':55.004052,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1138_5438.fits'},
'km1358_5155.fits': {'RA_min':209.076171,'RA_max':210.104968,'Dec_min':51.520400,'Dec_max':52.218492,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1358_5155.fits'},
'km1150_5406.fits': {'RA_min':176.845801,'RA_max':177.953539,'Dec_min':53.791102,'Dec_max':54.490355,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1150_5406.fits'},
'km1453_5228.fits': {'RA_min':222.774864,'RA_max':223.818248,'Dec_min':52.086315,'Dec_max':52.779322,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1453_5228.fits'},
'km1344_5150.fits': {'RA_min':205.408742,'RA_max':206.436863,'Dec_min':51.523178,'Dec_max':52.210525,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1344_5150.fits'},
'km1419_5258.fits': {'RA_min':214.091227,'RA_max':215.149711,'Dec_min':52.651229,'Dec_max':53.343375,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1419_5258.fits'},
'km1415_5302.fits': {'RA_min':213.166959,'RA_max':214.222723,'Dec_min':52.658519,'Dec_max':53.348707,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1415_5302.fits'},
'km1138_5228.fits': {'RA_min':173.940398,'RA_max':174.984844,'Dec_min':52.063120,'Dec_max':52.757846,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1138_5228.fits'},
'km1255_5410.fits': {'RA_min':193.302187,'RA_max':194.385880,'Dec_min':53.786854,'Dec_max':54.475982,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1255_5410.fits'},
'km1223_5150.fits': {'RA_min':185.236366,'RA_max':186.271349,'Dec_min':51.518739,'Dec_max':52.212234,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1223_5150.fits'},
'km1126_5518.fits': {'RA_min':171.054889,'RA_max':172.172019,'Dec_min':54.916750,'Dec_max':55.606600,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1126_5518.fits'},
'km1134_5514.fits': {'RA_min':173.039075,'RA_max':174.157770,'Dec_min':54.914905,'Dec_max':55.607475,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1134_5514.fits'},
'km1204_5224.fits': {'RA_min':180.409182,'RA_max':181.454628,'Dec_min':52.082908,'Dec_max':52.776922,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1204_5224.fits'},
'km1130_5444.fits': {'RA_min':172.027763,'RA_max':173.200486,'Dec_min':54.348836,'Dec_max':55.074098,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1130_5444.fits'},
'km1218_5339.fits': {'RA_min':184.098073,'RA_max':185.168446,'Dec_min':53.262712,'Dec_max':53.952272,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1218_5339.fits'},
'km1413_5229.fits': {'RA_min':212.634769,'RA_max':213.682163,'Dec_min':52.097215,'Dec_max':52.790399,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1413_5229.fits'},
'km1035_5224.fits': {'RA_min':158.276634,'RA_max':159.367891,'Dec_min':52.081584,'Dec_max':52.771250,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1035_5224.fits'},
'km1322_5410.fits': {'RA_min':200.021478,'RA_max':201.146856,'Dec_min':53.786405,'Dec_max':54.474657,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1322_5410.fits'},
'km1259_5224.fits': {'RA_min':194.197793,'RA_max':195.239072,'Dec_min':52.057491,'Dec_max':52.744903,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1259_5224.fits'},
'km1130_5228.fits': {'RA_min':172.112086,'RA_max':173.159302,'Dec_min':52.071064,'Dec_max':52.766148,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1130_5228.fits'},
'km1138_5410.fits': {'RA_min':173.955630,'RA_max':175.039738,'Dec_min':53.793357,'Dec_max':54.484075,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1138_5410.fits'},
'km1123_5440.fits': {'RA_min':170.073686,'RA_max':171.220707,'Dec_min':54.348379,'Dec_max':55.077838,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1123_5440.fits'},
'km1248_5302.fits': {'RA_min':191.452413,'RA_max':192.507662,'Dec_min':52.627891,'Dec_max':53.320052,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1248_5302.fits'},
'km1449_5258.fits': {'RA_min':221.623128,'RA_max':222.678223,'Dec_min':52.650428,'Dec_max':53.338981,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1449_5258.fits'},
'km1508_5223.fits': {'RA_min':226.443284,'RA_max':227.484888,'Dec_min':52.080377,'Dec_max':52.768055,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1508_5223.fits'},
'km1142_5228.fits': {'RA_min':174.867919,'RA_max':175.909608,'Dec_min':52.064959,'Dec_max':52.753618,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1142_5228.fits'},
'km1515_5224.fits': {'RA_min':228.286461,'RA_max':229.328356,'Dec_min':52.083182,'Dec_max':52.771203,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1515_5224.fits'},
'km1054_5223.fits': {'RA_min':162.967890,'RA_max':164.008746,'Dec_min':52.077967,'Dec_max':52.767310,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1054_5223.fits'},
'km1029_5150.fits': {'RA_min':156.811043,'RA_max':157.838907,'Dec_min':51.512287,'Dec_max':52.199405,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1029_5150.fits'},
'km1515_5228.fits': {'RA_min':228.304235,'RA_max':229.345485,'Dec_min':52.084960,'Dec_max':52.773201,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1515_5228.fits'},
'km1137_5336.fits': {'RA_min':173.643038,'RA_max':174.717658,'Dec_min':53.221287,'Dec_max':53.916802,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1137_5336.fits'},
'km1154_5518.fits': {'RA_min':177.982093,'RA_max':179.114186,'Dec_min':54.915649,'Dec_max':55.604963,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1154_5518.fits'},
'km1530_5332.fits': {'RA_min':231.852041,'RA_max':232.922569,'Dec_min':53.225144,'Dec_max':53.914653,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1530_5332.fits'},
'km1207_5332.fits': {'RA_min':181.273594,'RA_max':182.343949,'Dec_min':53.228436,'Dec_max':53.916051,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1207_5332.fits'},
'km1208_5154.fits': {'RA_min':181.580660,'RA_max':182.609285,'Dec_min':51.521366,'Dec_max':52.211938,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1208_5154.fits'},
'km1522_5332.fits': {'RA_min':229.940857,'RA_max':231.012777,'Dec_min':53.223977,'Dec_max':53.916164,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1522_5332.fits'},
'km1523_5223.fits': {'RA_min':230.130237,'RA_max':231.171791,'Dec_min':52.064278,'Dec_max':52.754114,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1523_5223.fits'},
'km1139_5150.fits': {'RA_min':174.230852,'RA_max':175.260143,'Dec_min':51.534083,'Dec_max':52.224931,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1139_5150.fits'},
'km1329_5154.fits': {'RA_min':201.718999,'RA_max':202.748555,'Dec_min':51.506183,'Dec_max':52.198114,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1329_5154.fits'},
'km1222_5228.fits': {'RA_min':185.019153,'RA_max':186.092592,'Dec_min':52.075570,'Dec_max':52.768742,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1222_5228.fits'},
'km1450_5150.fits': {'RA_min':221.905762,'RA_max':222.934426,'Dec_min':51.522303,'Dec_max':52.211100,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1450_5150.fits'},
'km1159_5336.fits': {'RA_min':179.363391,'RA_max':180.498449,'Dec_min':53.222850,'Dec_max':53.910496,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1159_5336.fits'},
'km1122_5331.fits': {'RA_min':169.781516,'RA_max':170.894239,'Dec_min':53.208612,'Dec_max':53.912842,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1122_5331.fits'},
'km1134_5410.fits': {'RA_min':172.993375,'RA_max':174.078801,'Dec_min':53.793699,'Dec_max':54.483493,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1134_5410.fits'},
'km1118_5302.fits': {'RA_min':168.888710,'RA_max':169.944134,'Dec_min':52.645416,'Dec_max':53.333852,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1118_5302.fits'},
'km1119_5409.fits': {'RA_min':169.127165,'RA_max':170.213965,'Dec_min':53.772879,'Dec_max':54.456978,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1119_5409.fits'},
'km1241_5154.fits': {'RA_min':189.834696,'RA_max':190.865869,'Dec_min':51.520903,'Dec_max':52.212524,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1241_5154.fits'},
'km1234_5222.fits': {'RA_min':187.923704,'RA_max':188.968685,'Dec_min':52.053640,'Dec_max':52.743439,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1234_5222.fits'},
'km1504_5258.fits': {'RA_min':225.384169,'RA_max':226.448480,'Dec_min':52.647044,'Dec_max':53.336715,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1504_5258.fits'},
'km1318_5224.fits': {'RA_min':198.790276,'RA_max':199.831187,'Dec_min':52.051178,'Dec_max':52.739331,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1318_5224.fits'},
'km1339_5226.fits': {'RA_min':204.349424,'RA_max':205.420302,'Dec_min':52.081048,'Dec_max':52.785224,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1339_5226.fits'},
'km1159_5258.fits': {'RA_min':179.227242,'RA_max':180.305941,'Dec_min':52.627631,'Dec_max':53.332203,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1159_5258.fits'},
'km1211_5332.fits': {'RA_min':182.221546,'RA_max':183.294025,'Dec_min':53.224768,'Dec_max':53.918146,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1211_5332.fits'},
'km1117_5154.fits': {'RA_min':168.745143,'RA_max':169.776786,'Dec_min':51.514080,'Dec_max':52.198363,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1117_5154.fits'},
'km1226_5224.fits': {'RA_min':185.951902,'RA_max':186.978087,'Dec_min':52.078425,'Dec_max':52.771007,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1226_5224.fits'},
'km1047_5226.fits': {'RA_min':161.014225,'RA_max':162.154560,'Dec_min':52.072828,'Dec_max':52.815778,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1047_5226.fits'},
'km1200_5223.fits': {'RA_min':179.510218,'RA_max':180.551403,'Dec_min':52.078128,'Dec_max':52.764710,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1200_5223.fits'},
'km1450_5224.fits': {'RA_min':221.851667,'RA_max':222.901959,'Dec_min':52.088824,'Dec_max':52.777244,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1450_5224.fits'},
'km1303_5224.fits': {'RA_min':195.108622,'RA_max':196.152162,'Dec_min':52.051560,'Dec_max':52.744801,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1303_5224.fits'},
'km1215_5332.fits': {'RA_min':183.182982,'RA_max':184.254938,'Dec_min':53.226461,'Dec_max':53.915846,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1215_5332.fits'},
'km1202_5440.fits': {'RA_min':179.876386,'RA_max':180.977913,'Dec_min':54.347666,'Dec_max':55.038965,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1202_5440.fits'},
'km1055_5332.fits': {'RA_min':163.179767,'RA_max':164.239646,'Dec_min':53.216420,'Dec_max':53.910303,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1055_5332.fits'},
'km1106_5154.fits': {'RA_min':165.990103,'RA_max':167.019903,'Dec_min':51.492889,'Dec_max':52.200346,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1106_5154.fits'},
'km1300_5154.fits': {'RA_min':194.419087,'RA_max':195.446976,'Dec_min':51.518303,'Dec_max':52.207778,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1300_5154.fits'},
'km1225_5406.fits': {'RA_min':185.554166,'RA_max':186.642667,'Dec_min':53.783130,'Dec_max':54.477939,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1225_5406.fits'},
'km1055_5258.fits': {'RA_min':163.230105,'RA_max':164.284738,'Dec_min':52.645551,'Dec_max':53.323458,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1055_5258.fits'},
'km1456_5303.fits': {'RA_min':223.495186,'RA_max':224.567459,'Dec_min':52.645825,'Dec_max':53.346358,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1456_5303.fits'},
'km1203_5257.fits': {'RA_min':180.163720,'RA_max':181.240568,'Dec_min':52.628108,'Dec_max':53.325137,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1203_5257.fits'},
'km1040_5302.fits': {'RA_min':159.472046,'RA_max':160.534408,'Dec_min':52.643174,'Dec_max':53.337453,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1040_5302.fits'},
'km1303_5406.fits': {'RA_min':195.239489,'RA_max':196.325387,'Dec_min':53.784103,'Dec_max':54.476092,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1303_5406.fits'},
'km1122_5258.fits': {'RA_min':169.824071,'RA_max':170.879866,'Dec_min':52.643486,'Dec_max':53.336107,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1122_5258.fits'},
'km1407_5258.fits': {'RA_min':211.277201,'RA_max':212.334020,'Dec_min':52.657881,'Dec_max':53.350758,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1407_5258.fits'},
'km1116_5228.fits': {'RA_min':168.436082,'RA_max':169.477716,'Dec_min':52.067120,'Dec_max':52.756639,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1116_5228.fits'},
'km1508_5150.fits': {'RA_min':226.482586,'RA_max':227.515593,'Dec_min':51.523762,'Dec_max':52.250266,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1508_5150.fits'},
'km1427_5223.fits': {'RA_min':216.295530,'RA_max':217.338380,'Dec_min':52.074264,'Dec_max':52.766476,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1427_5223.fits'},
'km1110_5147.fits': {'RA_min':166.960834,'RA_max':167.989120,'Dec_min':51.471872,'Dec_max':52.162408,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1110_5147.fits'},
'km1036_5302.fits': {'RA_min':158.526993,'RA_max':159.581273,'Dec_min':52.646037,'Dec_max':53.334721,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1036_5302.fits'},
'km1457_5154.fits': {'RA_min':223.737111,'RA_max':224.765715,'Dec_min':51.520199,'Dec_max':52.210893,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1457_5154.fits'},
'km1523_5224.fits': {'RA_min':230.153007,'RA_max':231.206477,'Dec_min':52.083322,'Dec_max':52.777815,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1523_5224.fits'},
'km1200_5332.fits': {'RA_min':179.361657,'RA_max':180.432017,'Dec_min':53.227910,'Dec_max':53.916239,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1200_5332.fits'},
'km1210_5303.fits': {'RA_min':182.049669,'RA_max':183.105146,'Dec_min':52.643151,'Dec_max':53.330529,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1210_5303.fits'},
'km1252_5406.fits': {'RA_min':192.324680,'RA_max':193.413063,'Dec_min':53.784458,'Dec_max':54.479032,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1252_5406.fits'},
'km1233_5259.fits': {'RA_min':187.703201,'RA_max':188.764539,'Dec_min':52.646812,'Dec_max':53.322811,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1233_5259.fits'},
'km1051_5336.fits': {'RA_min':162.166333,'RA_max':163.262609,'Dec_min':53.218022,'Dec_max':53.907943,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1051_5336.fits'},
'km1101_5224.fits': {'RA_min':164.753372,'RA_max':165.793965,'Dec_min':52.071548,'Dec_max':52.759650,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1101_5224.fits'},
'km1051_5150.fits': {'RA_min':162.330139,'RA_max':163.364376,'Dec_min':51.512916,'Dec_max':52.206480,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1051_5150.fits'},
'km1339_5332.fits': {'RA_min':204.164671,'RA_max':205.320145,'Dec_min':53.203183,'Dec_max':53.899491,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1339_5332.fits'},
'km1315_5258.fits': {'RA_min':198.040077,'RA_max':199.138696,'Dec_min':52.627382,'Dec_max':53.366302,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1315_5258.fits'},
'km1144_5336.fits': {'RA_min':175.554266,'RA_max':176.624710,'Dec_min':53.221660,'Dec_max':53.914234,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1144_5336.fits'},
'km1050_5224.fits': {'RA_min':161.998024,'RA_max':163.058035,'Dec_min':52.073618,'Dec_max':52.777124,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1050_5224.fits'},
'km1221_5406.fits': {'RA_min':184.594374,'RA_max':185.678759,'Dec_min':53.779417,'Dec_max':54.473448,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1221_5406.fits'},
'km1215_5228.fits': {'RA_min':183.177531,'RA_max':184.220239,'Dec_min':52.069403,'Dec_max':52.760003,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1215_5228.fits'},
'km1122_5518.fits': {'RA_min':170.048876,'RA_max':171.168449,'Dec_min':54.920299,'Dec_max':55.610631,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1122_5518.fits'},
'km1201_5150.fits': {'RA_min':179.760865,'RA_max':180.773690,'Dec_min':51.523376,'Dec_max':52.212396,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1201_5150.fits'},
'km1240_5411.fits': {'RA_min':189.368885,'RA_max':190.510398,'Dec_min':53.786499,'Dec_max':54.483714,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1240_5411.fits'},
'km1511_5302.fits': {'RA_min':227.300245,'RA_max':228.357353,'Dec_min':52.656759,'Dec_max':53.347772,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1511_5302.fits'},
'km1154_5406.fits': {'RA_min':177.824258,'RA_max':178.908888,'Dec_min':53.787330,'Dec_max':54.487457,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1154_5406.fits'},
'km1244_5411.fits': {'RA_min':190.291990,'RA_max':191.488624,'Dec_min':53.746072,'Dec_max':54.481744,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1244_5411.fits'},
'km1232_5406.fits': {'RA_min':187.489855,'RA_max':188.574829,'Dec_min':53.784848,'Dec_max':54.476691,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1232_5406.fits'},
'km1059_5154.fits': {'RA_min':164.153617,'RA_max':165.186792,'Dec_min':51.511939,'Dec_max':52.206468,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1059_5154.fits'},
'km1141_5332.fits': {'RA_min':174.606553,'RA_max':175.675962,'Dec_min':53.223928,'Dec_max':53.912351,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1141_5332.fits'},
'km1430_5258.fits': {'RA_min':216.928184,'RA_max':217.992905,'Dec_min':52.659133,'Dec_max':53.347087,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1430_5258.fits'},
'km1506_5336.fits': {'RA_min':226.129277,'RA_max':227.207279,'Dec_min':53.225085,'Dec_max':53.919527,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1506_5336.fits'},
'km1248_5228.fits': {'RA_min':191.456266,'RA_max':192.501617,'Dec_min':52.073657,'Dec_max':52.765461,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1248_5228.fits'},
'km1347_5227.fits': {'RA_min':206.163712,'RA_max':207.240966,'Dec_min':52.056358,'Dec_max':52.754619,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1347_5227.fits'},
'km1247_5411.fits': {'RA_min':191.362928,'RA_max':192.447781,'Dec_min':53.799899,'Dec_max':54.486579,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1247_5411.fits'},
'km1125_5332.fits': {'RA_min':170.757293,'RA_max':171.826351,'Dec_min':53.227920,'Dec_max':53.916893,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1125_5332.fits'},
'km1252_5300.fits': {'RA_min':192.402257,'RA_max':193.457373,'Dec_min':52.616071,'Dec_max':53.286446,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1252_5300.fits'},
'km1237_5258.fits': {'RA_min':188.633308,'RA_max':189.689836,'Dec_min':52.635124,'Dec_max':53.320964,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1237_5258.fits'},
'km1106_5332.fits': {'RA_min':166.015209,'RA_max':167.085212,'Dec_min':53.219279,'Dec_max':53.907109,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1106_5332.fits'},
'km1240_5302.fits': {'RA_min':189.568636,'RA_max':190.627511,'Dec_min':52.627113,'Dec_max':53.321628,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1240_5302.fits'},
'km1253_5332.fits': {'RA_min':192.726764,'RA_max':193.796543,'Dec_min':53.218881,'Dec_max':53.909117,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1253_5332.fits'},
'km1047_5302.fits': {'RA_min':161.343635,'RA_max':162.402956,'Dec_min':52.643669,'Dec_max':53.338258,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1047_5302.fits'},
'km1055_5410.fits': {'RA_min':163.299051,'RA_max':164.413716,'Dec_min':53.782261,'Dec_max':54.478833,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1055_5410.fits'},
'km1326_5258.fits': {'RA_min':200.871404,'RA_max':201.932061,'Dec_min':52.627210,'Dec_max':53.353733,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1326_5258.fits'},
'km1205_5150.fits': {'RA_min':180.654906,'RA_max':181.684588,'Dec_min':51.521058,'Dec_max':52.213174,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1205_5150.fits'},
'km1148_5332.fits': {'RA_min':176.517420,'RA_max':177.587852,'Dec_min':53.222622,'Dec_max':53.913027,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1148_5332.fits'},
'km1107_5444.fits': {'RA_min':166.164574,'RA_max':167.271571,'Dec_min':54.349014,'Dec_max':55.043518,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1107_5444.fits'},
'km1511_5332.fits': {'RA_min':227.074743,'RA_max':228.144918,'Dec_min':53.227363,'Dec_max':53.913367,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1511_5332.fits'},
'km1039_5228.fits': {'RA_min':159.229076,'RA_max':160.288997,'Dec_min':52.073303,'Dec_max':52.776291,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1039_5228.fits'},
'km1322_5150.fits': {'RA_min':199.900929,'RA_max':200.935397,'Dec_min':51.520983,'Dec_max':52.216431,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1322_5150.fits'},
'km1526_5258.fits': {'RA_min':231.059561,'RA_max':232.120003,'Dec_min':52.654575,'Dec_max':53.349576,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1526_5258.fits'},
'km1343_5228.fits': {'RA_min':205.267127,'RA_max':206.307860,'Dec_min':52.082462,'Dec_max':52.770665,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1343_5228.fits'},
'km1434_5258.fits': {'RA_min':217.832591,'RA_max':218.904609,'Dec_min':52.639331,'Dec_max':53.346150,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1434_5258.fits'},
'km1318_5302.fits': {'RA_min':198.989721,'RA_max':200.045855,'Dec_min':52.627268,'Dec_max':53.317807,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1318_5302.fits'},
'km1335_5331.fits': {'RA_min':203.200908,'RA_max':204.322867,'Dec_min':53.200838,'Dec_max':53.887742,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1335_5331.fits'},
'km1255_5224.fits': {'RA_min':193.270571,'RA_max':194.318681,'Dec_min':52.057681,'Dec_max':52.754405,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1255_5224.fits'},
'km1446_5224.fits': {'RA_min':220.930428,'RA_max':221.978345,'Dec_min':52.085746,'Dec_max':52.781673,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1446_5224.fits'},
'km1245_5150.fits': {'RA_min':190.749651,'RA_max':191.777295,'Dec_min':51.522136,'Dec_max':52.209853,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1245_5150.fits'},
'km1321_5224.fits': {'RA_min':199.712217,'RA_max':200.759797,'Dec_min':52.047653,'Dec_max':52.741225,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1321_5224.fits'},
'km1218_5302.fits': {'RA_min':183.932567,'RA_max':184.987845,'Dec_min':52.625615,'Dec_max':53.314730,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1218_5302.fits'},
'km1029_5302.fits': {'RA_min':156.643483,'RA_max':157.698669,'Dec_min':52.644746,'Dec_max':53.335928,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1029_5302.fits'},
'km1333_5150.fits': {'RA_min':202.642849,'RA_max':203.671818,'Dec_min':51.506603,'Dec_max':52.197138,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1333_5150.fits'},
'km1519_5150.fits': {'RA_min':229.225778,'RA_max':230.257945,'Dec_min':51.519225,'Dec_max':52.212915,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1519_5150.fits'},
'km1504_5154.fits': {'RA_min':225.574957,'RA_max':226.607841,'Dec_min':51.521844,'Dec_max':52.214241,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1504_5154.fits'},
'km1518_5336.fits': {'RA_min':229.009305,'RA_max':230.063684,'Dec_min':53.226045,'Dec_max':53.915107,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1518_5336.fits'},
'km1130_5406.fits': {'RA_min':171.974221,'RA_max':173.103973,'Dec_min':53.799857,'Dec_max':54.493907,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1130_5406.fits'},
'km1332_5228.fits': {'RA_min':202.474804,'RA_max':203.516411,'Dec_min':52.051848,'Dec_max':52.740857,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1332_5228.fits'},
'km1512_5150.fits': {'RA_min':227.391924,'RA_max':228.428287,'Dec_min':51.518903,'Dec_max':52.215896,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1512_5150.fits'},
'km1409_5228.fits': {'RA_min':211.710134,'RA_max':212.751341,'Dec_min':52.082924,'Dec_max':52.771611,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1409_5228.fits'},
'km1420_5224.fits': {'RA_min':214.483153,'RA_max':215.529076,'Dec_min':52.083259,'Dec_max':52.777766,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1420_5224.fits'},
'km1311_5258.fits': {'RA_min':197.136939,'RA_max':198.200495,'Dec_min':52.638542,'Dec_max':53.338627,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1311_5258.fits'},
'km1325_5154.fits': {'RA_min':200.824629,'RA_max':201.852874,'Dec_min':51.523888,'Dec_max':52.197911,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1325_5154.fits'},
'km1227_5150.fits': {'RA_min':186.161225,'RA_max':187.189525,'Dec_min':51.521149,'Dec_max':52.209561,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1227_5150.fits'},
'km1102_5332.fits': {'RA_min':165.051408,'RA_max':166.127118,'Dec_min':53.214790,'Dec_max':53.910751,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1102_5332.fits'},
'km1229_5257.fits': {'RA_min':186.777368,'RA_max':187.832077,'Dec_min':52.644734,'Dec_max':53.332841,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1229_5257.fits'},
'km1037_5150.fits': {'RA_min':158.642544,'RA_max':159.671094,'Dec_min':51.507595,'Dec_max':52.196210,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1037_5150.fits'},
'km1501_5150.fits': {'RA_min':224.655812,'RA_max':225.685202,'Dec_min':51.523144,'Dec_max':52.212919,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1501_5150.fits'},
'km1117_5337.fits': {'RA_min':168.803928,'RA_max':169.873849,'Dec_min':53.234086,'Dec_max':53.925611,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1117_5337.fits'},
'km1511_5228.fits': {'RA_min':227.374118,'RA_max':228.423688,'Dec_min':52.081380,'Dec_max':52.778411,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1511_5228.fits'},
'km1150_5157.fits': {'RA_min':176.988570,'RA_max':178.023193,'Dec_min':51.559716,'Dec_max':52.252806,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1150_5157.fits'},
'km1252_5230.fits': {'RA_min':192.428256,'RA_max':193.469929,'Dec_min':52.098395,'Dec_max':52.785275,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1252_5230.fits'},
'km1249_5150.fits': {'RA_min':191.646759,'RA_max':192.694492,'Dec_min':51.517691,'Dec_max':52.214468,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1249_5150.fits'},
'km1340_5224.fits': {'RA_min':204.336651,'RA_max':205.379904,'Dec_min':52.074876,'Dec_max':52.766272,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1340_5224.fits'},
'km1219_5224.fits': {'RA_min':184.092905,'RA_max':185.134344,'Dec_min':52.078031,'Dec_max':52.767292,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1219_5224.fits'},
'km1249_5336.fits': {'RA_min':191.686070,'RA_max':192.851207,'Dec_min':53.223682,'Dec_max':53.909630,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1249_5336.fits'},
'km1355_5154.fits': {'RA_min':208.151711,'RA_max':209.182507,'Dec_min':51.519234,'Dec_max':52.212638,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1355_5154.fits'},
'km1348_5258.fits': {'RA_min':206.559833,'RA_max':207.616730,'Dec_min':52.662142,'Dec_max':53.351817,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1348_5258.fits'},
'km1303_5258.fits': {'RA_min':195.239116,'RA_max':196.318722,'Dec_min':52.630847,'Dec_max':53.337027,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1303_5258.fits'},
'km1133_5336.fits': {'RA_min':172.697528,'RA_max':173.763865,'Dec_min':53.223968,'Dec_max':53.906034,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1133_5336.fits'},
'km1255_5302.fits': {'RA_min':193.338481,'RA_max':194.392835,'Dec_min':52.629724,'Dec_max':53.319013,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1255_5302.fits'},
'km1500_5258.fits': {'RA_min':224.445388,'RA_max':225.504705,'Dec_min':52.644342,'Dec_max':53.339039,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1500_5258.fits'},
'km1504_5224.fits': {'RA_min':225.543932,'RA_max':226.600343,'Dec_min':52.084924,'Dec_max':52.777085,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1504_5224.fits'},
'km1259_5410.fits': {'RA_min':194.260062,'RA_max':195.345617,'Dec_min':53.784398,'Dec_max':54.476431,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1259_5410.fits'},
'km1356_5258.fits': {'RA_min':208.455466,'RA_max':209.510596,'Dec_min':52.662462,'Dec_max':53.349252,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1356_5258.fits'},
'km1530_5154.fits': {'RA_min':232.001455,'RA_max':233.032595,'Dec_min':51.519583,'Dec_max':52.211065,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1530_5154.fits'},
'km1216_5150.fits': {'RA_min':183.413605,'RA_max':184.446197,'Dec_min':51.531264,'Dec_max':52.223632,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1216_5150.fits'},
'km1453_5150.fits': {'RA_min':222.812615,'RA_max':223.844232,'Dec_min':51.535292,'Dec_max':52.228316,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1453_5150.fits'},
'km1119_5229.fits': {'RA_min':169.323270,'RA_max':170.364645,'Dec_min':52.070655,'Dec_max':52.761581,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1119_5229.fits'},
'km1126_5410.fits': {'RA_min':171.059030,'RA_max':172.143296,'Dec_min':53.795295,'Dec_max':54.482805,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1126_5410.fits'},
'km1028_5224.fits': {'RA_min':156.473845,'RA_max':157.524778,'Dec_min':52.080395,'Dec_max':52.771269,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1028_5224.fits'},
'km1142_5410.fits': {'RA_min':174.875615,'RA_max':176.018239,'Dec_min':53.791913,'Dec_max':54.484153,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1142_5410.fits'},
'km1055_5150.fits': {'RA_min':163.244224,'RA_max':164.301402,'Dec_min':51.505273,'Dec_max':52.202823,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1055_5150.fits'},
'km1333_5302.fits': {'RA_min':202.749284,'RA_max':203.823979,'Dec_min':52.625048,'Dec_max':53.312624,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1333_5302.fits'},
'km1115_5444.fits': {'RA_min':168.112981,'RA_max':169.219230,'Dec_min':54.348128,'Dec_max':55.044347,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1115_5444.fits'},
'km1140_5258.fits': {'RA_min':174.529007,'RA_max':175.590817,'Dec_min':52.641085,'Dec_max':53.337206,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1140_5258.fits'},
'km1519_5258.fits': {'RA_min':229.182899,'RA_max':230.245648,'Dec_min':52.655066,'Dec_max':53.348824,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1519_5258.fits'},
'km1102_5150.fits': {'RA_min':165.077452,'RA_max':166.105952,'Dec_min':51.515110,'Dec_max':52.203448,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1102_5150.fits'},
'km1508_5225.fits': {'RA_min':226.489257,'RA_max':227.531377,'Dec_min':52.112050,'Dec_max':52.799322,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1508_5225.fits'},
'km1113_5153.fits': {'RA_min':167.878152,'RA_max':168.890088,'Dec_min':51.516790,'Dec_max':52.188228,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1113_5153.fits'},
'km1354_5224.fits': {'RA_min':208.031957,'RA_max':209.076490,'Dec_min':52.080048,'Dec_max':52.773862,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1354_5224.fits'},
'km1242_5332.fits': {'RA_min':189.869964,'RA_max':190.940368,'Dec_min':53.225573,'Dec_max':53.914175,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1242_5332.fits'},
'km1328_5228.fits': {'RA_min':201.533589,'RA_max':202.591873,'Dec_min':52.048724,'Dec_max':52.743658,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1328_5228.fits'},
'km1244_5258.fits': {'RA_min':190.517955,'RA_max':191.572864,'Dec_min':52.628783,'Dec_max':53.318174,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1244_5258.fits'},
'km1146_5406.fits': {'RA_min':175.893188,'RA_max':176.977549,'Dec_min':53.794102,'Dec_max':54.482386,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1146_5406.fits'},
'km1310_5224.fits': {'RA_min':196.949118,'RA_max':197.990024,'Dec_min':52.050542,'Dec_max':52.741036,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1310_5224.fits'},
'km1033_5150.fits': {'RA_min':157.718945,'RA_max':158.750875,'Dec_min':51.514028,'Dec_max':52.208074,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1033_5150.fits'},
'km1145_5228.fits': {'RA_min':175.783222,'RA_max':176.822344,'Dec_min':52.061444,'Dec_max':52.753893,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1145_5228.fits'},
'km1211_5228.fits': {'RA_min':182.249845,'RA_max':183.291825,'Dec_min':52.076660,'Dec_max':52.768419,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1211_5228.fits'},
'km1400_5258.fits': {'RA_min':209.389526,'RA_max':210.451078,'Dec_min':52.657555,'Dec_max':53.353165,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1400_5258.fits'},
'km1206_5444.fits': {'RA_min':180.909489,'RA_max':182.045325,'Dec_min':54.357204,'Dec_max':55.046518,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1206_5444.fits'},
'km1158_5514.fits': {'RA_min':178.982994,'RA_max':180.098220,'Dec_min':54.914301,'Dec_max':55.605702,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1158_5514.fits'},
'km1054_5228.fits': {'RA_min':162.913066,'RA_max':163.972616,'Dec_min':52.072797,'Dec_max':52.772094,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1054_5228.fits'},
'km1501_5224.fits': {'RA_min':224.618798,'RA_max':225.661438,'Dec_min':52.089163,'Dec_max':52.779317,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1501_5224.fits'},
'km1301_5336.fits': {'RA_min':194.694053,'RA_max':195.787776,'Dec_min':53.220377,'Dec_max':53.907554,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1301_5336.fits'},
'km1314_5150.fits': {'RA_min':198.085472,'RA_max':199.119188,'Dec_min':51.516666,'Dec_max':52.209930,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1314_5150.fits'},
'km1129_5258.fits': {'RA_min':171.710475,'RA_max':172.765185,'Dec_min':52.644397,'Dec_max':53.334008,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1129_5258.fits'},
'km1314_5223.fits': {'RA_min':197.875580,'RA_max':198.933029,'Dec_min':52.041498,'Dec_max':52.741062,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1314_5223.fits'},
'km1405_5228.fits': {'RA_min':210.793874,'RA_max':211.836299,'Dec_min':52.082193,'Dec_max':52.772937,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1405_5228.fits'},
'km1411_5258.fits': {'RA_min':212.230345,'RA_max':213.286668,'Dec_min':52.658789,'Dec_max':53.348755,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1411_5258.fits'},
'km1336_5224.fits': {'RA_min':203.388606,'RA_max':204.430658,'Dec_min':52.049869,'Dec_max':52.741501,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1336_5224.fits'},
'km1510_5336.fits': {'RA_min':227.084158,'RA_max':228.153782,'Dec_min':53.227876,'Dec_max':53.914932,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1510_5336.fits'},
'km1416_5224.fits': {'RA_min':213.555225,'RA_max':214.596648,'Dec_min':52.083149,'Dec_max':52.769897,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1416_5224.fits'},
'km1222_5302.fits': {'RA_min':184.869264,'RA_max':185.924530,'Dec_min':52.619389,'Dec_max':53.310488,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1222_5302.fits'},
'km1040_5154.fits': {'RA_min':159.552482,'RA_max':160.580544,'Dec_min':51.505976,'Dec_max':52.197212,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1040_5154.fits'},
'km1523_5258.fits': {'RA_min':230.124885,'RA_max':231.180048,'Dec_min':52.658424,'Dec_max':53.345858,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1523_5258.fits'},
'km1205_5410.fits': {'RA_min':180.719434,'RA_max':181.815400,'Dec_min':53.781063,'Dec_max':54.476392,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1205_5410.fits'},
'km1157_5410.fits': {'RA_min':178.783737,'RA_max':179.872199,'Dec_min':53.791132,'Dec_max':54.485613,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1157_5410.fits'},
'km1329_5149.fits': {'RA_min':201.733191,'RA_max':202.763757,'Dec_min':51.519422,'Dec_max':52.212204,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1329_5149.fits'},
'km1146_5440.fits': {'RA_min':175.947343,'RA_max':177.047062,'Dec_min':54.349085,'Dec_max':55.035926,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1146_5440.fits'},
'km1154_5151.fits': {'RA_min':177.907947,'RA_max':178.942220,'Dec_min':51.549763,'Dec_max':52.236405,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1154_5151.fits'},
'km1124_5154.fits': {'RA_min':170.567895,'RA_max':171.602595,'Dec_min':51.511792,'Dec_max':52.207808,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1124_5154.fits'},
'km1347_5150.fits': {'RA_min':206.304249,'RA_max':207.340130,'Dec_min':51.518162,'Dec_max':52.212136,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1347_5150.fits'},
'km1229_5258.fits': {'RA_min':186.759080,'RA_max':187.813666,'Dec_min':52.628102,'Dec_max':53.313799,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1229_5258.fits'},
'km1303_5150.fits': {'RA_min':195.328927,'RA_max':196.358080,'Dec_min':51.518082,'Dec_max':52.209012,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1303_5150.fits'},
'km1125_5257.fits': {'RA_min':170.767438,'RA_max':171.832100,'Dec_min':52.630019,'Dec_max':53.347440,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1125_5257.fits'},
'km1322_5302.fits': {'RA_min':199.912059,'RA_max':200.966610,'Dec_min':52.628227,'Dec_max':53.318102,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1322_5302.fits'},
'km1241_5224.fits': {'RA_min':189.618017,'RA_max':190.781215,'Dec_min':52.074315,'Dec_max':52.765524,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1241_5224.fits'},
'km1153_5228.fits': {'RA_min':177.651740,'RA_max':178.761087,'Dec_min':52.076902,'Dec_max':52.765535,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1153_5228.fits'},
'km1213_5444.fits': {'RA_min':182.834867,'RA_max':183.934138,'Dec_min':54.358954,'Dec_max':55.046727,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1213_5444.fits'},
'km1519_5228.fits': {'RA_min':229.217971,'RA_max':230.345976,'Dec_min':52.086809,'Dec_max':52.780481,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1519_5228.fits'},
'km1109_5224.fits': {'RA_min':166.592693,'RA_max':167.633582,'Dec_min':52.070120,'Dec_max':52.757334,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1109_5224.fits'},
'km1150_5440.fits': {'RA_min':176.918587,'RA_max':178.023571,'Dec_min':54.344687,'Dec_max':55.052213,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1150_5440.fits'},
'km1210_5440.fits': {'RA_min':181.829898,'RA_max':182.934811,'Dec_min':54.355878,'Dec_max':55.048862,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1210_5440.fits'},
'km1457_5224.fits': {'RA_min':223.703801,'RA_max':224.745774,'Dec_min':52.086965,'Dec_max':52.777356,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1457_5224.fits'},
'km1424_5223.fits': {'RA_min':215.389650,'RA_max':216.430644,'Dec_min':52.064653,'Dec_max':52.753136,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1424_5223.fits'},
'km1118_5444.fits': {'RA_min':169.102419,'RA_max':170.202328,'Dec_min':54.351552,'Dec_max':55.039158,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1118_5444.fits'},
'km1422_5258.fits': {'RA_min':215.030491,'RA_max':216.085538,'Dec_min':52.653562,'Dec_max':53.341485,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1422_5258.fits'},
'km1526_5223.fits': {'RA_min':231.061461,'RA_max':232.114153,'Dec_min':52.077185,'Dec_max':52.768164,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1526_5223.fits'},
'km1515_5258.fits': {'RA_min':228.237700,'RA_max':229.292861,'Dec_min':52.657962,'Dec_max':53.346539,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1515_5258.fits'},
'km1201_5405.fits': {'RA_min':179.758301,'RA_max':180.907167,'Dec_min':53.783569,'Dec_max':54.503913,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1201_5405.fits'},
'km1114_5302.fits': {'RA_min':167.940458,'RA_max':169.000662,'Dec_min':52.642531,'Dec_max':53.337364,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1114_5302.fits'},
'km1223_5332.fits': {'RA_min':185.094694,'RA_max':186.167297,'Dec_min':53.224183,'Dec_max':53.915972,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1223_5332.fits'},
'km1316_5338.fits': {'RA_min':198.467847,'RA_max':199.538710,'Dec_min':53.253121,'Dec_max':53.942168,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1316_5338.fits'},
'km1148_5257.fits': {'RA_min':176.439626,'RA_max':177.495886,'Dec_min':52.637081,'Dec_max':53.329786,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1148_5257.fits'},
'km1202_5519.fits': {'RA_min':179.934191,'RA_max':181.096228,'Dec_min':54.938778,'Dec_max':55.630860,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1202_5519.fits'},
'km1058_5336.fits': {'RA_min':164.104263,'RA_max':165.173679,'Dec_min':53.219293,'Dec_max':53.906841,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1058_5336.fits'},
'km1445_5302.fits': {'RA_min':220.689830,'RA_max':221.748112,'Dec_min':52.641477,'Dec_max':53.333670,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1445_5302.fits'},
'km1207_5228.fits': {'RA_min':181.336676,'RA_max':182.378809,'Dec_min':52.078937,'Dec_max':52.767402,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1207_5228.fits'},
'km1225_5302.fits': {'RA_min':185.819615,'RA_max':186.875665,'Dec_min':52.625912,'Dec_max':53.317356,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1225_5302.fits'},
'km1341_5258.fits': {'RA_min':204.662047,'RA_max':205.719741,'Dec_min':52.661443,'Dec_max':53.353954,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1341_5258.fits'},
'km1107_5410.fits': {'RA_min':166.235189,'RA_max':167.321910,'Dec_min':53.783925,'Dec_max':54.473951,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1107_5410.fits'},
'km1047_5336.fits': {'RA_min':161.239503,'RA_max':162.311924,'Dec_min':53.217146,'Dec_max':53.907961,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1047_5336.fits'},
'km1437_5258.fits': {'RA_min':218.797124,'RA_max':219.854165,'Dec_min':52.654413,'Dec_max':53.344156,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1437_5258.fits'},
'km1333_5154.fits': {'RA_min':202.656089,'RA_max':203.684857,'Dec_min':51.522609,'Dec_max':52.213116,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1333_5154.fits'},
'km1228_5406.fits': {'RA_min':186.522160,'RA_max':187.613567,'Dec_min':53.778654,'Dec_max':54.446964,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1228_5406.fits'},
'km1343_5336.fits': {'RA_min':205.193770,'RA_max':206.312422,'Dec_min':53.204653,'Dec_max':53.893857,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1343_5336.fits'},
'km1307_5152.fits': {'RA_min':196.254168,'RA_max':197.337861,'Dec_min':51.538920,'Dec_max':52.208882,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1307_5152.fits'},
'km1519_5224.fits': {'RA_min':229.201155,'RA_max':230.244726,'Dec_min':52.080344,'Dec_max':52.773562,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1519_5224.fits'},
'km1123_5224.fits': {'RA_min':170.276704,'RA_max':171.319398,'Dec_min':52.062398,'Dec_max':52.753914,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1123_5224.fits'},
'km1402_5149.fits': {'RA_min':209.985766,'RA_max':211.015569,'Dec_min':51.511193,'Dec_max':52.202193,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1402_5149.fits'},
'km1245_5336.fits': {'RA_min':190.819071,'RA_max':191.888606,'Dec_min':53.223515,'Dec_max':53.915833,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1245_5336.fits'},
'km1201_5406.fits': {'RA_min':179.731773,'RA_max':180.845623,'Dec_min':53.788582,'Dec_max':54.482192,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1201_5406.fits'},
'km1121_5149.fits': {'RA_min':169.650056,'RA_max':170.677420,'Dec_min':51.504972,'Dec_max':52.192897,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1121_5149.fits'},
'km1214_5302.fits': {'RA_min':182.984037,'RA_max':184.041936,'Dec_min':52.622864,'Dec_max':53.317030,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1214_5302.fits'},
'km1142_5444.fits': {'RA_min':174.968758,'RA_max':176.076855,'Dec_min':54.345494,'Dec_max':55.040492,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1142_5444.fits'},
'km1514_5332.fits': {'RA_min':228.030060,'RA_max':229.104603,'Dec_min':53.223672,'Dec_max':53.918659,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1514_5332.fits'},
'km1051_5258.fits': {'RA_min':162.292842,'RA_max':163.348191,'Dec_min':52.647272,'Dec_max':53.335824,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1051_5258.fits'},
'km1138_5518.fits': {'RA_min':174.026855,'RA_max':175.141273,'Dec_min':54.916676,'Dec_max':55.604771,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1138_5518.fits'},
'km1236_5410.fits': {'RA_min':188.464397,'RA_max':189.550783,'Dec_min':53.784722,'Dec_max':54.476793,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1236_5410.fits'},
'km1352_5258.fits': {'RA_min':207.510190,'RA_max':208.574651,'Dec_min':52.659888,'Dec_max':53.353240,'instrument':'mosaic_1_1','filter':'g SDSS k1017','path':'/data/03261/polonius/hdr2/imaging/KMImaging/km1352_5258.fits'}
}
Image_Coord_Range = {'RA_min':156.473845, 'RA_max':233.915347, 'Dec_min':51.471872, 'Dec_max':55.630860}
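# Illustrative lookup sketch (editorial addition, not part of the original
# module). Each entry above stores the RA/Dec bounding box of one KPNO Mosaic
# g-band tile, so finding the tiles that cover a coordinate is a simple
# containment test; `image_dict` stands in for the dictionary defined above,
# whose name is not shown in this excerpt:
#   def images_covering(image_dict, ra, dec):
#       return [name for name, m in image_dict.items()
#               if m['RA_min'] <= ra <= m['RA_max']
#               and m['Dec_min'] <= dec <= m['Dec_max']]
# Image_Coord_Range gives the union footprint, useful as a cheap pre-check
# before scanning the individual tiles.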
|
{
"filename": "distributions.py",
"repo_name": "zclaytor/butterpy",
"repo_path": "butterpy_extracted/butterpy-main/butterpy/distributions.py",
"type": "Python"
}
|
import numpy as np
class Distribution(object):
"""Base probability distribution class.
Distribution has shape `shape` and range [min, max).
"""
def __init__(self, min, max, shape):
"""
Initialize distribution shape.
Args:
min (float): distribution range minimum.
max (float): distribution range maximum.
shape (str): distribution shape, e.g., 'Uniform'.
"""
self.min = min
self.max = max
self.shape = shape
def __repr__(self):
"""Print distribution shape and range.
"""
repr = f"butterpy.Distribution with shape `{self.shape}` from {self.min} to {self.max}"
return repr
class Uniform(Distribution):
"""Uniform distribution from `min` to `max`.
"""
def __init__(self, min=0, max=1):
"""Creates a Uniform distribution with range [min, max).
That is, the values are uniformly distributed between min (inclusive)
and max (exclusive):
x ~ U(min, max).
"""
super().__init__(min, max, shape="Uniform")
def sample(self, size=None):
"""Sample the distribution, with optional `size` argument.
`size` is passed directly to `numpy.random.uniform`, so the behavior
matches the `numpy` behavior.
Args:
size (int): The number of times to sample the distribution.
Defaults to None, in which case a single float is returned.
Otherwise, an array with length `size` is returned.
Returns:
sample (float or numpy.ndarray): The samples from the distribution.
"""
return np.random.uniform(low=self.min, high=self.max, size=size)
class LogUniform(Distribution):
"""Log Uniform distribution from `min` to `max`.
That is, the logarithms of the values are uniformly distributed.
This is accomplished using inverse transform sampling:
log10(x) ~ U(log10(min), log10(max)).
Negative values are supported, but note that it simply mirrors the
positive distribution about zero.
"""
def __init__(self, min=1, max=10):
"""Creates a LogUniform distribution with range [min, max).
"""
assert np.all(min != 0), "Minimum must be non-zero."
assert np.all(max != 0), "Maximum must be non-zero."
assert (np.all(max > 0) and np.all(min > 0)) or \
(np.all(max < 0) and np.all(min < 0)), \
"Range cannot include zero."
super().__init__(min, max, shape="LogUniform")
self._sign = int(np.median(max/abs(max)))
def sample(self, size=None):
"""Sample the distribution, with optional `size` argument.
`size` is passed directly to `numpy.random.uniform`, so the behavior
matches the `numpy` behavior.
Args:
size (int): The number of times to sample the distribution.
Defaults to None, in which case a single float is returned.
Otherwise, an array with length `size` is returned.
Returns:
sample (float or numpy.ndarray): The samples from the distribution.
"""
return self._sign * 10**np.random.uniform(
low=np.log10(self._sign*self.min), high=np.log10(self._sign*self.max), size=size)
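# Illustrative check (editorial addition, not part of the original module):
# for lu = LogUniform(1, 1000), np.log10(lu.sample(100000)) is approximately
# uniform on [0, 3), which is the inverse-transform construction described in
# the class docstring; a negative range such as LogUniform(-1000, -1) is
# mirrored about zero via `_sign`.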
class SineSquared(Distribution):
"""Uniform in sin^2 from `min` to `max`.
That is, the squared sines of the values are uniformly distributed.
This is accomplished using inverse transform sampling:
sin^2 (x) ~ U(sin^2 (min), sin^2 (max)).
Only values in the range [0, π/2] are allowed.
"""
def __init__(self, min=0, max=np.pi/2):
"""Creates a SineSqaured distribution with range [min, max).
"""
assert min >= 0 and max <= np.pi/2, "Only values in the range [0, π/2] are allowed."
super().__init__(min, max, shape="SineSquared")
def sample(self, size=None):
"""Sample the distribution, with optional `size` argument.
`size` is passed directly to `numpy.random.uniform`, so the behavior
matches the `numpy` behavior.
Args:
size (int): The number of times to sample the distribution.
Defaults to None, in which case a single float is returned.
Otherwise, an array with length `size` is returned.
Returns:
sample (float or numpy.ndarray): The samples from the distribution.
"""
return np.arcsin(np.sqrt(
np.random.uniform(np.sin(self.min)**2, np.sin(self.max)**2, size)))
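# Illustrative check (editorial addition, not part of the original module):
# for s = SineSquared(), np.sin(s.sample(100000))**2 is approximately uniform
# on [0, 1), matching the inverse-transform construction in the docstring.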
class Composite(Distribution):
"""Composite distribution with specified weights for each part.
The weights are internally renormalized to add to unity.
Example:
c = Composite(
[Uniform(0, 1), LogUniform(1, 10)],
weights=[1, 3])
`c.sample(100)` will return 25 values uniformly sampled from [0, 1)
and 75 values logarithmically sampled from [1, 10).
"""
def __init__(self, distributions, weights=None):
"""Initialize Composite distribution.
Args:
distributions (list-like): List of initialized Distributions.
weights (list-like): List of relative weights corresponding to
each distribution. Defaults to equal weighting.
"""
super().__init__(
min=min([d.min for d in distributions]),
max=max([d.max for d in distributions]),
shape="Composite")
self.distributions = distributions
if weights is None:
weights = np.ones(len(self.distributions))
self.weights = np.asarray(weights)/sum(weights)
def __repr__(self):
"""Print each distribution shape and range.
"""
repr = f"butterpy.Composite distribution with:\n" \
+ "\n".join([f" {w*100:2.0f}%: {d.__repr__()}"
for w, d in zip(self.weights, self.distributions)])
return repr
def _sample_one(self, size=None):
"""Specialized behavior for a single sample,
using `numpy.random.choice` with weights to choose which
distribution to sample.
Args:
size (int): The number of samples, which must be `None` or 1.
For `None`, a single float is returned. For 1, an array
with length 1 is returned.
Returns:
sample (float or numpy.ndarray): The single sample value.
"""
if size not in [None, 1]:
raise ValueError("`size` must be either `None` or 1.")
d = np.random.choice(self.distributions, p=self.weights)
return d.sample(size=size)
def sample(self, size=None, shuffle=True):
"""Sample the Composite distribution, with optional `size` argument.
`size` behavior is intended to mimic that of `numpy.random.uniform`.
Args:
size (int): The number of times to sample the distribution.
Defaults to None, in which case a single float is returned.
Otherwise, an array with length `size` is returned.
shuffle (bool): Whether to shuffle the samples between distributions.
True by default, but False will return, e.g.,
array([*sample1, *sample2, ...]),
with the samples ordered by the distribution they're pulled from.
Returns:
sample (float or numpy.ndarray): The samples from the distribution.
"""
if size is None or size == 1:
return self._sample_one(size)
n_samples = (size*self.weights).round().astype(int)
samples = np.concatenate(
[d.sample(n) for d, n in zip(self.distributions, n_samples)]
)
# If under the requested size by 1, generate a bonus sample
if len(samples) == size - 1:
samples = np.append(samples, self._sample_one())
# Everything past here can be shuffled
if shuffle:
np.random.shuffle(samples)
# If over the requested size by 1, shuffle and truncate
if len(samples) == size + 1:
samples = samples[:-1]
if len(samples) != size:
# If we make it here, something went horribly wrong.
raise ValueError(f"Something has gone horribly wrong. "
f"{size} samples requested; {len(samples)} returned.")
return samples
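# Editorial note: the +/-1 correction above absorbs rounding drift, e.g. three
# equally weighted distributions with size=100 give n_samples = [33, 33, 33]
# (99 draws), so one bonus sample is appended; weights can likewise round one
# sample over, in which case the shuffled array is truncated.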
class Boolean(Distribution):
"""docs
"""
def __init__(self, p=0.5):
"""docs
"""
super().__init__(min=0, max=1, shape="Boolean")
assert 0 <= p <= 1, "`p` must be between 0 and 1."
self.p = p
def __repr__(self):
"""docs
"""
return f"Boolean distribution with p(True) = {self.p}"
    def sample(self, size=None):
        """Sample True (1) with probability `p`, else False (0)."""
        return np.random.choice(
            [1, 0], p=[self.p, 1-self.p], size=size)
class Fixed(Distribution):
"""docs
"""
def __init__(self, v=0):
"""docs
"""
super().__init__(min=v, max=v, shape="Fixed")
self.v = v
def __repr__(self):
"""docs
"""
return f"Fixed distribution at {self.v}"
    def sample(self, size=None):
        """Return the fixed value `v`, or an array of `v` with length `size`."""
        if size is None:
            return self.v
        return np.full(size, self.v)
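# Minimal usage sketch (editorial addition, not part of the original module);
# the seed value is an arbitrary choice for reproducibility.
if __name__ == "__main__":
    np.random.seed(42)
    print(Uniform(0, 1).sample(3))
    print(LogUniform(1, 100).sample(3))
    print(SineSquared().sample(3))
    composite = Composite([Uniform(0, 1), LogUniform(1, 10)], weights=[1, 3])
    print(composite.sample(8))
    print(Boolean(p=0.25).sample(5))
    print(Fixed(3.14).sample(4))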
|
{
"filename": "smp_comp_flux.py",
"repo_name": "dscolnic/Pantheon",
"repo_path": "Pantheon_extracted/Pantheon-master/SCRIPTS/smp_comp_flux.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import plotsetup
from matplotlib import gridspec
from scipy.stats import pearsonr
from pylab import polyfit
plotsetup.fullpaperfig()
#matplotlib.rcParams.update({'font.size': 14})
val1=[]
val2=[]
fil=[]
a=open('smp_comp.txt', 'r')
for line in a:
    x = line.split()
val1=np.append(val1,x[0])
val2=np.append(val2,x[1])
fil=np.append(fil,x[2])
#list1, idsurvey,z1, mass1,x11,c1,sb,mu1, mu1e = np.loadtxt('PS1_Scolnic_test/NewDan101f/FITOPT000+SALT2mu.FITRES', usecols=(1,3, 6,10,17,19,31,34,35), unpack=True, dtype='string', skiprows=12)
list1, z1, sb, mass1, PKMJD, x11, c1, NDOF1, FITPROB, RA, DECL, TGAPMAX1 = np.loadtxt('../DATA/DS17_PS1_Scolnic2/PS1_Spec_DS17/FITOPT000.FITRES', usecols=(1,7,12,13,18,20,22,31,33,34,35,36), unpack=True, dtype=str, skiprows=16)
print(mass1)
print(sb)
sb=[]
listb, filb, massb, surf = np.loadtxt('sbb.txt', usecols=(0,1,2,3), unpack=True, dtype=str)
yy=np.where(massb=='nan')
massb[yy[0]]='25'
for i in range(0,len(list1)):
yy=np.where((('PSc'+list1[i])==listb)&(filb=='r'))
    print('len', list1[i], len(yy[0]))
if len(yy[0])>0: sb=np.append(sb,massb[yy[0]][0])
if len(yy[0])==0: sb=np.append(sb,-9)
#mass=mass[xx[0]]
#print len(mass), len(z1)
#print 'mass', mass
val1, val1e, val2, val2e, mjd, nam, fil = np.loadtxt('smp_comp.txt', usecols=(0,1,2,3,4,5,6), unpack=True, dtype=str)
for i in range(0,len(fil)):
nam[i]=nam[i].replace("PSc","")
#sys_ps1.py:list1, idsurvey1, z1,x11,c1,mb1,mu1, mu1e = np.loadtxt('PS1_Scolnic/NewDan101f/FITOPT000+SALT2mu.FITRES', usecols=(1, 2,6,17,19,21,36,37), unpack=True, dtype='string', skiprows=15)
print('sb', sb)
sbm=[]
massm=[]
for i in range(0,len(fil)):
xx=np.where(nam[i]==(list1))
#print 'xx', xx[0]
temp=sb[xx[0]]
jemp=mass1[xx[0]]
    print('temp', temp)
    print(nam[i], len(xx[0]))
#print 'sb', sb[xx[0]], sbm[i]
if len(xx[0])>0:
sbm.append(temp[0])
massm.append(jemp[0])
if len(xx[0])==0:
sbm.append('-9')
massm.append('-9')
#print 'sbm', sbm[i]
print('sbm', sbm)
mjd=mjd.astype(float)
val1=val1.astype(float)
val2=val2.astype(float)
xx=np.where(val1>6000.0)
#print val1[xx[0]]
#print nam[xx[0]]
val1e=val1e.astype(float)
val2e=val2e.astype(float)
#val2=(val2-val1)
weights=val2*0+1.0
xx=np.where(np.absolute(val2)>300)
y1=nam[xx[0]]
y2=fil[xx[0]]
#for i in range(0,len(y1)):
# print y1[i], y2[i]
#stop
weights2=weights*0
bins = np.linspace(18,25,13)
sbm=np.array(sbm)
sbm=sbm.astype(float)
massm=np.array(massm)
massm=massm.astype(float)
for i in range(0,len(nam)):
if (('50221' in nam[i])|('110460' in nam[i])|('80735' in nam[i])|(mjd[i]<(-20))|(mjd[i]>(60))): weights2[i]=1
#xx=np.where((weights2!=1)&(fil=='g'))
#xx=np.where((weights2!=1)&(val1/val1e>7)&(val2/val2e>7)&(fil=='z'))
xx=np.where((weights2!=1))
xx2=np.where((weights2!=1))
#print xx[0]
#stop
#xx=np.where(weights2!=1)
val2=val2[xx[0]]
val1=val1[xx[0]]
#val1=val1
weights=weights[xx[0]]
val1e=val1e[xx[0]]
val2e=val2e[xx[0]]
mjd=mjd[xx[0]]
sbm=sbm[xx[0]]
print('sbm', np.median(sbm))
mjd=mjd.astype(float)
massm=massm[xx[0]]
mval1=27.5-2.5*np.log10(val1)
mval2=27.5-2.5*np.log10(val2)
plt.figure(1)
gs1 = gridspec.GridSpec(1, 2)
gs1.update(left=0.1, right=0.48,wspace=0,bottom=0.15,top=0.95)
ax1= plt.subplot(gs1[0])
ax2= plt.subplot(gs1[1])
gs2 = gridspec.GridSpec(1, 2)
gs2.update(left=0.60, right=0.98,wspace=0,bottom=0.15,top=0.95)
ax3= plt.subplot(gs2[0])
ax4= plt.subplot(gs2[1])
xx=np.where((sbm<21.5)&(sbm>0))
mval2=(val1[xx[0]]-val2[xx[0]])/val1e[xx[0]]-0.15
ax1.set_xlabel("SN "+r'$r_{PS1}$'+" mag")
ax1.set_ylabel('(SMP-DIFF)/(Unc.)')
ax1.set_ylim(-3.5,3.5)
ax1.set_xlim(18,24.9)
ax1.plot(mval1[xx[0]],mval2,'.r',alpha=0.25)
print('sigma bias!!!', np.median((val1[xx[0]]-val2[xx[0]])/val1e[xx[0]]), np.median(val1[xx[0]]-val2[xx[0]]), np.median((val1[xx[0]]-val2[xx[0]]-1.0)/val1e[xx[0]]))
#ax[0,0].plot(mval1[xx[0]],(val1[xx[0]]-val2[xx[0]])/val1[xx[0]],'.r')
#val2=mval2[xx[0]]-mval1[xx[0]]
#mval2=(val1[xx[0]]-val2[xx[0]])/val1[xx[0]]
mval1x=mval1[xx[0]]
print('mval1x', mval1x)
bins = np.linspace(18,25,13)
digitized = np.digitize(mval1[xx[0]], bins)
bins = np.linspace(18,25,13)
print('bins', bins)
bin_means = [np.median(mval2[digitized == i]) for i in range(0, len(bins))]
bin_z = [np.median(mval1x[digitized == i]) for i in range(0, len(bins))]
bin_std = [np.std(mval2[digitized == i])/np.sqrt(len(mval2[digitized == i])) for i in range(0, len(bins))]
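# Editorial note on the binning pattern above: np.digitize maps each magnitude
# to the index of its bin edge, and the comprehensions then take the per-bin
# median and the standard error of the mean (std/sqrt(N)). For example,
#   np.digitize([18.4, 21.0], np.linspace(18, 25, 13))
# returns array([1, 6]): 18.4 falls between the first two edges, 21.0 between
# the sixth and seventh.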
ax1.errorbar(bin_z, bin_means, yerr=bin_std, fmt='ko', ecolor='b', color='b',label='Binned Data')
ax1.legend(loc='upper left',prop={'size':10})
ax3.text(22,9,"High")
ax3.text(22,6,"Surface")
ax3.text(22,3,"Brightness")
ax4.text(22,9,"Low")
ax4.text(22,6,"Surface")
ax4.text(22,3,"Brightness")
ax1.text(19.8,-2.3,"High")
ax1.text(19.8,-2.7,"Surface")
ax1.text(19.8,-3.1,"Brightness")
ax2.text(19.8,-2.3,"Low")
ax2.text(19.8,-2.7,"Surface")
ax2.text(19.8,-3.1,"Brightness")
xx=np.where(sbm>10)
xx=np.where((sbm>21.5)&(sbm>0))
mval2=(val1[xx[0]]-val2[xx[0]])/val1e[xx[0]]-0.15
print('sxx', xx[0])
ax2.set_xlabel("SN "+r'$r_{PS1}$'+" mag")
#ax[0,1].set_ylabel('Delta Mag')
ax2.set_ylim(-3.5,3.5)
ax2.set_xlim(18,24.9)
ax2.plot(mval1[xx[0]],mval2,'.r',alpha=0.25)
print('sigma bias!!!', np.median((val1[xx[0]]-val2[xx[0]])/val1e[xx[0]]), np.median(val1[xx[0]]-val2[xx[0]]), np.median((val1[xx[0]]-val2[xx[0]]-1.0)/val1e[xx[0]]))
#ax[0,1].plot(mval1[xx[0]],(val1[xx[0]]-val2[xx[0]])/val1[xx[0]],'.r')
#val2=mval2[xx[0]]-mval1[xx[0]]
#mval2=(val1[xx[0]]-val2[xx[0]])/val1[xx[0]]
mval1x=mval1[xx[0]]
bins = np.linspace(18,25, 13)
digitized = np.digitize(mval1[xx[0]], bins)
bin_means = [np.median(mval2[digitized == i]) for i in range(0, len(bins))]
bin_z = [np.median(mval1x[digitized == i]) for i in range(0, len(bins))]
bin_std = [np.std(mval2[digitized == i])/np.sqrt(len(mval2[digitized == i])) for i in range(0, len(bins))]
ax2.errorbar(bin_z, bin_means, yerr=bin_std, fmt='ko', ecolor='b', color='b',label='D15 Sim')
line, = ax1.plot(range(1,59999), np.zeros(59998), lw=2,color='black')
xx=np.where((sbm<21.5)&(sbm>0))
ax3.set_xlabel('SMP Phot. Unc.')
ax3.set_ylabel('DIFF Phot. Unc.')
ax4.set_xlabel('SMP Phot. Unc.')
ax3.set_ylim(0,60)
ax3.set_xlim(0,59.5)
ax3.plot(val1e[xx[0]],val2e[xx[0]],'.r',alpha=0.25)
#zz3=(val1e[xx[0]]<40)
val2=val2e[xx[0]]
mval1x=val1e[xx[0]]
print('mval1x', mval1x)
bins = np.linspace(0,70,10)
digitized = np.digitize(val1e[xx[0]], bins)
bins = np.linspace(18,25,13)
print('bins', bins)
bin_means = [np.median(val2[digitized == i]) for i in range(0, len(bins))]
bin_z = [np.median(mval1x[digitized == i]) for i in range(0, len(bins))]
bin_std = [np.std(val2[digitized == i])/np.sqrt(len(val2[digitized == i])) for i in range(0, len(bins))]
ax3.errorbar(bin_z, bin_means, yerr=bin_std, fmt='ko', ecolor='b', color='b',label='D15 Sim')
#line, = ax3.plot(range(1,59999), np.zeros(59998), lw=2,color='black',linestyle='--')
print(bins)  # original printed the builtin `bin`, almost certainly a typo for `bins`
#zz=np.isfinite(bin_z)
#print bin_z[zz],bin_means[zz]
print(bin_z, bin_means)
print('pearson alpha', pearsonr(bin_z[1:6], bin_means[1:6]))
from scipy.stats import linregress
m, b, r_value, p_value, std_err = linregress(bin_z[1:6],bin_means[1:6])
print(m, b, r_value, p_value, std_err)
pos=[]
for i in range(1,299):
pos.append(i/100.0)
line, = ax3.plot(range(1,299), range(1,299), lw=2,color='black')
#line, = ax3.plot(np.arange(1,299,1)*.9, range(1,299), lw=2,color='black',linestyle='--')
xx=np.where((sbm>21.5))
#ax[1,1].set_xlabel('Mag')
#ax[1,1].set_ylabel('Delta Mag')
ax4.set_ylim(0,60)
ax4.set_xlim(0,59.5)
ax4.plot(val1e[xx[0]],val2e[xx[0]],'.r',alpha=0.25)
print('pearson alpha', pearsonr(val1e[xx[0]], val2e[xx[0]]))
m,b = polyfit(val1e[xx[0]],val2e[xx[0]], 1)
print(m, b)
val2=val2e[xx[0]]
mval1x=val1e[xx[0]]
print('mval1x', mval1x)
bins = np.linspace(0,70,10)
digitized = np.digitize(val1e[xx[0]], bins)
bins = np.linspace(18,25,13)
print('bins', bins)
bin_means = [np.median(val2[digitized == i]) for i in range(0, len(bins))]
bin_z = [np.median(mval1x[digitized == i]) for i in range(0, len(bins))]
bin_std = [np.std(val2[digitized == i])/np.sqrt(len(val2[digitized == i])) for i in range(0, len(bins))]
ax4.errorbar(bin_z, bin_means, yerr=bin_std, fmt='ko', ecolor='b', color='b',label='D15 Sim')
line, = ax4.plot(range(1,59999), range(1,59999), lw=2,color='black')
#line, = ax4.plot(np.arange(1,299)*.9, range(1,299), lw=2,color='black',linestyle='--')
line, = ax1.plot(range(1,59999), np.zeros(59998), lw=2,color='black')
line, = ax2.plot(range(1,59999), np.zeros(59998), lw=2,color='black')
pos=[]
for i in range(1,299):
pos.append(i/100.0)
ax1.set_yticks([-3,-2,-1,0,1,2,3])
ax1.set_yticklabels(['-3','-2','-1','0','1','2','3'])
ax2.set_yticks([-3,-2,-1,0,1,2,3])
ax2.set_yticklabels(['','-','-','','','',''])
ax3.set_yticks([0,20,40,60])
ax3.set_yticklabels(['0','20','40','60'])
ax4.set_yticks([0,20,40,60])
ax4.set_yticklabels(['','','',''])
#plt.tight_layout()
plt.savefig('smp_comp_flux.png')  # save before show, so closing the window does not leave a blank figure
plt.show()
raise SystemExit  # intentional early exit; the original halted here via the undefined name 'stop'
plt.figure(1)
val1, val1e, val2, val2e, mjd, nam, fil = np.loadtxt('smp_comp.txt', usecols=(0,1,2,3,4,5,6), unpack=True, dtype=str)
mjd=mjd.astype(float)
val1=val1.astype(float)
val2=val2.astype(float)
val1e=val1e.astype(float)
val2e=val2e.astype(float)
val1e=val1e*1.1
fig, ax = plt.subplots(2,1)
xx=np.where(np.absolute(mjd)>100)
rvec=np.random.randn(len(xx[0]))
n, bins, patches = ax[0].hist(rvec, bins=30,range=[-4,4], facecolor='r', alpha=0.25)
n, bins, patches = ax[0].hist((val1[xx[0]])/val1e[xx[0]], bins=30,range=[-4,4], facecolor='g', alpha=0.75)
ax[0].set_xlabel('Normalized Flux')
ax[0].set_ylabel('#')
ax[0].set_title('SMP '+str(1.48*np.median(np.absolute((val1[xx[0]])/val1e[xx[0]]))))
n, bins, patches = ax[1].hist(rvec, bins=30,range=[-4,4], facecolor='r', alpha=0.25)
n, bins, patches = ax[1].hist((val2[xx[0]])/val2e[xx[0]], bins=30,range=[-4,4], facecolor='r', alpha=0.75)
ax[1].set_xlabel('Normalized Flux')
ax[1].set_ylabel('#')
ax[1].set_title('Diff '+str(1.48*np.median(np.absolute((val2[xx[0]])/val2e[xx[0]]))))
print(1.48*np.median(np.absolute((val1[xx[0]])/val1e[xx[0]])))
print(1.48*np.median(np.absolute((val2[xx[0]])/val2e[xx[0]])))
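# Editorial note: 1.48 * median(|x|) is the standard robust (MAD-based)
# estimate of a Gaussian sigma, since median(|x|) ~ 0.6745 * sigma for
# x ~ N(0, sigma); values near 1 here indicate well-calibrated uncertainties.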
plt.tight_layout()
plt.savefig('smp_comp_flux_hist.png')  # save before show, as above
plt.show()
|
{
"filename": "setup_package.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/ogle/tests/setup_package.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
def get_package_data():
paths_test = [os.path.join('data', '*.txt')]
return {'astroquery.ogle.tests': paths_test}
|
|
{
"filename": "pytestplugin.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/hypothesis/py3/hypothesis/extra/pytestplugin.py",
"type": "Python"
}
|
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""
Stub for users who manually load our pytest plugin.
The plugin implementation is now located in a top-level module outside the main
hypothesis tree, so that Pytest can load the plugin without thereby triggering
the import of Hypothesis itself (and thus loading our own plugins).
"""
from _hypothesis_pytestplugin import * # noqa
|
|
{
"filename": "test_util.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_Util/test_util.py",
"type": "Python"
}
|
__author__ = "sibirrer"
import lenstronomy.Util.util as util
import numpy as np
import pytest
import numpy.testing as npt
import unittest
def test_isiterable():
z = np.array([0, 1])
boolean = util.isiterable(z)
assert boolean is True
z = 1
boolean = util.isiterable(z)
assert boolean is False
def test_estimate_theta_E():
x = np.array([-0.45328229, 0.57461556, 0.53757501, -0.42312438])
y = np.array([0.69582971, -0.51226356, 0.37577509, -0.40245467])
approx = util.approx_theta_E(x, y)
npt.assert_array_less(approx - 1, 0.2)
def test_sort_img_index():
ximg, yimg = np.array([1, 2, 3, 4]), np.array([0, 0, 1, 2])
xref, yref = np.array([2, 3, 1, 4]), np.array([0, 1, 0, 2])
indexes = util.sort_image_index(ximg, yimg, xref, yref)
npt.assert_allclose(ximg[indexes], xref)
npt.assert_allclose(yimg[indexes], yref)
xref, yref = np.array([2, 3, 1, 4]), np.array([0, 1, 0, 2])
indexes = util.sort_image_index(xref, yref, xref, yref)
npt.assert_allclose(np.array(indexes), [0, 1, 2, 3])
def test_map_coord2pix():
ra = 0
dec = 0
x_0 = 1
y_0 = -1
M = np.array([[1, 0], [0, 1]])
x, y = util.map_coord2pix(ra, dec, x_0, y_0, M)
assert x == 1
assert y == -1
ra = [0, 1, 2]
dec = [0, 2, 1]
x, y = util.map_coord2pix(ra, dec, x_0, y_0, M)
assert x[0] == 1
assert y[0] == -1
assert x[1] == 2
M = np.array([[0, 1], [1, 0]])
x, y = util.map_coord2pix(ra, dec, x_0, y_0, M)
assert x[1] == 3
assert y[1] == 0
def test_make_grid():
numPix = 11
deltapix = 1.0
grid = util.make_grid(numPix, deltapix)
assert grid[0][0] == -5
assert np.sum(grid[0]) == 0.0
x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=2.0)
assert np.sum(x_grid) == 0.0
assert x_grid[0] == -5.25
x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=1, left_lower=True)
assert x_grid[0] == 0.0
assert y_grid[0] == 0.0
# Similar tests for a non-rectangular grid
x_grid, y_grid = util.make_grid((numPix, numPix - 1), deltapix)
assert x_grid[0] == -5.0
assert y_grid[0] == -4.5
assert np.sum(x_grid) == np.sum(y_grid) == 0
x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=2.0)
assert np.sum(x_grid) == np.sum(y_grid) == 0
assert x_grid[0] == -5.25
x_grid, y_grid = util.make_grid(numPix, deltapix, left_lower=True)
assert x_grid[0] == 0
assert y_grid[0] == 0
def test_make_grid_transform():
numPix = 11
theta = np.pi / 2
deltaPix = 0.05
Mpix2coord = (
np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
* deltaPix
)
ra_coord, dec_coord = util.make_grid_transformed(numPix, Mpix2coord)
ra2d = util.array2image(ra_coord)
assert ra2d[5, 5] == 0
assert ra2d[4, 5] == deltaPix
npt.assert_almost_equal(ra2d[5, 4], 0, decimal=10)
def test_grid_with_coords():
numPix = 11
deltaPix = 1.0
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
Mpix2coord,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
numPix, deltaPix, subgrid_res=1, left_lower=False
)
ra = 0
dec = 0
x, y = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix)
assert x == 5
assert y == 5
numPix = 11
deltaPix = 0.1
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
Mpix2coord,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
numPix, deltaPix, subgrid_res=1, left_lower=False
)
ra = 0
dec = 0
x, y = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix)
assert x == 5
assert y == 5
numPix = 11
deltaPix = 1.0
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
Mpix2coord,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
numPix, deltaPix, subgrid_res=1, left_lower=False, inverse=True
)
x_, y_ = 0, 0
ra, dec = util.map_coord2pix(x_, y_, ra_at_xy_0, dec_at_xy_0, Mpix2coord)
assert ra == 5
assert dec == -5
numPix = 11
deltaPix = 1.0
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
Mpix2coord,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
numPix, deltaPix, subgrid_res=1, left_lower=False, inverse=False
)
x_, y_ = 0, 0
ra, dec = util.map_coord2pix(x_, y_, ra_at_xy_0, dec_at_xy_0, Mpix2coord)
assert ra == -5
assert dec == -5
numPix = 11
deltaPix = 0.1
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
Mpix2coord,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
numPix, deltaPix, subgrid_res=1, left_lower=False
)
x_, y_ = 0, 0
ra, dec = util.map_coord2pix(x_, y_, ra_at_xy_0, dec_at_xy_0, Mpix2coord)
assert ra == 0.5
assert dec == -0.5
x__, y__ = util.map_coord2pix(ra, dec, x_at_radec_0, y_at_radec_0, Mcoord2pix)
assert x__ == x_
assert y__ == y_
numPix = 11
deltaPix = 0.1
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
Mpix2coord,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
numPix, deltaPix, subgrid_res=1, left_lower=True
)
assert ra_at_xy_0 == 0
assert dec_at_xy_0 == 0
numPix = 11
deltaPix = 0.1
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
Mpix2coord,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
numPix, deltaPix, subgrid_res=1, left_lower=True, center_ra=2, center_dec=3
)
assert ra_at_xy_0 == 2
assert dec_at_xy_0 == 3
def test_centered_coordinate_system():
num_pix = 51
delta_pix = 0.1
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
transform_pix2angle,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
num_pix, delta_pix, subgrid_res=1, left_lower=False, inverse=False
)
kwargs_grid = util.centered_coordinate_system(num_pix, transform_pix2angle)
npt.assert_almost_equal(kwargs_grid["ra_at_xy_0"], ra_at_xy_0, decimal=7)
npt.assert_almost_equal(kwargs_grid["dec_at_xy_0"], dec_at_xy_0, decimal=7)
(
x_grid,
y_grid,
ra_at_xy_0,
dec_at_xy_0,
x_at_radec_0,
y_at_radec_0,
transform_pix2angle,
Mcoord2pix,
) = util.make_grid_with_coordtransform(
num_pix, delta_pix, subgrid_res=1, left_lower=False, inverse=True
)
kwargs_grid = util.centered_coordinate_system(num_pix, transform_pix2angle)
npt.assert_almost_equal(kwargs_grid["ra_at_xy_0"], ra_at_xy_0, decimal=7)
npt.assert_almost_equal(kwargs_grid["dec_at_xy_0"], dec_at_xy_0, decimal=7)
from lenstronomy.Data.coord_transforms import Coordinates
theta = 50 / 360 * 2 * np.pi
transform_pix2angle = np.array(
[
[np.cos(theta) * delta_pix, -np.sin(theta) * delta_pix],
[np.sin(theta) * delta_pix, np.cos(theta) * delta_pix],
]
)
kwargs_grid = util.centered_coordinate_system(num_pix, transform_pix2angle)
coords = Coordinates(**kwargs_grid)
x, y = coords.map_coord2pix(ra=0, dec=0)
npt.assert_almost_equal(x, (num_pix - 1) / 2, decimal=7)
npt.assert_almost_equal(y, (num_pix - 1) / 2, decimal=7)
def test_array2image():
array = np.linspace(1, 100, 100)
image = util.array2image(array)
assert image[9][9] == 100
assert image[0][9] == 10
def test_image2array():
image = np.zeros((10, 10))
image[1, 2] = 1
array = util.image2array(image)
assert array[12] == 1
def test_image2array2image():
image = np.zeros((20, 10))
nx, ny = np.shape(image)
image[1, 2] = 1
array = util.image2array(image)
image_new = util.array2image(array, nx, ny)
assert image_new[1, 2] == image[1, 2]
def test_array2cube():
array = np.linspace(1, 200, 200)
image = util.array2cube(array, 2, 100)
assert image[0][9][9] == 100
assert image[1][0][9] == 110
def test_cube2array():
    cube = np.zeros((2, 10, 10))
    cube[1, 2, 2] = 1
    array = util.cube2array(cube)
assert array[122] == 1
def test_cube2array2cube():
cube = np.zeros((2, 10, 10))
ns, nx, ny = np.shape(cube)
assert nx == ny # condition required
nxy = nx * ny
cube[1, 2, 2] = 1
array = util.cube2array(cube)
cube_new = util.array2cube(array, ns, nxy)
assert cube_new[1, 2, 2] == cube[1, 2, 2]
def test_get_axes():
numPix = 11
deltapix = 0.1
x_grid, y_grid = util.make_grid(numPix, deltapix)
x_axes, y_axes = util.get_axes(x_grid, y_grid)
npt.assert_almost_equal(x_axes[0], -0.5, decimal=12)
npt.assert_almost_equal(y_axes[0], -0.5, decimal=12)
npt.assert_almost_equal(x_axes[1], -0.4, decimal=12)
npt.assert_almost_equal(y_axes[1], -0.4, decimal=12)
x_grid += 1
x_axes, y_axes = util.get_axes(x_grid, y_grid)
npt.assert_almost_equal(x_axes[0], 0.5, decimal=12)
npt.assert_almost_equal(y_axes[0], -0.5, decimal=12)
def test_symmetry():
array = np.linspace(0, 10, 100)
image = util.array2image(array)
array_new = util.image2array(image)
assert array_new[42] == array[42]
def test_displaceAbs():
x = np.array([0, 1, 2])
y = np.array([3, 2, 1])
sourcePos_x = 1
sourcePos_y = 2
result = util.displaceAbs(x, y, sourcePos_x, sourcePos_y)
assert result[0] == np.sqrt(2)
assert result[1] == 0
def test_get_distance():
x_mins = np.array([1.0])
y_mins = np.array([1.0])
x_true = np.array([0.0])
y_true = np.array([0.0])
dist = util.get_distance(x_mins, y_mins, x_true, y_true)
assert dist == 2
x_mins = np.array([1.0, 2])
y_mins = np.array([1.0, 1])
x_true = np.array([0.0])
y_true = np.array([0.0])
dist = util.get_distance(x_mins, y_mins, x_true, y_true)
assert dist == 10000000000
x_mins = np.array([1.0, 2])
y_mins = np.array([1.0, 1])
x_true = np.array([0.0, 1])
y_true = np.array([0.0, 2])
dist = util.get_distance(x_mins, y_mins, x_true, y_true)
assert dist == 6
x_mins = np.array([1.0, 2, 0])
y_mins = np.array([1.0, 1, 0])
x_true = np.array([0.0, 1, 1])
y_true = np.array([0.0, 2, 1])
dist = util.get_distance(x_mins, y_mins, x_true, y_true)
assert dist == 2
def test_selectBest():
array = np.array([4, 3, 6, 1, 3])
select = np.array([2, 4, 7, 3, 3])
numSelect = 4
array_select = util.selectBest(array, select, numSelect, highest=True)
assert array_select[0] == 6
assert array_select[3] == 1
array_select = util.selectBest(array, select, numSelect, highest=False)
assert array_select[0] == 3
assert array_select[3] == 4
array_select = util.selectBest(array, select, numSelect=10, highest=False)
assert len(array_select) == len(array)
def test_select_best():
array = np.array([4, 3, 6, 1, 3])
select = np.array([2, 4, 7, 3, 3])
numSelect = 4
array_select = util.select_best(array, select, numSelect, highest=True)
assert array_select[0] == 6
assert array_select[3] == 1
array_select = util.select_best(array, select, numSelect, highest=False)
assert array_select[0] == 3
assert array_select[3] == 4
array_select = util.select_best(array, select, num_select=10, highest=False)
assert len(array_select) == len(array)
def test_compare_distance():
x_mapped = np.array([4, 3, 6, 1, 3])
y_mapped = np.array([2, 4, 7, 3, 3])
X2 = util.compare_distance(x_mapped, y_mapped)
assert X2 == 140
def test_min_square_dist():
x_1 = np.array([4, 3, 6, 1, 3])
y_1 = np.array([2, 4, 7, 3, 3])
x_2 = np.array([4, 3, 6, 1, 3])
y_2 = np.array([2, 3, 7, 3, 3])
dist = util.min_square_dist(x_1, y_1, x_2, y_2)
assert dist[0] == 0
assert dist[1] == 1
def test_neighbor_select_fast():
a = np.ones(100)
a[41] = 0
x = np.linspace(0, 99, 100)
y = np.linspace(0, 99, 100)
x_mins, y_mins, values = util.local_minima_2d(a, x, y)
assert x_mins[0] == 41
assert y_mins[0] == 41
assert values[0] == 0
def test_neighborSelect():
a = np.ones(100)
a[41] = 0
x = np.linspace(0, 99, 100)
y = np.linspace(0, 99, 100)
x_mins, y_mins, values = util.neighborSelect(a, x, y)
assert x_mins[0] == 41
assert y_mins[0] == 41
assert values[0] == 0
def test_make_subgrid():
numPix = 101
deltapix = 1
x_grid, y_grid = util.make_grid(numPix, deltapix, subgrid_res=1)
x_sub_grid, y_sub_grid = util.make_subgrid(x_grid, y_grid, subgrid_res=2)
assert np.sum(x_grid) == 0
assert len(x_grid) == 101 * 101
assert x_sub_grid[0] == -50.25
assert y_sub_grid[17] == -50.25
x_sub_grid_new, y_sub_grid_new = util.make_subgrid(x_grid, y_grid, subgrid_res=4)
assert x_sub_grid_new[0] == -50.375
def test_fwhm2sigma():
fwhm = 0.5
sigma = util.fwhm2sigma(fwhm)
assert sigma == fwhm / (2 * np.sqrt(2 * np.log(2)))
def test_points_on_circle():
radius = 1
points = 8
ra, dec = util.points_on_circle(radius, points, connect_ends=True)
assert ra[0] == 1
assert dec[0] == 0
ra_, dec_ = util.points_on_circle(radius, points - 1, connect_ends=False)
npt.assert_almost_equal(ra[:-1], ra_, decimal=8)
npt.assert_almost_equal(dec[:-1], dec_, decimal=8)
def test_convert_bool_list():
bool_list = util.convert_bool_list(n=10, k=None)
assert len(bool_list) == 10
    assert bool_list[0] is True
bool_list = util.convert_bool_list(n=10, k=3)
assert len(bool_list) == 10
assert bool_list[3] is True
assert bool_list[2] is False
bool_list = util.convert_bool_list(n=10, k=[3, 7])
assert len(bool_list) == 10
assert bool_list[3] is True
assert bool_list[7] is True
assert bool_list[2] is False
bool_list = util.convert_bool_list(n=3, k=[False, False, True])
assert len(bool_list) == 3
assert bool_list[0] is False
assert bool_list[1] is False
assert bool_list[2] is True
bool_list = util.convert_bool_list(n=3, k=[])
assert len(bool_list) == 3
assert bool_list[0] is False
def test_area():
r = 1
x_, y_ = util.points_on_circle(radius=r, connect_ends=True, num_points=1000)
vs = np.dstack([x_, y_])[0]
a = util.area(vs)
npt.assert_almost_equal(a, np.pi * r**2, decimal=3)
class TestRaise(unittest.TestCase):
def test_raise(self):
with self.assertRaises(ValueError):
array = np.ones(5)
util.array2image(array)
with self.assertRaises(ValueError):
array = np.ones((2, 2))
util.array2cube(array, 2, 2)
with self.assertRaises(ValueError):
x, y = np.ones(6), np.ones(6)
util.get_axes(x, y)
with self.assertRaises(ValueError):
util.selectBest(
array=np.ones(6), criteria=np.ones(5), numSelect=1, highest=True
)
with self.assertRaises(ValueError):
util.select_best(
array=np.ones(6), criteria=np.ones(5), num_select=1, highest=True
)
with self.assertRaises(ValueError):
util.convert_bool_list(n=2, k=[3, 7])
with self.assertRaises(ValueError):
util.convert_bool_list(n=3, k=[True, True])
with self.assertRaises(ValueError):
util.convert_bool_list(n=2, k=[0.1, True])
def test_raise_make_grid(self):
with self.assertRaises(ValueError):
util.make_grid(numPix=1.1, deltapix=1)
with self.assertRaises(ValueError):
util.make_grid(numPix=[1.1, 1], deltapix=1)
if __name__ == "__main__":
pytest.main()
|
|
{
"filename": "test_pipeline_hst.py",
"repo_name": "gbrammer/grizli",
"repo_path": "grizli_extracted/grizli-master/test_pipeline_hst.py",
"type": "Python"
}
|
import unittest
import os
import glob
import yaml
import numpy as np
import matplotlib.pyplot as plt
import mastquery
from grizli import utils, prep, multifit, GRIZLI_PATH
from grizli.pipeline import auto_script
TEST_HOME = os.getcwd()
HOME_PATH = f'{os.getcwd()}/PipelineTest'
if not os.path.exists(HOME_PATH):
os.mkdir(HOME_PATH)
os.chdir(HOME_PATH)
root = ''
kwargs = ''
visits = None
groups = None
info = None
visit_prep_args = None
grp = None
def test_config():
"""
Fetch config files if CONF not found
"""
new = []
for subd in ['iref','jref','CONF']:
conf_path = os.path.join(GRIZLI_PATH, subd)
if not os.path.exists(conf_path):
new.append(subd)
os.mkdir(conf_path)
if 'CONF' in new:
print(f'Download config and calib files to {conf_path}')
utils.fetch_default_calibs(get_acs=False)
utils.fetch_config_files(get_epsf=True)
files = glob.glob(f'{conf_path}/*')
print('Files: ', '\n'.join(files))
assert(os.path.exists(os.path.join(conf_path,
'G141.F140W.V4.32.conf')))
def test_query():
"""
"""
from mastquery import query, overlaps
global root
# "parent" query is grism exposures in GO-11359. Can also query the archive on position with
# box=[ra, dec, radius_in_arcmin]
parent = query.run_query(box=None, proposal_id=[11359],
instruments=['WFC3/IR', 'ACS/WFC'],
filters=['G102','G141'])
# ### "overlap" query finds anything that overlaps with the exposures
# ### in the parent query
# extra = query.DEFAULT_EXTRA # ignore calibrations, etc.
# ## To match *just* the grism visits, add, e.g., the following:
# extra += ["TARGET.TARGET_NAME LIKE 'WFC3-ERSII-G01'"]
tabs = overlaps.find_overlaps(parent, buffer_arcmin=0.01,
filters=['F140W','G141'],
proposal_id=[11359],
instruments=['WFC3/IR','WFC3/UVIS','ACS/WFC'],
extra={'target_name':'WFC3-ERSII-G01'},
close=False)
root = tabs[0].meta['NAME']
def test_set_kwargs():
"""
"""
global kwargs
from grizli.pipeline.auto_script import get_yml_parameters
kwargs = get_yml_parameters()
kwargs['is_parallel_field'] = False
kwargs['fetch_files_args']['reprocess_clean_darks'] = False
kwargs['parse_visits_args']['combine_same_pa'] = False
def test_fetch_files():
"""
"""
auto_script.fetch_files(field_root=root, HOME_PATH=HOME_PATH,
**kwargs['fetch_files_args'])
assert(len(glob.glob(f'{HOME_PATH}/{root}/RAW/*raw.fits')) == 8)
assert(len(glob.glob(f'{HOME_PATH}/{root}/RAW/*flt.fits')) == 8)
def test_parse_visits():
"""
"""
global visits, groups, info
os.chdir(os.path.join(HOME_PATH, root, 'Prep'))
visits, groups, info = auto_script.parse_visits(field_root=root,
**kwargs['parse_visits_args'])
assert(len(visits) == 2)
def test_preprocess():
"""
"""
global kwargs, visit_prep_args
visit_prep_args = kwargs['visit_prep_args']
preprocess_args = kwargs['preprocess_args']
# Maximum shift for "tweakshifts" relative alignment
tweak_max_dist = 1.
if 'tweak_max_dist' not in visit_prep_args:
visit_prep_args['tweak_max_dist'] = tweak_max_dist
# Fit and subtract a SExtractor-like background to each visit
visit_prep_args['imaging_bkg_params'] = {'bh': 256, 'bw': 256,
'fh': 3, 'fw': 3,
'pixel_scale': 0.1,
'get_median': False}
# Alignment reference catalogs, searched in this order
visit_prep_args['reference_catalogs'] = ['LS_DR9', 'PS1', 'GAIA',
'SDSS','WISE']
os.chdir(os.path.join(HOME_PATH, root, 'Prep'))
auto_script.preprocess(field_root=root, HOME_PATH=HOME_PATH,
visit_prep_args=visit_prep_args, **preprocess_args)
assert(os.path.exists('wfc3-ersii-g01-b6o-23-119.0-f140w_drz_sci.fits'))
assert(os.path.exists('wfc3-ersii-g01-b6o-23-119.0-f140w_shifts.log'))
assert(os.path.exists('wfc3-ersii-g01-b6o-23-119.0-f140w_wcs.log'))
# def test_fine_alignment():
# """
# """
# global kwargs
# fine_alignment_args = kwargs['fine_alignment_args']
#
# # Align to GAIA with proper motions evaluated at
# # each separate visit execution epoch
# fine_alignment_args['catalogs'] = ['GAIA']
# fine_alignment_args['gaia_by_date'] = True
#
# os.chdir(os.path.join(HOME_PATH, root, 'Prep'))
#
# out = auto_script.fine_alignment(field_root=root, HOME_PATH=HOME_PATH,
# **fine_alignment_args)
#
# visit_file = auto_script.find_visit_file(root=root)
# print('Update exposure footprints in {0}'.format(visit_file))
# res = auto_script.get_visit_exposure_footprints(root=root,
# check_paths=['./', '../RAW'])
def test_make_mosaics():
"""
"""
global visits, groups, info
# Drizzle mosaics in each filter and combine all IR filters
mosaic_args = kwargs['mosaic_args']
mosaic_pixfrac = mosaic_args['mosaic_pixfrac']
os.chdir(os.path.join(HOME_PATH, root, 'Prep'))
preprocess_args = kwargs['preprocess_args']
combine_all_filters=True
if len(glob.glob('{0}-ir_dr?_sci.fits'.format(root))) == 0:
## Mosaic WCS
wcs_ref_file = '{0}_wcs-ref.fits'.format(root)
if not os.path.exists(wcs_ref_file):
auto_script.make_reference_wcs(info, output=wcs_ref_file,
get_hdu=True,
**mosaic_args['wcs_params'])
if combine_all_filters:
all_filters = mosaic_args['ir_filters']
all_filters += mosaic_args['optical_filters']
auto_script.drizzle_overlaps(root,
filters=all_filters,
min_nexp=1, pixfrac=mosaic_pixfrac,
make_combined=True,
ref_image=wcs_ref_file,
drizzle_filters=False)
## IR filters
if 'fix_stars' in visit_prep_args:
fix_stars = visit_prep_args['fix_stars']
else:
fix_stars = False
auto_script.drizzle_overlaps(root, filters=mosaic_args['ir_filters'],
min_nexp=1, pixfrac=mosaic_pixfrac,
make_combined=(not combine_all_filters),
ref_image=wcs_ref_file,
include_saturated=fix_stars)
## Mask diffraction spikes
mask_spikes=True
ir_mosaics = glob.glob('{0}-f*drz_sci.fits'.format(root))
if (len(ir_mosaics) > 0) & (mask_spikes):
cat = prep.make_SEP_catalog('{0}-ir'.format(root), threshold=4,
save_fits=False,
column_case=str.lower)
selection = (cat['mag_auto'] < 17) & (cat['flux_radius'] < 4.5)
for visit in visits:
filt = visit['product'].split('-')[-1]
if filt[:2] in ['f0','f1']:
auto_script.mask_IR_psf_spikes(visit=visit,
selection=selection,
cat=cat, minR=5, dy=5)
## Remake mosaics
auto_script.drizzle_overlaps(root,
filters=mosaic_args['ir_filters'],
min_nexp=1, pixfrac=mosaic_pixfrac,
make_combined=(not combine_all_filters),
ref_image=wcs_ref_file,
include_saturated=True)
# Fill IR filter mosaics with scaled combined data so they can be used
# as grism reference
fill_mosaics = mosaic_args['fill_mosaics']
if fill_mosaics:
if fill_mosaics == 'grism':
# Only fill mosaics if grism filters exist
has_grism = utils.column_string_operation(info['FILTER'],
['G141','G102','G800L'],
'count', 'or').sum() > 0
if has_grism:
auto_script.fill_filter_mosaics(root)
else:
auto_script.fill_filter_mosaics(root)
mosaics = glob.glob('{0}-ir_dr?_sci.fits'.format(root))
wcs_ref_optical = wcs_ref_file
auto_script.drizzle_overlaps(root,
filters=mosaic_args['optical_filters'],
pixfrac=mosaic_pixfrac,
make_combined=(len(mosaics) == 0), ref_image=wcs_ref_optical,
min_nexp=1+preprocess_args['skip_single_optical_visits']*1)
assert(os.path.exists('j033216m2743-ir_drz_sci.fits'))
assert(os.path.exists('j033216m2743-f140w_drz_sci.fits'))
if not os.path.exists('{0}.field.jpg'.format(root)):
slx, sly, rgb_filts, fig = auto_script.field_rgb(root=root, scl=3,
HOME_PATH=None)
plt.close(fig)
def test_make_phot():
"""
"""
os.chdir(os.path.join(HOME_PATH, root, 'Prep'))
if not os.path.exists(f'{root}_phot.fits'):
multiband_catalog_args=kwargs['multiband_catalog_args']
tab = auto_script.multiband_catalog(field_root=root,
**multiband_catalog_args)
assert(os.path.exists(f'{root}_phot.fits'))
assert(os.path.exists(f'{root}-ir.cat.fits'))
def test_make_contam_model():
"""
"""
global grp
os.chdir(os.path.join(HOME_PATH, root, 'Prep'))
files = glob.glob('*GrismFLT.fits')
if len(files) == 0:
### Grism contamination model
# Which filter to use as direct image
# Will try in order of the list until a match is found.
grism_prep_args = kwargs['grism_prep_args']
grism_prep_args['gris_ref_filters'] = {'G141': ['F140W', 'F160W'],
'G102': ['F105W', 'F098M', 'F110W']}
grp = auto_script.grism_prep(field_root=root, **grism_prep_args)
grp = multifit.GroupFLT(grism_files=glob.glob('*GrismFLT.fits'),
catalog=f'{root}-ir.cat.fits',
cpu_count=-1, sci_extn=1, pad=256)
else:
grp = multifit.GroupFLT(grism_files=glob.glob('*GrismFLT.fits'),
catalog=f'{root}-ir.cat.fits',
cpu_count=-1, sci_extn=1, pad=256)
def test_make_field_psf():
"""
"""
# Make PSF file
os.chdir(os.path.join(HOME_PATH, root, 'Prep'))
if not os.path.exists('{0}-f140w_psf.fits'.format(root)):
auto_script.field_psf(root=root)
def test_extract_and_fit():
"""
"""
global grp
import astropy.units as u
from grizli import fitting
pline = auto_script.DITHERED_PLINE
os.chdir(os.path.join(HOME_PATH, root, 'Extractions'))
# Generate the parameter dictionary
args = auto_script.generate_fit_params(field_root=root,
prior=None,
MW_EBV=0.005,
pline=pline,
fit_only_beams=True,
run_fit=True,
poly_order=7,
fsps=True,
sys_err = 0.03,
fcontam=0.2,
zr=[0.05, 3.4],
save_file='fit_args.npy')
tab = utils.GTable()
tab['ra'] = [53.0657456, 53.0624459]
tab['dec'] = [-27.720518, -27.707018]
idx, dr = grp.catalog.match_to_catalog_sky(tab)
assert(np.allclose(dr.value, 0, atol=0.2))
source_ids = grp.catalog['NUMBER'][idx]
# Cutouts
drizzler_args = kwargs['drizzler_args']
print(yaml.dump(drizzler_args))
os.chdir('../Prep')
auto_script.make_rgb_thumbnails(root=root, ids=source_ids,
drizzler_args=drizzler_args)
os.chdir('../Extractions')
id=source_ids[0]
auto_script.extract(field_root=root, ids=[id], MW_EBV=0.005,
pline=pline, run_fit=False, grp=grp, diff=True)
# Redshift fit
_res = fitting.run_all_parallel(id)
assert(os.path.exists(f'{root}_{id:05d}.row.fits'))
row = utils.read_catalog(f'{root}_{id:05d}.row.fits')
assert(np.allclose(row['z_map'], 1.7397, rtol=1.e-2))
#
# def test_run_full(self):
# """
# All in one go
# """
# auto_script.go(root=root, HOME_PATH=HOME_PATH, **kwargs)
def test_cleanup():
"""
"""
pass
|
|
{
"filename": "README.md",
"repo_name": "silburt/DeepMoon",
"repo_path": "DeepMoon_extracted/DeepMoon-master/README.md",
"type": "Markdown"
}
|
# DeepMoon - Lunar Crater Counting Through Deep Learning
Center for Planetary Sciences / Department of Astronomy & Astrophysics / Canadian Institute for Theoretical Astrophysics
University of Toronto
DeepMoon is a TensorFlow-based pipeline for training a convolutional neural
network (CNN) to recognize craters on the Moon, and determine their positions and
radii. It is the companion repo to the paper
[Lunar Crater Identification via Deep Learning](https://arxiv.org/abs/1803.02192), which
describes the motivation and development of the code, as well as results.
## Getting Started
### Overview
The DeepMoon pipeline trains a neural net using data derived from a global
digital elevation map (DEM) and catalogue of craters. The code is divided into
three parts. The first generates a set of images of the Moon randomly cropped
from the DEM, with corresponding crater positions and radii. The second
trains a convnet using this data. The third validates the convnet's
predictions.
To first order, our CNN activates regions with high negative gradients, i.e.
pixels that decrease in value as you move across the image. Two examples of
this are shown below: the first is a genuine lunar DEM image from our dataset,
and the second is a sample image taken from the web.


### Dependencies
DeepMoon requires the following packages to function:
- [Python](https://www.python.org/) version 2.7 or 3.5+
- [Cartopy](http://scitools.org.uk/cartopy/) >= 0.14.2. Cartopy itself has a
number of [dependencies](http://scitools.org.uk/cartopy/docs/latest/installing.html#installing),
including the GEOS and Proj.4.x libraries. (For Ubuntu systems, these can be
installed through the `libgeos++-dev` and `libproj-dev` packages,
respectively.)
- [h5py](http://www.h5py.org/) >= 2.6.0
- [Keras](https://keras.io/) 1.2.2 [(documentation)](https://faroit.github.io/keras-docs/1.2.2/);
also tested with Keras >= 2.0.2
- [Numpy](http://www.numpy.org/) >= 1.12
- [OpenCV](https://pypi.python.org/pypi/opencv-python) >= 3.2.0.6
- [*pandas*](https://pandas.pydata.org/) >= 0.19.1
- [Pillow](https://python-pillow.org/) >= 3.1.2
- [PyTables](http://www.pytables.org/) >=3.4.2
- [TensorFlow](https://www.tensorflow.org/) 0.10.0rc0, also tested with
TensorFlow >= 1.0
This list can also be found in the `requirements.txt`.
### Data Sources
Our train, validation and test datasets, global DEM, post-processed
crater distribution on the test set, best model, and sample output
images can be found [on Zenodo](https://doi.org/10.5281/zenodo.1133969).
Examples of how to read these data can be found in the
`docs/Using Zenodo Data.ipynb` IPython notebook.
#### Digital Elevation Maps
We use the [LRO-Kaguya merged 59 m/pixel DEM][lola dem]. The DEM was
downsampled to 118 m/pixel and converted to 16-bit GeoTiff with the USGS
Astrogeology Cloud Processing service, and then rescaled to 8-bit PNG using
the [GDAL](http://www.gdal.org/) library:
```
gdal_translate -of PNG -scale -21138 21138 -co worldfile=no
LunarLROLrocKaguya_118mperpix_int16.tif LunarLROLrocKaguya_118mperpix.png
```
#### Crater Catalogues
For the ground truth longitude / latitude locations and sizes of craters, we
combine the [LROC Craters 5 to 20 km diameter][lroc cat] dataset with the
[Head et al. 2010 >= 20 km diameter][head cat] one ([alternate download
link][head cat2]). The LROC dataset was converted from ESRI shapefile to .csv.
They can be found under the `catalogues` folder of the repo, and have had their
formatting slightly modified to be read into *pandas*.
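A conversion along these lines can be reproduced with
[geopandas](https://geopandas.org/); this is a sketch only, and the file names
below are placeholders rather than files shipped with the repo:

```
import geopandas as gpd

# Read the ESRI shapefile, drop the geometry column, and write a plain CSV.
craters = gpd.read_file('lroc_craters.shp')
craters.drop(columns='geometry').to_csv('lroc_craters.csv', index=False)
```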
During initial testing, we also used the [Salamunićcar LU78287GT
catalogue][sala cat].
### Running DeepMoon
Each stage of DeepMoon has a corresponding script: `run_input_data_gen.py` for
generating input data, `run_model_training.py` to build and train the convnet,
and `run_get_unique_craters.py` to validate predictions and generate a crater
atlas. User-defined parameters, and instructions on how to use each script,
can be found in the scripts themselves.
We recommend copying these scripts into a new working directory (and appending
this repo to your Python path) instead of modifying them in the repo.
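For example, a minimal setup from a fresh working directory might look like
the following (the clone location is an assumption; adjust it to your system):

```
import sys, os

# Make the DeepMoon repo importable without modifying the scripts in-place.
sys.path.append(os.path.expanduser('~/DeepMoon'))
```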
Our model with default parameters was trained on a 16 GB Tesla P100 GPU; 12 GB
GPUs, however, are more common. Our default model may therefore not run on many
systems without reducing the batch size, number of filters, etc., which can
affect final model convergence.
### Quick Usage
See `docs/Using Zenodo Data.ipynb` for basic examples on generating sample
datasets, loading a pre-trained CNN and using it to make predictions on
samples.
## Authors
* **Ari Silburt** - convnet architecture, crater extraction and post-processing
[silburt](https://github.com/silburt)
* **Charles Zhu** - input image generation, data ingestion and post-processing
[cczhu](https://github.com/cczhu)
### Contributors
* Mohamad Ali-Dib - [malidib](https://github.com/malidib/)
* Kristen Menou - [kmenou](https://www.kaggle.com/kmenou)
* Alan Jackson
## License
Copyright 2018 Ari Silburt, Charles Zhu and contributors.
DeepMoon is free software made available under the MIT License. For details see
the LICENSE.md file.
[lola dem]: https://astrogeology.usgs.gov/search/map/Moon/LRO/LOLA/Lunar_LRO_LrocKaguya_DEMmerge_60N60S_512ppd
[lroc cat]: http://wms.lroc.asu.edu/lroc/rdr_product_select?filter%5Btext%5D=&filter%5Blat%5D=&filter%5Blon%5D=&filter%5Brad%5D=&filter%5Bwest%5D=&filter%5Beast%5D=&filter%5Bsouth%5D=&filter%5Bnorth%5D=&filter%5Btopographic%5D=either&filter%5Bprefix%5D%5B%5DSHAPEFILE&show_thumbs=0&per_page=100&commit=Search
[head cat]: http://science.sciencemag.org/content/329/5998/1504/tab-figures-data
[head cat2]: http://www.planetary.brown.edu/html_pages/LOLAcraters.html
[sala cat]: https://astrogeology.usgs.gov/search/map/Moon/Research/Craters/GoranSalamuniccar_MoonCraters
|
|
{
"filename": "simulate.py",
"repo_name": "smsharma/fermi-gce-flows",
"repo_path": "fermi-gce-flows_extracted/fermi-gce-flows-main/simulate.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import sys, os
import argparse
import logging
from operator import itemgetter
import numpy as np
import healpy as hp
from tqdm.auto import tqdm
import torch
logger = logging.getLogger(__name__)
sys.path.append("./")
sys.path.append("../")
from sbi import utils
from simulations.wrapper import simulator
from utils import create_mask as cm
from utils.templates import get_NFW2_template
from utils.utils import ring2nest
from models.psf import KingPSF
def simulate(n=1000, r_outer=25, nside=128, psf="king", dif="ModelO", gamma="default", ps_mask_type="0p8deg", disk_type="thick", new_ps_priors=False, prior_dm_negative=False):
"""High-level simulation script"""
# Get mask of central pixel for nside=1
hp_mask_nside1 = cm.make_mask_total(nside=1, band_mask=True, band_mask_range=0, mask_ring=True, inner=0, outer=r_outer)
# Get mask corresponding to nside=128
mask_sim = hp.ud_grade(hp_mask_nside1, nside)
# ROI to normalize counts over
mask_normalize_counts = cm.make_mask_total(nside=nside, band_mask=True, band_mask_range=2, mask_ring=True, inner=0, outer=25.0)
# Get PS mask
if ps_mask_type == "0p8deg":
ps_mask = np.load("data/mask_3fgl_0p8deg.npy")
elif ps_mask_type == "95pc":
ps_mask = np.load("data/fermi_data/fermidata_pscmask.npy") > 0
mask_roi = cm.make_mask_total(nside=nside, band_mask=True, band_mask_range=2, mask_ring=True, inner=0, outer=r_outer, custom_mask=ps_mask)
# ROI over which templates are normalized
roi_normalize_temps = cm.make_mask_total(nside=128, band_mask=True, band_mask_range=2, mask_ring=True, inner=0, outer=30)
# King PSF hard-coded for now
if psf == "king":
kp = KingPSF()
else:
raise NotImplementedError
# Load standard templates
temp_gce = np.load("data/fermi_data/template_gce.npy")
temp_dif = np.load("data/fermi_data/template_dif.npy")
temp_psc = np.load("data/fermi_data/template_psc.npy")
temp_iso = np.load("data/fermi_data/template_iso.npy")
if disk_type == "thick":
temp_dsk = np.load("data/fermi_data/template_dsk.npy")
elif disk_type == "thin":
temp_dsk = np.load("data/external/template_disk_r_s_5_z_s_0.3.npy")
temp_bub = np.load("data/fermi_data/template_bub.npy")
# Load exposure
fermi_exp = np.load("data/fermi_data/fermidata_exposure.npy")
# Rescaling factor to remove exposure from PS templates
rescale = fermi_exp / np.mean(fermi_exp)
# Load Model O templates
temp_mO_pibrem = np.load("data/fermi_data/ModelO_r25_q1_pibrem.npy")
temp_mO_ics = np.load("data/fermi_data/ModelO_r25_q1_ics.npy")
# Load Model A templates
temp_mA_pibrem = hp.ud_grade(np.load("data/external/template_Api.npy"), nside_out=128, power=-2)
temp_mA_ics = hp.ud_grade(np.load("data/external/template_Aic.npy"), nside_out=128, power=-2)
# Load Model F templates
temp_mF_pibrem = hp.ud_grade(np.load("data/external/template_Fpi.npy"), nside_out=128, power=-2)
temp_mF_ics = hp.ud_grade(np.load("data/external/template_Fic.npy"), nside_out=128, power=-2)
logger.info("Generating training data with %s maps", n)
# Dict to save results
results = {}
# Priors for DM template, if required
if gamma in ["fix", "default"]:
prior_temp = [[], []]
elif gamma == "float":
prior_temp = [[0.5], [1.5]]
elif gamma == "float_both":
prior_temp = [[0.5, 0.5], [1.5, 1.5]]
else:
raise NotImplementedError
# gce, dsk PS priors
if new_ps_priors:
prior_ps = [[0.001, 10.0, 1.1, -10.0, 1.0, 0.1, 0.001, 10.0, 1.1, -10.0, 1.0, 0.1], [2.5, 20.0, 1.99, 1.99, 30.0, 0.99, 2.5, 20.0, 1.99, 1.99, 30.0, 0.99]]
else:
prior_ps = [[0.001, 10.0, 1.1, -10.0, 5.0, 0.1, 0.001, 10.0, 1.1, -10.0, 5.0, 0.1], [2.5, 20.0, 1.99, 1.99, 40.0, 4.99, 2.5, 20.0, 1.99, 1.99, 40.0, 4.99]]
# Poiss priors
if dif in ["ModelO", "ModelF", "ModelA"]:
# iso, bub, psc, dif_pibrem, dif_ics
prior_poiss = [[0.001, 0.001, 0.001, 6.0, 1.0], [1.5, 1.5, 1.5, 12.0, 6.0]]
elif dif == "p6v11":
# iso, bub, psc, dif
prior_poiss = [[0.001, 0.001, 0.001, 11.0], [1.5, 1.5, 1.5, 16.0]]
else:
raise NotImplementedError
if prior_dm_negative:
logger.info("Allowing for negative DM priors")
prior_dm_lo = -1.0
prior_dm_hi = prior_ps[1][0]
else:
prior_dm_lo = prior_ps[0][0]
prior_dm_hi = prior_ps[1][0]
# Generate simulation parameter points. Priors hard-coded for now.
prior = utils.BoxUniform(low=torch.tensor([prior_dm_lo] + prior_poiss[0] + prior_ps[0] + prior_temp[0]), high=torch.tensor([prior_dm_hi] + prior_poiss[1] + prior_ps[1] + prior_temp[1]))
thetas = prior.sample((n,))
# Generate NFW template
logger.info("Generating NFW template...")
if gamma == "default":
temps_gce_poiss = [temp_gce] * n
temps_gce_ps = [temp_gce / rescale] * n
elif gamma == "fix":
temp_gce = get_NFW2_template(gamma=1.2, exp_map=fermi_exp, roi_normalize=roi_normalize_temps)
temps_gce_poiss = [temp_gce] * n
temps_gce_ps = [temp_gce / rescale] * n
elif gamma == "float":
temps_gce = [get_NFW2_template(gamma=gamma.detach().numpy(), exp_map=fermi_exp, roi_normalize=roi_normalize_temps) for gamma in tqdm(thetas[:, -1])]
temps_gce_poiss = temps_gce
temps_gce_ps = temps_gce / rescale
elif gamma == "float_both":
temps_gce_poiss = [get_NFW2_template(gamma=gamma.detach().numpy(), exp_map=fermi_exp, roi_normalize=roi_normalize_temps) for gamma in tqdm(thetas[:, -2])]
temps_gce_ps = [get_NFW2_template(gamma=gamma.detach().numpy(), exp_map=fermi_exp, roi_normalize=roi_normalize_temps) / rescale for gamma in tqdm(thetas[:, -1])]
else:
raise NotImplementedError
# List of templates except GCE template
temps_ps = [temp_dsk / rescale]
if dif == "ModelO":
temps_poiss = [temp_iso, temp_bub, temp_psc, temp_mO_pibrem, temp_mO_ics]
elif dif == "ModelA":
temps_poiss = [temp_iso, temp_bub, temp_psc, temp_mA_pibrem, temp_mA_ics]
elif dif == "ModelF":
temps_poiss = [temp_iso, temp_bub, temp_psc, temp_mF_pibrem, temp_mF_ics]
elif dif == "p6v11":
temps_poiss = [temp_iso, temp_bub, temp_psc, temp_dif]
else:
raise NotImplementedError
# Generate maps
logger.info("Generating maps...")
x_and_aux = [simulator(theta.detach().numpy(), [temp_gce_poiss] + temps_poiss, [temp_gce_ps] + temps_ps, mask_sim, mask_normalize_counts, mask_roi, kp.psf_fermi_r, fermi_exp) for (theta, temp_gce_poiss, temp_gce_ps) in tqdm(zip(thetas, temps_gce_poiss, temps_gce_ps))]
# Grab maps and aux variables
x = torch.Tensor(list(map(itemgetter(0), x_and_aux)))
x_aux = torch.Tensor(list(map(itemgetter(1), x_and_aux)))
# print(x.shape, x_aux.shape)
logger.info("Converting from RING to NEST ordering...")
# Convert from RING to NEST Healpix ordering, as that's required by DeepSphere pooling
x = ring2nest(x.squeeze(), mask_sim) # Collapse channel dimension
logger.info("Expanding dims...")
x = np.expand_dims(x, 1) # Reinstate channel dimension
logger.info("Populating dict...")
results["x"] = x
results["x_aux"] = x_aux
results["theta"] = thetas
return results
def save(data_dir, name, data):
"""Save simulated data to file"""
logger.info("Saving results with name %s", name)
if not os.path.exists(data_dir):
os.mkdir(data_dir)
if not os.path.exists("{}/data".format(data_dir)):
os.mkdir("{}/data".format(data_dir))
if not os.path.exists("{}/data/samples".format(data_dir)):
os.mkdir("{}/data/samples".format(data_dir))
for key, value in data.items():
np.save("{}/data/samples/{}_{}.npy".format(data_dir, key, name), value)
def parse_args():
"""Parse command line arguments"""
parser = argparse.ArgumentParser(description="Main high-level script that starts the GCE simulations")
parser.add_argument(
"-n",
type=int,
default=10000,
help="Number of samples to generate. Default is 10k.",
)
parser.add_argument("--dif", type=str, default="ModelO", help='Diffuse model to simulate, whether "ModelO" (default) or "p6"')
parser.add_argument("--ps_mask_type", type=str, default="0p8deg", help='PS mask, either "0p8deg" (default) or "95pc"')
parser.add_argument("--disk_type", type=str, default="thick", help='Disk type, either "thick" (default) or "thin"')
parser.add_argument("--gamma", type=str, default="default", help='Whether to float NFW index gamma. "fix" (default, fixes to gamma=1.2), "float" (float both gammas), or "float_both" (float PS and poiss gammas separately)')
parser.add_argument("--name", type=str, default=None, help='Sample name, like "train" or "test".')
parser.add_argument("--dir", type=str, default=".", help="Base directory. Results will be saved in the data/samples subfolder.")
parser.add_argument("--debug", action="store_true", help="Prints debug output.")
parser.add_argument("--new_ps_priors", type=int, default=0, help="Whether to use new set of PS priors")
parser.add_argument("--prior_dm_negative", type=int, default=0, help="Whether to allow DM prior go negative")
return parser.parse_args()
if __name__ == "__main__":
args = parse_args()
logging.basicConfig(
format="%(asctime)-5.5s %(name)-20.20s %(levelname)-7.7s %(message)s",
datefmt="%H:%M",
level=logging.DEBUG if args.debug else logging.INFO,
)
logger.info("Hi!")
name = "train" if args.name is None else args.name
results = simulate(n=args.n, dif=args.dif, gamma=args.gamma, ps_mask_type=args.ps_mask_type, disk_type=args.disk_type, new_ps_priors=args.new_ps_priors, prior_dm_negative=args.prior_dm_negative)
save(args.dir, name, results)
logger.info("All done! Have a nice day!")
|
|
{
"filename": "transform.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/autofit/graphical/factor_graphs/transform.py",
"type": "Python"
}
|
from abc import abstractmethod
from typing import Dict, Tuple, Optional, List
import numpy as np
from scipy.linalg import cho_factor
from autoconf import cached_property
from autofit.graphical.factor_graphs.abstract import AbstractNode, Value, FactorValue
from autofit.mapper.operator import (
CholeskyOperator,
InvCholeskyTransform,
IdentityOperator,
DiagonalMatrix,
)
from autofit.mapper.variable import Variable, VariableData
class VariableTransform:
""" """
def __init__(self, transforms):
self.transforms = transforms
def __mul__(self, values: Value) -> Value:
return {k: M * values[k] for k, M in self.transforms.items()}
def __rtruediv__(self, values: Value) -> Value:
return {k: values[k] / M for k, M in self.transforms.items()}
def __rmul__(self, values: Value) -> Value:
return {k: values[k] * M for k, M in self.transforms.items()}
def ldiv(self, values: Value) -> Value:
return {k: M.ldiv(values[k]) for k, M in self.transforms.items()}
def __add__(self, other: "VariableTransform") -> "VariableTransform":
return VariableTransform(
{k: M + other.transforms[k] for k, M in self.transforms.items()}
)
def inv(self) -> "VariableTransform":
return VariableTransform({k: M.inv() for k, M in self.transforms.items()})
rdiv = __rtruediv__
rmul = __rmul__
lmul = __mul__
__matmul__ = __mul__
def quad(self, values):
return {v: H.T if np.ndim(H) else H for v, H in (values * self).items()} * self
def invquad(self, values):
return {v: H.T if np.ndim(H) else H for v, H in (values / self).items()} / self
@cached_property
def log_det(self):
return sum(M.log_det for M in self.transforms.values())
@classmethod
def from_scales(cls, scales):
return cls({v: DiagonalMatrix(scale) for v, scale in scales.items()})
@classmethod
def from_covariances(cls, covs):
return cls(
{v: InvCholeskyTransform(cho_factor(cov)) for v, cov in covs.items()}
)
@classmethod
def from_inv_covariances(cls, inv_covs):
return cls(
{
v: CholeskyOperator(cho_factor(inv_cov))
for v, inv_cov in inv_covs.items()
}
)
class FullCholeskyTransform(VariableTransform):
def __init__(self, cholesky, param_shapes):
self.cholesky = cholesky
self.param_shapes = param_shapes
@classmethod
def from_optresult(cls, opt_result):
param_shapes = opt_result.param_shapes
cov = opt_result.result.hess_inv
if not isinstance(cov, np.ndarray):
# if optimiser is L-BFGS-B then convert
# implicit hess_inv into dense matrix
cov = cov.todense()
return cls(InvCholeskyTransform.from_dense(cov), param_shapes)
def __mul__(self, values: Value) -> Value:
M, x = self.cholesky, self.param_shapes.flatten(values)
return self.param_shapes.unflatten(M * x)
def __rtruediv__(self, values: Value) -> Value:
M, x = self.cholesky, self.param_shapes.flatten(values)
return self.param_shapes.unflatten(x / M)
def __rmul__(self, values: Value) -> Value:
M, x = self.cholesky, self.param_shapes.flatten(values)
return self.param_shapes.unflatten(x * M)
    def ldiv(self, values: Value) -> Value:
M, x = self.cholesky, self.param_shapes.flatten(values)
return self.param_shapes.unflatten(M.ldiv(x))
rdiv = __rtruediv__
rmul = __rmul__
lmul = __mul__
__matmul__ = __mul__
@cached_property
def log_det(self):
return self.cholesky.log_det
class IdentityVariableTransform(VariableTransform):
def __init__(self):
pass
def _identity(self, values: Value) -> Value:
return values
__mul__ = _identity
__rtruediv__ = _identity
__rmul__ = _identity
ldiv = _identity
rdiv = __rtruediv__
rmul = __rmul__
lmul = __mul__
__matmul__ = __mul__
quad = _identity
invquad = _identity
@property
def log_det(self):
return 0.0
identity_transform = IdentityOperator()
identity_variable_transform = IdentityVariableTransform()
class TransformedNode(AbstractNode):
def __init__(self, node: AbstractNode, transform: VariableTransform):
self.node = node
self.transform = transform
@property
def variables(self):
return self.node.variables
@property
def deterministic_variables(self):
return self.node.deterministic_variables
@property
def all_variables(self):
return self.node.all_variables
@property
def name(self):
return f"FactorApproximation({self.node.name})"
def __call__(
self,
values: Dict[Variable, np.ndarray],
) -> FactorValue:
return self.node(self.transform.ldiv(values))
def func_jacobian(
self,
values: Dict[Variable, np.ndarray],
variables: Optional[List[Variable]] = None,
_calc_deterministic: bool = True,
**kwargs,
) -> Tuple[FactorValue, VariableData]:
fval, jval = self.node.func_jacobian(
self.transform.ldiv(values),
variables=variables,
_calc_deterministic=_calc_deterministic,
)
# TODO this doesn't deal with deterministic jacobians
grad = jval / self.transform
return fval, grad
def func_jacobian_hessian(
self,
values: Dict[Variable, np.ndarray],
variables: Optional[List[Variable]] = None,
_calc_deterministic: bool = True,
**kwargs,
) -> Tuple[FactorValue, VariableData, VariableData]:
M = self.transform
fval, jval, hval = self.node.func_jacobian_hessian(
M.ldiv(values),
variables=variables,
_calc_deterministic=_calc_deterministic,
)
grad = jval / M
# hess = {v: H.T for v, H in (hval / M).items()} / M
hess = M.invquad(hval)
return fval, grad, hess
def __getattribute__(self, name):
try:
return super().__getattribute__(name)
except AttributeError:
return getattr(self.node, name)
|
|
{
"filename": "basic_spectral_analysis-checkpoint.ipynb",
"repo_name": "aburgasser/kastredux",
"repo_path": "kastredux_extracted/kastredux-main/training/.ipynb_checkpoints/basic_spectral_analysis-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
# KastRedux Tutorial: Basic Spectral Analysis
## Authors
Adam Burgasser
## Version date
12 July 2024
## Learning Goals
* Read in an optical Kast spectrum (kastredux.getSpectrum)
* Explore built-in functions for Spectrum objects (kastredux.Spectrum)
* Some basic manipulation of spectra - normalizing, scaling, trimming, changing units, spectral math (kastredux.Spectrum)
* Compare a spectrum to another spectrum (kastredux.compareSpectrum)
* Compare a spectrum to a set of spectral standards (kastredux.classifyByStandard)
* Measure a set of indices to infer a classification (kastredux.measureIndexSet, kastredux.classifyByIndex)
* Measure line equivalent widths (kastredux.ew, kastredux.ewSet)
* Measure metallicity and magnetic emission (kastredux.zeta, kastredux.lhalbol)
## Keywords
spectral analysis, indices, classification
## Companion Content
None
## Summary
In this tutorial, we will explore some basic spectral analysis and visualization tools included in the kastredux package, which are particularly designed for analysis of ultracool dwarfs (M, L, and T dwarfs).
```python
# if you are using google colab, first install kastredux
#!pip install git+https://github.com/aburgasser/kastredux.git
```
```python
# import statements
import kastredux as kr
import astropy.units as u
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
```
# Using the kastredux Spectrum object
In this section we'll familiarize ourselves with the kastredux Spectrum object, which contains several built-in functions for basic visualization and spectral manipulation, as well as reading in fits and ascii spectral data
```python
# read in a fits file from sample directory
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J0102+5254_20210925.fits',name='J0101+5254')
sp
```
```python
# quick visualization of spectrum
sp.plot()
```
```python
# trim to a smaller spectral region
sp.trim([6000,9000])
sp.plot()
```
```python
# mask a part of the spectrum, such as telluric absorption regions
sp.maskWave([7580,7640])
sp.plot()
```
```python
# normalize - notice the difference on the y-axis
sp.normalize([8100,8300])
sp.plot()
```
```python
# scale by a constant factor - notice the difference on the y-axis
sp.scale(2.5)
sp.plot()
```
```python
# smooth spectrum
sp.smooth(10)
sp.plot()
```
```python
# return to the original spectrum - note: you'll need to add in the name
sp.reset()
sp.name = 'J0101+5254'
sp.plot()
```
```python
# save spectrum to a file
sp.write('myspectrum.fits')
```
```python
# read in an ascii file from sample directory
sp2 = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_G233-42_20210925.txt',name='G233-42')
sp2.plot()
```
```python
# add two spectra together
sp1 = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J0102+5254_20210925.fits',name='J0101+5254')
sp1.normalize([8100,8300])
sp2 = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_G233-42_20210925.txt',name='G233-42')
sp2.normalize([8100,8300])
sp3 = sp1+sp2
sp3.plot()
```
```python
# subtract two spectra
sp3 = sp1-sp2
sp3.plot()
```
```python
# use matplotlib to compare two spectra and their difference
xlim = [6000,9000]
plt.plot(sp1.wave,sp1.flux,'g-',label=sp1.name)
plt.plot(sp2.wave,sp2.flux,'m-',label=sp2.name)
plt.plot(sp3.wave,sp3.flux,'k--',label='Difference')
plt.legend()
plt.plot(xlim,[0,0],'k:')
plt.xlim(xlim)
plt.ylim([-0.5,1.2])
plt.xlabel('Wavelength (Angstrom)')
plt.ylabel('Normalized Flux')
```
# Comparing spectra to each other and standards
One of the most common tasks in spectral analysis is comparing to other spectral templates or models to determine physical properties. kastredux has dedicated tools and comparison spectra for this
```python
# a common function is to read in 2 spectra and compare them
# note that what is returned is the chi-square statistic and a scale factor for the second spectrum
sp1 = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J0102+5254_20210925.fits',name='J0101+5254')
sp2 = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_G233-42_20210925.txt',name='G233-42')
chi,scale = kr.compareSpectra(sp1,sp2,plot=True)
print(chi,scale)
```
```python
# we can refine the fit by choosing the region to compare
chi,scale = kr.compareSpectra(sp1,sp2,plot=True,fit_range=[7200,7500])
print(chi,scale)
```
```python
# use the scale factor to generate your own plot using matplotlib
sp2.scale(scale)
sp3 = sp1-sp2
xlim = [6000,9000]
plt.plot(sp1.wave,sp1.flux,'g-',label=sp1.name)
plt.plot(sp2.wave,sp2.flux,'m-',label=sp2.name)
plt.plot(sp3.wave,sp3.flux,'k--',label='Difference')
plt.legend()
plt.plot(xlim,[0,0],'k:')
plt.xlim(xlim)
plt.ylim([-5,40])
plt.xlabel('Wavelength (Angstrom)')
plt.ylabel('Normalized Flux')
```
## Comparison to spectral standards
kastredux has a library of built-in spectral standards to compare to spectra to obtain classifications and identify peculiar features
```python
# initialize the spectral standards, which are stored in the SPTSTDS global variable
# by default it will load in M and L dwarf SDSS standards from
# Bochanski et al., Schmidt et al., and Kesseli et al.
kr.initializeStandards()
kr.SPTSTDS
```
```python
# you can also add in subdwarf (sd, esd, usd), low gravity (beta, gamma),
# and giant standards from SDSS
kr.initializeStandards(sd=True)
kr.SPTSTDS
```
```python
# you can also add in specific spectral standards from
# Kirkpatrick et al. (L dwarfs), Burgasser et al. (T dwarfs), and Lepine et al. (subdwarfs)
kr.initializeStandards(sdss=False,reset=True)
kr.SPTSTDS
```
```python
# each of these is just a Spectrum object
kr.SPTSTDS['L8.0'].plot()
```
```python
# the easiest way to compare to all sources is to use classifyTemplate()
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J0102+5254_20210925.fits',name='J0101+5254')
kr.classifyTemplate(sp,plot=True)
```
```python
# by default this returns the best fit template; you can also return all of the comparison data
kr.classifyTemplate(sp,plot=True,output='allmeasures')
```
```python
# you can also define a custom template set
templates = {
'G233-42': kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_G233-42_20210925.txt',name='G233-42'),
'J1010+5254': kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J0102+5254_20210925.fits',name='J0101+5254'),
'LP389-13': kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_LP389-13_20210926.fits',name='LP389-13'),
}
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J1229+0752_20220310.fits',name='J1229+0752')
kr.classifyTemplate(sp,template_set=templates,plot=True,verbose=True)
```
# Measuring spectral indices
Spectral indices measure the strengths of atomic and molecular features, as well as overall spectral shape, and can be correlated with spectral type or physical quantities such as temperature, surface gravity, and metallicity. kastredux has several sets of pre-defined indices that can be used for optical spectra of ultracool dwarfs.
```python
# the set of index sets included in kastredux
# are contained in the INDEX_SETS global variable
kr.INDEX_SETS.keys()
```
```python
kr.INDEX_SETS['lepine2003']['indices'].keys()
```
```python
# you can measure a set of indices using measureIndexSet()
# this returns a dict with each index name pointing to a (value, uncertainty) tuple
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J1229+0752_20220310.fits',name='J1229+0752')
kr.measureIndexSet(sp,ref='lepine2003')
```
```python
kr.INDEX_SETS
```
```python
# you can also define your own index by defining the sample ranges
# and how these are measured and combined
# in this example we'll measure the strength of a TiO feature near 7060 Angstroms
# using a simple ratio of in-band value to nearby continuum
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J1229+0752_20220310.fits',name='J1229+0752')
sp.normalize([7000,7100])
rng1 = [7060,7070] # in band
rng2 = [7045,7055] # continuum
val,unc = kr.measureIndex(sp,[rng1,rng2],sample='median',method='ratio')
print(val,unc)
# visualize this
plt.plot(sp.wave,sp.flux,'k-')
plt.plot(rng1,[sp.sample(rng1),sp.sample(rng1)],'m-',linewidth=5)
plt.plot(rng2,[sp.sample(rng2),sp.sample(rng2)],'m-',linewidth=5)
plt.xlim([7000,7100])
plt.ylim([0,1.2])
```
## Classifications from indices
Indices can be used to estimate classifications, and it's sometimes useful to compare the template-based classifications to index-based ones
```python
# some of these indices are used for classification
# the global variable INDEX_CLASSIFICATION_RELATIONS contains this info
kr.INDEX_CLASSIFICATION_RELATIONS.keys()
```
```python
# use the classification indices with kr.classifyIndices()
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J1229+0752_20220310.fits',name='J1229+0752')
kr.classifyIndices(sp,ref='lepine2003',verbose=True)
```
```python
# compare to template classification
kr.classifyTemplate(sp,plot=True)
```
## Measuring Metallicity
Indices can also be used to measure metallicity, based on the zeta method of Lepine et al. (2007). Several zeta calibrations are included in kastredux, accessed with the zeta() function, which measures the zeta value and can optionally return the metallicity class and an estimate of the metallicity.
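For reference, the standard form of this index is $\zeta_{TiO/CaH} = (1 - TiO5)\,/\,(1 - [TiO5]_{Z_\odot})$, where $[TiO5]_{Z_\odot}$ is a polynomial calibration of the solar-metallicity TiO5 index against the CaH2+CaH3 indices; see Lepine et al. (2007) for the exact coefficients.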
```python
# measure zeta
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J1717+7244_20211113.fits',name='J1717+7244')
kr.zeta(sp,ref='lepine2007')
```
```python
# return the metallicity class
kr.zeta(sp,ref='lepine2007',output='class')
```
```python
# check against the templates to see if this is right
kr.initializeStandards(sd=True)
kr.compareSpectra(sp,kr.SPTSTDS['sdM1.0'],fit_range=[6000,8500],plot=True)
```
```python
# return estimate of metallicity from zeta
kr.zeta(sp,ref='lepine2007',metallicity_ref='mann2013',output='z')
```
```python
# return everything
kr.zeta(sp,output='allmeasures')
```
# Equivalent Widths
Equivalent widths are measurements of atomic line strengths: the width (in Angstroms) of a perfectly rectangular absorption feature that drops to zero flux from the local continuum and absorbs the same total flux as the line. Equivalent width is defined as
$EW = \int\left(1-\frac{F_{line}(\lambda)}{F_{continuum}(\lambda)}\right)d\lambda$
kastredux has several functions for measuring individual lines (measureEW), multiple lines from a given element (measureEWElement), and sets of lines defined in various papers (measureEWSet).
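As a point of reference, the integral above can be approximated directly from wavelength and flux arrays with a trapezoidal sum. This generic sketch (the array names are placeholders) is not the kastredux implementation:

```python
import numpy as np

def equivalent_width(wave, flux, continuum):
    # EW = integral of (1 - F_line/F_cont) dlambda, in the units of wave;
    # wave, flux and continuum are 1D arrays spanning the line region
    return np.trapz(1.0 - flux / continuum, wave)
```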
```python
# measure the EW of the 8195 Angstrom Na I line and visualize measurement
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J1229+0752_20220310.fits',name='J1229+0752')
kr.measureEW(sp,8195,plot=True)
```
```python
# measure all of the Fe I lines in a spectrum
kr.measureEWElement(sp,'FeI',plot=True)
```
```python
# do the same for hydrogen; note that for emission lines you
# should set emission=True; the returned EW will be negative
kr.measureEWElement(sp,'H',plot=True,emission=True)
```
```python
# see what lines are currently available for measurement
kr.EW_LINES
```
```python
# there is one EW set from the literature included in kastredux, from Mann et al. (2013)
kr.measureEWSet(sp,ref='mann2013')
```
## Measuring Halpha emission and luminosity
The equivalent width of Halpha emission can be used to compute the Halpha luminosity of a star through a $\chi$ factor that accounts for the continuum flux at H$\alpha$ relative to the bolometric flux:
$\frac{L_{H\alpha}}{L_{bol}} = \chi\times|EW|$
There are two empirical relations for $\chi$ as a function of spectral type contained in kastredux, from Douglas et al. (2014) for spectral types M0-M9 and Schmidt et al. (2014) for spectral types M7-L7, which can be accessed using the chiFactor() function.
```python
# read in a spectrum and determine its spectral type
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J1229+0752_20220310.fits',name='J1229+0752')
kr.classifyTemplate(sp,plot=True)
```
```python
# for this M7 dwarf spectrum, we can use either relation; let's try both!
# by default, log10(LHalpha/Lbol) is returned
# Douglas relation
lha,e_lha = kr.chiFactor(sp,ref='douglas2014',verbose=True)
print('log LHa/Lbol = {:.2f}+/-{:.2f}\n'.format(lha,e_lha))
# Schmidt relation
lha,e_lha = kr.chiFactor(sp,ref='schmidt2014',verbose=True)
print('log LHa/Lbol = {:.2f}+/-{:.2f}'.format(lha,e_lha))
```
```python
# you can also just get the chi factor and its uncertainty if you want it
kr.chiFactor(sp,ref='douglas2014',output='chi')
```
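To connect this back to the equation above, here is a sketch that combines the $\chi$ factor with a measured Halpha EW by hand; it assumes chiFactor(output='chi') and measureEW() each return a (value, uncertainty) pair, and uses 6563 Angstroms as the line center. Check the result against the built-in output before trusting it.
```python
# hand-rolled log10(LHa/Lbol) from chi and the Halpha EW (illustrative sketch)
import numpy as np
chi,e_chi = kr.chiFactor(sp,ref='douglas2014',output='chi')
ew,e_ew = kr.measureEW(sp,6563,emission=True)  # assumed line center in Angstroms
print('log LHa/Lbol = {:.2f}'.format(np.log10(chi*np.abs(ew))))
```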
# theWorks()
A handy tool that measures everything all at once is theWorks(), which runs through all of the various templates, indices, and relations and returns a large dictionary with a total assessment of the spectrum. Use wisely! Be sure to verify these measurements by visualizing the spectrum.
```python
sp = kr.readSpectrum(kr.SAMPLEFOLDER+'kastRED_J1229+0752_20220310.fits',name='J1229+0752')
kr.theWorks(sp,verbose=True)
```
|
aburgasserREPO_NAMEkastreduxPATH_START.@kastredux_extracted@kastredux-main@training@.ipynb_checkpoints@basic_spectral_analysis-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "_outlinecolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/colorbar/_outlinecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="outlinecolor", parent_name="densitymap.colorbar", **kwargs
):
super(OutlinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymap@colorbar@_outlinecolor.py@.PATH_END.py
|
{
"filename": "plot_c_miri.ipynb",
"repo_name": "arminrest/jhat",
"repo_path": "jhat_extracted/jhat-master/Docs/source/examples/plot_c_miri.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib inline
```
# JWST MIRI
Aligning JWST/MIRI images with JHAT.
An example MIRI dataset is downloaded, and then a series of
alignment methods is used. For more information on the
key parameters used for alignment see
`params:Useful Parameters`.
```python
import sys,os,glob
from astropy.io import fits
from astropy.table import Table
from astropy.nddata import extract_array
from astropy.coordinates import SkyCoord
from astropy import wcs
from astropy.wcs.utils import skycoord_to_pixel
from astropy import units as u
import numpy as np
import matplotlib.pyplot as plt
from astroquery.mast import Observations
from astropy.visualization import (simple_norm,LinearStretch)
import jhat
from jhat import jwst_photclass,st_wcs_align
```
## Relative Alignment
**Download some Data**
For this example we download 2 MIRI cal images from MAST. They
cover the same field in different filters. Note that
the code will also work for level 3 images.
```python
obs_table1 = Observations.query_criteria(obs_id='jw02107-o038_t019_miri_f770w')
data_products_by_obs = Observations.get_product_list(obs_table1)
data_products_by_obs = data_products_by_obs[data_products_by_obs['calib_level']==2]
data_products_by_obs = data_products_by_obs[data_products_by_obs['productSubGroupDescription']=='CAL'][0]
Observations.download_products(data_products_by_obs,extension='fits')
obs_table2 = Observations.query_criteria(obs_id='jw02107-c1018_t019_miri_f1130w')
data_products_by_obs = Observations.get_product_list(obs_table2)
data_products_by_obs = data_products_by_obs[data_products_by_obs['calib_level']==2]
data_products_by_obs = data_products_by_obs[data_products_by_obs['productSubGroupDescription']=='CAL'][0]
Observations.download_products(data_products_by_obs,extension='fits')
```
**Examine the Reference Image**
```python
files = glob.glob('mastDownload/JWST/*miri*/*cal.fits')
ref_image = files[0]
print(ref_image)
ref_fits = fits.open(ref_image)
ref_data = fits.open(ref_image)['SCI',1].data
norm1 = simple_norm(ref_data,stretch='log',min_cut=5,max_cut=25)
plt.imshow(ref_data, origin='lower',
norm=norm1,cmap='gray')
plt.gca().tick_params(labelcolor='none',axis='both',color='none')
plt.show()
```
**Zoom in to see the offset**
Here we add an artificial offset to the WCS, and then look at the
same star in both images at the same RA/Dec
location, demonstrating a large offset between
the images.
```python
star_location = SkyCoord('23:09:44.0809','-43:26:05.613',unit=(u.hourangle,u.deg))
align_image = files[1]
align_fits = fits.open(align_image)
align_fits['SCI',1].header['CRPIX1']+=2
align_fits['SCI',1].header['CRPIX2']+=2
align_fits.writeto(align_image,overwrite=True)
align_data = fits.open(align_image)['SCI',1].data
# note: skycoord_to_pixel returns (x, y); the names below are intentionally
# swapped so that extract_array gets its expected (y, x) position order
ref_y,ref_x = skycoord_to_pixel(star_location,wcs.WCS(ref_fits['SCI',1],ref_fits))
align_y,align_x = skycoord_to_pixel(star_location,wcs.WCS(align_fits['SCI',1],align_fits))
ref_cutout = extract_array(ref_data,(11,11),(ref_x,ref_y))
align_cutout = extract_array(align_data,(11,11),(align_x,align_y))
norm1 = simple_norm(ref_cutout,stretch='log',min_cut=-1,max_cut=200)
norm2 = simple_norm(align_cutout,stretch='log',min_cut=-1,max_cut=200)
fig,axes = plt.subplots(1,2)
axes[0].imshow(ref_cutout, origin='lower',
norm=norm1,cmap='gray')
axes[1].imshow(align_cutout, origin='lower',
norm=norm2,cmap='gray')
axes[0].set_title('Reference')
axes[1].set_title('To Align')
axes[0].tick_params(labelcolor='none',axis='both',color='none')
axes[1].tick_params(labelcolor='none',axis='both',color='none')
plt.show()
```
**Create a Photometric Catalog for Relative Alignment**
We choose one of the images to be the reference image, and then
create a catalog that we will use to align the other image.
```python
jwst_phot = jwst_photclass()
jwst_phot.run_phot(imagename=ref_image,photfilename='auto',overwrite=True)
ref_catname = ref_image.replace('.fits','.phot.txt') # the default
refcat = Table.read(ref_catname,format='ascii')
print(refcat)
```
**Align the second image**
The plots output here show the various steps used by jhat to
determine the true matching sources in the image, and the
subsequent correction needed for optimal alignment.
```python
wcs_align = st_wcs_align()
wcs_align.run_all(align_image,
telescope='jwst',
outsubdir='mastDownload',
refcat_racol='ra',
refcat_deccol='dec',
refcat_magcol='mag',
refcat_magerrcol='dmag',
overwrite=True,
d2d_max=1,
showplots=2,
refcatname=ref_catname,
histocut_order='dxdy',
sharpness_lim=(0.3,0.9),
roundness1_lim=(-0.7, 0.7),
SNR_min= 3,
dmag_max=1.0,
objmag_lim =(14,24))
```
**Check the Output**
The reference image has not changed, but let's read in the newly
aligned image and compare with the original.
```python
aligned_image = os.path.join('mastDownload',os.path.basename(align_image).replace('cal.fits','jhat.fits'))
aligned_fits = fits.open(aligned_image)
aligned_data = fits.open(aligned_image)['SCI',1].data
aligned_y,aligned_x = skycoord_to_pixel(star_location,wcs.WCS(aligned_fits['SCI',1],aligned_fits))
aligned_cutout = extract_array(aligned_data,(11,11),(aligned_x,aligned_y))
norm3 = simple_norm(aligned_cutout,stretch='log',min_cut=-1,max_cut=200)
fig,axes = plt.subplots(1,3)
axes[0].imshow(ref_cutout, origin='lower',
norm=norm1,cmap='gray')
axes[1].imshow(align_cutout, origin='lower',
norm=norm2,cmap='gray')
axes[2].imshow(aligned_cutout, origin='lower',
norm=norm3,cmap='gray')
axes[0].set_title('Reference')
axes[1].set_title('To Align')
axes[2].set_title('Aligned')
for i in range(3):
axes[i].tick_params(labelcolor='none',axis='both',color='none')
plt.show()
```
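**Sanity-check the header (optional)**
As a quick, unofficial check we can print the WCS reference-pixel keywords
before and after alignment; depending on how jhat stores its solution the
change may appear in other WCS keywords instead, so treat this as
illustrative only. The +2 pixel offset we injected above should be
(approximately) undone.
```python
for key in ['CRPIX1','CRPIX2']:
    print(key,'to-align:',align_fits['SCI',1].header[key],
          'aligned:',aligned_fits['SCI',1].header[key])
```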
|
arminrestREPO_NAMEjhatPATH_START.@jhat_extracted@jhat-master@Docs@source@examples@plot_c_miri.ipynb@.PATH_END.py
|
{
"filename": "test_efsearch.py",
"repo_name": "StingraySoftware/HENDRICS",
"repo_path": "HENDRICS_extracted/HENDRICS-main/hendrics/tests/test_efsearch.py",
"type": "Python"
}
|
import copy
import importlib
import os
from collections.abc import Iterable
import numpy as np
import pytest
from stingray.events import EventList
from stingray.lightcurve import Lightcurve
from hendrics.base import HAS_PINT, hen_root
from hendrics.efsearch import (
HAS_IMAGEIO,
decide_binary_parameters,
folding_orbital_search,
main_accelsearch,
main_efsearch,
main_z2vspf,
main_zsearch,
)
from hendrics.fold import (
fit_profile_with_sinusoids,
get_TOAs_from_events,
main_deorbit,
main_fold,
std_fold_fit_func,
)
from hendrics.io import (
HEN_FILE_EXTENSION,
get_file_type,
load_events,
load_folding,
save_events,
save_lcurve,
)
from hendrics.plot import plot_folding
from hendrics.tests import _dummy_par
from . import cleanup_test_dir
HAS_PD = importlib.util.find_spec("pandas") is not None
class TestEFsearch:
def setup_class(cls):
cls.pulse_period = 0.101
cls.pulse_frequency = 1 / cls.pulse_period
cls.tstart = 0
# cls.toa = 0.2 * cls.pulse_period
cls.tend = 25.25
cls.tseg = cls.tend - cls.tstart
cls.dt = 0.00606
cls.times = np.arange(cls.tstart, cls.tend, cls.dt) + cls.dt / 2
cls.counts = 400 + 100 * np.cos(2 * np.pi * cls.times * cls.pulse_frequency)
cls.mjdref = 56000
lc = Lightcurve(cls.times, cls.counts, gti=[[cls.tstart, cls.tend]], dt=cls.dt)
cls.lcfile = "lcurve" + HEN_FILE_EXTENSION
save_lcurve(lc, cls.lcfile)
events = EventList()
events.mjdref = cls.mjdref
events.simulate_times(lc)
events.mission = "nusboh"
cls.event_times = events.time
cls.dum_noe = "events_noe" + HEN_FILE_EXTENSION
save_events(events, cls.dum_noe)
events.pi = np.random.uniform(0, 1000, len(events.time))
cls.dum_pi = "events_pi" + HEN_FILE_EXTENSION
save_events(events, cls.dum_pi)
events.energy = np.random.uniform(3, 79, len(events.time))
cls.dum = "events" + HEN_FILE_EXTENSION
cls.dum_scramble = "events_scramble" + HEN_FILE_EXTENSION
save_events(events, cls.dum)
events_scramble = copy.deepcopy(events)
events_scramble.time = np.sort(np.random.uniform(cls.tstart, cls.tend, events.time.size))
save_events(events_scramble, cls.dum_scramble)
cls.par = "bububububu.par"
_dummy_par(cls.par)
events = EventList(time=np.sort(np.random.uniform(0, 100, 200)))
cls.empty = "empty_ev" + HEN_FILE_EXTENSION
save_events(events, cls.empty)
def test_get_TOAs(self):
events = load_events(self.dum)
nbin = 32
toas, toaerrs = get_TOAs_from_events(
events.time,
self.tseg,
self.pulse_frequency,
gti=events.gti,
nbin=nbin,
mjdref=events.mjdref,
template=None,
)
assert toas is not None
assert toaerrs is not None
possible_toas = events.mjdref + np.arange(2) * self.pulse_period / 86400
closest = possible_toas[np.argmin(np.abs(possible_toas - toas[0]))]
delta_toa_s = (toas[0] - closest) * 86400
toa_err_s = max(toaerrs[0] / 1e6, 1 / nbin / self.pulse_frequency)
assert np.abs(delta_toa_s) < toa_err_s * 4
def test_get_TOAs_template(self):
nbin = 32
phases = np.arange(0, 1, 1 / nbin)
template = np.cos(2 * np.pi * phases)
events = load_events(self.dum)
toas, toaerrs = get_TOAs_from_events(
events.time,
self.tseg,
self.pulse_frequency,
gti=events.gti,
mjdref=events.mjdref,
template=template,
nbin=nbin,
)
assert toas is not None
assert toaerrs is not None
possible_toas = events.mjdref + np.arange(2) * self.pulse_period / 86400
closest = possible_toas[np.argmin(np.abs(possible_toas - toas[0]))]
assert np.abs(toas[0] - closest) < toaerrs[0] / 86400000000
def test_fit_profile_with_sinusoids(self):
nbin = 32
phases = np.arange(0, 1, 1 / nbin)
prof_smooth = np.cos(2 * np.pi * phases) + 0.5 * np.cos(4 * np.pi * (phases + 0.5))
prof_smooth = (prof_smooth + 5) * 64
prof = np.random.poisson(prof_smooth)
baseline = np.mean(prof)
proferr = np.sqrt(baseline)
fit_pars_save, success_save, chisq_save = fit_profile_with_sinusoids(
prof, proferr, debug=True, baseline=True
)
assert np.allclose(
std_fold_fit_func(fit_pars_save, phases),
prof_smooth,
atol=3 * proferr,
)
def test_fold(self):
evfile = self.dum
evfile_noe = self.dum_noe
evfile_pi = self.dum_pi
main_fold(
[
evfile,
"-f",
str(self.pulse_frequency),
"-n",
"64",
"--test",
"--norm",
"ratios",
]
)
outfile = hen_root(evfile) + "_ratios.png"
assert os.path.exists(outfile)
os.unlink(outfile)
main_fold(
[
evfile_noe,
"-f",
str(self.pulse_frequency),
"-n",
"64",
"--test",
"--norm",
"blablabla",
]
)
outfile = hen_root(evfile_noe) + ".png"
assert os.path.exists(outfile)
os.unlink(outfile)
main_fold(
[
evfile_pi,
"-f",
str(self.pulse_frequency),
"-n",
"64",
"--test",
"--norm",
"to1",
]
)
outfile = hen_root(evfile_pi) + "_to1.png"
assert os.path.exists(outfile)
os.unlink(outfile)
def test_fold_invalid(self):
evfile = self.dum
with pytest.raises(ValueError, match="Only specify one between "):
main_fold(
[
evfile,
"-f",
str(self.pulse_frequency),
"-n",
"64",
"--test",
"--norm",
"ratios",
"--pepoch",
str(self.mjdref),
"--tref",
"0",
]
)
def test_efsearch(self):
evfile = self.dum
main_efsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--emin",
"3",
"--emax",
"79",
"--fit-candidates",
]
)
outfile = "events_EF_3-79keV_9.85-9.95Hz" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True)
ftype, efperiod = get_file_type(outfile)
assert ftype == "folding"
assert np.isclose(efperiod.peaks[0], self.pulse_frequency, atol=1 / 25.25)
os.unlink(outfile)
def test_efsearch_bad_freq(self):
evfile = self.dum_scramble
with pytest.warns(UserWarning, match="No peaks detected"):
main_efsearch(
[
evfile,
"-f",
"100",
"-F",
"100.01",
"-n",
"64",
"--emin",
"3",
"--emax",
"79",
"--fit-candidates",
"--conflevel",
"99.9999999999",
]
)
def test_efsearch_from_lc(self):
evfile = self.lcfile
main_efsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--fit-candidates",
]
)
outfile = "lcurve_EF_9.85-9.95Hz" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True)
efperiod = load_folding(outfile)
assert np.isclose(efperiod.peaks[0], self.pulse_frequency, atol=1 / 25.25)
def test_zsearch(self):
evfile = self.dum
main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--emin",
"3",
"--emax",
"79",
"--fit-candidates",
"--fit-frequency",
str(self.pulse_frequency),
"--dynstep",
"5",
]
)
outfile = "events_Z22_3-79keV_9.85-9.95Hz" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True)
efperiod = load_folding(outfile)
assert np.isclose(efperiod.peaks[0], self.pulse_frequency, atol=1 / 25.25)
# Defaults to 2 harmonics
assert efperiod.N == 2
os.unlink(outfile)
def test_zsearch_from_lc(self):
evfile = self.lcfile
main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--fit-candidates",
"--fit-frequency",
str(self.pulse_frequency),
"--dynstep",
"5",
]
)
outfile = "lcurve_Z22_9.85-9.95Hz" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True)
efperiod = load_folding(outfile)
assert np.isclose(efperiod.peaks[0], self.pulse_frequency, atol=1 / 25.25)
# Defaults to 2 harmonics
assert efperiod.N == 2
os.unlink(outfile)
def test_zsearch_fdots(self):
evfile = self.dum
main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"32",
"--fdotmin",
" -0.01",
"--fdotmax",
"0.01",
"--fit-candidates",
"--fit-frequency",
str(self.pulse_frequency),
]
)
outfile = "events_Z22_9.85-9.95Hz" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True, output_data_file="bla.qdp")
efperiod = load_folding(outfile)
assert np.isclose(efperiod.peaks[0], self.pulse_frequency, atol=1 / 25.25)
# Defaults to 2 harmonics
assert len(efperiod.fdots) > 1
assert efperiod.N == 2
os.unlink(outfile)
@pytest.mark.skipif("not HAS_IMAGEIO")
def test_transient(self):
evfile = self.dum
main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"32",
"--fdotmin",
" -0.1",
"--fdotmax",
"0.1",
"--transient",
"--n-transient-intervals",
"16",
]
)
outfile = "events_Z22_9.85-9.95Hz_transient.gif"
assert os.path.exists(outfile)
os.unlink(outfile)
@pytest.mark.skipif("HAS_IMAGEIO")
def test_transient_warn_if_no_imageio(self):
evfile = self.dum
with pytest.warns(UserWarning, match="imageio needed"):
main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--fdotmin",
" -0.1",
"--fdotmax",
"0.1",
"--transient",
]
)
def test_zsearch_print_upperlim(self):
evfile = self.empty
outfile = main_zsearch([evfile, "-f", "4", "-F", "6", "-N", "1", "--fast"])[0]
plot_folding([outfile], ylog=True, output_data_file="bla.qdp")
# assert "Upper limit for sinusoids:" in caplog.text
os.unlink(outfile)
def test_zsearch_fast(self):
evfile = self.dum
main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--fast",
"--find-candidates",
"--oversample",
"4",
]
)
outfile = "events_Z22_9.85-9.95Hz_fast" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True, output_data_file="bla.qdp")
efperiod = load_folding(outfile)
assert len(efperiod.fdots) > 1
assert efperiod.N == 2
os.unlink(outfile)
def test_zsearch_fast_nofdot(self):
evfile = self.dum
outfiles = main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--fast",
"--fdotmin",
0,
"--fdotmax",
"0",
]
)
outfile = outfiles[0]
assert os.path.exists(outfile)
efperiod = load_folding(outfile)
assert not isinstance(efperiod.fdots, Iterable) or len(efperiod.fdots) <= 1
assert efperiod.N == 2
os.unlink(outfile)
def test_zsearch_fast_nbin_small_warns(self):
evfile = self.dum
with pytest.warns(UserWarning, match="The number of bins is too small"):
_ = main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"2",
"--fast",
]
)
def test_zsearch_fdots_fast(self):
evfile = self.dum
outfiles = main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--fast",
"--mean-fdot",
"1e-10",
]
)
outfile = outfiles[0]
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True, output_data_file="bla.qdp")
efperiod = load_folding(outfile)
assert len(efperiod.fdots) > 1
assert efperiod.N == 2
os.unlink(outfile)
def test_zsearch_fddots_fast(self):
evfile = self.dum
outfiles = main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--fast",
"--mean-fdot",
"0",
"--mean-fddot",
"1e-13",
]
)
outfile = outfiles[0]
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True, output_data_file="bla.qdp")
efperiod = load_folding(outfile)
assert len(efperiod.fdots) > 1
assert efperiod.N == 2
os.unlink(outfile)
def test_zsearch_fdots_ffa(self):
evfile = self.dum
with pytest.warns(UserWarning, match="Folding Algorithm functionality"):
main_zsearch(
[
evfile,
"-f",
"9.89",
"-F",
"9.92",
"-n",
"32",
"--ffa",
"--find-candidates",
]
)
outfile = "events_Z22_9.89-9.92Hz_ffa" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True, output_data_file="bla_ffa.qdp")
efperiod = load_folding(outfile)
assert efperiod.N == 2
assert np.isclose(efperiod.peaks[0], self.pulse_frequency, atol=1 / 25.25)
os.unlink(outfile)
def test_fold_fast_fails(self):
evfile = self.dum
with pytest.raises(ValueError, match="The fast option is only available for z "):
main_efsearch([evfile, "-f", "9.85", "-F", "9.95", "-n", "64", "--fast"])
def test_zsearch_fdots_fast_transient(self):
evfile = self.dum
main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--fast",
"--transient",
"--n-transient-intervals",
"16",
]
)
@pytest.mark.skipif("not HAS_PD")
def test_orbital(self):
import pandas as pd
events = load_events(self.dum)
csv_file = decide_binary_parameters(
137430,
[0.03, 0.035],
[2.0 * 86400, 2.5 * 86400],
[0.0, 1.0],
fdot_range=[0, 5e-10],
reset=False,
NMAX=10,
)
table = pd.read_csv(csv_file)
assert len(table) == 10
folding_orbital_search(events, csv_file, chunksize=10, outfile="out.csv")
table = pd.read_csv("out.csv")
assert len(table) == 10
assert np.all(table["done"])
os.unlink(csv_file)
os.unlink("out.csv")
@pytest.mark.remote_data
@pytest.mark.skipif("not HAS_PINT")
def test_efsearch_deorbit(self):
evfile = self.dum
ip = main_zsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--deorbit-par",
self.par,
]
)
outfile = "events_Z22_9.85-9.95Hz" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
plot_folding([outfile], ylog=True)
@pytest.mark.remote_data
@pytest.mark.skipif("not HAS_PINT")
def test_fold_deorbit(self):
evfile = self.dum
main_fold(
[
evfile,
"-f",
str(self.pulse_frequency),
"-n",
"64",
"--test",
"--norm",
"ratios",
"--deorbit-par",
self.par,
"--pepoch",
str(self.mjdref),
]
)
outfile = hen_root(evfile) + "_ratios.png"
assert os.path.exists(outfile)
os.unlink(outfile)
@pytest.mark.remote_data
@pytest.mark.skipif("not HAS_PINT")
def test_deorbit(self):
evfile = self.dum
main_deorbit([evfile, "--deorbit-par", self.par])
outfile = hen_root(evfile) + "_deorb" + HEN_FILE_EXTENSION
assert os.path.exists(outfile)
os.unlink(outfile)
def test_efsearch_deorbit_invalid(self):
evfile = self.dum
with pytest.raises(FileNotFoundError, match="Parameter file"):
ip = main_efsearch(
[
evfile,
"-f",
"9.85",
"-F",
"9.95",
"-n",
"64",
"--deorbit-par",
"nonexistent.par",
]
)
def test_accelsearch(self):
evfile = self.dum
with pytest.warns(UserWarning, match="The accelsearch functionality is "):
outfile = main_accelsearch(
[
evfile,
"--fmin",
"1",
"--fmax",
"10",
"--zmax",
"1",
"--delta-z",
"0.5",
]
)
assert os.path.exists(outfile)
os.unlink(outfile)
def test_accelsearch_nodetections(self):
evfile = self.dum_scramble
with pytest.warns(UserWarning, match="The accelsearch functionality"):
outfile = main_accelsearch([evfile, "--fmin", "1", "--fmax", "1.1", "--zmax", "1"])
assert os.path.exists(outfile)
os.unlink(outfile)
def test_accelsearch_detrend(self):
evfile = self.dum_scramble
with pytest.warns(UserWarning, match="The accelsearch functionality"):
outfile = main_accelsearch(
[
evfile,
"--fmin",
"1",
"--fmax",
"1.1",
"--zmax",
"1",
"--detrend",
"20",
]
)
assert os.path.exists(outfile)
os.unlink(outfile)
def test_accelsearch_rednoise(self):
evfile = self.dum_scramble
with pytest.warns(UserWarning, match="The accelsearch functionality"):
outfile = main_accelsearch(
[
evfile,
"--fmin",
"1",
"--fmax",
"1.1",
"--zmax",
"1",
"--red-noise-filter",
]
)
assert os.path.exists(outfile)
os.unlink(outfile)
@pytest.mark.skipif("not HAS_PINT")
def test_accelsearch_deorbit(self):
evfile = self.dum_scramble
with pytest.warns(UserWarning, match="The accelsearch functionality"):
outfile = main_accelsearch(
[
evfile,
"--fmin",
"1",
"--fmax",
"1.1",
"--zmax",
"1",
"--deorbit-par",
self.par,
]
)
assert os.path.exists(outfile)
os.unlink(outfile)
@pytest.mark.skipif("HAS_PINT")
def test_accelsearch_deorbit_fails_no_pint(self):
evfile = self.dum_scramble
with pytest.warns(UserWarning, match="The accelsearch functionality"):
with pytest.raises(ImportError, match="PINT"):
main_accelsearch(
[
evfile,
"--fmin",
"1",
"--fmax",
"1.1",
"--zmax",
"1",
"--deorbit-par",
self.par,
]
)
def test_accelsearch_energy_and_freq_filt(self):
evfile = self.dum
with pytest.warns(UserWarning, match="The accelsearch functionality"):
outfile = main_accelsearch(
[
evfile,
"--emin",
"3",
"--emax",
"80",
"--fmin",
"0.1",
"--fmax",
"1",
"--zmax",
"5",
]
)
assert os.path.exists(outfile)
os.unlink(outfile)
def test_accelsearch_pad(self):
evfile = self.dum
with pytest.warns(UserWarning, match="The accelsearch functionality"):
outfile = main_accelsearch([evfile, "--pad-to-double", "--zmax", "1"])
assert os.path.exists(outfile)
os.unlink(outfile)
def test_accelsearch_interbin(self):
evfile = self.dum
with pytest.warns(UserWarning, match="The accelsearch functionality"):
outfile = main_accelsearch([evfile, "--interbin", "--zmax", "1"])
assert os.path.exists(outfile)
os.unlink(outfile)
def test_z2vspf(self):
evfile = self.dum
ip = main_z2vspf([evfile, "--show-z-values", "30", "--ntrial", "10"])
@classmethod
def teardown_class(cls):
cleanup_test_dir(".")
|
StingraySoftwareREPO_NAMEHENDRICSPATH_START.@HENDRICS_extracted@HENDRICS-main@hendrics@tests@test_efsearch.py@.PATH_END.py
|
{
"filename": "newWindowHack.py",
"repo_name": "iraf-community/pyraf",
"repo_path": "pyraf_extracted/pyraf-main/pyraf/newWindowHack.py",
"type": "Python"
}
|
"""
This module hacks tkSimpleDialog to make askstring() work
even when the root window has been withdrawn.
w/o this hack, Python-2.4.3/Tk8.4 locks up for the following
code: the dialog is created, but it is withdrawn just like the
root window (!) so there is nothing to interact with and the system
hangs.
import tkinter
tk = tkinter.Tk()
tk.withdraw()
import tkSimpleDialog
tkSimpleDialog.askstring("window title", "question?")
"""
import tkinter.simpledialog
from tkinter import Toplevel, Frame
def __init__(self, parent, title=None):
'''Initialize a dialog.
Arguments:
parent -- a parent window (the application window)
title -- the dialog title
'''
Toplevel.__init__(self, parent)
if parent.winfo_viewable(): # XXX this condition is the only "fix".
self.transient(parent)
if title:
self.title(title)
self.parent = parent
self.result = None
body = Frame(self)
self.initial_focus = self.body(body)
body.pack(padx=5, pady=5)
self.buttonbox()
self.wait_visibility() # window needs to be visible for the grab
self.grab_set()
if not self.initial_focus:
self.initial_focus = self
self.protocol("WM_DELETE_WINDOW", self.cancel)
if self.parent is not None:
self.geometry("+{:d}+{:d}".format(parent.winfo_rootx() + 50,
parent.winfo_rooty() + 50))
self.initial_focus.focus_set()
self.wait_window(self)
tkinter.simpledialog.Dialog.__init__ = __init__
"""
Here are some more notes from my "investigation":
====================================================================================
http://mail.python.org/pipermail/python-list/2005-April/275761.html
tkinter "withdraw" and "askstring" problem
Jeff Epler jepler at unpythonic.net
Tue Apr 12 15:58:22 CEST 2005
* Previous message: tkinter "withdraw" and "askstring" problem
* Next message: os.open() i flaga lock
* Messages sorted by: [ date ] [ thread ] [ subject ] [ author ]
The answer has to do with a concept Tk calls "transient".
wm transient window ?master?
If master is specified, then the window manager is informed that
window is a transient window (e.g. pull-down menu) working on
behalf of master (where master is the path name for a top-level
window). If master is specified as an empty string then window
is marked as not being a transient window any more. Otherwise
the command returns the path name of window's current master, or
an empty string if window isn't currently a transient window. A
transient window will mirror state changes in the master and
inherit the state of the master when initially mapped. It is an
error to attempt to make a window a transient of itself.
In tkSimpleDialog, the dialog window is unconditionally made transient
for the master. Windows is simply following the documentation: The
askstring window "inherit[s] the state of the master [i.e., withdrawn]
when initially mapped".
The fix is to modify tkSimpleDialog.Dialog.__init__ to only make the
dialog transient for its master when the master is viewable. This
mirrors what is done in dialog.tcl in Tk itself. You can either change
tkSimpleDialog.py, or you can include a new definition of __init__ with
these lines at the top, and the rest of the function the same:
def __init__(self, parent, title = None):
''' the docstring ... '''
Toplevel.__init__(self, parent)
if parent.winfo_viewable():
self.transient(parent)
...
# Thanks for being so dynamic, Python!
tkSimpleDialog.Dialog.__init__ = __init__; del __init__
Jeff
"""
|
iraf-communityREPO_NAMEpyrafPATH_START.@pyraf_extracted@pyraf-main@pyraf@newWindowHack.py@.PATH_END.py
|
{
"filename": "_plotting.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scikit-learn/py3/sklearn/utils/_plotting.py",
"type": "Python"
}
|
import numpy as np
from . import check_consistent_length, check_matplotlib_support
from ._response import _get_response_values_binary
from .multiclass import type_of_target
from .validation import _check_pos_label_consistency
class _BinaryClassifierCurveDisplayMixin:
"""Mixin class to be used in Displays requiring a binary classifier.
The aim of this class is to centralize some validations regarding the estimator and
the target and gather the response of the estimator.
"""
def _validate_plot_params(self, *, ax=None, name=None):
check_matplotlib_support(f"{self.__class__.__name__}.plot")
import matplotlib.pyplot as plt
if ax is None:
_, ax = plt.subplots()
name = self.estimator_name if name is None else name
return ax, ax.figure, name
@classmethod
def _validate_and_get_response_values(
cls, estimator, X, y, *, response_method="auto", pos_label=None, name=None
):
check_matplotlib_support(f"{cls.__name__}.from_estimator")
name = estimator.__class__.__name__ if name is None else name
y_pred, pos_label = _get_response_values_binary(
estimator,
X,
response_method=response_method,
pos_label=pos_label,
)
return y_pred, pos_label, name
@classmethod
def _validate_from_predictions_params(
cls, y_true, y_pred, *, sample_weight=None, pos_label=None, name=None
):
check_matplotlib_support(f"{cls.__name__}.from_predictions")
if type_of_target(y_true) != "binary":
raise ValueError(
f"The target y is not binary. Got {type_of_target(y_true)} type of"
" target."
)
check_consistent_length(y_true, y_pred, sample_weight)
pos_label = _check_pos_label_consistency(pos_label, y_true)
name = name if name is not None else "Classifier"
return pos_label, name
def _validate_score_name(score_name, scoring, negate_score):
"""Validate the `score_name` parameter.
If `score_name` is provided, we just return it as-is.
If `score_name` is `None`, we use `Score` if `negate_score` is `False` and
`Negative score` otherwise.
Otherwise, if `scoring` is a string or a callable, we infer the name from it. We
replace `_` by spaces and capitalize the first letter. We replace `neg_` by
`"Negative"` if `negate_score` is `False` or just remove it otherwise.
"""
if score_name is not None:
return score_name
elif scoring is None:
return "Negative score" if negate_score else "Score"
else:
score_name = scoring.__name__ if callable(scoring) else scoring
if negate_score:
if score_name.startswith("neg_"):
score_name = score_name[4:]
else:
score_name = f"Negative {score_name}"
elif score_name.startswith("neg_"):
score_name = f"Negative {score_name[4:]}"
score_name = score_name.replace("_", " ")
return score_name.capitalize()
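# Illustrative behavior (not part of the scikit-learn source):
#   _validate_score_name(None, "neg_mean_squared_error", negate_score=False)
#     -> "Negative mean squared error"
#   _validate_score_name(None, "neg_mean_squared_error", negate_score=True)
#     -> "Mean squared error"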
def _interval_max_min_ratio(data):
"""Compute the ratio between the largest and smallest inter-point distances.
A value larger than 5 typically indicates that the parameter range would
better be displayed with a log scale while a linear scale would be more
suitable otherwise.
"""
diff = np.diff(np.sort(data))
return diff.max() / diff.min()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scikit-learn@py3@sklearn@utils@_plotting.py@.PATH_END.py
|
{
"filename": "Star.py",
"repo_name": "bretonr/Icarus",
"repo_path": "Icarus_extracted/Icarus-master/Icarus/Core/Star.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE
from __future__ import print_function, division
__all__ = ["Star"]
from ..Utils.import_modules import *
from .. import Utils
from .Star_base import Star_base
logger = logging.getLogger(__name__)
######################## class Star ########################
class Star(Star_base):
"""Star(Star_base)
This class allows one to determine the flux of the companion star
in a binary system using an atmosphere grid. It is derived
from the Star_base class.
The noticeable difference is that the surface is constructed
from a geodesic tessellation of equilateral triangles derived
from an icosahedron.
Axis convention:
x: From the primary center of mass towards the secondary.
y: Along the orbital plane along the orbital motion.
z: Along the orbital angular momentum.
"""
def __init__(self, ndiv, atmo_grid=None, read=True, oldchi=False):
Star_base.__init__(self, ndiv, atmo_grid=atmo_grid)
if read:
self._Read_geodesic()
else:
self._New_Initialization()
self.oldchi = oldchi
def _New_Initialization(self):
"""Initialization(self)
Run important initialization steps important for the
class to work.
self.vertices contains x,y,z coordinates of vertices. shape = self.n_vertices,3
self.faces contains indices of vertices forming faces. shape = self.n_faces,3
self.assoc contains indices of faces associated to a vertice. shape = self.n_vertices,6
Note: when only 5 faces associated, 6th value is equal to -99
"""
print( "Generating the geodesic surface using PyGTS" )
try:
import gts
except:
print( "You likely don't have the PyGTS package installed on your computer." )
print( "It is impossible to create the surface vertices from scratch." )
print( "Will trying reading them from the restart file instead." )
self._Read_geodesic()
return
# Generate the geodesic primitives
s = gts.sphere(self.ndiv)
x,y,z,t = gts.get_coords_and_face_indices(s,True)
self.vertices = np.c_[x,y,z]
self.faces = np.array(t)
self.n_vertices = self.vertices.shape[0]
self.n_faces = self.faces.shape[0]
print( "Calculatating the associations" )
self.assoc = Utils.Tessellation.Match_assoc(self.faces, self.n_vertices)
# We will pre-calculate the surface areas. They will need to be multiplied by rc^2.
# The calculation is simply the Pythagorean sum of the areas of the respective projections on the x,y,z planes.
print( "meshing the surface" )
mesh = self.vertices[self.faces]
print( "calculating the area" )
self.pre_area = 0.5*np.sqrt(
((mesh[:,0,0]*mesh[:,1,1]+mesh[:,1,0]*mesh[:,2,1]+mesh[:,2,0]*mesh[:,0,1])
- (mesh[:,0,1]*mesh[:,1,0]+mesh[:,1,1]*mesh[:,2,0]+mesh[:,2,1]*mesh[:,0,0]))**2
+ ((mesh[:,0,1]*mesh[:,1,2]+mesh[:,1,1]*mesh[:,2,2]+mesh[:,2,1]*mesh[:,0,2])
- (mesh[:,0,2]*mesh[:,1,1]+mesh[:,1,2]*mesh[:,2,1]+mesh[:,2,2]*mesh[:,0,1]))**2
+ ((mesh[:,0,2]*mesh[:,1,0]+mesh[:,1,2]*mesh[:,2,0]+mesh[:,2,2]*mesh[:,0,0])
- (mesh[:,0,0]*mesh[:,1,2]+mesh[:,1,0]*mesh[:,2,2]+mesh[:,2,0]*mesh[:,0,2]))**2 )
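# Note: the expression above is algebraically equal to half the norm of the
# cross product of two triangle edge vectors; an equivalent (editorial) sketch:
#   d1 = mesh[:,1,:] - mesh[:,0,:]
#   d2 = mesh[:,2,:] - mesh[:,0,:]
#   pre_area = 0.5*np.sqrt((np.cross(d1, d2)**2).sum(axis=1))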
# The cosine of x,y,z for the center of the faces. shape = n_faces, 3
print( "calculating the angles" )
self.cosx, self.cosy, self.cosz = mesh.mean(axis=1).T
return
def _Initialization(self):
"""Initialization(self)
Run important initialization steps important for the
class to work.
self.vertices contains x,y,z coordinates of vertices. shape = self.n_vertices,3
self.faces contains indices of vertices forming faces. shape = self.n_faces,3
self.assoc contains indices of faces associated to a vertice. shape = self.n_vertices,6
Note: when only 5 faces associated, 6th value is equal to -99
"""
print( "Generating the geodesic surface" )
# Generate the geodesic primitives
self.n_faces, self.n_vertices, self.faces, self.vertices, self.assoc = Utils.Tessellation.Make_geodesic(self.ndiv)
# We will pre-calculate the surface areas. They will need to be multiplied by rc^2.
# The calculation is simply the Pythagorean sum of the areas of the respective projections on the x,y,z planes.
print( "meshing the surface" )
mesh = self.vertices[self.faces]
print( "calculating the area" )
self.pre_area = 0.5*np.sqrt(
((mesh[:,0,0]*mesh[:,1,1]+mesh[:,1,0]*mesh[:,2,1]+mesh[:,2,0]*mesh[:,0,1])
- (mesh[:,0,1]*mesh[:,1,0]+mesh[:,1,1]*mesh[:,2,0]+mesh[:,2,1]*mesh[:,0,0]))**2
+ ((mesh[:,0,1]*mesh[:,1,2]+mesh[:,1,1]*mesh[:,2,2]+mesh[:,2,1]*mesh[:,0,2])
- (mesh[:,0,2]*mesh[:,1,1]+mesh[:,1,2]*mesh[:,2,1]+mesh[:,2,2]*mesh[:,0,1]))**2
+ ((mesh[:,0,2]*mesh[:,1,0]+mesh[:,1,2]*mesh[:,2,0]+mesh[:,2,2]*mesh[:,0,0])
- (mesh[:,0,0]*mesh[:,1,2]+mesh[:,1,0]*mesh[:,2,2]+mesh[:,2,0]*mesh[:,0,2]))**2 )
# The cosine of x,y,z for the center of the faces. shape = n_faces, 3
print( "calculating the angles" )
self.cosx, self.cosy, self.cosz = mesh.mean(axis=1).T
return
def Outline(self, ntheta=100, debug=False):
"""Outline(ntheta=100, debug=False)
Calculates the radii of the outline of the star for a vector
of theta=np.arange(ntheta)/ntheta*cts.TWOPI.
theta is defined as np.arctan2(y_projected,z_projected).
theta0 = 0
dtheta = cts.TWOPI/ntheta
ntheta (100): Number of points defining the outline.
debug (False): Print debug information when True.
>>> self._Outline()
"""
if debug: print( 'Begin _Outline()' )
theta = np.arange(ntheta, dtype=float)/ntheta * cts.TWOPI
y = np.cos(theta)
z = np.sin(theta)
# radii of the outline of the star.
radii = self._Radius(y*0., y, z, self.psi0, self.rc_eq)
return radii
def _Read_geodesic(self):
"""Read_geodesic()
The information about the geodesic surface on the unit
sphere has already been precalculated. We simply load the
one that has the desired precision.
"""
#f = open('geodesic/geodesic_n%i.txt'%self.ndiv, 'r')
f = open(Utils.__path__[0][:-5]+'geodesic/geodesic_n%i.txt'%self.ndiv, 'r')
lines = f.readlines()
# We store the number of vertices, faces and edges as class variables.
tmp, self.n_vertices, self.n_faces, self.n_edges = lines[0].split()
self.n_vertices = int(self.n_vertices)
self.n_faces = int(self.n_faces)
self.n_edges = int(self.n_edges)
# Vertice information contains coordinate x,y,z of vertices. shape = n_vertices,3
self.vertices = np.array([l.split() for l in lines[1:1+self.n_vertices]], dtype=float)
# Face information contains indices of vertices forming faces. shape = n_faces,3
self.faces = np.array([l.split() for l in lines[1+self.n_vertices:1+self.n_vertices+self.n_faces]], dtype=int)
self.faces = self.faces[:,1:]
# We calculate the associations
self.assoc = Utils.Tessellation.Match_assoc(self.faces, self.n_vertices)
# We will pre-calculate the surface areas. They will need to be multiplied by rc^2.
# The calculation is simply the Pythagorean sum of the areas of the respective projections on the x,y,z planes.
mesh = self.vertices[self.faces]
self.pre_area = 0.5*np.sqrt(
((mesh[:,0,0]*mesh[:,1,1]+mesh[:,1,0]*mesh[:,2,1]+mesh[:,2,0]*mesh[:,0,1])
- (mesh[:,0,1]*mesh[:,1,0]+mesh[:,1,1]*mesh[:,2,0]+mesh[:,2,1]*mesh[:,0,0]))**2
+ ((mesh[:,0,1]*mesh[:,1,2]+mesh[:,1,1]*mesh[:,2,2]+mesh[:,2,1]*mesh[:,0,2])
- (mesh[:,0,2]*mesh[:,1,1]+mesh[:,1,2]*mesh[:,2,1]+mesh[:,2,2]*mesh[:,0,1]))**2
+ ((mesh[:,0,2]*mesh[:,1,0]+mesh[:,1,2]*mesh[:,2,0]+mesh[:,2,2]*mesh[:,0,0])
- (mesh[:,0,0]*mesh[:,1,2]+mesh[:,1,0]*mesh[:,2,2]+mesh[:,2,0]*mesh[:,0,2]))**2 )
# The cosine of x,y,z for the center of the faces. shape = n_faces, 3
self.cosx, self.cosy, self.cosz = mesh.mean(axis=1).T
return
def Radius(self):
"""Radius()
Returns the volume-averaged radius of the star, in
units of orbital separation.
>>> self.Radius()
"""
return (self.rc**3).mean()**(1./3)
def Roche(self):
"""Roche()
Returns the volume-averaged Roche lobe radius
of the star in units of orbital separation.
For the geodesic tessellation, the volume-averaged
Roche-lobe radius is easily found since each surface
element subtends the same solid angle. Therefore, the
volume-averaged radius is the cubic root of the average
values of the radii cubed <rc^3>^1/3.
>>> self.Roche()
"""
filling = self.filling
self.Make_surface(filling=1.)
radius = self.Radius()
self.Make_surface(filling=filling)
return radius
def _Surface(self, debug=False):
"""_Surface(debug=False)
Calculates the surface grid values of surface gravity
and surface element area by solving the potential
equation.
debug (False): Print debug information when True.
>>> self._Surface()
"""
logger.log(9, "start")
if debug: print( 'Begin _Surface()' )
## Calculate some quantities
self._Calc_qp1by2om2()
## Saddle point, i.e. the Roche-lobe radius at L1 (on the near side)
xl1 = self._Saddle(0.5)
self.L1 = xl1
if debug: print( 'Saddle %f' %xl1 )
## Potential at the saddle point, L1
psil1 = self._Potential(xl1, 0., 0.)[-1]
if debug: print( 'Potential psil1 %f' %psil1 )
## rc_l1 is the stellar radius on the near side, i.e. the nose of the star
self.rc_l1 = self.filling*xl1
if debug: print( 'rc_l1 %f' %self.rc_l1 )
## Potential at rc_l1, the nose of the star
trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi0 = self._Potential(self.rc_l1, 0., 0.)
self.psi0 = psi0
if debug: print( 'Potential psi0\n trc: %f, trx %f, dpsi %f, dpsidx %f, dpsidy %f, dpsidz %f, psi0 %f' % (trc, trx, dpsi, dpsidx, dpsidy, dpsidz, self.psi0) )
## rc_pole is stellar radius at 90 degrees, i.e. at the pole, which is perpendicular to the line separating the two stars and the orbital plane
if debug: print( 'psi0,rc_l1 '+str(self.psi0)+' '+str(self.rc_l1) )  # (fixed: 'r' was undefined here)
self.rc_pole = self._Radius(0.,0.,1.,self.psi0,self.rc_l1)
trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(0.,0.,self.rc_pole)
## log surface gravity at the pole of the star
self.logg_pole = np.log10(np.sqrt(dpsidx**2+dpsidy**2+dpsidz**2))
## rc_eq is stellar radius at 90 degrees in the orbital plane, i.e. at the equator, but not in the direction of the companion
self.rc_eq = self._Radius(0.,1.,0.,self.psi0,self.rc_l1)
trc, trx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(0.,self.rc_eq,0.)
## log surface gravity at the equator of the star
self.logg_eq = np.log10(np.sqrt(dpsidx**2+dpsidy**2+dpsidz**2))
## r_vertices are the radii of the vertices. shape = n_vertices
self.r_vertices = self._Radius(self.vertices[:,0], self.vertices[:,1], self.vertices[:,2], self.psi0, self.rc_l1)
### Calculate useful quantities for all surface elements
## rc corresponds to r1 from Tjemkes et al., the distance from the center of mass of the pulsar companion. shape = n_faces
self.rc = self._Radius(self.cosx, self.cosy, self.cosz, self.psi0, self.rc_l1)
## rx corresponds to r2 from Tjemkes et al., the distance from the center of mass of the pulsar. shape = n_faces
trc, self.rx, dpsi, dpsidx, dpsidy, dpsidz, psi = self._Potential(self.rc*self.cosx,self.rc*self.cosy,self.rc*self.cosz)
## log surface gravity. shape = n_faces
geff = self._Geff(dpsidx, dpsidy, dpsidz)
self.logg = np.log10(geff)
## gradient of the gravitational potential in x,y,z. shape = n_faces
self.gradx = -dpsidx/geff
self.grady = -dpsidy/geff
self.gradz = -dpsidz/geff
if self.oldchi:
## coschi is the cosine angle between the rx and the surface element. shape = n_faces
## A value of 1 means that the companion's surface element is directly facing the pulsar, 0 is at the limb and -1 on the back.
## The following is the old way, which is derived from the spherical approximation, i.e. that the normal to the surface is approximately the same as the radial position
#self.coschi = -(self.rc-self.cosx)/self.rx
## The better calculation should use the gradient as the normal vector, and the direction to the pulsar as positive x.
## This implies that the angle coschi is simply the x component of the gradient.
self.coschi = self.gradx.copy()
else:
## coschi = (N * rx) / (abs(N) abs(rx))
## N: vector normal to the surface, which is the grad of the potential
## rx: vector from the secondary (e.g. neutron star) to the primary (e.g. companion)
## note that N is normalised already
self.coschi = -self.rc*((self.cosx-1/self.rc)*self.gradx + self.cosy*self.grady + self.cosz*self.gradz) / np.abs(self.rx)
## surface area. shape = n_faces
self.area = self.rc**2 * self.pre_area
logger.log(9, "end")
return
######################## class Star ########################
|
bretonrREPO_NAMEIcarusPATH_START.@Icarus_extracted@Icarus-master@Icarus@Core@Star.py@.PATH_END.py
|
{
"filename": "GaussBAODVLikelihood.py",
"repo_name": "ja-vazquez/SimpleMC",
"repo_path": "SimpleMC_extracted/SimpleMC-master/simplemc/likelihoods/GaussBAODVLikelihood.py",
"type": "Python"
}
|
from simplemc.likelihoods.BaseLikelihood import BaseLikelihood
class GaussBAODVLikelihood(BaseLikelihood):
def __init__(self, name, z, DV, DVErr, fidtheory, maxchi2=1e30):
"""
This is a BAO likelihood, where we give DV and its error, and optionally a value
at which chi2 is capped.
Parameters
----------
name : str
Name of the likelihood.
z : float
Effective redshift of the measurement.
DV : float
Volume-averaged BAO distance (internally divided by the fiducial rd).
DVErr : float
Error on DV (internally divided by the fiducial rd).
fidtheory
Fiducial theory object providing the sound horizon rd.
maxchi2 : float
Cap applied to the chi2 value.
"""
BaseLikelihood.__init__(self, name)
self.z = z
rd = fidtheory.rd
DV /= rd
DVErr /= rd
self.maxchi2 = maxchi2
print(name, "measurement in ", fidtheory.rd_approx, ":", DV, "+-", DVErr)
self.setData(DV, DVErr)
def setData(self, DV, DVErr):
self.DV = DV
self.DVErr2 = DVErr**2
def loglike(self):
DVT = self.theory_.DVOverrd(self.z)
chi2 = min(self.maxchi2, (DVT-self.DV)**2/(self.DVErr2))
return -chi2/2.0
|
ja-vazquezREPO_NAMESimpleMCPATH_START.@SimpleMC_extracted@SimpleMC-master@simplemc@likelihoods@GaussBAODVLikelihood.py@.PATH_END.py
|
{
"filename": "slice2d_get_support.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/particles/spd_slice2d/slice2d_get_support.py",
"type": "Python"
}
|
from .tplot_average import tplot_average
def slice2d_get_support(variable, trange, matrix=False):
"""
Retrieve user-specified support data for spd_slice2d
Parameters
----------
variable: str or float or ndarray
Tplot variable name or input data
trange: list of float
Time range to average over if the input is a tplot variable
matrix: bool
Specifies that the input is a matrix (not used in this function)
Returns
-------
Output data (averaged over the time range if the input is a tplot variable)
"""
if variable is None:
return
if isinstance(variable, str): # tplot variable
return tplot_average(variable, trange)
return variable
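# Illustrative usage (the variable name is hypothetical, not from pyspedas docs):
#   v = slice2d_get_support('mms1_mec_v_gsm', trange)  # tplot variable -> time-averaged value
#   v = slice2d_get_support([0., 0., 1.], trange)      # array input -> returned unchanged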
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@particles@spd_slice2d@slice2d_get_support.py@.PATH_END.py
|
{
"filename": "MakePlotMovie.py",
"repo_name": "saopicc/killMS",
"repo_path": "killMS_extracted/killMS-master/killMS/MakePlotMovie.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
killMS, a package for calibration in radio interferometry.
Copyright (C) 2013-2017 Cyril Tasse, l'Observatoire de Paris,
SKA South Africa, Rhodes University
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import optparse
import sys
from killMS.Other import MyPickle
from killMS.Other import logo
from killMS.Other import ModColor
from DDFacet.Other import logger
import os
log=logger.getLogger("killMS")
log.logger.setLevel(logger.logging.CRITICAL)
sys.path=[name for name in sys.path if not(("pyrap" in name)&("/usr/local/lib/" in name))]
# test
#import numpy
#print numpy.__file__
#import pyrap
#print pyrap.__file__
#stop
if "nocol" in sys.argv:
print("nocol")
ModColor.silent=1
if "nox" in sys.argv:
import matplotlib
matplotlib.use('agg')
print(ModColor.Str(" == !NOX! =="))
import time
import os
import numpy as np
import pickle
from itertools import product as ItP
NameSave="last_plotSols.obj"
def read_options():
desc="""killMS Questions and suggestions: cyril.tasse@obspm.fr"""
opt = optparse.OptionParser(usage='Usage: %prog --ms=somename.MS <options>',version='%prog version 1.0',description=desc)
group = optparse.OptionGroup(opt, "* Data-related options", "Won't work if not specified.")
group.add_option('--SolsFile',help='Input Solutions list [no default]',default='')
group.add_option('--DoResid',type="int",help='No [no default]',default=-1)
group.add_option('--PlotMode',type='str',help=' [no default]',default="AP")
opt.add_option_group(group)
options, arguments = opt.parse_args()
f = open(NameSave,"wb")
pickle.dump(options,f)
import pylab
import numpy as np
def GiveNXNYPanels(Ns,ratio=800/500):
nx=int(round(np.sqrt(Ns/ratio)))
ny=int(nx*ratio)
if nx*ny<Ns: ny+=1
return nx,ny
from killMS.Array import ModLinAlg
def NormMatrices(G):
# NB: normalization is currently disabled; this early return makes the code below unreachable
print("no norm")
return G
nt,nch,na,_,_=G.shape
for iChan,it in ItP(range(nch),range(nt)):
Gt=G[it,iChan,:,:]
u,s,v=np.linalg.svd(Gt[0])
# #J0/=np.linalg.det(J0)
# J0=Gt[0]
# JJ=np.dot(J0.T.conj(),J0)
# sqJJ=ModLinAlg.sqrtSVD(JJ)
# sqJJinv=ModLinAlg.invSVD(JJ)
# U=np.dot(J0,sqJJinv)
U=np.dot(u,v)
for iAnt in range(0,na):
Gt[iAnt,:,:]=np.dot(U.T.conj(),Gt[iAnt,:,:])
#Gt[iAnt,:,:]=np.dot(np.dot(u,Gt[iAnt,:,:]),v.T.conj())
#Gt[iAnt,:,:]=np.dot(Gt[iAnt,:,:],J0)
return G
def main(options=None):
if options is None:
f = open(NameSave,'rb')
options = pickle.load(f)
FilesList=options.SolsFile.split(",")
if not os.path.isdir("png"):
os.system("mkdir -p png")
LSols=[]
nSol=len(FilesList)
t0=None
for FileName in FilesList:
if "npz" in FileName:
SolsDico=np.load(FileName)
Sols=SolsDico["Sols"]
StationNames=SolsDico["StationNames"]
ClusterCat=SolsDico["ClusterCat"]
Sols=Sols.view(np.recarray)
nt,nch,na,nd,_,_=Sols.G.shape
elif "h5" in FileName:
import tables
H5=tables.openFile(FileName)
npol, nch, nd, na, nchan, nt=H5.root.sol000.amplitude000.val.shape
GH5=H5.root.sol000.amplitude000.val[:]*np.exp(1j*H5.root.sol000.phase000.val[:])
Times=H5.root.sol000.amplitude000.time[:]
StationNames=H5.root.sol000.antenna[:]["name"]
H5.close()
Sols=np.zeros((nt,),dtype=[("t0",np.float64),
("t1",np.float64),
("G",np.complex64,(nch,na,nd,2,2))])
Sols=Sols.view(np.recarray)
dt=np.median(Times[1::]-Times[0:-1])
Sols.t0=Times-dt/2.
Sols.t1=Times+dt/2.
for iTime in range(nt):
for iDir0,iDir1 in zip(range(3),range(3)):#[0,2,1]):#range(nd):
for iAnt in range(na):
for ipol in range(4):
Sols.G[iTime,iAnt,iDir0].flat[ipol]=GH5[ipol,iDir1,iAnt,0,iTime]
ind=np.where(Sols.t1!=0)[0]
Sols=Sols[ind]
tm=(Sols.t1+Sols.t0)/2.
if t0 is None:
t0=tm[0]
tm-=t0
Sols.t0=tm
nx,ny=GiveNXNYPanels(na)
LSols.append(Sols)
# LSols=[LSols[0]]
# nSol=1
# diag terms
Lls=["-",":",":"]
Lcol0=["black","black","blue"]
Lcol1=["gray","gray","red"]
Lalpha0=[1,1,1]
Lalpha1=[0.5,0.5,0.5]
# Lls=["-","-",":"]
# Lcol0=["black","blue","blue"]
# Lcol1=["gray","red","red"]
# Lalpha0=[1,0.5,1]
# Lalpha1=[0.5,0.5,0.5]
# off-diag terms
Lls_off=Lls#["-","--",":"]
Lcol0_off=Lcol0#["black","black","blue"]
Lcol1_off=Lcol1#["gray","gray","red"]
if options.DoResid!=-1:
Sresid=LSols[1].copy()
LSols.append(Sresid)
DirList=range(nd)
#DirList=[np.where(ClusterCat["SumI"]==np.max(ClusterCat["SumI"]))[0][0]]
#print DirList
nt,nch,na,nd,_,_=Sols.G.shape
for iDir in DirList:
iAnt=0
for iSol in range(nSol):
Sols=LSols[iSol]
G=Sols.G[:,:,:,iDir,:,:]
Sols.G[:,:,:,iDir,:,:]=NormMatrices(G)
tm=(LSols[0].t0+LSols[0].t1)/2.
tm-=tm[0]
tm/=60.
fig=pylab.figure(0,figsize=(13,8))
for iTime in range(nt):
print("%i/%i"%(iTime,nt))
ampMax=1.5*np.max(np.median(np.abs(LSols[0].G),axis=0))
if options.PlotMode=="AP":
op0=np.abs
op1=np.angle
ylim0=0,ampMax
ylim1=-np.pi,np.pi
PlotDiag=[True,False]
elif options.PlotMode=="ReIm":
op0=np.real
op1=np.imag
ylim0=-ampMax,ampMax
ylim1=-ampMax,ampMax
PlotDiag=[True,True]
elif options.PlotMode=="A":
op0=np.abs
op1=None
ylim0=0,ampMax
PlotDiag=[True]
elif options.PlotMode=="P":
op0=np.angle
op1=None
ylim0=-np.pi,np.pi
PlotDiag=[False]
# L_ylim0=(0,1.5*np.max(np.median(np.abs(LSols[0].G[:,:,:,iDir,:,:]),axis=0)))
# if options.DoResid!=-1:
# LSols[-1].G[:,:,iDir,:,:]=LSols[1].G[:,:,iDir,:,:]-LSols[0].G[:,:,iDir,:,:]
# nSol+=1
# marker="."
lmax=1.5*np.max([np.max(np.abs(ClusterCat["l"])),np.max(np.abs(ClusterCat["m"]))])
Npix=101
lgrid,mgrid=np.mgrid[-lmax:lmax:1j*Npix,-lmax:lmax:1j*Npix]
lgrid,mgrid=lgrid.reshape((-1,1)),mgrid.reshape((-1,1))
d=np.sqrt((lgrid-ClusterCat["l"].reshape((1,-1)))**2+(mgrid-ClusterCat["m"].reshape((1,-1)))**2)
inode=np.argmin(d,axis=1)
A=np.zeros((Npix,Npix),np.complex64)
pylab.clf()
for i in range(nx):
for j in range(ny):
if iAnt>=na:continue
if iAnt>=1:
ax=pylab.subplot(nx,ny,iAnt+1,sharex=axRef,sharey=axRef)
else:
axRef=pylab.subplot(nx,ny,iAnt+1)
ax=axRef
if op1 is not None: ax2 = ax.twinx()
pylab.title(StationNames[iAnt], fontsize=9)
for iChan in range(nch):
Sols=LSols[iSol]
#G=Sols.G[:,iChan,:,iDir,:,:]
G=Sols.G[iTime,iChan,:,:,:,:]
J=G[iAnt,:,:,:]
A.flat[:]=J[:,0,0][inode]
#ax.scatter(ClusterCat["l"],ClusterCat["m"],c=op0(J[:,0,0]),vmin=0,vmax=ampMax)
ax.imshow(op0(A),vmin=0,vmax=ampMax)
ax.set_xticks([])
ax.set_yticks([])
# if op1!=None:
# ax2.plot(Sols.t0,op1(J[:,1,1]),color=Lcol1[iSol],alpha=Lalpha1[iSol],ls=Lls[iSol],marker=marker)
# ax2.plot(Sols.t0,op1(J[:,0,0]),color=Lcol1[iSol],alpha=Lalpha1[iSol],ls=Lls[iSol],marker=marker)
# if PlotDiag[1]:
# ax2.plot(Sols.t0,op1(J[:,0,1]),color=Lcol1_off[iSol],alpha=Lalpha1[iSol],ls=Lls_off[iSol],marker=marker)
# ax2.plot(Sols.t0,op1(J[:,1,0]),color=Lcol1_off[iSol],alpha=Lalpha1[iSol],ls=Lls_off[iSol],marker=marker)
# ax2.set_ylim(ylim1)
# ax2.set_xticks([])
# ax2.set_yticks([])
# #print StationNames[iAnt]
iAnt+=1
pylab.suptitle('Time since start %6.2f minutes'%(tm[iTime]))#L_ylim0)))
#pylab.tight_layout(pad=3., w_pad=0.5, h_pad=2.0)
pylab.draw()
# pylab.show(False)
# pylab.pause(0.1)
fig.savefig("png/%5.5i.png"%iTime)
#time.sleep(1)
iAnt=0
OutFile="animation.gif"
log.print("Creating %s"%OutFile)
os.system("convert -delay 10 -loop 0 png/*.png %s"%OutFile)
def driver():
read_options()
f = open(NameSave,'rb')
options = pickle.load(f)
main(options=options)
if __name__=="__main__":
# do not place any other code here --- cannot be called as a package entrypoint otherwise, see:
# https://packaging.python.org/en/latest/specifications/entry-points/
driver()
|
saopiccREPO_NAMEkillMSPATH_START.@killMS_extracted@killMS-master@killMS@MakePlotMovie.py@.PATH_END.py
|
{
"filename": "DecayLCDMCosmology.py",
"repo_name": "igomezv/simplemc_tests",
"repo_path": "simplemc_tests_extracted/simplemc_tests-main/simplemc/models/DecayLCDMCosmology.py",
"type": "Python"
}
|
import sys
import numpy as np
from scipy.integrate import odeint
from scipy.interpolate import interp1d
from scipy.optimize import minimize
from simplemc.models.LCDMCosmology import LCDMCosmology
from simplemc.cosmo.paramDefs import xfrac_par, lambda_par
class DecayLCDMCosmology(LCDMCosmology):
# note that if we don't varyOr, it will be set so that
# density at early a is zero.
def __init__(self, varylam=True, varyxfrac=True, xfrac=xfrac_par.value):
"""
This is a CDM cosmology with a decaying
dark matter component.
Parameters
----------
varylam : bool
Whether to vary the decay-rate parameter lambda.
varyxfrac : bool
Whether to vary the decaying fraction xfrac.
xfrac : float
Fraction of the dark matter in the decaying component.
"""
self.varylam = varylam
self.varyxfrac = varyxfrac
self.lam = lambda_par.value
self.xfrac = xfrac
LCDMCosmology.__init__(self)
self.logar = np.linspace(0.0, -7.1, 100)
self.ilogar = self.logar[::-1]
# force caching
self.updateParams([])
# my free parameters. We add lam, xfrac
def freeParameters(self):
l = LCDMCosmology.freeParameters(self)
if (self.varylam): l.append(lambda_par)
if (self.varyxfrac): l.append(xfrac_par)
return l
def updateParams(self, pars):
ok = LCDMCosmology.updateParams(self, pars)
if not ok:
return False
for p in pars:
if p.name == "lambda":
self.lam = p.value
if p.name == "xfrac":
self.xfrac = p.value
self.SolveEq()
# and updated with relevant rd
self.setrd(self.rd_func_(
self.Obh2, self.Ocbh2_early, self.Omnuh2, self.Nnu()))
assert(abs(self.RHSquared_a(1.0)-1) < 1e-4)
return True
def H2_rxrr_a(self, a, rx, rr):
NuContrib = self.NuDensity.rho(a)/self.h**2
return self.Ocb_std/a**3+self.Omrad/a**4+NuContrib+(1.0-self.Om-self.Or)+rx/a**3+rr/a**4
def RHS_(self, y, lna):
# we are solving rx, so that rhox=rx/a**3 and rhor=rr/a**4
##
a = np.exp(lna)
H2 = self.H2_rxrr_a(a, y[0], y[1])
H = np.sqrt(abs(H2))
factor = self.lam*y[0]/H
return np.array([- factor, + factor * a])
def FuncMin_(self, x):
fractoday, self.Or = x
self.Odm_dec = self.Odm*fractoday
self.Odm_ndec = self.Odm*(1-fractoday)
self.Ocb_std = self.Ob+self.Odm_ndec
yinit = np.array([self.Odm_dec, self.Or])
sol = odeint(self.RHS_, yinit, self.logar)
rxe, rre = sol[-1, :]
# we want Ore early be as small as possible
eps = rre**2
# we want early frac to be xfrac
eps += ((rxe/(rxe+self.Odm_ndec))-self.xfrac)**2
return eps
def SolveEq(self):
self.Odm = self.Ocb-self.Obh2/(self.h**2)
self.Ob = self.Ocb-self.Odm
if (self.lam == 0):
self.fractoday, self.Or = self.xfrac, 0.0
if (self.xfrac == 1.0):
res = minimize(lambda x: self.FuncMin_(
[1.0, x[0]]), [0.001], tol=1e-5)
self.Or = res.x[0]
self.fractoday = 1.0
else:
res = minimize(self.FuncMin_, [self.xfrac, 0.001], tol=1e-5)
self.fractoday, self.Or = res.x
# print fmin.__doc__
# print "lam=",self.lam
# print "res=",res
# stop
# stupid interp1d doesn't take inverses
self.Ocb_std = self.Ob+self.Odm*(1-self.fractoday)
self.Odm_dec = self.Odm*self.fractoday
yinit = np.array([self.Odm_dec, self.Or])
sol = odeint(self.RHS_, yinit, self.logar)
self.sol = sol
self.rx = interp1d(self.ilogar, sol[::-1, 0])
self.rr = interp1d(self.ilogar, sol[::-1, 1])
# take early time solution
self.Ocbh2_early = (self.Ocb_std+sol[-1, 0])*self.h**2
def RHSquared_a(self, a):
lna = np.log(a)
return self.H2_rxrr_a(a, self.rx(lna), self.rr(lna))
def WangWangVec(self):
print("no WW with Decay")
sys.exit(1)
return None
# this returns the "SimpleCMB" variables in a vec
def CMBSimpleVec(self):
zstar = 1090.
Dastar = self.Da_z(zstar)*self.c_/(self.h*100)
return np.array([self.Obh2, self.Ocbh2_early, Dastar/self.rd])
def Om_z(self, a):
lna = np.log(a)
return self.Ocb_std/a**3 + self.rx(lna)/a**3
|
igomezvREPO_NAMEsimplemc_testsPATH_START.@simplemc_tests_extracted@simplemc_tests-main@simplemc@models@DecayLCDMCosmology.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "astrobengaly/GaPP",
"repo_path": "GaPP_extracted/GaPP-main/README.md",
"type": "Markdown"
}
|
# GaPP
This is an auxiliary repository for the Gaussian Processes code GaPP, replacing the original link [http://www.acgc.uct.ac.za/~seikel/GAPP/index.html], which seems to be broken.
GaPP was written by Seikel, Clarkson & Smith 2012 (https://arxiv.org/abs/1204.2832, JCAP06(2012)036). Gaussian processes can reconstruct a function from a sample of data without assuming a parameterisation of the function. The GaPP code can be used on any dataset to reconstruct a function. It handles individual error bars on the data and can be used to determine the derivatives of the reconstructed function. The data sample may consist of observations of the function and of its first derivative.
After downloading and unzipping the GaPP file, GaPP can be set up following the documentation in the same folder.
Questions about GaPP can be addressed to carlosap87@gmail.com or carlosbengaly@on.br.
PS: some people have previously reported a bug when running the gp and dgp modules. Some possible solutions are:
`if (self.alpha == None):` in dgp.py
had to be changed to
`if (np.any(self.alpha) == None):`
Thanks to Shantanu Desai for that.
Alternatively, change `if (self.alpha == None):` to `if (self.alpha == None).all():` in gapp/gp.py and gapp/dgp.py.
Thanks to Ian Jhon for verifying this.
Update March 13th 2024: some people were still coming across bugs when installing or running those modules. Please check the "GaPP_v2024.zip" file, which includes the following modification:
`if (self.alpha == None):` in dgp.py and gp.py
was changed to
`if (self.alpha.any == None):`
Thanks to Antônio Cunha for that.
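For reference, a minimal self-contained sketch of an array-safe check (illustrative only; the helper name is made up, and `alpha` is assumed to be either None or a NumPy array, as in gapp/gp.py and gapp/dgp.py):
```python
import numpy as np
def alpha_is_unset(alpha):
    # `alpha == None` on an array returns an elementwise boolean array,
    # which is ambiguous in a bare `if`; identity comparison with None
    # sidesteps that ambiguity entirely.
    return alpha is None
print(alpha_is_unset(None))         # True
print(alpha_is_unset(np.zeros(3)))  # False
```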
|
astrobengalyREPO_NAMEGaPPPATH_START.@GaPP_extracted@GaPP-main@README.md@.PATH_END.py
|
{
"filename": "saveopts.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/setuptools/command/saveopts.py",
"type": "Python"
}
|
from setuptools.command.setopt import edit_config, option_base
class saveopts(option_base):
"""Save command-line options to a file"""
description = "save supplied options to setup.cfg or other config file"
def run(self):
dist = self.distribution
settings = {}
for cmd in dist.command_options:
if cmd == 'saveopts':
continue # don't save our own options!
for opt, (src, val) in dist.get_option_dict(cmd).items():
if src == "command line":
settings.setdefault(cmd, {})[opt] = val
edit_config(self.filename, settings, self.dry_run)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@setuptools@command@saveopts.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/violin/hoverlabel/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="family", parent_name="violin.hoverlabel.font", **kwargs
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
no_blank=kwargs.pop("no_blank", True),
role=kwargs.pop("role", "style"),
strict=kwargs.pop("strict", True),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@violin@hoverlabel@font@_family.py@.PATH_END.py
|
{
"filename": "test_initial_point.py",
"repo_name": "pymc-devs/pymc",
"repo_path": "pymc_extracted/pymc-main/tests/test_initial_point.py",
"type": "Python"
}
|
# Copyright 2024 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cloudpickle
import numpy as np
import pytensor
import pytensor.tensor as pt
import pytest
from pytensor.compile.builders import OpFromGraph
from pytensor.tensor.random.op import RandomVariable
import pymc as pm
from pymc.distributions.distribution import _support_point, support_point
from pymc.initial_point import make_initial_point_fn, make_initial_point_fns_per_chain
def transform_fwd(rv, expected_untransformed, model):
return model.rvs_to_transforms[rv].forward(expected_untransformed, *rv.owner.inputs).eval()
def transform_back(rv, transformed, model) -> np.ndarray:
return model.rvs_to_transforms[rv].backward(transformed, *rv.owner.inputs).eval()
class TestInitvalEvaluation:
def test_make_initial_point_fns_per_chain_checks_kwargs(self):
with pm.Model() as pmodel:
A = pm.Uniform("A", 0, 1, initval=0.5)
B = pm.Uniform("B", lower=A, upper=1.5, default_transform=None, initval="support_point")
with pytest.raises(ValueError, match="Number of initval dicts"):
make_initial_point_fns_per_chain(
model=pmodel,
overrides=[{}, None],
jitter_rvs={},
chains=1,
)
pass
@pytest.mark.parametrize("reverse_rvs", [False, True])
def test_dependent_initvals(self, reverse_rvs):
with pm.Model() as pmodel:
L = pm.Uniform("L", 0, 1, initval=0.5)
U = pm.Uniform("U", lower=9, upper=10, initval=9.5)
B1 = pm.Uniform("B1", lower=L, upper=U, initval=5)
B2 = pm.Uniform("B2", lower=L, upper=U, initval=(L + U) / 2)
if reverse_rvs:
pmodel.free_RVs = pmodel.free_RVs[::-1]
ip = pmodel.initial_point(random_seed=0)
assert ip["L_interval__"] == 0
assert ip["U_interval__"] == 0
assert ip["B1_interval__"] == 0
assert ip["B2_interval__"] == 0
# Modify initval of L and re-evaluate
pmodel.rvs_to_initial_values[U] = 9.9
ip = pmodel.initial_point(random_seed=0)
assert ip["B1_interval__"] < 0
assert ip["B2_interval__"] == 0
pass
def test_nested_initvals(self):
# See issue #5168
with pm.Model() as pmodel:
one = pm.LogNormal("one", mu=np.log(1), sigma=1e-5, initval="prior")
            two = pm.LogNormal("two", mu=np.log(one * 2), sigma=1e-5, initval="prior")
three = pm.LogNormal("three", mu=np.log(two * 2), sigma=1e-5, initval="prior")
four = pm.LogNormal("four", mu=np.log(three * 2), sigma=1e-5, initval="prior")
five = pm.LogNormal("five", mu=np.log(four * 2), sigma=1e-5, initval="prior")
six = pm.LogNormal("six", mu=np.log(five * 2), sigma=1e-5, initval="prior")
ip_vals = list(make_initial_point_fn(model=pmodel, return_transformed=True)(0).values())
assert np.allclose(np.exp(ip_vals), [1, 2, 4, 8, 16, 32], rtol=1e-3)
ip_vals = list(make_initial_point_fn(model=pmodel, return_transformed=False)(0).values())
assert np.allclose(ip_vals, [1, 2, 4, 8, 16, 32], rtol=1e-3)
pmodel.rvs_to_initial_values[four] = 1
ip_vals = list(make_initial_point_fn(model=pmodel, return_transformed=True)(0).values())
assert np.allclose(np.exp(ip_vals), [1, 2, 4, 1, 2, 4], rtol=1e-3)
ip_vals = list(make_initial_point_fn(model=pmodel, return_transformed=False)(0).values())
assert np.allclose(ip_vals, [1, 2, 4, 1, 2, 4], rtol=1e-3)
def test_initval_resizing(self):
with pm.Model() as pmodel:
data = pytensor.shared(np.arange(4))
rv = pm.Uniform("u", lower=data, upper=10, initval="prior")
ip = pmodel.initial_point(random_seed=0)
assert np.shape(ip["u_interval__"]) == (4,)
data.set_value(np.arange(5))
ip = pmodel.initial_point(random_seed=0)
assert np.shape(ip["u_interval__"]) == (5,)
pass
def test_seeding(self):
with pm.Model() as pmodel:
pm.Normal("A", initval="prior")
pm.Uniform("B", initval="prior")
pm.Normal("C", initval="support_point")
ip1 = pmodel.initial_point(random_seed=42)
ip2 = pmodel.initial_point(random_seed=42)
ip3 = pmodel.initial_point(random_seed=15)
assert ip1 == ip2
assert ip3 != ip2
pass
def test_untransformed_initial_point(self):
with pm.Model() as pmodel:
pm.Flat("A", initval="support_point")
pm.HalfFlat("B", initval="support_point")
fn = make_initial_point_fn(model=pmodel, jitter_rvs={}, return_transformed=False)
iv = fn(0)
assert iv["A"] == 0
assert iv["B"] == 1
pass
def test_adds_jitter(self):
with pm.Model() as pmodel:
A = pm.Flat("A", initval="support_point")
B = pm.HalfFlat("B", initval="support_point")
C = pm.Normal("C", mu=A + B, initval="support_point")
fn = make_initial_point_fn(model=pmodel, jitter_rvs={B}, return_transformed=True)
iv = fn(0)
# Moment of the Flat is 0
assert iv["A"] == 0
# Moment of the HalfFlat is 1, but HalfFlat is log-transformed by default
# so the transformed initial value with jitter will be zero plus a jitter between [-1, 1].
b_transformed = iv["B_log__"]
b_untransformed = transform_back(B, b_transformed, model=pmodel)
assert b_transformed != 0
assert -1 < b_transformed < 1
# C is centered on 0 + untransformed initval of B
assert np.isclose(iv["C"], np.array(0 + b_untransformed, dtype=pytensor.config.floatX))
# Test jitter respects seeding.
assert fn(0) == fn(0)
assert fn(0) != fn(1)
def test_respects_overrides(self):
with pm.Model() as pmodel:
A = pm.Flat("A", initval="support_point")
B = pm.HalfFlat("B", initval=4)
C = pm.Normal("C", mu=A + B, initval="support_point")
fn = make_initial_point_fn(
model=pmodel,
jitter_rvs={},
return_transformed=True,
overrides={
A: pt.as_tensor(2, dtype=int),
B: 3,
C: 5,
},
)
iv = fn(0)
assert iv["A"] == 2
assert np.isclose(iv["B_log__"], np.log(3))
assert iv["C"] == 5
def test_string_overrides_work(self):
with pm.Model() as pmodel:
A = pm.Flat("A", initval=10)
B = pm.HalfFlat("B", initval=10)
C = pm.HalfFlat("C", initval=10)
fn = make_initial_point_fn(
model=pmodel,
jitter_rvs={},
return_transformed=True,
overrides={
"A": 1,
"B": 1,
"C_log__": 0,
},
)
iv = fn(0)
assert iv["A"] == 1
assert np.isclose(iv["B_log__"], 0)
assert iv["C_log__"] == 0
@pytest.mark.parametrize("reverse_rvs", [False, True])
def test_dependent_initval_from_OFG(self, reverse_rvs):
class MyTestOp(OpFromGraph):
pass
@_support_point.register(MyTestOp)
def my_test_op_support_point(op, out):
out1, out2 = out.owner.outputs
if out is out1:
return out1
else:
return out1 * 4
out1 = pt.zeros(())
out2 = out1 * 2
rv_op = MyTestOp([], [out1, out2])
with pm.Model() as model:
A, B = rv_op()
if reverse_rvs:
model.register_rv(B, "B")
model.register_rv(A, "A")
else:
model.register_rv(A, "A")
model.register_rv(B, "B")
assert model.initial_point() == {"A": 0, "B": 0}
model.set_initval(A, 1)
assert model.initial_point() == {"A": 1, "B": 4}
model.set_initval(B, 3)
assert model.initial_point() == {"A": 1, "B": 3}
class TestSupportPoint:
def test_basic(self):
# Standard distributions
rv = pm.Normal.dist(mu=2.3)
np.testing.assert_allclose(support_point(rv).eval(), 2.3)
# Special distributions
rv = pm.Flat.dist()
assert support_point(rv).eval() == np.zeros(())
rv = pm.HalfFlat.dist()
assert support_point(rv).eval() == np.ones(())
rv = pm.Flat.dist(size=(2, 4))
assert np.all(support_point(rv).eval() == np.zeros((2, 4)))
rv = pm.HalfFlat.dist(size=(2, 4))
assert np.all(support_point(rv).eval() == np.ones((2, 4)))
@pytest.mark.parametrize("rv_cls", [pm.Flat, pm.HalfFlat])
def test_numeric_support_point_shape(self, rv_cls):
rv = rv_cls.dist(shape=(2,))
assert not hasattr(rv.tag, "test_value")
assert tuple(support_point(rv).shape.eval()) == (2,)
@pytest.mark.parametrize("rv_cls", [pm.Flat, pm.HalfFlat])
def test_symbolic_support_point_shape(self, rv_cls):
s = pt.scalar(dtype="int64")
rv = rv_cls.dist(shape=(s,))
assert not hasattr(rv.tag, "test_value")
assert tuple(support_point(rv).shape.eval({s: 4})) == (4,)
pass
@pytest.mark.parametrize("rv_cls", [pm.Flat, pm.HalfFlat])
def test_support_point_from_dims(self, rv_cls):
with pm.Model(
coords={
"year": [2019, 2020, 2021, 2022],
"city": ["Bonn", "Paris", "Lisbon"],
}
):
rv = rv_cls("rv", dims=("year", "city"))
assert not hasattr(rv.tag, "test_value")
assert tuple(support_point(rv).shape.eval()) == (4, 3)
pass
def test_support_point_not_implemented_fallback(self):
class MyNormalRV(RandomVariable):
name = "my_normal"
signature = "(),()->()"
dtype = "floatX"
@classmethod
def rng_fn(cls, rng, mu, sigma, size):
return np.pi
class MyNormalDistribution(pm.Normal):
rv_op = MyNormalRV()
with pm.Model() as m:
x = MyNormalDistribution("x", 0, 1, initval="support_point")
with pytest.warns(
UserWarning, match="Moment not defined for variable x of type MyNormalRV"
):
res = m.initial_point()
assert np.isclose(res["x"], np.pi)
def test_future_warning_moment(self):
with pm.Model() as m:
pm.Normal("x", initval="moment")
with pytest.warns(
FutureWarning,
match="The 'moment' strategy is deprecated. Use 'support_point' instead.",
):
ip = m.initial_point(random_seed=42)
def test_pickling_issue_5090():
with pm.Model() as model:
pm.Normal("x", initval="prior")
ip_before = model.initial_point(random_seed=5090)
model = cloudpickle.loads(cloudpickle.dumps(model))
ip_after = model.initial_point(random_seed=5090)
assert ip_before["x"] == ip_after["x"]
|
pymc-devsREPO_NAMEpymcPATH_START.@pymc_extracted@pymc-main@tests@test_initial_point.py@.PATH_END.py
|
{
"filename": "wrappers.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/wrapt_ES/wrappers.py",
"type": "Python"
}
|
import os
import sys
import functools
import operator
import weakref
import inspect
PY2 = sys.version_info[0] == 2
if PY2:
string_types = basestring,
else:
string_types = str,
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
return meta("NewBase", bases, {})
class _ObjectProxyMethods(object):
# We use properties to override the values of __module__ and
# __doc__. If we add these in ObjectProxy, the derived class
# __dict__ will still be setup to have string variants of these
# attributes and the rules of descriptors means that they appear to
# take precedence over the properties in the base class. To avoid
# that, we copy the properties into the derived class type itself
# via a meta class. In that way the properties will always take
# precedence.
@property
def __module__(self):
return self.__wrapped__.__module__
@__module__.setter
def __module__(self, value):
self.__wrapped__.__module__ = value
@property
def __doc__(self):
return self.__wrapped__.__doc__
@__doc__.setter
def __doc__(self, value):
self.__wrapped__.__doc__ = value
    # We similarly use a property for __dict__. We need __dict__ to be
# explicit to ensure that vars() works as expected.
@property
def __dict__(self):
return self.__wrapped__.__dict__
    # Need to also propagate the special __weakref__ attribute for the
    # case where we are decorating classes which will define this. If we
    # do not define it and use a function like inspect.getmembers() on a
    # decorator class it will fail. This can't be in the derived classes.
@property
def __weakref__(self):
return self.__wrapped__.__weakref__
class _ObjectProxyMetaType(type):
def __new__(cls, name, bases, dictionary):
# Copy our special properties into the class so that they
# always take precedence over attributes of the same name added
# during construction of a derived class. This is to save
# duplicating the implementation for them in all derived classes.
dictionary.update(vars(_ObjectProxyMethods))
return type.__new__(cls, name, bases, dictionary)
class ObjectProxy(with_metaclass(_ObjectProxyMetaType)):
__slots__ = '__wrapped__'
def __init__(self, wrapped):
object.__setattr__(self, '__wrapped__', wrapped)
# Python 3.2+ has the __qualname__ attribute, but it does not
# allow it to be overridden using a property and it must instead
# be an actual string object instead.
try:
object.__setattr__(self, '__qualname__', wrapped.__qualname__)
except AttributeError:
pass
# Python 3.10 onwards also does not allow itself to be overridden
# using a property and it must instead be set explicitly.
try:
object.__setattr__(self, '__annotations__', wrapped.__annotations__)
except AttributeError:
pass
@property
def __name__(self):
return self.__wrapped__.__name__
@__name__.setter
def __name__(self, value):
self.__wrapped__.__name__ = value
@property
def __class__(self):
return self.__wrapped__.__class__
@__class__.setter
def __class__(self, value):
self.__wrapped__.__class__ = value
def __dir__(self):
return dir(self.__wrapped__)
def __str__(self):
return str(self.__wrapped__)
if not PY2:
def __bytes__(self):
return bytes(self.__wrapped__)
def __repr__(self):
return '<{} at 0x{:x} for {} at 0x{:x}>'.format(
type(self).__name__, id(self),
type(self.__wrapped__).__name__,
id(self.__wrapped__))
def __reversed__(self):
return reversed(self.__wrapped__)
if not PY2:
def __round__(self):
return round(self.__wrapped__)
if sys.hexversion >= 0x03070000:
def __mro_entries__(self, bases):
return (self.__wrapped__,)
def __lt__(self, other):
return self.__wrapped__ < other
def __le__(self, other):
return self.__wrapped__ <= other
def __eq__(self, other):
return self.__wrapped__ == other
def __ne__(self, other):
return self.__wrapped__ != other
def __gt__(self, other):
return self.__wrapped__ > other
def __ge__(self, other):
return self.__wrapped__ >= other
def __hash__(self):
return hash(self.__wrapped__)
def __nonzero__(self):
return bool(self.__wrapped__)
def __bool__(self):
return bool(self.__wrapped__)
def __setattr__(self, name, value):
if name.startswith('_self_'):
object.__setattr__(self, name, value)
elif name == '__wrapped__':
object.__setattr__(self, name, value)
try:
object.__delattr__(self, '__qualname__')
except AttributeError:
pass
try:
object.__setattr__(self, '__qualname__', value.__qualname__)
except AttributeError:
pass
try:
object.__delattr__(self, '__annotations__')
except AttributeError:
pass
try:
object.__setattr__(self, '__annotations__', value.__annotations__)
except AttributeError:
pass
elif name == '__qualname__':
setattr(self.__wrapped__, name, value)
object.__setattr__(self, name, value)
elif name == '__annotations__':
setattr(self.__wrapped__, name, value)
object.__setattr__(self, name, value)
elif hasattr(type(self), name):
object.__setattr__(self, name, value)
else:
setattr(self.__wrapped__, name, value)
def __getattr__(self, name):
        # If we are being asked to look up '__wrapped__' then the
        # '__init__()' method cannot have been called.
if name == '__wrapped__':
raise ValueError('wrapper has not been initialised')
return getattr(self.__wrapped__, name)
def __delattr__(self, name):
if name.startswith('_self_'):
object.__delattr__(self, name)
elif name == '__wrapped__':
raise TypeError('__wrapped__ must be an object')
elif name == '__qualname__':
object.__delattr__(self, name)
delattr(self.__wrapped__, name)
elif hasattr(type(self), name):
object.__delattr__(self, name)
else:
delattr(self.__wrapped__, name)
def __add__(self, other):
return self.__wrapped__ + other
def __sub__(self, other):
return self.__wrapped__ - other
def __mul__(self, other):
return self.__wrapped__ * other
def __div__(self, other):
return operator.div(self.__wrapped__, other)
def __truediv__(self, other):
return operator.truediv(self.__wrapped__, other)
def __floordiv__(self, other):
return self.__wrapped__ // other
def __mod__(self, other):
return self.__wrapped__ % other
def __divmod__(self, other):
return divmod(self.__wrapped__, other)
def __pow__(self, other, *args):
return pow(self.__wrapped__, other, *args)
def __lshift__(self, other):
return self.__wrapped__ << other
def __rshift__(self, other):
return self.__wrapped__ >> other
def __and__(self, other):
return self.__wrapped__ & other
def __xor__(self, other):
return self.__wrapped__ ^ other
def __or__(self, other):
return self.__wrapped__ | other
def __radd__(self, other):
return other + self.__wrapped__
def __rsub__(self, other):
return other - self.__wrapped__
def __rmul__(self, other):
return other * self.__wrapped__
def __rdiv__(self, other):
return operator.div(other, self.__wrapped__)
def __rtruediv__(self, other):
return operator.truediv(other, self.__wrapped__)
def __rfloordiv__(self, other):
return other // self.__wrapped__
def __rmod__(self, other):
return other % self.__wrapped__
def __rdivmod__(self, other):
return divmod(other, self.__wrapped__)
def __rpow__(self, other, *args):
return pow(other, self.__wrapped__, *args)
def __rlshift__(self, other):
return other << self.__wrapped__
def __rrshift__(self, other):
return other >> self.__wrapped__
def __rand__(self, other):
return other & self.__wrapped__
def __rxor__(self, other):
return other ^ self.__wrapped__
def __ror__(self, other):
return other | self.__wrapped__
def __iadd__(self, other):
self.__wrapped__ += other
return self
def __isub__(self, other):
self.__wrapped__ -= other
return self
def __imul__(self, other):
self.__wrapped__ *= other
return self
def __idiv__(self, other):
self.__wrapped__ = operator.idiv(self.__wrapped__, other)
return self
def __itruediv__(self, other):
self.__wrapped__ = operator.itruediv(self.__wrapped__, other)
return self
def __ifloordiv__(self, other):
self.__wrapped__ //= other
return self
def __imod__(self, other):
self.__wrapped__ %= other
return self
def __ipow__(self, other):
self.__wrapped__ **= other
return self
def __ilshift__(self, other):
self.__wrapped__ <<= other
return self
def __irshift__(self, other):
self.__wrapped__ >>= other
return self
def __iand__(self, other):
self.__wrapped__ &= other
return self
def __ixor__(self, other):
self.__wrapped__ ^= other
return self
def __ior__(self, other):
self.__wrapped__ |= other
return self
def __neg__(self):
return -self.__wrapped__
def __pos__(self):
return +self.__wrapped__
def __abs__(self):
return abs(self.__wrapped__)
def __invert__(self):
return ~self.__wrapped__
def __int__(self):
return int(self.__wrapped__)
def __long__(self):
return long(self.__wrapped__)
def __float__(self):
return float(self.__wrapped__)
def __complex__(self):
return complex(self.__wrapped__)
def __oct__(self):
return oct(self.__wrapped__)
def __hex__(self):
return hex(self.__wrapped__)
def __index__(self):
return operator.index(self.__wrapped__)
def __len__(self):
return len(self.__wrapped__)
def __contains__(self, value):
return value in self.__wrapped__
def __getitem__(self, key):
return self.__wrapped__[key]
def __setitem__(self, key, value):
self.__wrapped__[key] = value
def __delitem__(self, key):
del self.__wrapped__[key]
def __getslice__(self, i, j):
return self.__wrapped__[i:j]
def __setslice__(self, i, j, value):
self.__wrapped__[i:j] = value
def __delslice__(self, i, j):
del self.__wrapped__[i:j]
def __enter__(self):
return self.__wrapped__.__enter__()
def __exit__(self, *args, **kwargs):
return self.__wrapped__.__exit__(*args, **kwargs)
def __iter__(self):
return iter(self.__wrapped__)
def __copy__(self):
raise NotImplementedError('object proxy must define __copy__()')
def __deepcopy__(self, memo):
raise NotImplementedError('object proxy must define __deepcopy__()')
def __reduce__(self):
raise NotImplementedError(
'object proxy must define __reduce_ex__()')
def __reduce_ex__(self, protocol):
raise NotImplementedError(
'object proxy must define __reduce_ex__()')
class CallableObjectProxy(ObjectProxy):
def __call__(self, *args, **kwargs):
return self.__wrapped__(*args, **kwargs)
class PartialCallableObjectProxy(ObjectProxy):
def __init__(self, *args, **kwargs):
if len(args) < 1:
raise TypeError('partial type takes at least one argument')
wrapped, args = args[0], args[1:]
if not callable(wrapped):
raise TypeError('the first argument must be callable')
super(PartialCallableObjectProxy, self).__init__(wrapped)
self._self_args = args
self._self_kwargs = kwargs
def __call__(self, *args, **kwargs):
_args = self._self_args + args
_kwargs = dict(self._self_kwargs)
_kwargs.update(kwargs)
return self.__wrapped__(*_args, **_kwargs)
class _FunctionWrapperBase(ObjectProxy):
__slots__ = ('_self_instance', '_self_wrapper', '_self_enabled',
'_self_binding', '_self_parent')
def __init__(self, wrapped, instance, wrapper, enabled=None,
binding='function', parent=None):
super(_FunctionWrapperBase, self).__init__(wrapped)
object.__setattr__(self, '_self_instance', instance)
object.__setattr__(self, '_self_wrapper', wrapper)
object.__setattr__(self, '_self_enabled', enabled)
object.__setattr__(self, '_self_binding', binding)
object.__setattr__(self, '_self_parent', parent)
def __get__(self, instance, owner):
# This method is actually doing double duty for both unbound and
# bound derived wrapper classes. It should possibly be broken up
# and the distinct functionality moved into the derived classes.
# Can't do that straight away due to some legacy code which is
# relying on it being here in this base class.
#
# The distinguishing attribute which determines whether we are
# being called in an unbound or bound wrapper is the parent
# attribute. If binding has never occurred, then the parent will
# be None.
#
# First therefore, is if we are called in an unbound wrapper. In
# this case we perform the binding.
#
# We have one special case to worry about here. This is where we
# are decorating a nested class. In this case the wrapped class
# would not have a __get__() method to call. In that case we
# simply return self.
#
# Note that we otherwise still do binding even if instance is
# None and accessing an unbound instance method from a class.
# This is because we need to be able to later detect that
# specific case as we will need to extract the instance from the
# first argument of those passed in.
if self._self_parent is None:
if not inspect.isclass(self.__wrapped__):
descriptor = self.__wrapped__.__get__(instance, owner)
return self.__bound_function_wrapper__(descriptor, instance,
self._self_wrapper, self._self_enabled,
self._self_binding, self)
return self
# Now we have the case of binding occurring a second time on what
# was already a bound function. In this case we would usually
# return ourselves again. This mirrors what Python does.
#
# The special case this time is where we were originally bound
# with an instance of None and we were likely an instance
# method. In that case we rebind against the original wrapped
# function from the parent again.
if self._self_instance is None and self._self_binding == 'function':
descriptor = self._self_parent.__wrapped__.__get__(
instance, owner)
return self._self_parent.__bound_function_wrapper__(
descriptor, instance, self._self_wrapper,
self._self_enabled, self._self_binding,
self._self_parent)
return self
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
        # This can occur where the initial function wrapper was applied to
# a function that was already bound to an instance. In that case
# we want to extract the instance from the function and use it.
if self._self_binding in ('function', 'classmethod'):
if self._self_instance is None:
instance = getattr(self.__wrapped__, '__self__', None)
if instance is not None:
return self._self_wrapper(self.__wrapped__, instance,
args, kwargs)
# This is generally invoked when the wrapped function is being
# called as a normal function and is not bound to a class as an
# instance method. This is also invoked in the case where the
# wrapped function was a method, but this wrapper was in turn
# wrapped using the staticmethod decorator.
return self._self_wrapper(self.__wrapped__, self._self_instance,
args, kwargs)
def __set_name__(self, owner, name):
        # This is a special method used to supply information to
# descriptors about what the name of variable in a class
# definition is. Not wanting to add this to ObjectProxy as not
# sure of broader implications of doing that. Thus restrict to
# FunctionWrapper used by decorators.
if hasattr(self.__wrapped__, "__set_name__"):
self.__wrapped__.__set_name__(owner, name)
def __instancecheck__(self, instance):
        # This is a special method used by isinstance() to check whether
        # an object is an instance of the wrapped class.
return isinstance(instance, self.__wrapped__)
def __subclasscheck__(self, subclass):
# This is a special method used by issubclass() to make checks
        # about inheritance of classes. We need to unwrap any object
# proxy. Not wanting to add this to ObjectProxy as not sure of
# broader implications of doing that. Thus restrict to
# FunctionWrapper used by decorators.
if hasattr(subclass, "__wrapped__"):
return issubclass(subclass.__wrapped__, self.__wrapped__)
else:
return issubclass(subclass, self.__wrapped__)
class BoundFunctionWrapper(_FunctionWrapperBase):
def __call__(self, *args, **kwargs):
# If enabled has been specified, then evaluate it at this point
# and if the wrapper is not to be executed, then simply return
# the bound function rather than a bound wrapper for the bound
# function. When evaluating enabled, if it is callable we call
# it, otherwise we evaluate it as a boolean.
if self._self_enabled is not None:
if callable(self._self_enabled):
if not self._self_enabled():
return self.__wrapped__(*args, **kwargs)
elif not self._self_enabled:
return self.__wrapped__(*args, **kwargs)
        # We need to do things differently depending on whether we are
        # likely wrapping an instance method vs a static method or class
        # method.
if self._self_binding == 'function':
if self._self_instance is None:
# This situation can occur where someone is calling the
# instancemethod via the class type and passing the instance
# as the first argument. We need to shift the args before
# making the call to the wrapper and effectively bind the
# instance to the wrapped function using a partial so the
# wrapper doesn't see anything as being different.
if not args:
raise TypeError('missing 1 required positional argument')
instance, args = args[0], args[1:]
wrapped = PartialCallableObjectProxy(self.__wrapped__, instance)
return self._self_wrapper(wrapped, instance, args, kwargs)
return self._self_wrapper(self.__wrapped__, self._self_instance,
args, kwargs)
else:
# As in this case we would be dealing with a classmethod or
# staticmethod, then _self_instance will only tell us whether
# when calling the classmethod or staticmethod they did it via an
# instance of the class it is bound to and not the case where
# done by the class type itself. We thus ignore _self_instance
# and use the __self__ attribute of the bound function instead.
# For a classmethod, this means instance will be the class type
# and for a staticmethod it will be None. This is probably the
            # more useful thing we can pass through even though we lose
            # knowledge of whether they were called on the instance vs the
            # class type, as it reflects what they have available in the
            # decorated function.
instance = getattr(self.__wrapped__, '__self__', None)
return self._self_wrapper(self.__wrapped__, instance, args,
kwargs)
class FunctionWrapper(_FunctionWrapperBase):
__bound_function_wrapper__ = BoundFunctionWrapper
def __init__(self, wrapped, wrapper, enabled=None):
# What it is we are wrapping here could be anything. We need to
# try and detect specific cases though. In particular, we need
# to detect when we are given something that is a method of a
# class. Further, we need to know when it is likely an instance
# method, as opposed to a class or static method. This can
        # become problematic though as there isn't strictly a foolproof
# method of knowing.
#
# The situations we could encounter when wrapping a method are:
#
# 1. The wrapper is being applied as part of a decorator which
# is a part of the class definition. In this case what we are
# given is the raw unbound function, classmethod or staticmethod
# wrapper objects.
#
# The problem here is that we will not know we are being applied
# in the context of the class being set up. This becomes
# important later for the case of an instance method, because in
# that case we just see it as a raw function and can't
# distinguish it from wrapping a normal function outside of
# a class context.
#
# 2. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved direct from the __dict__ of the class
# type. This is effectively the same as (1) above.
#
# 3. The wrapper is being applied when performing monkey
# patching of the class type afterwards and the method to be
# wrapped was retrieved from the class type. In this case
# binding will have been performed where the instance against
# which the method is bound will be None at that point.
#
# This case is a problem because we can no longer tell if the
# method was a static method, plus if using Python3, we cannot
# tell if it was an instance method as the concept of an
        # unbound method no longer exists.
#
# 4. The wrapper is being applied when performing monkey
# patching of an instance of a class. In this case binding will
        # have been performed where the instance was not None.
#
# This case is a problem because we can no longer tell if the
# method was a static method.
#
# Overall, the best we can do is look at the original type of the
# object which was wrapped prior to any binding being done and
# see if it is an instance of classmethod or staticmethod. In
# the case where other decorators are between us and them, if
# they do not propagate the __class__ attribute so that the
        # isinstance() check works, then likely this will do the wrong
# thing where classmethod and staticmethod are used.
#
# Since it is likely to be very rare that anyone even puts
# decorators around classmethod and staticmethod, likelihood of
# that being an issue is very small, so we accept it and suggest
# that those other decorators be fixed. It is also only an issue
# if a decorator wants to actually do things with the arguments.
#
# As to not being able to identify static methods properly, we
# just hope that that isn't something people are going to want
# to wrap, or if they do suggest they do it the correct way by
# ensuring that it is decorated in the class definition itself,
# or patch it in the __dict__ of the class type.
#
# So to get the best outcome we can, whenever we aren't sure what
# it is, we label it as a 'function'. If it was already bound and
# that is rebound later, we assume that it will be an instance
# method and try an cope with the possibility that the 'self'
# argument it being passed as an explicit argument and shuffle
# the arguments around to extract 'self' for use as the instance.
if isinstance(wrapped, classmethod):
binding = 'classmethod'
elif isinstance(wrapped, staticmethod):
binding = 'staticmethod'
elif hasattr(wrapped, '__self__'):
if inspect.isclass(wrapped.__self__):
binding = 'classmethod'
else:
binding = 'function'
else:
binding = 'function'
super(FunctionWrapper, self).__init__(wrapped, None, wrapper,
enabled, binding)
try:
if not os.environ.get('WRAPT_DISABLE_EXTENSIONS'):
from ._wrappers import (ObjectProxy, CallableObjectProxy,
PartialCallableObjectProxy, FunctionWrapper,
BoundFunctionWrapper, _FunctionWrapperBase)
except ImportError:
pass
# Helper functions for applying wrappers to existing functions.
def resolve_path(module, name):
if isinstance(module, string_types):
__import__(module)
module = sys.modules[module]
parent = module
path = name.split('.')
attribute = path[0]
# We can't just always use getattr() because in doing
# that on a class it will cause binding to occur which
# will complicate things later and cause some things not
# to work. For the case of a class we therefore access
# the __dict__ directly. To cope though with the wrong
# class being given to us, or a method being moved into
# a base class, we need to walk the class hierarchy to
# work out exactly which __dict__ the method was defined
# in, as accessing it from __dict__ will fail if it was
# not actually on the class given. Fallback to using
# getattr() if we can't find it. If it truly doesn't
# exist, then that will fail.
def lookup_attribute(parent, attribute):
if inspect.isclass(parent):
for cls in inspect.getmro(parent):
if attribute in vars(cls):
return vars(cls)[attribute]
else:
return getattr(parent, attribute)
else:
return getattr(parent, attribute)
original = lookup_attribute(parent, attribute)
for attribute in path[1:]:
parent = original
original = lookup_attribute(parent, attribute)
return (parent, attribute, original)
def apply_patch(parent, attribute, replacement):
setattr(parent, attribute, replacement)
def wrap_object(module, name, factory, args=(), kwargs={}):
(parent, attribute, original) = resolve_path(module, name)
wrapper = factory(original, *args, **kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
# Function for applying a proxy object to an attribute of a class
# instance. The wrapper works by defining an attribute of the same name
# on the class which is a descriptor and which intercepts access to the
# instance attribute. Note that this cannot be used on attributes which
# are themselves defined by a property object.
class AttributeWrapper(object):
def __init__(self, attribute, factory, args, kwargs):
self.attribute = attribute
self.factory = factory
self.args = args
self.kwargs = kwargs
def __get__(self, instance, owner):
value = instance.__dict__[self.attribute]
return self.factory(value, *self.args, **self.kwargs)
def __set__(self, instance, value):
instance.__dict__[self.attribute] = value
def __delete__(self, instance):
del instance.__dict__[self.attribute]
def wrap_object_attribute(module, name, factory, args=(), kwargs={}):
path, attribute = name.rsplit('.', 1)
parent = resolve_path(module, path)[2]
wrapper = AttributeWrapper(attribute, factory, args, kwargs)
apply_patch(parent, attribute, wrapper)
return wrapper
# Functions for creating a simple decorator using a FunctionWrapper,
# plus short cut functions for applying wrappers to functions. These are
# for use when doing monkey patching. For a more featured way of
# creating decorators see the decorator decorator instead.
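# A minimal usage sketch (illustrative names, not part of the library):
#
#   @function_wrapper
#   def log_calls(wrapped, instance, args, kwargs):
#       print('calling', wrapped.__name__)
#       return wrapped(*args, **kwargs)
#
#   @log_calls
#   def add(a, b):
#       return a + b
#
#   add(1, 2)  # prints 'calling add', then returns 3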
def function_wrapper(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
return FunctionWrapper(target_wrapped, target_wrapper)
return FunctionWrapper(wrapper, _wrapper)
def wrap_function_wrapper(module, name, wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
def patch_function_wrapper(module, name):
def _wrapper(wrapper):
return wrap_object(module, name, FunctionWrapper, (wrapper,))
return _wrapper
def transient_function_wrapper(module, name):
def _decorator(wrapper):
def _wrapper(wrapped, instance, args, kwargs):
target_wrapped = args[0]
if instance is None:
target_wrapper = wrapper
elif inspect.isclass(instance):
target_wrapper = wrapper.__get__(None, instance)
else:
target_wrapper = wrapper.__get__(instance, type(instance))
def _execute(wrapped, instance, args, kwargs):
(parent, attribute, original) = resolve_path(module, name)
replacement = FunctionWrapper(original, target_wrapper)
setattr(parent, attribute, replacement)
try:
return wrapped(*args, **kwargs)
finally:
setattr(parent, attribute, original)
return FunctionWrapper(target_wrapped, _execute)
return FunctionWrapper(wrapper, _wrapper)
return _decorator
# A weak function proxy. This will work on instance methods, class
# methods, static methods and regular functions. Special treatment is
# needed for the method types because the bound method is effectively a
# transient object and applying a weak reference to one will immediately
# result in it being destroyed and the weakref callback called. The weak
# reference is therefore applied to the instance the method is bound to
# and the original function. The function is then rebound at the point
# of a call via the weak function proxy.
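# A short sketch of the behaviour described above (illustrative only):
#
#   class Greeter(object):
#       def hello(self):
#           return 'hello'
#
#   g = Greeter()
#   proxy = WeakFunctionProxy(g.hello)
#   proxy()   # 'hello' -- the function is rebound to the live instance
#   del g     # only weak references remain, so the instance is collected
#             # and the proxy expires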
def _weak_function_proxy_callback(ref, proxy, callback):
if proxy._self_expired:
return
proxy._self_expired = True
# This could raise an exception. We let it propagate back and let
# the weakref.proxy() deal with it, at which point it generally
# prints out a short error message direct to stderr and keeps going.
if callback is not None:
callback(proxy)
class WeakFunctionProxy(ObjectProxy):
__slots__ = ('_self_expired', '_self_instance')
def __init__(self, wrapped, callback=None):
# We need to determine if the wrapped function is actually a
# bound method. In the case of a bound method, we need to keep a
# reference to the original unbound function and the instance.
# This is necessary because if we hold a reference to the bound
# function, it will be the only reference and given it is a
# temporary object, it will almost immediately expire and
# the weakref callback triggered. So what is done is that we
# hold a reference to the instance and unbound function and
# when called bind the function to the instance once again and
# then call it. Note that we avoid using a nested function for
# the callback here so as not to cause any odd reference cycles.
_callback = callback and functools.partial(
_weak_function_proxy_callback, proxy=self,
callback=callback)
self._self_expired = False
if isinstance(wrapped, _FunctionWrapperBase):
self._self_instance = weakref.ref(wrapped._self_instance,
_callback)
if wrapped._self_parent is not None:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped._self_parent, _callback))
else:
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped, _callback))
return
try:
self._self_instance = weakref.ref(wrapped.__self__, _callback)
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped.__func__, _callback))
except AttributeError:
self._self_instance = None
super(WeakFunctionProxy, self).__init__(
weakref.proxy(wrapped, _callback))
def __call__(self, *args, **kwargs):
# We perform a boolean check here on the instance and wrapped
# function as that will trigger the reference error prior to
# calling if the reference had expired.
instance = self._self_instance and self._self_instance()
function = self.__wrapped__ and self.__wrapped__
# If the wrapped function was originally a bound function, for
# which we retained a reference to the instance and the unbound
        # function, we need to rebind the function and then call it. If
        # not, just call the wrapped function.
if instance is None:
return self.__wrapped__(*args, **kwargs)
return function.__get__(instance, type(instance))(*args, **kwargs)
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@wrapt_ES@wrappers.py@.PATH_END.py
|
{
"filename": "_linecolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/smith/realaxis/_linecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self, plotly_name="linecolor", parent_name="layout.smith.realaxis", **kwargs
):
super(LinecolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@smith@realaxis@_linecolor.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/title/font/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self,
plotly_name="variant",
parent_name="scattercarpet.marker.colorbar.title.font",
**kwargs,
):
super(VariantValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop(
"values",
[
"normal",
"small-caps",
"all-small-caps",
"all-petite-caps",
"petite-caps",
"unicase",
],
),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattercarpet@marker@colorbar@title@font@_variant.py@.PATH_END.py
|
{
"filename": "metrics_nonportable.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/python/metrics/metrics_nonportable.py",
"type": "Python"
}
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python TFLite metrics helper."""
from typing import Optional, Text
import uuid
from tensorflow.compiler.mlir.lite.metrics import converter_error_data_pb2
from tensorflow.lite.python.metrics import metrics_interface
from tensorflow.lite.python.metrics.wrapper import metrics_wrapper
from tensorflow.python.eager import monitoring
_counter_debugger_creation = monitoring.Counter(
'/tensorflow/lite/quantization_debugger/created',
'Counter for the number of debugger created.')
_counter_interpreter_creation = monitoring.Counter(
'/tensorflow/lite/interpreter/created',
'Counter for number of interpreter created in Python.', 'language')
# The following are conversion metrics. Attempt and success are kept separated
# instead of using a single metric with a label because the converter may
# raise exceptions if conversion failed. That may lead to cases when we are
# unable to capture the conversion attempt. Increasing attempt count at the
# beginning of conversion process and the success count at the end is more
# suitable in these cases.
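# The expected call pattern is therefore (an illustrative sketch; `metrics`
# stands for any TFLiteMetrics instance):
#   metrics.increase_counter_converter_attempt()   # before conversion starts
#   ... conversion runs, and may raise ...
#   metrics.increase_counter_converter_success()   # only reached on success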
_counter_conversion_attempt = monitoring.Counter(
'/tensorflow/lite/convert/attempt',
'Counter for number of conversion attempts.')
_counter_conversion_success = monitoring.Counter(
'/tensorflow/lite/convert/success',
'Counter for number of successful conversions.')
_gauge_conversion_params = monitoring.StringGauge(
'/tensorflow/lite/convert/params',
'Gauge for keeping conversion parameters.', 'name')
_gauge_conversion_errors = monitoring.StringGauge(
'/tensorflow/lite/convert/errors',
'Gauge for collecting conversion errors. The value represents the error '
'message.', 'component', 'subcomponent', 'op_name', 'error_code')
_gauge_conversion_latency = monitoring.IntGauge(
'/tensorflow/lite/convert/latency', 'Conversion latency in ms.')
class TFLiteMetrics(metrics_interface.TFLiteMetricsInterface):
"""TFLite metrics helper for prod (borg) environment.
Attributes:
model_hash: A string containing the hash of the model binary.
model_path: A string containing the path of the model for debugging
purposes.
"""
def __init__(self,
model_hash: Optional[Text] = None,
model_path: Optional[Text] = None) -> None:
del self # Temporarily removing self until parameter logic is implemented.
if model_hash and not model_path or not model_hash and model_path:
raise ValueError('Both model metadata(model_hash, model_path) should be '
'given at the same time.')
if model_hash:
# TODO(b/180400857): Create stub once the service is implemented.
pass
def increase_counter_debugger_creation(self):
_counter_debugger_creation.get_cell().increase_by(1)
def increase_counter_interpreter_creation(self):
_counter_interpreter_creation.get_cell('python').increase_by(1)
def increase_counter_converter_attempt(self):
_counter_conversion_attempt.get_cell().increase_by(1)
def increase_counter_converter_success(self):
_counter_conversion_success.get_cell().increase_by(1)
def set_converter_param(self, name, value):
_gauge_conversion_params.get_cell(name).set(value)
def set_converter_error(
self, error_data: converter_error_data_pb2.ConverterErrorData):
error_code_str = converter_error_data_pb2.ConverterErrorData.ErrorCode.Name(
error_data.error_code)
_gauge_conversion_errors.get_cell(
error_data.component,
error_data.subcomponent,
error_data.operator.name,
error_code_str,
).set(error_data.error_message)
def set_converter_latency(self, value):
_gauge_conversion_latency.get_cell().set(value)
class TFLiteConverterMetrics(TFLiteMetrics):
"""Similar to TFLiteMetrics but specialized for converter.
A unique session id will be created for each new TFLiteConverterMetrics.
"""
def __init__(self) -> None:
super(TFLiteConverterMetrics, self).__init__()
session_id = uuid.uuid4().hex
self._metrics_exporter = metrics_wrapper.MetricsWrapper(session_id)
self._exported = False
def __del__(self):
if not self._exported:
self.export_metrics()
def set_export_required(self):
self._exported = False
def export_metrics(self):
self._metrics_exporter.ExportMetrics()
self._exported = True
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@python@metrics@metrics_nonportable.py@.PATH_END.py
|
{
"filename": "mpi4py_cluster.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/clusters/mpi4py_cluster.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from jax._src import clusters
import socket
from importlib.util import find_spec
class Mpi4pyCluster(clusters.ClusterEnv):
name: str = "mpi4py"
opt_in_only_method: bool = True
@classmethod
def is_env_present(cls) -> bool:
# Relies on mpi4py:
return find_spec("mpi4py") is not None
@classmethod
def get_coordinator_address(cls, timeout_secs: int | None) -> str:
    # Using mpi4py, figure out rank 0 and its hostname.
# Then broadcast the hostname and port.
from mpi4py import MPI #type: ignore
# Get the global communicator:
COMM_WORLD = MPI.COMM_WORLD
# On rank 0, get the hostname:
if COMM_WORLD.Get_rank() == 0:
      # Rank 0 looks up its own hostname:
hostname = socket.gethostname()
# Apparently, we want to pick a port in an ephemeral range...
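      # hash(hostname) % 2**12 lies in [0, 4095], so the derived port lies
      # in [61440, 65535], within the ephemeral range. Only rank 0 computes
      # this, so per-process hash randomisation does not matter: every rank
      # receives the same value via the broadcast below.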
port_id = hash(hostname) % 2**12 + (65535 - 2**12 + 1)
hostname = f'{hostname}:{port_id}'
else:
hostname = "None"
# Broadcast the host_ip to all ranks:
hostname = COMM_WORLD.bcast(hostname, root=0)
return hostname
@classmethod
def get_process_count(cls) -> int:
from mpi4py import MPI # pytype: disable=import-error
return int(MPI.COMM_WORLD.Get_size())
@classmethod
def get_process_id(cls) -> int:
from mpi4py import MPI # pytype: disable=import-error
return int(MPI.COMM_WORLD.Get_rank())
@classmethod
def get_local_process_id(cls) -> int | None:
# Using mpi4py, split the global communicator into sub communicators
# based on hostname. mpi will assign them ranks and that will allow
# a selection of the local process ID.
from mpi4py import MPI # pytype: disable=import-error
COMM_WORLD = MPI.COMM_WORLD
# This is the alternative method that is simpler:
new_comm = COMM_WORLD.Split_type(MPI.COMM_TYPE_SHARED)
# The rank in the new communicator - which is host-local only - IS the local rank:
return int(new_comm.Get_rank())
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@clusters@mpi4py_cluster.py@.PATH_END.py
|
{
"filename": "plot_screens.py",
"repo_name": "scintillometry/screen-database",
"repo_path": "screen-database_extracted/screen-database-main/plot_screens.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import numpy as np
from astropy.coordinates import SkyCoord
import astropy.coordinates as coord
import astropy.units as u
def plot_screens_3D(sources,screens,bounds=[-0.5,0.5,-0.5,0.5,-0.5,0.5],include_legend=False,cm="magma"):
"""
Plot the 3D locations and velocity vectors of scintillation screens.
Parameters
----------
sources: a pandas data frame or dictionary containing the Galactic longitude, Galactic latitude,
and distance of the source (typically pulsar)
screens: a pandas data frame or dictionary containing the source name, distance, orientation,
and velocity of scintillation screen(s)
bounds: an array of plotting bounds in kpc, [xmin,xmax,ymin,ymax,zmin,zmax]
include_legend: a boolean to set whether legend of sources is plotted
cm : color map to use, either string for named colour map or matplotlib colormap
Returns
-------
None
"""
if type(cm) is str:
        cmap = plt.get_cmap(cm)
else:
cmap = cm
slicedCM = cmap(np.linspace(0, 1, len(sources)))
ax = plt.figure(figsize=(7,7)).add_subplot(projection='3d')
psr_coords = SkyCoord(l=sources['GL']<<u.deg,b=sources['GB']<<u.deg,distance = sources['DIST']<<u.kpc,frame='galactic')
i = 0
for psr_name in sources['NAME']:
ax.plot3D([0,psr_coords[i].cartesian.x.value],
[0,psr_coords[i].cartesian.y.value],
[0,psr_coords[i].cartesian.z.value],
label=psr_name,color=slicedCM[i])
picrs = psr_coords[i].icrs
scr = screens[screens['PULSAR']==psr_name]
if np.isnan(scr['v_meas'].values[0]):
scr_coords = SkyCoord(ra = np.tile(picrs.ra,len(scr)),dec = np.tile(picrs.dec,len(scr)),
distance = scr['DIST'].values*u.kpc,
frame='icrs').galactic.cartesian
ax.scatter3D(scr_coords.x,scr_coords.y,scr_coords.z,s=50,color=slicedCM[i])
else:
mu_ra = scr['v_meas']*np.sin(scr['theta']*u.deg)/scr['DIST']
mu_dec = scr['v_meas']*np.cos(scr['theta']*u.deg)/scr['DIST']
scr_coords = SkyCoord(ra = np.tile(picrs.ra,len(scr)),dec = np.tile(picrs.dec,len(scr)),
distance = scr['DIST'].values*u.kpc,
pm_ra_cosdec=mu_ra.values*u.km*u.rad/(u.s*u.kpc),
                                  pm_dec = mu_dec.values*u.km*u.rad/(u.s*u.kpc),
frame='icrs').galactic.cartesian
scr_vec = scr_coords.differentials['s']
ax.quiver(scr_coords.x,scr_coords.y,scr_coords.z,scr_vec.d_x,scr_vec.d_y,scr_vec.d_z,
length=0.1,normalize=True,color=slicedCM[i])
ax.scatter3D(scr_coords.x,scr_coords.y, scr_coords.z, s=50, color=slicedCM[i])
i += 1
ax.set_xlim(bounds[0],bounds[1])
ax.set_ylim(bounds[2],bounds[3])
ax.set_zlim(bounds[4],bounds[5])
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if include_legend:
plt.legend()
def summary_plots(sources,screens):
"""
Plot some histograms and comparisons of screens data
Parameters
----------
sources: a pandas data frame or dictionary containing the distance, number of screens, and dispersion measure
of radio sources (typically pulsars)
screens: a pandas data frame or dictionary containing the source name, and fractional screen distances
of scintillation screen(s)
Returns
-------
None
"""
plt.figure(figsize=(7.5,7.5))
plt.subplot(221)
plt.hist(screens['s'])
plt.xlabel('Fractional screen distance, s')
plt.ylabel('Counts')
plt.subplot(222)
plt.hist(sources['NSCREENS'],bins=np.arange(0.5,np.max(sources['NSCREENS'])+1.5,1),rwidth=0.5)
plt.xlabel('Number of screens')
plt.ylabel('Counts')
plt.subplot(223)
plt.scatter(sources['DM'],sources['NSCREENS'])
plt.xlabel(r'Dispersion Measure (pc cm$^{-3}$)')
plt.ylabel('Number of screens')
plt.yticks(np.arange(1,np.max(sources['NSCREENS'])+1,1))
plt.subplot(224)
plt.scatter(sources['DIST'],sources['NSCREENS'])
plt.xlabel(r'Source Distance, $d_p$ (kpc)')
plt.ylabel('Number of screens')
plt.yticks(np.arange(1,np.max(sources['NSCREENS'])+1,1))
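# A minimal usage sketch with hypothetical data; the column names below are
# the ones the functions above index into (NAME, GL, GB, DIST, NSCREENS, DM
# for sources; PULSAR, DIST, v_meas, theta, s for screens).
if __name__ == "__main__":
    import pandas as pd
    sources = pd.DataFrame({'NAME': ['J0000+0000'], 'GL': [30.0], 'GB': [5.0],
                            'DIST': [1.0], 'NSCREENS': [1], 'DM': [25.0]})
    screens = pd.DataFrame({'PULSAR': ['J0000+0000'], 'DIST': [0.4],
                            'v_meas': [np.nan], 'theta': [np.nan], 's': [0.6]})
    plot_screens_3D(sources, screens, include_legend=True)
    summary_plots(sources, screens)
    plt.show()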
|
scintillometryREPO_NAMEscreen-databasePATH_START.@screen-database_extracted@screen-database-main@plot_screens.py@.PATH_END.py
|
{
"filename": "options.py",
"repo_name": "jdswinbank/Comet",
"repo_path": "Comet_extracted/Comet-master/comet/utility/options.py",
"type": "Python"
}
|
# Comet VOEvent Broker.
# Base class for command line options.
import os
from argparse import ArgumentParser, ArgumentTypeError
from lxml.etree import XPath, XPathSyntaxError
import comet.plugins
import comet.log as log
from comet.utility.voevent import parse_ivoid, BadIvoidError
__all__ = ["BaseOptions", "valid_ivoid", "valid_xpath"]
class BaseOptions(object):
def __init__(self):
if hasattr(self, "PROG"):
self.parser = ArgumentParser(prog=self.PROG)
else:
self.parser = ArgumentParser()
if "COMET_PLUGINPATH" in os.environ:
comet.plugins.__path__.extend(
os.environ.get("COMET_PLUGINPATH").split(os.pathsep)
)
self.parser.add_argument(
"--verbose",
"-v",
action="count",
help="Increase verbosity (may be specified " "more than once).",
)
self._configureParser()
def parseOptions(self, argv=None):
"""Parse argument list and set option values.
Parameters
----------
argv : iterable of `str`, optional
Set of arguments to parse. If unspecified, we rely on argparse to
pull appropriate values from `sys.argv`.
Returns
-------
self : `Options`
            This object, as a convenience -- try ``Options().parseOptions()``.
Notes
-----
This delegates to self.parser.parse_args for the bulk of the work, but
can also be used to add clean-up actions, etc.
"""
self._config = self.parser.parse_args(argv)
if self["verbose"] and self["verbose"] >= 2:
log.LEVEL = log.Levels.DEBUG
elif self["verbose"] and self["verbose"] >= 1:
log.LEVEL = log.Levels.INFO
else:
log.LEVEL = log.Levels.WARNING
self._checkOptions()
return self
def _configureParser(self):
"""Add any required options to the parser.
Override in subclasses.
"""
pass
def _checkOptions(self):
"""Perform any sanity checking required on the parsed options.
Override in subclasses.
"""
pass
def __getitem__(self, key):
"""Delegate item lookup to the associated `argparse.Namespace`."""
if hasattr(self, "_config") and hasattr(self._config, key):
return getattr(self._config, key)
raise KeyError(key)
def __contains__(self, key):
if hasattr(self, "_config"):
return hasattr(self._config, key)
else:
return False
def valid_ivoid(expression):
"""Check for a valid IVOID.
Parameters
----------
expression : `str`
Expression to check.
Returns
-------
expression : `str`
Identical to input.
Raises
------
ArgumentTypeError
If `expression` is not a valid IVOID.
"""
try:
parse_ivoid(expression)
except BadIvoidError as e:
raise ArgumentTypeError(
f"Invalid IVOA identifier: {expression}; "
f"Required format: "
f"ivo://authorityID/resourceKey#local_ID"
) from e
return expression
def valid_xpath(expression):
"""Check for a valid XPath filter.
Parameters
----------
expression : `str`
Expression to check.
Returns
-------
expression : `str`
Identical to input.
Raises
------
ArgumentTypeError
If `expression` is not a valid XPath expression.
"""
try:
XPath(expression)
except XPathSyntaxError as e:
raise ArgumentTypeError(f"Invalid XPath expression: " f"{expression}") from e
return expression
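# A minimal subclass sketch (hypothetical, for illustration only), showing
# the documented override points; `valid_ivoid` doubles as an argparse type
# converter.
class ExampleOptions(BaseOptions):
    PROG = "example-broker-tool"

    def _configureParser(self):
        self.parser.add_argument(
            "--ivoid", type=valid_ivoid,
            help="Only handle events with this IVOA identifier."
        )

    def _checkOptions(self):
        # Additional sanity checks would go here; argparse has already
        # validated --ivoid via valid_ivoid.
        pass

# Hypothetical usage:
#   opts = ExampleOptions().parseOptions(["-v", "--ivoid", "ivo://auth/key#id"])
#   print(opts["ivoid"])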
|
jdswinbankREPO_NAMECometPATH_START.@Comet_extracted@Comet-master@comet@utility@options.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter/unselected/textfont/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._color.ColorValidator"]
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter@unselected@textfont@__init__.py@.PATH_END.py
|
{
"filename": "ppm.md",
"repo_name": "python-hydro/ppmpy",
"repo_path": "ppmpy_extracted/ppmpy-main/docs/ppm.md",
"type": "Markdown"
}
|
# The Piecewise Parabolic Method for Hydrodynamics
The PPM interface construction is more involved for hydrodynamics than advection
because there are 3 characteristic waves. The basic overview of the method is:
* Convert the conserved state, ${\bf U}$ to primitive variables, ${\bf q}$
* Reconstruct each of the primitive variables as a parabola.
We first find the values of the parabola on the left and right edges of the zone,
${\bf q}_{-,i}$ and ${\bf q}_{+,i}$. This is done by defining a conservative
cubic interpolant that passes through the 2 zones on each side of the interface.
The unlimited version of this would be:
$${\bf q}_{i+1/2} = \frac{7}{12} ({\bf q}_i + {\bf q}_{i+1}) - \frac{1}{12} ({\bf q}_{i-1} + {\bf q}_{i+2})$$
we then use this value to set:
$${\bf q}_{+,i} = {\bf q}_{-,i+1} = {\bf q}_{i+1/2}$$
Working zone-by-zone, the values ${\bf q}_{-,i}$ and ${\bf q}_{+,i}$
are then limited, and we define the parabolic reconstruction in zone
$i$ as:
$${\bf q}_i(x) = {\bf q}_{-,i} + \xi (\Delta {\bf q}_i + {\bf q}_{6,i} (1 - \xi))$$
where
$$\Delta {\bf q}_i = {\bf q}_{+,i} - {\bf q}_{-,i}$$
$${\bf q}_{6,i} = 6 \left({\bf q}_i - \frac{1}{2} ({\bf q}_{-,i} + {\bf q}_{+,i})\right )$$
and
$$\xi = \frac{x - x_{i-1/2}}{\Delta x}$$
* Integrate under the parabola for the distance $\lambda^{(\nu)}_i \Delta t$ for each of the
characteristic waves $\nu$: $u-c$, $u$, and $u+c$. We define the dimensionless wave speed, $\sigma_i^{(\nu)}$:
$$\sigma_i^{(\nu)} = \frac{\lambda^{(\nu)} \Delta t}{\Delta x}$$
From the right edge, we have:
\begin{align*}
\mathcal{I}_+^{(\nu)}({\bf q}_i) &=
\frac{1}{\sigma_i^{(\nu)} \Delta x} \int_{x_{i+1/2} - \sigma_i^{(\nu)}\Delta x}^{x_{i+1/2}} {\bf q}(x) \, dx \\
&= {\bf q}_{+,i} - \frac{\sigma_i^{(\nu)}}{2} \left [ \Delta {\bf q}_i - {\bf q}_{6,i} \left (1 - \frac{2}{3} \sigma_i^{(\nu)}\right )\right ]
\end{align*}
and from the left edge, we have:
\begin{align*}
\mathcal{I}_-^{(\nu)}({\bf q}_i) &=
\frac{1}{\sigma_i^{(\nu)} \Delta x} \int_{x_{i-1/2}}^{x_{i-1/2} + \sigma_i^{(\nu)}\Delta x} {\bf q}(x) \, dx \\
&= {\bf q}_{-,i} + \frac{\sigma_i^{(\nu)}}{2} \left [ \Delta {\bf q}_i + {\bf q}_{6,i} \left (1 - \frac{2}{3} \sigma_i^{(\nu)}\right )\right ]
\end{align*}
* Define a reference state. We are going to project the amount of
${\bf q}$ carried by each wave into the characteristic variables and
then sum up all of the jumps that move toward each interface. To
minimize the effects of this characteristic projection, we will
subtract off a reference state, $\tilde{\bf q}$:
$$\tilde{\bf q}_{+,i} = \mathcal{I}_+^{(+)}({\bf q}_i)$$
$$\tilde{\bf q}_{-,i} = \mathcal{I}_-^{(-)}({\bf q}_i)$$
In each case, we are picking the fastest wave moving toward the interface.
* Define the left and right states on the interfaces seen by zones $i$ by
adding up all of the jumps that reach that interface:
$${\bf q}_{i+1/2,L}^{n+1/2} = \tilde{{\bf q}}_+ -
\sum_{\nu;\lambda^{(\nu)}\ge 0} {\bf l}_i^{(\nu)} \cdot \left (
\tilde{{\bf q}}_+ - \mathcal{I}_+^{(\nu)}({\bf q}_i)
\right ) {\bf r}_i^{(\nu)}$$
$${\bf q}_{i-1/2,R}^{n+1/2} = \tilde{{\bf q}}_- -
\sum_{\nu;\lambda^{(\nu)}\le 0} {\bf l}_i^{(\nu)} \cdot \left (
\tilde{{\bf q}}_- - \mathcal{I}_-^{(\nu)}({\bf q}_i)
\right ) {\bf r}_i^{(\nu)}$$
Notice that zone $i$ gives the left state on interface $i+1/2$ and the
right state on interface $i-1/2$.
We then solve the Riemann problem using these states.
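As a concrete illustration of the reconstruction and the edge integrals, here is a short NumPy sketch (illustrative only, not the `ppmpy` implementation; periodic boundaries, limiting omitted):
```python
import numpy as np

def ppm_parabola(q):
    """Unlimited PPM parabola coefficients from cell averages q (periodic)."""
    qm1, qp1, qp2 = np.roll(q, 1), np.roll(q, -1), np.roll(q, -2)
    # interface value q_{i+1/2} from the 7/12 - 1/12 interpolant
    q_half = (7.0 / 12.0) * (q + qp1) - (1.0 / 12.0) * (qm1 + qp2)
    qp = q_half                # q_{+,i} = q_{i+1/2}
    qm = np.roll(q_half, 1)    # q_{-,i} = q_{i-1/2}
    dq = qp - qm                        # Delta q_i
    q6 = 6.0 * (q - 0.5 * (qm + qp))    # q_{6,i}
    return qm, qp, dq, q6

def I_plus(qp, dq, q6, sigma):
    """Average of the parabola over sigma*dx measured from the right edge."""
    return qp - 0.5 * sigma * (dq - q6 * (1.0 - 2.0 * sigma / 3.0))

def I_minus(qm, dq, q6, sigma):
    """Average of the parabola over sigma*dx measured from the left edge."""
    return qm + 0.5 * sigma * (dq + q6 * (1.0 - 2.0 * sigma / 3.0))
```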
|
python-hydroREPO_NAMEppmpyPATH_START.@ppmpy_extracted@ppmpy-main@docs@ppm.md@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/contourcarpet/colorbar/tickfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="family",
parent_name="contourcarpet.colorbar.tickfont",
**kwargs,
):
super(FamilyValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
no_blank=kwargs.pop("no_blank", True),
strict=kwargs.pop("strict", True),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@contourcarpet@colorbar@tickfont@_family.py@.PATH_END.py
|
{
"filename": "gaussian.py",
"repo_name": "kevin218/POET",
"repo_path": "POET_extracted/POET-master/code/lib/gaussian.py",
"type": "Python"
}
|
# $Author: patricio $
# $Revision: 285 $
# $Date: 2010-06-18 17:59:25 -0400 (Fri, 18 Jun 2010) $
# $HeadURL: file:///home/esp01/svn/code/python/branches/patricio/photpipe/lib/gaussian.py $
# $Id: gaussian.py 285 2010-06-18 21:59:25Z patricio $
#! /usr/bin/env python
'''
Name
----
gaussian
File
----
gaussian.py
Description
-----------
Routines for evaluating, estimating parameters of, and fitting Gaussians.
Package Contents
----------------
N-dimensional functions:
gaussian(x, width=1., center=0., height=None, params=None)
Evaluate the Gaussian function with given parameters at x
(n-dimensional).
fitgaussian(y, x)
Calculates a Gaussian fit to (y, x) data, returns (width,
center, height).
1-dimensional functions:
gaussianguess(y, x=None)
Crudely estimates the parameters of a Gaussian that fits the
(y, x) data.
Examples:
---------
See fitgaussian() example.
Revisions
---------
2007-09-17 0.1 jh@physics.ucf.edu Initial version 0.01, portions
adapted from http://www.scipy.org/Cookbook/FittingData.
2007-10-02 0.2 jh@physics.ucf.edu Started making N-dimensional,
put width before center in args.
2007-11-13 0.3 jh@physics.ucf.edu Made N-dimensional.
2008-12-02 0.4 nlust@physics.ucf.edu Made fit gaussian return errors, and
fixed a bug generating initial guesses
2009-10-25 0.5 jh@physics.ucf.edu Standardized all headers, fixed
an error in a fitgaussian example, added example
">>>"s and plot labels.
'''
import numpy as np
import scipy.optimize as so
import disk as d
def gaussian(x, width=1.0, center=0.0, height=None, bgpars=[0.0, 0.0, 0.0]):
"""
Evaluates the Gaussian and a background with given parameters at
locations in x.
Parameters
----------
x : ndarray (any shape)
    Abscissa values. Arranged as the output of np.indices() but
may be float. The highest dimension must be equal to the
number of other dimensions (i.e., if x has 6 dimensions, the
highest dimension must have length 5, and each of those must
give the coordinate along the respective axis). May also be
1-dimensional. Default: np.indices(y.shape).
width : array_like
The width of the Gaussian function, sometimes called sigma.
If scalar, assumed constant for all dimensions. If array,
must be linear and the same length as the first dimension of
x. In this case, each element gives the width of the function
in the corresponding dimension. Default: [1.].
center : array_like
The mean value of the Gaussian function, sometimes called x0.
Same scalar/array behavior as width. Default: [0.].
height : scalar
The height of the Gaussian at its center. If not set,
initialized to the value that makes the Gaussian integrate to
1. If you want it to integrate to another number, leave
height alone and multiply the result by that other number
instead. Must be scalar. Default: [product(1./sqrt(2 * pi *
width**2))].
bgpars : ndarray or tuple, 3-element
    Background parameters; the elements determine an X- and Y-linearly
    dependent level of the form:
f = Y*bgparam[0] + X*bgparam[1] + bgparam[2]
(Not tested for 1D yet).
Returns
-------
results : ndarray, same shape as x (or first element of x if
multidimensional)
This function returns the Gaussian function of the given
width(s), center(s), and height applied to its input plus a
linear background level. The Gaussian function is: f(x) =
1./sqrt(2 * pi * width**2) * exp(-0.5 * ((x - center) /
width)**2). It is defined in multiple dimensions as the
product of orthogonal, single-dimension Gaussians.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import gaussian as g
>>> x = np.arange(-10., 10.005, 0.01)
>>> plt.plot(x, g.gaussian(x))
>>> plt.title('Gaussian')
    >>> plt.xlabel('Abscissa')
>>> plt.ylabel('Ordinate')
>>> # use an array [3] as a single parameter vector
>>> z = np.array([2., 2, 3])
>>> plt.plot(x, g.gaussian(x, *z))
>>> # Test that it integrates to 1.
>>> a = np.indices([100, 100]) - 50
>>> print(np.sum(g.gaussian(a, 3, 3)))
0.999999999999997
>>> print(np.sum(g.gaussian(a, np.array([1,2]), np.array([2,3]))))
1.0000000107
>>> plt.clf()
>>> plt.imshow(g.gaussian(a, [3,5], [7,3]))
>>> plt.title('2D Gaussian')
>>> plt.xlabel('X')
>>> plt.ylabel('Y')
    >>> # A Gaussian + a linear background level:
>>> g2 = g.gaussian(x, width=(1.2, 1.15), center=(13.2,15.75), height=4.3,
>>> bgpars=[0.05, 0.01, 1.0])
>>> plt.figure(1)
>>> plt.clf()
    >>> plt.imshow(g2, origin='lower', interpolation='nearest')
>>> plt.colorbar()
>>> plt.title('2D Gaussian')
>>> plt.xlabel('X')
>>> plt.ylabel('Y')
>>> plt.figure(2)
>>> plt.clf()
>>> plt.plot(g2[13,:])
>>> plt.title('X slice of 2D Gaussian')
>>> plt.xlabel('X')
>>> plt.ylabel('Z')
>>> plt.figure(3)
>>> plt.clf()
>>> plt.plot(g2[:,16])
>>> plt.title('Y slice of 2D Gaussian')
>>> plt.xlabel('Y')
>>> plt.ylabel('Z')
Revisions
---------
2007-09-17 0.1 jh@physics.ucf.edu Initial version 0.01
2007-10-02 0.2 jh@physics.ucf.edu Started making N-dimensional,
put width before center in args.
2007-11-13 0.3 jh@physics.ucf.edu Fixed docs, bugs, added param,
made N-dimensional
2009-10-01 0.4 jh@physics.ucf.edu Fixed docs.
2009-10-25 0.5 jh@physics.ucf.edu Added examples and plot labels.
    2011-05-03 patricio Params option no longer supported,
Added bgpars to add a background.
pcubillos@fulbrightmail.org
2017-XX-XX bbrooks@stsci.edu Added Patricio centering method
"""
ndim = np.ndim(x) - 1
if ndim == 0: # We use an indexing trick below that fails for 1D case.
ndim = 1
oldshape = np.shape(x)
x.shape = (1, x.shape[0])
# Make center a ndarray:
if type(center) != np.ndarray:
center += np.zeros(ndim)
# Make width a ndarray:
if type(width) != np.ndarray:
width += np.zeros(ndim)
r2pi = np.sqrt(2. * np.pi)
# Define height if needed:
if height is None:
    height = np.prod(1. / (width * r2pi))
ponent = 0.0
for i in np.arange(ndim):
ponent += ( (x[i] - center[i]) / width[i] )**2
if 'oldshape' in locals():
x.shape = oldshape
# Set up the background:
if ndim == 2:
background = x[0]*bgpars[0] + x[1]*bgpars[1] + bgpars[2]
else: # it must be 1D:
background = x*bgpars[0] + bgpars[2]
return height * np.exp(-0.5 * ponent) + background
def old_gaussianguess(y, x=None, mask=None):
"""
Crudely estimates the parameters of a Gaussian that fits the (y, x) data.
Parameters
----------
y : ndarray
Array giving the function values.
x : ndarray, same shape as y
(optional) An array of the same shape as y giving the
    abscissas of y (if missing, uses array indices). Must be
sorted ascending (which is not checked).
Returns
-------
param : tuple, 3 elements
    This function returns a tuple giving estimates of the (width,
center, height) of a Gaussian that might fit the input data.
See 'param' input parameter of gaussian() for format of this
tuple.
Notes
-----
Currently only works for 1D data.
If the data do not look Gaussian, and certainly if they contain
spikes higher/lower than the peak of the real Gaussian, the
parameter estimates will be poor. x must be sorted ascending
(which is not checked).
Method: The most extreme element of y (or its neighbor if a border
element) is the location and height of the peak. The routine
looks for the width at 0.6 of this maximum.
Todo:
When expanding to 2D, take arrays of X and Y coords rather than a
(redundant) 2D array. This will prevent irregular grids.
    2011-05-05 patricio: This function doesn't work for 2D, and I don't
    even know if it works for 1D. If I ever have time I'll see what
    we can do. The function below seems to work fine for our 2D data.
Examples
--------
>>> import gaussian as g
>>> x = np.arange(-10., 10.05, 0.1)
>>> y = g.gaussian(x)
>>> print(g.gaussianguess(y, x))
(0.99999999999999645, -3.5527136788005009e-14, 0.3989422804014327)
Revisions
---------
2007-09-17 0.1 jh@physics.ucf.edu Initial version 0.01
2007-11-13 0.2 jh@physics.ucf.edu Fixed docs, return order.
2008-12-02 0.3 nlust@physics.ucf.edu Fixed a bug where if an
initial guess was not provided, it would error out
2009-10-25 0.4 jh@physics.ucf.edu Converted to standard doc header.
"""
if y.ndim != 1 :
raise ArrayShapeError("y must be 1D, for now.")
if x is None :
x = np.indices(y.shape)[0]
else:
    if x.shape == (1,) + y.shape:
oldshape = x.shape
x.shape = y.shape
elif x.shape != y.shape :
raise ArrayShapeError("x must have same shape as y (and be sorted).")
# Default mask:
if mask is None:
mask = np.ones(np.shape(y))
ymax = np.amax(y*mask)
#iymax = np.where(y == ymax)[0][0]
iymax = np.argmax(y*mask)
ymin = np.amin(y*mask)
#iymin = np.where(y == ymin)[0][0]
iymin = np.argmin(y*mask)
if np.abs(ymin) >= np.abs(ymax):
icenter = iymin
else:
icenter = iymax
icenter = np.clip(icenter, 1, x.size-2)
center = x[icenter]
height = y[icenter]
gtsigma = np.where(y > (0.6 * height))
width = (x[gtsigma[0].max()] - x[gtsigma[0].min()] ) / 2.
if 'oldshape' in locals():
x.shape = oldshape
return (width, center, height)
def gaussianguess(data, mask=None, yxguess=None):
# Default mask:
if mask is None:
mask = np.ones(np.shape(data))
# Center position guess, looking the max value:
if yxguess is None:
#Block will need to be updated and tested for python 3.5.
gcenter = np.unravel_index(np.argmax(data*mask), np.shape(data))
else:
    # round first, then cast to int so the result can index the data array
    gcenter = int(np.around(yxguess[0])), int(np.around(yxguess[1]))
# Height guess is value at gcenter position:
gheight = data[gcenter]
# The width guess is the sum of the number of pixels that are
# greater than two sigma of the values in the x and y direction.
# This gives a (very) rough guess, in pixels, how wide the PSF is.
sigma = np.array( [np.std(data[:, gcenter[1]]), # y std (of central column)
np.std(data[gcenter[0], :])] ) # x std (of central row)
gwidth = ( np.sum((data*mask)[:, gcenter[1]] > 2*sigma[0])/2.0,
np.sum((data*mask)[gcenter[0], :] > 2*sigma[1])/2.0 )
return (gwidth, gcenter, gheight)
def fitgaussian(y, x=None, bgpars=None, fitbg=0, guess=None,
mask=None, weights=None, maskg=False, yxguess=None):
"""
Fits an N-dimensional Gaussian to (value, coordinate) data.
Parameters
----------
y : ndarray
Array giving the values of the function.
x : ndarray
    (optional) Array (any shape) giving the abscissas of y (if
missing, uses np.indices(y). The highest dimension must be
equal to the number of other dimensions (i.e., if x has 6
dimensions, the highest dimension must have length 5). The
rest of the dimensions must have the same shape as y. Must be
sorted ascending (which is not checked), if guess is not
given.
bgpars : ndarray or tuple, 3-elements
    Background parameters; the elements determine an X- and Y-linearly
    dependent level of the form:
f = Y*bgparam[0] + X*bgparam[1] + bgparam[2]
(Not tested for 1D yet).
fitbg : Integer
This flag indicates the level of background fitting:
fitbg=0: No fitting, estimate the bg as median(data).
fitbg=1: Fit a constant to the bg (bg = c).
fitbg=2: Fit a plane as bg (bg = a*x + b*y + c).
guess : tuple, (width, center, height)
Tuple giving an initial guess of the Gaussian parameters for
the optimizer. If supplied, x and y can be any shape and need
not be sorted. See gaussian() for meaning and format of this
tuple.
mask : ndarray
Same shape as y. Values where its corresponding mask value is
0 are disregarded for the minimization. Only values where the
mask value is 1 are considered.
weights : ndarray
Same shape as y. This array defines weights for the
minimization, for scientific data the weights should be
1/sqrt(variance).
Returns
-------
params : ndarray
    This array contains the best-fit parameter values: width,
    center, height, and, if requested, bgpars, with:
width : The fitted Gaussian widths in each dimension.
center : The fitted Gaussian center coordinate in each dimension.
height : The fitted height.
err : ndarray
An array containing the concatenated uncertainties
corresponding to the values of params. For example, 2D input
gives np.array([widthyerr, widthxerr, centeryerr, centerxerr,
heighterr]).
Notes
-----
If the input does not look anything like a Gaussian, the result
might not even be the best fit to that.
Method: First guess the parameters (if no guess is provided), then
call a Levenberg-Marquardt optimizer to finish the job.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import gaussian as g
>>> # parameters for X
>>> lx = -3. # low end of range
>>> hx = 5. # high end of range
>>> dx = 0.05 # step
>>> # parameters of the noise
>>> nc = 0.0 # noice center
>>> ns = 1.0 # noise width
>>> na = 0.2 # noise amplitude
>>> # 1D Example
>>> # parameters of the underlying Gaussian
>>> wd = 1.1 # width
>>> ct = 1.2 # center
>>> ht = 2.2 # height
>>> # x and y data to fit
>>> x = np.arange(lx, hx + dx / 2., dx)
>>> x += na * np.random.normal(nc, ns, x.size)
>>> y = g.gaussian(x, wd, ct, ht) + na * np.random.normal(nc, ns, x.size)
>>> s = x.argsort() # sort, in case noise violated order
>>> xs = x[s]
>>> ys = y[s]
>>> # calculate guess and fit
>>> (width, center, height) = g.gaussianguess(ys, xs)
>>> (fw, fc, fh, err) = g.fitgaussian(ys, xs)
>>> # plot results
>>> plt.clf()
>>> plt.plot(xs, ys)
>>> plt.plot(xs, g.gaussian(xs, wd, ct, ht))
>>> plt.plot(xs, g.gaussian(xs, width, center, height))
>>> plt.plot(xs, g.gaussian(xs, fw, fc, fh))
>>> plt.title('Gaussian Data, Guess, and Fit')
    >>> plt.xlabel('Abscissa')
>>> plt.ylabel('Ordinate')
>>> # plot residuals
>>> plt.clf()
>>> plt.plot(xs, ys - g.gaussian(xs, fw, fc, fh))
>>> plt.title('Gaussian Fit Residuals')
>>> plt.xlabel('Abcissa')
>>> plt.ylabel('Ordinate')
>>> # 2D Example
>>> # parameters of the underlying Gaussian
>>> wd = (1.1, 3.2) # width
>>> ct = (1.2, 3.1) # center
>>> ht = 2.2 # height
>>> # x and y data to fit
>>> nx = (hx - lx) / dx + 1
>>> x = np.indices((nx, nx)) * dx + lx
>>> y = g.gaussian(x, wd, ct, ht) + na * np.random.normal(nc, ns, x.shape[1:])
>>> # calculate guess and fit
>>> #(width, center, height) = g.gaussianguess(y, x) # not in 2D yet...
>>> (fw, fc, fh, err) = g.fitgaussian(y, x, (wd, ct, ht))
>>> # plot results
>>> plt.clf()
>>> plt.title('2D Gaussian Given')
>>> plt.xlabel('X')
>>> plt.ylabel('Y')
>>> plt.imshow( g.gaussian(x, wd, ct, ht))
>>> plt.clf()
>>> plt.title('2D Gaussian With Noise')
>>> plt.xlabel('X')
>>> plt.ylabel('Y')
>>> plt.imshow(y)
>>> #plt.imshow( g.gaussian(x, width, center, height)) # not in 2D yet...
>>> plt.clf()
>>> plt.title('2D Gaussian Fit')
>>> plt.xlabel('X')
>>> plt.ylabel('Y')
>>> plt.imshow( g.gaussian(x, fw, fc, fh))
>>> plt.clf()
>>> plt.title('2D Gaussian Fit Residuals')
>>> plt.xlabel('X')
>>> plt.ylabel('Y')
>>> plt.imshow(y - g.gaussian(x, fw, fc, fh))
>>> # All cases benefit from...
>>> # show difference between fit and underlying Gaussian
>>> # Random data, your answers WILL VARY.
>>> np.array(fw) - np.array(wd)
array([ 0.00210398, -0.00937687])
>>> np.array(fc) - np.array(ct)
array([-0.00260803, 0.00555011])
>>> np.array(fh) - np.array(ht)
0.0030143371034774269
    >>> # Last example:
>>> x = np.indices((30,30))
>>> g1 = g.gaussian(x, width=(1.2, 1.15), center=(13.2,15.75), height=1e4,
>>> bgpars=[0.0, 0.0, 100.0])
>>> error = np.sqrt(g1) * np.random.randn(30,30)
>>> y = g1 + error
>>> var = g1
>>>
>>> plt.figure(1)
>>> plt.clf()
    >>> plt.imshow(y, origin='lower', interpolation='nearest')
>>> plt.colorbar()
>>> plt.title('2D Gaussian')
>>> plt.xlabel('X')
>>> plt.ylabel('Y')
>>>
>>> guess = ((1.2,1.2),(13,16.),1e4)
>>> reload(g)
>>> fit = g.fitgaussian(y, x, bgpars=[0.0, 0.0, 110.], fitbg=1, guess=guess,
>>> mask=None, weights=1/np.sqrt(var))
>>> print(fit[0])
Revisions
---------
2007-09-17 Joe Initial version, portions adapted from
http://www.scipy.org/Cookbook/FittingData.
jh@physics.ucf.edu
2007-11-13 Joe Made N-dimensional.
  2008-12-02 Nate Included error calculation in the return value. Fixed
                  a bug in which, if the initial guess was None, an
                  incorrect shape array was generated, causing
                  gaussianguess to fail.
nlust@physics.ucf.edu
2009-10-25 Converted to standard doc header, fixed examples to
return 4 parameters.
2011-05-03 patricio Added mask, weights, and background-fitting options.
pcubillos@fulbrightmail.org
"""
if x is None:
x = np.indices(np.shape(y))
else:
if ( ((x.ndim == 1) and (x.shape != y.shape))
or ((x.ndim > 1) and (x.shape[1:] != y.shape))):
raise ValueError("x must give coordinates of points in y.")
# Default mask: all good
if mask is None:
mask = np.ones(np.shape(y))
# Default weights: no weighting
if weights is None:
weights = np.ones(np.shape(y))
# Mask the gaussian if requested:
medmask = np.copy(mask)
if maskg and (yxguess is not None or guess is not None):
if yxguess is not None:
center = yxguess
elif guess is not None:
center = guess[1]
medmask *= (1 - d.disk(3, center, np.shape(y)))
# Estimate the median of the image:
medbg = np.median(y[np.where(medmask)])
if bgpars is None:
bgpars = [0.0, 0.0, medbg]
# get a guess if not provided
if guess is None:
if yxguess is None:
guess = gaussianguess(y-medbg, mask=mask)
else:
guess = gaussianguess(y-medbg, mask=mask, yxguess=yxguess)
# "ravel" the guess
gparams = np.append(guess[0], guess[1])
gparams = np.append(gparams, guess[2])
# Background params to fit:
if fitbg == 0:
bgparams = []
elif fitbg == 1:
bgparams = bgpars[2]
elif fitbg == 2:
bgparams = bgpars
# Concatenate sets of parameters we want to fit:
params = np.append(gparams, bgparams)
# Rest of parameters needed by residuals:
args = (x, y, mask, weights, bgpars, fitbg)
# The fit:
p, cov, info, mesg, success = so.leastsq(residuals, params, args,
full_output=True)
try:
err = np.sqrt(np.diagonal(cov))
  except Exception:
err = None
return p, err
def residuals(params, x, data, mask, weights, bgpars, fitbg):
"""
Calculates the residuals between data and a gaussian model
determined by the rest of the parameters. Used in fitgaussian.
Parameters
----------
params : 1D ndarray
This array contains the parameters desired to fit with
fitgaussian, depending on fitbg, the number of elements
varies.
x : ndarray
    Array (any shape) giving the abscissas of data.
data : ndarray
Array giving the values of the function.
mask : ndarray
Same shape as data. Values where its corresponding mask value is
0 are disregarded for the minimization. Only values where the
mask value is 1 are considered.
weights : ndarray
Same shape as data. This array defines weights for the
minimization, for scientific data the weights should be
1/sqrt(variance).
bgpars : ndarray or tuple, 3-elements
    Background parameters; the elements determine an X- and Y-linearly
    dependent level of the form:
background = Y*bgparam[0] + X*bgparam[1] + bgparam[2]
fitbg : Integer
This flag indicates the level of background fitting:
fitbg=0: No fitting, estimate the bg as median(data).
fitbg=1: Fit a constant to the bg (bg = c).
fitbg=2: Fit a plane as bg (bg = a*x + b*y + c).
Returns
-------
residuals : 1D ndarray
An array of the (unmasked) weighted residuals between data and
a gaussian model determined by params (and bgpars when
necessary).
Examples
--------
Revisions
---------
2011-05-03 patricio Initial version.
pcubillos@fulbrightmail.org
"""
# Use bgpars as default for background parameters, if those values
# are being fitted update them:
bgparams = bgpars
if fitbg == 1:
bgparams[2] = params[-1] # update
params = params[0:-1] # remove last parameters from params
elif fitbg == 2:
bgparams = params[-3:] # update
params = params[0:-3] # remove last parameters
# Extract width, center, and height from params:
data_dims = np.ndim(data)
params_len = len(params)
width = params[0 : data_dims]
center = params[data_dims:2*data_dims]
if params_len - 2*data_dims == 1:
height = params[2*data_dims]
else:
    # when height is None things break here; avoid this case.
height = None
# Produce the model:
model = gaussian(x, width, center, height, bgparams).squeeze()
# Calculate residuals:
res = (model - data) * weights
# Return only unmasked values:
return res[np.where(mask)]
def gaussians(x, param):
"""
Evaluate more than 1 gaussian.
"""
ndim = x.ndim - 1
if ndim == 0: # We use an indexing trick below that fails for 1D case.
ndim = 1
oldshape = x.shape
x.shape = (1, x.shape[0])
# The number of gaussians:
ngauss = np.shape(param)[0]
if ngauss == 1:
param = [param]
result = np.zeros(x[0].shape)
for k in np.arange(ngauss): # Unpack parameters
pdim = len(param[k])
if pdim % 2: # pdim is odd (when height is specified)
pdim = (pdim - 1) / 2
height = param[k][-1]
else: # pdim is even
pdim = pdim / 2
height = None
width = param[k][ : pdim]
center = param[k][pdim : 2 * pdim]
if type(center) != np.ndarray:
center += np.zeros(ndim)
if type(width) != np.ndarray:
width += np.zeros(ndim)
if height is None:
      height = np.prod(1.0 / (width * np.sqrt(2.0 * np.pi)))
ponent = 0.0
for i in np.arange(ndim):
ponent += ( (x[i] - center[i]) / width[i] )**2.0
result += height * np.exp(-0.5 * ponent)
if 'oldshape' in locals(): # reshape it back if necessary
x.shape = oldshape
return result
def fitgaussians(y, x=None, guess=None, sigma=1.0):
"""
Fit position and flux of a data image with gaussians, same sigma
is applied to all dispersions.
Parameters:
-----------
y : array_like
Array giving the values of the function.
x : array_like
    (optional) Array (any shape) giving the abscissas of y (if
missing, uses np.indices(y).
guess : 2D-tuple, [[width1, center1, height1],
[width2, center2, height2],
... ]
Tuple giving an initial guess of the Gaussian parameters for
the optimizer. If supplied, x and y can be any shape and need
not be sorted. See gaussian() for meaning and format of this
tuple.
"""
if x is None:
x = np.indices(y.shape)[0]
else:
if ( ((x.ndim == 1) and (x.shape != y.shape))
or ((x.ndim > 1) and (x.shape[1:] != y.shape))):
raise ValueError("x must give coordinates of points in y.")
# "ravel" the guess
ngauss = np.shape(guess)[0]
  params = np.append(np.ravel(guess), sigma)
# Minimize residuals of the fit:
p, cov, info, mesg, success = so.leastsq(resids, params, args=(x,ngauss,y),
full_output=True)
sigma = p[-1]
  p = np.reshape(p[0:-1], (ngauss, len(p[0:-1]) // ngauss))
  iscov = 0 if cov is None else 1
extra = (p, sigma, iscov, cov, info, mesg)
return np.array(p[0,0:2]), extra
def resids(param, x, ngauss, y):
sigma = param[-1]
  param = np.reshape(param[0:-1], (ngauss, len(param[0:-1]) // ngauss))
gss = []
for k in np.arange(ngauss):
gauss = np.append(sigma, np.append(sigma, param[k]))
gss = np.append(gss,gauss)
  p = np.reshape(gss, (ngauss, len(gss) // ngauss))
return np.ravel(gaussians(x,param=p)-y)
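# Minimal end-to-end sketch (hypothetical data, not part of the original
# API): build a noiseless 2D Gaussian, guess its parameters, then fit it.
if __name__ == "__main__":
    xy = np.indices((40, 40))
    img = gaussian(xy, width=(2.0, 3.0), center=(18.0, 22.0), height=5.0)
    print("guess:", gaussianguess(img))
    pars, errs = fitgaussian(img, xy, guess=((2.0, 3.0), (18.0, 22.0), 5.0))
    print("fit:  ", pars)  # width_y, width_x, center_y, center_x, height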
|
kevin218REPO_NAMEPOETPATH_START.@POET_extracted@POET-master@code@lib@gaussian.py@.PATH_END.py
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/slider/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="name", parent_name="layout.slider", **kwargs):
super(NameValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@slider@_name.py@.PATH_END.py
|
{
"filename": "roadrunner_model_example_1.ipynb",
"repo_name": "hpparvi/PyTransit",
"repo_path": "PyTransit_extracted/PyTransit-master/doc/source/notebooks/models/roadrunner/roadrunner_model_example_1.ipynb",
"type": "Jupyter Notebook"
}
|
# RoadRunner transit model example I - basics
*Author:* Hannu Parviainen<br>
*Last modified:* 23 April 2024
The *RoadRunner* transit model (RRModel, [Parviainen, 2020a](https://ui.adsabs.harvard.edu/abs/2020MNRAS.499.1633P/abstract)) implemented by ``pytransit.RoadRunnerModel`` is a fast transit model that allows for any radially symmetric function to be used to model stellar limb darkening. The model offers flexibility with performance that is similar or superior to the analytical quadratic model by [Mandel & Agol (2002)](https://ui.adsabs.harvard.edu/abs/2002ApJ...580L.171M/abstract) implemented by ``pytransit.QuadraticModel``.
The model follows the standard PyTransit API and introduces a straightforward approach to specifying the limb darkening model during initialization. Users can select from:
- one of the pre-defined named analytical models (`constant`, `linear`, `quadratic`, `nonlinear`, `general`, `power-2`, and `power-2-pm`),
- an instance of a class that inherits `pytransit.LDModel`,
- a custom Python callable that takes an array of μ values and a parameter vector, and returns an array containing the stellar intensity profile evaluated at μ, or
- a tuple comprising two callables, where the first defines the limb darkening model and the second computes the integrated stellar surface brightness over the stellar disk.
This first example notebook shows how to use the *RoadRunner* transit model with the named limb darkening models, and the next notebooks show how to create your own limb darkening models.
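As a taste of the custom-callable option above, a linear law written as a plain Python function might look like this (a minimal sketch; the follow-up notebooks cover custom limb darkening models properly):
```python
from pytransit import RoadRunnerModel

def linear_law(mu, pv):
    # I(mu) for a linear limb darkening law with coefficient u = pv[0]
    return 1.0 - pv[0] * (1.0 - mu)

tm_custom = RoadRunnerModel(linear_law)  # custom callable instead of a named model
```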
```python
%matplotlib inline
from matplotlib.pyplot import subplots, setp
from matplotlib import rc
from numpy.random import normal, uniform
from numpy import arange, linspace, pi, repeat, tile, zeros
rc('figure', figsize=(13,5))
```
```python
def plot_lc(time, flux, c=None, ylim=(0.9865, 1.0025), ax=None):
if ax is None:
fig, ax = subplots()
else:
fig, ax = None, ax
ax.plot(time, flux, c=c)
ax.autoscale(axis='x', tight=True)
setp(ax, xlabel='Time [d]', ylabel='Flux', xlim=time[[0,-1]], ylim=ylim)
if fig is not None:
fig.tight_layout()
return ax
```
## Import the model
```python
from pytransit import RoadRunnerModel
```
## Example 1: simple light curve
We begin with a simple light curve without any fancy stuff such as multipassband modeling. First, we create a time array centred around zero
```python
time = linspace(-0.05, 0.05, 1500)
```
Next, we initialise and set up a ``RoadRunnerModel`` choosing to use the four-parameter *nonlinear* limb darkening model and giving it the mid-exposure time array
```python
tm = RoadRunnerModel('nonlinear')
tm.set_data(time)
```
### Evaluation for scalar parameters
After the transit model has been initialised and the data set, we can evaluate the model for a given radius ratio (``k``), limb darkening coefficients (``ldc``), zero epoch (``t0``), orbital period (``p``), scaled semi-major axis ($a/R_\star$, ``a``), orbital inclination (``i``), eccentricity (``e``), and argument of periastron (``w``). Eccentricity and argument of periastron are optional and default to zero if not given.
The ``tm.evaluate`` method returns a 1D array with shape (``npt``) with the transit model evaluated for each mid-exposure time given in the ``time`` array.
**Note:** The first ``tm.set_data`` and ``tm.evaluate`` evaluation takes a significantly longer time than the succeeding calls to these methods. This is because most of the *PyTransit* routines are accelerated with *numba*, and *numba* takes some time compiling all the required methods.
```python
flux1 = tm.evaluate(k=0.1, ldc=[0.36, 0.04, 0.1, 0.05], t0=0.0, p=1.0, a=4.2, i=0.5*pi, e=0.0, w=0.0)
```
```python
plot_lc(time, flux1);
```

### Evaluation for a set of parameters
Like the rest of the *PyTransit* transit models, the *RoadRunner* model can be evaluated simultaneously for a set of parameters. This is also done using *tm.evaluate*, but now each argument is a vector with ``npv`` values. Model evaluation is parallelised and can be significantly faster than looping over a parameter array in Python.
Now, ``tm.evaluate`` returns a 2D array with shape ``[npv, npt]`` with the transit model evaluated for each parameter vector and mid-exposure time given in the ``time`` array
```python
npv = 5
ks = normal(0.10, 0.002, (npv, 1))
t0s = normal(0, 0.001, npv)
ps = normal(1.0, 0.01, npv)
smas = normal(4.2, 0.1, npv)
incs = uniform(0.48*pi, 0.5*pi, npv)
es = uniform(0, 0.25, size=npv)
os = uniform(0, 2*pi, size=npv)
ldc = uniform(0, 0.2, size=(npv,1,4))
```
```python
flux2 = tm.evaluate(ks, ldc, t0s, ps, smas, incs, es, os)
```
```python
plot_lc(time, flux2.T);
```

### Supersampling
A single photometry observation is always an exposure over time. If the exposure time is short compared to the changes in the transit signal shape during the exposure, the observation can be modelled by evaluating the model at the mid-exposure time. However, if the exposure time is long, we need to simulate the integration by calculating the model average over the exposure time (although numerical integration is also a valid approach, it is slightly more demanding computationally and doesn't improve the accuracy significantly). This is achieved by supersampling the model, that is, evaluating the model at several locations inside the exposure and averaging the samples.
Evaluating the model many times for each observation naturally increases the computational burden of the model, but is necessary to model long-cadence observations from the *Kepler* and *TESS* telescopes.
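Conceptually, supersampling replaces the model value at the mid-exposure time with an average over the exposure; a rough sketch of the idea (illustrative only, not PyTransit's internal implementation):
```python
from numpy import linspace

def supersample(model, tmid, exptime, nsamples):
    """Average `model` over `nsamples` points spanning one exposure."""
    offsets = linspace(-0.5 * exptime, 0.5 * exptime, nsamples)
    return sum(model(tmid + dt) for dt in offsets) / nsamples
```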
```python
tm = RoadRunnerModel('nonlinear')
tm.set_data(time, exptimes=0.02, nsamples=10)
```
```python
flux3 = tm.evaluate(k=0.1, ldc=[0.36, 0.04, 0.1, 0.05], t0=0.0, p=1.0, a=4.2, i=0.5*pi, e=0.0, w=0.0)
```
```python
ax = plot_lc(time, flux1, c='0.75')
plot_lc(time, flux3, ax=ax);
```

## Example 2: heterogeneous light curve
### Multiple passbands
*PyTransit* aims to simplify modelling of heterogeneous light curves as much as possible. Here heterogeneous means that we can model light curves observed in different passbands, with different instruments, and with different supersampling requirements in one go. This is because most of the real exoplanet transit modelling science cases nowadays involve heterogeneous datasets, such as modelling long-cadence *Kepler* light curves together with short-cadence ground-based observations, or transmission spectroscopy where the light curves are created from a spectroscopic time series.
To model heterogeneous light curves, *PyTransit* assigns each observation (exposure, datapoint) to a specific light curve, and each light curve to a specific passband. This is done through the light curve index array (``lcids``) and the passband index array (``pbids``). The light curve index array is an integer array giving a light curve index for each observed datapoint (for a dataset of two light curves, the indices would be either 0 or 1), while the passband index array is an integer array containing a passband index for each light curve in the dataset. So, a dataset of two light curves observed in the same passband would be
times = [0, 1, 2, 3]
lcids = [0, 0, 1, 1]
pbids = [0, 0]
while a dataset containing two light curves observed in different passbands would be
times = [0, 1, 2, 3]
lcids = [0, 0, 1, 1]
pbids = [0, 1]
Let's create two datasets. The first one divides our single light curve into two halves and gives each half a different light curve index (0 for the first half and 1 for the second)
```python
lcids1 = zeros(time.size, int)
lcids1[time.size//2:] = 1
```
```python
plot_lc(time, lcids1, ylim=(-0.5, 1.5));
```

The second dataset considers a more realistic scenario where we have three separate transits observed in two passbands. We create this by tiling our ``time`` array three times.
```python
time2 = tile(time, 3)
lcids2 = repeat([0, 1, 1], time.size)
```
```python
ax = plot_lc(arange(time2.size), lcids2, ylim=(-0.5, 1.5))
[ax.axvline(i*time.size, c='k', ls='--') for i in range(1,3)];
```

#### Achromatic radius ratio
Let's see how this works in practice. We divide our current light curve into two halves observed in different passbands. These passbands have different limb darkening, but we first assume that the radius ratio is achromatic.
```python
tm = RoadRunnerModel('power-2')
tm.set_data(time, lcids=lcids1, pbids=[0, 1])
```
```python
flux = tm.evaluate(k=0.1, ldc=[[3.1, 0.1],[2.1, 0.03]], t0=0.0, p=1.0, a=4.3, i=0.5*pi)
plot_lc(time, flux);
```

```python
tm.set_data(time2, lcids=lcids2, pbids=[0, 1])
```
```python
flux = tm.evaluate(k=0.1, ldc=[[3.1, 0.1],[2.1, 0.03]], t0=0.0, p=1.0, a=4.3, i=0.5*pi)
plot_lc(arange(flux.size), flux);
```

#### Chromatic radius ratio
Next, we assume that the radius ratio is chromatic, that is, it depends on the passband. This is achieved by giving the model an array of radius ratios (whose length should equal the number of passbands) instead of a scalar radius ratio.
```python
tm.set_data(time, lcids=lcids1, pbids=[0, 1])
```
```python
flux = tm.evaluate(k=[0.105, 0.08], ldc=[[3.1, 0.1],[2.1, 0.03]], t0=0.0, p=1.0, a=4.3, i=0.5*pi)
```
```python
plot_lc(time, flux);
```

```python
tm.set_data(time2, lcids=lcids2, pbids=[0, 1])
```
```python
flux = tm.evaluate(k=[0.105, 0.08], ldc=[[3.1, 0.1],[2.1, 0.03]], t0=0.0, p=1.0, a=4.3, i=0.5*pi)
plot_lc(arange(flux.size), flux);
```

### Different supersampling rates
Next, let's set different supersampling rates to the two light curves. There's no reason why we couldn't also let them have different passbands, but it's better to keep things simple at this stage.
```python
tm.set_data(time, lcids=lcids1, exptimes=[0.0, 0.02], nsamples=[1, 10])
```
```python
flux = tm.evaluate(k=0.105, ldc=[3.1, 0.1], t0=0.0, p=1.0, a=4.3, i=0.5*pi)
```
```python
plot_lc(time, flux);
```

```python
tm.set_data(time2, lcids=lcids2, exptimes=[0.0, 0.02], nsamples=[1, 10])
```
```python
flux = tm.evaluate(k=0.105, ldc=[3.1, 0.1], t0=0.0, p=1.0, a=4.3, i=0.5*pi)
```
```python
plot_lc(arange(flux.size), flux);
```

### Everything together
Finally, let's throw everything together and create a set of light curves observed in different passbands, requiring different supersampling rates, assuming chromatic radius ratios, for a set of parameter vectors.
```python
tm = RoadRunnerModel('quadratic-tri')
time3 = tile(time, 3)
lcids3 = repeat([0, 1, 2], time.size)
tm.set_data(time3, lcids=lcids3, pbids=[0, 1, 2], exptimes=[0.0, 0.02, 0.0], nsamples=[1, 10, 1])
```
```python
npv = 5
ks = uniform(0.09, 0.1, (npv, 3))
t0s = normal(0, 0.002, npv)
ps = normal(1.0, 0.01, npv)
smas = normal(5.0, 0.1, npv)
incs = uniform(0.48*pi, 0.5*pi, npv)
es = uniform(0, 0.25, size=npv)
os = uniform(0, 2*pi, size=npv)
ldc = uniform(0, 0.5, size=(npv,3,2))
```
```python
flux = tm.evaluate(k=ks, ldc=ldc, t0=t0s, p=ps, a=smas, i=incs, e=es, w=os)
```
```python
plot_lc(arange(flux.shape[1]), flux.T + linspace(0, 0.06, npv), ylim=(0.986, 1.065));
```

---
<center>©2024 Hannu Parviainen</center>
|
hpparviREPO_NAMEPyTransitPATH_START.@PyTransit_extracted@PyTransit-master@doc@source@notebooks@models@roadrunner@roadrunner_model_example_1.ipynb@.PATH_END.py
|
{
"filename": "makePlanetInput.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaseline_gaiaRadCut/makePlanetInput.ipynb",
"type": "Jupyter Notebook"
}
|
This notebook prepares a planet candidate catalog for the stellar population in the specified input stellar catalog. It computes the reliability and the corrected planet radius, and includes useful planet properties such as the Robovetter score. It outputs two catalogs: one that contains only PCs and one that contains all KOIs.
Reliability is given by
$$ R = \frac{N_{\mathrm{truePC}}}{N_{\mathrm{obsPC}}} = 1 - \frac{N_{\mathrm{obsFP}}}{N_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{F_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) $$
where $E = N_{\mathrm{obsFP}}/N_{\mathrm{trueFP}}$ is the false positive effectiveness, $F_{\mathrm{obsFP}} = N_{\mathrm{obsFP}}/N_{\mathrm{obsTCEs}}$ is the fraction of observed TCEs that are dispositioned as FP and $F_{\mathrm{obsPC}} = N_{\mathrm{obsPC}}/N_{\mathrm{obsTCEs}}$ is the fraction of TCEs dispositioned as PC.
We will separately measure $E$ and $F_{\mathrm{obsFP}}$ as binomial point processes with probabilities that depend on period and MES. Once we have $F_{\mathrm{obsFP}}$ then $F_{\mathrm{obsPC}} = 1 - F_{\mathrm{obsFP}}$, assuming that $N_{\mathrm{obsTCEs}} = N_{\mathrm{obsPC}} + N_{\mathrm{obsFP}}$.
We think of TCEs as consisting of two sets: those that are dispositioned as FP and those that are dispositioned as PC. We do this for both the observed TCEs and for the inverted/scrambled TCEs, where all TCEs are true false positives. We can then think of the vetting process as drawing from the set of TCEs, with a probability $r$ that a given TCE is dispositioned as FP. The probability of selecting $c$ FPs from $n$ TCEs is then given by the binomial distribution
$$P\{c\} = \left( \begin{array}{c} n \\ c \end{array} \right) r^c (1-r)^{n-c}.$$
To measure $E$ we use the inverted and scrambled data sets, where all detected TCEs are by definition FPs. We define $E$ as the probability of drawing FPs from inverted/scrambled TCEs, found via the Bayesian inference $p(E|n, c) \propto p(c|E, n) p(E)$, where
$$p(c|E, n) = \left( \begin{array}{c} n \\ c \end{array} \right) E^c (1-E)^{n-c}$$ and
$p(E)$ is a prior distribution of the probability $E$. By putting the data on a grid indexed by $i,j$, we can fit effectiveness as a function parameterized by a vector $\theta$, $E(\theta,\mathrm{period},\mathrm{MES})$, as $p(\theta|n_{i,j}, c_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) \propto p(c_{i,j}|\theta, n_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) p(\theta)$, where $p(\theta)$ is some prior distribution of the parameters.
To measure $F_{\mathrm{obsFP}}$ we perform a similar inference using the set of observed TCEs, inferring the probability of drawing $c$ FPs from $n$ observed TCEs. The inference in this case becomes $p(F_{\mathrm{obsFP}}|n, c) \propto p(c|F_{\mathrm{obsFP}}, n) p(F_{\mathrm{obsFP}})$, which we can parameterize in terms of a function similar to effectiveness.
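As a quick numerical check of the reliability formula (illustrative values only, not taken from the fits below):
```python
# Illustrative values only: E is the FP effectiveness, fobs the observed
# FP fraction F_obsFP; R then follows from the formula above.
E = 0.99
fobs = 0.5
R = 1 - (fobs / (1 - fobs)) * ((1 - E) / E)
print(R)  # ~0.99: even a 50% observed FP fraction is reliable when E is near 1
```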
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spec
import pandas as pd
from astropy.io import ascii
from astropy.table import Table, vstack
import pickle
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import sys
sys.path.insert(0, '..')
import dr25Models as funcModels
```
Reliability is given by
$$ R = \frac{N_{\mathrm{truePC}}}{N_{\mathrm{obsPC}}} = 1 - \frac{N_{\mathrm{obsFP}}}{N_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{F_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{1 - F_{\mathrm{obsFP}}} \left( \frac{1 - E}{E} \right) $$
where $E = N_{\mathrm{obsFP}}/N_{\mathrm{trueFP}}$, $F_{\mathrm{obsFP}} = N_{\mathrm{obsFP}}/N_{\mathrm{obsTCEs}}$ is the fraction of observed TCEs that are dispositioned as FP and $F_{\mathrm{obsPC}} = N_{\mathrm{obsPC}}/N_{\mathrm{obsTCEs}}$ is the fraction of TCEs dispositioned as PC.
We get $E$ and $F_{\mathrm{obsFP}}$ from the outputs of the notebooks binomialFPEffectiveness.ipynb and binomialObsFPRate.ipynb.
```python
catalogType = "BergerRadCut"
# catalogType = "Berger2019"
# catalogType = "BergerRadCut"
# catalogType = "Burke"
# set the effectiveness model
fpEffModel = "rotatedLogisticX0"
# set the obs FP rate model
obsModel = "rotatedLogisticX0"
# read in the model parameters
tt = pd.read_pickle("fpEffectivenessTable.pkl")
tm = tt[tt.Model == fpEffModel]
fpEffXRange = tm.periodRange.values[0]
fpEffYRange = tm.mesRange.values[0]
fpEffTheta = tm.medianMCMCTheta.values[0]
tt = pd.read_pickle("obsFpTable.pkl")
tm = tt[tt.Model == obsModel]
obsXRange = tm.periodRange.values[0]
obsYRange = tm.mesRange.values[0]
obsTheta = tm.medianMCMCTheta.values[0]
```
```python
cellPeriod, cellMes = np.meshgrid(np.array(np.linspace(fpEffXRange[0], fpEffXRange[1], 200)),
np.array(np.linspace(fpEffYRange[0], fpEffYRange[1], 200)))
effFit = funcModels.evaluateModel(cellPeriod, cellMes, fpEffTheta, fpEffXRange, fpEffYRange, fpEffModel)
obsFit = funcModels.evaluateModel(cellPeriod, cellMes, obsTheta, obsXRange, obsYRange, obsModel)
```
```python
fig = plt.figure(figsize=plt.figaspect(0.3));
R = 1 - (obsFit/(1-obsFit))*((1-effFit)/effFit)
pR = R;
pR[pR<0] = 0;
ax = fig.add_subplot(1, 3, 1, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("Reliability");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
fig, ax = plt.subplots(figsize=(10,10));
CS = ax.contour(cellPeriod, cellMes, pR, colors='k', levels = [.45, .5, .55, .6, .7, .75, .8, .85, .9, .95, .99]);
ax.clabel(CS, inline=1, fontsize=18);
ax.tick_params(axis = "both", labelsize = 18)
plt.xlabel("period (days)", fontsize = 24);
plt.ylabel("MES", fontsize = 24);
plt.savefig("reliabilityContours.pdf",bbox_inches='tight')
plt.title("Reliability Against False Alarms", fontsize = 24);
```


```python
fig = plt.figure(figsize=plt.figaspect(0.3));
R = (1-effFit)/effFit
pR = R;
pR[pR<0] = 0;
ax = fig.add_subplot(1, 3, 1, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("1-E/E");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
fig, ax = plt.subplots(figsize=(5,5));
CS = ax.contour(cellPeriod, cellMes, pR);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("period");
plt.ylabel("MES");
```


```python
fig = plt.figure(figsize=plt.figaspect(0.3));
R = obsFit/(1-obsFit)
pR = R;
pR[pR<0] = 0;
ax = fig.add_subplot(1, 3, 1, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("obs/(1-obs)");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, pR, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
fig, ax = plt.subplots(figsize=(5,5));
CS = ax.contour(cellPeriod, cellMes, pR);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("period");
plt.ylabel("MES");
```


```python
R = 1 - (obsFit/(1-obsFit))*((1-effFit)/effFit)
pR = R;
pR[pR<0] = 0;
sp = np.zeros([3,3])
sPeriod = np.array([[0, 10, 200], [0, 10, 200], [0, 10, 200]])
sMes = np.array([[0, 0, 0], [10, 10, 10], [30, 30, 30]])
sp[0,0] = np.mean(np.mean(pR[np.where((cellPeriod > 0) & (cellPeriod <= 20) & (cellMes > 20) & (cellMes <= 200))]))
sp[0,1] = np.mean(np.mean(pR[np.where((cellPeriod > 20) & (cellPeriod <= 200) & (cellMes > 20) & (cellMes <= 200))]))
sp[0,2] = np.mean(np.mean(pR[np.where((cellPeriod > 200) & (cellPeriod <= 500) & (cellMes > 20) & (cellMes <= 200))]))
sp[1,0] = np.mean(np.mean(pR[np.where((cellPeriod > 0) & (cellPeriod <= 20) & (cellMes > 10) & (cellMes <= 20))]))
sp[1,1] = np.mean(np.mean(pR[np.where((cellPeriod > 20) & (cellPeriod <= 200) & (cellMes > 10) & (cellMes <= 20))]))
sp[1,2] = np.mean(np.mean(pR[np.where((cellPeriod > 200) & (cellPeriod <= 500) & (cellMes > 10) & (cellMes <= 20))]))
sp[2,0] = np.mean(np.mean(pR[np.where((cellPeriod > 0) & (cellPeriod <= 20) & (cellMes > 0) & (cellMes <= 10))]))
sp[2,1] = np.mean(np.mean(pR[np.where((cellPeriod > 20) & (cellPeriod <= 200) & (cellMes > 0) & (cellMes <= 10))]))
sp[2,2] = np.mean(np.mean(pR[np.where((cellPeriod > 200) & (cellPeriod <= 500) & (cellMes > 0) & (cellMes <= 10))]))
x = np.array([[0, 1, 2], [0, 1, 2], [0, 1, 2]])
y = np.array([[0, 0, 0], [1, 1, 1], [2, 2, 2]])
dx = 1
dy = 1
imageSize = (3,3)
plt.figure(figsize=imageSize);
fig, ax = plt.subplots(figsize=imageSize);
da = np.transpose(sp);
ax.imshow(da);
# ax.imshow(da, origin='lower');
arrayShape = da.shape;
for i in range(arrayShape[0]):
for j in range(arrayShape[1]):
if da[i, j] < 0.7:
c = "w"
else:
c = "k"
text = ax.text(x[(j,i)]+dx/2, y[(j,i)]+dy/2, round(da[i, j],3),
ha="center", va="center", color=c);
sp
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/numpy/core/fromnumeric.py:2920: RuntimeWarning: Mean of empty slice.
out=out, **kwargs)
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/numpy/core/_methods.py:85: RuntimeWarning: invalid value encountered in double_scalars
ret = ret.dtype.type(ret / rcount)
array([[ nan, 0.99980999, 0.95908385],
[ nan, 0.99680568, 0.8944732 ],
[ nan, 0.94256453, 0.60988632]])
<Figure size 216x216 with 0 Axes>

```python
def computeReliabiltyPosterior(xp, yp, eSamples, oSamples):
r = np.zeros(np.shape(eSamples)[0])
for i in range(np.shape(eSamples)[0]):
e = funcModels.evaluateModel(xp, yp, eSamples[i,:], fpEffXRange, fpEffYRange, fpEffModel)
o = funcModels.evaluateModel(xp, yp, oSamples[i,:], obsXRange, obsYRange, obsModel)
r[i] = 1 - (o/(1-o))*((1-e)/e)
e = funcModels.evaluateModel(xp, yp, fpEffTheta, fpEffXRange, fpEffYRange, fpEffModel)
o = funcModels.evaluateModel(xp, yp, obsTheta, obsXRange, obsYRange, obsModel)
f = 1 - (o/(1-o))*((1-e)/e)
return r, f
```
```python
eSamples = np.load("binEffPosteriors_" + str(fpEffModel) + ".npy");
oSamples = np.load("binObsPosteriors_" + str(obsModel) + ".npy");
r1, f1 = computeReliabiltyPosterior(200., 25., eSamples, oSamples)
r2, f2 = computeReliabiltyPosterior(365., 10., eSamples, oSamples)
r3, f3 = computeReliabiltyPosterior(365., 8., eSamples, oSamples)
```
```python
ymax = 25000
plt.figure(figsize=(15,10))
plt.hist(r1, 20);
plt.plot([f1, f1], [0, ymax], color='k', linestyle='--', linewidth=1, alpha = 0.2)
plt.hist(r2, 100, alpha = 0.5);
plt.plot([f2, f2], [0, ymax], color='k', linestyle='--', linewidth=1, alpha = 1)
plt.hist(r3, 100, alpha = 0.5);
plt.plot([f3, f3], [0, ymax], color='k', linestyle='--', linewidth=1, alpha = 1)
plt.ylim(0, ymax)
plt.xlim(0, 1.2)
plt.tick_params(labelsize = 18)
plt.xlabel("$R_\mathrm{FA}$", fontSize = 24);
plt.savefig("reliabilityExamples.pdf",bbox_inches='tight')
print("f1:" + str(f1))
print("f2:" + str(f2))
print("f3:" + str(f3))
```
f1:0.9994323312674375
f2:0.7424431005551173
f3:0.46196912771177545

```python
import requests
from cStringIO import StringIO
if False:
selectStr = "kepid,kepoi_name,koi_tce_plnt_num,koi_pdisposition,koi_score,koi_period,koi_max_mult_ev,koi_prad,koi_prad_err1,koi_prad_err2,koi_ror,koi_ror_err1,koi_ror_err2"
urlDr25Koi = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=q1_q17_dr25_koi&select=" + selectStr
r = requests.get(urlDr25Koi)
if r.status_code != requests.codes.ok:
r.raise_for_status()
fh = StringIO(r.content)
dr25Koi = pd.read_csv(fh, dtype={"kepoi_name":str})
dr25Koi.to_csv("koiCatalogs/dr25_kois_archive.txt", index=False)
else:
dr25Koi = pd.read_csv("koiCatalogs/dr25_kois_archive.txt", dtype={"kepoi_name":str})
print("Loaded " + str(len(dr25Koi)) + " KOIs")
```
Loaded 8054 KOIs
```python
# restrict the population to stars in Travis' catalog
if catalogType == "Berger2019":
dr25CleanStellarIso = pd.read_csv("../stellarCatalogs/dr25_stellar_berger2019_clean_GK.txt")
if catalogType == "BergerRadCut":
dr25CleanStellarIso = pd.read_csv("../stellarCatalogs/dr25_stellar_berger2019_clean_GaiaRadCut_GK.txt")
elif catalogType == "Burke":
dr25CleanStellarIso = pd.read_csv("../stellarCatalogs/dr25_stellar_supp_burke_clean_GK.txt")
dr25Koi = dr25Koi[dr25Koi.kepid.isin(dr25CleanStellarIso.kepid)]
dr25Koi = dr25Koi.reset_index(drop=True)
print("After removing planets not in Travis' list, we have " + str(len(dr25Koi)) + " KOIs")
```
After removing planets not in Travis' list, we have 2804 KOIs
```python
# merge in only the stellar radius, its uncertainties, and teff from the stellar table
dr25Koi = pd.merge(dr25Koi, dr25CleanStellarIso[["kepid","radius","radius_err1","radius_err2","teff"]], on="kepid", how="inner")
```
```python
# correct the planet radii with the new catalog: Rp = (Rp/R*) x R*, converted to Earth radii,
# with uncertainties propagated in quadrature from the radius ratio and the stellar radius
rEarth = 6356.8 # km (Earth's polar radius)
rSun = 695700 # km
dr25Koi['corrected_prad'] = dr25Koi['koi_ror']*dr25Koi['radius']*rSun/rEarth;
dr25Koi['corrected_prad_err1'] = np.sqrt(dr25Koi['koi_ror_err1']**2*dr25Koi['radius']**2
+dr25Koi['koi_ror']**2*dr25Koi['radius_err1']**2)*rSun/rEarth;
dr25Koi['corrected_prad_err2'] = -np.sqrt(dr25Koi['koi_ror_err2']**2*dr25Koi['radius']**2
+dr25Koi['koi_ror']**2*dr25Koi['radius_err2']**2)*rSun/rEarth;
dr25Koi = dr25Koi[~np.isnan(dr25Koi.koi_prad)]
```
```python
v = dr25Koi.corrected_prad_err1/dr25Koi.koi_prad_err1
plt.hist(v[v<5], 100);
```

```python
plt.hist(dr25Koi['corrected_prad'][dr25Koi['corrected_prad']<10], 100);
```

```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(dr25Koi.koi_period, dr25Koi.koi_prad,
yerr = [-dr25Koi.koi_prad_err2, dr25Koi.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(dr25Koi.koi_period, dr25Koi.corrected_prad,
yerr = [-dr25Koi.corrected_prad_err2, dr25Koi.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
```
(50, 400)

```python
dr25Fpp = ascii.read("../data/q1_q17_dr25_koifpp.txt")
dr25FppPd = dr25Fpp.to_pandas()
```
```python
mergedDr25Koi = pd.merge(dr25Koi, dr25FppPd, on="kepoi_name", how="inner")
```
```python
mergedDr25Koi.loc[:,"fpEffectiveness"] = pd.Series(
funcModels.evaluateModel(mergedDr25Koi.koi_period,
mergedDr25Koi.koi_max_mult_ev, fpEffTheta,
fpEffXRange, fpEffYRange, fpEffModel), index = mergedDr25Koi.index)
mergedDr25Koi.loc[:,"obsFpRate"] = pd.Series(
funcModels.evaluateModel(mergedDr25Koi.koi_period,
mergedDr25Koi.koi_max_mult_ev, obsTheta,
obsXRange, obsYRange, obsModel), index = mergedDr25Koi.index)
mergedDr25Koi.loc[:,"reliability"] = pd.Series(
1-(mergedDr25Koi.obsFpRate/(1-mergedDr25Koi.obsFpRate))
*(1-mergedDr25Koi.fpEffectiveness)/mergedDr25Koi.fpEffectiveness, index = mergedDr25Koi.index)
# clip negative reliabilities to zero (use .loc to avoid chained assignment)
mergedDr25Koi.loc[mergedDr25Koi.reliability < 0., "reliability"] = 0.
```
```python
plt.hist(mergedDr25Koi.koi_score, 40);
plt.yscale('log', nonposy='clip')
```

```python
np.sum(np.isnan(mergedDr25Koi.fpp_prob) & (mergedDr25Koi.koi_period > 50))
```
0
```python
mergedDr25Koi[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2]
```
Row 2152 (1 row × 27 columns; elided columns shown as "..."):

| column | value |
| --- | --- |
| kepid_x | 9394762 |
| kepoi_name | K05664.01 |
| koi_tce_plnt_num | 1 |
| koi_pdisposition | FALSE POSITIVE |
| koi_score | 0.0 |
| koi_period | 77.138911 |
| koi_max_mult_ev | 11.215458 |
| koi_prad | 3.39 |
| koi_prad_err1 | 1.02 |
| koi_prad_err2 | -0.27 |
| ... | ... |
| corrected_prad | 3.427388 |
| corrected_prad_err1 | 22.922429 |
| corrected_prad_err2 | -1.060969 |
| rowid | 6112 |
| kepid_y | 9394762 |
| fpp_koi_period | 308.57 |
| fpp_prob | 0.68 |
| fpEffectiveness | 0.989174 |
| obsFpRate | 0.168771 |
| reliability | 0.997778 |
```python
mergedDr25Koi["fpp_prob_use"] = mergedDr25Koi["fpp_prob"]
mergedDr25Koi.fpp_prob_use[np.isnan(mergedDr25Koi.fpp_prob)] = 1
mergedDr25Koi.fpp_prob_use[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2] = 1
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:2: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
```python
mergedDr25Koi[np.abs(mergedDr25Koi.koi_period - mergedDr25Koi.fpp_koi_period)>1e-2]
```
Row 2152 (1 row × 28 columns; elided columns shown as "..."):

| column | value |
| --- | --- |
| kepid_x | 9394762 |
| kepoi_name | K05664.01 |
| koi_tce_plnt_num | 1 |
| koi_pdisposition | FALSE POSITIVE |
| koi_score | 0.0 |
| koi_period | 77.138911 |
| koi_max_mult_ev | 11.215458 |
| koi_prad | 3.39 |
| koi_prad_err1 | 1.02 |
| koi_prad_err2 | -0.27 |
| ... | ... |
| corrected_prad_err1 | 22.922429 |
| corrected_prad_err2 | -1.060969 |
| rowid | 6112 |
| kepid_y | 9394762 |
| fpp_koi_period | 308.57 |
| fpp_prob | 0.68 |
| fpEffectiveness | 0.989174 |
| obsFpRate | 0.168771 |
| reliability | 0.997778 |
| fpp_prob_use | 1.0 |
```python
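# total reliability = (1 - astrophysical FPP) x (instrumental false-alarm reliability)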
mergedDr25Koi["totalReliability"] = (1-mergedDr25Koi.fpp_prob_use)*mergedDr25Koi.reliability
```
```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.koi_max_mult_ev, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.totalReliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
plt.title("KOI Reliability, size = total reliability");
plt.ylim([7, 50])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.totalReliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Reliability, size = total reliability");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.reliability, alpha = 0.5);
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=mergedDr25Koi.reliability, edgecolors='k', s=100*mergedDr25Koi.totalReliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Reliability, size = reliability");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
dr25PC = mergedDr25Koi[mergedDr25Koi.koi_pdisposition == "CANDIDATE"]
dr25FP = mergedDr25Koi[mergedDr25Koi.koi_pdisposition == "FALSE POSITIVE"]
# remove those with corrected_prad = NAN
dr25PC = dr25PC[~np.isnan(dr25PC.corrected_prad)]
dr25FP = dr25FP[~np.isnan(dr25FP.corrected_prad)]
mergedDr25Koi = mergedDr25Koi[~np.isnan(mergedDr25Koi.corrected_prad)]
print("There are " + str(len(dr25PC)) + " PCs in " + str(len(dr25CleanStellarIso)) + " observed targets")
print("There are " + str(len(dr25FP)) + " FPs in " + str(len(dr25CleanStellarIso)) + " observed targets")
```
There are 2078 PCs in 67306 observed targets
There are 723 FPs in 67306 observed targets
```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.koi_max_mult_ev, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("MES");
plt.title("PC Reliability, size = total reliability");
#plt.ylim([7, 30])
#plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.8);
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, s=100*dr25PC.totalReliability,
c=dr25PC.reliability, facecolors='none', edgecolors='k', alpha = 0.8);
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("PC Reliability, size = reliability");
#plt.ylim([0, 2.5])
#plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.5);
plt.xlabel("period", fontsize = 24);
plt.ylabel("corrected planet radius", fontsize = 24);
plt.title("PC Reliability, size = total reliability", fontsize = 24);
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
plt.savefig("pcReliability.pdf",bbox_inches='tight')
plt.plot([200, 200], [1, 2], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle='--', linewidth=1)
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.koi_prad, cmap="viridis",
c=dr25PC.reliability, edgecolors='k', s=100*dr25PC.totalReliability, alpha = 0.5);
plt.xlabel("period", fontsize = 24);
plt.ylabel("DR25 planet radius", fontsize = 24);
plt.title("PC Reliability, size = total reliability", fontsize = 24);
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability");
plt.savefig("pcReliability.pdf",bbox_inches='tight')
plt.plot([200, 200], [1, 2], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle='--', linewidth=1)
```
[<matplotlib.lines.Line2D at 0x7f856067ec90>]


```python
dr25PcInRange = dr25PC[(dr25PC.koi_period>50)&(dr25PC.koi_period<400)&(dr25PC.corrected_prad>0)&(dr25PC.corrected_prad<2.5)]
```
```python
fig, ax = plt.subplots(figsize=(15,10));
rs = mergedDr25Koi.totalReliability*mergedDr25Koi.koi_score
ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, marker="+", alpha=0.2);
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=rs, edgecolors='k', s=100*rs, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Total Reliability x Score");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("KOI Total Reliability x Score");
```

```python
plt.hist(dr25PC.corrected_prad/dr25PC.koi_prad, 100);
#plt.yscale('log', nonposy='clip')
```

```python
plt.hist(dr25CleanStellarIso.radius[dr25CleanStellarIso.radius<2]/dr25CleanStellarIso.radius_DR25[dr25CleanStellarIso.radius<2], 100);
#plt.yscale('log', nonposy='clip')
```

```python
dr25PC.to_csv("koiCatalogs/dr25_GK_PCs.csv", index=False)
mergedDr25Koi.to_csv("koiCatalogs/dr25_GK_KOIs.csv", index=False)
```
```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(dr25PC.koi_period, dr25PC.koi_prad,
yerr = [-dr25PC.koi_prad_err2, dr25PC.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(dr25PC.koi_period, dr25PC.corrected_prad,
yerr = [-dr25PC.corrected_prad_err2, dr25PC.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
```
(50, 400)

```python
plt.hist(dr25PC.koi_score, 40);
plt.yscale('log', nonposy='clip')
plt.title("PC score distribution")
plt.hist(dr25FP.koi_score, 40, alpha=0.5);
plt.yscale('log', nonposy='clip')
plt.title("FP score distribution")
```
Text(0.5,1,'FP score distribution')

```python
period_rng = (50, 200)
rp_rng = (1., 2.)
occPcs = dr25PC[(dr25PC.koi_period>=period_rng[0])&(dr25PC.koi_period<=period_rng[1])&(dr25PC.corrected_prad>=rp_rng[0])&(dr25PC.corrected_prad<=rp_rng[1])]
print("After radius correction there are " + str(len(occPcs)) + " PCs in " + str(len(dr25CleanStellarIso)) + " observed targets")
occPcs2 = dr25PC[(dr25PC.koi_period>=period_rng[0])&(dr25PC.koi_period<=period_rng[1])&(dr25PC.koi_prad>=rp_rng[0])&(dr25PC.koi_prad<=rp_rng[1])]
print("Before radius correction there are " + str(len(occPcs2)) + " PCs in " + str(len(dr25CleanStellarIso)) + " observed targets")
```
After radius correction there are 36 PCs in 67306 observed targets
Before radius correction there are 44 PCs in 67306 observed targets
```javascript
%%javascript
IPython.notebook.save_notebook()
```
<IPython.core.display.Javascript object>
```bash
%%bash -s "$catalogType"
jupyter nbconvert --to html makePlanetInput.ipynb
mv makePlanetInput.html htmlArchive/makePlanetInput_$1.html
```
[NbConvertApp] Converting notebook makePlanetInput.ipynb to html
[NbConvertApp] Writing 2028144 bytes to makePlanetInput.html
```python
plt.figure(figsize=(5,5));
plt.plot(dr25PC.koi_score, dr25PC.totalReliability, '.')
plt.xlabel("score")
plt.ylabel("total reliability")
plt.figure(figsize=(5,5));
plt.plot(dr25PC.koi_score, dr25PC.reliability, '.')
plt.xlabel("score")
plt.ylabel("instrumental reliability")
```
Text(0,0.5,'instrumental reliability')


```python
print(float((66036-60220))/60220)
```
0.0965792095649
```python
print((33.-25)/25)
```
0.32
```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=1-dr25PC.fpp_prob_use, edgecolors='k', alpha = 0.5);
plt.xlabel("period");
plt.ylabel("radius");
plt.title("PC FPP");
plt.ylim([0.75, 12])
plt.xlim([1, 50])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("FPP");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=dr25PC.totalReliability, edgecolors='k', s=100*dr25PC.reliability, alpha = 0.5);
plt.xlabel("period");
plt.ylabel("radius");
plt.title("PC total reliability");
plt.ylim([0.75, 12])
plt.xlim([1, 20])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("total reliability");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(dr25PC.koi_period, dr25PC.corrected_prad, cmap="viridis",
c=1-dr25PC.fpp_prob_use, edgecolors='k', s=100*dr25PC.reliability, alpha = 0.5);
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("PC astrophysical Reliability, size = instrumental FP reliability");
#plt.ylim([0, 2.5])
#plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("1-FPP");
```

```python
fig, ax = plt.subplots(figsize=(15,10));
scf = ax.scatter(mergedDr25Koi.koi_period, mergedDr25Koi.corrected_prad, cmap="viridis",
c=1-mergedDr25Koi.fpp_prob_use, edgecolors='k', s=100*mergedDr25Koi.reliability, alpha = 0.5);
plt.yscale('log', nonposy='clip')
plt.xscale('log', nonposx='clip')
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI astrophysical Reliability, size = instrumental FP reliability");
#plt.ylim([0, 2.5])
#plt.xlim([50, 400])
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("1-FPP");
```

|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaseline_gaiaRadCut@makePlanetInput.ipynb@.PATH_END.py
|
{
"filename": "_hovertext.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/_hovertext.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HovertextValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="hovertext", parent_name="histogram", **kwargs):
super(HovertextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@_hovertext.py@.PATH_END.py
|
{
"filename": "plot_hysteresis.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/doc/examples/filters/plot_hysteresis.py",
"type": "Python"
}
|
"""
=======================
Hysteresis thresholding
=======================
*Hysteresis* is the lagging of an effect---a kind of inertia. In the
context of thresholding, it means that areas above some *low* threshold
are considered to be above the threshold *if* they are also connected
to areas above a higher, more stringent, threshold. They can thus be
seen as continuations of these high-confidence areas.
Below, we compare normal thresholding to hysteresis thresholding.
Notice how hysteresis allows one to ignore "noise" outside of the coin
edges.
"""
import matplotlib.pyplot as plt
from skimage import data, filters
fig, ax = plt.subplots(nrows=2, ncols=2)
image = data.coins()
edges = filters.sobel(image)
low = 0.1
high = 0.35
lowt = (edges > low).astype(int)
hight = (edges > high).astype(int)
hyst = filters.apply_hysteresis_threshold(edges, low, high)
ax[0, 0].imshow(image, cmap='gray')
ax[0, 0].set_title('Original image')
ax[0, 1].imshow(edges, cmap='magma')
ax[0, 1].set_title('Sobel edges')
ax[1, 0].imshow(lowt, cmap='magma')
ax[1, 0].set_title('Low threshold')
ax[1, 1].imshow(hight + hyst, cmap='magma')
ax[1, 1].set_title('Hysteresis threshold')
for a in ax.ravel():
a.axis('off')
plt.tight_layout()
plt.show()
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@doc@examples@filters@plot_hysteresis.py@.PATH_END.py
|
{
"filename": "_sizeref.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/marker/_sizeref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizerefValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="sizeref", parent_name="scatter.marker", **kwargs):
super(SizerefValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@marker@_sizeref.py@.PATH_END.py
|
{
"filename": "test_config.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/tests/test_config.py",
"type": "Python"
}
|
import builtins
import time
from concurrent.futures import ThreadPoolExecutor
import pytest
import sklearn
from sklearn import config_context, get_config, set_config
from sklearn.utils.fixes import _IS_WASM
from sklearn.utils.parallel import Parallel, delayed
def test_config_context():
assert get_config() == {
"assume_finite": False,
"working_memory": 1024,
"print_changed_only": True,
"display": "diagram",
"array_api_dispatch": False,
"pairwise_dist_chunk_size": 256,
"enable_cython_pairwise_dist": True,
"transform_output": "default",
"enable_metadata_routing": False,
"skip_parameter_validation": False,
}
# Not using as a context manager affects nothing
config_context(assume_finite=True)
assert get_config()["assume_finite"] is False
with config_context(assume_finite=True):
assert get_config() == {
"assume_finite": True,
"working_memory": 1024,
"print_changed_only": True,
"display": "diagram",
"array_api_dispatch": False,
"pairwise_dist_chunk_size": 256,
"enable_cython_pairwise_dist": True,
"transform_output": "default",
"enable_metadata_routing": False,
"skip_parameter_validation": False,
}
assert get_config()["assume_finite"] is False
with config_context(assume_finite=True):
with config_context(assume_finite=None):
assert get_config()["assume_finite"] is True
assert get_config()["assume_finite"] is True
with config_context(assume_finite=False):
assert get_config()["assume_finite"] is False
with config_context(assume_finite=None):
assert get_config()["assume_finite"] is False
# global setting will not be retained outside of context that
# did not modify this setting
set_config(assume_finite=True)
assert get_config()["assume_finite"] is True
assert get_config()["assume_finite"] is False
assert get_config()["assume_finite"] is True
assert get_config() == {
"assume_finite": False,
"working_memory": 1024,
"print_changed_only": True,
"display": "diagram",
"array_api_dispatch": False,
"pairwise_dist_chunk_size": 256,
"enable_cython_pairwise_dist": True,
"transform_output": "default",
"enable_metadata_routing": False,
"skip_parameter_validation": False,
}
# No positional arguments
with pytest.raises(TypeError):
config_context(True)
# No unknown arguments
with pytest.raises(TypeError):
config_context(do_something_else=True).__enter__()
def test_config_context_exception():
assert get_config()["assume_finite"] is False
try:
with config_context(assume_finite=True):
assert get_config()["assume_finite"] is True
raise ValueError()
except ValueError:
pass
assert get_config()["assume_finite"] is False
def test_set_config():
assert get_config()["assume_finite"] is False
set_config(assume_finite=None)
assert get_config()["assume_finite"] is False
set_config(assume_finite=True)
assert get_config()["assume_finite"] is True
set_config(assume_finite=None)
assert get_config()["assume_finite"] is True
set_config(assume_finite=False)
assert get_config()["assume_finite"] is False
# No unknown arguments
with pytest.raises(TypeError):
set_config(do_something_else=True)
def set_assume_finite(assume_finite, sleep_duration):
"""Return the value of assume_finite after waiting `sleep_duration`."""
with config_context(assume_finite=assume_finite):
time.sleep(sleep_duration)
return get_config()["assume_finite"]
@pytest.mark.parametrize("backend", ["loky", "multiprocessing", "threading"])
def test_config_threadsafe_joblib(backend):
"""Test that the global config is threadsafe with all joblib backends.
Two jobs are spawned and sets assume_finite to two different values.
When the job with a duration 0.1s completes, the assume_finite value
should be the same as the value passed to the function. In other words,
it is not influenced by the other job setting assume_finite to True.
"""
assume_finites = [False, True, False, True]
sleep_durations = [0.1, 0.2, 0.1, 0.2]
items = Parallel(backend=backend, n_jobs=2)(
delayed(set_assume_finite)(assume_finite, sleep_dur)
for assume_finite, sleep_dur in zip(assume_finites, sleep_durations)
)
assert items == [False, True, False, True]
@pytest.mark.xfail(_IS_WASM, reason="cannot start threads")
def test_config_threadsafe():
"""Uses threads directly to test that the global config does not change
between threads. Same test as `test_config_threadsafe_joblib` but with
`ThreadPoolExecutor`."""
assume_finites = [False, True, False, True]
sleep_durations = [0.1, 0.2, 0.1, 0.2]
with ThreadPoolExecutor(max_workers=2) as e:
items = [
output
for output in e.map(set_assume_finite, assume_finites, sleep_durations)
]
assert items == [False, True, False, True]
def test_config_array_api_dispatch_error(monkeypatch):
"""Check error is raised when array_api_compat is not installed."""
# Hide array_api_compat import
orig_import = builtins.__import__
def mocked_import(name, *args, **kwargs):
if name == "array_api_compat":
raise ImportError
return orig_import(name, *args, **kwargs)
monkeypatch.setattr(builtins, "__import__", mocked_import)
with pytest.raises(ImportError, match="array_api_compat is required"):
with config_context(array_api_dispatch=True):
pass
with pytest.raises(ImportError, match="array_api_compat is required"):
set_config(array_api_dispatch=True)
def test_config_array_api_dispatch_error_numpy(monkeypatch):
"""Check error when NumPy is too old"""
# Pretend that array_api_compat is installed.
orig_import = builtins.__import__
def mocked_import(name, *args, **kwargs):
if name == "array_api_compat":
return object()
return orig_import(name, *args, **kwargs)
monkeypatch.setattr(builtins, "__import__", mocked_import)
monkeypatch.setattr(sklearn.utils._array_api.numpy, "__version__", "1.20")
with pytest.raises(ImportError, match="NumPy must be 1.21 or newer"):
with config_context(array_api_dispatch=True):
pass
with pytest.raises(ImportError, match="NumPy must be 1.21 or newer"):
set_config(array_api_dispatch=True)
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@tests@test_config.py@.PATH_END.py
|
{
"filename": "exceptions.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/_plotly_utils/exceptions.py",
"type": "Python"
}
|
class PlotlyError(Exception):
pass
class PlotlyEmptyDataError(PlotlyError):
pass
class PlotlyGraphObjectError(PlotlyError):
def __init__(self, message="", path=(), notes=()):
"""
General graph object error for validation failures.
:param (str|unicode) message: The error message.
:param (iterable) path: A path pointing to the error.
:param notes: Add additional notes, but keep default exception message.
"""
self.message = message
self.plain_message = message # for backwards compat
self.path = list(path)
self.notes = notes
super(PlotlyGraphObjectError, self).__init__(message)
def __str__(self):
"""This is called by Python to present the error message."""
format_dict = {
"message": self.message,
"path": "[" + "][".join(repr(k) for k in self.path) + "]",
"notes": "\n".join(self.notes),
}
return "{message}\n\nPath To Error: {path}\n\n{notes}".format(**format_dict)
class PlotlyDictKeyError(PlotlyGraphObjectError):
def __init__(self, obj, path, notes=()):
"""See PlotlyGraphObjectError.__init__ for param docs."""
format_dict = {"attribute": path[-1], "object_name": obj._name}
message = "'{attribute}' is not allowed in '{object_name}'".format(
**format_dict
)
notes = [obj.help(return_help=True)] + list(notes)
super(PlotlyDictKeyError, self).__init__(
message=message, path=path, notes=notes
)
class PlotlyDictValueError(PlotlyGraphObjectError):
def __init__(self, obj, path, notes=()):
"""See PlotlyGraphObjectError.__init__ for param docs."""
format_dict = {"attribute": path[-1], "object_name": obj._name}
message = "'{attribute}' has invalid value inside '{object_name}'".format(
**format_dict
)
notes = [obj.help(path[-1], return_help=True)] + list(notes)
super(PlotlyDictValueError, self).__init__(
message=message, notes=notes, path=path
)
class PlotlyListEntryError(PlotlyGraphObjectError):
def __init__(self, obj, path, notes=()):
"""See PlotlyGraphObjectError.__init__ for param docs."""
format_dict = {"index": path[-1], "object_name": obj._name}
message = "Invalid entry found in '{object_name}' at index, '{index}'".format(
**format_dict
)
notes = [obj.help(return_help=True)] + list(notes)
super(PlotlyListEntryError, self).__init__(
message=message, path=path, notes=notes
)
class PlotlyDataTypeError(PlotlyGraphObjectError):
def __init__(self, obj, path, notes=()):
"""See PlotlyGraphObjectError.__init__ for param docs."""
format_dict = {"index": path[-1], "object_name": obj._name}
message = "Invalid entry found in '{object_name}' at index, '{index}'".format(
**format_dict
)
note = "It's invalid because it doesn't contain a valid 'type' value."
notes = [note] + list(notes)
super(PlotlyDataTypeError, self).__init__(
message=message, path=path, notes=notes
)
class PlotlyKeyError(KeyError):
"""
KeyErrors are not printed as beautifully as other errors (this is so that
{}[''] prints "KeyError: ''" and not "KeyError:"). So here we use
LookupError's __str__ to make a PlotlyKeyError object which will print nicer
error messages for KeyErrors.
"""
def __str__(self):
return LookupError.__str__(self)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@_plotly_utils@exceptions.py@.PATH_END.py
|
{
"filename": "index.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/tasks/index.md",
"type": "Markdown"
}
|
---
comments: true
description: Explore Ultralytics YOLO11 for detection, segmentation, classification, OBB, and pose estimation with high accuracy and speed. Learn how to apply each task.
keywords: Ultralytics YOLO11, detection, segmentation, classification, oriented object detection, pose estimation, computer vision, AI framework
---
# Ultralytics YOLO11 Tasks
<img width="1024" src="https://github.com/ultralytics/docs/releases/download/0/ultralytics-yolov8-tasks-banner.avif" alt="Ultralytics YOLO supported tasks">
YOLO11 is an AI framework that supports multiple [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) **tasks**. The framework can be used to perform [detection](detect.md), [segmentation](segment.md), [obb](obb.md), [classification](classify.md), and [pose](pose.md) estimation. Each of these tasks has a different objective and use case.
<p align="center">
<br>
<iframe loading="lazy" width="720" height="405" src="https://www.youtube.com/embed/NAs-cfq9BDw"
title="YouTube video player" frameborder="0"
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share"
allowfullscreen>
</iframe>
<br>
<strong>Watch:</strong> Explore Ultralytics YOLO Tasks: <a href="https://www.ultralytics.com/glossary/object-detection">Object Detection</a>, Segmentation, OBB, Tracking, and Pose Estimation.
</p>
## [Detection](detect.md)
Detection is the primary task supported by YOLO11. It involves detecting objects in an image or video frame and drawing bounding boxes around them. The detected objects are classified into different categories based on their features. YOLO11 can detect multiple objects in a single image or video frame with high [accuracy](https://www.ultralytics.com/glossary/accuracy) and speed.
[Detection Examples](detect.md){ .md-button }
## [Segmentation](segment.md)
Segmentation is a task that involves segmenting an image into different regions based on the content of the image. Each region is assigned a label based on its content. This task is useful in applications such as [image segmentation](https://www.ultralytics.com/glossary/image-segmentation) and medical imaging. YOLO11 uses a variant of the U-Net architecture to perform segmentation.
[Segmentation Examples](segment.md){ .md-button }
## [Classification](classify.md)
Classification is a task that involves classifying an image into different categories. YOLO11 can be used to classify images based on their content. It uses a variant of the EfficientNet architecture to perform classification.
[Classification Examples](classify.md){ .md-button }
## [Pose](pose.md)
Pose/keypoint detection is a task that involves detecting specific points in an image or video frame. These points are referred to as keypoints and are used to track movement or pose estimation. YOLO11 can detect keypoints in an image or video frame with high accuracy and speed.
[Pose Examples](pose.md){ .md-button }
## [OBB](obb.md)
Oriented object detection goes a step further than regular object detection by introducing an extra angle to locate objects more accurately in an image. YOLO11 can detect rotated objects in an image or video frame with high accuracy and speed.
[Oriented Detection](obb.md){ .md-button }
## Conclusion
YOLO11 supports multiple tasks, including detection, segmentation, classification, oriented object detection, and keypoint detection. Each of these tasks has different objectives and use cases. By understanding the differences between these tasks, you can choose the appropriate task for your computer vision application.
## FAQ
### What tasks can Ultralytics YOLO11 perform?
Ultralytics YOLO11 is a versatile AI framework capable of performing various computer vision tasks with high accuracy and speed. These tasks include:
- **[Detection](detect.md):** Identifying and localizing objects in images or video frames by drawing bounding boxes around them.
- **[Segmentation](segment.md):** Segmenting images into different regions based on their content, useful for applications like medical imaging.
- **[Classification](classify.md):** Categorizing entire images based on their content, leveraging variants of the EfficientNet architecture.
- **[Pose estimation](pose.md):** Detecting specific keypoints in an image or video frame to track movements or poses.
- **[Oriented Object Detection (OBB)](obb.md):** Detecting rotated objects with an added orientation angle for enhanced accuracy.
### How do I use Ultralytics YOLO11 for object detection?
To use Ultralytics YOLO11 for object detection, follow these steps:
1. Prepare your dataset in the appropriate format.
2. Train the YOLO11 model using the detection task.
3. Use the model to make predictions by feeding in new images or video frames.
!!! example
=== "Python"
```python
from ultralytics import YOLO
# Load a pre-trained YOLO model (adjust model type as needed)
model = YOLO("yolo11n.pt") # n, s, m, l, x versions available
# Perform object detection on an image
results = model.predict(source="image.jpg") # Can also use video, directory, URL, etc.
# Display the results
results[0].show() # Show the first image results
```
=== "CLI"
```bash
# Run YOLO detection from the command line
yolo detect model=yolo11n.pt source="image.jpg" # Adjust model and source as needed
```
For more detailed instructions, check out our [detection examples](detect.md).
### What are the benefits of using YOLO11 for segmentation tasks?
Using YOLO11 for segmentation tasks provides several advantages:
1. **High Accuracy:** The segmentation task leverages a variant of the U-Net architecture to achieve precise segmentation.
2. **Speed:** YOLO11 is optimized for real-time applications, offering quick processing even for high-resolution images.
3. **Multiple Applications:** It is ideal for medical imaging, autonomous driving, and other applications requiring detailed image segmentation.
Learn more about the benefits and use cases of YOLO11 for segmentation in the [segmentation section](segment.md).
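As a minimal sketch (not from the docs above; it assumes segmentation follows the same API shape as the detection example, with a `-seg` checkpoint such as `yolo11n-seg.pt`):

```python
from ultralytics import YOLO

# Load a pretrained segmentation model (checkpoint name assumed)
model = YOLO("yolo11n-seg.pt")

# Run segmentation on an image; each result carries the predicted masks
results = model.predict(source="image.jpg")
results[0].show()  # overlay the masks on the input image
print(results[0].masks)  # per-instance segmentation masks
```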
### Can Ultralytics YOLO11 handle pose estimation and keypoint detection?
Yes, Ultralytics YOLO11 can effectively perform pose estimation and keypoint detection with high accuracy and speed. This feature is particularly useful for tracking movements in sports analytics, healthcare, and human-computer interaction applications. YOLO11 detects keypoints in an image or video frame, allowing for precise pose estimation.
For more details and implementation tips, visit our [pose estimation examples](pose.md).
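A comparable hedged sketch for pose estimation, assuming a `-pose` checkpoint such as `yolo11n-pose.pt`:

```python
from ultralytics import YOLO

# Load a pretrained pose-estimation model (checkpoint name assumed)
model = YOLO("yolo11n-pose.pt")

# Detect keypoints; results[0].keypoints holds the per-person keypoints
results = model.predict(source="image.jpg")
results[0].show()
```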
### Why should I choose Ultralytics YOLO11 for oriented object detection (OBB)?
Oriented Object Detection (OBB) with YOLO11 provides enhanced [precision](https://www.ultralytics.com/glossary/precision) by detecting objects with an additional angle parameter. This feature is beneficial for applications requiring accurate localization of rotated objects, such as aerial imagery analysis and warehouse automation.
- **Increased Precision:** The angle component reduces false positives for rotated objects.
- **Versatile Applications:** Useful for tasks in geospatial analysis, robotics, etc.
Check out the [Oriented Object Detection section](obb.md) for more details and examples.
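And a hedged sketch for OBB, assuming an `-obb` checkpoint such as `yolo11n-obb.pt`:

```python
from ultralytics import YOLO

# Load a pretrained oriented-bounding-box model (checkpoint name assumed)
model = YOLO("yolo11n-obb.pt")

# Detect rotated boxes; results[0].obb holds boxes with an angle component
results = model.predict(source="image.jpg")
results[0].show()
```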
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@tasks@index.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/chart-studio/chart_studio/plotly/__init__.py",
"type": "Python"
}
|
"""
plotly
======
This module defines functionality that requires interaction between your
local machine and Plotly. Almost all functionality used here will require a
verifiable account (username/api-key pair) and a network connection.
"""
from .plotly import (
sign_in,
update_plot_options,
get_credentials,
iplot,
plot,
iplot_mpl,
plot_mpl,
get_figure,
Stream,
image,
grid_ops,
meta_ops,
file_ops,
get_config,
get_grid,
dashboard_ops,
presentation_ops,
create_animations,
icreate_animations,
parse_grid_id_args,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@chart-studio@chart_studio@plotly@__init__.py@.PATH_END.py
|
{
"filename": "nucliadb.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/vectorstores/nucliadb.ipynb",
"type": "Jupyter Notebook"
}
|
# NucliaDB
You can use a local NucliaDB instance or use [Nuclia Cloud](https://nuclia.cloud).
When using a local instance, you need a Nuclia Understanding API key, so your texts are properly vectorized and indexed. You can get a key by creating a free account at [https://nuclia.cloud](https://nuclia.cloud), and then [create a NUA key](https://docs.nuclia.dev/docs/docs/using/understanding/intro).
```python
%pip install --upgrade --quiet langchain langchain-community nuclia
```
## Usage with nuclia.cloud
```python
from langchain_community.vectorstores.nucliadb import NucliaDB
API_KEY = "YOUR_API_KEY"
ndb = NucliaDB(knowledge_box="YOUR_KB_ID", local=False, api_key=API_KEY)
```
## Usage with a local instance
Note: By default `backend` is set to `http://localhost:8080`.
```python
from langchain_community.vectorstores.nucliadb import NucliaDB
ndb = NucliaDB(knowledge_box="YOUR_KB_ID", local=True, backend="http://my-local-server")
```
## Add and delete texts to your Knowledge Box
```python
ids = ndb.add_texts(["This is a new test", "This is a second test"])
```
```python
ndb.delete(ids=ids)
```
## Search in your Knowledge Box
```python
results = ndb.similarity_search("Who was inspired by Ada Lovelace?")
print(results[0].page_content)
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@vectorstores@nucliadb.ipynb@.PATH_END.py
|
{
"filename": "tl_dish.py",
"repo_name": "TianlaiProject/tlpipe",
"repo_path": "tlpipe_extracted/tlpipe-master/tlpipe/map/drift/telescope/tl_dish.py",
"type": "Python"
}
|
import os
import abc
import numpy as np
from scipy.special import jn
from cora.util import coord
from ..core import telescope
def ang_conv(ang):
"""
    Convert the string representation of an angle in degrees to a float number in degrees.
Parameters
----------
ang : string
        string representing the angle in the format `xx:xx:xx`
"""
ang = ang.split(":")
tmp = 0.0
for n in range(len(ang)):
tmp += float(ang[n]) / 60.0**n
return tmp
def latlon_to_sphpol(latlon):
zenith = np.array([np.pi / 2.0 - np.radians(latlon[0]),
np.remainder(np.radians(latlon[1]), 2*np.pi)])
return zenith
def beam_circular(angpos, zenith, diameter):
"""Beam pattern for a uniformly illuminated circular dish.
Parameters
----------
angpos : np.ndarray
Array of angular positions
zenith : np.ndarray
Co-ordinates of the zenith.
diameter : scalar
Diameter of the dish (in units of wavelength).
Returns
-------
beam : np.ndarray
Beam pattern at each position in angpos.
"""
    def jinc(x):
        # jinc(x) = J1(x)/x, via the Bessel identity J1(x)/x = (J0(x) + J2(x))/2
        return 0.5 * (jn(0, x) + jn(2, x))
x = (1.0 - coord.sph_dot(angpos, zenith)**2)**0.5 * np.pi * diameter
return 2*jinc(x)
class TlDishArray(object):
"""A abstract base class describing the Tianlai dishe array for inheriting by sub-classes.
Attributes
----------
ants : list
List of antennas to use, number starts from 1.
zenith : [lat, lon]
Geometric position of the array.
pointing: [az, alt, twist]
Antenna beam to point at (az, alt) with specified right-hand twist to polarizations.
Polarization y is assumed to be +pi/2 azimuth from pol x.
freq_lower, freq_higher : scalar
The lower / upper bound of the lowest / highest frequency bands.
num_freq : scalar
The number of frequency bands (only use for setting up the frequency
binning). Generally using `nfreq` is preferred.
tsys_flat : scalar
The system temperature (in K). Override `tsys` for anything more
sophisticated.
dish_width : scalar
Width of the dish in metres.
center_dish : integer
The reference dish.
freq_inds: list
Choose frequency channels to include.
"""
__metaclass__ = abc.ABCMeta # Enforce Abstract class
def __init__(self, dish_width=6.0, feedpos=np.zeros((0, 3)), pointing=[0.0, 90.0, 0.0]):
self.dish_width = dish_width
self.ants = len(feedpos)
self.feedpos = feedpos[:, :2]
self.pointing = pointing
# Give the widths in the U and V directions in metres (used for
# calculating the maximum l and m)
@property
def u_width(self):
return self.dish_width
@property
def v_width(self):
return self.dish_width
# Set the feed array of feed positions (in metres EW, NS)
@property
def _single_feedpositions(self):
## An (nfeed,2) array of the feed positions relative to an arbitary point (in m)
# pos = np.loadtxt(os.path.dirname(__file__) + '/16dishes_coord.txt')
# cpos = pos[self.center_dish] # central antenna coordinate
# pos -= cpos
# pos = pos[np.array(self.ants)-1] # choose antennas to include
# return pos
return self.feedpos
_point_direction = None
@property
def point_dirction(self):
"""The pointing vector [theta, phi], which is the direction of the maximum beam response."""
if self._point_direction is None:
self.set_pointing()
# return self._point_direction
return np.array([self._point_direction[0], 0.0]) # make phi = 0 for convenience
def set_pointing(self):
"""Set the antenna beam to point at (az, alt). """
az, alt, twist = np.radians(self.pointing)
lat = np.pi/2 - self.zenith[0]
lon = self.zenith[1]
saz, caz = np.sin(az), np.cos(az)
salt, calt = np.sin(alt), np.cos(alt)
slat, clat = np.sin(lat), np.cos(lat)
slon, clon = np.sin(lon), np.cos(lon)
# matrix to convert vector in topocentric coordinate to equatorial coordinate (x starts from the vernal equinox)
top2eq_m = np.array([[-slon, -slat*clon, clat*clon],
[ clon, -slat*slon, clat*slon],
[ 0, clat, slat]])
p_top = np.array([saz*calt, caz*calt, salt]) # point_direction in topocentric coord
p_eq = np.dot(top2eq_m, p_top) # point_direction in equatorial coord
self._point_direction = coord.cart_to_sph(p_eq)[-2:]
class TlUnpolarisedDishArray(TlDishArray, telescope.SimpleUnpolarisedTelescope):
"""A Telescope describing the Tianlai non-polarized dishe array.
See Also
--------
This class also inherits some useful properties, such as `zenith` for
giving the telescope location and `tsys_flat` for giving the system
temperature.
"""
def __init__(self, latitude=45, longitude=0, freqs=[], band_width=None, tsys_flat=50.0, ndays=1.0, accuracy_boost=1.0, l_boost=1.0, bl_range=[0.0, 1.0e7], auto_correlations=False, local_origin=True, dish_width=6.0, feedpos=np.zeros((0, 3)), pointing=[0.0, 90.0, 0.0]):
TlDishArray.__init__(self, dish_width, feedpos, pointing)
telescope.SimpleUnpolarisedTelescope.__init__(self, latitude, longitude, freqs, band_width, tsys_flat, ndays, accuracy_boost, l_boost, bl_range, auto_correlations, local_origin)
def beam(self, feed, freq):
"""Beam for a particular feed.
Parameters
----------
feed : integer
Index for the feed.
freq : integer
Index for the frequency.
Returns
-------
beam : np.ndarray
A Healpix map (of size self._nside) of the beam. Potentially
complex.
"""
# return beam_circular(self._angpos, self.zenith,
# self.dish_width / self.wavelengths[freq])
return beam_circular(self._angpos, self.point_dirction,
self.dish_width / self.wavelengths[freq])
# class TlPolarisedDishArray(telescope.SimplePolarisedTelescope):
# """A Telescope describing the Tianlai polarized dishe array.
# See Also
# --------
# This class also inherits some useful properties, such as `zenith` for
# giving the telescope location and `tsys_flat` for giving the system
# temperature.
# """
# # Implement the X and Y beam patterns (assuming all feeds are identical).
# # These need to return a vector for each position on the sky
# # (self._angpos) in thetahat, phihat coordinates.
# def beamx(self, feed, freq):
# """Beam for the X polarisation feed.
# Parameters
# ----------
# feed : integer
# Index for the feed.
# freq : integer
# Index for the frequency.
# Returns
# -------
# beam : np.ndarray
# Healpix maps (of size [self._nside, 2]) of the field pattern in the
# theta and phi directions.
# """
# # Calculate beam amplitude
# # beam = beam_circular(self._angpos, self.zenith,
# # self.dish_width / self.wavelengths[freq])
# return beam_circular(self._angpos, self.point_dirction,
# self.dish_width / self.wavelengths[freq])
# # Add a vector direction to beam - X beam is EW (phihat)
# beam = beam[:, np.newaxis] * np.array([0.0, 1.0])
# return beam
# def beamy(self, feed, freq):
# """Beam for the Y polarisation feed.
# Parameters
# ----------
# feed : integer
# Index for the feed.
# freq : integer
# Index for the frequency.
# Returns
# -------
# beam : np.ndarray
# Healpix maps (of size [self._nside, 2]) of the field pattern in the
# theta and phi directions.
# """
# # Calculate beam amplitude
# # beam = beam_circular(self._angpos, self.zenith,
# # self.dish_width / self.wavelengths[freq])
# return beam_circular(self._angpos, self.point_dirction,
# self.dish_width / self.wavelengths[freq])
# # Add a vector direction to beam - Y beam is NS (thetahat)
# # Fine provided beam does not cross a pole.
# beam = beam[:, np.newaxis] * np.array([1.0, 0.0])
# return beam
|
TianlaiProjectREPO_NAMEtlpipePATH_START.@tlpipe_extracted@tlpipe-master@tlpipe@map@drift@telescope@tl_dish.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "OSSOS/SurveySimulator",
"repo_path": "SurveySimulator_extracted/SurveySimulator-master/python/README.md",
"type": "Markdown"
}
|
## The python version of the Survey Simulator
The python implementation creates a `module` using the `F95` source code and then builds a set of
python classes to interact with the simulator via the compiled `F95` module `ossssim.SurveySubsF95`.
The python code is documented and `pydoc` provides some details on how to use the classes.
See `examples` for some implementations of various solar system models being passed through the Survey Simulator.
### Installation
The python process installs the package in your system or home area, depending on how you install the package.
The basic process is:
`pip install .`
Then run `pydoc ossssim` for details, or look in the `examples` directory.
### Contents
- ossssim : The python module
- ossssim/lib : The compiled fortran module will be here after you build
- test : A set of unit tests
- examples : Some examples of using the python implementation of the Survey Simulator.
- Manifest.in : data files that should be installed with `ossssim`
- setup.py : standard python installation script using `setuptools`
|
OSSOSREPO_NAMESurveySimulatorPATH_START.@SurveySimulator_extracted@SurveySimulator-master@python@README.md@.PATH_END.py
|
{
"filename": "val.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/reference/models/yolo/segment/val.md",
"type": "Markdown"
}
|
---
description: Explore the YOLO Segmentation Validator module for validating segment models. Understand its usage, metrics, and implementation within the Ultralytics framework.
keywords: YOLO, segmentation, validator, Ultralytics, model validation, machine learning, deep learning, AI, computer vision
---
# Reference for `ultralytics/models/yolo/segment/val.py`
!!! note
This file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/val.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/models/yolo/segment/val.py). If you spot a problem please help fix it by [contributing](https://docs.ultralytics.com/help/contributing/) a [Pull Request](https://github.com/ultralytics/ultralytics/edit/main/ultralytics/models/yolo/segment/val.py) 🛠️. Thank you 🙏!
<br>
## ::: ultralytics.models.yolo.segment.val.SegmentationValidator
<br><br>
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@reference@models@yolo@segment@val.md@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "athob/py-ananke",
"repo_path": "py-ananke_extracted/py-ananke-main/src/ananke/utils.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
Module miscellaneous utilities
"""
from typing import Optional, List
import re
import docstring_parser as DS_parser
import pandas as pd
from Galaxia_ananke import utils as Gutils
__all__ = ['compare_given_and_required', 'confirm_equal_length_arrays_in_dict', 'RecordingDataFrame', 'extract_parameters_from_docstring', 'extract_notes_from_docstring']
compare_given_and_required = Gutils.compare_given_and_required
confirm_equal_length_arrays_in_dict = Gutils.confirm_equal_length_arrays_in_dict
class RecordingDataFrame(pd.DataFrame):
"""
Pandas DataFrame that records all its used keys from getitem
"""
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
self._record_of_all_used_keys = set()
def _add_to_record_of_all_used_keys(self, keys):
if isinstance(keys, str):
keys = [keys]
for key in keys:
self._record_of_all_used_keys.add(key)
def __getitem__(self, key):
self._add_to_record_of_all_used_keys(key)
return super().__getitem__(key)
# def __setitem__(self, key, value):
# self._add_to_record_of_all_used_keys(key)
# super().__setitem__(key, value)
# def __delitem__(self, key):
# self._add_to_record_of_all_used_keys(key)
# super().__delitem__(key)
@property
def record_of_all_used_keys(self):
return self._record_of_all_used_keys
def extract_parameters_from_docstring(docstring: str, parameters: Optional[List[str]] = None, ignore: Optional[List[str]] = None) -> str:
input_DS = DS_parser.parse(docstring)
output_DS = DS_parser.Docstring()
output_DS.style = input_DS.style
output_DS.meta = [param
for param in input_DS.params
if (True if parameters is None else param.arg_name in parameters) and (True if ignore is None else param.arg_name not in ignore)]
temp_docstring = re.split("\n-*\n",DS_parser.compose(output_DS),maxsplit=1)[1]
return '\n'.join([line if line[:1] in ['', ' '] else f"\n{line}" for line in temp_docstring.split('\n')])
def extract_notes_from_docstring(docstring: str) -> str:
input_DS = DS_parser.parse(docstring)
output_DS = DS_parser.Docstring()
output_DS.style = input_DS.style
output_DS.meta = [meta for meta in input_DS.meta if 'notes' in meta.args]
return re.split("\n-*\n",DS_parser.compose(output_DS),maxsplit=1)[1]
|
athobREPO_NAMEpy-anankePATH_START.@py-ananke_extracted@py-ananke-main@src@ananke@utils.py@.PATH_END.py
|
{
"filename": "O1O2data.py",
"repo_name": "CosmoStatGW/MGCosmoPop",
"repo_path": "MGCosmoPop_extracted/MGCosmoPop-master/MGCosmoPop/dataStructures/O1O2data.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# Copyright (c) 2021 Michele Mancarella <michele.mancarella@unige.ch>
#
# All rights reserved. Use of this source code is governed by a modified BSD
# license that can be found in the LICENSE file.
from .ABSdata import Data, LVCData
import numpy as np
import astropy.units as u
import h5py
import os
import sys
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
from astropy.cosmology import Planck15
from cosmology.cosmo import Cosmo
import Globals
class O1O2Data(LVCData):
def __init__(self, fname, which_metadata='GWOSC', force_BNS=False, **kwargs):#nObsUse=None, nSamplesUse=None, dist_unit=u.Gpc, events_use=None, which_spins='skip' ):
self.post_file_extension='.hdf5'
self.force_BNS=force_BNS
import pandas as pd
if which_metadata=='GWOSC':
            print('Using SNRs and FARs from the public version of the GWTC-1 catalog from the GWOSC')
self.metadata = pd.read_csv(os.path.join(fname, 'GWTC-1-confident.csv'))
else:
            print('Using best SNRs and FARs from all pipelines as reported in the GWTC-3 catalog paper')
self.metadata = pd.read_csv(os.path.join(Globals.dataPath, 'all_metadata_pipelines_best.csv'))
LVCData.__init__(self, fname, **kwargs) #nObsUse=nObsUse, nSamplesUse=nSamplesUse, dist_unit=dist_unit, events_use=events_use, which_spins=which_spins)
def _set_Tobs(self):
# The first observing run (O1) ran from September 12th, 2015 to January 19th, 2016 --> 129 days
# From https://journals.aps.org/prx/pdf/10.1103/PhysRevX.6.041015:
# after data quality flags, the remaining coincident analysis time in O1 is 48.3 days with GSTLal analysis, 46.1 with pycbc
# The second observing run (O2) ran from November 30th, 2016 to August 25th, 2017 --> 267 days
# During the O2 run the duty cycles were 62% for LIGO Hanford and 61% for LIGO Livingston,
# so that two detectors were in observing mode 46.4% of the time and at least one detector
# was in observing mode 75.6% of the time.
# From https://journals.aps.org/prx/pdf/10.1103/PhysRevX.9.031040 :
# During O2, the individual LIGO detectors had duty factors of approximately 60% with a LIGO
# network duty factor of about 45%. Times with significant instrumental disturbances are flagged and removed,
# resulting in about 118 days of data suitable for coincident analysis
#self.Tobs= (48.3+118)/365. # yrs
self.Tobs= (129+267)/365. # yrs
def _get_not_BBHs(self):
if not self.force_BNS:
return ['GW170817', ]
else:
return []
def _name_conditions(self, f ):
return ( ( 'prior' not in f.split('.')[0] ) & (f.split('_')[0][:2]=='GW') )
def _get_name_from_fname(self, fname):
return fname.split('.')[0].split('_')[0]
def _load_data_event(self, fname, event, nSamplesUse, which_spins='skip'):
data_path = os.path.join(fname, event+'_GWTC-1'+self.post_file_extension)
with h5py.File(data_path, 'r') as f:
try:
posterior_samples = f['Overall_posterior']
except Exception as e:
print(e)
print(f.keys())
_keys = ['m1_detector_frame_Msun', 'm2_detector_frame_Msun', 'luminosity_distance_Mpc',
'right_ascension', 'declination', 'costheta_jn']
m1z, m2z, dL, ra, dec, costh = [posterior_samples[k] for k in _keys]
try:
w = posterior_samples['weights_bin']
except Exception as e:
print(e)
w = np.ones(1)
if which_spins=='skip':
spins=[]
elif which_spins=='chiEff':
#print('chi_p not available for O1-O2 data ! ')
s1 = posterior_samples['spin1']
s2 = posterior_samples['spin2']
cost1 = posterior_samples['costilt1']
cost2 = posterior_samples['costilt2']
sint1 = np.sqrt(1-cost1**2)
sint2 = np.sqrt(1-cost2**2)
chi1z = s1*cost1
chi2z = s2*cost2
q = m2z/m1z
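            # standard effective aligned spin chi_eff = (chi1z + q*chi2z)/(1+q) and precessing spin chi_p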
chiEff = (chi1z+q*chi2z)/(1+q)
chiP = np.max( np.array([s1*sint1, (4*q+3)/(4+3*q)*q*s2*sint2 ]) , axis=0 )
spins=[chiEff, chiP]
elif which_spins=='s1s2':
raise NotImplementedError()
s1 = posterior_samples['spin1']
s2 = posterior_samples['spin2']
spins=[s1,s2]
elif which_spins=='chi1zchi2z':
s1 = posterior_samples['spin1']*posterior_samples['costilt1']
s2 = posterior_samples['spin2']*posterior_samples['costilt2']
spins=[s1,s2]
elif which_spins=='default':
s1 = posterior_samples['spin1']
s2 = posterior_samples['spin2']
cost1 = posterior_samples['costilt1']
cost2 = posterior_samples['costilt2']
spins = [s1, s2, cost1, cost2]
iota = np.arccos(costh)
# Downsample if needed
#all_ds = self._downsample( [m1z, m2z, dL, w, *spins,], nSamplesUse)
#m1z = all_ds[0]
#m2z= all_ds[1]
#dL = all_ds[2]
#spins = all_ds[4:]
#ws = all_ds[3]
return m1z, m2z, dL, ra, dec, iota, spins, w
class O1O2InjectionsData(Data):
def __init__(self, fname, nInjUse=None, dist_unit=u.Gpc, ifar_th=1 , which_spins='skip', snr_th=None ):
self.dist_unit=dist_unit
self.m1z, self.m2z, self.dL, self.spins, self.log_weights_sel, self.N_gen, self.Tobs = self._load_data(fname, nInjUse, which_spins=which_spins )
self.logN_gen = np.log(self.N_gen)
#self.log_weights_sel = np.log(self.weights_sel)
assert (self.m1z > 0).all()
assert (self.m2z > 0).all()
assert (self.dL > 0).all()
assert(self.m2z<self.m1z).all()
#self.Tobs=0.5
self.chiEff = np.zeros(self.m1z.shape)
print('Obs time: %s yrs' %self.Tobs )
self.ifar_th=ifar_th
#gstlal_ifar, pycbc_ifar, pycbc_bbh_ifar = conditions_arr
self.condition = np.full(self.m1z.shape, True)
#(gstlal_ifar > ifar_th) | (pycbc_ifar > ifar_th) | (pycbc_bbh_ifar > ifar_th)
# np.full(self.m1z.shape, True) #
def get_theta(self):
return np.array( [self.m1z, self.m2z, self.dL ] )
def _load_data(self, fname, nInjUse, which_spins='skip'):
with h5py.File(fname, 'r') as f:
print(f.attrs.keys())
print(f.keys())
Tobs = 1.084931506849315 #f.attrs['analysis_time_s']/(365.25*24*3600) # years (48.3+118)/365. #
Ndraw = 7.1e07 #f.attrs['total_generated']
m1 = np.array(f['mass1_source'])
m2 = np.array(f['mass2_source'])
z = np.array(f['redshift'])
#s1z = np.array(f['injections/spin1z'])
#s2z = np.array(f['injections/spin2z'])
if which_spins=='skip':
spins=[]
elif which_spins=='chiEff':
chi1z = np.array(f['spin1z'])
chi2z = np.array(f['spin2z'])
q = m2/m1
chiEff = (chi1z+q*chi2z)/(1+q)
print('chi_p not available for O2 selection effects ! ')
spins=[chiEff, np.full(chiEff.shape, np.NaN)]
#raise NotImplementedError()
elif which_spins=='s1s2':
raise NotImplementedError()
s1 = np.array(f['spin1z'])
s2 = np.array(f['spin2z'])
spins=[s1,s2]
p_draw = np.array(f['sampling_pdf'])
if which_spins=='skip':
print('Removing factor of 1/2 for each spin dimension from p_draw...')
p_draw *= 4
log_p_draw = np.log(p_draw)
#gstlal_ifar = np.array(f['injections/ifar_gstlal'])
#pycbc_ifar = np.array(f['injections/ifar_pycbc_full'])
#pycbc_bbh_ifar = np.array(f['injections/ifar_pycbc_bbh'])
m1z = m1*(1+z)
m2z = m2*(1+z)
dL = np.array(Planck15.luminosity_distance(z).to(self.dist_unit).value)
#dL = np.array(f['injections/distance']) #in Mpc for GWTC2 !
#if self.dist_unit==u.Gpc:
# dL*=1e-03
print('Re-weighting p_draw to go to detector frame quantities...')
myCosmo = Cosmo(dist_unit=self.dist_unit)
#p_draw/=(1+z)**2
#p_draw/=myCosmo.ddL_dz(z, Planck15.H0.value, Planck15.Om0, -1., 1., 0) #z, H0, Om, w0, Xi0, n
log_p_draw -=2*np.log1p(z)
log_p_draw -= myCosmo.log_ddL_dz(z, Planck15.H0.value, Planck15.Om0, -1., 1., 0. )
print('Number of total injections: %s' %Ndraw)
print('Number of injections that pass first threshold: %s' %p_draw.shape[0])
self.max_z = np.max(z)
print('Max redshift of injections: %s' %self.max_z)
return m1z, m2z, dL , spins, log_p_draw , Ndraw, Tobs#, (gstlal_ifar, pycbc_ifar, pycbc_bbh_ifar)
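# --- Usage sketch (added; illustrative, not part of the original module) ---
# Loading the O1-O2 injection set; the filename below is hypothetical and must
# point to an injection file with the layout assumed by _load_data above.
#
# import astropy.units as u
# inj = O1O2InjectionsData('o1o2_injections.h5', dist_unit=u.Gpc,
#                          which_spins='skip')
# m1z, m2z, dL = inj.get_theta()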
|
CosmoStatGWREPO_NAMEMGCosmoPopPATH_START.@MGCosmoPop_extracted@MGCosmoPop-master@MGCosmoPop@dataStructures@O1O2data.py@.PATH_END.py
|
{
"filename": "skymap.py",
"repo_name": "Fermipy/fermipy",
"repo_path": "fermipy_extracted/fermipy-master/fermipy/skymap.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import copy
import numpy as np
import healpy as hp
from scipy.interpolate import RegularGridInterpolator
from scipy.ndimage import map_coordinates
from astropy.io import fits
from astropy.wcs import WCS
from astropy.table import Table
from astropy.coordinates import SkyCoord
from astropy.coordinates import Galactic, ICRS
import gammapy
import fermipy.utils as utils
import fermipy.wcs_utils as wcs_utils
import fermipy.hpx_utils as hpx_utils
import fermipy.fits_utils as fits_utils
from fermipy.hpx_utils import HPX, HpxToWcsMapping
def coadd_maps(geom, maps, preserve_counts=True):
"""Coadd a sequence of `~gammapy.maps.Map` objects."""
# FIXME: This functionality should be built into the Map.coadd method
map_out = gammapy.maps.Map.from_geom(geom)
for m in maps:
m_tmp = m
if isinstance(m, gammapy.maps.HpxNDMap):
if m.geom.order < map_out.geom.order:
factor = map_out.geom.nside // m.geom.nside
m_tmp = m.upsample(factor, preserve_counts=preserve_counts)
map_out.coadd(m_tmp)
return map_out
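# Usage sketch (added; illustrative): `map_a` and `map_b` are assumed to be
# gammapy HEALPix maps on geometries compatible with the target `geom`.
#
# geom = gammapy.maps.HpxGeom.create(nside=64)
# summed = coadd_maps(geom, [map_a, map_b])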
def make_coadd_map(maps, proj, shape, preserve_counts=True):
if isinstance(proj, WCS):
return make_coadd_wcs(maps, proj, shape)
elif isinstance(proj, HPX):
return make_coadd_hpx(maps, proj, shape, preserve_counts=preserve_counts)
else:
raise Exception("Can't co-add map of unknown type %s" % type(proj))
def make_coadd_wcs(maps, wcs, shape):
data = np.zeros(shape)
axes = wcs_utils.wcs_to_axes(wcs, shape)
for m in maps:
c = wcs_utils.wcs_to_coords(m.wcs, m.counts.shape)
o = np.histogramdd(c.T, bins=axes[::-1], weights=np.ravel(m.counts))[0]
data += o
return Map(data, copy.deepcopy(wcs))
def make_coadd_hpx(maps, hpx, shape, preserve_counts=True):
data = np.zeros(shape)
axes = hpx_utils.hpx_to_axes(hpx, shape)
for m in maps:
if m.hpx.order != hpx.order:
m_copy = m.ud_grade(hpx.order, preserve_counts)
else:
m_copy = m
c = hpx_utils.hpx_to_coords(m_copy.hpx, m_copy.counts.shape)
o = np.histogramdd(c.T, bins=axes, weights=np.ravel(m_copy.counts))[0]
data += o
return HpxMap(data, copy.deepcopy(hpx))
def read_map_from_fits(fitsfile, extname=None):
"""
"""
proj, f, hdu = fits_utils.read_projection_from_fits(fitsfile, extname)
if isinstance(proj, WCS):
ebins = fits_utils.find_and_read_ebins(f)
m = Map(hdu.data, proj, ebins=ebins)
elif isinstance(proj, HPX):
m = HpxMap.create_from_hdu(hdu, proj.ebins)
else:
raise Exception("Did not recognize projection type %s" % type(proj))
return m
class Map_Base(object):
""" Abstract representation of a 2D or 3D counts map."""
def __init__(self, counts):
self._counts = counts
@property
def counts(self):
return self._counts
@property
def data(self):
return self._counts
@data.setter
def data(self, val):
if val.shape != self.data.shape:
raise Exception('Wrong shape.')
self._counts = val
def get_pixel_skydirs(self):
"""Get a list of sky coordinates for the centers of every pixel. """
raise NotImplementedError("MapBase.get_pixel_skydirs()")
def get_pixel_indices(self, lats, lons):
"""Return the indices in the flat array corresponding to a set of coordinates """
raise NotImplementedError("MapBase.get_pixel_indices()")
def sum_over_energy(self):
"""Reduce a counts cube to a counts map by summing over the energy planes """
raise NotImplementedError("MapBase.sum_over_energy()")
def get_map_values(self, lons, lats, ibin=None):
"""Return the map values corresponding to a set of coordinates. """
raise NotImplementedError("MapBase.get_map_values()")
def interpolate(self, lon, lat, egy=None):
"""Return the interpolated map values corresponding to a set of coordinates. """
raise NotImplementedError("MapBase.interpolate()")
class Map(Map_Base):
""" Representation of a 2D or 3D counts map using WCS. """
def __init__(self, counts, wcs, ebins=None):
"""
Parameters
----------
counts : `~numpy.ndarray`
Counts array in row-wise ordering (LON is first dimension).
"""
Map_Base.__init__(self, counts)
self._wcs = wcs
self._npix = counts.shape[::-1]
if len(self._npix) == 3:
self._xindex = 2
self._yindex = 1
elif len(self._npix) == 2:
self._xindex = 1
self._yindex = 0
else:
raise Exception('Wrong number of dimensions for Map object.')
# if len(self._npix) != 3 and len(self._npix) != 2:
# raise Exception('Wrong number of dimensions for Map object.')
self._width = np.array([np.abs(self.wcs.wcs.cdelt[0]) * self.npix[0],
np.abs(self.wcs.wcs.cdelt[1]) * self.npix[1]])
self._pix_center = np.array([(self.npix[0] - 1.0) / 2.,
(self.npix[1] - 1.0) / 2.])
self._pix_size = np.array([np.abs(self.wcs.wcs.cdelt[0]),
np.abs(self.wcs.wcs.cdelt[1])])
self._skydir = SkyCoord.from_pixel(self._pix_center[0],
self._pix_center[1],
self.wcs)
self._ebins = ebins
if ebins is not None:
self._ectr = np.exp(utils.edge_to_center(np.log(ebins)))
else:
self._ectr = None
@property
def wcs(self):
return self._wcs
@property
def npix(self):
return self._npix
@property
def skydir(self):
"""Return the sky coordinate of the image center."""
return self._skydir
@property
def width(self):
"""Return the dimensions of the image."""
return self._width
@property
def pix_size(self):
"""Return the pixel size along the two image dimensions."""
return self._pix_size
@property
def pix_center(self):
"""Return the ROI center in pixel coordinates."""
return self._pix_center
@classmethod
def create_from_hdu(cls, hdu, wcs):
return cls(hdu.data.T, wcs)
@classmethod
def create_from_fits(cls, fitsfile, **kwargs):
hdu = kwargs.get('hdu', 0)
with fits.open(fitsfile) as hdulist:
header = hdulist[hdu].header
data = hdulist[hdu].data
header = fits.Header.fromstring(header.tostring())
wcs = WCS(header)
ebins = None
if 'ENERGIES' in hdulist:
tab = Table.read(fitsfile, 'ENERGIES')
ectr = np.array(tab.columns[0])
ebins = np.exp(utils.center_to_edge(np.log(ectr)))
elif 'EBOUNDS' in hdulist:
tab = Table.read(fitsfile, 'EBOUNDS')
emin = np.array(tab['E_MIN']) / 1E3
emax = np.array(tab['E_MAX']) / 1E3
ebins = np.append(emin, emax[-1])
return cls(data, wcs, ebins)
@classmethod
def create(cls, skydir, cdelt, npix, coordsys='CEL', projection='AIT', ebins=None, differential=False):
crpix = np.array([n / 2. + 0.5 for n in npix])
if ebins is not None:
if differential:
nebins = len(ebins)
else:
nebins = len(ebins) - 1
data = np.zeros(list(npix) + [nebins]).T
naxis = 3
else:
data = np.zeros(npix).T
naxis = 2
wcs = wcs_utils.create_wcs(skydir, coordsys, projection,
cdelt, crpix, naxis=naxis, energies=ebins)
return cls(data, wcs, ebins=ebins)
def create_image_hdu(self, name=None, **kwargs):
return fits.ImageHDU(self.counts, header=self.wcs.to_header(),
name=name)
def create_primary_hdu(self):
return fits.PrimaryHDU(self.counts, header=self.wcs.to_header())
def sum_over_energy(self):
""" Reduce a 3D counts cube to a 2D counts map
"""
# Note that the array is using the opposite convention from WCS
# so we sum over axis 0 in the array, but drop axis 2 in the WCS object
return Map(np.sum(self.counts, axis=0), self.wcs.dropaxis(2))
def xypix_to_ipix(self, xypix, colwise=False):
"""Return the flattened pixel indices from an array multi-dimensional
pixel indices.
Parameters
----------
xypix : list
List of pixel indices in the order (LON,LAT,ENERGY).
colwise : bool
Use column-wise pixel indexing.
"""
return np.ravel_multi_index(xypix, self.npix,
order='F' if colwise else 'C',
mode='raise')
def ipix_to_xypix(self, ipix, colwise=False):
"""Return array multi-dimensional pixel indices from flattened index.
Parameters
----------
colwise : bool
Use column-wise pixel indexing.
"""
return np.unravel_index(ipix, self.npix,
order='F' if colwise else 'C')
def ipix_swap_axes(self, ipix, colwise=False):
""" Return the transposed pixel index from the pixel xy coordinates
if colwise is True (False) this assumes the original index was
in column wise scheme
"""
xy = self.ipix_to_xypix(ipix, colwise)
return self.xypix_to_ipix(xy, not colwise)
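    # Added illustrative example (assumes a 2D map with self.npix == (4, 3)
    # and default row-wise ordering):
    #   xypix_to_ipix((1, 2)) -> 5,  ipix_to_xypix(5) -> (1, 2),
    #   ipix_swap_axes(5)     -> 9 (the same pixel's column-wise index).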
def get_pixel_skydirs(self):
"""Get a list of sky coordinates for the centers of every pixel.
"""
xpix = np.linspace(0, self.npix[0] - 1., self.npix[0])
ypix = np.linspace(0, self.npix[1] - 1., self.npix[1])
xypix = np.meshgrid(xpix, ypix, indexing='ij')
return SkyCoord.from_pixel(np.ravel(xypix[0]),
np.ravel(xypix[1]), self.wcs)
def get_pixel_indices(self, lons, lats, ibin=None):
"""Return the indices in the flat array corresponding to a set of coordinates
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
            'Latitudes' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all energy bins.
Returns
----------
pixcrd : list
Pixel indices along each dimension of the map.
"""
lons = np.array(lons, ndmin=1)
lats = np.array(lats, ndmin=1)
if len(lats) != len(lons):
raise RuntimeError('Map.get_pixel_indices, input lengths '
'do not match %i %i' % (len(lons), len(lats)))
if len(self._npix) == 2:
pix_x, pix_y = self._wcs.wcs_world2pix(lons, lats, 0)
pixcrd = [np.floor(pix_x).astype(int), np.floor(pix_y).astype(int)]
elif len(self._npix) == 3:
all_lons = np.expand_dims(lons, -1)
all_lats = np.expand_dims(lats, -1)
if ibin is None:
all_bins = (np.expand_dims(
np.arange(self.npix[2]), -1) * np.ones(lons.shape)).T
else:
all_bins = ibin
l = self.wcs.wcs_world2pix(all_lons, all_lats, all_bins, 0)
pix_x = l[0]
pix_y = l[1]
pixcrd = [np.floor(l[0]).astype(int), np.floor(l[1]).astype(int),
all_bins.astype(int)]
return pixcrd
def get_map_values(self, lons, lats, ibin=None):
"""Return the map values corresponding to a set of coordinates.
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
            'Latitudes' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all bins
Returns
----------
vals : numpy.ndarray((n))
Values of pixels in the flattened map, np.nan used to flag
coords outside of map
"""
pix_idxs = self.get_pixel_indices(lons, lats, ibin)
idxs = copy.copy(pix_idxs)
m = np.empty_like(idxs[0], dtype=bool)
m.fill(True)
for i, p in enumerate(pix_idxs):
m &= (pix_idxs[i] >= 0) & (pix_idxs[i] < self._npix[i])
idxs[i][~m] = 0
vals = self.counts.T[idxs]
vals[~m] = np.nan
return vals
def interpolate(self, lon, lat, egy=None):
if len(self.npix) == 2:
pixcrd = self.wcs.wcs_world2pix(lon, lat, 0)
else:
if egy is None:
egy = self._ectr
pixcrd = self.wcs.wcs_world2pix(lon, lat, egy, 0)
pixcrd[2] = np.array(utils.val_to_pix(np.log(self._ectr),
np.log(egy)), ndmin=1)
points = []
for npix in self.npix:
points += [np.linspace(0, npix - 1., npix)]
data = self.counts
fn = RegularGridInterpolator(points, data.T,
bounds_error=False,
fill_value=None)
return fn(np.column_stack(pixcrd))
def interpolate_at_skydir(self, skydir):
coordsys = wcs_utils.get_coordsys(self.wcs)
if coordsys == 'CEL':
skydir = skydir.transform_to('icrs')
return self.interpolate(skydir.ra.deg, skydir.dec.deg)
else:
skydir = skydir.transform_to('galactic')
return self.interpolate(skydir.l.deg, skydir.b.deg)
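# --- Usage sketch (added; illustrative, not part of the original module) ---
# Create an empty all-sky WCS map and query a few positions; the pointing,
# pixel size and coordinates below are arbitrary.
#
# from astropy.coordinates import SkyCoord
# center = SkyCoord(0.0, 0.0, unit='deg', frame='icrs')
# m = Map.create(center, 1.0, [360, 180], coordsys='CEL', projection='AIT')
# vals = m.get_map_values([10.0], [20.0])   # NaN for coords outside the map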
class HpxMap(Map_Base):
""" Representation of a 2D or 3D counts map using HEALPix. """
def __init__(self, counts, hpx):
""" C'tor, fill with a counts vector and a HPX object """
super(HpxMap, self).__init__(counts)
self._hpx = hpx
self._wcs2d = None
self._hpx2wcs = None
@property
def hpx(self):
return self._hpx
@classmethod
def create_from_hdu(cls, hdu, ebins):
""" Creates and returns an HpxMap object from a FITS HDU.
        hdu : The FITS HDU containing the map data
ebins : Energy bin edges [optional]
"""
hpx = HPX.create_from_hdu(hdu, ebins)
colnames = hdu.columns.names
cnames = []
if hpx.conv.convname == 'FGST_SRCMAP_SPARSE':
pixs = hdu.data.field('PIX')
chans = hdu.data.field('CHANNEL')
keys = chans * hpx.npix + pixs
vals = hdu.data.field('VALUE')
nebin = len(ebins)
data = np.zeros((nebin, hpx.npix))
data.flat[keys] = vals
else:
for c in colnames:
if c.find(hpx.conv.colstring) == 0:
cnames.append(c)
nebin = len(cnames)
data = np.ndarray((nebin, hpx.npix))
for i, cname in enumerate(cnames):
data[i, 0:] = hdu.data.field(cname)
return cls(data, hpx)
@classmethod
def create_from_hdulist(cls, hdulist, **kwargs):
""" Creates and returns an HpxMap object from a FITS HDUList
        hdu : The name of the HDU with the map data; energy bin edges are
        read from the HDU list automatically.
"""
extname = kwargs.get('hdu', hdulist[1].name)
ebins = fits_utils.find_and_read_ebins(hdulist)
return cls.create_from_hdu(hdulist[extname], ebins)
@classmethod
def create_from_fits(cls, fitsfile, **kwargs):
hdulist = fits.open(fitsfile)
return cls.create_from_hdulist(hdulist, **kwargs)
def create_image_hdu(self, name=None, **kwargs):
kwargs['extname'] = name
return self.hpx.make_hdu(self.counts, **kwargs)
def make_wcs_from_hpx(self, sum_ebins=False, proj='CAR', oversample=2,
normalize=True):
"""Make a WCS object and convert HEALPix data into WCS projection
NOTE: this re-calculates the mapping, if you have already
calculated the mapping it is much faster to use
convert_to_cached_wcs() instead
Parameters
----------
sum_ebins : bool
            sum over energy bins before reprojecting
proj : str
WCS-projection
oversample : int
Oversampling factor for WCS map
normalize : bool
            True -> preserve integral by splitting HEALPix values between bins
returns (WCS object, np.ndarray() with reprojected data)
"""
self._wcs_proj = proj
self._wcs_oversample = oversample
        self._wcs2d = self.hpx.make_wcs(2, proj=proj, oversample=oversample)
        self._hpx2wcs = HpxToWcsMapping(self.hpx, self._wcs2d)
wcs, wcs_data = self.convert_to_cached_wcs(self.counts, sum_ebins,
normalize)
return wcs, wcs_data
def convert_to_cached_wcs(self, hpx_in, sum_ebins=False, normalize=True):
""" Make a WCS object and convert HEALPix data into WCS projection
Parameters
----------
hpx_in : `~numpy.ndarray`
HEALPix input data
sum_ebins : bool
            sum over energy bins before reprojecting
        normalize : bool
            True -> preserve integral by splitting HEALPix values between bins
returns (WCS object, np.ndarray() with reprojected data)
"""
if self._hpx2wcs is None:
raise Exception('HpxMap.convert_to_cached_wcs() called '
'before make_wcs_from_hpx()')
if len(hpx_in.shape) == 1:
wcs_data = np.ndarray(self._hpx2wcs.npix)
loop_ebins = False
hpx_data = hpx_in
elif len(hpx_in.shape) == 2:
if sum_ebins:
wcs_data = np.ndarray(self._hpx2wcs.npix)
hpx_data = hpx_in.sum(0)
loop_ebins = False
else:
wcs_data = np.ndarray((self.counts.shape[0],
self._hpx2wcs.npix[0],
self._hpx2wcs.npix[1]))
hpx_data = hpx_in
loop_ebins = True
else:
raise Exception('Wrong dimension for HpxMap %i' %
len(hpx_in.shape))
        if loop_ebins:
            for i in range(hpx_data.shape[0]):
                self._hpx2wcs.fill_wcs_map_from_hpx_data(
                    hpx_data[i], wcs_data[i], normalize)
            wcs_data = wcs_data.reshape((self.counts.shape[0],
                                         self._hpx2wcs.npix[0],
                                         self._hpx2wcs.npix[1]))
# replace the WCS with a 3D one
wcs = self.hpx.make_wcs(3, proj=self._wcs_proj,
energies=np.log10(self.hpx.ebins),
oversample=self._wcs_oversample)
        else:
            self._hpx2wcs.fill_wcs_map_from_hpx_data(
                hpx_data, wcs_data, normalize)
            wcs_data = wcs_data.reshape(self._hpx2wcs.npix)
            wcs = self._wcs2d
return wcs, wcs_data
def get_pixel_skydirs(self):
"""Get a list of sky coordinates for the centers of every pixel. """
sky_coords = self._hpx.get_sky_coords()
if self.hpx.coordsys == 'GAL':
return SkyCoord(l=sky_coords.T[0], b=sky_coords.T[1], unit='deg', frame='galactic')
else:
return SkyCoord(ra=sky_coords.T[0], dec=sky_coords.T[1], unit='deg', frame='icrs')
def get_pixel_indices(self, lats, lons):
"""Return the indices in the flat array corresponding to a set of coordinates """
return self._hpx.get_pixel_indices(lats, lons)
def sum_over_energy(self):
""" Reduce a counts cube to a counts map """
# We sum over axis 0 in the array, and drop the energy binning in the
# hpx object
return HpxMap(np.sum(self.counts, axis=0), self.hpx.copy_and_drop_energy())
def get_map_values(self, lons, lats, ibin=None):
"""Return the indices in the flat array corresponding to a set of coordinates
Parameters
----------
lons : array-like
'Longitudes' (RA or GLON)
lats : array-like
            'Latitudes' (DEC or GLAT)
ibin : int or array-like
Extract data only for a given energy bin. None -> extract data for all bins
Returns
----------
vals : numpy.ndarray((n))
Values of pixels in the flattened map, np.nan used to flag
coords outside of map
"""
theta = np.pi / 2. - np.radians(lats)
phi = np.radians(lons)
pix = hp.ang2pix(self.hpx.nside, theta, phi, nest=self.hpx.nest)
if self.data.ndim == 2:
return self.data[:, pix] if ibin is None else self.data[ibin, pix]
else:
return self.data[pix]
def interpolate(self, lon, lat, egy=None, interp_log=True):
"""Interpolate map values.
Parameters
----------
interp_log : bool
Interpolate the z-coordinate in logspace.
"""
if self.data.ndim == 1:
theta = np.pi / 2. - np.radians(lat)
phi = np.radians(lon)
return hp.pixelfunc.get_interp_val(self.counts, theta,
phi, nest=self.hpx.nest)
else:
return self._interpolate_cube(lon, lat, egy, interp_log)
def _interpolate_cube(self, lon, lat, egy=None, interp_log=True):
"""Perform interpolation on a healpix cube. If egy is None
then interpolation will be performed on the existing energy
planes.
"""
shape = np.broadcast(lon, lat, egy).shape
lon = lon * np.ones(shape)
lat = lat * np.ones(shape)
theta = np.pi / 2. - np.radians(lat)
phi = np.radians(lon)
vals = []
for i, _ in enumerate(self.hpx.evals):
v = hp.pixelfunc.get_interp_val(self.counts[i], theta,
phi, nest=self.hpx.nest)
vals += [np.expand_dims(np.array(v, ndmin=1), -1)]
vals = np.concatenate(vals, axis=-1)
if egy is None:
return vals.T
egy = egy * np.ones(shape)
if interp_log:
xvals = utils.val_to_pix(np.log(self.hpx.evals), np.log(egy))
else:
xvals = utils.val_to_pix(self.hpx.evals, egy)
vals = vals.reshape((-1, vals.shape[-1]))
xvals = np.ravel(xvals)
v = map_coordinates(vals, [np.arange(vals.shape[0]), xvals],
order=1)
return v.reshape(shape)
def swap_scheme(self):
"""
"""
hpx_out = self.hpx.make_swapped_hpx()
if self.hpx.nest:
if self.data.ndim == 2:
data_out = np.vstack([hp.pixelfunc.reorder(
self.data[i], n2r=True) for i in range(self.data.shape[0])])
else:
data_out = hp.pixelfunc.reorder(self.data, n2r=True)
else:
if self.data.ndim == 2:
data_out = np.vstack([hp.pixelfunc.reorder(
self.data[i], r2n=True) for i in range(self.data.shape[0])])
else:
data_out = hp.pixelfunc.reorder(self.data, r2n=True)
return HpxMap(data_out, hpx_out)
def expanded_counts_map(self):
""" return the full counts map """
if self.hpx._ipix is None:
return self.counts
output = np.zeros(
(self.counts.shape[0], self.hpx._maxpix), self.counts.dtype)
for i in range(self.counts.shape[0]):
output[i][self.hpx._ipix] = self.counts[i]
return output
def explicit_counts_map(self, pixels=None):
""" return a counts map with explicit index scheme
Parameters
----------
pixels : `np.ndarray` or None
If set, grab only those pixels.
If none, grab only non-zero pixels
"""
# No pixel index, so build one
if self.hpx._ipix is None:
if self.data.ndim == 2:
summed = self.counts.sum(0)
if pixels is None:
nz = summed.nonzero()[0]
else:
nz = pixels
                data_out = np.vstack([self.data[i].flat[nz]
                                      for i in range(self.data.shape[0])])
else:
if pixels is None:
nz = self.data.nonzero()[0]
else:
nz = pixels
data_out = self.data[nz]
return (nz, data_out)
else:
if pixels is None:
return (self.hpx._ipix, self.data)
# FIXME, can we catch this
raise RuntimeError(
'HPX.explicit_counts_map called with pixels for a map that already has pixels')
def sparse_counts_map(self):
""" return a counts map with sparse index scheme
"""
if self.hpx._ipix is None:
            flatarray = self.data.flatten()
else:
flatarray = self.expanded_counts_map()
nz = flatarray.nonzero()[0]
data_out = flatarray[nz]
return (nz, data_out)
def ud_grade(self, order, preserve_counts=False):
"""
"""
new_hpx = self.hpx.ud_graded_hpx(order)
if new_hpx.evals is None:
nebins = 1
else:
nebins = len(new_hpx.evals)
shape = self.counts.shape
if preserve_counts:
power = -2.
else:
power = 0
if len(shape) == 1:
new_data = hp.pixelfunc.ud_grade(self.counts,
nside_out=new_hpx.nside,
order_in=new_hpx.ordering,
order_out=new_hpx.ordering,
power=power)
else:
new_data = np.vstack([hp.pixelfunc.ud_grade(self.counts[i],
nside_out=new_hpx.nside,
order_in=new_hpx.ordering,
order_out=new_hpx.ordering,
power=power) for i in range(shape[0])])
return HpxMap(new_data, new_hpx)
|
FermipyREPO_NAMEfermipyPATH_START.@fermipy_extracted@fermipy-master@fermipy@skymap.py@.PATH_END.py
|
{
"filename": "base.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/io/base.py",
"type": "Python"
}
|
import textwrap
import struct
import numpy
import os.path
from amuse.support.core import late
from amuse.support import exceptions
registered_fileformat_processors = {}
class IoException(exceptions.CoreException):
formatstring = "IO exception: {0}"
class UnsupportedFormatException(IoException):
"""Raised when the given format is not supported by AMUSE."""
formatstring = "You tried to load or save a file with fileformat '{0}', but this format is not in the supported formats list"
class CannotSaveException(IoException):
"""Raised when the given format cannot save data (only reading of data is supported for the format)"""
formatstring = "You tried to save a file with fileformat '{0}', but this format is not supported for writing files"
class CannotLoadException(IoException):
"""Raised when the given format cannot read data (only saving of data is supported for the format)"""
formatstring = "You tried to load a file with fileformat '{0}', but this format is not supported for reading files"
class format_option(late):
def __init__(self, initializer):
late.__init__(self, initializer)
self.__doc__ = initializer.__doc__
def get_name(self):
return self.initializer.__name__
def _get_processor_factory(format):
if isinstance(format, str):
if not format in registered_fileformat_processors:
raise UnsupportedFormatException(format)
processor_factory = registered_fileformat_processors[format]
else:
processor_factory = format
return processor_factory
def write_set_to_file(
set, filename, format="amuse", **format_specific_keyword_arguments
):
"""
Write a set to the given file in the given format.
:argument filename: name of the file to write the data to
:argument format: name of a registered format or
a :class:`FileFormatProcessor` subclass (must be a
class and not an instance)
All other keywords are set as attributes on the fileformat processor. To
determine the supported options for a processor call
:func:`get_options_for_format`
"""
processor_factory = _get_processor_factory(format)
processor = processor_factory(filename, set=set, format=format)
processor.set_options(format_specific_keyword_arguments)
processor.store()
def read_set_from_file(filename, format="amuse", **format_specific_keyword_arguments):
"""
Read a set from the given file in the given format.
:argument filename: name of the file to read the data from
:argument format: name of a registered format or
a :class:`FileFormatProcessor` subclass (must be a
class and not an instance)
All other keywords are set as attributes on the fileformat processor. To
determine the supported options for a processor call
:func:`get_options_for_format`
"""
if "stream" not in format_specific_keyword_arguments and not os.path.exists(
filename
):
raise IoException("Error: file '{0}' does not exist.".format(filename))
processor_factory = _get_processor_factory(format)
processor = processor_factory(filename, format=format)
processor.set_options(format_specific_keyword_arguments)
return processor.load()
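# --- Usage sketch (added; illustrative, not part of the original module) ---
# Round trip through the registered "amuse" format; the filename is
# hypothetical and `particles` can be any AMUSE particle set.
#
# from amuse.datamodel import Particles
# particles = Particles(2)
# write_set_to_file(particles, "stars.amuse", format="amuse")
# restored = read_set_from_file("stars.amuse", format="amuse")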
class ReportTable(object):
"""
Report quantities and values to a file.
:argument filename: name of the file to write the data to
:argument format: name of a registered format ('csv' or 'txt')
All other keywords are set as attributes on the fileformat processor. To
determine the supported options for a processor call
:func:`get_options_for_format`
    Important options for text and comma-separated files are:
    :argument attribute_types: list of the units to store the values in
    :argument attribute_names: list of the names for the values
(used in the header of the file and when using add_row with keyword parameters)
Writes data per row to a file. Ideal for storing intermediate values of
one Particle or one Gridpoint during a run.
Example usage::
report = ReportTable(
"hrdiagram.txt", "txt",
attribute_types=(units.Myr, units.K, units.LSun),
attribute_names=('age', 'temperature_at_time', 'luminosity_at_time')
)
report.add_row(particle.age, particle.temperature_at_time, particle.luminosity_at_time)
"""
def __init__(self, filename, format="csv", **format_specific_keyword_arguments):
processor_factory = _get_processor_factory(format)
self.processor = processor_factory(filename, format=format)
self.processor.set_options(format_specific_keyword_arguments)
self.processor.open_stream()
self.processor.write_header()
def add_row(self, *fields, **fieldsbyname):
"""
        Add a row to the report; columns can be added by name or by
        position in the list. If columns are given by name, the order
        does not matter and will always follow the order given in the
        'attribute_names' option specified when creating the ReportTable.
Example usage::
report.add_row(
particle.age,
particle.temperature_at_time,
particle.luminosity_at_time
)
report.add_row(
temperature_at_time = particle.temperature_at_time,
age = particle.age,
luminosity_at_time = particle.luminosity_at_time
)
"""
row = list(fields)
if len(fieldsbyname) > 0:
names = self.processor.attribute_names
if len(names) >= len(row):
row.extend([0] * (len(names) - len(row)))
names_to_index = {}
for i, name in enumerate(names):
names_to_index[name] = i
for name, value in fieldsbyname.items():
index = names_to_index[name]
row[index] = value
self.processor.write_row(row)
def close(self):
self.processor.close_stream()
def get_options_for_format(
format="amuse",
):
"""Retuns a list of tuples, each tuple contains the
name of the option, a description of the option and
the default values.
:argument format: name of a registered format or
a :class:`FileFormatProcessor` subclass (must be a
class and not an instance)
"""
processor_factory = _get_processor_factory(format)
processor = processor_factory(format=format)
return list(processor.get_description_of_options())
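# Usage sketch (added; illustrative): list the options of a registered format.
#
# for name, description, default in get_options_for_format("amuse"):
#     print(name, "->", default)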
def add_fileformat_processor(class_of_the_format):
"""
Register the specified class, so that it can be used
by the :func:`write_set_to_file` and :func:`read_set_from_file`
functions.
Do not call this method directly, instead use :func:`FileFormatProcessor.register`
"""
for x in class_of_the_format.provided_formats:
registered_fileformat_processors[x] = class_of_the_format
_update_documentation_strings()
def _update_documentation_strings():
for methodname in ["write_set_to_file", "read_set_from_file"]:
method = globals()[methodname]
if not hasattr(method, "_original_doc"):
method._original_doc = method.__doc__
new_doc = method._original_doc
new_doc += "\n Registered file formats:\n\n"
sorted_formatnames = sorted(registered_fileformat_processors.keys())
for x in sorted_formatnames:
processor = registered_fileformat_processors[x]
processor_doc = processor.__doc__
if processor_doc is None or len(processor_doc) == 0:
continue
processor_doc = processor_doc.strip()
line = processor_doc.splitlines()[0]
line = " **" + x + "**,\n " + line + "\n"
new_doc += line
method.__doc__ = new_doc
class FileFormatProcessor(object):
"""
Abstract base class of all fileformat processors
All classes providing loading or storing of files should be
subclasses of this base class.
Every subclass must support the *filename*, *set* and
*format* arguments. The arguments must all be optional.
    :argument filename: name of the file to read the data from
:argument set: set (of particles or entities) to store in the file
:argument format: format of the file, will be a string or class
:attribute provided_formats: list of strings of the formats provided
by the processor
"""
provided_formats = []
def __init__(self, filename=None, set=None, format=None):
self.filename = filename
self.set = set
self.format = format
@classmethod
def get_options(cls):
attribute_names = dir(cls)
result = {}
for x in attribute_names:
if x.startswith("_"):
continue
attribute_value = getattr(cls, x)
if isinstance(attribute_value, format_option):
result[x] = attribute_value
return result
@classmethod
def register(cls):
"""
Register this class, so that it can be found by name
        in the :func:`write_set_to_file` and :func:`read_set_from_file`
functions.
"""
add_fileformat_processor(cls)
def set_options(self, dictionary):
supported_options = self.get_options()
for key, value in dictionary.items():
if key in supported_options:
setattr(self, key, value)
else:
self.extra_attributes[key] = value
def store(self):
"""
Stores the set in the file.
The set and the file are both properties
of the processor.
"""
raise CannotSaveException(self.format)
def load(self):
"""
Loads the set from the file and returns
the set.
"""
raise CannotLoadException(self.format)
def store_string(self):
raise CannotSaveException(self.format)
def load_string(self, string):
raise CannotLoadException(self.format)
def get_description_of_options(self):
"""Yields tuples, each tuple contains the
name of the option, a description of the option and
the default values
"""
for option, method in self.get_options().items():
default_value = getattr(self, option)
doc = method.__doc__
if doc is None:
doc = ""
description = textwrap.dedent(doc)
yield (option, description, default_value)
@format_option
def extra_attributes(self):
"""Extra attributes to store with the data set. Some
        formats (most notably the amuse native format)
can store extra attributes with the set in file. The
'write_set_to_file' function will collect all keyword arguments
that do not match to an option into the extra attributes
dictionary.
"""
return {}
class FullTextFileFormatProcessor(FileFormatProcessor):
"""
Abstract base class of all fileformat processors that process
their data by first reading the complete text string
Subclasses need to implement the
:func:`store_string` and :func:`load_string` methods.
"""
def store(self):
with open(self.filename, "w") as f:
f.write(self.store_string())
def load(self):
with open(self.filename, "r") as f:
return self.load_string(f.read())
def store_string(self):
"""Return a string representation of the particle set"""
raise CannotSaveException(self.format)
def load_string(self, string):
"""Return a particle set, read from the string"""
raise CannotLoadException(self.format)
class BinaryFileFormatProcessor(FileFormatProcessor):
"""
    Abstract base class of all fileformat processors that process
    their data by reading from or writing to an opened binary file
Subclasses need to implement the
:func:`store_file` and / or :func:`load_file` methods.
"""
def store(self):
with open(self.filename, "wb") as f:
self.store_file(f)
def load(self):
with open(self.filename, "rb") as f:
return self.load_file(f)
def store_file(self, file):
"""Store the data on the opened file"""
raise CannotSaveException(self.format)
def load_file(self, string):
"""Return a particle set, read from the binary file"""
raise CannotLoadException(self.format)
class FortranFileFormatProcessor(BinaryFileFormatProcessor):
"""
Abstract base class of all fileformat processors that process
their data by first reading fortran blocks
Subclasses need to implement the
:func:`store_file` and / or :func:`load_file` methods.
"""
@format_option
def endianness(self):
"""The endianness of the binary date stored in the file"""
return "@" # native
@late
def float_type(self):
result = numpy.dtype(numpy.float32)
if self.endianness == "@":
return result
else:
return result.newbyteorder(self.endianness)
@late
def double_type(self):
result = numpy.dtype(numpy.float64)
if self.endianness == "@":
return result
else:
return result.newbyteorder(self.endianness)
@late
def uint_type(self):
result = numpy.dtype(numpy.uint32)
if self.endianness == "@":
return result
else:
return result.newbyteorder(self.endianness)
@late
def ulong_type(self):
result = numpy.dtype(numpy.uint64)
if self.endianness == "@":
return result
else:
return result.newbyteorder(self.endianness)
@late
def int_type(self):
result = numpy.dtype(numpy.int32)
if self.endianness == "@":
return result
else:
return result.newbyteorder(self.endianness)
def read_fortran_block(self, file):
"""Returns one block read from file. Checks if the
block is consistant. Result is an array of bytes
"""
format = self.endianness + "I"
bytes = file.read(4)
if not bytes:
return None
length_of_block = struct.unpack(format, bytes)[0]
result = file.read(length_of_block)
bytes = file.read(4)
length_of_block_after = struct.unpack(format, bytes)[0]
if length_of_block_after != length_of_block:
raise IoException(
"Block is mangled sizes don't match before: {0}, after: {1}".format(
length_of_block, length_of_block_after
)
)
return result
def read_fortran_block_floats(self, file):
bytes = self.read_fortran_block(file)
return numpy.frombuffer(bytes, dtype=self.float_type)
def read_fortran_block_doubles(self, file):
bytes = self.read_fortran_block(file)
return numpy.frombuffer(bytes, dtype=self.double_type)
def read_fortran_block_uints(self, file):
bytes = self.read_fortran_block(file)
return numpy.frombuffer(bytes, dtype=self.uint_type)
def read_fortran_block_ulongs(self, file):
bytes = self.read_fortran_block(file)
return numpy.frombuffer(bytes, dtype=self.ulong_type)
def read_fortran_block_ints(self, file):
bytes = self.read_fortran_block(file)
return numpy.frombuffer(bytes, dtype=self.int_type)
def read_fortran_block_float_vectors(self, file, size=3):
result = self.read_fortran_block_floats(file)
return result.reshape(len(result) // size, size)
def write_fortran_block(self, file, input):
format = self.endianness + "I"
input_bytes = bytearray(input)
length_of_block = len(input_bytes)
file.write(struct.pack(format, length_of_block))
file.write(input_bytes)
file.write(struct.pack(format, length_of_block))
def write_fortran_block_floats(self, file, values):
array = numpy.asarray(values, dtype=self.float_type)
self.write_fortran_block(file, array.data)
def write_fortran_block_doubles(self, file, values):
array = numpy.asarray(values, dtype=self.double_type)
self.write_fortran_block(file, array.data)
def write_fortran_block_uints(self, file, values):
array = numpy.asarray(values, dtype=self.uint_type)
self.write_fortran_block(file, array.data)
def write_fortran_block_ulongs(self, file, values):
array = numpy.asarray(values, dtype=self.ulong_type)
self.write_fortran_block(file, array.data)
def write_fortran_block_ints(self, file, values):
array = numpy.asarray(values, dtype=self.int_type)
self.write_fortran_block(file, array.data)
def write_fortran_block_float_vectors(self, file, values, size=3):
array = numpy.asarray(values, dtype=self.float_type)
array = array.reshape(len(array) * size)
self.write_fortran_block(file, array.data)
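# --- Round-trip sketch (added; illustrative, not part of the original module) ---
# Each Fortran record is framed as <4-byte length><payload><4-byte length>, so
# blocks written with the helpers above read back symmetrically.
# `_DemoProcessor` is a hypothetical subclass used only for this sketch.
#
# import io
# class _DemoProcessor(FortranFileFormatProcessor):
#     provided_formats = []
# p = _DemoProcessor()
# buf = io.BytesIO()
# p.write_fortran_block_doubles(buf, [1.0, 2.0, 3.0])
# buf.seek(0)
# p.read_fortran_block_doubles(buf)   # -> array([1., 2., 3.])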
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@io@base.py@.PATH_END.py
|
{
"filename": "tinygp_multidimensional_matern32_activity.py",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/pyorbit/models/tinygp_multidimensional_matern32_activity.py",
"type": "Python"
}
|
from pyorbit.subroutines.common import *
from pyorbit.models.abstract_model import *
from pyorbit.keywords_definitions import *
from scipy.linalg import cho_factor, cho_solve, lapack, LinAlgError
from scipy import matrix, spatial
import sys
__all__ = ['TinyGP_Multidimensional_Matern32Activity']
try:
import jax
jax.config.update("jax_enable_x64", True)
import jax.numpy as jnp
from tinygp import kernels, GaussianProcess
if sys.version_info[1] < 10:
raise Warning("You should be using Python 3.10 - tinygp may not work")
class LatentKernel_Multi_Matern32(kernels.Kernel):
"""A custom kernel based on Matern32
Args:
kernel: The kernel function describing the latent process. This can be any other
``tinygp`` kernel.
coeff_prim: The primal coefficients for each class. This can be thought of as how
much the latent process itself projects into the observations for that class.
This should be an array with an entry for each class of observation.
coeff_deriv: The derivative coefficients for each class. This should have the same
shape as ``coeff_prim``.
"""
try:
kernel : kernels.Kernel
coeff_prim: jax.Array | float
coeff_deriv: jax.Array | float
except:
pass
def __init__(self, kernel, coeff_prim, coeff_deriv):
self.kernel = kernel
self.coeff_prim, self.coeff_deriv = jnp.broadcast_arrays(
jnp.asarray(coeff_prim), jnp.asarray(coeff_deriv)
)
def evaluate(self, X1, X2):
t1, label1 = X1
t2, label2 = X2
# Differentiate the kernel function: the first derivative wrt x1
Kp = jax.grad(self.kernel.evaluate, argnums=0)
# ... and the second derivative
Kpp = jax.grad(Kp, argnums=1)
# Evaluate the kernel matrix and all of its relevant derivatives
K = self.kernel.evaluate(t1, t2)
d2K_dx1dx2 = Kpp(t1, t2)
# For stationary kernels, these are related just by a minus sign, but we'll
# evaluate them both separately for generality's sake
dK_dx2 = jax.grad(self.kernel.evaluate, argnums=1)(t1, t2)
dK_dx1 = Kp(t1, t2)
# Extract the coefficients
a1 = self.coeff_prim[label1]
a2 = self.coeff_prim[label2]
b1 = self.coeff_deriv[label1]
b2 = self.coeff_deriv[label2]
# Construct the matrix element
return (
a1 * a2 * K
+ a1 * b2 * dK_dx2
+ b1 * a2 * dK_dx1
+ b1 * b2 * d2K_dx1dx2
)
def _build_tinygp_multidimensional_matern32(params):
base_kernel = kernels.Matern32(scale=jnp.abs(params["scale"]))
kernel = LatentKernel_Multi_Matern32(base_kernel, params['coeff_prime'], params['coeff_deriv'])
return GaussianProcess(
kernel, params['X'], diag=jnp.abs(params['diag']), mean=0.0
)
@jax.jit
def _loss_tinygp_multi_matern32(params):
gp = _build_tinygp_multidimensional_matern32(params)
return gp.log_probability(params['y'])
except:
pass
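# --- Usage sketch (added; illustrative, not part of the original module) ---
# Minimal evaluation of the multidimensional Matern-3/2 GP defined above; all
# numbers are made up. X pairs each epoch with an integer dataset label.
#
# import numpy as np
# t = np.linspace(0.0, 10.0, 20)
# params = dict(scale=5.0,
#               diag=np.full(t.shape, 1e-2),
#               X=(t, np.zeros_like(t, dtype=int)),
#               y=np.random.randn(t.size),
#               coeff_prime=np.array([1.0]),
#               coeff_deriv=np.array([0.1]))
# _loss_tinygp_multi_matern32(params)   # GP log-likelihood of y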
class TinyGP_Multidimensional_Matern32Activity(AbstractModel):
'''
- matern32_rho: the scale of the Matern32 kernel;
- matern32_multigp_sigma: the amplitude of the correlations;
- matern32_multigp_sigma_deriv: amplitude of the first derivative
'''
default_common = 'activity'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model_class = 'gp_multidimensional_matern32_activity'
self.internal_likelihood = True
self.delayed_lnlk_computation = True
self.list_pams_common = OrderedSet([
'matern32_rho', # time scale of the Matern32
])
self.list_pams_dataset = OrderedSet([
'matern32_multigp_sigma', # Amplitude of the covariance matrix
'matern32_multigp_sigma_deriv' # Amplitude of the first derivative of the covariance matrix
])
self.internal_parameter_values = None
self._dist_t1 = None
self._dist_t2 = None
self._added_datasets = 0
self.dataset_ordering = {}
self.inds_cache = None
self._dataset_x0 = []
self._dataset_label = []
self._dataset_e2 = []
self._dataset_names = {}
self._dataset_nindex = []
self.use_derivative_dict = {}
self.internal_coeff_prime = []
self.internal_coeff_deriv = []
self._dataset_ej2 = []
self._dataset_res = []
self._added_datasets = 0
self._n_cov_matrix = 0
self.pi2 = np.pi * np.pi
self.use_stellar_rotation_period = False
def initialize_model(self, mc, **kwargs):
try:
for common_ref in self.common_ref:
if mc.common_models[common_ref].model_class == 'activity':
self.use_stellar_rotation_period = getattr(mc.common_models[common_ref], 'use_stellar_rotation_period', False)
break
except:
self.use_stellar_rotation_period = False
for keyword in keywords_stellar_rotation:
self.use_stellar_rotation_period = kwargs.get(keyword, self.use_stellar_rotation_period)
if self.use_stellar_rotation_period:
self.list_pams_common.update(['rotation_period'])
self.list_pams_common.discard('matern32_rho')
def initialize_model_dataset(self, mc, dataset, **kwargs):
""" when reloading the .p files, the object is not reinitialized, so we have to skip the
incremental addition of datasets if they are already present """
if dataset.name_ref in self._dataset_names:
return
self._dataset_nindex.append([self._n_cov_matrix,
self._n_cov_matrix+dataset.n])
self._dataset_x0 = np.append(self._dataset_x0, dataset.x0)
self._dataset_label = np.append(self._dataset_label, np.zeros_like(dataset.x0, dtype=int) + self._added_datasets)
self._dataset_e2 = np.append(self._dataset_e2, dataset.e**2)
self._dataset_names[dataset.name_ref] = self._added_datasets
self._n_cov_matrix += dataset.n
self._added_datasets += 1
self._dataset_ej2 = self._dataset_e2 * 1.
self._dataset_res = self._dataset_e2 * 0.
self.internal_coeff_prime = np.empty(self._added_datasets)
self.internal_coeff_deriv = np.empty(self._added_datasets)
self._X = (self._dataset_x0, self._dataset_label.astype(int))
        if 'derivative' in kwargs:
use_derivative = kwargs['derivative'].get(dataset.name_ref, False)
elif dataset.name_ref in kwargs:
use_derivative = kwargs[dataset.name_ref].get('derivative', False)
else:
use_derivative = True
if not use_derivative:
self.fix_list[dataset.name_ref] = {'matern32_multigp_sigma_deriv': [0., 0.]}
return
def add_internal_dataset(self, parameter_values, dataset):
self.internal_parameter_values = parameter_values
if self.use_stellar_rotation_period:
self.internal_parameter_values['matern32_rho'] = parameter_values['rotation_period']
d_ind = self._dataset_names[dataset.name_ref]
d_nstart, d_nend = self._dataset_nindex[d_ind]
self._dataset_ej2[d_nstart:d_nend] = self._dataset_e2[d_nstart:d_nend] + dataset.jitter**2.0
self._dataset_res[d_nstart:d_nend] = dataset.residuals
self.internal_coeff_prime[d_ind] = parameter_values['matern32_multigp_sigma']
self.internal_coeff_deriv[d_ind] = parameter_values['matern32_multigp_sigma_deriv']
def lnlk_compute(self):
theta_dict = dict(
scale=self.internal_parameter_values['matern32_rho'],
diag=self._dataset_ej2,
X=self._X,
y=self._dataset_res,
coeff_prime=self.internal_coeff_prime,
coeff_deriv=self.internal_coeff_deriv
)
return _loss_tinygp_multi_matern32(theta_dict)
def sample_predict(self, dataset, x0_input=None, return_covariance=False, return_variance=False):
dataset_index = self._dataset_names[dataset.name_ref]
if x0_input is None:
l_nstart, l_nend = self._dataset_nindex[dataset_index]
X_input = self._X
else:
l_nstart, l_nend = len(x0_input)*dataset_index, len(x0_input)*(dataset_index+1)
temp_input = []
temp_label = []
for ii in range(0, self._added_datasets):
temp_input = np.append(temp_input, x0_input)
temp_label = np.append(temp_label, np.zeros_like(x0_input, dtype=int) + ii)
X_input = (temp_input, temp_label.astype(int))
theta_dict = dict(
scale=self.internal_parameter_values['matern32_rho'],
diag=self._dataset_ej2,
X=self._X,
y=self._dataset_res,
coeff_prime=self.internal_coeff_prime,
coeff_deriv=self.internal_coeff_deriv,
            x0_predict=X_input
)
gp = _build_tinygp_multidimensional_matern32(theta_dict)
_, cond_gp = gp.condition(theta_dict['y'], theta_dict['x0_predict'])
#mu = cond_gp.mean
#std = np.sqrt(cond_gp.variance)
mu_full = cond_gp.loc # or cond_gp.mean?
mu = mu_full[l_nstart:l_nend]
std = np.sqrt(cond_gp.variance)[l_nstart:l_nend]
if return_variance:
return mu, std
else:
return mu
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@pyorbit@models@tinygp_multidimensional_matern32_activity.py@.PATH_END.py
|
{
"filename": "test_convolve_kernels.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/convolution/tests/test_convolve_kernels.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import itertools
import pytest
import numpy as np
from numpy.testing import assert_almost_equal
from ..convolve import convolve, convolve_fft
from ..kernels import Gaussian2DKernel, Box2DKernel, Tophat2DKernel
from ..kernels import Moffat2DKernel
SHAPES_ODD = [[15, 15], [31, 31]]
SHAPES_EVEN = [[8, 8], [16, 16], [32, 32]]
WIDTHS = [2, 3, 4, 5]
KERNELS = []
for shape in SHAPES_ODD:
for width in WIDTHS:
KERNELS.append(Gaussian2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Box2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Tophat2DKernel(width,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
KERNELS.append(Moffat2DKernel(width, 2,
x_size=shape[0],
y_size=shape[1],
mode='oversample',
factor=10))
class Test2DConvolutions(object):
@pytest.mark.parametrize('kernel', KERNELS)
def test_centered_makekernel(self, kernel):
"""
Test smoothing of an image with a single positive pixel
"""
shape = kernel.array.shape
x = np.zeros(shape)
xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape]
x[xslice] = 1.0
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize('kernel', KERNELS)
def test_random_makekernel(self, kernel):
"""
Test smoothing of an image made of random noise
"""
shape = kernel.array.shape
x = np.random.randn(*shape)
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
# not clear why, but these differ by a couple ulps...
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, WIDTHS)))
def test_uniform_smallkernel(self, shape, width):
"""
Test smoothing of an image with a single positive pixel
Uses a simple, small kernel
"""
if width % 2 == 0:
            # convolve does not accept even-shape kernels
return
kernel = np.ones([width, width])
x = np.zeros(shape)
xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape]
x[xslice] = 1.0
c2 = convolve_fft(x, kernel, boundary='fill')
c1 = convolve(x, kernel, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
@pytest.mark.parametrize(('shape', 'width'), list(itertools.product(SHAPES_ODD, [1, 3, 5])))
def test_smallkernel_Box2DKernel(self, shape, width):
"""
Test smoothing of an image with a single positive pixel
Compares a small uniform kernel to the Box2DKernel
"""
        kernel1 = np.ones([width, width]) / float(width) ** 2
kernel2 = Box2DKernel(width, mode='oversample', factor=10)
x = np.zeros(shape)
xslice = [slice(sh // 2, sh // 2 + 1) for sh in shape]
x[xslice] = 1.0
c2 = convolve_fft(x, kernel2, boundary='fill')
c1 = convolve_fft(x, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
c2 = convolve(x, kernel2, boundary='fill')
c1 = convolve(x, kernel1, boundary='fill')
assert_almost_equal(c1, c2, decimal=12)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@convolution@tests@test_convolve_kernels.py@.PATH_END.py
|
{
"filename": "tess_io.py",
"repo_name": "yuliang419/AstroNet-Triage",
"repo_path": "AstroNet-Triage_extracted/AstroNet-Triage-master/light_curve_util/tess_io.py",
"type": "Python"
}
|
# Copyright 2018 Liang Yu.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for reading TESS data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import h5py
import numpy as np
from tensorflow import gfile
def tess_filenames(tic,
base_dir='/Users/liangyu/Documents/EBClassify/astronet/astronet/tess/',
sector=1,
injected=False,
inject_dir='/pdo/users/yuliang',
check_existence=True):
"""Returns the light curve filename for a TESS target star.
Args:
tic: TIC of the target star. May be an int or a possibly zero-
padded string.
    base_dir: Base directory containing TESS data.
    sector: Int, sector number of data.
    injected: Bool, whether target also has a light curve with injected planets.
    inject_dir: Directory containing light curves with injected transits.
check_existence: If True, only return filenames corresponding to files that
exist.
Returns:
filename for given TIC.
"""
tic = str(tic)
if not injected:
# modify this as needed
dir = os.path.join(base_dir, 'sector-' + str(sector))
base_name = "%s.h5" % (tic)
filename = os.path.join(dir, base_name)
else:
filename = os.path.join(inject_dir, tic + '.lc')
if not check_existence or gfile.Exists(filename):
return filename
return
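# Usage sketch (added; illustrative): pure filename construction; the base
# directory and TIC below are made up, and check_existence=False skips the
# gfile lookup.
#
# tess_filenames(123456789, base_dir='/data/tess/', sector=1,
#                check_existence=False)
# # -> '/data/tess/sector-1/123456789.h5'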
def read_tess_light_curve(filename, flux_key='KSPMagnitude', invert=False):
"""Reads time and flux measurements for a Kepler target star.
Args:
filename: str name of h5 file containing light curve.
flux_key: Key of h5 column containing detrended flux.
invert: Whether to invert the flux measurements by multiplying by -1.
Returns:
time: Numpy array; the time values of the light curve.
flux: Numpy array corresponding to the time array.
"""
f = h5py.File(filename, "r")
apgroup = f["LightCurve"]["AperturePhotometry"]
bestap = apgroup.attrs["bestap"]
api = apgroup["Aperture_%.3d" % bestap]
time = np.array(f["LightCurve"]["BJD"])
mag = np.array(api[flux_key])
if 'QFLAG' in f["LightCurve"].keys():
quality_flag = np.where(np.array(f["LightCurve"]['QFLAG']) == 0)
# Remove outliers
time = time[quality_flag]
mag = mag[quality_flag]
# Remove NaN flux values.
valid_indices = np.where(np.isfinite(mag))
time = time[valid_indices]
mag = mag[valid_indices]
else:
# manually remove sector 1 outliers
bad = np.array([0, 1, 2, 31, 49, 88, 121, 152, 186, 188, 199,
224, 225, 228, 241, 340, 359, 361, 463, 464, 465, 481,
482, 483, 546, 547, 583, 584, 598, 599, 600, 601, 602,
631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641,
642, 643, 644, 645, 646, 647, 648, 649, 650, 651, 652,
653, 654, 655, 656, 657, 658, 659, 660, 661, 662, 663,
664, 665, 666, 667, 668, 669, 670, 671, 672, 673, 674,
675, 676, 677, 678, 723, 726, 727, 730, 748, 749, 752,
753, 754, 755, 756, 817, 819, 839, 853, 854, 855, 866,
872, 873, 874, 875, 969, 971, 977, 987, 992, 993, 994,
995, 996, 997, 998, 999, 1000, 1001, 1002, 1003, 1005, 1006,
1007, 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015, 1016, 1017,
1018, 1019, 1020, 1021, 1022, 1023, 1024, 1025, 1026, 1027, 1028,
1029, 1030, 1031, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1049, 1050,
1051, 1052, 1053, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061,
1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072,
1073, 1074, 1075, 1076, 1077, 1078, 1079, 1080, 1081, 1082, 1083,
1084, 1085, 1086, 1087, 1088, 1089, 1090, 1091, 1092, 1093, 1094,
1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1103, 1104, 1108,
1112, 1113, 1114, 1141, 1175, 1180, 1183, 1191, 1193, 1195, 1196,
1208, 1209, 1210, 1214, 1225, 1226, 1231, 1232, 1233, 1235, 1258,
1278, 1279, 1280])
bad = bad[bad < len(time)]
mask = np.ones(len(time))
mask[bad] = 0
mask = mask.astype(bool)
time = time[mask]
mag = mag[mask]
valid_indices = np.where(np.isfinite(mag))
time = time[valid_indices]
mag = mag[valid_indices]
    f.close()
    if invert:
        mag *= -1
    return time, mag
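# Usage sketch (added; illustrative): `lc.h5` is a hypothetical light-curve
# file with the HDF5 layout assumed above
# (LightCurve/AperturePhotometry/Aperture_XXX groups).
#
# time, mag = read_tess_light_curve('lc.h5', flux_key='KSPMagnitude')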
|
yuliang419REPO_NAMEAstroNet-TriagePATH_START.@AstroNet-Triage_extracted@AstroNet-Triage-master@light_curve_util@tess_io.py@.PATH_END.py
|
{
"filename": "demo_MBS_collision_trimesh.py",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/demos/python/mbs/demo_MBS_collision_trimesh.py",
"type": "Python"
}
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2019 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
print ("Example: create a rigid body based on a .obj mesh file");
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
# ---------------------------------------------------------------------
#
# Create the simulation sys and add items
#
sys = chrono.ChSystemNSC()
sys.SetCollisionSystemType(chrono.ChCollisionSystem.Type_BULLET)
# Set the global collision margins. This is especially important for very large or
# very small objects. Set this before creating shapes (it need not be set
# before creating the system).
chrono.ChCollisionModel.SetDefaultSuggestedEnvelope(0.001)
chrono.ChCollisionModel.SetDefaultSuggestedMargin(0.001)
# Create a contact material (with default properties, shared by all collision shapes)
contact_material = chrono.ChContactMaterialNSC()
# Create a floor
mfloor = chrono.ChBodyEasyBox(3, 0.2, 3, 1000,True,True, contact_material)
mfloor.SetFixed(True)
mfloor.GetVisualShape(0).SetColor(chrono.ChColor(0.2, 0.2, 0.6))
sys.Add(mfloor)
# Now we will create a falling object whose shape is defined by a .obj mesh.
#
# NOTE: collision detection with generic concave meshes is slower and less
# robust than any other option for collision shapes, so use it only if defining
# collision shapes via primitives like spheres, boxes and cylinders, or their
# clusters, is too complex.
#
# NOTE: the mesh shape is a .obj file in Wavefront file format,
# you can generate it from 3D modelers such as Blender, Maya, etc.
#
# NOTE: for collision purposes, the .obj mesh must be "watertight", i.e. having
# no gaps in edges, no repeated vertexes, etc.
#
# NOTE: for visualization purposes only, i.e. if you do not use the mesh also for
# collision, the mesh does not need to be watertight.
# Method A:
# - use the ChBodyEasyMesh
# This will automatically create the visualization mesh, the collision mesh,
# and will automatically compute the mass properties (COG position with respect
# to REF, mass and inertia tensor) given a uniform density.
body_A= chrono.ChBodyEasyMesh(chrono.GetChronoDataFile('models/bulldozer/shoe_view.obj'), # mesh filename
7000, # density kg/m^3
True, # automatically compute mass and inertia
                              True, # visualize?
True, # collide?
contact_material, # contact material
)
body_A.SetPos(chrono.ChVector3d(0.5,0.5,0))
body_A.GetVisualShape(0).SetColor(chrono.ChColor(0.2, 0.6, 0.2))
sys.Add(body_A)
# Method B:
# - create a ChBodyAuxRef,
# - set mass and inertia tensor as you like
# - set the COG (center of mass) position with respect to the REF reference frame as you like
# - attach a visualization shape based on a .obj triangle mesh
# - add contact shape based on a .obj triangle mesh
# This is more complicated than method A, yet it can still be preferable if you
# need deeper control, e.g. if you want to provide two different meshes, one
# with a high level of detail just for the visualization and a coarse one for
# collision, or if you want to set custom COG and inertia values, etc.
# Rigid body part
body_B = chrono.ChBodyAuxRef()
body_B.SetPos(chrono.ChVector3d(0,0.5,0))
body_B.SetMass(16)
body_B.SetInertiaXX(chrono.ChVector3d(0.270,0.400,0.427))
body_B.SetInertiaXY(chrono.ChVector3d(0.057,0.037,-0.062))
body_B.SetFrameCOMToRef(chrono.ChFramed(
chrono.ChVector3d( 0.12,0.0,0),
chrono.ChQuaterniond(1,0,0,0)))
# Attach a visualization shape.
# First load a .obj from disk into a ChTriangleMeshConnected:
mesh_for_visualization = chrono.ChTriangleMeshConnected()
mesh_for_visualization.LoadWavefrontMesh(chrono.GetChronoDataFile('models/bulldozer/shoe_view.obj'))
# Optionally: you can scale/shrink/rotate the mesh using this:
mesh_for_visualization.Transform(chrono.ChVector3d(0.01,0,0), chrono.ChMatrix33d(1))
# Now the triangle mesh is inserted in a ChVisualShapeTriangleMesh visualization asset,
# and added to the body
visualization_shape = chrono.ChVisualShapeTriangleMesh()
visualization_shape.SetMesh(mesh_for_visualization)
visualization_shape.SetColor(chrono.ChColor(0.6, 0.2, 0.2))
body_B.AddVisualShape(visualization_shape)
# Add the collision shape.
# Again load a .obj file in Wavefront file format. NOTE: in this
# example we use the same .obj file as for visualization, but one could do
# better here: use a different, low-level-of-detail mesh for the collision,
# so that simulation performance is not affected by details such as bolts
# and chamfers that may be wanted only for visualization.
mesh_for_collision = chrono.ChTriangleMeshConnected()
mesh_for_collision.LoadWavefrontMesh(chrono.GetChronoDataFile('models/bulldozer/shoe_view.obj'))
# Optionally: you can scale/shrink/rotate the mesh using this:
mesh_for_collision.Transform(chrono.ChVector3d(0.01,0,0), chrono.ChMatrix33d(1))
body_B_ct_shape = chrono.ChCollisionShapeTriangleMesh(contact_material, mesh_for_collision, False, False)
body_B.AddCollisionShape(body_B_ct_shape)
body_B.EnableCollision(True)
sys.Add(body_B)
# Create an Irrlicht application to visualize the sys
vis = chronoirr.ChVisualSystemIrrlicht()
vis.AttachSystem(sys)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('Trimesh collision demo')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVector3d(0.5,0.5,1))
vis.AddTypicalLights()
# Run the simulation
while vis.Run():
vis.BeginScene()
vis.Render()
vis.EndScene()
sys.DoStepDynamics(5e-3)
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@demos@python@mbs@demo_MBS_collision_trimesh.py@.PATH_END.py
|
{
"filename": "avroUtils.py",
"repo_name": "jlenain/flaapluc",
"repo_path": "flaapluc_extracted/flaapluc-master/flaapluc/avroUtils.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Time-stamp: "2022-11-17 12:16:18 jlenain"
import io
import json
import fastavro
# From https://github.com/astrolabsoftware/fink-client/blob/master/fink_client/avroUtils.py
def encode_into_avro(alert: dict, schema_file: str) -> bytes:
"""Encode a dict record into avro bytes
Parameters
----------
alert: dict
A Dictionary of alert data
schema_file: str
Path of avro schema file
Returns
----------
    value: bytes
        avro-encoded alert data as a bytes string
"""
with open(schema_file) as f:
schema = json.load(f)
parsed_schema = fastavro.parse_schema(schema)
b = io.BytesIO()
fastavro.schemaless_writer(b, parsed_schema, alert)
return b.getvalue()
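# Hedged companion sketch (not part of the original fink-client file): decoding the
# bytes produced above, assuming the same schema file is available. The function
# name is hypothetical; fastavro.schemaless_reader mirrors the schemaless_writer.
def decode_from_avro(avro_bytes: bytes, schema_file: str) -> dict:
    """Decode avro bytes back into a dict, using the given schema file."""
    with open(schema_file) as f:
        schema = json.load(f)
    parsed_schema = fastavro.parse_schema(schema)
    return fastavro.schemaless_reader(io.BytesIO(avro_bytes), parsed_schema)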
|
jlenainREPO_NAMEflaaplucPATH_START.@flaapluc_extracted@flaapluc-master@flaapluc@avroUtils.py@.PATH_END.py
|
{
"filename": "CODE_OF_CONDUCT.md",
"repo_name": "RadioAstronomySoftwareGroup/pyuvdata",
"repo_path": "pyuvdata_extracted/pyuvdata-main/CODE_OF_CONDUCT.md",
"type": "Markdown"
}
|
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the RASG managers at [rasgmanagers@gmail.com](mailto:rasgmanagers@gmail.com).
The list of managers who will receive the email is in the [readme](README.md).
To report an issue involving any of the managers, you can email any subset of the managers directly.
All complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The RASG managers are
obligated to maintain confidentiality with regard to the reporter of an incident.
Project maintainers or contributors who do not follow or enforce the Code of
Conduct in good faith may be temporarily or permanently removed from the project
team or blocked from RASG repositories, Slack channels or telecons.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq
|
RadioAstronomySoftwareGroupREPO_NAMEpyuvdataPATH_START.@pyuvdata_extracted@pyuvdata-main@CODE_OF_CONDUCT.md@.PATH_END.py
|
{
"filename": "make_para_maps.ipynb",
"repo_name": "SpandanCh/Barnard5_filaments",
"repo_path": "Barnard5_filaments_extracted/Barnard5_filaments-main/make_para_maps.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
import pyspeckit
from matplotlib.colors import ListedColormap
```
```python
```
## Read data files
```python
nh3_11, hd_amm = fits.getdata('datacubes/nh3_11_whole_cube_8as_3px.fits', header=True)
npeaks, hd2d = fits.getdata('bayesian_fit_files/npeaks_cut5.fits', header=True)
snr = fits.getdata('bayesian_fit_files/snr11.fits')
```
```python
```
#### Area masks (from Schmiedeke et al., 2021)
```python
from os import listdir
fls = listdir('data_files_prev_works/masks_anika')
fls = [i for i in fls if 'rgrd.fits' in i]
msk_regs = {ms[:-10] : fits.getdata('data_files_prev_works/masks_anika/'+ms) for ms in fls}
```
#### coherent core boundary from Pineda et al., 2010
```python
coh_core_bndry_fle = 'data_files_prev_works/coherent_core_bound_SD.fits'
```
### Aplpy plots
```python
from aplpy import FITSFigure
from astropy.io.fits import PrimaryHDU
import astropy.units as u
import matplotlib as mpl
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
```
```python
def aplpy_plot(fig= None, fgsz=(7,7), data=None, hd=None, cmap='inferno', vmin=9.7, vmax=10.7, vmid=None,
colourscale_stretch='linear', colourbar=True, colourbar_text=r'$\rm{v_{LSR}}\ (km\,s^{-1})$',
colourbar_loc='right', beam_colour='green', bm_clr='k', x_bm_lbl=0.07, y_bm_lbl=0.07,
scalebar_corner='top right', background_grey=False, bckgr_gr_clr='0.8', zoom=False,
sbplt=[0, 0, 1, 1], contours=False, contour_clr='k') :
"""
return aplpy FITSfigure for given data
fig : matplotlib figure instance. If not given, a new figure is created
fgsz : size of the figure, if created new. Default is (7,7)
data : 2d array containing the data to be plotted, required
hd : associated header, required
cmap : colourmap to be used. Default is 'inferno'
vmin, vmax : min,max of the colourscale. Default is (9.7, 10.7)
vmid : vmid for the colour scale. Required if the stretch is 'log'. Default is 'None'
colourscale_stretch : stretch for the colour scale. Default is 'linear'
colourbar : bool. Whether the colourbar should be shown. Default is 'True'
colourbar_text : text accompanying the colourbar. Default is for v_lsr
colourbar_loc : location of the colourbar on the map. Default is 'right'
beam_colour : colour of the beam to be shown. Default is 'green'
bm_clr : colour of the text accompanying beam (the region name), default is k
x_bm_lbl, y_bm_lbl : position of the accompanying text (region name). Default is (0.07, 0.07)
scalebar_corner : position of the scale bar. Default is 'top right'
background_grey : whether to set background NaN to a different colour instead of the default white.
Default is 'False'
bckgr_gr_clr : colour of the background NaN. Default is '0.8'
    zoom : if set to 'True', the figure shows a zoomed-in view of the B5 core. Default is 'False'
sbplt : which subplot to use. Only used if an existing figure is passed. Default is [0, 0, 1, 1]
contours : whether to show the contours of the different regions (filaments 1 & 2,
condensations 1, 2 & 3). Default is False
contour_clr : colour of the region contours, default is black
"""
# generate a new matplotlib figure if one is not provided
if not fig:
fig = plt.figure(figsize=fgsz)
# create an aplpy.FITSfigure with given data and header
fig = FITSFigure( data=PrimaryHDU(data=data, header=hd), figure=fig, subplot=sbplt)
# set background colour to the one given
if background_grey:
cmap = plt.get_cmap(cmap)
cmap.set_bad(bckgr_gr_clr)
# show the colour map
fig.show_colorscale( cmap=cmap, vmin=vmin, vmax=vmax,stretch=colourscale_stretch, vmid=vmid)
    # add the colour bar, if requested
if colourbar:
fig.add_colorbar()
fig.colorbar.show(location=colourbar_loc, axis_label_text=colourbar_text)
# add beam
fig.add_beam(color=beam_colour)
# add scalebar at a distance of 302 pc (for B5)
distance = 302 *u.pc
scale=(0.1*u.pc/distance*u.rad).to(u.deg)
fig.add_scalebar(scale, label='0.1 pc', corner=scalebar_corner)
# fig.set_nan_color('0.7')
fig.ticks.set_color('black')
# write name of the region : B5
fig.add_label( x_bm_lbl, y_bm_lbl, 'B5', relative=True, color= bm_clr, size=15)
# zoom around the core
if zoom:
fig.recenter(56.914, 32.879, width=0.13, height=0.138)
# show contours of filament 1, filament 2, condensation 1, condensation 2 and condensation 3
if contours :
fig.show_contour(msk_regs['fil1'], colors=[contour_clr], levels=[0])
fig.show_contour(msk_regs['fil2'], colors=[contour_clr], levels=[0])
fig.show_contour(msk_regs['cnd1'], colors=[contour_clr], levels=[0], linestyles='dashed')
fig.show_contour(msk_regs['cnd2'], colors=[contour_clr], levels=[0], linestyles='dashed')
fig.show_contour(msk_regs['cnd3'], colors=[contour_clr], levels=[0], linestyles='dashed')
return fig
```
### calculate simple moment 0
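As context for the cell below (a hedged restatement of what the code computes): the integrated intensity is a simple channel sum, $I = \sum_i T_i\,\Delta v$, where the channel width in velocity follows from the radio convention, $\Delta v \simeq c\,\Delta\nu/\nu_0$ (in the code, $\nu_0$ is `CRVAL3` and $\Delta\nu$ is `CDELT3`).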
```python
sum_11 = np.nansum(nh3_11, axis=0) # sum along the spectral axis
dv_ms = (hd_amm['CDELT3']/hd_amm['CRVAL3']) * 299792458 # width of one channel in m/s, calculated using rest
# frequency and frequency resolution
dv_kms = dv_ms/1000 # channel width in km/s
integ_inten = sum_11 * dv_kms # integrated intensity
integ_inten[integ_inten == 0] = np.nan # np.nansum returns the sum of all-NaN slices as 0. This is to set
# the integrated intensity in pixels without emission
# as NaNs instead of 0
```
#### write out/read in the moment 0 data
```python
# fits.writeto('datacubes/nh3_11_mom0.fits', integ_inten, hd2d, overwrite=True)
integ_inten = fits.getdata('datacubes/nh3_11_mom0.fits')
```
```python
```
#### plot the integrated intensity in $\rm NH_3$ (1,1)
```python
fig = aplpy_plot(data=integ_inten, hd=hd2d, cmap='inferno', zoom=True, fgsz=(6,10), vmin=0, vmax=15, vmid=-2,
contours=True, contour_clr='k', background_grey=True, bckgr_gr_clr='0.5',
colourbar_text=r'Integrated Intensity (K km$\,s^{-1}$)', colourscale_stretch='log')
# show the coherent core boundary
fig.show_contour(coh_core_bndry_fle, levels=[0.5], colors='w')
### show markers pointing to the different regions inside the core ###
# Clump-1
fig.show_arrows(56.92, 32.92, 0.02, 0.002, width=0.3, head_width=2, head_length=2, color='w')
fig.add_label( 0.18, 0.82, 'clump-1', relative=True, color='w',size=13, weight=1000, horizontalalignment='left')
# Filament 1 and 2
fig.add_label( 0.44, 0.65, 'Fil-1', relative=True, color='w',size=13, weight=1000, rotation=70,
horizontalalignment='left')
fig.add_label( 0.52, 0.28, 'Fil-2', relative=True, color='w',size=13, weight=1000, horizontalalignment='left')
# Condensations 1, 2, and 3
fig.show_arrows(56.915, 32.873, -0.02, 0.006, width=0.3, head_width=2, head_length=2, color='w')
fig.add_label( 0.63, 0.5, 'cond-1', relative=True, color='w',size=13, weight=1000, horizontalalignment='left')
fig.show_arrows(56.925, 32.868, 0.02, 0.006, width=0.3, head_width=2, head_length=2, color='w')
fig.add_label( 0.26, 0.479, 'cond-2', relative=True, color='w',size=13, weight=1000)
fig.show_arrows(56.927, 32.858, 0.02, -0.003, width=0.3, head_width=2, head_length=2, color='w')
fig.add_label( 0.25, 0.3, 'cond-3', relative=True, color='w',size=13, weight=1000)
fig.savefig('plots/mom0_sel-chan_w-labels.pdf', dpi=1000)
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
No contour levels were found within the data range.

```python
```
### Show example spectra with 2, 3 component fits
```python
from astropy.wcs import WCS
wcs = WCS(hd2d)
from matplotlib.patches import Circle
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
```python
# x,y co-ordinates of the positions chosen for the example spectra
xlist = np.array([115, 125, 94])
ylist = np.array([73, 59, 94])
# lengths of the arrows in x- and y-axes
dxlist = np.array([30, 15, -40])
dylist = np.array([10, -40, 20])
# number labels of the positions (with 1-, 2-, and 3-components detected, respectively)
labels = np.array([1, 2, 3])
# calculate positions of markers and lengths of arrows in world co-ordinates
ra_list, dec_list = wcs.all_pix2world(xlist, ylist, 0) # position of pixel
ra_list_text, dec_list_text = wcs.all_pix2world(xlist + dxlist, ylist + dylist, 0) # position of marker
dra_list = ra_list - ra_list_text # length of arrow in RA
ddec_list = dec_list - dec_list_text # length of arrow in Dec
```
```python
cmaplst = ['#f0f9e8', '#ccebc5', '#7bccc4', '#2b8cbe'] # colours representing number of components fit
lcmap = ListedColormap(cmaplst) # set colourmap with these colours
lcmap.set_bad('#f0f0f0') # set NaN colour
# select positions above SNR=4
npeaks_snr5 = npeaks.copy() # duplicate of the npeaks data array
npeaks_snr5[npeaks_snr5 == 0] = 10 # set aside the 'true' 0s (pixels with no fits)
npeaks_snr5 = npeaks_snr5*(snr>4) # filter pixels with SNR>4, set all others to 0
npeaks_snr5[npeaks_snr5 == 0] = np.nan # set the SNR<=4 pixels as NaN
npeaks_snr5[npeaks_snr5 == 10] = 0 # restore the 'true' 0 component pixels
# show number of components fit across the region
fig = aplpy_plot(data=npeaks_snr5, hd=hd2d, cmap=lcmap, zoom=True, fgsz=(6,10), vmin=-0.5, vmax=3.5,
colourbar_text='number of components fit', background_grey=True, beam_colour='k', contours=True)
# a pixel can only have 0, 1, 2, or 3 components
fig.colorbar.set_ticks([0, 1, 2, 3])
# show the coherent core boundary
fig.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
# show and label the positions for the spectra to be shown
for i in range(len(labels)):
# labels
fig.add_label( ra_list_text[i], dec_list_text[i], labels[i], color='k', size=14, weight=500,
bbox=dict(boxstyle="circle", fc="0.8", alpha=1))
# arrows pointing to the pixels
fig.show_arrows(ra_list_text, dec_list_text, dra_list, ddec_list, head_width=2, width=0.1, color='k')
fig.savefig('plots/npeaks_w-contours_w-ex-spec-loc.pdf', dpi=1000)
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
No contour levels were found within the data range.

```python
```
## plot example spectra with fits
#### load files
```python
# NH3 (1,1) cube
cb11 = pyspeckit.Cube('datacubes/nh3_11_whole_cube_8as_3px.fits')
# fitcubes from 1-, 2-, and 3-component fits
ftcb_1cmp_tk6 = fits.getdata('bayesian_fit_files/B5-mle-x1.fits')
ftcb_2cmp_tk6 = fits.getdata('bayesian_fit_files/B5-mle-x2.fits')
ftcb_3cmp_tk6 = fits.getdata('bayesian_fit_files/B5-mle-x3.fits')
```
#### get xarr
```python
import astropy.units as u
```
```python
cb11.unit="K"
cb11.xarr.refX = pyspeckit.spectrum.models.ammonia.freq_dict['oneone']
cb11.xarr.refX_unit='Hz'
cb11.xarr.velocity_convention='radio'
cb11.xarr.convert_to_unit(u.km/u.s)
xarr11 = cb11.get_spectrum(37,36).xarr
```
#### model function for different number of components
```python
modelf = pyspeckit.spectrum.models.ammonia.cold_ammonia_model().n_modelfunc
modelf2 = pyspeckit.spectrum.models.ammonia.cold_ammonia_model(npeaks=2).n_modelfunc
modelf3 = pyspeckit.spectrum.models.ammonia.cold_ammonia_model(npeaks=3).n_modelfunc
```
### generate and plot model from fit parameters
```python
def plot_spec_1(x=None, y=None, ftcb1=None, res=False, res_off=0.5, res_ls='solid', xlim=(7,13),
c_sp='k', c_md_t='g', lw_md=0.5):
"""
plots spectra at given pixel along with a 1-component fit
inputs :
x, y : coordinates of the pixel
ftcb1 : array with the fit parameters of a 1-component fit
res : if set 'True', plots the residual. Default is 'False'
res_off : negative offset in y for the residuals, default is 0.5
res_ls : linestyle of the residuals, default is 'solid'
xlim : range in velocity axis to be plotted in km/s, default is 7-13
c_sp : colour of the spectra to be plotted, default is black
c_md_t : colour of the fit, default is green
lw_md : linewidth of the model spectra plotted, default is 0.5
"""
# generate axis to plot the spectra
fig, ax = plt.subplots(ncols=1, nrows=1, gridspec_kw={'left':0.1, 'right':0.98,'top':0.97,'bottom':0.13},
figsize=(7,3))
    # get spectrum at given pixel from the cube
sp = cb11.get_spectrum(x,y)
# plot spectra
ax.plot(sp.xarr, sp.data, drawstyle='steps-mid', lw=0.5, color=c_sp)
# get fit parameter from fitcube
par = ftcb1[:6,y,x]
# generate model from parameters
md = modelf(par)(xarr11)
# plot model
ax.plot(sp.xarr, md, drawstyle='steps-mid', lw=lw_md, color=c_md_t)
# plot residuals
if res:
ax.plot(sp.xarr, (sp.data-md)-res_off, drawstyle='steps-mid', lw=0.5, color='0.4', ls=res_ls)
ax.set_xlim(xlim)
plt.locator_params(axis='y', nbins=6)
plt.locator_params(axis='x', nbins=6)
ax.set_xlabel(r'velocity $(km\, s^{-1})$')
ax.set_ylabel(r'$ T_{MB}\ (K)$')
plt.tight_layout()
return fig, ax
```
```python
def plot_spec_2(x=None, y=None, ftcb2=None, res=False, res_off=0.5, res_ls='solid', indiv=True, md_sum=True,
xlim=(7,13), c_sp='k', c_md_t='g', c_md_n='r', c_md_b='b', lw_md=0.5):
"""
plots spectra at given pixel along with a 2-component fit
inputs :
x, y : coordinates of the pixel
ftcb2 : array with the fit parameters of a 2-component fit
res : if set 'True', plots the residual. Default is 'False'
res_off : negative offset in y for the residuals, default is 0.5
res_ls : linestyle of the residuals, default is 'solid'
indiv : if set 'True', plots the individual components of the fit. Default is 'True'
md_sum : if set 'True', plots the resultant 2-component fit. Default is 'True'
xlim : range in velocity axis to be plotted in km/s, default is 7-13
c_sp : colour of the spectra to be plotted, default is black
c_md_t : colour of the fit, default is green
c_md_n : colour of the first component, default is red
c_md_b : colour of the second component, default is blue
lw_md : linewidth of the model spectra plotted, default is 0.5
"""
# generate axis to plot the spectra
fig, ax = plt.subplots(ncols=1, nrows=1, gridspec_kw={'left':0.1, 'right':0.98,'top':0.97,'bottom':0.13},
figsize=(7,3))
    # get spectrum at given pixel from the cube
sp = cb11.get_spectrum(x,y)
# plot spectra
ax.plot(sp.xarr, sp.data, drawstyle='steps-mid', lw=0.5, color=c_sp)
# get fit parameter from fitcube
parn = ftcb2[:6,y,x]
parb = ftcb2[6:12,y,x]
# generate model from parameters and plot
# 2-component model
if md_sum:
pars = list(parn) + list(parb)
md = modelf2(pars)(xarr11)
ax.plot(sp.xarr, md, drawstyle='steps-mid', lw=lw_md, color=c_md_t)
# individual components
if indiv:
mdn = modelf(parn)(xarr11)
mdb = modelf(parb)(xarr11)
ax.plot(sp.xarr, mdn, drawstyle='steps-mid', lw=lw_md, color=c_md_n)
ax.plot(sp.xarr, mdb, drawstyle='steps-mid', lw=lw_md, color=c_md_b)
    # plot residuals (note: the residual uses the summed model, so this assumes md_sum=True)
if res:
ax.plot(sp.xarr, (sp.data-md)-res_off, drawstyle='steps-mid', lw=0.5, color='0.4', ls=res_ls)
ax.set_xlim(xlim)
plt.locator_params(axis='y', nbins=6)
plt.locator_params(axis='x', nbins=6)
ax.set_xlabel(r'velocity $(km\, s^{-1})$')
ax.set_ylabel(r'$ T_{MB}\ (K)$')
return fig, ax
```
```python
def plot_spec_3(x=None, y=None, ftcb3=None, res=False, res_off=0.5, res_ls='solid', indiv=True, md_sum=True,
xlim=(7,13), c_sp='k', c_md_t='g', c_md_1='r', c_md_2='purple', c_md_3='b', lw_md=0.5):
"""
plots spectra at given pixel along with a 3-component fit
inputs :
x, y : coordinates of the pixel
ftcb3 : array with the fit parameters of a 3-component fit
res : if set 'True', plots the residual. Default is 'False'
res_off : negative offset in y for the residuals, default is 0.5
res_ls : linestyle of the residuals, default is 'solid'
indiv : if set 'True', plots the individual components of the fit. Default is 'True'
    md_sum : if set 'True', plots the resultant 3-component fit. Default is 'True'
xlim : range in velocity axis to be plotted in km/s, default is 7-13
c_sp : colour of the spectra to be plotted, default is black
c_md_t : colour of the fit, default is green
c_md_1 : colour of the first component, default is red
c_md_2 : colour of the second component, default is purple
c_md_3 : colour of the third component, default is blue
lw_md : linewidth of the model spectra plotted, default is 0.5
"""
# generate axis to plot the spectra
fig, ax = plt.subplots(ncols=1, nrows=1, gridspec_kw={'left':0.1, 'right':0.98,'top':0.97,'bottom':0.13},
figsize=(7,3))
    # get spectrum at given pixel from the cube
sp = cb11.get_spectrum(x,y)
# plot spectra
ax.plot(sp.xarr, sp.data, drawstyle='steps-mid', lw=0.5, color=c_sp)
# get fit parameter from fitcube
par1 = ftcb3[:6,y,x]
par2 = ftcb3[6:12,y,x]
par3 = ftcb3[12:18,y,x]
    # skip pixels where any of the fit parameters is non-finite
if np.isfinite(par1[2]+par2[2]+par3[2]):
# 3-component model
if md_sum:
pars = list(par1) + list(par2) + list(par3)
md = modelf3(pars)(xarr11)
ax.plot(sp.xarr, md, drawstyle='steps-mid', lw=lw_md, color=c_md_t)
# individual components
if indiv:
md1 = modelf(par1)(xarr11)
md2 = modelf(par2)(xarr11)
md3 = modelf(par3)(xarr11)
ax.plot(sp.xarr, md1, drawstyle='steps-mid', lw=lw_md, color=c_md_1)
ax.plot(sp.xarr, md2, drawstyle='steps-mid', lw=lw_md, color=c_md_2)
ax.plot(sp.xarr, md3, drawstyle='steps-mid', lw=lw_md, color=c_md_3)
        # plot residuals (note: the residual uses the summed model, so this assumes md_sum=True)
if res:
ax.plot(sp.xarr, (sp.data-md)-res_off, drawstyle='steps-mid', lw=0.5, color='0.4', ls=res_ls)
ax.set_xlim(xlim)
plt.locator_params(axis='y', nbins=6)
plt.locator_params(axis='x', nbins=6)
ax.set_xlabel(r'velocity $(km\, s^{-1})$')
ax.set_ylabel(r'$ T_{MB}\ (K)$')
return fig, ax
```
```python
```
### spectra and fit in example pixels
##### coordinates chosen from inspection of npeaks map
#### 1-component fit
```python
xp = 115
yp = 73
fig, ax = plot_spec_1(x=xp, y=yp, res=True, xlim=(8,12), res_off=0.4, ftcb1=ftcb_1cmp_tk6, lw_md=2,
c_md_t='#1b9e77', res_ls='dashed')
ax.text( 0.94, 0.85, '1', horizontalalignment='right', transform=ax.transAxes, color='k', fontsize=20,
bbox=dict(boxstyle="circle", fc="0.8", alpha=1))
fig.savefig('plots/example_spectra/x{0}y{1}_1cmp.pdf'.format(xp, yp))
```
This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.

#### 2-component fit
```python
xp = 125
yp = 59
fig, ax = plot_spec_2(x=xp, y=yp, indiv=True,res=True, xlim=(8,12),res_off=0.5, ftcb2=ftcb_2cmp_tk6,
c_md_b='b', c_md_n='#d95f02', c_md_t='#1b9e77', lw_md=2, res_ls='dashed')
ax.text( 0.94, 0.85, '2', horizontalalignment='right', transform=ax.transAxes, color='k', fontsize=20,
bbox=dict(boxstyle="circle", fc="0.8", alpha=1))
fig.savefig('plots/example_spectra/x{0}y{1}_2cmp.pdf'.format(xp, yp))
```
tex > trot in the ammonia model. This is unphysical and suggests that you may need to constrain tex. See ammonia_model_restricted_tex.

#### 3-component fit
```python
xp = 94
yp = 94
fig, ax = plot_spec_3(x=xp, y=yp, indiv=True, res=True, xlim=(8,12),res_off=0.5, ftcb3=ftcb_3cmp_tk6,
c_md_1='#d95f02', c_md_2='b', c_md_3='purple', c_md_t='#1b9e77', lw_md=2, res_ls='dashed')
ax.text( 0.94, 0.85, '3', horizontalalignment='right', transform=ax.transAxes, color='k', fontsize=20,
bbox=dict(boxstyle="circle", fc="0.8", alpha=1))
fig.savefig('plots/example_spectra/x{0}y{1}_3cmp.pdf'.format(xp, yp))
```

```python
```
```python
```
## Plot velocities and velocity dispersions of different components
#### obtain data from files
```python
arr1_1 = fits.getdata('sorted_comps/first_sort_vel-sig-tmb_extended.fits') # extended component
dark_blue_arr = fits.getdata('sorted_comps/dark_blue_vel-sig-tmb.fits') # dark blue component
mid_blue_arr = fits.getdata('sorted_comps/mid_blue_vel-sig-tmb.fits') # mid blue component
mid_red_arr = fits.getdata('sorted_comps/mid_red_vel-sig-tmb.fits') # mid red component
dark_red_arr = fits.getdata('sorted_comps/dark_red_vel-sig-tmb.fits') # dark red component
```
### extended component
##### centroid velocity
```python
vel_ext = arr1_1[0]*(snr>4) # only consider pixels with SNR>4, for 'good' single-component fit
vel_ext[vel_ext == 0] = np.nan # set other pixels as NaNs
# show plot
fig = aplpy_plot(data=vel_ext, hd=hd2d, cmap='RdYlBu_r', zoom=True, fgsz=(6,10), contours=True,
background_grey=True)
# label condensations
# fig.add_label( 0.534, 0.52, 'cond-1', relative=True, color='k',size=10, weight=500, rotation=70,
# horizontalalignment='left')
# fig.add_label( 0.38, 0.47, 'cond-2', relative=True, color='k',size=10, weight=500, rotation=60)
# fig.add_label( 0.45, 0.28, 'cond-3', relative=True, color='k',size=10, weight=500, rotation=310)
# show the coherent core boundary
fig.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
fig.savefig('plots/vel_extended_w-coh-cont.pdf', dpi=1000)
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
No contour levels were found within the data range.

##### velocity dispersion
```python
sig_ext = arr1_1[1]*(snr>4) # only consider pixels with SNR>4, for 'good' single-component fit
sig_ext[sig_ext == 0] = np.nan # set other pixels as NaNs
# show plot
fig = aplpy_plot(data=sig_ext, hd=hd2d, cmap='inferno', zoom=True, fgsz=(6,10), vmin=0, vmax=0.5, contours=True,
contour_clr='w', background_grey=True, colourbar_text=r'$\sigma_v\ (km\,s^{-1}$)')
# show the coherent core boundary
fig.show_contour(coh_core_bndry_fle, levels=[0.5], colors='w', linestyles='dotted', linewidths=3)
fig.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted', linewidths=1)
fig.savefig('plots/sig_extended_w-coh-cont.pdf', dpi=200)
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
No contour levels were found within the data range.

```python
```
### Additional components
##### centroid velocity
```python
# create actual figure instance
fig = plt.figure(figsize=(6,9))
# subplot with dark blue component
fig1 = aplpy_plot(fig=fig, data=dark_blue_arr[0], hd=hd2d, cmap='RdYlBu_r', zoom=True, sbplt=[0.12, 0.5, 0.4, 0.4],
colourbar=False, vmin=9.7, vmax=10.7, contours=True, background_grey=True, y_bm_lbl=0.1)
fig1.tick_labels.hide_x()
fig1.axis_labels.hide()
# subplot with mid blue component
fig2 = aplpy_plot(fig=fig, data=mid_blue_arr[0], hd=hd2d, cmap='RdYlBu_r', zoom=True, sbplt=[0.55, 0.5, 0.4, 0.4],
colourbar=False, vmin=9.7, vmax=10.7, contours=True, background_grey=True, y_bm_lbl=0.1)
fig2.tick_labels.hide()
fig2.axis_labels.hide()
# subplot with mid red component
fig3 = aplpy_plot(fig=fig, data=mid_red_arr[0], hd=hd2d, cmap='RdYlBu_r', zoom=True, sbplt=[0.12, 0.07, 0.4, 0.4],
colourbar=False, vmin=9.7, vmax=10.7, contours=True, background_grey=True, y_bm_lbl=0.1)
fig3.axis_labels.hide_y()
# subplot with dark red component
fig4 = aplpy_plot(fig=fig, data=dark_red_arr[0], hd=hd2d, cmap='RdYlBu_r', zoom=True, sbplt=[0.55, 0.07, 0.4, 0.4],
colourbar=False, vmin=9.7, vmax=10.7, contours=True, background_grey=True, y_bm_lbl=0.1)
fig4.tick_labels.hide_y()
fig4.axis_labels.hide_y()
# zoom on each subplot to clearly show the filaments
fig1.recenter(56.907, 32.8833, width=0.07, height=0.11)
fig2.recenter(56.907, 32.8833, width=0.07, height=0.11)
fig3.recenter(56.907, 32.8833, width=0.07, height=0.11)
fig4.recenter(56.907, 32.8833, width=0.07, height=0.11)
# write name of components shown on each subplot
fig1.add_label( 0.8, 0.85, 'far-blue', relative=True, color='b', size=15, weight=500)
fig2.add_label( 0.8, 0.85, 'mid-blue', relative=True, color='dodgerblue', size=15, weight=500)
fig3.add_label( 0.8, 0.85, 'mid-red', relative=True, color='#ef6548', size=15, weight=500)
fig4.add_label( 0.8, 0.85, 'far-red', relative=True, color='r', size=15, weight=500)
# add labels to the subplots
fig1.add_label( 0.12, 0.93, '(a)', relative=True, color='k', size=14, weight=500)
fig2.add_label( 0.12, 0.93, '(b)', relative=True, color='k', size=14, weight=500)
fig3.add_label( 0.12, 0.93, '(c)', relative=True, color='k', size=14, weight=500)
fig4.add_label( 0.12, 0.93, '(d)', relative=True, color='k', size=14, weight=500)
# show the coherent core boundary
fig1.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
fig2.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
fig3.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
fig4.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
plt.tight_layout()
fig.savefig('plots/vel_4-extra-comp_w-coh-cont_new.pdf', dpi=1000)
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
No contour levels were found within the data range.
This figure includes Axes that are not compatible with tight_layout, so results might be incorrect.

##### velocity dispersion
```python
# create actual figure instance
fig = plt.figure(figsize=(6,10))
# subplot with dark blue component
fig1 = aplpy_plot(fig=fig, data=dark_blue_arr[1], hd=hd2d, cmap='inferno', zoom=True, sbplt=[0.12, 0.52, 0.4, 0.425],
colourbar=False, vmin=0, vmax=0.4, contours=True, contour_clr='w', background_grey=True, y_bm_lbl=0.1)
fig1.tick_labels.hide_x()
fig1.axis_labels.hide_x()
# subplot with mid blue component
fig2 = aplpy_plot(fig=fig, data=mid_blue_arr[1], hd=hd2d, cmap='inferno', zoom=True, sbplt=[0.55, 0.52, 0.4, 0.425],
colourbar=False, vmin=0, vmax=0.4, contours=True, contour_clr='w', background_grey=True, y_bm_lbl=0.1)
fig2.tick_labels.hide()
fig2.axis_labels.hide()
# subplot with mid red component
fig3 = aplpy_plot(fig=fig, data=mid_red_arr[1], hd=hd2d, cmap='inferno', zoom=True, sbplt=[0.12, 0.07, 0.4, 0.425],
colourbar=False, vmin=0, vmax=0.4, contours=True, contour_clr='w', background_grey=True, y_bm_lbl=0.1)
# subplot with dark red component
fig4 = aplpy_plot(fig=fig, data=dark_red_arr[1], hd=hd2d, cmap='inferno', zoom=True, sbplt=[0.55, 0.07, 0.4, 0.425],
colourbar=False, vmin=0, vmax=0.4, contours=True, contour_clr='w', background_grey=True, y_bm_lbl=0.1)
fig4.tick_labels.hide_y()
fig4.axis_labels.hide_y()
# zoom on each subplot to clearly show the filaments
fig1.recenter(56.907, 32.8833, width=0.07, height=0.11)
fig2.recenter(56.907, 32.8833, width=0.07, height=0.11)
fig3.recenter(56.907, 32.8833, width=0.07, height=0.11)
fig4.recenter(56.907, 32.8833, width=0.07, height=0.11)
# write name of components shown on each subplot
fig1.add_label( 0.8, 0.5, 'far-blue', relative=True, color='b', size=15, weight=500)
fig2.add_label( 0.8, 0.5, 'mid-blue', relative=True, color='dodgerblue', size=15, weight=500)
fig3.add_label( 0.8, 0.5, 'mid-red', relative=True, color='#ef6548', size=15, weight=500)
fig4.add_label( 0.8, 0.5, 'far-red', relative=True, color='r', size=15, weight=500)
# add labels to the subplots
fig1.add_label( 0.12, 0.93, '(a)', relative=True, color='k', size=14, weight=500)
fig2.add_label( 0.12, 0.93, '(b)', relative=True, color='k', size=14, weight=500)
fig3.add_label( 0.12, 0.93, '(c)', relative=True, color='k', size=14, weight=500)
fig4.add_label( 0.12, 0.93, '(d)', relative=True, color='k', size=14, weight=500)
# show the coherent core boundary
fig1.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
fig2.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
fig3.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
fig4.show_contour(coh_core_bndry_fle, levels=[0.5], colors='k', linestyles='dotted')
plt.tight_layout()
fig.savefig('plots/sig_4-extra-comp_w-coh-cont.pdf', dpi=1000)
```
```python
```
```python
```
### Bayes K maps
```python
K_arr_12 = fits.getdata('bayesian_fit_files/B5-Ks.fits')
K_arr_34 = fits.getdata('bayesian_fit_files/B5-Ks.fits')
```
##### set pixels with SNR<=4 to NaNs
```python
K_arr_01_snr5 = K_arr_12[0].copy()
K_arr_01_snr5[K_arr_01_snr5 == 0] = -2
K_arr_01_snr5 = K_arr_01_snr5*(snr>4)
K_arr_01_snr5[K_arr_01_snr5 == 0] = np.nan
K_arr_01_snr5[K_arr_01_snr5 == -2] = 0
```
```python
K_arr_12_snr5 = K_arr_12[1].copy()
K_arr_12_snr5[K_arr_12_snr5 == 0] = -2
K_arr_12_snr5 = K_arr_12_snr5*(snr>4)
K_arr_12_snr5[K_arr_12_snr5 == 0] = np.nan
K_arr_12_snr5[K_arr_12_snr5 == -2] = 0
```
```python
K_arr_23_snr5 = K_arr_34[2].copy()
K_arr_23_snr5[K_arr_23_snr5 == 0] = -2
K_arr_23_snr5 = K_arr_23_snr5*(snr>4)
K_arr_23_snr5[K_arr_23_snr5 == 0] = np.nan
K_arr_23_snr5[K_arr_23_snr5 == -2] = 0
```
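The three cells above repeat the same sentinel trick. As a hedged refactor sketch (not in the original notebook), it could be factored into a single helper:
```python
def mask_low_snr(arr, snr, thresh=4, sentinel=-2):
    """Set pixels with SNR <= thresh to NaN, preserving 'true' zeros in arr."""
    out = arr.copy()
    out[out == 0] = sentinel    # set aside genuine zeros
    out = out * (snr > thresh)  # zero out low-SNR pixels
    out[out == 0] = np.nan      # low-SNR pixels become NaN
    out[out == sentinel] = 0    # restore genuine zeros
    return out

# e.g. K_arr_01_snr5 = mask_low_snr(K_arr_12[0], snr)
```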
```python
lcmap = 'viridis'
fig = aplpy_plot(data=K_arr_01_snr5, hd=hd2d, cmap=lcmap, zoom=True, fgsz=(6,10), vmin=-2, vmax=2000, vmid=-3,
colourbar_text=r'ln $K^1_0$', background_grey=True, beam_colour='k', contours=True,
colourscale_stretch='log')
fig.colorbar.set_ticks([0, 10, 100, 1000])
fig.savefig('plots/K01_arr_w-contours.pdf', dpi=200)
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
No contour levels were found within the data range.

```python
lcmap = 'viridis'
fig = aplpy_plot(data=K_arr_12_snr5, hd=hd2d, cmap=lcmap, zoom=True, fgsz=(6,10), vmin=0, vmax=100, vmid=-1,
colourbar_text=r'ln $K^2_1$', background_grey=True, beam_colour='k', contours=False,
colourscale_stretch='log')
fig.colorbar.set_ticks([0, 5, 10, 100])
# show contours of filaments and condensations
fig.show_contour(msk_regs['fil1'], colors=['w'], levels=[0])
fig.show_contour(msk_regs['fil2'], colors=['w'], levels=[0])
fig.show_contour(msk_regs['cnd1'], colors=['k'], levels=[0], linestyles='dashed')
fig.show_contour(msk_regs['cnd2'], colors=['k'], levels=[0], linestyles='dashed')
fig.show_contour(msk_regs['cnd3'], colors=['k'], levels=[0], linestyles='dashed')
fig.savefig('plots/K12_arr_w-contours.pdf', dpi=200)
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
No contour levels were found within the data range.

```python
lcmap = 'viridis'
fig = aplpy_plot(data=K_arr_23_snr5, hd=hd2d, cmap=lcmap, zoom=True, fgsz=(6,10), vmin=-1, vmax=20, vmid=-3,
colourbar_text=r'ln $K^3_2$', background_grey=True, beam_colour='k', contours=True,
colourscale_stretch='log', contour_clr='w')
fig.colorbar.set_ticks([0, 5, 10])
# show contours of filaments and condensations
fig.show_contour(msk_regs['fil1'], colors=['w'], levels=[0])
fig.show_contour(msk_regs['fil2'], colors=['w'], levels=[0])
fig.show_contour(msk_regs['cnd1'], colors=['k'], levels=[0], linestyles='dashed')
fig.show_contour(msk_regs['cnd2'], colors=['w'], levels=[0], linestyles='dashed')
fig.show_contour(msk_regs['cnd3'], colors=['k'], levels=[0], linestyles='dashed')
fig.savefig('plots/K23_arr_w-contours.pdf', dpi=200)
```
'obsfix' made the change 'Set OBSGEO-L to -107.618000 from OBSGEO-[XYZ].
Set OBSGEO-B to 34.078827 from OBSGEO-[XYZ].
Set OBSGEO-H to 2115.607 from OBSGEO-[XYZ]'.
WARNING: No WCS information found in header - using pixel coordinates [aplpy.header]
No contour levels were found within the data range.

```python
```
|
SpandanChREPO_NAMEBarnard5_filamentsPATH_START.@Barnard5_filaments_extracted@Barnard5_filaments-main@make_para_maps.ipynb@.PATH_END.py
|
{
"filename": "_column.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/map/domain/_column.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColumnValidator(_plotly_utils.basevalidators.IntegerValidator):
def __init__(self, plotly_name="column", parent_name="layout.map.domain", **kwargs):
super(ColumnValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
min=kwargs.pop("min", 0),
**kwargs,
)
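# Hedged usage sketch (not part of the original file): plotly's base validators
# expose validate_coerce(), which returns the coerced value or raises ValueError.
if __name__ == "__main__":
    v = ColumnValidator()
    assert v.validate_coerce(1) == 1  # accepted: an integer >= min (0)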
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@map@domain@_column.py@.PATH_END.py
|
{
"filename": "time_varying.md",
"repo_name": "MWATelescope/mwa_hyperdrive",
"repo_path": "mwa_hyperdrive_extracted/mwa_hyperdrive-main/mdbook/src/user/di_cal/advanced/time_varying.md",
"type": "Markdown"
}
|
# Varying solutions over time
~~~admonish tip
See [this page](../../../defs/blocks.md) for information on timeblocks.
~~~
By default, `di-calibrate` uses only one "timeblock", i.e. all data timesteps
are averaged together during calibration. This provides good signal-to-noise,
but it is possible that calibration is improved by taking time variations into
account. This is done with `--timesteps-per-timeblock` (`-t` for short).
If `--timesteps-per-timeblock` is given a value of 4, then every 4 timesteps are
calibrated together and written out as a timeblock. Values with time units (e.g.
`8s`) are also accepted; in that case, every 8 seconds' worth of data is
averaged during calibration and written out as a timeblock.
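As a hedged illustration of the grouping arithmetic (the observation length and integration time here are hypothetical):

```python
import math

n_timesteps, step_seconds = 112, 2                  # hypothetical: 112 timesteps of 2 s each
print(math.ceil(n_timesteps / 4))                   # -t 4  -> 28 timeblocks
print(math.ceil(n_timesteps / (8 / step_seconds)))  # -t 8s -> 4 timesteps per block, 28 timeblocks
```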
Depending on the number of timesteps in the data, using `-t` could result in
*many* timeblocks written to the calibration solutions. Each solution timeblock
is plotted when these solutions are given to `solutions-plot`. For each timestep
in question, the best solution timeblock is used when running `solutions-apply`.
## Implementation
When multiple timeblocks are to be made, `hyperdrive` will do a pass of
calibration using *all* timesteps to provide each timeblock's calibration with a
good "initial guess" of what their solutions should be.
|
MWATelescopeREPO_NAMEmwa_hyperdrivePATH_START.@mwa_hyperdrive_extracted@mwa_hyperdrive-main@mdbook@src@user@di_cal@advanced@time_varying.md@.PATH_END.py
|
{
"filename": "_stylesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sankey/link/hoverlabel/font/_stylesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StylesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self,
plotly_name="stylesrc",
parent_name="sankey.link.hoverlabel.font",
**kwargs,
):
super(StylesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sankey@link@hoverlabel@font@_stylesrc.py@.PATH_END.py
|
{
"filename": "auth.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/reference/hub/auth.md",
"type": "Markdown"
}
|
---
description: Learn how to manage API key and cookie-based authentication in Ultralytics with the Auth class. Step-by-step guide for effective authentication.
keywords: Ultralytics, authentication, API key, cookies, Auth class, YOLO, API, guide
---
# Reference for `ultralytics/hub/auth.py`
!!! note
This file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/auth.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/hub/auth.py). If you spot a problem please help fix it by [contributing](https://docs.ultralytics.com/help/contributing/) a [Pull Request](https://github.com/ultralytics/ultralytics/edit/main/ultralytics/hub/auth.py) 🛠️. Thank you 🙏!
<br>
## ::: ultralytics.hub.auth.Auth
<br><br>
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@reference@hub@auth.md@.PATH_END.py
|
{
"filename": "test_params_estim.py",
"repo_name": "mavrix93/LightCurvesClassifier",
"repo_path": "LightCurvesClassifier_extracted/LightCurvesClassifier-master/test/stars_processing/test_params_estim.py",
"type": "Python"
}
|
"""
Created on Jan 25, 2017
@author: Martin Vo
"""
import unittest
import numpy as np
from lcc.entities.star import Star
from lcc.stars_processing.deciders.supervised_deciders import QDADec
from lcc.stars_processing.descriptors.abbe_value_descr import AbbeValueDescr
from lcc.stars_processing.descriptors.curves_shape_descr import CurvesShapeDescr
from lcc.stars_processing.tools.params_estim import ParamsEstimator
# TODO: Need to be fixed
class Test(unittest.TestCase):
def setUp(self):
N = 20
x = np.linspace(0, 10, 100)
self.template = []
for ii in range(N):
st = Star(name="TemplateStar%i" % ii)
st.putLightCurve([x, np.cos(x) + np.random.normal(x) * 0.1])
self.template.append(st)
self.variables = []
for ii in range(N):
st = Star(name="VariableStar%i" % ii)
st.putLightCurve([x, np.sin(x) + np.random.normal(x) * 0.1])
self.variables.append(st)
self.noisy = []
for ii in range(N):
st = Star(name="NonvariableStar%i" % ii)
st.putLightCurve([x, np.random.normal(x) * 2])
self.noisy.append(st)
def testName(self):
deciders = [QDADec]
descriptors = [AbbeValueDescr, CurvesShapeDescr]
static_params = {"AbbeValueDescr": {"bins": 100},
"CurvesShapeDescr": {"comp_stars": self.template}}
tuned_params = [{"CurvesShapeDescr": {"days_per_bin": 3, "alphabet_size": 10}},
{"CurvesShapeDescr": {"days_per_bin": 0.5, "alphabet_size": 12}}]
est = ParamsEstimator(self.variables, self.noisy, descriptors, deciders,
tuned_params, static_params=static_params)
star_filter, stat, best_params = est.fit()
assert best_params is not None
if __name__ == "__main__":
unittest.main()
|
mavrix93REPO_NAMELightCurvesClassifierPATH_START.@LightCurvesClassifier_extracted@LightCurvesClassifier-master@test@stars_processing@test_params_estim.py@.PATH_END.py
|
{
"filename": "testidefix.py",
"repo_name": "idefix-code/idefix",
"repo_path": "idefix_extracted/idefix-master/test/Planet/PlanetSpiral2D/python/testidefix.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding: utf-8
import sys
import os
sys.path.append(os.getenv("IDEFIX_DIR"))
from pytools.vtk_io import readVTK
import numpy as np
import matplotlib.pyplot as plt
on = 3
ds = readVTK(f"../data.{on:04d}.vtk", geometry="polar")
Rmed = ds.x
phimed = (ds.y/(np.pi))%2*np.pi-np.pi
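# note the operator precedence above: ((phi/pi) % 2) * pi - pi, i.e. the azimuth is wrapped into [-pi, pi)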
dR = np.ediff1d(ds.x)[0]
h0 = 0.05
flaring = 0.0
# Planet in a fixed circular orbit
Rp = 1.0
hp = h0*pow(Rp,flaring)
# See Bae+Zhu (2018, I)
# Dominant azimuthal wavenumber when
# perturbation driven by point mass perturber
mdom = int((1/2)*pow(hp,-1))
# Lindblad resonance radii
Rmp = pow(1+1/mdom,2/3)*Rp
Rmm = pow(1-1/mdom,2/3)*Rp
def find_nearest(array, value):
array = np.asarray(array)
idx = (np.abs(array - value)).argmin()
return idx
def omega(
R,
*,
keplerian=True,
info=False,
):
if info:
print(f"keplerian = {keplerian}")
if keplerian:
return(pow(R,-3/2))
def soundSpeed(
R,
*,
h0=h0,
flaring=flaring,
isotherm=True,
info=False,
):
if info:
print(f"isotherm = {isotherm}")
print(f"h0 = {h0:.2f}")
print(f"flaring = {flaring:.2f}")
if isotherm:
return(h0*pow(R,flaring-1/2))
def _integral(
RR,
*,
resonance=-1,
m=mdom,
Rp=Rp,
h0=h0,
flaring=flaring,
info=False,
):
if info:
print(f"planet in Rp = {Rp:.2f}")
print(f"dominant phi-wavenumber m = {m}")
Rm = pow(1+np.sign(resonance)*1/m,2/3)*Rp
data = (omega(Rmed, info=info)/soundSpeed(Rmed, h0=h0, flaring=flaring, info=info))*pow(abs(pow(1-pow(Rmed/Rp,3/2),2)-1/m/m),1/2)
k = find_nearest(Rmed, Rm)
kk = find_nearest(Rmed, RR)
if np.sign(resonance) > 0:
integ = (
np.nansum(
(data * dR)[k : kk + 1],
dtype="float64",
)
)
else:
integ = -(
np.nansum(
(data * dR)[kk : k + 1],
dtype="float64",
)
)
return integ
# Phase equation for m,n
#(phi-wavenumber,order of the spiral)
def phaseEquation(
RR,
*,
resonance=-1,
m=mdom,
n=0,
Rp=Rp,
info=False,
):
phieq = -np.sign(RR-Rp)*np.pi/4/m + 2*np.pi*n/m -_integral(
RR,
resonance=resonance,
m=mdom,
Rp=Rp,
h0=h0,
flaring=flaring,
info=info,
)
return phieq
plot = False
RwkzMin = find_nearest(Rmed, 0.7)
RwkzMax = find_nearest(Rmed, 1.35)
spiralR = []
spiralP = []
spiralP_theo = []
for ir in range(RwkzMin, RwkzMax+1):
if (Rmed[ir] < Rmm):
rho = ds.data["RHO"][ir,:,0]
spiralR.append(Rmed[ir])
spiralP.append(phimed[find_nearest(rho,rho.max())])
spiralP_theo.append(phaseEquation(Rmed[ir],resonance=-1))
if (Rmed[ir] > Rmp):
rho = ds.data["RHO"][ir,:,0]
spiralR.append(Rmed[ir])
spiralP.append(phimed[find_nearest(rho,rho.max())])
spiralP_theo.append(phaseEquation(Rmed[ir],resonance=+1))
spiralR_theo = spiralR
spiralP = np.array(spiralP)
spiralP_theo = (np.array(spiralP_theo)/(np.pi))%2*np.pi-np.pi
if plot:
fig, ax = plt.subplots(figsize=(5,4))
ax.axvline(x=1.0, ls="--", c="k")
ax.scatter(spiralR_theo,spiralP_theo, c="r",marker="+", label="theoretical")
ax.scatter(spiralR,spiralP,c="k",marker="x", label=r"max $\rho$ simulation")
fig.tight_layout()
ax.legend(frameon=False)
fig2, ax2 = plt.subplots()
ax2.scatter(spiralR, 100*(spiralP-spiralP_theo)/spiralP_theo)
fig2.tight_layout()
plt.show()
mean = np.mean(100*(spiralP-spiralP_theo)/spiralP_theo)
std = np.std(100*(spiralP-spiralP_theo)/spiralP_theo)
error_mean = mean
error_dispersion = np.max([abs(mean+std),abs(mean-std)])
print("Mean Error=%.2f"%error_mean)
print("max(mean +/- std)=%.2f"%error_dispersion)
if error_mean<1.0 and error_dispersion<5.0:
print("SUCCESS!")
sys.exit(0)
else:
print("FAILURE!")
sys.exit(1)
|
idefix-codeREPO_NAMEidefixPATH_START.@idefix_extracted@idefix-master@test@Planet@PlanetSpiral2D@python@testidefix.py@.PATH_END.py
|
{
"filename": "objects.KDE.ipynb",
"repo_name": "mwaskom/seaborn",
"repo_path": "seaborn_extracted/seaborn-master/doc/_docstrings/objects.KDE.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import seaborn.objects as so
from seaborn import load_dataset
penguins = load_dataset("penguins")
```
This stat transforms observations into a smooth function representing the estimated density:
```python
p = so.Plot(penguins, x="flipper_length_mm")
p.add(so.Area(), so.KDE())
```
Adjust the smoothing bandwidth to see more or fewer details:
```python
p.add(so.Area(), so.KDE(bw_adjust=0.25))
```
The curve will extend beyond observed values in the dataset:
```python
p2 = p.add(so.Bars(alpha=.3), so.Hist("density"))
p2.add(so.Line(), so.KDE())
```
Control the range of the density curve relative to the observations using `cut`:
```python
p2.add(so.Line(), so.KDE(cut=0))
```
When observations are assigned to the `y` variable, the density will be shown for `x`:
```python
so.Plot(penguins, y="flipper_length_mm").add(so.Area(), so.KDE())
```
Use `gridsize` to increase or decrease the resolution of the grid where the density is evaluated:
```python
p.add(so.Dots(), so.KDE(gridsize=100))
```
Or pass `None` to evaluate the density at the original datapoints:
```python
p.add(so.Dots(), so.KDE(gridsize=None))
```
Other variables will define groups for the estimation:
```python
p.add(so.Area(), so.KDE(), color="species")
```
By default, the density is normalized across all groups (i.e., the joint density is shown); pass `common_norm=False` to show conditional densities:
```python
p.add(so.Area(), so.KDE(common_norm=False), color="species")
```
Or pass a list of variables to condition on:
```python
(
p.facet("sex")
.add(so.Area(), so.KDE(common_norm=["col"]), color="species")
)
```
This stat can be combined with other transforms, such as :class:`Stack` (when `common_grid=True`):
```python
p.add(so.Area(), so.KDE(), so.Stack(), color="sex")
```
Set `cumulative=True` to integrate the density:
```python
p.add(so.Line(), so.KDE(cumulative=True))
```
|
mwaskomREPO_NAMEseabornPATH_START.@seaborn_extracted@seaborn-master@doc@_docstrings@objects.KDE.ipynb@.PATH_END.py
|
{
"filename": "linalg.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/jax/_src/numpy/linalg.py",
"type": "Python"
}
|
# Copyright 2018 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from collections.abc import Sequence
from functools import partial
import itertools
import math
import warnings
import numpy as np
import operator
from typing import Literal, NamedTuple, overload
import jax
from jax import jit, custom_jvp
from jax import lax
from jax._src import deprecations
from jax._src.lax import lax as lax_internal
from jax._src.lax.lax import PrecisionLike
from jax._src.lax import linalg as lax_linalg
from jax._src.numpy import lax_numpy as jnp
from jax._src.numpy import reductions, ufuncs
from jax._src.numpy.util import promote_dtypes_inexact, check_arraylike
from jax._src.util import canonicalize_axis, set_module
from jax._src.typing import ArrayLike, Array, DTypeLike, DeprecatedArg
export = set_module('jax.numpy.linalg')
class EighResult(NamedTuple):
eigenvalues: jax.Array
eigenvectors: jax.Array
class QRResult(NamedTuple):
Q: jax.Array
R: jax.Array
class SlogdetResult(NamedTuple):
sign: jax.Array
logabsdet: jax.Array
class SVDResult(NamedTuple):
U: jax.Array
S: jax.Array
Vh: jax.Array
def _H(x: ArrayLike) -> Array:
return ufuncs.conjugate(jnp.matrix_transpose(x))
def _symmetrize(x: Array) -> Array: return (x + _H(x)) / 2
@export
@partial(jit, static_argnames=['upper'])
def cholesky(a: ArrayLike, *, upper: bool = False) -> Array:
"""Compute the Cholesky decomposition of a matrix.
JAX implementation of :func:`numpy.linalg.cholesky`.
The Cholesky decomposition of a matrix `A` is:
.. math::
A = U^HU
or
.. math::
A = LL^H
where `U` is an upper-triangular matrix and `L` is a lower-triangular matrix, and
:math:`X^H` is the Hermitian transpose of `X`.
Args:
a: input array, representing a (batched) positive-definite hermitian matrix.
Must have shape ``(..., N, N)``.
upper: if True, compute the upper Cholesky decomposition `U`. If False
(default), compute the lower Cholesky decomposition `L`.
Returns:
array of shape ``(..., N, N)`` representing the Cholesky decomposition
of the input. If the input is not Hermitian positive-definite, the result
will contain NaN entries.
See also:
- :func:`jax.scipy.linalg.cholesky`: SciPy-style Cholesky API
- :func:`jax.lax.linalg.cholesky`: XLA-style Cholesky API
Examples:
A small real Hermitian positive-definite matrix:
>>> x = jnp.array([[2., 1.],
... [1., 2.]])
Lower Cholesky factorization:
>>> jnp.linalg.cholesky(x)
Array([[1.4142135 , 0. ],
[0.70710677, 1.2247449 ]], dtype=float32)
Upper Cholesky factorization:
>>> jnp.linalg.cholesky(x, upper=True)
Array([[1.4142135 , 0.70710677],
[0. , 1.2247449 ]], dtype=float32)
Reconstructing ``x`` from its factorization:
>>> L = jnp.linalg.cholesky(x)
>>> jnp.allclose(x, L @ L.T)
Array(True, dtype=bool)
"""
check_arraylike("jnp.linalg.cholesky", a)
a, = promote_dtypes_inexact(jnp.asarray(a))
L = lax_linalg.cholesky(a)
return L.mT.conj() if upper else L
@overload
def svd(
a: ArrayLike,
full_matrices: bool = True,
*,
compute_uv: Literal[True],
hermitian: bool = False,
subset_by_index: tuple[int, int] | None = None,
) -> SVDResult:
...
@overload
def svd(
a: ArrayLike,
full_matrices: bool,
compute_uv: Literal[True],
hermitian: bool = False,
subset_by_index: tuple[int, int] | None = None,
) -> SVDResult:
...
@overload
def svd(
a: ArrayLike,
full_matrices: bool = True,
*,
compute_uv: Literal[False],
hermitian: bool = False,
subset_by_index: tuple[int, int] | None = None,
) -> Array:
...
@overload
def svd(
a: ArrayLike,
full_matrices: bool,
compute_uv: Literal[False],
hermitian: bool = False,
subset_by_index: tuple[int, int] | None = None,
) -> Array:
...
@overload
def svd(
a: ArrayLike,
full_matrices: bool = True,
compute_uv: bool = True,
hermitian: bool = False,
subset_by_index: tuple[int, int] | None = None,
) -> Array | SVDResult:
...
@export
@partial(
jit,
static_argnames=(
"full_matrices",
"compute_uv",
"hermitian",
"subset_by_index",
),
)
def svd(
a: ArrayLike,
full_matrices: bool = True,
compute_uv: bool = True,
hermitian: bool = False,
subset_by_index: tuple[int, int] | None = None,
) -> Array | SVDResult:
r"""Compute the singular value decomposition.
JAX implementation of :func:`numpy.linalg.svd`, implemented in terms of
:func:`jax.lax.linalg.svd`.
The SVD of a matrix `A` is given by
.. math::
A = U\Sigma V^H
- :math:`U` contains the left singular vectors and satisfies :math:`U^HU=I`
- :math:`V` contains the right singular vectors and satisfies :math:`V^HV=I`
- :math:`\Sigma` is a diagonal matrix of singular values.
Args:
a: input array, of shape ``(..., N, M)``
full_matrices: if True (default) compute the full matrices; i.e. ``u`` and ``vh`` have
shape ``(..., N, N)`` and ``(..., M, M)``. If False, then the shapes are
``(..., N, K)`` and ``(..., K, M)`` with ``K = min(N, M)``.
compute_uv: if True (default), return the full SVD ``(u, s, vh)``. If False then return
only the singular values ``s``.
hermitian: if True, assume the matrix is hermitian, which allows for a more efficient
implementation (default=False)
subset_by_index: (TPU-only) Optional 2-tuple [start, end] indicating the range of
indices of singular values to compute. For example, if ``[n-2, n]`` then
``svd`` computes the two largest singular values and their singular vectors.
Only compatible with ``full_matrices=False``.
Returns:
A tuple of arrays ``(u, s, vh)`` if ``compute_uv`` is True, otherwise the array ``s``.
- ``u``: left singular vectors of shape ``(..., N, N)`` if ``full_matrices`` is True
or ``(..., N, K)`` otherwise.
- ``s``: singular values of shape ``(..., K)``
- ``vh``: conjugate-transposed right singular vectors of shape ``(..., M, M)``
if ``full_matrices`` is True or ``(..., K, M)`` otherwise.
where ``K = min(N, M)``.
See also:
- :func:`jax.scipy.linalg.svd`: SciPy-style SVD API
- :func:`jax.lax.linalg.svd`: XLA-style SVD API
Examples:
Consider the SVD of a small real-valued array:
>>> x = jnp.array([[1., 2., 3.],
... [6., 5., 4.]])
>>> u, s, vt = jnp.linalg.svd(x, full_matrices=False)
>>> s # doctest: +SKIP
Array([9.361919 , 1.8315067], dtype=float32)
The singular vectors are in the columns of ``u`` and ``v = vt.T``. These vectors are
orthonormal, which can be demonstrated by comparing the matrix product with the
identity matrix:
>>> jnp.allclose(u.T @ u, jnp.eye(2), atol=1E-5)
Array(True, dtype=bool)
>>> v = vt.T
>>> jnp.allclose(v.T @ v, jnp.eye(2), atol=1E-5)
Array(True, dtype=bool)
Given the SVD, ``x`` can be reconstructed via matrix multiplication:
>>> x_reconstructed = u @ jnp.diag(s) @ vt
>>> jnp.allclose(x_reconstructed, x)
Array(True, dtype=bool)
"""
check_arraylike("jnp.linalg.svd", a)
a, = promote_dtypes_inexact(jnp.asarray(a))
if hermitian:
w, v = lax_linalg.eigh(a, subset_by_index=subset_by_index)
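# lax_linalg.eigh returns (eigenvectors, eigenvalues), so here w holds the
# eigenvectors and v the eigenvalues; for a Hermitian matrix the singular
# values are the absolute values of the eigenvalues.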
s = lax.abs(v)
if compute_uv:
sign = lax.sign(v)
idxs = lax.broadcasted_iota(np.int64, s.shape, dimension=s.ndim - 1)
s, idxs, sign = lax.sort((s, idxs, sign), dimension=-1, num_keys=1)
s = lax.rev(s, dimensions=[s.ndim - 1])
idxs = lax.rev(idxs, dimensions=[s.ndim - 1])
sign = lax.rev(sign, dimensions=[s.ndim - 1])
u = jnp.take_along_axis(w, idxs[..., None, :], axis=-1)
vh = _H(u * sign[..., None, :].astype(u.dtype))
return SVDResult(u, s, vh)
else:
return lax.rev(lax.sort(s, dimension=-1), dimensions=[s.ndim-1])
if compute_uv:
u, s, vh = lax_linalg.svd(
a,
full_matrices=full_matrices,
compute_uv=True,
subset_by_index=subset_by_index,
)
return SVDResult(u, s, vh)
else:
return lax_linalg.svd(
a,
full_matrices=full_matrices,
compute_uv=False,
subset_by_index=subset_by_index,
)
@export
@partial(jit, static_argnames=('n',))
def matrix_power(a: ArrayLike, n: int) -> Array:
"""Raise a square matrix to an integer power.
JAX implementation of :func:`numpy.linalg.matrix_power`, implemented via
repeated squarings.
Args:
a: array of shape ``(..., M, M)`` to be raised to the power `n`.
n: the integer exponent to which the matrix should be raised.
Returns:
Array of shape ``(..., M, M)`` containing the matrix power of a to the n.
Examples:
>>> a = jnp.array([[1., 2.],
... [3., 4.]])
>>> jnp.linalg.matrix_power(a, 3)
Array([[ 37., 54.],
[ 81., 118.]], dtype=float32)
>>> a @ a @ a # equivalent evaluated directly
Array([[ 37., 54.],
[ 81., 118.]], dtype=float32)
This also supports zero powers:
>>> jnp.linalg.matrix_power(a, 0)
Array([[1., 0.],
[0., 1.]], dtype=float32)
and also supports negative powers:
>>> with jnp.printoptions(precision=3):
... jnp.linalg.matrix_power(a, -2)
Array([[ 5.5 , -2.5 ],
[-3.75, 1.75]], dtype=float32)
Negative powers are equivalent to matmul of the inverse:
>>> inv_a = jnp.linalg.inv(a)
>>> with jnp.printoptions(precision=3):
... inv_a @ inv_a
Array([[ 5.5 , -2.5 ],
[-3.75, 1.75]], dtype=float32)
"""
check_arraylike("jnp.linalg.matrix_power", a)
arr, = promote_dtypes_inexact(jnp.asarray(a))
if arr.ndim < 2:
raise TypeError("{}-dimensional array given. Array must be at least "
"two-dimensional".format(arr.ndim))
if arr.shape[-2] != arr.shape[-1]:
raise TypeError("Last 2 dimensions of the array must be square")
try:
n = operator.index(n)
except TypeError as err:
raise TypeError(f"exponent must be an integer, got {n}") from err
if n == 0:
return jnp.broadcast_to(jnp.eye(arr.shape[-2], dtype=arr.dtype), arr.shape)
elif n < 0:
arr = inv(arr)
n = abs(n)
if n == 1:
return arr
elif n == 2:
return arr @ arr
elif n == 3:
return (arr @ arr) @ arr
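# General case: exponentiation by squaring. Each pass squares z and consumes
# one bit of n, folding z into the result whenever that bit is set, so only
# O(log n) matrix multiplications are needed.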
z = result = None
while n > 0:
z = arr if z is None else (z @ z) # type: ignore[operator]
n, bit = divmod(n, 2)
if bit:
result = z if result is None else (result @ z)
assert result is not None
return result
@export
@jit
def matrix_rank(
M: ArrayLike, rtol: ArrayLike | None = None, *,
tol: ArrayLike | DeprecatedArg | None = DeprecatedArg()) -> Array:
"""Compute the rank of a matrix.
JAX implementation of :func:`numpy.linalg.matrix_rank`.
The rank is calculated via the Singular Value Decomposition (SVD), and determined
by the number of singular values greater than the specified tolerance.
Args:
M: array of shape ``(..., N, K)`` whose rank is to be computed.
rtol: optional array of shape ``(...)`` specifying the tolerance. Singular values
smaller than `rtol * largest_singular_value` are considered to be zero. If
``rtol`` is None (the default), a reasonable default is chosen based on the
floating point precision of the input.
tol: deprecated alias of the ``rtol`` argument. Will result in a
:class:`DeprecationWarning` if used.
Returns:
array of shape ``a.shape[:-2]`` giving the matrix rank.
Notes:
The rank calculation may be inaccurate for matrices with very small singular
values or those that are numerically ill-conditioned. Consider adjusting the
``rtol`` parameter or using a more specialized rank computation method in such cases.
Examples:
>>> a = jnp.array([[1, 2],
... [3, 4]])
>>> jnp.linalg.matrix_rank(a)
Array(2, dtype=int32)
>>> b = jnp.array([[1, 0], # Rank-deficient matrix
... [0, 0]])
>>> jnp.linalg.matrix_rank(b)
Array(1, dtype=int32)
"""
check_arraylike("jnp.linalg.matrix_rank", M)
# TODO(micky774): deprecated 2024-5-14, remove after deprecation expires.
if not isinstance(tol, DeprecatedArg):
rtol = tol
del tol
deprecations.warn(
"jax-numpy-linalg-matrix_rank-tol",
("The tol argument for linalg.matrix_rank is deprecated. "
"Please use rtol instead."),
stacklevel=2
)
M, = promote_dtypes_inexact(jnp.asarray(M))
if M.ndim < 2:
return (M != 0).any().astype(jnp.int32)
S = svd(M, full_matrices=False, compute_uv=False)
if rtol is None:
rtol = S.max(-1) * np.max(M.shape[-2:]).astype(S.dtype) * jnp.finfo(S.dtype).eps
rtol = jnp.expand_dims(rtol, np.ndim(rtol))
return reductions.sum(S > rtol, axis=-1)
@custom_jvp
def _slogdet_lu(a: Array) -> tuple[Array, Array]:
dtype = lax.dtype(a)
lu, pivot, _ = lax_linalg.lu(a)
diag = jnp.diagonal(lu, axis1=-2, axis2=-1)
is_zero = reductions.any(diag == jnp.array(0, dtype=dtype), axis=-1)
iota = lax.expand_dims(jnp.arange(a.shape[-1], dtype=pivot.dtype),
range(pivot.ndim - 1))
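# Positions where the pivot differs from its own index mark row swaps; each
# swap flips the sign of the determinant, so the parity of the swap count
# gives the sign contribution of the permutation.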
parity = reductions.count_nonzero(pivot != iota, axis=-1)
if jnp.iscomplexobj(a):
sign = reductions.prod(diag / ufuncs.abs(diag).astype(diag.dtype), axis=-1)
else:
sign = jnp.array(1, dtype=dtype)
parity = parity + reductions.count_nonzero(diag < 0, axis=-1)
sign = jnp.where(is_zero,
jnp.array(0, dtype=dtype),
sign * jnp.array(-2 * (parity % 2) + 1, dtype=dtype))
logdet = jnp.where(
is_zero, jnp.array(-jnp.inf, dtype=dtype),
reductions.sum(ufuncs.log(ufuncs.abs(diag)).astype(dtype), axis=-1))
return sign, ufuncs.real(logdet)
@custom_jvp
def _slogdet_qr(a: Array) -> tuple[Array, Array]:
# Implementation of slogdet using QR decomposition. One reason we might prefer
# QR decomposition is that it is more amenable to a fast batched
# implementation on TPU because of the lack of row pivoting.
if jnp.issubdtype(lax.dtype(a), jnp.complexfloating):
raise NotImplementedError("slogdet method='qr' not implemented for complex "
"inputs")
n = a.shape[-1]
a, taus = lax_linalg.geqrf(a)
# The determinant of a triangular matrix is the product of its diagonal
# elements. We are working in log space, so we compute the magnitude as the
# trace of the log-absolute values, and we compute the sign separately.
a_diag = jnp.diagonal(a, axis1=-2, axis2=-1)
log_abs_det = reductions.sum(ufuncs.log(ufuncs.abs(a_diag)), axis=-1)
sign_diag = reductions.prod(ufuncs.sign(a_diag), axis=-1)
# The determinant of a Householder reflector is -1. So whenever we actually
# made a reflection (tau != 0), multiply the result by -1.
sign_taus = reductions.prod(jnp.where(taus[..., :(n-1)] != 0, -1, 1), axis=-1).astype(sign_diag.dtype)
return sign_diag * sign_taus, log_abs_det
@export
@partial(jit, static_argnames=('method',))
def slogdet(a: ArrayLike, *, method: str | None = None) -> SlogdetResult:
"""
Compute the sign and (natural) logarithm of the determinant of an array.
JAX implementation of :func:`numpy.linalg.slogdet`.
Args:
a: array of shape ``(..., M, M)`` for which to compute the sign and log determinant.
method: the method to use for determinant computation. Options are
- ``'lu'`` (default): use the LU decomposition.
- ``'qr'``: use the QR decomposition.
Returns:
A tuple of arrays ``(sign, logabsdet)``, each of shape ``a.shape[:-2]``
- ``sign`` is the sign of the determinant.
- ``logabsdet`` is the natural log of the determinant's absolute value.
See also:
:func:`jax.numpy.linalg.det`: direct computation of determinant
Examples:
>>> a = jnp.array([[1, 2],
... [3, 4]])
>>> sign, logabsdet = jnp.linalg.slogdet(a)
>>> sign # -1 indicates negative determinant
Array(-1., dtype=float32)
>>> jnp.exp(logabsdet) # Absolute value of determinant
Array(2., dtype=float32)
"""
check_arraylike("jnp.linalg.slogdet", a)
a, = promote_dtypes_inexact(jnp.asarray(a))
a_shape = jnp.shape(a)
if len(a_shape) < 2 or a_shape[-1] != a_shape[-2]:
raise ValueError(f"Argument to slogdet() must have shape [..., n, n], got {a_shape}")
if method is None or method == "lu":
return SlogdetResult(*_slogdet_lu(a))
elif method == "qr":
return SlogdetResult(*_slogdet_qr(a))
else:
raise ValueError(f"Unknown slogdet method '{method}'. Supported methods "
"are 'lu' (`None`), and 'qr'.")
def _slogdet_jvp(primals, tangents):
x, = primals
g, = tangents
sign, ans = slogdet(x)
ans_dot = jnp.trace(solve(x, g), axis1=-1, axis2=-2)
if jnp.issubdtype(jnp._dtype(x), jnp.complexfloating):
sign_dot = (ans_dot - ufuncs.real(ans_dot).astype(ans_dot.dtype)) * sign
ans_dot = ufuncs.real(ans_dot)
else:
sign_dot = jnp.zeros_like(sign)
return (sign, ans), (sign_dot, ans_dot)
_slogdet_lu.defjvp(_slogdet_jvp)
_slogdet_qr.defjvp(_slogdet_jvp)
def _cofactor_solve(a: ArrayLike, b: ArrayLike) -> tuple[Array, Array]:
"""Equivalent to det(a)*solve(a, b) for nonsingular mat.
Intermediate function used for jvp and vjp of det.
This function borrows heavily from jax.numpy.linalg.solve and
jax.numpy.linalg.slogdet to compute the gradient of the determinant
in a way that is well defined even for low rank matrices.
This function handles two different cases:
* rank(a) == n or n-1
* rank(a) < n-1
For rank n-1 matrices, the gradient of the determinant is a rank 1 matrix.
Rather than computing det(a)*solve(a, b), which would return NaN, we work
directly with the LU decomposition. If a = p @ l @ u, then
det(a)*solve(a, b) =
prod(diag(u)) * u^-1 @ l^-1 @ p^-1 b =
prod(diag(u)) * triangular_solve(u, solve(p @ l, b))
If a is rank n-1, then the lower right corner of u will be zero and the
triangular_solve will fail.
Let x = solve(p @ l, b) and y = det(a)*solve(a, b).
Then y_{n} =
x_{n} / u_{nn} * prod_{i=1...n}(u_{ii}) =
x_{n} * prod_{i=1...n-1}(u_{ii})
So by replacing the lower-right corner of u with prod_{i=1...n-1}(u_{ii})^-1
we can avoid the triangular_solve failing.
To correctly compute the rest of y_{i} for i != n, we simply multiply
x_{i} by det(a) for all i != n, which will be zero if rank(a) = n-1.
For the second case, a check is done on the matrix to see if `solve`
returns NaN or Inf, and gives a matrix of zeros as a result, as the
gradient of the determinant of a matrix with rank less than n-1 is 0.
This will still return the correct value for rank n-1 matrices, as the check
is applied *after* the lower right corner of u has been updated.
Args:
a: A square matrix or batch of matrices, possibly singular.
b: A matrix, or batch of matrices of the same dimension as a.
Returns:
det(a) and cofactor(a)^T*b, aka adjugate(a)*b
"""
a, = promote_dtypes_inexact(jnp.asarray(a))
b, = promote_dtypes_inexact(jnp.asarray(b))
a_shape = jnp.shape(a)
b_shape = jnp.shape(b)
a_ndims = len(a_shape)
if not (a_ndims >= 2 and a_shape[-1] == a_shape[-2]
and b_shape[-2:] == a_shape[-2:]):
msg = ("The arguments to _cofactor_solve must have shapes "
"a=[..., m, m] and b=[..., m, m]; got a={} and b={}")
raise ValueError(msg.format(a_shape, b_shape))
if a_shape[-1] == 1:
return a[..., 0, 0], b
# lu contains u in the upper triangular matrix and l in the strict lower
# triangular matrix.
# The diagonal of l is set to ones without loss of generality.
lu, pivots, permutation = lax_linalg.lu(a)
dtype = lax.dtype(a)
batch_dims = lax.broadcast_shapes(lu.shape[:-2], b.shape[:-2])
x = jnp.broadcast_to(b, batch_dims + b.shape[-2:])
lu = jnp.broadcast_to(lu, batch_dims + lu.shape[-2:])
# Compute (partial) determinant, ignoring last diagonal of LU
diag = jnp.diagonal(lu, axis1=-2, axis2=-1)
iota = lax.expand_dims(jnp.arange(a_shape[-1], dtype=pivots.dtype),
range(pivots.ndim - 1))
parity = reductions.count_nonzero(pivots != iota, axis=-1)
sign = jnp.asarray(-2 * (parity % 2) + 1, dtype=dtype)
# partial_det[:, -1] contains the full determinant and
# partial_det[:, -2] contains det(u) / u_{nn}.
partial_det = reductions.cumprod(diag, axis=-1) * sign[..., None]
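# Replace u_{nn} with 1 / (sign * prod_{i<n} u_{ii}) so the upper-triangular
# solve below stays finite even when rank(a) == n-1 (where u_{nn} == 0); the
# docstring above derives why this still yields det(a)*solve(a, b).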
lu = lu.at[..., -1, -1].set(1.0 / partial_det[..., -2])
permutation = jnp.broadcast_to(permutation, (*batch_dims, a_shape[-1]))
iotas = jnp.ix_(*(lax.iota(jnp.int32, b) for b in (*batch_dims, 1)))
# filter out any matrices that are not full rank
d = jnp.ones(x.shape[:-1], x.dtype)
d = lax_linalg.triangular_solve(lu, d, left_side=True, lower=False)
d = reductions.any(ufuncs.logical_or(ufuncs.isnan(d), ufuncs.isinf(d)), axis=-1)
d = jnp.tile(d[..., None, None], d.ndim*(1,) + x.shape[-2:])
x = jnp.where(d, jnp.zeros_like(x), x) # first filter
x = x[iotas[:-1] + (permutation, slice(None))]
x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=True,
unit_diagonal=True)
x = jnp.concatenate((x[..., :-1, :] * partial_det[..., -1, None, None],
x[..., -1:, :]), axis=-2)
x = lax_linalg.triangular_solve(lu, x, left_side=True, lower=False)
x = jnp.where(d, jnp.zeros_like(x), x) # second filter
return partial_det[..., -1], x
def _det_2x2(a: Array) -> Array:
return (a[..., 0, 0] * a[..., 1, 1] -
a[..., 0, 1] * a[..., 1, 0])
def _det_3x3(a: Array) -> Array:
return (a[..., 0, 0] * a[..., 1, 1] * a[..., 2, 2] +
a[..., 0, 1] * a[..., 1, 2] * a[..., 2, 0] +
a[..., 0, 2] * a[..., 1, 0] * a[..., 2, 1] -
a[..., 0, 2] * a[..., 1, 1] * a[..., 2, 0] -
a[..., 0, 0] * a[..., 1, 2] * a[..., 2, 1] -
a[..., 0, 1] * a[..., 1, 0] * a[..., 2, 2])
@custom_jvp
def _det(a):
sign, logdet = slogdet(a)
return sign * ufuncs.exp(logdet).astype(sign.dtype)
@_det.defjvp
def _det_jvp(primals, tangents):
x, = primals
g, = tangents
y, z = _cofactor_solve(x, g)
return y, jnp.trace(z, axis1=-1, axis2=-2)
@export
@jit
def det(a: ArrayLike) -> Array:
"""
Compute the determinant of an array.
JAX implementation of :func:`numpy.linalg.det`.
Args:
a: array of shape ``(..., M, M)`` for which to compute the determinant.
Returns:
An array of determinants of shape ``a.shape[:-2]``.
See also:
:func:`jax.scipy.linalg.det`: Scipy-style API for determinant.
Examples:
>>> a = jnp.array([[1, 2],
... [3, 4]])
>>> jnp.linalg.det(a)
Array(-2., dtype=float32)
"""
check_arraylike("jnp.linalg.det", a)
a, = promote_dtypes_inexact(jnp.asarray(a))
a_shape = jnp.shape(a)
if len(a_shape) >= 2 and a_shape[-1] == 2 and a_shape[-2] == 2:
return _det_2x2(a)
elif len(a_shape) >= 2 and a_shape[-1] == 3 and a_shape[-2] == 3:
return _det_3x3(a)
elif len(a_shape) >= 2 and a_shape[-1] == a_shape[-2]:
return _det(a)
else:
msg = "Argument to _det() must have shape [..., n, n], got {}"
raise ValueError(msg.format(a_shape))
@export
def eig(a: ArrayLike) -> tuple[Array, Array]:
"""
Compute the eigenvalues and eigenvectors of a square array.
JAX implementation of :func:`numpy.linalg.eig`.
Args:
a: array of shape ``(..., M, M)`` for which to compute the eigenvalues and vectors.
Returns:
A tuple ``(eigenvalues, eigenvectors)`` with
- ``eigenvalues``: an array of shape ``(..., M)`` containing the eigenvalues.
- ``eigenvectors``: an array of shape ``(..., M, M)``, where column ``v[:, i]`` is the
eigenvector corresponding to the eigenvalue ``w[i]``.
Notes:
- This differs from :func:`numpy.linalg.eig` in that the return type of
:func:`jax.numpy.linalg.eig` is always complex64 for 32-bit input, and complex128
for 64-bit input.
- At present, non-symmetric eigendecomposition is only implemented on the CPU and
GPU backends. For more details about the GPU implementation, see the
documentation for :func:`jax.lax.linalg.eig`.
See also:
- :func:`jax.numpy.linalg.eigh`: eigenvectors and eigenvalues of a Hermitian matrix.
- :func:`jax.numpy.linalg.eigvals`: compute eigenvalues only.
Examples:
>>> a = jnp.array([[1., 2.],
... [2., 1.]])
>>> w, v = jnp.linalg.eig(a)
>>> with jax.numpy.printoptions(precision=4):
... w
Array([ 3.+0.j, -1.+0.j], dtype=complex64)
>>> v
Array([[ 0.70710677+0.j, -0.70710677+0.j],
[ 0.70710677+0.j, 0.70710677+0.j]], dtype=complex64)
"""
check_arraylike("jnp.linalg.eig", a)
a, = promote_dtypes_inexact(jnp.asarray(a))
w, v = lax_linalg.eig(a, compute_left_eigenvectors=False)
return w, v
@export
@jit
def eigvals(a: ArrayLike) -> Array:
"""
Compute the eigenvalues of a general matrix.
JAX implementation of :func:`numpy.linalg.eigvals`.
Args:
a: array of shape ``(..., M, M)`` for which to compute the eigenvalues.
Returns:
An array of shape ``(..., M)`` containing the eigenvalues.
See also:
- :func:`jax.numpy.linalg.eig`: computes eigenvalues and eigenvectors of a general matrix.
- :func:`jax.numpy.linalg.eigh`: computes eigenvalues and eigenvectors of a Hermitian matrix.
Notes:
- This differs from :func:`numpy.linalg.eigvals` in that the return type of
:func:`jax.numpy.linalg.eigvals` is always complex64 for 32-bit input, and
complex128 for 64-bit input.
- At present, non-symmetric eigendecomposition is only implemented on the CPU backend.
Examples:
>>> a = jnp.array([[1., 2.],
... [2., 1.]])
>>> w = jnp.linalg.eigvals(a)
>>> with jnp.printoptions(precision=2):
... w
Array([ 3.+0.j, -1.+0.j], dtype=complex64)
"""
check_arraylike("jnp.linalg.eigvals", a)
a, = promote_dtypes_inexact(jnp.asarray(a))
return lax_linalg.eig(a, compute_left_eigenvectors=False,
compute_right_eigenvectors=False)[0]
@export
@partial(jit, static_argnames=('UPLO', 'symmetrize_input'))
def eigh(a: ArrayLike, UPLO: str | None = None,
symmetrize_input: bool = True) -> EighResult:
"""
Compute the eigenvalues and eigenvectors of a Hermitian matrix.
JAX implementation of :func:`numpy.linalg.eigh`.
Args:
a: array of shape ``(..., M, M)``, containing the Hermitian (if complex)
or symmetric (if real) matrix.
UPLO: specifies whether the calculation is done with the lower triangular
part of ``a`` (``'L'``, default) or the upper triangular part (``'U'``).
symmetrize_input: if True (default) then input is symmetrized, which leads
to better behavior under automatic differentiation.
Returns:
A namedtuple ``(eigenvalues, eigenvectors)`` where
- ``eigenvalues``: an array of shape ``(..., M)`` containing the eigenvalues,
sorted in ascending order.
- ``eigenvectors``: an array of shape ``(..., M, M)``, where column ``v[:, i]`` is the
normalized eigenvector corresponding to the eigenvalue ``w[i]``.
See also:
- :func:`jax.numpy.linalg.eig`: general eigenvalue decomposition.
- :func:`jax.numpy.linalg.eigvalsh`: compute eigenvalues only.
- :func:`jax.scipy.linalg.eigh`: SciPy API for Hermitian eigendecomposition.
- :func:`jax.lax.linalg.eigh`: XLA API for Hermitian eigendecomposition.
Examples:
>>> a = jnp.array([[1, -2j],
... [2j, 1]])
>>> w, v = jnp.linalg.eigh(a)
>>> w
Array([-1., 3.], dtype=float32)
>>> with jnp.printoptions(precision=3):
... v
Array([[-0.707+0.j , -0.707+0.j ],
[ 0. +0.707j, 0. -0.707j]], dtype=complex64)
"""
check_arraylike("jnp.linalg.eigh", a)
if UPLO is None or UPLO == "L":
lower = True
elif UPLO == "U":
lower = False
else:
msg = f"UPLO must be one of None, 'L', or 'U', got {UPLO}"
raise ValueError(msg)
a, = promote_dtypes_inexact(jnp.asarray(a))
v, w = lax_linalg.eigh(a, lower=lower, symmetrize_input=symmetrize_input)
return EighResult(w, v)
@export
@partial(jit, static_argnames=('UPLO',))
def eigvalsh(a: ArrayLike, UPLO: str | None = 'L') -> Array:
"""
Compute the eigenvalues of a Hermitian matrix.
JAX implementation of :func:`numpy.linalg.eigvalsh`.
Args:
a: array of shape ``(..., M, M)``, containing the Hermitian (if complex)
or symmetric (if real) matrix.
UPLO: specifies whether the calculation is done with the lower triangular
part of ``a`` (``'L'``, default) or the upper triangular part (``'U'``).
Returns:
An array of shape ``(..., M)`` containing the eigenvalues, sorted in
ascending order.
See also:
- :func:`jax.numpy.linalg.eig`: general eigenvalue decomposition.
- :func:`jax.numpy.linalg.eigh`: computes eigenvalues and eigenvectors of a
Hermitian matrix.
Examples:
>>> a = jnp.array([[1, -2j],
... [2j, 1]])
>>> w = jnp.linalg.eigvalsh(a)
>>> w
Array([-1., 3.], dtype=float32)
"""
check_arraylike("jnp.linalg.eigvalsh", a)
a, = promote_dtypes_inexact(jnp.asarray(a))
w, _ = eigh(a, UPLO)
return w
# TODO(micky774): deprecated 2024-5-14, remove wrapper after deprecation expires.
@export
def pinv(a: ArrayLike, rtol: ArrayLike | None = None,
hermitian: bool = False, *,
rcond: ArrayLike | DeprecatedArg | None = DeprecatedArg()) -> Array:
"""Compute the (Moore-Penrose) pseudo-inverse of a matrix.
JAX implementation of :func:`numpy.linalg.pinv`.
Args:
a: array of shape ``(..., M, N)`` containing matrices to pseudo-invert.
rtol: float or array_like of shape ``a.shape[:-2]``. Specifies the cutoff
for small singular values: singular values smaller than
``rtol * largest_singular_value`` are treated as zero. The default is
determined based on the floating point precision of the dtype.
hermitian: if True, then the input is assumed to be Hermitian, and a more
efficient algorithm is used (default: False)
rcond: deprecated alias of the ``rtol`` argument. Will result in a
:class:`DeprecationWarning` if used.
Returns:
An array of shape ``(..., N, M)`` containing the pseudo-inverse of ``a``.
See also:
- :func:`jax.numpy.linalg.inv`: multiplicative inverse of a square matrix.
Notes:
:func:`jax.numpy.linalg.pinv` differs from :func:`numpy.linalg.pinv` in the
default value of ``rcond``: in NumPy, the default is ``1e-15``. In JAX, the
default is ``10. * max(num_rows, num_cols) * jnp.finfo(dtype).eps``.
Examples:
>>> a = jnp.array([[1, 2],
... [3, 4],
... [5, 6]])
>>> a_pinv = jnp.linalg.pinv(a)
>>> a_pinv # doctest: +SKIP
Array([[-1.333332 , -0.33333257, 0.6666657 ],
[ 1.0833322 , 0.33333272, -0.41666582]], dtype=float32)
The pseudo-inverse operates as a multiplicative inverse so long as the
output is not rank-deficient:
>>> jnp.allclose(a_pinv @ a, jnp.eye(2), atol=1E-4)
Array(True, dtype=bool)
"""
if not isinstance(rcond, DeprecatedArg):
rtol = rcond
del rcond
deprecations.warn(
"jax-numpy-linalg-pinv-rcond",
("The rcond argument for linalg.pinv is deprecated. "
"Please use rtol instead."),
stacklevel=2
)
return _pinv(a, rtol, hermitian)
@partial(custom_jvp, nondiff_argnums=(1, 2))
@partial(jit, static_argnames=('hermitian'))
def _pinv(a: ArrayLike, rtol: ArrayLike | None = None, hermitian: bool = False) -> Array:
# Uses same algorithm as
# https://github.com/numpy/numpy/blob/v1.17.0/numpy/linalg/linalg.py#L1890-L1979
check_arraylike("jnp.linalg.pinv", a)
arr, = promote_dtypes_inexact(jnp.asarray(a))
m, n = arr.shape[-2:]
if m == 0 or n == 0:
return jnp.empty(arr.shape[:-2] + (n, m), arr.dtype)
arr = ufuncs.conj(arr)
if rtol is None:
max_rows_cols = max(arr.shape[-2:])
rtol = 10. * max_rows_cols * jnp.array(jnp.finfo(arr.dtype).eps)
rtol = jnp.asarray(rtol)
u, s, vh = svd(arr, full_matrices=False, hermitian=hermitian)
# Singular values less than or equal to ``rtol * largest_singular_value``
# are set to zero.
rtol = lax.expand_dims(rtol[..., jnp.newaxis], range(s.ndim - rtol.ndim - 1))
cutoff = rtol * s[..., 0:1]
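# Replacing the discarded singular values with inf (rather than zero) makes
# their reciprocals in the division below exactly zero.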
s = jnp.where(s > cutoff, s, jnp.inf).astype(u.dtype)
res = jnp.matmul(vh.mT, ufuncs.divide(u.mT, s[..., jnp.newaxis]),
precision=lax.Precision.HIGHEST)
return lax.convert_element_type(res, arr.dtype)
@_pinv.defjvp
@jax.default_matmul_precision("float32")
def _pinv_jvp(rtol, hermitian, primals, tangents):
# The Differentiation of Pseudo-Inverses and Nonlinear Least Squares Problems
# Whose Variables Separate. Author(s): G. H. Golub and V. Pereyra. SIAM
# Journal on Numerical Analysis, Vol. 10, No. 2 (Apr., 1973), pp. 413-432.
# (via https://en.wikipedia.org/wiki/Moore%E2%80%93Penrose_inverse#Derivative)
a, = primals # m x n
a_dot, = tangents
p = pinv(a, rtol=rtol, hermitian=hermitian) # n x m
if hermitian:
# svd(..., hermitian=True) symmetrizes its input, and the JVP must match.
a = _symmetrize(a)
a_dot = _symmetrize(a_dot)
# TODO(phawkins): this could be simplified in the Hermitian case if we
# supported triangular matrix multiplication.
m, n = a.shape[-2:]
if m >= n:
s = (p @ _H(p)) @ _H(a_dot) # nxm
t = (_H(a_dot) @ _H(p)) @ p # nxm
p_dot = -(p @ a_dot) @ p + s - (s @ a) @ p + t - (p @ a) @ t
else: # m < n
s = p @ (_H(p) @ _H(a_dot))
t = _H(a_dot) @ (_H(p) @ p)
p_dot = -p @ (a_dot @ p) + s - s @ (a @ p) + t - p @ (a @ t)
return p, p_dot
@export
@jit
def inv(a: ArrayLike) -> Array:
"""Return the inverse of a square matrix
JAX implementation of :func:`numpy.linalg.inv`.
Args:
a: array of shape ``(..., N, N)`` specifying square array(s) to be inverted.
Returns:
Array of shape ``(..., N, N)`` containing the inverse of the input.
Notes:
In most cases, explicitly computing the inverse of a matrix is ill-advised. For
example, to compute ``x = inv(A) @ b``, it is more performant and numerically
precise to use a direct solve, such as :func:`jax.scipy.linalg.solve`.
See Also:
- :func:`jax.scipy.linalg.inv`: SciPy-style API for matrix inverse
- :func:`jax.numpy.linalg.solve`: direct linear solver
Examples:
Compute the inverse of a 3x3 matrix
>>> a = jnp.array([[1., 2., 3.],
... [2., 4., 2.],
... [3., 2., 1.]])
>>> a_inv = jnp.linalg.inv(a)
>>> a_inv # doctest: +SKIP
Array([[ 0. , -0.25 , 0.5 ],
[-0.25 , 0.5 , -0.25000003],
[ 0.5 , -0.25 , 0. ]], dtype=float32)
Check that multiplying with the inverse gives the identity:
>>> jnp.allclose(a @ a_inv, jnp.eye(3), atol=1E-5)
Array(True, dtype=bool)
Multiply the inverse by a vector ``b``, to find a solution to ``a @ x = b``
>>> b = jnp.array([1., 4., 2.])
>>> a_inv @ b
Array([ 0. , 1.25, -0.5 ], dtype=float32)
Note, however, that explicitly computing the inverse in such a case can lead
to poor performance and loss of precision as the size of the problem grows.
Instead, you should use a direct solver like :func:`jax.numpy.linalg.solve`:
>>> jnp.linalg.solve(a, b)
Array([ 0. , 1.25, -0.5 ], dtype=float32)
"""
check_arraylike("jnp.linalg.inv", a)
arr = jnp.asarray(a)
if arr.ndim < 2 or arr.shape[-1] != arr.shape[-2]:
raise ValueError(
f"Argument to inv must have shape [..., n, n], got {arr.shape}.")
return solve(
arr, lax.broadcast(jnp.eye(arr.shape[-1], dtype=arr.dtype), arr.shape[:-2]))
@export
@partial(jit, static_argnames=('ord', 'axis', 'keepdims'))
def norm(x: ArrayLike, ord: int | str | None = None,
axis: None | tuple[int, ...] | int = None,
keepdims: bool = False) -> Array:
"""Compute the norm of a matrix or vector.
JAX implementation of :func:`numpy.linalg.norm`.
Args:
x: N-dimensional array for which the norm will be computed.
ord: specify the kind of norm to take. Default is Frobenius norm for matrices,
and the 2-norm for vectors. For other options, see Notes below.
axis: integer or sequence of integers specifying the axes over which the norm
will be computed. Defaults to all axes of ``x``.
keepdims: if True, the output array will have the same number of dimensions as
the input, with the size of reduced axes replaced by ``1`` (default: False).
Returns:
array containing the specified norm of x.
Notes:
The flavor of norm computed depends on the value of ``ord`` and the number of
axes being reduced.
For **vector norms** (i.e. a single axis reduction):
- ``ord=None`` (default) computes the 2-norm
- ``ord=inf`` computes ``max(abs(x))``
- ``ord=-inf`` computes ``min(abs(x))``
- ``ord=0`` computes ``sum(x!=0)``
- for other numerical values, computes ``sum(abs(x) ** ord)**(1/ord)``
For **matrix norms** (i.e. two axes reductions):
- ``ord='fro'`` or ``ord=None`` (default) computes the Frobenius norm
- ``ord='nuc'`` computes the nuclear norm, or the sum of the singular values
- ``ord=1`` computes ``max(abs(x).sum(0))``
- ``ord=-1`` computes ``min(abs(x).sum(0))``
- ``ord=2`` computes the 2-norm, i.e. the largest singular value
- ``ord=-2`` computes the smallest singular value
Examples:
Vector norms:
>>> x = jnp.array([3., 4., 12.])
>>> jnp.linalg.norm(x)
Array(13., dtype=float32)
>>> jnp.linalg.norm(x, ord=1)
Array(19., dtype=float32)
>>> jnp.linalg.norm(x, ord=0)
Array(3., dtype=float32)
Matrix norms:
>>> x = jnp.array([[1., 2., 3.],
... [4., 5., 7.]])
>>> jnp.linalg.norm(x) # Frobenius norm
Array(10.198039, dtype=float32)
>>> jnp.linalg.norm(x, ord='nuc') # nuclear norm
Array(10.762535, dtype=float32)
>>> jnp.linalg.norm(x, ord=1) # 1-norm
Array(10., dtype=float32)
Batched vector norm:
>>> jnp.linalg.norm(x, axis=1)
Array([3.7416575, 9.486833 ], dtype=float32)
"""
check_arraylike("jnp.linalg.norm", x)
x, = promote_dtypes_inexact(jnp.asarray(x))
x_shape = jnp.shape(x)
ndim = len(x_shape)
if axis is None:
# NumPy has an undocumented behavior that admits arbitrary rank inputs if
# `ord` is None: https://github.com/numpy/numpy/issues/14215
if ord is None:
return ufuncs.sqrt(reductions.sum(ufuncs.real(x * ufuncs.conj(x)), keepdims=keepdims))
axis = tuple(range(ndim))
elif isinstance(axis, tuple):
axis = tuple(canonicalize_axis(x, ndim) for x in axis)
else:
axis = (canonicalize_axis(axis, ndim),)
num_axes = len(axis)
if num_axes == 1:
return vector_norm(x, ord=2 if ord is None else ord, axis=axis, keepdims=keepdims)
elif num_axes == 2:
row_axis, col_axis = axis # pytype: disable=bad-unpacking
if ord is None or ord in ('f', 'fro'):
return ufuncs.sqrt(reductions.sum(ufuncs.real(x * ufuncs.conj(x)), axis=axis,
keepdims=keepdims))
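# The remaining orders reduce over one axis and then the other; when
# keepdims is False the first reduction removes an axis, so the index of
# the second axis must shift down if it comes later.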
elif ord == 1:
if not keepdims and col_axis > row_axis:
col_axis -= 1
return reductions.amax(reductions.sum(ufuncs.abs(x), axis=row_axis, keepdims=keepdims),
axis=col_axis, keepdims=keepdims)
elif ord == -1:
if not keepdims and col_axis > row_axis:
col_axis -= 1
return reductions.amin(reductions.sum(ufuncs.abs(x), axis=row_axis, keepdims=keepdims),
axis=col_axis, keepdims=keepdims)
elif ord == jnp.inf:
if not keepdims and row_axis > col_axis:
row_axis -= 1
return reductions.amax(reductions.sum(ufuncs.abs(x), axis=col_axis, keepdims=keepdims),
axis=row_axis, keepdims=keepdims)
elif ord == -jnp.inf:
if not keepdims and row_axis > col_axis:
row_axis -= 1
return reductions.amin(reductions.sum(ufuncs.abs(x), axis=col_axis, keepdims=keepdims),
axis=row_axis, keepdims=keepdims)
elif ord in ('nuc', 2, -2):
x = jnp.moveaxis(x, axis, (-2, -1))
if ord == 2:
reducer = reductions.amax
elif ord == -2:
reducer = reductions.amin
else:
# `sum` takes an extra dtype= argument, unlike `amax` and `amin`.
reducer = reductions.sum # type: ignore[assignment]
y = reducer(svd(x, compute_uv=False), axis=-1)
if keepdims:
y = jnp.expand_dims(y, axis)
return y
else:
raise ValueError(f"Invalid order '{ord}' for matrix norm.")
else:
raise ValueError(
f"Invalid axis values ({axis}) for jnp.linalg.norm.")
@overload
def qr(a: ArrayLike, mode: Literal["r"]) -> Array: ...
@overload
def qr(a: ArrayLike, mode: str = "reduced") -> Array | QRResult: ...
@export
@partial(jit, static_argnames=('mode',))
def qr(a: ArrayLike, mode: str = "reduced") -> Array | QRResult:
"""Compute the QR decomposition of an array
JAX implementation of :func:`numpy.linalg.qr`.
The QR decomposition of a matrix `A` is given by
.. math::
A = QR
Where `Q` is a unitary matrix (i.e. :math:`Q^HQ=I`) and `R` is an upper-triangular
matrix.
Args:
a: array of shape (..., M, N)
mode: Computational mode. Supported values are:
- ``"reduced"`` (default): return `Q` of shape ``(..., M, K)`` and `R` of shape
``(..., K, N)``, where ``K = min(M, N)``.
- ``"complete"``: return `Q` of shape ``(..., M, M)`` and `R` of shape ``(..., M, N)``.
- ``"raw"``: return lapack-internal representations of shape ``(..., M, N)`` and ``(..., K)``.
- ``"r"``: return `R` only.
Returns:
A tuple ``(Q, R)`` (if ``mode`` is not ``"r"``) otherwise an array ``R``,
where:
- ``Q`` is an orthogonal matrix of shape ``(..., M, K)`` (if ``mode`` is ``"reduced"``)
or ``(..., M, M)`` (if ``mode`` is ``"complete"``).
- ``R`` is an upper-triangular matrix of shape ``(..., M, N)`` (if ``mode`` is
``"r"`` or ``"complete"``) or ``(..., K, N)`` (if ``mode`` is ``"reduced"``)
with ``K = min(M, N)``.
See also:
- :func:`jax.scipy.linalg.qr`: SciPy-style QR decomposition API
- :func:`jax.lax.linalg.qr`: XLA-style QR decomposition API
Examples:
Compute the QR decomposition of a matrix:
>>> a = jnp.array([[1., 2., 3., 4.],
... [5., 4., 2., 1.],
... [6., 3., 1., 5.]])
>>> Q, R = jnp.linalg.qr(a)
>>> Q # doctest: +SKIP
Array([[-0.12700021, -0.7581426 , -0.6396022 ],
[-0.63500065, -0.43322435, 0.63960224],
[-0.7620008 , 0.48737738, -0.42640156]], dtype=float32)
>>> R # doctest: +SKIP
Array([[-7.8740077, -5.080005 , -2.4130025, -4.953006 ],
[ 0. , -1.7870499, -2.6534991, -1.028908 ],
[ 0. , 0. , -1.0660033, -4.050814 ]], dtype=float32)
Check that ``Q`` is orthonormal:
>>> jnp.allclose(Q.T @ Q, jnp.eye(3), atol=1E-5)
Array(True, dtype=bool)
Reconstruct the input:
>>> jnp.allclose(Q @ R, a)
Array(True, dtype=bool)
"""
check_arraylike("jnp.linalg.qr", a)
a, = promote_dtypes_inexact(jnp.asarray(a))
if mode == "raw":
a, taus = lax_linalg.geqrf(a)
return QRResult(a.mT, taus)
if mode in ("reduced", "r", "full"):
full_matrices = False
elif mode == "complete":
full_matrices = True
else:
raise ValueError(f"Unsupported QR decomposition mode '{mode}'")
q, r = lax_linalg.qr(a, full_matrices=full_matrices)
if mode == "r":
return r
return QRResult(q, r)
@export
@jit
def solve(a: ArrayLike, b: ArrayLike) -> Array:
"""Solve a linear system of equations
JAX implementation of :func:`numpy.linalg.solve`.
This solves a (batched) linear system of equations ``a @ x = b``
for ``x`` given ``a`` and ``b``.
Args:
a: array of shape ``(..., N, N)``.
b: array of shape ``(N,)`` (for 1-dimensional right-hand-side) or
``(..., N, M)`` (for batched 2-dimensional right-hand-side).
Returns:
An array containing the result of the linear solve. The result has shape ``(..., N)``
if ``b`` is of shape ``(N,)``, and has shape ``(..., N, M)`` otherwise.
See also:
- :func:`jax.scipy.linalg.solve`: SciPy-style API for solving linear systems.
- :func:`jax.lax.custom_linear_solve`: matrix-free linear solver.
Examples:
A simple 3x3 linear system:
>>> A = jnp.array([[1., 2., 3.],
... [2., 4., 2.],
... [3., 2., 1.]])
>>> b = jnp.array([14., 16., 10.])
>>> x = jnp.linalg.solve(A, b)
>>> x
Array([1., 2., 3.], dtype=float32)
Confirming that the result solves the system:
>>> jnp.allclose(A @ x, b)
Array(True, dtype=bool)
"""
check_arraylike("jnp.linalg.solve", a, b)
a, b = promote_dtypes_inexact(jnp.asarray(a), jnp.asarray(b))
if b.ndim == 1:
signature = "(m,m),(m)->(m)"
elif a.ndim == b.ndim + 1:
# Deprecation warning added 2024-02-06
warnings.warn("jnp.linalg.solve: batched 1D solves with b.ndim > 1 are deprecated, "
"and in the future will be treated as a batched 2D solve. "
"Use solve(a, b[..., None])[..., 0] to avoid this warning.",
category=FutureWarning)
signature = "(m,m),(m)->(m)"
else:
signature = "(m,m),(m,n)->(m,n)"
return jnp.vectorize(lax_linalg._solve, signature=signature)(a, b)
def _lstsq(a: ArrayLike, b: ArrayLike, rcond: float | None, *,
numpy_resid: bool = False) -> tuple[Array, Array, Array, Array]:
# TODO: add lstsq to lax_linalg and implement this function via those wrappers.
# TODO: add custom jvp rule for more robust lstsq differentiation
a, b = promote_dtypes_inexact(a, b)
if a.shape[0] != b.shape[0]:
raise ValueError("Leading dimensions of input arrays must match")
b_orig_ndim = b.ndim
if b_orig_ndim == 1:
b = b[:, None]
if a.ndim != 2:
raise TypeError(
f"{a.ndim}-dimensional array given. Array must be two-dimensional")
if b.ndim != 2:
raise TypeError(
f"{b.ndim}-dimensional array given. Array must be one or two-dimensional")
m, n = a.shape
dtype = a.dtype
if a.size == 0:
s = jnp.empty(0, dtype=a.dtype)
rank = jnp.array(0, dtype=int)
x = jnp.empty((n, *b.shape[1:]), dtype=a.dtype)
else:
if rcond is None:
rcond = jnp.finfo(dtype).eps * max(n, m)
else:
rcond = jnp.where(rcond < 0, jnp.finfo(dtype).eps, rcond)
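# SVD-based least squares: x = V @ diag(1/s) @ U^H @ b, with singular
# values below rcond * s_max masked out (their reciprocals set to zero).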
u, s, vt = svd(a, full_matrices=False)
mask = s >= jnp.array(rcond, dtype=s.dtype) * s[0]
rank = mask.sum()
safe_s = jnp.where(mask, s, 1).astype(a.dtype)
s_inv = jnp.where(mask, 1 / safe_s, 0)[:, jnp.newaxis]
uTb = jnp.matmul(u.conj().T, b, precision=lax.Precision.HIGHEST)
x = jnp.matmul(vt.conj().T, s_inv * uTb, precision=lax.Precision.HIGHEST)
# Numpy returns empty residuals in some cases. To allow compilation, we
# default to returning full residuals in all cases.
if numpy_resid and (rank < n or m <= n):
resid = jnp.asarray([])
else:
b_estimate = jnp.matmul(a, x, precision=lax.Precision.HIGHEST)
resid = norm(b - b_estimate, axis=0) ** 2
if b_orig_ndim == 1:
x = x.ravel()
return x, resid, rank, s
_jit_lstsq = jit(partial(_lstsq, numpy_resid=False))
@export
def lstsq(a: ArrayLike, b: ArrayLike, rcond: float | None = None, *,
numpy_resid: bool = False) -> tuple[Array, Array, Array, Array]:
"""
Return the least-squares solution to a linear equation.
JAX implementation of :func:`numpy.linalg.lstsq`.
Args:
a: array of shape ``(M, N)`` representing the coefficient matrix.
b: array of shape ``(M,)`` or ``(M, K)`` representing the right-hand side.
rcond: Cut-off ratio for small singular values. Singular values smaller than
``rcond * largest_singular_value`` are treated as zero. If None (default),
the optimal value will be used to reduce floating point errors.
numpy_resid: If True, compute and return residuals in the same way as NumPy's
`linalg.lstsq`. This is necessary if you want to precisely replicate NumPy's
behavior. If False (default), a more efficient method is used to compute residuals.
Returns:
Tuple of arrays ``(x, resid, rank, s)`` where
- ``x`` is a shape ``(N,)`` or ``(N, K)`` array containing the least-squares solution.
- ``resid`` is the sum of squared residuals, of shape ``()`` or ``(K,)``.
- ``rank`` is the rank of the matrix ``a``.
- ``s`` is the singular values of the matrix ``a``.
Examples:
>>> a = jnp.array([[1, 2],
... [3, 4]])
>>> b = jnp.array([5, 6])
>>> x, _, _, _ = jnp.linalg.lstsq(a, b)
>>> with jnp.printoptions(precision=3):
... print(x)
[-4. 4.5]
"""
check_arraylike("jnp.linalg.lstsq", a, b)
if numpy_resid:
return _lstsq(a, b, rcond, numpy_resid=True)
return _jit_lstsq(a, b, rcond)
@export
def cross(x1: ArrayLike, x2: ArrayLike, /, *, axis=-1):
r"""Compute the cross-product of two 3D vectors
JAX implementation of :func:`numpy.linalg.cross`
Args:
x1: N-dimensional array, with ``x1.shape[axis] == 3``
x2: N-dimensional array, with ``x2.shape[axis] == 3``, and other axes
broadcast-compatible with ``x1``.
axis: axis along which to take the cross product (default: -1).
Returns:
array containing the result of the cross-product
See Also:
:func:`jax.numpy.cross`: more flexible cross-product API.
Examples:
Showing that :math:`\hat{x} \times \hat{y} = \hat{z}`:
>>> x = jnp.array([1., 0., 0.])
>>> y = jnp.array([0., 1., 0.])
>>> jnp.linalg.cross(x, y)
Array([0., 0., 1.], dtype=float32)
Cross product of :math:`\hat{x}` with all three standard unit vectors,
via broadcasting:
>>> xyz = jnp.eye(3)
>>> jnp.linalg.cross(x, xyz, axis=-1)
Array([[ 0., 0., 0.],
[ 0., 0., 1.],
[ 0., -1., 0.]], dtype=float32)
"""
check_arraylike("jnp.linalg.outer", x1, x2)
x1, x2 = jnp.asarray(x1), jnp.asarray(x2)
if x1.shape[axis] != 3 or x2.shape[axis] != 3:
raise ValueError(
"Both input arrays must be (arrays of) 3-dimensional vectors, "
f"but they have {x1.shape[axis]=} and {x2.shape[axis]=}"
)
return jnp.cross(x1, x2, axis=axis)
@export
def outer(x1: ArrayLike, x2: ArrayLike, /) -> Array:
"""Compute the outer product of two 1-dimensional arrays.
JAX implementation of :func:`numpy.linalg.outer`.
Args:
x1: array
x2: array
Returns:
array containing the outer product of ``x1`` and ``x2``
See also:
:func:`jax.numpy.outer`: similar function in the main :mod:`jax.numpy` module.
Examples:
>>> x1 = jnp.array([1, 2, 3])
>>> x2 = jnp.array([4, 5, 6])
>>> jnp.linalg.outer(x1, x2)
Array([[ 4, 5, 6],
[ 8, 10, 12],
[12, 15, 18]], dtype=int32)
"""
check_arraylike("jnp.linalg.outer", x1, x2)
x1, x2 = jnp.asarray(x1), jnp.asarray(x2)
if x1.ndim != 1 or x2.ndim != 1:
raise ValueError(f"Input arrays must be one-dimensional, but they are {x1.ndim=} {x2.ndim=}")
return x1[:, None] * x2[None, :]
@export
def matrix_norm(x: ArrayLike, /, *, keepdims: bool = False, ord: str | int = 'fro') -> Array:
"""Compute the norm of a matrix or stack of matrices.
JAX implementation of :func:`numpy.linalg.matrix_norm`
Args:
x: array of shape ``(..., M, N)`` for which to take the norm.
keepdims: if True, keep the reduced dimensions in the output.
ord: A string or int specifying the type of norm; default is the Frobenius norm.
See :func:`numpy.linalg.norm` for details on available options.
Returns:
array containing the norm of ``x``. Has shape ``x.shape[:-2]`` if ``keepdims`` is
False, or shape ``(..., 1, 1)`` if ``keepdims`` is True.
See also:
- :func:`jax.numpy.linalg.vector_norm`: Norm of a vector or stack of vectors.
- :func:`jax.numpy.linalg.norm`: More general matrix or vector norm.
Examples:
>>> x = jnp.array([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9]])
>>> jnp.linalg.matrix_norm(x)
Array(16.881943, dtype=float32)
"""
check_arraylike('jnp.linalg.matrix_norm', x)
return norm(x, ord=ord, keepdims=keepdims, axis=(-2, -1))
@export
def matrix_transpose(x: ArrayLike, /) -> Array:
"""Transpose a matrix or stack of matrices.
JAX implementation of :func:`numpy.linalg.matrix_transpose`.
Args:
x: array of shape ``(..., M, N)``
Returns:
array of shape ``(..., N, M)`` containing the matrix transpose of ``x``.
See also:
:func:`jax.numpy.transpose`: more general transpose operation.
Examples:
Transpose of a single matrix:
>>> x = jnp.array([[1, 2, 3],
... [4, 5, 6]])
>>> jnp.linalg.matrix_transpose(x)
Array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)
Transpose of a stack of matrices:
>>> x = jnp.array([[[1, 2],
... [3, 4]],
... [[5, 6],
... [7, 8]]])
>>> jnp.linalg.matrix_transpose(x)
Array([[[1, 3],
[2, 4]],
<BLANKLINE>
[[5, 7],
[6, 8]]], dtype=int32)
For convenience, the same computation can be done via the
:attr:`~jax.Array.mT` property of JAX array objects:
>>> x.mT
Array([[[1, 3],
[2, 4]],
<BLANKLINE>
[[5, 7],
[6, 8]]], dtype=int32)
"""
check_arraylike('jnp.linalg.matrix_transpose', x)
x_arr = jnp.asarray(x)
ndim = x_arr.ndim
if ndim < 2:
raise ValueError(f"matrix_transpose requres at least 2 dimensions; got {ndim=}")
return jax.lax.transpose(x_arr, (*range(ndim - 2), ndim - 1, ndim - 2))
@export
def vector_norm(x: ArrayLike, /, *, axis: int | tuple[int, ...] | None = None, keepdims: bool = False,
ord: int | str = 2) -> Array:
"""Compute the vector norm of a vector or batch of vectors.
JAX implementation of :func:`numpy.linalg.vector_norm`.
Args:
x: N-dimensional array for which to take the norm.
axis: optional axis along which to compute the vector norm. If None (default)
then ``x`` is flattened and the norm is taken over all values.
keepdims: if True, keep the reduced dimensions in the output.
ord: A string or int specifying the type of norm; default is the 2-norm.
See :func:`numpy.linalg.norm` for details on available options.
Returns:
array containing the norm of ``x``.
See also:
- :func:`jax.numpy.linalg.matrix_norm`: Norm of a matrix or stack of matrices.
- :func:`jax.numpy.linalg.norm`: More general matrix or vector norm.
Examples:
Norm of a single vector:
>>> x = jnp.array([1., 2., 3.])
>>> jnp.linalg.vector_norm(x)
Array(3.7416575, dtype=float32)
Norm of a batch of vectors:
>>> x = jnp.array([[1., 2., 3.],
... [4., 5., 7.]])
>>> jnp.linalg.vector_norm(x, axis=1)
Array([3.7416575, 9.486833 ], dtype=float32)
"""
check_arraylike('jnp.linalg.vector_norm', x)
if ord is None or ord == 2:
return ufuncs.sqrt(reductions.sum(ufuncs.real(x * ufuncs.conj(x)), axis=axis,
keepdims=keepdims))
elif ord == jnp.inf:
return reductions.amax(ufuncs.abs(x), axis=axis, keepdims=keepdims)
elif ord == -jnp.inf:
return reductions.amin(ufuncs.abs(x), axis=axis, keepdims=keepdims)
elif ord == 0:
return reductions.sum(x != 0, dtype=jnp.finfo(lax.dtype(x)).dtype,
axis=axis, keepdims=keepdims)
elif ord == 1:
# Numpy has a special case for ord == 1 as an optimization. We don't
# really need the optimization (XLA could do it for us), but the Numpy
# code has slightly different type promotion semantics, so we need a
# special case too.
return reductions.sum(ufuncs.abs(x), axis=axis, keepdims=keepdims)
elif isinstance(ord, str):
msg = f"Invalid order '{ord}' for vector norm."
if ord == "inf":
msg += "Use 'jax.numpy.inf' instead."
if ord == "-inf":
msg += "Use '-jax.numpy.inf' instead."
raise ValueError(msg)
else:
abs_x = ufuncs.abs(x)
ord_arr = lax_internal._const(abs_x, ord)
ord_inv = lax_internal._const(abs_x, 1. / ord_arr)
out = reductions.sum(abs_x ** ord_arr, axis=axis, keepdims=keepdims)
return ufuncs.power(out, ord_inv)
@export
def vecdot(x1: ArrayLike, x2: ArrayLike, /, *, axis: int = -1,
precision: PrecisionLike = None,
preferred_element_type: DTypeLike | None = None) -> Array:
"""Compute the (batched) vector conjugate dot product of two arrays.
JAX implementation of :func:`numpy.linalg.vecdot`.
Args:
x1: left-hand side array.
x2: right-hand side array. Size of ``x2[axis]`` must match size of ``x1[axis]``,
and remaining dimensions must be broadcast-compatible.
axis: axis along which to compute the dot product (default: -1)
precision: either ``None`` (default), which means the default precision for
the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
such values indicating precision of ``x1`` and ``x2``.
preferred_element_type: either ``None`` (default), which means the default
accumulation type for the input types, or a datatype, indicating to
accumulate results to and return a result with that datatype.
Returns:
array containing the conjugate dot product of ``x1`` and ``x2`` along ``axis``.
The non-contracted dimensions are broadcast together.
See also:
- :func:`jax.numpy.vecdot`: similar API in the ``jax.numpy`` namespace.
- :func:`jax.numpy.linalg.matmul`: matrix multiplication.
- :func:`jax.numpy.linalg.tensordot`: general tensor dot product.
Examples:
Vector dot product of two 1D arrays:
>>> x1 = jnp.array([1, 2, 3])
>>> x2 = jnp.array([4, 5, 6])
>>> jnp.linalg.vecdot(x1, x2)
Array(32, dtype=int32)
Batched vector dot product of two 2D arrays:
>>> x1 = jnp.array([[1, 2, 3],
... [4, 5, 6]])
>>> x2 = jnp.array([[2, 3, 4]])
>>> jnp.linalg.vecdot(x1, x2, axis=-1)
Array([20, 47], dtype=int32)
"""
check_arraylike('jnp.linalg.vecdot', x1, x2)
return jnp.vecdot(x1, x2, axis=axis, precision=precision,
preferred_element_type=preferred_element_type)
@export
def matmul(x1: ArrayLike, x2: ArrayLike, /, *,
precision: PrecisionLike = None,
preferred_element_type: DTypeLike | None = None) -> Array:
"""Perform a matrix multiplication.
JAX implementation of :func:`numpy.linalg.matmul`.
Args:
x1: first input array, of shape ``(..., N)``.
x2: second input array. Must have shape ``(N,)`` or ``(..., N, M)``.
In the multi-dimensional case, leading dimensions must be broadcast-compatible
with the leading dimensions of ``x1``.
precision: either ``None`` (default), which means the default precision for
the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
such values indicating precision of ``x1`` and ``x2``.
preferred_element_type: either ``None`` (default), which means the default
accumulation type for the input types, or a datatype, indicating to
accumulate results to and return a result with that datatype.
Returns:
array containing the matrix product of the inputs. Shape is ``x1.shape[:-1]``
if ``x2.ndim == 1``, otherwise the shape is ``(..., M)``.
See Also:
:func:`jax.numpy.matmul`: NumPy API for this function.
:func:`jax.numpy.linalg.vecdot`: batched vector product.
:func:`jax.numpy.linalg.tensordot`: batched tensor product.
Examples:
Vector dot products:
>>> x1 = jnp.array([1, 2, 3])
>>> x2 = jnp.array([4, 5, 6])
>>> jnp.linalg.matmul(x1, x2)
Array(32, dtype=int32)
Matrix dot product:
>>> x1 = jnp.array([[1, 2, 3],
... [4, 5, 6]])
>>> x2 = jnp.array([[1, 2],
... [3, 4],
... [5, 6]])
>>> jnp.linalg.matmul(x1, x2)
Array([[22, 28],
[49, 64]], dtype=int32)
For convenience, in all cases you can do the same computation using
the ``@`` operator:
>>> x1 @ x2
Array([[22, 28],
[49, 64]], dtype=int32)
"""
check_arraylike('jnp.linalg.matmul', x1, x2)
return jnp.matmul(x1, x2, precision=precision,
preferred_element_type=preferred_element_type)
@export
def tensordot(x1: ArrayLike, x2: ArrayLike, /, *,
axes: int | tuple[Sequence[int], Sequence[int]] = 2,
precision: PrecisionLike = None,
preferred_element_type: DTypeLike | None = None) -> Array:
"""Compute the tensor dot product of two N-dimensional arrays.
JAX implementation of :func:`numpy.linalg.tensordot`.
Args:
x1: N-dimensional array
x2: M-dimensional array
axes: integer or tuple of sequences of integers. If an integer `k`, then
sum over the last `k` axes of ``x1`` and the first `k` axes of ``x2``,
in order. If a tuple, then ``axes[0]`` specifies the axes of ``x1`` and
``axes[1]`` specifies the axes of ``x2``.
precision: either ``None`` (default), which means the default precision for
the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``) or a tuple of two
such values indicating precision of ``x1`` and ``x2``.
preferred_element_type: either ``None`` (default), which means the default
accumulation type for the input types, or a datatype, indicating to
accumulate results to and return a result with that datatype.
Returns:
array containing the tensor dot product of the inputs
See also:
- :func:`jax.numpy.tensordot`: equivalent API in the :mod:`jax.numpy` namespace.
- :func:`jax.numpy.einsum`: NumPy API for more general tensor contractions.
- :func:`jax.lax.dot_general`: XLA API for more general tensor contractions.
Examples:
>>> x1 = jnp.arange(24.).reshape(2, 3, 4)
>>> x2 = jnp.ones((3, 4, 5))
>>> jnp.linalg.tensordot(x1, x2)
Array([[ 66., 66., 66., 66., 66.],
[210., 210., 210., 210., 210.]], dtype=float32)
Equivalent result when specifying the axes as explicit sequences:
>>> jnp.linalg.tensordot(x1, x2, axes=([1, 2], [0, 1]))
Array([[ 66., 66., 66., 66., 66.],
[210., 210., 210., 210., 210.]], dtype=float32)
Equivalent result via :func:`~jax.numpy.einsum`:
>>> jnp.einsum('ijk,jkm->im', x1, x2)
Array([[ 66., 66., 66., 66., 66.],
[210., 210., 210., 210., 210.]], dtype=float32)
Setting ``axes=1`` for two-dimensional inputs is equivalent to a matrix
multiplication:
>>> x1 = jnp.array([[1, 2],
... [3, 4]])
>>> x2 = jnp.array([[1, 2, 3],
... [4, 5, 6]])
>>> jnp.linalg.tensordot(x1, x2, axes=1)
Array([[ 9, 12, 15],
[19, 26, 33]], dtype=int32)
>>> x1 @ x2
Array([[ 9, 12, 15],
[19, 26, 33]], dtype=int32)
Setting ``axes=0`` for one-dimensional inputs is equivalent to
:func:`jax.numpy.linalg.outer`:
>>> x1 = jnp.array([1, 2])
>>> x2 = jnp.array([1, 2, 3])
>>> jnp.linalg.tensordot(x1, x2, axes=0)
Array([[1, 2, 3],
[2, 4, 6]], dtype=int32)
>>> jnp.linalg.outer(x1, x2)
Array([[1, 2, 3],
[2, 4, 6]], dtype=int32)
"""
check_arraylike('jnp.linalg.tensordot', x1, x2)
return jnp.tensordot(x1, x2, axes=axes, precision=precision,
preferred_element_type=preferred_element_type)
@export
def svdvals(x: ArrayLike, /) -> Array:
"""Compute the singular values of a matrix.
JAX implementation of :func:`numpy.linalg.svdvals`.
Args:
x: array of shape ``(..., M, N)`` for which singular values will be computed.
Returns:
array of singular values of shape ``(..., K)`` with ``K = min(M, N)``.
See also:
:func:`jax.numpy.linalg.svd`: compute singular values and singular vectors
Examples:
>>> x = jnp.array([[1, 2, 3],
... [4, 5, 6]])
>>> jnp.linalg.svdvals(x)
Array([9.508031 , 0.7728694], dtype=float32)
"""
check_arraylike('jnp.linalg.svdvals', x)
return svd(x, compute_uv=False, hermitian=False)
@export
def diagonal(x: ArrayLike, /, *, offset: int = 0) -> Array:
"""Extract the diagonal of an matrix or stack of matrices.
JAX implementation of :func:`numpy.linalg.diagonal`.
Args:
x: array of shape ``(..., M, N)`` from which the diagonal will be extracted.
offset: positive or negative offset from the main diagonal.
Returns:
Array of shape ``(..., K)`` where ``K`` is the length of the specified diagonal.
See Also:
- :func:`jax.numpy.diagonal`: more general functionality for extracting diagonals.
- :func:`jax.numpy.diag`: create a diagonal matrix from values.
Examples:
Diagonals of a single matrix:
>>> x = jnp.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]])
>>> jnp.linalg.diagonal(x)
Array([ 1, 6, 11], dtype=int32)
>>> jnp.linalg.diagonal(x, offset=1)
Array([ 2, 7, 12], dtype=int32)
>>> jnp.linalg.diagonal(x, offset=-1)
Array([ 5, 10], dtype=int32)
Batched diagonals:
>>> x = jnp.arange(24).reshape(2, 3, 4)
>>> jnp.linalg.diagonal(x)
Array([[ 0, 5, 10],
[12, 17, 22]], dtype=int32)
"""
check_arraylike('jnp.linalg.diagonal', x)
return jnp.diagonal(x, offset=offset, axis1=-2, axis2=-1)
@export
def tensorinv(a: ArrayLike, ind: int = 2) -> Array:
"""Compute the tensor inverse of an array.
JAX implementation of :func:`numpy.linalg.tensorinv`.
This computes the inverse of the :func:`~jax.numpy.linalg.tensordot`
operation with the same ``ind`` value.
Args:
a: array to be inverted. Must have ``prod(a.shape[:ind]) == prod(a.shape[ind:])``
ind: positive integer specifying the number of indices in the tensor product.
Returns:
array of shape ``(*a.shape[ind:], *a.shape[:ind])`` containing the
tensor inverse of ``a``.
See also:
- :func:`jax.numpy.linalg.tensordot`
- :func:`jax.numpy.linalg.tensorsolve`
Examples:
>>> key = jax.random.key(1337)
>>> x = jax.random.normal(key, shape=(2, 2, 4))
>>> xinv = jnp.linalg.tensorinv(x, 2)
>>> xinv_x = jnp.linalg.tensordot(xinv, x, axes=2)
>>> jnp.allclose(xinv_x, jnp.eye(4), atol=1E-4)
Array(True, dtype=bool)
"""
check_arraylike("tensorinv", a)
arr = jnp.asarray(a)
ind = operator.index(ind)
if ind <= 0:
raise ValueError(f"ind must be a positive integer; got {ind=}")
contracting_shape, batch_shape = arr.shape[:ind], arr.shape[ind:]
flatshape = (math.prod(contracting_shape), math.prod(batch_shape))
if flatshape[0] != flatshape[1]:
raise ValueError("tensorinv is only possible when the product of the first"
" `ind` dimensions equals that of the remaining dimensions."
f" got {arr.shape=} with {ind=}.")
return inv(arr.reshape(flatshape)).reshape(*batch_shape, *contracting_shape)
@export
def tensorsolve(a: ArrayLike, b: ArrayLike, axes: tuple[int, ...] | None = None) -> Array:
"""Solve the tensor equation a x = b for x.
JAX implementation of :func:`numpy.linalg.tensorsolve`.
Args:
a: input array. After reordering via ``axes`` (see below), shape must be
``(*b.shape, *x.shape)``.
b: right-hand-side array.
axes: optional tuple specifying axes of ``a`` that should be moved to the end
Returns:
array x such that after reordering of axes of ``a``, ``tensordot(a, x, x.ndim)``
is equivalent to ``b``.
See also:
- :func:`jax.numpy.linalg.tensordot`
- :func:`jax.numpy.linalg.tensorinv`
Examples:
>>> key1, key2 = jax.random.split(jax.random.key(8675309))
>>> a = jax.random.normal(key1, shape=(2, 2, 4))
>>> b = jax.random.normal(key2, shape=(2, 2))
>>> x = jnp.linalg.tensorsolve(a, b)
>>> x.shape
(4,)
Now show that ``x`` can be used to reconstruct ``b`` using
:func:`~jax.numpy.linalg.tensordot`:
>>> b_reconstructed = jnp.linalg.tensordot(a, x, axes=x.ndim)
>>> jnp.allclose(b, b_reconstructed)
Array(True, dtype=bool)
"""
check_arraylike("tensorsolve", a, b)
a_arr, b_arr = jnp.asarray(a), jnp.asarray(b)
if axes is not None:
a_arr = jnp.moveaxis(a_arr, axes, len(axes) * (a_arr.ndim - 1,))
out_shape = a_arr.shape[b_arr.ndim:]
if a_arr.shape[:b_arr.ndim] != b_arr.shape:
raise ValueError("After moving axes to end, leading shape of a must match shape of b."
f" got a.shape={a_arr.shape}, b.shape={b_arr.shape}")
if b_arr.size != math.prod(out_shape):
raise ValueError("Input arrays must have prod(a.shape[:b.ndim]) == prod(a.shape[b.ndim:]);"
f" got a.shape={a_arr.shape}, b.ndim={b_arr.ndim}.")
a_arr = a_arr.reshape(b_arr.size, math.prod(out_shape))
return solve(a_arr, b_arr.ravel()).reshape(out_shape)
@export
def multi_dot(arrays: Sequence[ArrayLike], *, precision: PrecisionLike = None) -> Array:
"""Efficiently compute matrix products between a sequence of arrays.
JAX implementation of :func:`numpy.linalg.multi_dot`.
JAX internally uses the opt_einsum library to compute the most efficient
operation order.
Args:
arrays: sequence of arrays. All must be two-dimensional, except the first
and last which may be one-dimensional.
precision: either ``None`` (default), which means the default precision for
the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``).
Returns:
an array representing the equivalent of ``reduce(jnp.matmul, arrays)``, but
evaluated in the optimal order.
This function exists because the cost of computing sequences of matmul operations
can differ vastly depending on the order in which the operations are evaluated.
For a single matmul, the number of floating point operations (flops) required to
compute a matrix product can be approximated this way:
>>> def approx_flops(x, y):
... # for 2D x and y, with x.shape[1] == y.shape[0]
... return 2 * x.shape[0] * x.shape[1] * y.shape[1]
Suppose we have three matrices that we'd like to multiply in sequence:
>>> key1, key2, key3 = jax.random.split(jax.random.key(0), 3)
>>> x = jax.random.normal(key1, shape=(200, 5))
>>> y = jax.random.normal(key2, shape=(5, 100))
>>> z = jax.random.normal(key3, shape=(100, 10))
Because of associativity of matrix products, there are two orders in which we might
evaluate the product ``x @ y @ z``, and both produce equivalent outputs up to floating
point precision:
>>> result1 = (x @ y) @ z
>>> result2 = x @ (y @ z)
>>> jnp.allclose(result1, result2, atol=1E-4)
Array(True, dtype=bool)
But the computational cost of these differ greatly:
>>> print("(x @ y) @ z flops:", approx_flops(x, y) + approx_flops(x @ y, z))
(x @ y) @ z flops: 600000
>>> print("x @ (y @ z) flops:", approx_flops(y, z) + approx_flops(x, y @ z))
x @ (y @ z) flops: 30000
The second approach is about 20x more efficient in terms of estimated flops!
``multi_dot`` is a function that will automatically choose the fastest
computational path for such problems:
>>> result3 = jnp.linalg.multi_dot([x, y, z])
>>> jnp.allclose(result1, result3, atol=1E-4)
Array(True, dtype=bool)
We can use JAX's :ref:`ahead-of-time-lowering` tools to estimate the total flops
of each approach, and confirm that ``multi_dot`` is choosing the more efficient
option:
>>> jax.jit(lambda x, y, z: (x @ y) @ z).lower(x, y, z).cost_analysis()['flops']
600000.0
>>> jax.jit(lambda x, y, z: x @ (y @ z)).lower(x, y, z).cost_analysis()['flops']
30000.0
>>> jax.jit(jnp.linalg.multi_dot).lower([x, y, z]).cost_analysis()['flops']
30000.0
"""
check_arraylike('jnp.linalg.multi_dot', *arrays)
arrs: list[Array] = list(map(jnp.asarray, arrays))
if len(arrs) < 2:
raise ValueError(f"multi_dot requires at least two arrays; got len(arrays)={len(arrs)}")
if not (arrs[0].ndim in (1, 2) and arrs[-1].ndim in (1, 2) and
all(a.ndim == 2 for a in arrs[1:-1])):
raise ValueError("multi_dot: input arrays must all be two-dimensional, except for"
" the first and last array which may be 1 or 2 dimensional."
f" Got array shapes {[a.shape for a in arrs]}")
if any(a.shape[-1] != b.shape[0] for a, b in zip(arrs[:-1], arrs[1:])):
raise ValueError("multi_dot: last dimension of each array must match first dimension"
f" of following array. Got array shapes {[a.shape for a in arrs]}")
einsum_axes: list[tuple[int, ...]] = [(i, i+1) for i in range(len(arrs))]
if arrs[0].ndim == 1:
einsum_axes[0] = einsum_axes[0][1:]
if arrs[-1].ndim == 1:
einsum_axes[-1] = einsum_axes[-1][:1]
return jnp.einsum(*itertools.chain(*zip(arrs, einsum_axes)), # type: ignore[call-overload]
optimize='auto', precision=precision)
@export
@partial(jit, static_argnames=['p'])
def cond(x: ArrayLike, p=None):
"""Compute the condition number of a matrix.
JAX implementation of :func:`numpy.linalg.cond`.
The condition number is defined as ``norm(x, p) * norm(inv(x), p)``. For ``p = 2``
(the default), the condition number is the ratio of the largest to the smallest
singular value.
Args:
x: array of shape ``(..., M, N)`` for which to compute the condition number.
p: the order of the norm to use. One of ``{None, 1, -1, 2, -2, inf, -inf, 'fro'}``;
see :func:`jax.numpy.linalg.norm` for the meaning of these. The default is ``p = None``,
which is equivalent to ``p = 2``. If not in ``{None, 2, -2}`` then ``x`` must be square,
i.e. ``M = N``.
Returns:
array of shape ``x.shape[:-2]`` containing the condition number.
See also:
:func:`jax.numpy.linalg.norm`
Examples:
Well-conditioned matrix:
>>> x = jnp.array([[1, 2],
... [2, 1]])
>>> jnp.linalg.cond(x)
Array(3., dtype=float32)
Ill-conditioned matrix:
>>> x = jnp.array([[1, 2],
... [0, 0]])
>>> jnp.linalg.cond(x)
Array(inf, dtype=float32)
"""
check_arraylike("cond", x)
arr = jnp.asarray(x)
if arr.ndim < 2:
raise ValueError(f"jnp.linalg.cond: input array must be at least 2D; got {arr.shape=}")
if arr.shape[-1] == 0 or arr.shape[-2] == 0:
raise ValueError(f"jnp.linalg.cond: input array must not be empty; got {arr.shape=}")
  if p is None or p == 2:
    s = svdvals(x)
    r = s[..., 0] / s[..., -1]
elif p == -2:
s = svdvals(x)
r = s[..., -1] / s[..., 0]
else:
if arr.shape[-2] != arr.shape[-1]:
raise ValueError(f"jnp.linalg.cond: for {p=}, array must be square; got {arr.shape=}")
r = norm(x, ord=p, axis=(-2, -1)) * norm(inv(x), ord=p, axis=(-2, -1))
# Convert NaNs to infs where original array has no NaNs.
return jnp.where(ufuncs.isnan(r) & ~ufuncs.isnan(x).any(axis=(-2, -1)), jnp.inf, r)
@export
def trace(x: ArrayLike, /, *,
offset: int = 0, dtype: DTypeLike | None = None) -> Array:
"""Compute the trace of a matrix.
JAX implementation of :func:`numpy.linalg.trace`.
Args:
x: array of shape ``(..., M, N)`` and whose innermost two
dimensions form MxN matrices for which to take the trace.
offset: positive or negative offset from the main diagonal
(default: 0).
dtype: data type of the returned array (default: ``None``). If ``None``,
then output dtype will match the dtype of ``x``, promoted to default
precision in the case of integer types.
Returns:
array of batched traces with shape ``x.shape[:-2]``
See also:
- :func:`jax.numpy.trace`: similar API in the ``jax.numpy`` namespace.
Examples:
Trace of a single matrix:
>>> x = jnp.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]])
>>> jnp.linalg.trace(x)
Array(18, dtype=int32)
>>> jnp.linalg.trace(x, offset=1)
Array(21, dtype=int32)
>>> jnp.linalg.trace(x, offset=-1, dtype="float32")
Array(15., dtype=float32)
Batched traces:
>>> x = jnp.arange(24).reshape(2, 3, 4)
>>> jnp.linalg.trace(x)
Array([15, 51], dtype=int32)
"""
check_arraylike('jnp.linalg.trace', x)
return jnp.trace(x, offset=offset, axis1=-2, axis2=-1, dtype=dtype)
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@jax@_src@numpy@linalg.py@.PATH_END.py
|
{
"filename": "_ticks.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choroplethmapbox/colorbar/_ticks.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
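    """Enumerated validator for the ``ticks`` property of
    ``choroplethmapbox.colorbar``; permitted values are "outside",
    "inside", and "" (no ticks)."""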
def __init__(
self, plotly_name="ticks", parent_name="choroplethmapbox.colorbar", **kwargs
):
super(TicksValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
values=kwargs.pop("values", ["outside", "inside", ""]),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choroplethmapbox@colorbar@_ticks.py@.PATH_END.py
|
{
"filename": "_tickvalssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterternary/marker/colorbar/_tickvalssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
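    """Validator for ``tickvalssrc``, which references the source data
    backing the ``tickvals`` property of
    ``scatterternary.marker.colorbar``."""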
def __init__(
self,
plotly_name="tickvalssrc",
parent_name="scatterternary.marker.colorbar",
**kwargs
):
super(TickvalssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterternary@marker@colorbar@_tickvalssrc.py@.PATH_END.py
|
{
"filename": "FitsHandlers.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/GalfitModule/Classes/FitsHandlers.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import sys
from os.path import join as pj
from os.path import exists
import subprocess
import shutil
from copy import deepcopy
from IPython import get_ipython
from astropy.io import fits
import gc
import psutil
import numpy as np
import pandas as pd
import scipy.linalg as slg
from scipy.stats import norm, kstest
from skimage.draw import disk, ellipse
import imageio.v3 as iio
import matplotlib.pyplot as plt
# In[2]:
# For debugging purposes
from IPython import get_ipython
def in_notebook():
ip = get_ipython()
if ip:
return True
else:
return False
# In[3]:
_HOME_DIR = os.path.expanduser("~")
if in_notebook():
_SPARCFIRE_DIR = pj(_HOME_DIR, "sparcfire_matt")
_MODULE_DIR = pj(_SPARCFIRE_DIR, "GalfitModule")
else:
try:
_SPARCFIRE_DIR = os.environ["SPARCFIRE_HOME"]
_MODULE_DIR = pj(_SPARCFIRE_DIR, "GalfitModule")
except KeyError:
if __name__ == "__main__":
print("SPARCFIRE_HOME is not set. Please run 'setup.bash' inside SpArcFiRe directory if not done so already.")
print("Checking the current directory for GalfitModule, otherwise quitting.")
_MODULE_DIR = pj(os.getcwd(), "GalfitModule")
if not exists(_MODULE_DIR):
raise Exception("Could not find GalfitModule!")
sys.path.append(_MODULE_DIR)
#from Classes.Parameters import *
#from Classes.Components import *
from Classes.Containers import *
from Functions.helper_functions import *
# In[4]:
class HDU:
def __init__(self,
name = "observation",
header = {},
data = None
):
self._hdu_info = {
"name" : name,
"header" : deepcopy(dict(header)),
"data" : deepcopy(np.array(data))
}
# ==========================================================================================================
@property
def name(self):
return self._hdu_info.get("name", "")
@name.setter
def name(self, new_name):
self._hdu_info["name"] = new_name
@property
def header(self):
return self._hdu_info.get("header", {})
@header.setter
def header(self, new_header):
self._hdu_info["header"] = deepcopy(dict(new_header))
@property
def data(self):
return self._hdu_info.get("data", "")
@data.setter
def data(self, new_data):
self._hdu_info["data"] = deepcopy(np.array(new_data))
# ==========================================================================================================
def __str__(self):
header_str = ""
for k,v in self.header.items():
header_str += f"{k} = {v}\n"
output_str = f"{self.name}\n{header_str}Img size: {np.shape(self.data)}"
return output_str
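# Minimal usage sketch for HDU (values are illustrative):
#   hdu = HDU(name = "observation", header = {"EXPTIME": 30}, data = np.zeros((2, 2)))
#   print(hdu)  # prints the name, the header as key = value lines, and the image size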
# In[5]:
class FitsFile:
def __init__(self,
filepath,
names = ["observation"],
from_galfit = False,
wait = False,
**kwargs
):
self.filepath = filepath
self.all_hdu = {}
# Use split over rstrip in case a waveband designation is given
# (rstrip will remove any character that matches in the substring)
# i.e. 12345678910_g would lose the _g for "_galfit_out.fits"
# TODO: Replace rstrip with split in the rest of these scripts...
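        # For example, "12345678910_g_galfit_out.fits".rstrip("_galfit_out.fits")
        # gives "12345678910" (the "_g" is eaten because rstrip strips a character
        # set, not a suffix), while .split("_galfit_out.fits")[0] gives "12345678910_g"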
self.gname = kwargs.get("gname", os.path.basename(filepath).split("_galfit_out.fits")[0])
assert os.path.splitext(filepath)[-1].lower() == ".fits", "File being passed into FitsHandler must be .fits!"
        try:
            file_in = fits.open(filepath)
        except FileNotFoundError:
            print(f"Can't open to read the file, {filepath}. Check name/permissions/directory.")
            raise
        except OSError as ose:
            print(f"Something went wrong! {ose}")
            raise
# FITS starts the index at 0 but GALFIT outputs the observation image at 1
# Also converting the header to a dict to save some trouble
assert_str = f"Number of HDU names fed to object ({len(names)}) does not match number of HDUs in {filepath} ({len(file_in)})!"
if from_galfit:
assert len(names) + 1 == len(file_in), assert_str
self.num_imgs = len(file_in) - 1
else:
assert len(names) == len(file_in), assert_str
self.num_imgs = len(file_in)
for i, name in enumerate(names):
index = i
if from_galfit:
index += 1
header = deepcopy(dict(file_in[index].header))
data = deepcopy(file_in[index].data)
hdu = HDU(name = name, header = header, data = data)
self.all_hdu[name] = hdu
if self.num_imgs == 1:
self.header = self.all_hdu[names[0]].header
self.data = self.all_hdu[names[0]].data
self.file = file_in
# Wait is for continuing to use the file in some other capacity
# i.e. for outputfits below to grab more info
if not wait:
#file_in.close(verbose = True)
self.close()
#print("Did it close?", file_in.closed)
# assert hdu_num == 4, "File being passed into FitsHandler has too few output HDUs."
# ==========================================================================================================
def check_hdu_type(self, hdu):
assert isinstance(hdu, HDU), "Input HDU is not an HDU class!"
# ==========================================================================================================
@property
def observation(self):
return self.all_hdu.get("observation", None)
@observation.setter
def observation(self, new_hdu):
self.check_hdu_type(new_hdu)
self.all_hdu["observation"] = new_hdu
# ==========================================================================================================
#def close(self):
# self.file.close(verbose = True)
# return
def close(self):
"""Destructor for closing FITS files."""
for ext in self.file:
try:
del ext.data
del ext.header
            except AttributeError:
                # Some extensions may not expose data/header attributes
                pass
gc.collect()
try:
self.file.close(verbose = True)
except Exception as ee:
print(f'Failed to close FITS instance for {self.gname}: {ee}')
print("May already be closed...")
# ==========================================================================================================
def to_png(self, cleanup = True, **kwargs): #tmp_fits_path = "", tmp_png_path = "", out_filename = ""):
gname = kwargs.get("gname", self.gname)
# TODO: BAD ASSUMPTION MOVING FORWARD
#tmp_fits_path = kwargs.get("tmp_fits_path", self.filepath)
fits_path = kwargs.get("fits_path", self.filepath)
# .../galfits -> galfit_png
#tmp_png_dir = os.path.split(tmp_fits_path)[0].rstrip("s") + "_png"
tmp_png_dir = kwargs.get("tmp_png_dir", "./")
tmp_png_path = pj(tmp_png_dir, gname)
tmp_png_path = kwargs.get("tmp_png_path", tmp_png_path)
# TODO: Add starmask into output fits file as an image block
# with fits.open(starmask_path) as sm:
# starmask_HDU = fits.ImageHDU(data = sm.data, header = sm.header, name = "STARMASK")
# with fits.open(fits_path, mode='update', output_verify='ignore') as fits_hdu:
# fits_hdu.append(starmask_HDU)
starmask_dir = kwargs.get("starmask_dir", "./")
# Temporarily hardcoding the suffix here
starmask_path = pj(starmask_dir, f"{gname}_star-rm.fits")
out_png_dir = kwargs.get("out_png_dir", "./")
capture_output = bool(kwargs.get("silent", False))
combined_suffix = kwargs.get("combined_suffix", "combined")
primary_img_num = kwargs.get("primary_img_num", 1)
fitspng_param = "0.25,1" #1,150"
fitspng_param_model = "0.25,0.75"
# Different conventions... 0 is used for model/observation only
if self.num_imgs == 1:
primary_img_num = 0
else:
if exists(starmask_path):
# copied from below
# ASSUME (for now) that this is doable
# TODO: remove this when starmask is incorporated into fits files
feedme = FeedmeContainer(path_to_feedme = fits_path, header = GalfitHeader())
feedme.from_file(list(self.all_hdu.values())[1].header)
crop_box = feedme.header.region_to_fit.value
# To adjust for python indexing
# Also, reminder, non-inclusive of end
xbox_min, xbox_max, ybox_min, ybox_max = crop_box[0] - 1, crop_box[1], crop_box[2] - 1, crop_box[3]
with fits.open(starmask_path) as fits_starmask:
# masked pixels have value 1, other pixels 0
# so invert those with a bit of quick math
starmask_data = np.abs(fits_starmask[0].data - 1)
starmask_data = starmask_data[xbox_min : xbox_max, ybox_min : ybox_max]
with fits.open(fits_path, mode='update', output_verify='ignore') as fits_hdu:
try:
fits_hdu[primary_img_num + 2].data *= starmask_data
except ValueError:
print("Broadcasting issue when attempting to mask the residual array.")
print("Leaving it alone")
im1 = f"{tmp_png_path}_observation.png"
im2 = f"{tmp_png_path}_out.png"
im3 = f"{tmp_png_path}_residual.png"
# run_fitspng from helper_functions, string path to fitspng program
fitspng_cmd1 = f"{run_fitspng} -fr \"{fitspng_param}\" -o {im1} {fits_path}[{primary_img_num}]"
fitspng_cmd2 = f"{run_fitspng} -fr \"{fitspng_param_model}\" -o {im2} {fits_path}[{primary_img_num + 1}]"
fitspng_cmd3 = f"{run_fitspng} -fr \"{fitspng_param}\" -o {im3} {fits_path}[{primary_img_num + 2}]"
cmds = [fitspng_cmd1, fitspng_cmd2, fitspng_cmd3]
output_png_files = [im1, im2, im3]
# for n-images
for i in range(primary_img_num + 3, self.num_imgs):
png_name = f"{tmp_png_path}_image{i}.png"
output_png_files.append(png_name)
cmds.append(
f"{run_fitspng} -fr \"{fitspng_param}\" -o {png_name} {fits_path}[{i}]"
)
# sp is from helper_functions, subprocess.run call
for cmd in cmds[:self.num_imgs]:
# We must capture this call to check if the conversion worked
fitspng_out = sp(cmd, capture_output = True)
if "error" in fitspng_out.stderr.lower():
print("Skipping fitspng conversion... there is likely a library (libcfitsio) issue.")
print(f"Error is:\n{fitspng_out.stderr}")
self.combined_png = ""
return
if self.num_imgs == 1:
combined_suffix = ""
# Adding 'magick' to use the portable version in the GalfitModule
run_montage = shutil.which("magick")
if not run_montage:
run_montage = shutil.which("montage")
if not run_montage:
print("Cannot find 'magick' or 'montage' via 'which'.")
print("Proceeding to generate individual pngs without combining them.")
self.combined_png = ""
cleanup = False
else:
run_montage += " montage"
montage_cmd = run_montage + " " + \
" ".join(im_cmd for idx, im_cmd in enumerate(output_png_files)
if idx + 1 <= self.num_imgs)
tiling = f"1x{self.num_imgs}"
if kwargs.get("horizontal", None):
tiling = f"{self.num_imgs}x1"
combined_suffix += "_horizontal"
# Combining the images using ImageMagick
# If this is a single image, it'll also resize for me so that's why I leave it in
montage_cmd += f" -tile {tiling} -geometry \"175x175+2+2\" " \
f"{pj(out_png_dir, gname)}_{combined_suffix}.png"
if run_montage:
_ = sp(montage_cmd, capture_output = capture_output)
self.combined_png = f"{pj(out_png_dir, gname)}_{combined_suffix}.png"
if cleanup:
_ = rm_files(*output_png_files)
else:
self.observation_png = im1
self.model_png = im2
self.residual_png = im3
self.all_png = output_png_files
# ==========================================================================================================
def __sub__(self, other):
names = self.all_hdu.keys()
assert_str1 = "Cannot subtract the data from these two FITS files, they do not contain the same number of HDUs!"
assert len(self.all_hdu) == len(other.all_hdu), assert_str1
assert_str2 = "Cannot subtract the data from these two FITS files, they do not have the same image dimensions!\n"
for i, (a, b) in enumerate(zip(self.all_hdu.values(), other.all_hdu.values())):
shape_a = np.shape(a.data)
shape_b = np.shape(b.data)
assert shape_a == shape_b, assert_str2 + f"At HDU {i}, the images have shapes {shape_a} & {shape_b}."
        # zip quietly stops at the shortest iterable, so without the assertions
        # above we could silently compare mismatched HDUs (e.g. in the residual)
result = {k : a.data - b.data for k, a, b in zip(names, self.all_hdu.values(), other.all_hdu.values())}
return result
# ==========================================================================================================
# def __str__(self):
# pass
#return output_str
# ==========================================================================================================
def header_dict(self, name = ""):
if name:
output_dict = dict(self.all_hdu[name].header)
else:
output_dict = {name : dict(hdu.header) for name, hdu in self.all_hdu.items()}
return output_dict
# ==========================================================================================================
# In[6]:
class OutputFits(FitsFile):
def __init__(
self,
filepath,
load_default = True,
**kwargs
):
FitsFile.__init__(
self,
filepath = filepath,
names = ["observation", "model", "residual"],
wait = True,
from_galfit = True,
**kwargs
)
# Dict is very redundant here but just for funsies
# FITS header not Feedme header
self.header = deepcopy(dict(self.model.header))
# Can call the helper directly since we're just using the header dict
#_header.from_file_helper_dict(self.header)
self.feedme = FeedmeContainer(path_to_feedme = filepath, header = GalfitHeader(), load_default = load_default, **kwargs)
self.feedme.from_file(self.header)
self.data = deepcopy(self.model.data)
self.bulge_mask = np.ones(np.shape(self.data))
self.close()
# ==========================================================================================================
@property
def model(self):
return self.all_hdu.get("model", None)
@model.setter
def model(self, new_hdu):
self.check_hdu_type(new_hdu)
self.all_hdu["model"] = new_hdu
@property
def residual(self):
return self.all_hdu.get("residual", None)
@residual.setter
def residual(self, new_hdu):
self.check_hdu_type(new_hdu)
self.all_hdu["residual"] = new_hdu
# ==========================================================================================================
def generate_bulge_mask(self, sparcfire_csv):
# Thanks Azra!
bulge_mask = np.ones(np.shape(self.model.data))
try:
info = pd.read_csv(sparcfire_csv, dtype = str).dropna()
except FileNotFoundError as fe:
print(fe)
return bulge_mask
if "rejected" in info[' fit_state']:
print(f"SpArcFiRe fit_state 'rejected'. Cannot determine the bulge mask for {self.gname}.")
return bulge_mask
try:
input_size = float(str(info[' iptSz'][0]).split()[0][1:])
except Exception as e:
print(f"There is an issue determining the bulge mask for {self.gname}.")
return bulge_mask
bulge_rad = float(info[' bulgeMajAxsLen'][0])
# In radians
bulge_angle = float(info[' bulgeMajAxsAngle'][0])
axis_ratio = float(info[' bulgeAxisRatio'][0]) # Maj/minor
if axis_ratio < 0.5:
axis_ratio = 0.5
# + 1 added for effect
major_rad = int(bulge_rad * len(self.model.data[0]) // input_size) + 1
minor_rad = int(major_rad/axis_ratio)
crop_box = self.feedme.header.region_to_fit.value
# To adjust for python indexing
xbox_min, ybox_min = crop_box[0] - 1, crop_box[2] - 1
# Shifting everything to origin
center_x, center_y = np.array(self.feedme.bulge.position.value, dtype = int) - 1 -\
np.array((xbox_min, ybox_min), dtype = int)
# center_x2, bulge_y2 = np.array(self.feedme.bulge.position, dtype = int) - \
# np.array((xbox_min, ybox_min), dtype = int) + \
# rad
#xx, yy = disk((center_x, center_y), major_rad)
try:
xx, yy = ellipse(center_x, center_y, major_rad, minor_rad, rotation = bulge_angle, shape = np.shape(self.model.data))
except Exception as e:
print(e)
print(self.gname)
            print(center_x, center_y, major_rad, minor_rad, bulge_angle)
return bulge_mask
bulge_mask[xx, yy] = 0
self.bulge_mask = bulge_mask
# temp = self.model.data
# plt.imshow(temp, origin = "lower")
# plt.show()
# temp[xx, yy] = np.min(self.model.data[np.nonzero(self.model.data)])
# plt.imshow(temp, origin = "lower")
# plt.show()
#self.close()
return bulge_mask
# ==========================================================================================================
def generate_cluster_mask(self, cluster_mask_png, crop_box):
# 1237668297135030610-D_clusMask.png
cluster_mask = None
try:
cluster_img = iio.imread(cluster_mask_png, mode = "L")
except FileNotFoundError as fe:
print(fe)
return cluster_mask
xbox_min, xbox_max, ybox_min, ybox_max = crop_box[0] - 1, crop_box[1], crop_box[2] - 1, crop_box[3]
cluster_img = cluster_img[xbox_min:xbox_max, ybox_min:ybox_max]
cluster_mask = deepcopy(cluster_img)
# Mask non-clusters
cluster_mask[cluster_img == 0] = 1
# Leave clusters alone
cluster_mask[cluster_img != 0] = 0
self.cluster_mask = cluster_mask
return cluster_mask
# ==========================================================================================================
def generate_masked_residual(
self,
mask,
use_bulge_mask = False,
use_cluster_mask = False,
update_fits_header = True
):
small_number = 1e-8
crop_box = self.feedme.header.region_to_fit.value
# To adjust for python indexing
# Also, reminder, non-inclusive of end
xbox_min, xbox_max, ybox_min, ybox_max = crop_box[0] - 1, crop_box[1], crop_box[2] - 1, crop_box[3]
# To invert the matrix since galfit keeps 0 valued areas
crop_mask = 1
if mask is not None and np.any(mask.data):
cropped_mask = mask.data[xbox_min:xbox_max, ybox_min:ybox_max]
mask_shape = np.shape(mask.data)
# Fixing two common issues, hence two if statements
if np.shape(cropped_mask) != np.shape(self.model.data):
# The issue seems to pop up at the max border when either the min or the max runs up
# against the original image size
print("Shape mismatch between crop mask and model. Likely an indexing issue due to crop mask running into image bounds. Attempting to fix.")
diff = np.array(np.shape(cropped_mask)) - np.array(np.shape(self.model.data))
# One or the other... No graceful way to do this
if xbox_min == 0:
xbox_max -= diff[0]
else:
xbox_min -= diff[0]
                if ybox_min == 0:
                    ybox_max -= diff[1]
                else:
                    ybox_min -= diff[1]
cropped_mask = mask.data[xbox_min:xbox_max, ybox_min:ybox_max]
# Now we're just trying to brute force it and hope for the best
if np.shape(cropped_mask) != np.shape(self.model.data):
print("Shape mismatch (again) between crop mask and model. Likely an indexing issue due to crop mask running into image bounds. Attempting to fix.")
diff = np.abs(np.array(np.shape(cropped_mask)) - np.array(np.shape(self.model.data)))
cropped_mask = np.pad(cropped_mask, ((diff[0],0), (diff[1],0)), 'constant')
# Giving up and proceeding without
if np.shape(cropped_mask) != np.shape(self.model.data):
print("Shape mismatch. Proceeding without crop mask.")
cropped_mask = 0
crop_mask = 1 - cropped_mask
feedme_dir, feedme_file = os.path.split(self.feedme.path_to_feedme)
if use_bulge_mask:
if exists(pj(feedme_dir, f"{self.gname}.csv")):
crop_mask = self.generate_bulge_mask(pj(feedme_dir, f"{self.gname}.csv")) * crop_mask
else:
# REQUIRES GENERATE_BULGE_MASK TO BE RUN SEPARATE WITH CSV FILE SPECIFIED
try:
crop_mask = self.bulge_mask * crop_mask
except AttributeError:
print(f"Could not generate bulge mask for {self.gname}. Check location of csv or run generate_bulge_mask with a specified csv file.")
except ValueError:
print(f"Could not generate bulge mask for {self.gname}. There may be an issue with sparcfire output (broadcast issue).")
if use_cluster_mask:
# Use reprojected mask
cmask_filename = pj(feedme_dir, f"{self.gname}-K_clusMask-reprojected.png")
if exists(cmask_filename):
crop_mask = self.generate_cluster_mask(cmask_filename, crop_box) * crop_mask
else:
# REQUIRES GENERATE_CLUSTER_MASK TO BE RUN SEPARATE WITH CSV FILE SPECIFIED
try:
crop_mask = self.cluster_mask * crop_mask
except AttributeError:
print(f"Could not generate cluster mask for {self.gname}. Check location of csv or run generate_bulge_mask with a specified csv file.")
except ValueError:
print(f"Could not generate cluster mask for {self.gname}. There may be an issue with sparcfire output (broadcast issue).")
try:
# compare to gaussian with same mean, std via kstest
# if p value high, not that different
self.masked_residual = (self.observation.data - self.model.data)*crop_mask
exclude_masked_pixels = self.masked_residual[np.abs(self.masked_residual) > 0]
mean = np.mean(exclude_masked_pixels)
std = np.std(exclude_masked_pixels)
gaussian = norm.rvs(size = len(exclude_masked_pixels), loc = mean, scale = std, random_state = 0)
self.kstest = kstest(gaussian, exclude_masked_pixels.flatten())
pvalue = self.kstest.pvalue
#statistic = self.kstest.statistic
# gaussian = norm.rvs(size = len(self.masked_residual)**2, loc = mean, scale = std, random_state = 0)
# noised_masked_pixels = np.where(np.abs(self.masked_residual.flatten()) > 0, self.masked_residual.flatten(), gaussian)
# self.kstest = kstest(gaussian, noised_masked_pixels)
self.norm_observation = slg.norm(crop_mask*self.observation.data)
self.norm_model = slg.norm(crop_mask*self.model.data)
self.norm_residual = slg.norm(crop_mask*self.residual.data)
self.masked_residual_normalized = self.masked_residual/min(self.norm_observation, self.norm_model)
self.wayne_residual = self.norm_residual/self.norm_observation
self.wayne_quality = pvalue/self.wayne_residual # bigger is better
# obs_model = 1 - np.divide(
# crop_mask*self.observation.data/self.norm_observation,
# crop_mask*self.model.data/self.norm_model + small_number
# )
# model_obs = 1 - np.divide(
# crop_mask*self.model.data/self.norm_model,
# crop_mask*self.observation.data/self.norm_observation + small_number
# )
# Replace negative values with 1 - reciprocal
# self.masked_residual_ratio = np.where(obs_model >= 0, obs_model, model_obs)
# Masked residual normalized
# I seem to use this acronym a lot
self.nmr = slg.norm(self.masked_residual_normalized)
# self.nmrr = slg.norm(self.masked_residual_ratio)
if update_fits_header:
with fits.open(self.filepath, mode='update', output_verify='ignore') as hdul:
hdul[2].header["NMR"] = (round(self.nmr, 8), "Norm of the masked residual")
hdul[2].header["W_NMR"] = (round(self.wayne_residual, 8), "Wayne's residual")
#hdul[2].header["W_Q"] = (round(self.nmr, 8), "Wayne's quality measure")
# pvalue is sometimes none but round can't handle it
if isinstance(pvalue, float): # and isinstance(statistic, float):
hdul[2].header["KS_P"] = (round(pvalue, 8), "p value of kstest vs noise")
#hdul[2].header["KS_STAT"] = (round(statistic, 8), "statistic value of kstest vs noise")
else:
hdul[2].header["KS_P"] = (None, "p value of kstest vs noise")
#hdul[2].header["KS_STAT"] = (None, "statistic value of kstest vs noise")
except ValueError:
print(f"There may be a broadcast issue, observation, model, crop mask: ", end = "")
print(f"{np.shape(self.observation.data)}, {np.shape(self.model.data)}, {np.shape(crop_mask)}")
# print(np.shape(mask_fits_file.data))
# print(np.shape(fits_file.data))
# print(crop_box)
#self.close()
return None
#self.close()
return self.masked_residual_normalized
# ==========================================================================================================
# In[7]:
if __name__ == "__main__":
from RegTest.RegTest import *
# In[8]:
# Testing from_file
if __name__ == "__main__":
gname = "1237671124296532233"
obs = pj(SAMPLE_DIR, f"{gname}.fits")
model = pj(SAMPLE_DIR, f"{gname}_galfit_out.fits")
mask = pj(SAMPLE_DIR, f"{gname}_star-rm.fits")
test_obs = FitsFile(obs)
test_model = OutputFits(model)
test_mask = FitsFile(mask)
print(test_obs.observation)
print()
print(test_model.feedme)
print()
print(test_model.model)
# Purposefully do not fill in some of the header parameters
# since those do not exist in the output FITS header
# This is done to remind the user/programmer that the
# OutputFits object only serves to represent the header
# nothing more, nothing less and so also reminds them to
# use a different method to fill in the header.
#print(test_model.feedme.header)
# _header = GalfitHeader()
# _header.from_file_helper(test_out.header)
# crop_box = _header.region_to_fit
# # To adjust for python indexing
# box_min, box_max = crop_box[0] - 1, crop_box[1]
# print(np.shape(test_in.data[box_min:box_max, box_min:box_max]))
print("\nThese should all be the same .")
print(np.shape(test_model.observation.data))
print(np.shape(test_model.data))
print(np.shape(test_model.residual.data))
crop_box = test_model.feedme.header.region_to_fit.value
# + 1 to account for python indexing
crop_rad = crop_box[1] - crop_box[0] + 1
print(f"({crop_rad}, {crop_rad})")
print("Andddd pre crop")
print(np.shape(test_obs.observation.data))
# In[9]:
# Unit test to check value of masked residual
if __name__ == "__main__":
# Turn off updating FITS header to avoid modifying test data
print("No bulge mask")
_ = test_model.generate_masked_residual(test_mask, use_bulge_mask = False, update_fits_header = False)
print(f"Norm of the observation: {test_model.norm_observation:.4f}")
print(f"Norm of the model: {test_model.norm_model:.4f}")
print(f"Norm of the residual: {test_model.norm_residual:.4f}")
print(f"Norm of the masked residual: {test_model.nmr:.4f}")
print(f"Wayne's residual: {test_model.wayne_residual:.4f}")
#print(f"Norm of the masked residual ratio: {test_model.nmrr:.8f}")
print(f"kstest p value: {test_model.kstest.pvalue:.4f}")
#print(f"kstest statistic: {test_model.kstest.statistic:.4f}")
print("\nNow with bulge mask")
_ = test_model.generate_bulge_mask(pj(TEST_DATA_DIR, "test-out", gname, f"{gname}.csv"))
_ = test_model.generate_masked_residual(test_mask, update_fits_header = False)
print(f"Norm of the observation: {test_model.norm_observation:.4f}")
print(f"Norm of the model: {test_model.norm_model:.4f}")
print(f"Norm of the residual: {test_model.norm_residual:.4f}")
print(f"Norm of the masked residual: {test_model.nmr:.4f}")
print(f"Wayne's residual: {test_model.wayne_residual:.4f}")
#print(f"Norm of the masked residual ratio: {test_model.nmrr:.8f}")
print(f"kstest p value: {test_model.kstest.pvalue:.4f}")
#print(f"kstest statistic: {test_model.kstest.statistic:.4f}")
#print(np.min(test_model.observation.data))
# In[10]:
if __name__ == "__main__":
model = pj(SAMPLE_DIR, f"sample_model_galfit_out.fits")
model_to_update = pj(TEST_OUTPUT_DIR, f"temp_galfit_out.fits")
if exists(model_to_update):
#sp(f"rm -f {model_to_update}")
rm_files(model_to_update)
_ = sp(f"cp {model} {model_to_update}")
test_model = OutputFits(model_to_update)
print("Checking FITS header update with NMR")
print("Does the updated FITS file contain NMR and KStest keys?")
keys_to_check = ("NMR", "KS_P", "W_NMR")
# TODO: replace fits file with one without those header options
# Expect False
print("Before... (expect False)", all(k in test_model.header for k in keys_to_check))
assert not all(k in test_model.header for k in keys_to_check), "Expected False."
_ = test_model.generate_masked_residual(test_mask)
test_model = OutputFits(model_to_update)
print("After...", all(k in test_model.header for k in keys_to_check))
assert all(k in test_model.header for k in keys_to_check), "Expected True."
# In[11]:
if __name__ == "__main__":
print("Checking if all FITS files are closed...")
print("Expect True:", not any("fits" in pof.path for pof in psutil.Process().open_files()))
assert not any("fits" in pof.path for pof in psutil.Process().open_files()), "Expected True."
# In[12]:
if __name__ == "__main__":
export_to_py("FitsHandlers", pj(_MODULE_DIR, "Classes", "FitsHandlers"))
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@GalfitModule@Classes@FitsHandlers.py@.PATH_END.py
|
{
"filename": "castor_source.py",
"repo_name": "AndreaSimongini/CASTOR",
"repo_path": "CASTOR_extracted/CASTOR-main/Scripts/castor_source.py",
"type": "Python"
}
|
######################################################################################################
# ######################################################
# ____ _ ____ _____ ___ ____ ################################. ####################
# / ___| / \ / ___|_ _/ _ \| _ \ ############################### ##############
# | | / _ \ \___ \ | || | | | |_) | ####################### # ###########
# | |___ / ___ \ ___) || || |_| | _ < #################### ###########
# \____/_/ \_\____/ |_| \___/|_| \_\ ##################. ############
# ################## ############
# ################# ################
# ################ ################
# Welcome to CASTOR v1.0 ############### .#############
# ############### ## #############
# In case of problems please ### .- +###################
# contact # ###################
# andrea.simongini@inaf.it # # ##################
# ## ##########################################
# ######################################################
######################################################################################################
######################################################################################################
######################################################################################################
######################################################################################################
######################################################################################################
#
# Define the needed libraries
#
#
import numpy as np
import pandas as pd
import os
import math
import time as TT
import george
import warnings
import matplotlib.pyplot as plt
from tabulate import tabulate
from collections import defaultdict
from joblib import Parallel, delayed
from astropy.io import ascii
from scipy import optimize as op
from scipy.stats import chisquare
from scipy.optimize import curve_fit
from scipy.integrate import trapezoid as trapz
from functools import partial
from george.kernels import Matern32Kernel
from multiprocessing import Pool
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, Matern
from sklearn.exceptions import ConvergenceWarning
#
# Some parameters you may want to change
#
limit_sp_points = 5000
n_templates = 50
alpha_templ = 1e-12
n_cores = 5
acc = 0.8
cal_parms = m, q = (0.9186, 1.2233)
#
#
# Other ancillary definitions
#
#
bandpass = {}
bandpass['filterlist'] = filterlist = ["UVW2", "UVM2", "UVW1", "u'", "u", "U", "B", "g",
"g'", "V", "r", "r'", "R", "i", "i'", "I", "z", "z'",
"Y", "J", "H", "Ks", "K"]
bandpass['centroid'] = centroid = ([1928, 2246, 2600, 3500, 3546, 3571, 4344, 4767,
4825, 5456, 6156, 6261, 6442, 7472, 7672, 7994, 8917, 9097,
10380, 12500, 16300, 21450, 21900])
bandpass['fwhm'] = fwhm = ([657, 498, 693, 80, 457, 524, 797, 928, 1379,
807, 813, 1382, 1377, 891, 1535, 1079, 1183, 1370,
2130, 3110, 4020, 3770, 3930])
lines = {'Nitrogen' : 3485 , 'Calcium II' : 3706 , 'Calcium H&K' : 3932 ,
'Hydrogen d' : 4102 , 'Hydrogen g' : 4341 , 'Helium Ia' : 4471 ,
'Magnesium I]' : 4571 , 'Hydrogen b' : 4861 , 'Iron IIa' : 4900 ,
'Iron IIb' : 5300 , 'Iron IIc' : 5500 , 'Oxygen Ia' : 5577 ,
'Helium Ib' : 5876 , 'Silicon II' : 6150 , 'Oxygen Ib' : 6300 ,
'Hydrogen a' : 6563 , 'Helium Ic' : 6678 , 'Helium Id' : 7065 ,
'Iron IId' : 7155 , '[Calcium II]' : 7300 , 'Oxygen Ic' : 7330 ,
'Carbon IV' : 7724 , 'Oxygen Id' : 7776 , 'Calcium nir' : 8500
}
light_vel_A = 3e+18
light_vel_km = 3e+5
Hubble_70 = 70
M_sun_g = 1.98892e+33
R_sun_mpc = 2.25e-14
h_planck = 6.63e-27
k_boltz = 1.38e-16
mpc_to_cm = 3.0856775813e+24
days_to_sec = 86400
km_to_cm = 100000
neutrino_weight = 100 / 0.1
def sampling_step(x):
dx = np.diff(np.sort(x))
return dx[dx != 0]
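# e.g. sampling_step(np.array([0., 1., 1., 3.])) returns array([1., 2.]):
# the nonzero gaps between consecutive sorted sample times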
def nll(p, y, gp, computed):
gp.set_parameter_vector(p)
ll = gp.log_likelihood(y, quiet=True)
return -ll if np.isfinite(ll) else 1e25
def grad_nll(p, y, gp, computed):
gp.set_parameter_vector(p)
return -gp.grad_log_likelihood(y, quiet=True)
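# nll/grad_nll adapt george's log-likelihood to scipy.optimize.minimize, which
# minimizes (hence the sign flips); the 1e25 sentinel steers the optimizer away
# from parameter vectors with a non-finite likelihood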
def blackbody(wave, temperature, radius, zeta, distance):
black_func_cost = ( 2 * h_planck * light_vel_A**2 ) * 10e+16
black_func_var = ( 1 / (np.exp(h_planck * light_vel_A / (wave * k_boltz * temperature)) -1 )) / wave**5
modified_flux = ( zeta * radius / distance ) ** 2 * np.pi * black_func_cost * black_func_var * wave
return modified_flux
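# blackbody() evaluates, up to the fixed scale factor above, a dilated Planck
# spectrum F(wave) = (zeta * radius / distance)**2 * pi * B_wave(T) * wave, with
# B_wave(T) = 2 h c**2 / (wave**5 * (exp(h c / (wave k T)) - 1)) in cgs units
# and wavelengths in Angstrom (light_vel_A is c in Angstrom per second)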
def read_excel(excel_path):
    # Read the training-set table once and split out its columns
    name_file = 'Training_Set.xlsx'
    training_df = pd.read_excel(excel_path + name_file)
    list_of_names = training_df['Name'].tolist()
    list_of_types = training_df['Type'].tolist()
    list_of_reds = training_df['Redshift'].tolist()
    return list_of_names, list_of_types, list_of_reds
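# Usage sketch (assumes a Training_Set.xlsx with Name/Type/Redshift columns
# inside excel_path):
#   list_of_names, list_of_types, list_of_reds = read_excel('/path/to/training/')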
#
#
#
#
# Class objects
#
#
#
#
#
class comparison():
def __init__(self, sn_name, sn_path, training_set, training_path):
self.sn_name = sn_name
self.sn_path = sn_path
self.training_set = training_set
self.training_path = training_path
def collect_lightcurves(self, lc_path):
lc_set = defaultdict(list)
band_dict = defaultdict(list)
with open(lc_path, 'r') as file:
for line in file:
parts = line.strip().split()
time = float(parts[0])
mag = float(parts[1])
emag = float(parts[2])
band = parts[3]
band_dict[band].append((time, mag, emag))
for band, values in band_dict.items():
lc_set[f'time_{band}'], lc_set[f'mag_{band}'], lc_set[f'emag_{band}'] = zip(*values)
lc_filters = list(band_dict.keys())
ordered_lc_filters = [band for band in filterlist if band in lc_filters]
return ordered_lc_filters, lc_set
def time_of_explosion(self, lc_set, lc_filters):
t0, t1, dt = [], [], []
for filtro in lc_filters:
time = np.array(lc_set['time_' + filtro])
t1.append(time[0])
dt.append(abs(np.mean(np.diff(time[:10]))))
t0 = np.array(t1) - np.array(dt)
min_value = np.min(t0)
err_t0 = np.std(dt)
return min_value, err_t0
def rescale_time(self, lc_filters, lc_set, t0):
new_set = {}
for filtro in lc_filters:
time = np.array(lc_set['time_' + filtro])
mag = np.array(lc_set['mag_' + filtro])
emag = np.array(lc_set['emag_' + filtro])
scale_time = time - t0
sorted_indices = np.argsort(scale_time)
scale_time = scale_time[sorted_indices]
mag = mag[sorted_indices]
emag = emag[sorted_indices]
new_set['time_%s' % filtro] = scale_time
new_set['mag_%s' % filtro] = mag
new_set['emag_%s' % filtro] = emag
return new_set
def process_filter(self, filtro, ref_new_set, ref_time):
x = np.array(ref_new_set['time_' + filtro])
y = np.array(ref_new_set['mag_' + filtro])
yerr = np.array(ref_new_set['emag_' + filtro])
amplitude = np.mean(y)
lengthscale0 = np.mean(x)
lengthscale1 = np.min(sampling_step(x))
lengthscale2 = np.max(sampling_step(x))
k0 = amplitude * Matern32Kernel(lengthscale0)
k1 = amplitude * Matern32Kernel(lengthscale1)
k2 = amplitude * Matern32Kernel(lengthscale2)
kernel = k1 + k2 + k0
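        # Sum of Matern-3/2 kernels at three characteristic scales (the mean
        # epoch plus the smallest and largest sampling steps), intended to let
        # the GP follow both the slow light-curve evolution and any
        # short-timescale structure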
gp = george.GP(kernel)
star = gp.compute(x, yerr)
p0 = gp.get_parameter_vector()
results = op.minimize(nll, p0, args = (y, gp, star), jac=grad_nll, method="L-BFGS-B")
gp.set_parameter_vector(results.x)
mu, cov = gp.predict(y, ref_time)
std = np.sqrt(np.diag(cov))
return {'time_' + filtro: ref_time, 'mag_' + filtro: mu, 'std_' + filtro: std}
def gaussian_process(self, common_filters, ref_new_set, sn_new_set):
gp_set = defaultdict(list)
results = Parallel(n_jobs=n_cores)(delayed(self.process_filter)(filtro, ref_new_set, np.array(sn_new_set['time_' + filtro])) for filtro in common_filters)
for result in results:
gp_set.update(result)
return gp_set
def chi_squared(self, gp_set, sn_new_set, common_filters):
chi2_total_norm = ([])
chi2_total = ([])
for filtro in common_filters:
mag1 = gp_set['mag_' + filtro]
mag2 = np.array(sn_new_set['mag_' + filtro])
mag2 = np.sum(mag1)/np.sum(mag2) * mag2
chi2_single, _ = chisquare(f_obs=mag1, f_exp=mag2)
chi2_total_norm = np.append(chi2_total_norm, chi2_single/len(mag2))
chi2_total = np.append(chi2_total, chi2_single)
total_chi2 = np.sum(chi2_total_norm)
total_chi2_norm = total_chi2 / len(common_filters)
return (total_chi2_norm)
def plot_comparison(self, final_name, sn_filters, sn_new_set, out_path):
ref_filters, ref_lc_set = self.collect_lightcurves(self.training_path + "data_lightcurves/" + final_name + ".dat")
ref_t0, _ = self.time_of_explosion(ref_lc_set, ref_filters)
ref_new_set = self.rescale_time(ref_filters, ref_lc_set, ref_t0)
common_filters = [filtro for filtro in sn_filters if filtro in ref_filters]
gp_set = self.gaussian_process(common_filters, ref_new_set, sn_new_set)
num_rows = (len(common_filters) + 1) // 2
fig, ax = plt.subplots(num_rows, 2, figsize=(10, 4 * num_rows))
if num_rows == 1:
ax = ax.reshape(1, -1)
for i, filtro in enumerate(common_filters):
row = i // 2
col = i % 2
ref_time, ref_mag = gp_set['time_' + filtro], np.array(gp_set['mag_' + filtro])
sn_time, sn_mag = sn_new_set['time_' + filtro], np.array(sn_new_set['mag_' + filtro])
sn_mag = np.sum(ref_mag)/np.sum(sn_mag) * sn_mag
ax[row, col].plot(ref_time, ref_mag, '--', color='red', label=final_name)
ax[row, col].scatter(sn_time, sn_mag, color='blue', label=self.sn_name)
ax[row, col].invert_yaxis()
ax[row, col].set_xlim(-1, 100)
ax[row, col].set_xlabel('Time')
ax[row, col].set_ylabel('Magnitude')
ax[row, col].set_title(f'Filter: {filtro}')
ax[row, col].grid()
ax[row, col].legend()
plt.tight_layout()
figname = out_path + "comparison.png"
plt.savefig(figname, dpi=300)
plt.close()
def total_comparison(self, out_path, to_save=True):
sn_filters, sn_lc_set = self.collect_lightcurves(self.sn_path)
sn_t0, err_t0 = self.time_of_explosion(sn_lc_set, sn_filters)
sn_new_set = self.rescale_time(sn_filters, sn_lc_set, sn_t0)
total_chi2 = []
total_t0 = []
for ref_name in self.training_set:
data_path = self.training_path + "data_lightcurves/" + ref_name + ".dat"
ref_filters, ref_lc_set = self.collect_lightcurves(data_path)
ref_t0, _ = self.time_of_explosion(ref_lc_set, ref_filters)
ref_new_set = self.rescale_time(ref_filters, ref_lc_set, ref_t0)
            common_filters = set(ref_filters) & set(sn_filters)
            if len(common_filters) == 0 or self.sn_name == ref_name:
                chi_norm = np.inf
else:
gp_set = self.gaussian_process(common_filters, ref_new_set, sn_new_set)
chi_norm = self.chi_squared(gp_set, sn_new_set, common_filters)
total_chi2.append(chi_norm)
total_t0.append(ref_t0)
best_guess = np.argmin(total_chi2)
ref_t0 = total_t0[best_guess]
final_name = self.training_set[best_guess]
final_chi = total_chi2[best_guess]
if to_save:
self.plot_comparison(final_name, sn_filters, sn_new_set, out_path)
print('----------------------------')
        print('The best match for %s'%self.sn_name + ' is %s'%final_name + ' with a chi-squared of %.2f.'%final_chi)
return sn_new_set, sn_filters, sn_t0, err_t0, final_name, final_chi, ref_t0
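#
# Usage sketch for the comparison stage (paths and names are illustrative):
#   comp = comparison('SN2011fe', '/data/SN2011fe.dat', list_of_names, '/training/')
#   (sn_new_set, sn_filters, sn_t0, err_t0,
#    final_name, final_chi, ref_t0) = comp.total_comparison('./output/')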
#
#
#
#
class spectra_selection():
def __init__(self, final_name, ref_t0, training_path):
self.final_name = final_name
self.ref_t0 = ref_t0
self.training_path = training_path
def read_single_file(self, file_path):
file_name = os.path.basename(file_path)
epoch = float(file_name.split('_')[1].rsplit('.', 1)[0])
epoch = epoch - self.ref_t0
data = ascii.read(file_path)
return epoch, data
def collect_spectra(self):
sp_set = defaultdict(list)
spectra_path = os.path.join(self.training_path, 'data_spectra', self.final_name)
all_files = os.listdir(spectra_path)
valid_files = [os.path.join(spectra_path, file) for file in all_files if not (file.startswith('.') or file.lower() == 'desktop.ini')]
all_epochs = set()
with Pool() as pool:
results = pool.map(self.read_single_file, valid_files)
for epoch, data in results:
all_epochs.add(epoch)
sp_set[f'wave_{epoch}'] = data[0][:]
sp_set[f'flux_{epoch}'] = data[1][:]
return sorted(all_epochs), sp_set
def wavelength_coverage(self, ref_files, ref_sp_set):
min_wave = []
max_wave = []
for epoch in ref_files:
wave = np.array(ref_sp_set['wave_' + str(epoch)])
min_wave.append(np.min(wave))
max_wave.append(np.max(wave))
return ([int(np.median(min_wave)), int(np.median(max_wave))])
def filters_coverage(self, wave_range):
spectral_filters = []
for i in range(len(centroid)):
min_wave = centroid[i] - fwhm[i] / 2
max_wave = centroid[i] + fwhm[i] / 2
if wave_range[0] <= min_wave and wave_range[1] >= max_wave:
spectral_filters.append(filterlist[i])
return spectral_filters
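    # e.g. with wave_range = [3000, 9000], the g band (centroid 4767 A,
    # FWHM 928 A) spans 4303-5231 A and is kept, while UVW2
    # (1928 +/- 328.5 A) falls below the range and is dropped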
def time_coverage(self, ref_files):
maximum_epoch = np.max(ref_files) + np.mean(np.diff(ref_files))
return maximum_epoch
def final_spectra_selection(self):
ref_files, ref_sp_set = self.collect_spectra()
wave_range = self.wavelength_coverage(ref_files, ref_sp_set)
spectral_filters = self.filters_coverage(wave_range)
maximum_epoch = self.time_coverage(ref_files)
print('----------------------------')
print('The reference supernova has %i'%len(ref_files), 'spectra')
print('----------------------------')
        print('The minimum epoch is at %.2f'%min(ref_files), 'days after the explosion')
        print('The maximum epoch is at %.2f'%max(ref_files), 'days after the explosion')
print('----------------------------')
print('The time cut is set at %i'%maximum_epoch, 'days after the explosion')
print('----------------------------')
print('The spectra cover the range between ', (wave_range[0]), '-', (wave_range[1]), 'A')
print('corresponding to filters: ', spectral_filters)
return ref_files, ref_sp_set, wave_range, spectral_filters, maximum_epoch
#
#
#
#
class build_templates():
def __init__(self, sn_name, sn_new_set, sn_filters, sn_t0, wave_range,
ref_sp_set, ref_files, ref_t0, maximum_epoch):
self.sn_name = sn_name
self.lc_set = sn_new_set
self.lc_filters = sn_filters
self.t0 = sn_t0
self.ref_files = ref_files
self.ref_sp_set = ref_sp_set
self.ref_t0 = ref_t0
self.min_w = wave_range[0]
self.max_w = wave_range[1]
self.maximum_epoch = maximum_epoch
def process_filter(self, filtro):
x = np.array(self.lc_set['time_' + filtro])
y = np.array(self.lc_set['mag_' + filtro])
yerr = np.array(self.lc_set['emag_' + filtro])
amplitude = np.mean(y)
lengthscale0 = np.mean(x)
lengthscale1 = np.min(sampling_step(x))
lengthscale2 = np.max(sampling_step(x))
k0 = amplitude * Matern32Kernel(lengthscale0)
k1 = amplitude * Matern32Kernel(lengthscale1)
k2 = amplitude * Matern32Kernel(lengthscale2)
kernel = k1 + k2 + k0
gp = george.GP(kernel)
star = gp.compute(x, yerr)
p0 = gp.get_parameter_vector()
results = op.minimize(nll, p0, args = (y, gp, star), jac=grad_nll, method="L-BFGS-B")
gp.set_parameter_vector(results.x)
t = np.linspace(np.min(x), np.max(x), len(y))
mu, cov = gp.predict(y, t)
std = np.sqrt(cov.diagonal())
eff_wav = [centroid[i] for i in range(len(filterlist)) if filterlist[i] == filtro]
wav_c = np.repeat(eff_wav[0], len(mu))
new_mu = (mu - cal_parms[1]) / cal_parms[0]
flux = light_vel_A * 10**(- 0.4* (new_mu + 48.6)) / (eff_wav[0] **2)
return {'time_' + filtro: t, 'flux_' + filtro: flux, 'wave_' + filtro: wav_c, 'std_' + filtro: std, 'mag_' + filtro: new_mu}
def gaussian_process_on_lcs(self):
gp_set = defaultdict(list)
results = Parallel(n_jobs=n_cores)(delayed(self.process_filter)(filtro) for filtro in self.lc_filters)
for result in results:
gp_set.update(result)
return gp_set
def injection(self, gp_set):
total_flux = np.concatenate([gp_set['flux_'+filtro] for filtro in self.lc_filters])
total_wave = np.concatenate([gp_set['wave_'+filtro] for filtro in self.lc_filters])
total_time = np.concatenate([gp_set['time_'+filtro] for filtro in self.lc_filters])
return total_flux, total_wave, total_time
def cut_real_spectra(self, total_flux):
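        # Thin the reference spectra so that, together with the synthetic
        # photometric points already in total_flux, the total number of samples
        # stays below the global limit_sp_points budget: first keep only every
        # cut_epoch-th spectrum, then every cut_points-th pixel within each kept
        # spectrum, increasing cut_epoch until the budget is met.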
        cut_epoch = 1
        cut_points = 1  # default: keep every pixel (avoids an UnboundLocalError when no thinning is needed)
        go = True
while go:
cut_files = list(self.ref_files)[0::cut_epoch]
all_points = np.concatenate([self.ref_sp_set['flux_' + str(epoch)] for epoch in cut_files])
if len(all_points)+len(total_flux) > limit_sp_points:
medium_points = len(all_points) / len(cut_files)
medium_desire = limit_sp_points / len(cut_files)
cut_points = math.ceil(medium_points / medium_desire)
total_points = all_points[0::cut_points]
if len(total_points)+len(total_flux) > limit_sp_points:
cut_epoch += 1
else:
go = False
else:
go = False
return cut_epoch, cut_points
def total_spectra(self, total_flux, total_wave, total_time, cut_epoch, cut_points):
for epoch in list(self.ref_files)[0::cut_epoch]:
obs_wave = self.ref_sp_set['wave_'+str(epoch)]
obs_flux = self.ref_sp_set['flux_'+str(epoch)]
obs_time = np.repeat(epoch, len(obs_flux))
total_flux = np.append(total_flux, obs_flux[0::cut_points])
total_wave = np.append(total_wave, obs_wave[0::cut_points])
total_time = np.append(total_time, obs_time[0::cut_points])
return total_flux, total_wave, total_time
def safe_region(self, total_flux, total_wave, total_time):
save_flux = total_flux[(total_wave>self.min_w)&(total_wave<self.max_w)]
save_time = total_time[(total_wave>self.min_w)&(total_wave<self.max_w)]
save_wave = total_wave[(total_wave>self.min_w)&(total_wave<self.max_w)]
return save_flux, save_wave, save_time
def time_series(self):
min_value = 0
max_value = self.maximum_epoch
time_series = np.linspace(min_value, max_value, n_templates)
return time_series
def bi_gaussian_process(self, total_flux, total_wave, total_time, time_series):
x = total_time
y = total_flux
z = total_wave
y_scale = np.mean(y)
z_scale = 70
x_scale_1 = np.min(sampling_step(x))
x_scale_2 = np.max(sampling_step(x))
k1 = ConstantKernel(1.0, constant_value_bounds='fixed') * Matern(length_scale=[x_scale_1, z_scale], nu = 1.5)
k2 = ConstantKernel(1.0, constant_value_bounds='fixed') * Matern(length_scale=[x_scale_2, z_scale], nu = 1.5)
kernel = y_scale* (k1 + k2)
gp = GaussianProcessRegressor(kernel=kernel, alpha= alpha_templ)
gp.fit(np.column_stack([x.flatten(), z.flatten()]), y)
def process_epoch(epoch):
x_pred = epoch
z_pred = np.linspace(self.min_w, self.max_w, len(y))
tot_pred = np.vstack([np.full_like(z_pred, x_pred), z_pred]).T
            # suppress sklearn convergence warnings before predicting
            warnings.filterwarnings("ignore", category=ConvergenceWarning)
            pred_mean, pred_std = gp.predict(tot_pred, return_std=True)
return {
'flux_%s' % str(epoch): pred_mean,
'eflux_%s' % str(epoch): pred_std,
'wave_%s' % str(epoch): z_pred
}
        pred_set_list = Parallel(n_jobs=n_cores)(delayed(process_epoch)(epoch) for epoch in time_series)
pred_set = defaultdict(list)
for result in pred_set_list:
pred_set.update(result)
return pred_set
def total_templates(self, out_path, to_save=True):
start_templates_time = TT.time()
templates_path = out_path + "templates/"
if not os.path.exists(templates_path):
os.makedirs(templates_path)
gp_set = self.gaussian_process_on_lcs()
total_flux, total_wave, total_time = self.injection(gp_set)
cut_epoch, cut_points = self.cut_real_spectra(total_flux)
total_flux, total_wave, total_time = self.total_spectra(total_flux, total_wave, total_time, cut_epoch, cut_points)
total_flux, total_wave, total_time = self.safe_region(total_flux, total_wave, total_time)
time_series = self.time_series()
pred_set = self.bi_gaussian_process(total_flux, total_wave, total_time, time_series)
if to_save:
plt.figure()
for epoch in time_series:
                wave, flux = pred_set['wave_' + str(epoch)], pred_set['flux_' + str(epoch)]
plt.plot(wave, flux)
plt.title('Synthetic spectra')
                plt.xlabel(r'Wavelength [$\AA$]')
                plt.ylabel(r'Flux [erg/s/cm$^2$/$\AA$]')
file_name = f"{self.sn_name}_{epoch}.txt"
file_path = templates_path + file_name
with open(file_path, 'w') as file:
for w, f in zip(wave, flux):
file.write(f"{w}\t{f}\n")
            plt.grid(True)
plt.tight_layout()
plt.savefig(out_path + 'templates.png')
plt.close()
plt.figure()
for filtro in self.lc_filters:
time, mag = self.lc_set['time_' + filtro], self.lc_set['mag_' + filtro]
gp_time, gp_mag = gp_set['time_' + filtro], gp_set['mag_' + filtro]
gp_mag = gp_mag * cal_parms[0] + cal_parms[1]
plt.scatter(time, mag, label=filtro)
plt.plot(gp_time, gp_mag, '--')
plt.legend()
plt.grid()
plt.gca().invert_yaxis()
plt.tight_layout()
plt.title('Synthetic light curves')
plt.xlabel('Days after the explosion')
plt.ylabel('Apparent magnitude')
plt.savefig(out_path + 'gp.png')
plt.close()
end_templates_time = TT.time()
final_templates_time = end_templates_time - start_templates_time
print("Operation took {:.6f} seconds".format(final_templates_time))
return (gp_set, pred_set, time_series)
#
#
#
#
class line_fitting():
def __init__(self, sn_t0, pred_set, time_series, lines):
self.t0 = sn_t0
self.pred_set = pred_set
self.time_series = time_series
self.lines = lines
def starting_epoch(self):
YES = False
i = 0
while YES == False:
epoch = self.time_series[i]
wave = np.array(self.pred_set['wave_' + str(epoch)])
flux = np.array(self.pred_set['flux_' + str(epoch)])
print('Can you see at least one P-Cygni and one emission line?')
            plt.figure(figsize=(8, 6))
            plt.title('Spectrum at +' + str(int(epoch)) + ' days after explosion')
            plt.plot(wave, flux, '-', color='grey')
            for value in self.lines.values():
                if np.min(wave) < value < np.max(wave):
                    plt.axvline(value, 0, 1, color='blue')
            plt.show()
answer = input().lower()
if answer == 'yes':
YES = True
            elif answer == 'no':
                i += 1
                if i >= len(self.time_series):
                    print('No more epochs to show; keeping the last one')
                    YES = True
else:
print("Please enter only 'yes' or 'no'")
return epoch
def find_lines(self, epoch):
wave = np.array(self.pred_set['wave_' + str(epoch)])
flux = np.array(self.pred_set['flux_' + str(epoch)])
print('Select the P-Cygni and the emission lines')
print('The answer should be comma-space separated i.e. Helium Ia, Hydrogen a')
for line, value in self.lines.items():
if value < np.max(wave) and value > np.min(wave):
plt.figure(figsize=(8,6))
plt.plot(wave, flux, '-', color = 'grey')
plt.axvline(value, 0, 1, label=line, color = 'blue')
plt.legend()
plt.show()
        OK = False
        while not OK:
            answer = input().split(', ')
            if len(answer) == 2 and answer[0] in self.lines and answer[1] in self.lines:
                p_cygni_line = answer[0]
                emission_line = answer[1]
                OK = True
            else:
                print('Please select two existing lines, comma-space separated')
return p_cygni_line, emission_line
def p_cygni_interval(self, epoch, p_cygni_line):
wave = np.array(self.pred_set['wave_' + str(epoch)])
flux = np.array(self.pred_set['flux_' + str(epoch)])
red, blu = 100, 100
stop = False
while stop == False:
for line, value in self.lines.items():
if line == p_cygni_line:
blue_limit = value - blu
red_limit = value + red
mask = np.logical_and(wave >= blue_limit, wave <= red_limit)
in_wave, in_flux = wave[mask], flux[mask]
pars = np.poly1d(np.polyfit(in_wave, in_flux, 8))
computed_flux = pars(in_wave)
print('Please insert values to enlarge or reduce the interval i.e. "10, -35"')
print('The first value will add on the left, while the second on the right')
print('If the interval is okay, just write 0, 0')
plt.figure(figsize=(8,6))
plt.plot(in_wave, computed_flux, '--', color = 'red', label = 'fitted flux')
plt.scatter(in_wave, in_flux, color = 'blue', label = 'obs. flux')
plt.axvline(value, 0, 1, color = 'green', label = 'line')
plt.show()
answer = input()
values = answer.split(', ')
blue_value = int(values[0])
red_value = int(values[1])
red += red_value
blu += blue_value
if red_value == blue_value == 0.0:
stop = True
else:
stop = False
return blu, red
def emission_interval(self, epoch, emission_line):
wave = np.array(self.pred_set['wave_' + str(epoch)])
flux = np.array(self.pred_set['flux_' + str(epoch)])
        red_e, blu_e = 20, 50  # these are just hypothetical starting values
stop = False
while stop == False:
for line, value in self.lines.items():
if line == emission_line:
blue_limit = value - blu_e
red_limit = value + red_e
mask = np.logical_and(wave >= blue_limit, wave <= red_limit)
in_wave, in_flux = wave[mask], flux[mask]
pars = np.poly1d(np.polyfit(in_wave, in_flux, 8))
computed_flux = pars(in_wave)
print('Please insert values to enlarge or reduce the interval i.e. "10, -35"')
print('The first value will add on the left, while the second on the right')
print('If the interval is okay, just write 0, 0')
plt.figure(figsize=(8,6))
plt.plot(in_wave, computed_flux, '--', color = 'red', label = 'fitted flux')
plt.scatter(in_wave, in_flux, color = 'blue', label = 'obs. flux')
plt.axvline(value, 0, 1, color = 'green', label = 'line')
plt.show()
answer = input()
values = answer.split(', ')
blue_value = int(values[0])
red_value = int(values[1])
red_e += red_value
blu_e += blue_value
if red_value == blue_value == 0.0:
stop = True
else:
stop = False
return blu_e, red_e
def p_cygni_fitter(self, p_cygni_line, blu, red):
save_files = []
doppler = ([])
err_dop = ([])
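        # For each epoch, fit a low-order polynomial across the P-Cygni profile;
        # the line position is taken midway between the absorption minimum and
        # the emission maximum, and the Doppler shift follows from
        # |lambda_obs - lambda_rest| / lambda_rest. Epochs with a poor fit
        # (R^2 <= 0.9) or an inverted profile are skipped.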
for epoch in self.time_series:
wave = np.array(self.pred_set['wave_' + str(epoch)])
flux = np.array(self.pred_set['flux_' + str(epoch)])
value = self.lines[p_cygni_line]
mask = np.logical_and(wave >= value - blu, wave <= value + red)
in_wave = wave[mask]
in_flux = flux[mask]
params = np.polyfit(in_wave, in_flux, deg=5)
fitted_curve = np.poly1d(params)(in_wave)
total_sum_squares = np.sum((in_flux - np.mean(in_flux))**2)
residuals = in_flux - fitted_curve
sum_squared_residuals = np.sum(residuals**2)
r_squared = 1 - (sum_squared_residuals / total_sum_squares)
if r_squared > 0.9:
maxi_wave = in_wave[np.argmax(fitted_curve)]
mini_wave = in_wave[np.argmin(fitted_curve)]
if maxi_wave > mini_wave:
observed_line = (maxi_wave + mini_wave) / 2
doppler = np.append(doppler, abs(observed_line - value)/ value)
err_w = np.mean(np.diff(in_wave))
err_obs = 1/np.sqrt(2) * err_w
err_dop = np.append(err_dop, err_obs / value)
save_files.append(epoch)
return (save_files, doppler, err_dop)
def emission_fitter(self, epoch, emission_line, blu_e, red_e):
wave = np.array(self.pred_set['wave_' + str(epoch)])
flux = np.array(self.pred_set['flux_' + str(epoch)])
value = self.lines[emission_line]
mask = np.logical_and(wave >= value - blu_e, wave <= value + red_e)
in_wave = wave[mask]
in_flux = flux[mask]
params = np.polyfit(in_wave, in_flux, deg=5)
fitted_curve = np.poly1d(params)(in_wave)
maxi_wave = in_wave[np.argmax(fitted_curve)]
        observed_line = maxi_wave
        # a negative value would indicate a blueshift; returning it anyway
        # (instead of implicitly returning None) keeps the caller from crashing
        redshift = (observed_line - value) / value
        err_red = np.mean(np.diff(in_wave)) / value
        return redshift, err_red
def classification(self):
cmap = plt.get_cmap('viridis')
for i in range(len(self.time_series)):
epoch = self.time_series[i]
color = cmap(i/len(self.time_series))
wave, flux = self.pred_set['wave_' + str(epoch)], self.pred_set['flux_' + str(epoch)]
new_flux = flux * 10**(14)
plt.plot(wave, new_flux, color = color)
        for line, value in self.lines.items():
            if 'Helium' in line:
                plt.axvline(value, 0, 1, color='blue')
            elif 'Hydrogen' in line:
                plt.axvline(value, 0, 1, color='red')
            else:
                plt.axvline(value, 0, 1, color='grey')
        plt.xlabel(r'Wavelength [$\AA$]', fontweight='demibold', fontsize=14)
        plt.ylabel('Normalized flux', fontweight='demibold', fontsize=14)
cax = plt.axes([0.91, 0.11, 0.03, 0.77])
cmap = 'viridis'
sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=0, vmax=np.max(self.time_series)))
sm.set_array([])
cbar = plt.colorbar(sm, cax=cax, ticks=np.linspace(0, np.max(self.time_series), 5))
cbar.set_label('Days after the explosion', fontsize=14, fontweight='bold')
cax.invert_yaxis()
plt.show()
print('Which class is this?')
print('Choose between II, IIP, IIn, IIb, Ib, Ib/c, Ic')
print('\n')
print('-------------------------------------------------')
YES = False
while YES == False:
answer = input()
if answer in ['II', 'Ib', 'Ic', 'IIP', 'IIn', 'IIb', 'Ib/c']:
YES = True
else:
YES = False
print("Please select only a real class")
classe = answer
return classe
def doppler_and_redshift(self):
epoch = self.starting_epoch()
p_cygni_line, emission_line = self.find_lines(epoch)
blu, red = self.p_cygni_interval(epoch, p_cygni_line)
blu_e, red_e = self.emission_interval(epoch, emission_line)
save_files, doppler, err_dop = self.p_cygni_fitter(p_cygni_line, blu, red)
redshift, err_red = self.emission_fitter(epoch, emission_line, blu_e, red_e)
classe = self.classification()
return save_files, doppler, err_dop, redshift, err_red, classe
#
#
#
#
class parameter_estimation():
def __init__(self, sn_t0, err_t0, sn_new_set, sn_filters, gp_set, spectral_filters, time_series, pred_set):
self.t0 = sn_t0
self.err_t0 = err_t0
self.lc_filters = sn_filters
self.lc_set = sn_new_set
self.gp_set = gp_set
self.spectral_filters = spectral_filters
self.time_series = time_series
self.pred_set = pred_set
def time_at_maximum(self):
if 'V' in self.lc_filters:
filtro = 'V'
elif 'i' in self.lc_filters:
filtro = 'i'
else:
filtro = 'Ks'
mag = self.gp_set['mag_' + filtro]
max_index = np.argmin(mag)
time = self.gp_set['time_' + filtro]
err_time = np.mean(np.diff(time))
tmax = time[max_index] + self.t0
err_tmax = np.sqrt(err_time**2 + self.err_t0**2)
return tmax, err_tmax
def time_of_rising(self, tmax, err_tmax):
trise = tmax - self.t0
err_trise = np.sqrt(err_tmax**2 + self.err_t0**2)
return (trise, err_trise)
def absorption(self, trise):
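        # Estimate the extinction from the B-V colour around peak, apparently
        # treating the observed colour as the colour excess E(B-V) (i.e.
        # assuming an intrinsic peak colour of ~0), and converting with the
        # standard Milky-Way ratio A_V = R_V * E(B-V) with R_V = 3.1.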
filter_B = 'B' if 'B' in self.lc_filters else 'g'
filter_V = 'V' if 'V' in self.lc_filters else 'i'
mag_V, emag_V = self.gp_set['mag_' + filter_V], self.gp_set['std_' + filter_V]
mag_B, emag_B = self.gp_set['mag_' + filter_B], self.gp_set['std_' + filter_B]
time_B = self.gp_set['time_' + filter_B ]
max_V_index = np.argmin(mag_V)
max_V = mag_V[max_V_index]
err_max_V = emag_V[max_V_index]
        max_B_index = int(np.argmin(np.abs(np.array(time_B) - trise)))
max_B = mag_B[max_B_index]
err_max_B = emag_B[max_B_index]
extinction = (max_B-max_V)
err_ext = np.sqrt(err_max_B**2 + err_max_V**2)
absorption = 3.1 * extinction
err_abs = 3.1 * err_ext
return absorption, err_abs
def expansion_velocity(self, doppler, err_dop, save_files, tmax):
velocity_lis = doppler * light_vel_km
err_vel = err_dop * light_vel_km
save_tmax = save_files.index(min(save_files, key = lambda x: abs(x-(tmax-self.t0))))
vel_at_max = velocity_lis[save_tmax]
err_vel_max = err_vel[save_tmax]
return velocity_lis, err_vel, vel_at_max, err_vel_max
def distance(self, redshift, err_red):
dist = redshift * light_vel_km / Hubble_70
err_dist = err_red * light_vel_km / Hubble_70
return dist, err_dist
def bolometric_luminosity(self, trise, dist, err_dist):
total_flux, total_wave = ([]), ([])
for filtro in self.lc_filters:
flux, wave, time = self.gp_set['flux_' + filtro], self.gp_set['wave_' + filtro], self.gp_set['time_' + filtro]
closest_time = min(time, key = lambda x: abs(x-(trise)))
            if abs(closest_time - trise) < 10:
flux_at_max = flux[time==closest_time]
wave_at_max = wave[time==closest_time]
total_flux = np.append(total_flux, flux_at_max)
total_wave = np.append(total_wave, wave_at_max)
        order = np.argsort(total_wave)  # trapz needs a monotonic abscissa
        int_flux = trapz(total_flux[order], total_wave[order])
dist_in_cm = dist * mpc_to_cm
err_dist_in_cm = err_dist * mpc_to_cm
luminosity = 4 * np.pi * dist_in_cm**2 * int_flux
err_lum = 8 * np.pi * dist_in_cm * err_dist_in_cm * int_flux
return luminosity, err_lum
def kinetic_energy(self, luminosity, err_lum, trise, err_trise):
trise_in_s = trise * days_to_sec
err_trise_in_s = err_trise * days_to_sec
energy = luminosity * trise_in_s * neutrino_weight
err_energy = np.sqrt( (trise_in_s * err_lum)**2 + (luminosity * err_trise_in_s) **2 ) * neutrino_weight
return energy, err_energy
def mass_of_the_ejecta(self, vel_at_max, err_vel_max, energy, err_energy):
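        # For a homologously expanding, uniform-density sphere the kinetic
        # energy is E = (3/10) * M_ej * v^2, hence M_ej = (10/3) * E / v^2.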
vel_in_cm = vel_at_max * km_to_cm
err_vel_in_cm = err_vel_max * km_to_cm
mass_ejecta = (10/3) * energy / vel_in_cm**2
err_mass_ej = (10/3/vel_in_cm**2) * np.sqrt(err_energy**2 + (2*energy*err_vel_in_cm/vel_in_cm)**2)
mass_ejecta = mass_ejecta / M_sun_g
err_mass_ej = err_mass_ej / M_sun_g
return mass_ejecta, err_mass_ej
def fitting_lcs(self, trise):
fit_set = defaultdict(int)
upper_time = []
for filtro in self.lc_filters:
time = self.lc_set['time_' + filtro]
upper_time.append(np.max(time))
decay_time = np.arange(trise+20, np.mean(upper_time), dtype=int, step = 2)
for filtro in self.lc_filters:
x = np.array(self.lc_set['time_' + filtro])
y = np.array(self.lc_set['mag_' + filtro])
yerr = np.array(self.lc_set['emag_' + filtro])
amplitude = np.mean(y)
lengthscale0 = np.mean(x)
lengthscale1 = np.min(sampling_step(x))
lengthscale2 = np.max(sampling_step(x))
k0 = amplitude * Matern32Kernel(lengthscale0)
k1 = amplitude * Matern32Kernel(lengthscale1)
k2 = amplitude * Matern32Kernel(lengthscale2)
kernel = k1 + k2 + k0
gp = george.GP(kernel)
star = gp.compute(x, yerr)
p0 = gp.get_parameter_vector()
results = op.minimize(nll, p0, args = (y, gp, star), jac=grad_nll, method="L-BFGS-B")
gp.set_parameter_vector(results.x)
t = decay_time
mu, _ = gp.predict(y, t)
eff_wav = [centroid[i] for i in range(len(filterlist)) if filterlist[i] == filtro]
wav_c = np.repeat(eff_wav[0], len(mu))
new_mu = (mu - cal_parms[1]) / cal_parms[0]
flux = light_vel_A * 10**(- 0.4* (new_mu + 48.6)) / (eff_wav[0] **2)
fit_set['time_' + filtro] = decay_time
fit_set['mag_' + filtro] = new_mu
fit_set['flux_' + filtro] = flux
fit_set['wave_' + filtro] = wav_c
return fit_set, decay_time
    def mass_of_nickel(self, dist, trise):
fit_set, decay_time = self.fitting_lcs(trise)
dist_cm = dist * mpc_to_cm
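        # gamma_1 and gamma_2 are (to a good approximation) the inverse decay
        # times of 56Ni (~8.8 d) and 56Co (~111 d) in s^-1; together with the
        # coefficients below they give the standard radioactive energy
        # deposition rate per gram of 56Ni (erg s^-1 g^-1), so the slope of a
        # linear fit of L(t) against this rate estimates the nickel mass.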
gamma_1 = 1.32e-6
gamma_2 = 1.02e-7
lum_array = []
for i in range(len(decay_time)):
flux = ([])
wave = ([])
for filtro in self.lc_filters:
flux = np.append(flux, fit_set['flux_' + filtro][i])
wave = np.append(wave, fit_set['wave_' + filtro][i])
            order = np.argsort(wave)  # ensure a monotonic abscissa for trapz
            int_flux = trapz(flux[order], wave[order])
Lum = 4*np.pi * dist_cm**2 * int_flux
lum_array = np.append(lum_array, Lum)
time_in_s = decay_time * 86400
s = 3.90e+10 * np.exp(-gamma_1*time_in_s) + 6.78e+9 * (np.exp(-gamma_2*time_in_s) - np.exp(-gamma_1*time_in_s))
pars = np.polyfit(s,lum_array,1)
        mass_nickel = pars[0] / M_sun_g
residuals = lum_array - (pars[0] * s + pars[1])
rss = np.sum(residuals**2)
std_err = np.sqrt(rss / (len(s) - 2)) / np.sqrt(np.sum((s - np.mean(s))**2))
        err_mass_ni = std_err / M_sun_g
        return mass_nickel, err_mass_ni
def photosphere_fit(self, redshift, dist, trise):
black_body_set = defaultdict(int)
bounds_temperature = np.array([1000, 30000])
bounds_radius = np.array([1000, 20000]) * R_sun_mpc
bounds_zeta = np.array([0, 1])
bounds = ([bounds_temperature[0], bounds_radius[0], bounds_zeta[0]],
[bounds_temperature[1], bounds_radius[1], bounds_zeta[1]])
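        # Fit each epoch's broadband SED with a dilute blackbody; the free
        # parameters passed to the (externally defined) blackbody() function
        # are temperature, radius and the dilution factor zeta, with the
        # distance held fixed. The quoted temperature T * sqrt(zeta) apparently
        # folds the dilution factor into an effective colour temperature.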
closest_time = min(self.time_series, key = lambda x: abs(x-trise))
wave_masks = []
wave_center = ([])
for sp_filtro in self.spectral_filters:
wave = self.pred_set['wave_' + str(self.time_series[0])]
filter_index = filterlist.index(sp_filtro)
low_wave = centroid[filter_index] - fwhm[filter_index] / 2
up_wave = centroid[filter_index] + fwhm[filter_index] / 2
mask = np.logical_and(wave >= low_wave, wave<= up_wave)
wave_center = np.append(wave_center, centroid[filter_index]/(1+redshift))
wave_masks.append(mask)
for epoch in self.time_series:
wave, flux = self.pred_set['wave_' + str(epoch)], self.pred_set['flux_' + str(epoch)]
sed = ([])
for mask in wave_masks:
masked_wave = (wave[mask])
masked_flux = (flux[mask])
sed = np.append(sed, trapz(masked_flux, masked_wave))
err = sed * 0.1
fit_func = partial(blackbody, distance=dist)
pars, cov = curve_fit(fit_func, wave_center, sed, sigma = err, maxfev=50000, bounds=bounds)
variances = np.diag(cov)
errors = np.sqrt(variances)
ft = 1 / np.sqrt(pars[2])
final_temp = pars[0] / ft
err_temp = np.sqrt( (errors[0]/ft)**2 + (0.5 * pars[0] * errors[2] * ft)**2 )
final_rad = pars[1] / R_sun_mpc
err_rad = errors[1] / R_sun_mpc
            if not np.any(np.isinf(cov)):
black_body_set['temperature_'+ str(epoch)] = final_temp
black_body_set['radius_' + str(epoch)] = final_rad
black_body_set['zeta_' + str(epoch)] = pars[2]
black_body_set['err_temp_' + str(epoch)] = err_temp
black_body_set['err_rad_' + str(epoch)] = err_rad
black_body_set['err_zeta_' + str(epoch)] = errors[2]
if epoch == closest_time:
phot_at_max = ([final_temp, err_temp], [final_rad, err_rad], [pars[2], errors[2]])
if epoch == 0.0:
prog_radius = ([final_rad, err_rad])
return black_body_set, phot_at_max, prog_radius
def progenitor_mass(self, mass_ejecta, err_mass_ej):
mass_ns = 1.2
mass_bh = 10
mass_pr = (round(mass_ejecta + mass_ns, 2), round(mass_ejecta + mass_bh, 2))
err_mass_pr = err_mass_ej
return mass_pr, err_mass_pr
def total_estimation(self, out_path):
tmax, err_tmax = self.time_at_maximum()
trise, err_trise = self.time_of_rising(tmax, err_tmax)
Av, err_Av = self.absorption(trise)
save_files, doppler, err_dop, redshift, err_red, classe = line_fitting(self.t0,
self.pred_set,
self.time_series,
lines).doppler_and_redshift()
velocity_lis, err_vel, vel_at_max, err_vel_max = self.expansion_velocity(doppler, err_dop, save_files, tmax)
dist, err_dist = self.distance(redshift, err_red)
luminosity, err_lum = self.bolometric_luminosity(trise, dist, err_dist)
energy, err_energy = self.kinetic_energy(luminosity, err_lum, trise, err_trise)
mass_ejecta, err_mass_ej = self.mass_of_the_ejecta(vel_at_max, err_vel_max, energy, err_energy)
        mass_nickel, err_mass_ni = self.mass_of_nickel(dist, trise)
black_body_set, phot_at_max, prog_radius = self.photosphere_fit(redshift, dist, trise)
mass_pr, err_mass_pr = self.progenitor_mass(mass_ejecta, err_mass_ej)
luminosity = luminosity / 10**(41)
err_lum = err_lum / 10**(41)
energy, err_energy = energy / 10**(51) , err_energy / 10**(51)
data = [
            ['Hubble constant (assumed)', 'km/s/Mpc', Hubble_70, '-'],
['Class','-', classe, '-'],
['Time of explosion', 'Mjd', round(self.t0, 2), round(self.err_t0, 2)],
['Time of maximum luminosity', 'Mjd', round(tmax, 3), round(err_tmax, 2)],
['Time of rising', 'day', round(trise, 2), round(err_trise, 2)],
['Absorption', 'mag', round(Av, 2), round(err_Av, 2)],
['Distance', 'Mpc', round(dist, 2), round(err_dist, 2)],
['Redshift', '-', round(redshift, 4), round(err_red, 4)],
['Shock velocity', 'km/s', round(float(velocity_lis[0]), 2), round(float(err_vel[0]), 2)],
['Velocity at tmax', 'km/s', round(vel_at_max, 2), round(err_vel_max, 2)],
['Bolometric luminosity', 'L41', round(luminosity, 2), round(err_lum, 2)],
['Kinetic energy', 'E51', round(energy, 2), round(err_energy, 2)],
['Mass of the ejecta', 'Msun', round(mass_ejecta, 4), round(err_mass_ej, 4)],
["Mass of Nickel", 'Msun', round(mass_nikel, 3), round(err_mass_ni, 3)],
['Photospheric temperature at tmax', 'K', round(phot_at_max[0][0], 2), round(phot_at_max[0][1], 2)],
['Photospheric radius at tmax', 'Rsun', round(phot_at_max[1][0], 2), round(phot_at_max[1][1], 2)],
['Dilution factor at tmax', '-', round(phot_at_max[2][0], 2), round(phot_at_max[2][1], 2)],
["Progenitor's radius", 'Rsun', round(prog_radius[0], 2), round(prog_radius[1], 2)],
["Progenitor's mass", "Msun", mass_pr, round(err_mass_pr, 3)]
]
table = tabulate(data, headers=['Parameter', 'Units', 'Value', 'Error'], tablefmt='simple')
print(table)
with open(out_path + 'results.txt', 'w') as f:
f.write(table)
# Saving figures
fig_name = out_path + 'velocity.png'
plt.figure()
plt.scatter(save_files, velocity_lis, color = 'blue', label = 'Points')
plt.axvline(trise, 0, 1, ls = '--', color='red', label = 'tmax')
plt.xlabel('Days after the explosion')
plt.ylabel('Velocity [km/s]')
plt.legend()
plt.grid()
plt.savefig(fig_name)
plt.close()
#
#
#
#
|
AndreaSimonginiREPO_NAMECASTORPATH_START.@CASTOR_extracted@CASTOR-main@Scripts@castor_source.py@.PATH_END.py
|
{
"filename": "_showspikes.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/yaxis/_showspikes.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowspikesValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self, plotly_name="showspikes", parent_name="layout.scene.yaxis", **kwargs
):
super(ShowspikesValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@yaxis@_showspikes.py@.PATH_END.py
|
{
"filename": "_x.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/candlestick/_x.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(self, plotly_name="x", parent_name="candlestick", **kwargs):
super(XValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc+clearAxisTypes"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@candlestick@_x.py@.PATH_END.py
|
{
"filename": "params.py",
"repo_name": "JayWadekar/gwIAS-HM",
"repo_path": "gwIAS-HM_extracted/gwIAS-HM-main/Pipeline/params.py",
"type": "Python"
}
|
# Parameters used in data analysis
# Template bank parameters
# -----------------------------------------------------------------------------
# Frequency range (Hz)
# Sets minimum frequency of waveform generation
FMIN = 20.
# HM_search = True
# if HM_search: FMAX = 512.
# else: FMAX = 1024.
FMAX = 1024.
NF_GEN = 10**7
SUB_FAC = 1000 # Factor to subsample after unwrapping
# Tolerance for retaining basis
TOL = 1.0E-8
# Factor to keep the conversion from basis to phase finite if the weights vanish
EPS_CALPHA = 1e-50
# Fix support captured, (1 - Wt of whitened waveform within finite support)/2
# When testing the overlaps, the accuracy cannot surpass this.
WFAC_WF = 1.E-2
# Relative safety factor for time-domain shift to avoid wraparound artifacts due
# to different waveforms in bank
REL_SHIFT_SAFETY = 0.2
# Factor to discriminate between inspiral-dominated (BNS) and merger-dominated
# (BH) waveforms
MERGERTYPE_THRESH = 100
# Safety factor to inflate estimated time-domain support for BH-type and
# BNS-type waveforms to cover orbital hangup due to spin orientations
SUPPORT_SAFETY_BH = 2 + REL_SHIFT_SAFETY
SUPPORT_SAFETY_BNS = 1.2 + REL_SHIFT_SAFETY
# Minimum waveform duration
MIN_WFDURATION = 16.
# Safety factor to inflate calpha ranges by
TEMPLATE_SAFETY = 1.05
# -----------------------------------------------------------------------------
# Data processing parameters
# -----------------------------------------------------------------------------
# PSD estimation
# --------------
# Default length of chunk for PSD estimation (in seconds)
# DEF_CHUNKTIME_PSD = 64.
# Change in O3a
DEF_CHUNKTIME_PSD = 32.
# Minimum number of valid samples to average over to measure PSD
MINSAMP_PSD = 1
# Minimum and maximum frequencies for PSD computation and data-analysis
# ---------------------------------------------------------------------
# Measure the PSD only above this frequency, this also sets the minimum
# frequencies that will be in the data after highpassing
# Start a bit redward of analysis cutoff to avoid fake line-identification
DF_DATA = -5.
# DF_DATA = 0.
FMIN_PSD = FMIN + DF_DATA
# Measure the PSD only up to this frequency; this should be above the Nyquist
# frequency corresponding to fmax_overlap below
# if HM_search: FMAX_PSD = 1024.
# else: FMAX_PSD = 2048.
FMAX_PSD = 2048.
# Measure power only above this frequency in excess power tests, this equals
# fmin since the waveforms only have frequencies >= fmin
FMIN_ANALYSIS = FMIN
# Maximum frequency for matched filtering (Hz)
# if HM_search: FMAX_OVERLAP = 512
# else: FMAX_OVERLAP = 1024
FMAX_OVERLAP = 1024
# Minimum file length needed in units of chunktime_psd; for this choice the
# accuracy of the ASD ~ 1/\sqrt{32} ~ 18% => the loss of sensitivity due to
# this is about 3%
MIN_FILELENGTH_FAC = 16
# NOTE: Check that this is always larger than
# support_wf + Butterworth impulse response length + support_wt - 2, in seconds
# Parameter for overlap save
# Used to save time in data processing, not necessary to edit
# DEF_FFTSIZE = 2 ** 20
# Change in O3a
# Intel python's linear scaling ends at lower intervals
# IMPORTANT: This can be shorter than BNS waveforms, in which case it might need
# to x2 the fftsize, the code should do it automatically, but not debugged
DEF_FFTSIZE = 2 ** 18
# Parameters for line detection
# -----------------------------
# Detect lines as deviations from smoothed ASD at this significance
LINE_SIGMA = 4
# Detect loud lines as deviations from smoothed ASD at this significance
# LOUD_LINE_SIGMA = 1000
# Change in O3a
LOUD_LINE_SIGMA = 500
# Glitch rejection
# ----------------
# Number of seconds to destroy extra whenever LIGO has a super big hole that
# trims a file. Holes cannot be dealt with on the edges currently
IMPROPER_FLAGGING_SAFETY_DURATION = 2
# Number of passes for sigma clipping
N_GLITCHREMOVAL = 7
# If the outlier is more than 1/OUTLIER_FRAC x sigma_clipping_threshold,
# don't clip in an overzealous manner
OUTLIER_FRAC = 0.1
# Minimum width to clip around outlier (s) (can be larger due to Butterworth)
MIN_CLIPWIDTH = 0.1
# Default filelength (s) used to map probabilities to glitch thresholds
DEF_FILELENGTH = 4096
# Number of files in run (used to set global FAR)
NFILES = 2000
# Glitch detectors should fire this many times per perfect file
# Increase to catch glitches more aggressively
NPERFILE = 0.2
# Fraction of interval successive specgrams are allowed to overlap by
# Increase to catch glitches more aggressively
# OVERLAP_FAC = 1 / 2
# OVERLAP_FAC = 7 / 8
OVERLAP_FAC = 3 / 4
# Number of independent excess power measurements in moving average to remove
# long modes
N_INDEP_MOVING_AVG_EXCESS_POWER = 5
# Safety factor to restrict aggressive trimming of the file due to PSD drift
# Related to typical level of PSD drift we see in files (not Gaussian)
# Decrease to catch glitches more aggressively
# PSD_DRIFT_SAFETY = 0
PSD_DRIFT_SAFETY = 5e-2
# Hole filling
# ------------
# Start by filling all holes using brute force logic at the outset, if number
# of bad entries is below this
NHOLE_BF = 2000
# Can't fill with brute force if more than this many samples are bad in a single
# filling chunk
# NHOLE_MAX = 10000
# Change in O3a
NHOLE_MAX = 20000
# Can't fill if more than this many samples are bad in a consecutive chunk
NHOLE_MAX_CONSECUTIVE = 2**19
# NOTE: Check that this is always larger than support_wf
# -----------------------------------------------------------------------------
# Filter parameters
# -----------------------------------------------------------------------------
# Parameters of high-pass Butterworth filter
ORDER = 4
DF = 0. # The gain falls to 1/sqrt(2) that of the passband at FMIN + DF
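# Purely illustrative (not part of this pipeline, which constructs its filters
# elsewhere): parameters like these would map onto a SciPy high-pass design as
#   from scipy import signal
#   sos = signal.butter(ORDER, FMIN + DF, btype='highpass', output='sos', fs=4096.)
# where the sampling rate fs = 4096. is a hypothetical placeholder.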
# Fraction of max impulse response to capture (make very small, because bad
# stuff is happening at low frequencies)
IRL_EPS_HIGH = 1e-9
# Same for bandpass filter to measure band power of waveforms
IRL_EPS_BAND = 1e-5
# Truncate notch filter at this multiple of 1/bandwidth
NOTCH_TRUNC_FAC = 4
WFAC_FILT = 1.E-3 # (1 - Weight of the filter to capture)/2
# Support of sinc interpolation in units of indices. Empirically tested at
# f_max = 512 Hz
SUPPORT_SINC_FILTER = 200
# Amount of data we lose from each side in the limit of an infinite number of
# sinc interpolations
SUPPORT_EDGE_DATA = 2 * (SUPPORT_SINC_FILTER + 1)
# -----------------------------------------------------------------------------
# Trigger analysis parameters
# -----------------------------------------------------------------------------
# Maximum SNR below which waveforms are never clipped by glitch rejection
DEF_PRESERVE_MAX_SNR = 20
# DEF_PRESERVE_MAX_SNR = 12
FALSE_NEGATIVE_PROB_POWER_TESTS = 1e-4
# Factor to multiply PRESERVE_MAX_SNR for vetoing AFTER template subtraction
DEF_TEMPLATE_MAX_MISMATCH = 0.1
## WARNING: If you are analyzing higher modes and change DEF_SINE_GAUSSIAN_INTERVALS
# or DEF_BANDLIM_TRANSIENT_INTERVALS or DEF_EXCESS_POWER_INTERVALS,
# you should compute which higher mode wfs give you the max
# glitch thresholds and store the info. in bank metadata
# See the 'Computing the thresholds for different glitch tests with HMs'
# subsection in scratch_files/TemplateBank_HigherModes.ipynb
# Frequency bands within which we look for Sine-Gaussian noise transients
# [central frequency, df = (upper - lower frequency)] Hz
DEF_SINE_GAUSSIAN_INTERVALS = [[60., 10.],
[40, 40],
[120., 40],
[140, 80],
[100., 100],
[90, 40],
[70, 40],
[150, 50],
[100, 50]]
# Time-interval and frequency bands within which to look for excess power
# transients [0.5, [55, 65]]
DEF_BANDLIM_TRANSIENT_INTERVALS = [[1., [55, 65]],
[1., [70, 80]],
[1, [40, 60]],
[1, [25, 50]],
[0.5, [40, 60]],
[0.25, [140, 160]],
[1., [20, 50]],
[1., [100, 180]],
[0.05, [25, 70]],
[0.1, [25, 70]],
[0.05, [20, 180]],
[0.025, [60, 180]],
[0.2, [25, 70]]]
DEF_BANDLIM_TRANSIENT_INTERVALS_O3 = [[1., [55, 65]],
[1., [70, 80]],
[1, [40, 60]],
[1, [25, 50]],
[0.5, [40, 60]],
[0.25, [140, 160]],
[1., [100, 180]],
[0.025, [60, 180]],
[0.2, [25, 70]]]
DEF_SINE_GAUSSIAN_INTERVALS_O3 = [[60., 10.],
[120., 40],
[140, 80],
[100., 100],
[90, 40],
[70, 40],
[150, 50],
[100, 50]]
# Scales over which to look for excess power
# DEF_EXCESS_POWER_INTERVALS = [0.2, 1, 4, 10]
# Ensure that longest scale x N_MOVING_AVG_EXCESS_POWER * (1 - OVERLAP_FAC) is
# less than timescale where there is a cliff in the autocorrelation of |d(t)|^2
DEF_EXCESS_POWER_INTERVALS = [0.2, 1]
# Don't trust any triggers that were corrected below this level
HOLE_CORRECTION_MIN = 0.5
# Ensure that division returns a real number
HOLE_EPS = 1e-5
# Interval to update PSD drift correction (s)
DEF_PSD_DRIFT_INTERVAL = 1
# Do not capture psd drifts below this level
PSD_DRIFT_TOL = 2.E-2
# Threshold to sigma clip the overlaps when estimating the PSD drift correction
# (in units of number of times realized over the window due to Gaussian noise)
PSD_DRIFT_SAFEMEAN_THRESH = 1
# When we clip outliers, margin to increase clipping by (in s) to prevent
# events from biasing the PSD drift correction
PSD_DRIFT_SAFETY_LEN = 0.01
# New parameter from O3a
# Target time resolution after sinc-interpolation
DT_FINAL = 1/4096.
# Support of sinc filter to use when optimizing calpha
# (higher is more accurate, but slower)
SUPPORT_SINC_FILTER_OPT = 1024
# For HM, we create a separate file with downsampled triggers,
DOWNSAMPLE_TRIGGERS = 1000
# -----------------------------------------------------------------------------
# Parameters for tracking lines in specgram
# -----------------------------------------------------------------------------
LINE_TRACKING_DT = 2
LINE_TRACKING_TIME_SCALE = 64
LINE_TRACKING_DF = 2
# -----------------------------------------------------------------------------
# Coincidence parameters
# -----------------------------------------------------------------------------
MAX_FRIEND_DEGRADE_SNR2 = 0.1
# Half-Size of timeseries to save (s), should be large enough for any time
# differences
DT_BOUND_TIMESERIES = 0.1
# -----------------------------------------------------------------------------
# Veto parameters
# -----------------------------------------------------------------------------
# Waveform duration below which we avoid holes
SHORT_WF_LIMIT = 10
# Window (s) around short waveforms where we demand no bad time
# Warning: this can mess up events like GW170817!
DT_CLEAN_MASK = 1
# Excess power veto should fire this many times per perfect file
# Increase to make veto more aggressive
NFIRE = 0.1
# If change in finer PSD drift exceeds Gaussian sigma x this factor, we deem it
# significant when vetoing
PSD_DRIFT_VETO_THRESH = 6
# Number of chunks to split waveforms into for chi2 veto
N_CHUNK = 6
# Subsets of chunks to compare for tail/hump tests
SPLIT_CHUNKS = [[[0], [4, 5]],
[[0, 1], [4, 5]],
[[0, 5], [2, 3]],
[[0, 1, 2], [3, 4, 5]]]
# Calculate chi^2 with 20 bins [Catches "dots" in time-freq]
N_CHUNK_2 = 20
# Threshold relative to highest eigenvalue to keep in the covariance matrix,
# i.e., retain eigenvectors with eigenvalue > (max eigenvalue of covariance
# matrix * cov_degrade)
COV_DEGRADE = 0.1
# If the highest eigenvalue of the covariance matrix is below this, do not
# perform chi-squared test
CHI2_MIN_EIGVAL = 1e-2
# Threshold for chi2 test
THRESHOLD_CHI2 = 1e-2
# Threshold for split test
THRESHOLD_SPLIT = 1e-2
# Window (s) around trigger to avoid while estimating statistics of the scores
# from the data
DT_AVOID = 0.1
# Amount to allow time to shift during calpha optimization
DT_OPT = 0.01
# -----------------------------------------------------------------------------
# Coherent score parameters
# -----------------------------------------------------------------------------
LOG2N_QMC, NPHI, MAX_LOG2N_QMC, MIN_N_EFFECTIVE = 12, 256, 16, 50
# Spacing of samples in ms
DEFAULT_DT = 1000/4096
# Least count of timeslides in ms
DEFAULT_TIMESLIDE_JUMP = 100
# safety factor for rank function for extremely loud triggers
SNR2_MAX_BOUND = 300
# -----------------------------------------------------------------------------
|
JayWadekarREPO_NAMEgwIAS-HMPATH_START.@gwIAS-HM_extracted@gwIAS-HM-main@Pipeline@params.py@.PATH_END.py
|
{
"filename": "gwb_spectral_variance.ipynb",
"repo_name": "astrolamb/pop_synth",
"repo_path": "pop_synth_extracted/pop_synth-main/notebooks/gwb_spectral_variance.ipynb",
"type": "Jupyter Notebook"
}
|
Based on a notebook by Stephen Taylor
This notebook uses a very basic population synthesis model to test out the analytical relations found in Lamb & Taylor 2024
```python
import numpy as np
import scipy.stats as ss
import matplotlib.pyplot as plt
from astropy.cosmology import FlatLambdaCDM
from astropy.constants import G, c
from astropy import units as u
from scipy.stats import rv_histogram, skew
from scipy.stats import kurtosis as kurt_sp
from matplotlib.lines import Line2D
```
```python
%load_ext autoreload
%autoreload 2
%config InlineBackend.figure_format = 'retina'
```
```python
cd /Users/lambwg/Documents/Vanderbilt/Research/pop_synth//
```
/Users/lambwg/Documents/Vanderbilt/Research/pop_synth
```python
# Stephen Taylor's default matplotlib settings
rc_params = {"axes.labelsize": 10, "lines.markersize": 4, #"backend": "pdf",
"font.size": 10, "xtick.top": True, "ytick.right": True,
"xtick.major.size": 6, "xtick.minor.size": 3,
"ytick.major.size": 6, "ytick.minor.size": 3,
"xtick.major.width": 0.5, "ytick.major.width": 0.5,
"xtick.minor.width": 0.5, "ytick.minor.width": 0.5,
"lines.markeredgewidth": 1, "axes.linewidth": 1.2,
"legend.fontsize": 7, "xtick.labelsize": 10,
"xtick.direction": "in", "xtick.minor.visible": True,
"xtick.major.top": True, "xtick.minor.top": True,
"ytick.labelsize": 10, "ytick.direction": "in",
"ytick.minor.visible": True, "ytick.major.right": True,
"ytick.minor.right": True, "savefig.dpi": 400,
"path.simplify": True, "font.family": "serif",
"font.serif": "Times", "text.usetex": True,
"figure.figsize": [3.5503666805036667, 2.1942472810764047]}
plt.rcParams.update(rc_params)
```
```python
# constants
fyr = 1 / 365.25 / 86400.0
G = 4.517103 * 10**-48 # Mpc3 / Msun s2
c = 9.7156119 * 10**-15 # Mpc / s
```
# Variation relations
This is a very basic population synthesis model that creates a distribution of binaries as a function of emitted GW frequency. For circular binaries that evolve purely due to GW emission, we expect that $\mathrm{d}t/\mathrm{d}f \propto f^{-11/3}$. Therefore, we sample from this distribution to get our binaries, bin them, and add them in quadrature
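As a quick sanity check on that exponent (a standard textbook result, sketched here rather than taken from this notebook): a circular binary driven purely by GW emission chirps as
$$\frac{\mathrm{d}f}{\mathrm{d}t} = \frac{96}{5}\,\pi^{8/3}\left(\frac{G\mathcal{M}}{c^{3}}\right)^{5/3} f^{11/3},$$
so the residence time per unit frequency is $\mathrm{d}t/\mathrm{d}f \propto f^{-11/3}$, which is exactly the density sampled below.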
```python
## Make a binary distribution function going as f^{-11/3}
A = 1e-22 # arbitrary normalisation
f = np.linspace(1e-9,1e-7,int(1e6)) # frequency grid
fbmid = 0.5 * (f[1:] + f[:-1]) # middle of grid cells
Nf = A * fbmid**(-11./3.) # dt / df for GW emission
```
```python
plt.loglog(fbmid, Nf)
plt.xlabel('GW frequency (Hz)')
plt.ylabel('Number of sources')
```
Text(0, 0.5, 'Number of sources')

```python
# we need to sample the binary distribution. Use scipy's rv_histogram
test = ss.rv_histogram((Nf,f)) # make a scipy.stats object
```
```python
## Create structures for Omega_gw across realizations
Tspan = 20. * 86400 * 365.24
bins = (np.arange(1, 102) - 0.5) / Tspan # frequency bins for the 20-yr Tspan
bmid = 0.5 * (bins[1:] + bins[:-1]) # bin mids
omega = np.zeros((bmid.shape[0], int(1e4))) # frequency-bins x realizations
```
```python
# Make Omega_gw spectrum for each realization
for jj in range(omega.shape[1]):
freqs = test.rvs(size=int(1e4)) # frequencies of 10000 sampled binaries
mask = np.digitize(freqs, bins) - 1 # bin the binaries by frequency
# Background spectrum is sum of squared strain amplitudes.
# Power of 10/3 from h^2 and conversion from h_c^2(f) to Omega(f),
# then binned by frequency
for ii, bin in enumerate(bmid):
omega[ii, jj] = np.sum(freqs[mask==ii]**(10/3) *
freqs[mask==ii]/Tspan)
```
The variance of the following quantities can be deduced analytically.
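A one-line version of the argument, consistent with the scalings fitted in the cells below: if the number of sources per bin is Poisson with mean $\langle N(f)\rangle \propto f^{-11/3}$ and each source contributes $x(f) \propto f^{13/3}$ to $\Omega_\mathrm{GW}$, then $\mathrm{Var}[\Omega_\mathrm{GW}(f)] = \langle N\rangle\langle x^{2}\rangle \propto f^{-11/3}\,f^{26/3} = f^{5}$, while the mean scales as $\langle N\rangle\langle x\rangle \propto f^{2/3}$.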
## $\Omega_\mathrm{GW}(f)$
```python
# Plot omega for all realizations
for jj in range(omega.shape[1]):
plt.loglog(bmid, omega[:, jj], alpha=0.2, color='C0')
plt.loglog(bmid, np.mean(omega, axis=1), color='r')
plt.loglog(bmid, 2.8e-42 * (bmid/fyr)**(2/3), color='C1')
# legend handles and labels
h1 = Line2D([], [], alpha=0.2, color='C0', label='Realisations')
h2 = Line2D([], [], color='r', label='Mean across realisations')
h3 = Line2D([], [], color='C1', label=r'$\propto f^{2/3}$')
plt.legend(handles=[h1, h2, h3])
plt.xlabel('GW frequency [Hz]');
plt.ylabel(r'$\Omega_\mathrm{GW}(f)$');
plt.title(r'Mean $\Omega_\mathrm{GW}(f)$');
```

```python
## Variance of Omega_gw
plt.loglog(bmid, np.var(omega, axis=1), label='Variance over realisations')
plt.loglog(bmid, 3e-84*(bmid/fyr)**(5.), label=r'$\propto f^5$')
plt.xlabel('GW frequency [Hz]');
plt.ylabel(r'Var$[\Omega_\mathrm{GW}(f)]$');
plt.title(r'Variance $\Omega_\mathrm{GW}(f)$');
```

## $h_\mathrm{c}^2(f)$
```python
# Plot h_c^2(f) for all realizations
for jj in range(omega.shape[1]):
plt.loglog(bmid, omega[:, jj]/bmid**2, alpha=0.2, color='C0')
plt.loglog(bmid, 1e-27 * (bmid/fyr)**(-4/3.), color='C1')
plt.loglog(bmid, np.mean(omega/bmid[:, None]**2, axis=1), color='r')
# legend handles and labels
h1 = Line2D([], [], alpha=0.2, color='C0', label='Realisations')
h2 = Line2D([], [], color='r', label='Mean across realisations')
h3 = Line2D([], [], color='C1', label=r'$\propto f^{-4/3}$')
plt.legend(handles=[h1, h2, h3])
plt.xlabel('GW frequency [Hz]');
plt.ylabel(r'$h_\mathrm{c}^2(f)$');
plt.title(r'Mean $h^2_\mathrm{c}(f)$');
```
/Users/lambwg/Applications/miniconda3/envs/ceffyl/lib/python3.10/site-packages/IPython/core/events.py:93: UserWarning: Creating legend with loc="best" can be slow with large amounts of data.
func(*args, **kwargs)
/Users/lambwg/Applications/miniconda3/envs/ceffyl/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning: Creating legend with loc="best" can be slow with large amounts of data.
fig.canvas.print_figure(bytes_io, **kw)
/Users/lambwg/Applications/miniconda3/envs/ceffyl/lib/python3.10/site-packages/IPython/core/pylabtools.py:152: UserWarning: Creating legend with loc="best" can be slow with large amounts of data.
fig.canvas.print_figure(bytes_io, **kw)

```python
# variance of hc^2
plt.loglog(bmid, np.var(omega/bmid[:, None]**2, axis=1))
plt.loglog(bmid, 1.e-53*(bmid/fyr))
plt.xlabel('GW frequency [Hz]')
plt.ylabel(r'Var[$h_\mathrm{c}(f)^2$]')
plt.title(r'Variance $h_\mathrm{c}(f)^2$')
```
Text(0.5, 1.0, 'Variance $h_\\mathrm{c}(f)^2$')

## $S_{\delta t}(f)$
```python
# Plot S(f) for all realizations
for jj in range(omega.shape[1]):
plt.loglog(bmid, omega[:, jj]/bmid**5, alpha=0.2, color='C0')
plt.loglog(bmid, 1e-6 * (bmid/fyr)**(-13/3.), color='C1')
plt.loglog(bmid, np.mean(omega/bmid[:, None]**5, axis=1), color='r')
# legend handles and labels
h1 = Line2D([], [], alpha=0.2, color='C0', label='Realisations')
h2 = Line2D([], [], color='r', label='Mean across realisations')
h3 = Line2D([], [], color='C1', label=r'$\propto f^{-13/3}$')
plt.legend(handles=[h1, h2, h3], loc='lower left')
plt.xlabel('GW frequency [Hz]');
plt.ylabel(r'$S_{\delta t}(f)$');
plt.title(r'Mean $S_{\delta t}(f)$')
```
Text(0.5, 1.0, 'Mean $S_{\\delta t}(f)$')

```python
# variance of PSD
plt.loglog(bmid, np.var(omega/bmid[:, None]**5, axis=1))
plt.loglog(bmid, 2e-48*bmid**(-5.))
plt.xlabel('GW frequency [Hz]')
plt.ylabel(r'Var[$S_{\delta t}(f)$]')
plt.title(r'Variance $S_{\delta t}(f)$')
```
Text(0.5, 1.0, 'Variance $S_{\\delta t}(f)$')

# General $dt/df$...
For binaries that also interact with their environments, their frequency evolution is faster, and $\lambda\neq-11/3$.
Let $\frac{dt}{df} \propto f^\lambda$
For stellar scattering, $\lambda = -7/3$
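The same Poisson bookkeeping as above then predicts the slopes fitted in the cells below: $\langle\Omega_\mathrm{GW}\rangle \propto f^{\lambda+13/3}$ and $\mathrm{Var}[\Omega_\mathrm{GW}] \propto f^{\lambda+26/3}$; dividing each realisation by $f^{2}$ or $f^{5}$ gives the corresponding $h_\mathrm{c}^{2}(f)$ and $S_{\delta t}(f)$ scalings. For $\lambda=-7/3$ the mean background steepens to $\Omega_\mathrm{GW}\propto f^{2}$.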
```python
## Make a binary distribution function going as f^lambda
A = 1e-22 # arbitrary
f = np.linspace(1e-9, 1e-7, int(1e6)) # frequency grid
fbmid = 0.5 * (f[1:] + f[:-1]) # middle of grid cells
lamb_da = -7/3 # stellar scattering
Nf = A * fbmid**lamb_da # dt / df for GW emission
```
```python
plt.loglog(fbmid, Nf)
plt.xlabel(r'GW frequency (Hz)')
plt.ylabel(r'Number of sources')
```
Text(0, 0.5, 'Number of sources')

```python
# we need to sample the binary distribution. Use scipy's rv_histogram
test = ss.rv_histogram((Nf,f)) # make a scipy.stats object
```
```python
## Create structures for Omega_gw across realizations
Tspan = 20. * 86400 * 365.24
bins = (np.arange(1, 102) - 0.5) / Tspan # frequency bins for the 20-yr Tspan
bmid = 0.5 * (bins[1:] + bins[:-1]) # bin mids
omega = np.zeros((bmid.shape[0], int(1e4))) # frequency-bins x realizations
```
```python
# Make Omega_gw spectrum for each realization
for jj in range(omega.shape[1]):
freqs = test.rvs(size=int(1e4)) # frequencies of 10000 sampled binaries
mask = np.digitize(freqs, bins) - 1 # bin the binaries by frequency
# Background spectrum is sum of squared strain amplitudes.
# Power of 10/3 from h^2 and conversion from h_c^2(f) to Omega(f),
# then binned by frequency
for ii, bin in enumerate(bmid):
omega[ii, jj] = np.sum(freqs[mask==ii]**(10/3) *
freqs[mask==ii]/Tspan)
```
## $\Omega_\mathrm{GW}(f)$
```python
# Plot omega for all realizations
for jj in range(omega.shape[1]):
plt.loglog(bmid, omega[:, jj], alpha=0.2, color='C0')
plt.loglog(bmid, 7e-40 * (bmid/fyr)**(lamb_da + 13/3), color='C1')
# legend handles and labels
h1 = Line2D([], [], alpha=0.2, color='C0', label='Realisations')
h3 = Line2D([], [], color='C1', label=r'$\propto f^{\lambda + 13/3} = f^{2}$')
plt.legend(handles=[h1, h3])
plt.xlabel('GW frequency [Hz]');
plt.ylabel(r'$\Omega_\mathrm{GW}(f)$');
plt.title(r'Mean $\Omega_\mathrm{GW}(f)$');
```

```python
## Variance of Omega_gw
plt.loglog(bmid, np.var(omega, axis=1))
plt.loglog(bmid, 3e-82*(bmid/fyr)**(lamb_da + 26/3.))
plt.xlabel('GW frequency [Hz]')
plt.ylabel(r'Var[$\Omega_\mathrm{GW}(f)$]')
plt.title(r'Variance $\Omega_\mathrm{GW}(f)$')
```
Text(0.5, 1.0, 'Variance $\\Omega_\\mathrm{GW}(f)$')

## $h_\mathrm{c}^2(f)$
```python
# Plot h_c^2 for all realizations
for jj in range(omega.shape[1]):
plt.loglog(bmid, omega[:, jj]/bmid**2, alpha=0.2, color='C0')
plt.loglog(bmid, 7e-26 * (bmid/fyr)**(lamb_da + 7/3.), color='C1')
# legend handles and labels
h1 = Line2D([], [], alpha=0.2, color='C0', label='Realisations')
h3 = Line2D([], [], color='C1', label=r'$\propto f^{\lambda + 7/3} = f^{0}$')
plt.legend(handles=[h1, h3], loc='lower left')
plt.xlabel('GW frequency [Hz]');
plt.ylabel(r'$h_\mathrm{c}^2(f)$');
plt.title(r'Mean $h_\mathrm{c}^2(f)$');
```

```python
# variance of hc^2
plt.loglog(bmid, np.var(omega/bmid[:, None]**2, axis=1))
plt.loglog(bmid, 6.e-52*(bmid/fyr)**(lamb_da + 14/3))
plt.xlabel('GW frequency [Hz]')
plt.ylabel(r'Var($h_\mathrm{c}(f)^2$)')
plt.title(r'Variance $h^2_\mathrm{c}(f)$')
```
Text(0.5, 1.0, 'Variance $h^2_\\mathrm{c}(f)$')

## $S_{\delta t}(f)$
```python
# Plot psd for all realizations
for jj in range(omega.shape[1]):
plt.loglog(bmid, omega[:, jj]/bmid**5, alpha=0.2, color='C0')
plt.loglog(bmid, 2e-3 * (bmid/fyr)**(lamb_da-2/3.), color='C1')
plt.xlabel('GW frequency [Hz]');
plt.ylabel(r'$S_\mathrm{\delta t}(f)$')
plt.title(r'Mean $S_\mathrm{\delta t}(f)$');
```

```python
# variance of PSD
plt.loglog(bmid, np.var(omega/bmid[:, None]**5, axis=1))
plt.loglog(bmid, 2.5e-8*(bmid/fyr)**(lamb_da-4/3))
plt.xlabel('GW frequency [Hz]')
plt.ylabel(r'Var($S_\mathrm{\delta t}(f)$)')
plt.title(r'Variance $S_\mathrm{\delta t}(f)$')
```
Text(0.5, 1.0, 'Variance $S_\\mathrm{\\delta t}(f)$')

These all work really well!!
```python
# Plot omega for all realizations
plt.loglog(bmid, omega[:, 100], alpha=1, color='C0')
plt.loglog(bmid, 2e-41 * (bmid/fyr)**(lamb_da + 13/3), color='k', ls='--', alpha=0.5)
# legend handles and labels
h1 = Line2D([], [], color='C0', label='Realisations')
#h2 = Line2D([], [], color='r', label='Mean across realisations')
h3 = Line2D([], [], color='k', ls='--', label=r'$\propto f^{2}$')
plt.legend(handles=[h1, h3])
plt.xlim(right=1.1e-7)
plt.xlabel('GW frequency [Hz]');
plt.ylabel(r'$\Omega_\mathrm{GW}(f)$');
#plt.title(r'Mean $\Omega_\mathrm{GW}(f)$');
```

# Synthesised idealised background
```python
from scripts import pop_synth as ps
```
```python
# define models
# MODEL 1
model1 = dict(alpha=1, log10_M_star=np.log10(3.2*10**7), beta=3, z0=3, n0_dot=2e-2)
model2 = dict(alpha=0.5, log10_M_star=np.log10(7.5*10**7), beta=2.5, z0=2.4, n0_dot=9.5e-3)
model3 = dict(alpha=0., log10_M_star=np.log10(1.8*10**8), beta=2, z0=1.8, n0_dot=1.5e-3)
model4 = dict(alpha=-0.5, log10_M_star=np.log10(4.2*10**8), beta=1.5, z0=1.1, n0_dot=9e-5)
model5 = dict(alpha=-1, log10_M_star=9., beta=1, z0=0.5, n0_dot=2e-6)
#model5 = dict(alpha=-1, log10_M_star=11., beta=1, z0=5, n0_dot=2.5e-11)
```
```python
Tspan = 20 * 365.24 * 86400
f = np.arange(1, 31)/Tspan
#M = np.linspace(10**6, 10**11, 1000)
log10_M = np.linspace(6, 11, 1001)
z = np.linspace(0, 5, 101)
hc2_ideal = ps.gwb_ideal(f, log10_M, z, model4)
```
```python
plt.plot(f, hc2_ideal)
plt.xscale('log', base=10)
plt.yscale('log', base=10)
```

# Poisson!
```python
G = 4.517103 * 10**-48 # Mpc3 / Msun s2
c = 9.7156119 * 10**-15 # Mpc / s
```
```python
Tspan = 20 * 365.24 * 86400
fbins = (np.arange(1, 32) - 0.5)/Tspan
```
```python
z_bins = np.linspace(0, 5, 101)
#M_bins = np.linspace(10**6, 10**11, 1001)
log10_M_bins = np.linspace(6, 11, 1001)
#dM = M_bins[1] - M_bins[0]
#dz = z_bins[1] - z_bins[0]
#dlogf = np.log(fbins[1:]) - np.log(fbins[:-1])
f_mid = 0.5 * (fbins[1:] + fbins[:-1])
#M_mid = 0.5 * (M_bins[1:] + M_bins[:-1])
log10_M_mid = 0.5 * (log10_M_bins[1:] + log10_M_bins[:-1])
z_mid = 0.5 * (z_bins[1:] + z_bins[:-1])
```
```python
N = ps.smbhb_number_per_cell(fbins, 10**log10_M_bins, z_bins, model4)
```
```python
N.shape
```
(30, 1000, 100)
```python
N.sum()
```
1045194662.0414128
```python
model1
```
{'alpha': 1,
'log10_M_star': 7.505149978319906,
'beta': 3,
'z0': 3,
'n0_dot': 0.02}
```python
from tqdm import tqdm
nreal = 100000 # EXAMPLE NUMBER OF REALISATIONS - INCREASE FOR PRODUCTION
dlogf = (np.log(fbins[1:]) - np.log(fbins[:-1]))
models = [model1, model2, model3, model4, model5]
h2cf_synth = np.zeros((5, nreal, 30))
for ii in tqdm(range(5)):
N = ps.smbhb_number_per_cell(fbins, 10**log10_M_bins, z_bins, models[ii])
rng = np.random.default_rng()
    h2cf_synth[ii] = ps.pop_synth(N, fbins, 10**log10_M_mid, z_mid, n_real=nreal, seed=ii)
    np.save(f'./data/hc2f_10k_model{ii+1}', h2cf_synth[ii])
```
```python
figsize_params = {"figure.figsize": [3.5503666805036667, 2.1942472810764047]}
plt.rcParams.update(figsize_params)
var = np.var(h2cf_synth, axis=1)
[plt.plot(f_mid, var[ii], label=f'Model {ii+1}') for ii in range(5)]
plt.plot(f_mid, 1.4e-57 * (f_mid/fyr)**(1), c='k', ls='--', label=r'$f^1$')
plt.xscale('log', base=10)
plt.yscale('log', base=10)
plt.ylabel(r'$\mathrm{Var}[h_\mathrm{c}^2(f)]$')
plt.xlabel(r'GW Frequency (Hz)')
```
Text(0.5, 0, 'GW Frequency (Hz)')

```python
figsize_params = {"figure.figsize": [3.5503666805036667*2, 2.1942472810764047]}
plt.rcParams.update(figsize_params)
fig, axs = plt.subplots(ncols=2, tight_layout=True)
fyr = 1/(365.24*86400)
ax = axs[0]
ax.plot(f_mid, h2cf_synth[2, 0], c='C0', alpha=0.2, rasterized=True, label='GWB realisations')
[ax.plot(f_mid, h2cf_synth[2, ii], c='C0', alpha=0.2, rasterized=True) for ii in range(1, nreal)]
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_ylabel(r'$h_\mathrm{c}^2(f)$')
ax.set_xlabel(r'GW Frequency (Hz)')
ax = axs[1]
var = np.var(h2cf_synth[2], axis=0)
#ax.errorbar(f_mid, var, yerr=np.std(vars, axis=0), label='Realisations',
# capsize=2)
ax.plot(f_mid, var)
ax.plot(f_mid, 1.4e-59 * (f_mid/fyr)**(1), c='k', ls='--', label=r'$f^1$')
ax.set_xscale('log', base=10)
ax.set_yscale('log', base=10)
ax.set_ylabel(r'$\mathrm{Var}[h_\mathrm{c}^2(f)]$')
ax.set_xlabel(r'GW Frequency (Hz)')
#fig.savefig('./notebooks/figures/h2cf_10k.pdf', dpi=300);
```
Text(0.5, 0, 'GW Frequency (Hz)')

```python
```
|
astrolambREPO_NAMEpop_synthPATH_START.@pop_synth_extracted@pop_synth-main@notebooks@gwb_spectral_variance.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "mit-ll/spacegym-kspdg",
"repo_path": "spacegym-kspdg_extracted/spacegym-kspdg-main/src/kspdg/private_src/python3_9/Linux_x86_64/pyarmor_runtime_000000/__init__.py",
"type": "Python"
}
|
# Pyarmor 8.5.11 (trial), 000000, 2024-12-09T10:19:41.411263
from .pyarmor_runtime import __pyarmor__
|
mit-llREPO_NAMEspacegym-kspdgPATH_START.@spacegym-kspdg_extracted@spacegym-kspdg-main@src@kspdg@private_src@python3_9@Linux_x86_64@pyarmor_runtime_000000@__init__.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "mneeleman/qubefit",
"repo_path": "qubefit_extracted/qubefit-master/doc/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sphinx_rtd_theme
import os
import sys
import glob
import subprocess
sys.path.insert(0, os.path.abspath('.'))
from pkg_resources import DistributionNotFound, get_distribution
try:
__version__ = get_distribution("qubefit").version
except DistributionNotFound:
__version__ = "unknown version"
# -- Project information -----------------------------------------------------
project = 'qubefit'
copyright = '2020-2021, Marcel Neeleman'
author = 'Marcel Neeleman'
# The full version, including alpha/beta/rc tags
release = __version__
version = __version__
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.imgmath',
"sphinx.ext.autodoc",
"sphinx.ext.napoleon"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
source_suffix = ".rst"
master_doc = "index"
# Convert the tutorials
for fn in glob.glob("_static/*.ipynb"):
name = os.path.splitext(os.path.split(fn)[1])[0]
    outfn = os.path.join("tutorials", name + ".rst")
print("Building {0}...".format(name))
subprocess.check_call(
"jupyter nbconvert --to rst "
+ fn
+ " --output-dir tutorials",
shell=True,
)
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme = 'sphinx_rtd_theme'
# html_sidebars = {
# '**': ['globaltoc.html', 'sourcelink.html', 'searchbox.html']
# }
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_favicon = "QubeFitLogo.png"
html_logo = "./Fig/QubeFitLogoText.png"
# html_theme_options = {"logo_only": True}
imgmath_image_format = 'svg'
|
mneelemanREPO_NAMEqubefitPATH_START.@qubefit_extracted@qubefit-master@doc@conf.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/contourcarpet/contours/labelfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(
self,
plotly_name="color",
parent_name="contourcarpet.contours.labelfont",
**kwargs,
):
super(ColorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@contourcarpet@contours@labelfont@_color.py@.PATH_END.py
|
{
"filename": "rundap.py",
"repo_name": "sdss/mangadap",
"repo_path": "mangadap_extracted/mangadap-main/mangadap/scripts/rundap.py",
"type": "Python"
}
|
import warnings
from IPython import embed
from mangadap.scripts import scriptbase
class RunDap(scriptbase.ScriptBase):
"""
Simple wrapper class for the rundap script.
"""
@classmethod
def name(cls):
"""
Return the name of the executable.
"""
return 'rundap'
@classmethod
def get_parser(cls, width=None):
parser = super().get_parser(description='Perform analysis of integral-field data.',
width=width)
# Read the optional run-mode arguments
parser.add_argument("--overwrite",
help="if all selected, will run dap for all plates/ifudesigns/modes "
" regardless of state", action="store_true", default=False)
parser.add_argument('-v', '--verbose', action='count',
help='Set verbosity level for manga_dap; can be omitted and set up '
'to -vv', default=0)
parser.add_argument("--quiet", help="suppress screen output", action="store_true",
default=False)
parser.add_argument("--print_version", help="print DAP version and stop",
action="store_true", default=False)
# These arguments are used to override default behavior
parser.add_argument("--drpver", type=str, default=None,
help='MaNGA DRP version for analysis; $MANGADRP_VER by default')
parser.add_argument("--redux_path", type=str, help="main DRP output path", default=None)
parser.add_argument("--dapver", type=str, default=None,
help='optional output version, different from product version. This '
'*only* affects the output directory structure. It does *not* '
'select the version of the DAP to use.')
parser.add_argument("--analysis_path", type=str, help="main DAP output path", default=None)
parser.add_argument("--plan_file", type=str, help="parameter file with the MaNGA DAP "
"execution plan to use instead of the default" , default=None)
parser.add_argument("--platelist", type=str, help="set list of plates to reduce",
default=None)
parser.add_argument("--ifudesignlist", type=str, help="set list of ifus to reduce",
default=None)
parser.add_argument("--list_file", type=str,
help='A file with the list of plates and ifudesigns to analyze',
default=None)
parser.add_argument("--combinatorics", help="force execution of all permutations of the "
"provided lists", action="store_true", default=False)
parser.add_argument('--sres_ext', type=str, default=None,
help='Spectral resolution extension to use. Default set by '
'MaNGADataCube class.')
parser.add_argument('--sres_fill', type=str, default=None,
help='If present, use interpolation to fill any masked pixels in the '
'spectral resolution vectors. Default set by MaNGADataCube '
'class.')
parser.add_argument('--covar_ext', type=str, default=None,
help='Use this extension to define the spatial correlation matrix. '
'Default set by MaNGADataCube class.')
parser.add_argument('--on_disk', action='store_true', default=False,
help='When using the DRPall file to collate the data for input to '
'the DAP, search for available DRP files on disk instead of '
'using the DRPall file content.')
parser.add_argument('--can_analyze', action='store_true', default=False,
help='Only construct script files for datacubes that can/should be '
'analyzed by the DAP. See '
':func:`~mangadap.survey.drpcomplete.DRPComplete.can_analyze`.')
parser.add_argument("--log", help="Have the main DAP executable produce a log file",
action="store_true", default=False)
parser.add_argument("--no_proc", help="Do NOT perform the main DAP processing steps",
action="store_true", default=False)
parser.add_argument("--no_plots", help="Do NOT create QA plots", action="store_true",
default=False)
parser.add_argument("--post", help="Create/Submit the post-processing scripts",
action="store_true", default=False)
parser.add_argument("--post_plots", action="store_true", default=False,
help="Create/Submit the post-processing plotting scripts")
# Read arguments specific to the cluster submission behavior
parser.add_argument("--label", type=str, help='label for cluster job', default='mangadap')
parser.add_argument("--nodes", type=int, help='number of nodes to use in cluster',
default=1)
parser.add_argument("--cpus", type=int,
help='number of cpus to use per node. Default is to use all available'
'; otherwise, set to minimum of provided number and number of '
'processors per node', default=None)
parser.add_argument("--fast", dest='qos', type=str, help='qos state', default=None)
parser.add_argument("--umask", type=str, help='umask bit for cluster job', default='0027')
parser.add_argument("--walltime", type=str, help='walltime for cluster job',
default='240:00:00')
parser.add_argument("--toughness", dest='hard', action='store_false', default=True,
help='turn off hard keyword for cluster submission')
parser.add_argument('--create', action='store_true', default=False,
help='use the pbs package to create the cluster scripts')
parser.add_argument('--submit', action='store_true', default=False,
help='submit the scripts to the cluster')
parser.add_argument('--progress', action='store_true', default=False,
help='instead of closing the script, report the progress of the '
'analysis on the cluster; this is required if you want to submit '
'the DAPall script immediately after completing the individual '
'cube analysis')
parser.add_argument("--queue", dest='queue', type=str, help='set the destination queue',
default=None)
return parser
@staticmethod
def main(args):
if args.print_version:
from mangadap import __version__
print(f'DAP Version: {__version__}')
return
from mangadap.survey.rundap import rundap
# Handle special argument cases
if args.qos is not None and args.nodes > 1:
warnings.warn('Requesting the fast node requires node=1. Ignoring input node number.')
nodes = 1 # Force the number of nodes to be 1
else:
nodes = args.nodes
_rundap = rundap(overwrite=args.overwrite, quiet=args.quiet, drpver=args.drpver,
redux_path=args.redux_path, dapver=args.dapver,
analysis_path=args.analysis_path, plan_file=args.plan_file,
platelist=args.platelist, ifudesignlist=args.ifudesignlist,
combinatorics=args.combinatorics, list_file=args.list_file,
sres_ext=args.sres_ext, sres_fill=args.sres_fill,
covar_ext=args.covar_ext, on_disk=args.on_disk,
can_analyze=args.can_analyze, log=args.log, dapproc=not args.no_proc,
pltifu_plots=not args.no_plots, post_process=args.post,
post_plots=args.post_plots, report_progress=args.progress,
verbose=args.verbose, label=args.label, nodes=nodes, cpus=args.cpus,
qos=args.qos, umask=args.umask, walltime=args.walltime, hard=args.hard,
create=args.create, submit=args.submit, queue=args.queue)
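# Example invocation (an illustrative sketch; the plate/IFU values are made up):
#
#   rundap --platelist 7443 --ifudesignlist 12701 --log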
|
sdssREPO_NAMEmangadapPATH_START.@mangadap_extracted@mangadap-main@mangadap@scripts@rundap.py@.PATH_END.py
|
{
"filename": "_layer.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/newshape/_layer.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LayerValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="layer", parent_name="layout.newshape", **kwargs):
super(LayerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
values=kwargs.pop("values", ["below", "above", "between"]),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@newshape@_layer.py@.PATH_END.py
|
{
"filename": "demo_MBS_spring.py",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/demos/python/mbs/demo_MBS_spring.py",
"type": "Python"
}
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Simone Benatti
# =============================================================================
#
# Simple example demonstrating the use of ChLinkTSDA.
#
# Two bodies, connected with identical (but modeled differently) spring-dampers
# are created side by side.
#
# Recall that Irrlicht uses a left-hand frame, so everything is rendered with
# left and right flipped.
#
# =============================================================================
import pychrono as chrono
import pychrono.irrlicht as chronoirr
# =============================================================================
rest_length = 1.5
spring_coef = 50
damping_coef = 1
# =============================================================================
# Functor class implementing the force for a ChLinkTSDA link.
# In this simple demonstration, we just reimplement the default linear spring-damper.
class MySpringForce(chrono.ForceFunctor):
def __init__(self):
super(MySpringForce, self).__init__()
def evaluate(self, #
time, # current time
rest_length, # undeformed length
length, # current length
vel, # current velocity (positive when extending)
link): # associated link
force = -spring_coef * (length - rest_length) - damping_coef * vel
return force
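# Note: a custom functor like the one above only takes effect once it is attached to a
# link via ChLinkTSDA.RegisterForceFunctor(), as done for spring_2 below.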
# =============================================================================
print("Copyright (c) 2017 projectchrono.org")
sys = chrono.ChSystemNSC()
sys.SetGravitationalAcceleration(chrono.ChVector3d(0, 0, 0))
# Create the ground body with two visualization spheres
# -----------------------------------------------------
ground = chrono.ChBody()
sys.AddBody(ground)
ground.SetFixed(True)
ground.EnableCollision(False)
sph_1 = chrono.ChVisualShapeSphere(0.1)
ground.AddVisualShape(sph_1, chrono.ChFramed(chrono.ChVector3d(-1, 0, 0)))
sph_2 = chrono.ChVisualShapeSphere(0.1)
ground.AddVisualShape(sph_2, chrono.ChFramed(chrono.ChVector3d(1, 0, 0)))
# Create a body suspended through a ChLinkTSDA (default linear)
# -------------------------------------------------------------
body_1 = chrono.ChBody()
sys.AddBody(body_1)
body_1.SetPos(chrono.ChVector3d(-1, -3, 0))
body_1.SetFixed(False)
body_1.EnableCollision(False)
body_1.SetMass(1)
body_1.SetInertiaXX(chrono.ChVector3d(1, 1, 1))
# Attach a visualization asset.
box_1 = chrono.ChVisualShapeBox(1, 1, 1)
box_1.SetColor(chrono.ChColor(0.6, 0, 0))
body_1.AddVisualShape(box_1)
# Create the spring between body_1 and ground. The spring end points are
# specified in the body relative frames.
spring_1 = chrono.ChLinkTSDA()
spring_1.Initialize(body_1, ground, True, chrono.ChVector3d(0, 0, 0), chrono.ChVector3d(-1, 0, 0))
spring_1.SetRestLength(rest_length)
spring_1.SetSpringCoefficient(spring_coef)
spring_1.SetDampingCoefficient(damping_coef)
sys.AddLink(spring_1)
# Attach a visualization asset.
spring_1.AddVisualShape(chrono.ChVisualShapeSpring(0.05, 80, 15))
# Create a body suspended through a ChLinkTSDA (custom force functor)
# -------------------------------------------------------------------
body_2 = chrono.ChBody()
sys.AddBody(body_2)
body_2.SetPos(chrono.ChVector3d(1, -3, 0))
body_2.SetFixed(False)
body_2.EnableCollision(False)
body_2.SetMass(1)
body_2.SetInertiaXX(chrono.ChVector3d(1, 1, 1))
# Attach a visualization asset.
box_2 = chrono.ChVisualShapeBox(1, 1, 1)
box_2.SetColor(chrono.ChColor(0, 0, 0.6))
body_2.AddVisualShape(box_2)
# Create the spring between body_2 and ground. The spring end points are
# specified in the body relative frames.
force = MySpringForce()
spring_2 = chrono.ChLinkTSDA()
spring_2.Initialize(body_2, ground, True, chrono.ChVector3d(0, 0, 0), chrono.ChVector3d(1, 0, 0))
spring_2.SetRestLength(rest_length)
spring_2.RegisterForceFunctor(force)
sys.AddLink(spring_2)
# Attach a visualization asset.
spring_2.AddVisualShape(chrono.ChVisualShapeSpring(0.05, 80, 15))
# Create the Irrlicht application
# -------------------------------
vis = chronoirr.ChVisualSystemIrrlicht()
vis.AttachSystem(sys)
vis.SetWindowSize(1024,768)
vis.SetWindowTitle('ChLinkTSDA demo')
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddSkyBox()
vis.AddCamera(chrono.ChVector3d(0, 0, 6))
vis.AddTypicalLights()
# Simulation loop
frame = 0
while vis.Run() :
vis.BeginScene()
vis.Render()
vis.EndScene()
sys.DoStepDynamics(1e-3)
    if frame % 50 == 0:
        # Report time, then length, velocity and force for each spring
        # ('{:.6}' applied to a string truncates each value to 6 characters)
        print('{:.6}'.format(str(sys.GetChTime())))
        print('{:.6} {:.6} {:.6}'.format(str(spring_1.GetLength()),
                                         str(spring_1.GetVelocity()),
                                         str(spring_1.GetForce())))
        print('{:.6} {:.6} {:.6}'.format(str(spring_2.GetLength()),
                                         str(spring_2.GetVelocity()),
                                         str(spring_2.GetForce())))
    frame += 1
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@demos@python@mbs@demo_MBS_spring.py@.PATH_END.py
|
{
"filename": "test_utils.py",
"repo_name": "HERA-Team/hera_opm",
"repo_path": "hera_opm_extracted/hera_opm-main/hera_opm/tests/test_utils.py",
"type": "Python"
}
|
"""Tests for utils.py"""
import pytest
import hera_opm.utils as utils
def test_get_makeflow_ArgumentParser():
# get an argument parser and make sure it behaves as expected
a = utils.get_makeflow_ArgumentParser()
config_file = "config_file.cfg"
output_file = "mf.log"
obsids = ["zen.2458000.12345.xx.uv", "zen.2458000.12345.yy.uv"]
args = ["-c", config_file, "-o", output_file, obsids[0], obsids[1]]
parsed_args = a.parse_args(args)
# make sure we got what we expected
assert parsed_args.config == config_file
assert parsed_args.output == output_file
for obsid in obsids:
assert obsid in parsed_args.files
assert parsed_args.scan_files is False
assert parsed_args.rename_bad_files is False
assert parsed_args.bad_suffix == ".METADATA_ERROR"
return
def test_get_cleaner_ArgumentParser():
# raise error for requesting unknown function
with pytest.raises(AssertionError):
utils.get_cleaner_ArgumentParser("blah")
# test getting each type of argparser
# wrapper
a = utils.get_cleaner_ArgumentParser("wrapper")
work_dir = "/foo/bar"
args = [work_dir]
parsed_args = a.parse_args(args)
assert parsed_args.directory == work_dir
# output
a = utils.get_cleaner_ArgumentParser("output")
parsed_args = a.parse_args(args)
assert parsed_args.directory == work_dir
# logs
a = utils.get_cleaner_ArgumentParser("logs")
output_file = "mf.log"
args = [work_dir, "-o", output_file]
parsed_args = a.parse_args(args)
assert parsed_args.directory == work_dir
assert parsed_args.output == output_file
assert not parsed_args.overwrite
assert parsed_args.remove_original
assert not parsed_args.zip
return
|
HERA-TeamREPO_NAMEhera_opmPATH_START.@hera_opm_extracted@hera_opm-main@hera_opm@tests@test_utils.py@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/annotation/_hoverlabel.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="hoverlabel", parent_name="layout.scene.annotation", **kwargs
):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the background color of the hover label.
By default uses the annotation's `bgcolor` made
opaque, or white if it was transparent.
bordercolor
Sets the border color of the hover label. By
default uses either dark grey or white, for
maximum contrast with `hoverlabel.bgcolor`.
font
Sets the hover label text font. By default uses
the global hover font and size, with color from
`hoverlabel.bordercolor`.
""",
),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@annotation@_hoverlabel.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "hahnec/torchimize",
"repo_path": "torchimize_extracted/torchimize-master/torchimize/__init__.py",
"type": "Python"
}
|
__author__ = "Christopher Hahne"
__email__ = "inbox@christopherhahne.de"
__license__ = """
Copyright (c) 2022 Christopher Hahne <inbox@christopherhahne.de>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
__version__ = '0.0.16'
|
hahnecREPO_NAMEtorchimizePATH_START.@torchimize_extracted@torchimize-master@torchimize@__init__.py@.PATH_END.py
|
{
"filename": "test_make_moments.py",
"repo_name": "Astroua/TurbuStat",
"repo_path": "TurbuStat_extracted/TurbuStat-master/turbustat/moments/tests/test_make_moments.py",
"type": "Python"
}
|
# Licensed under an MIT open source license - see LICENSE
from __future__ import print_function, absolute_import, division
import numpy.testing as npt
import astropy.units as u
import os
from glob import glob
import pytest
from .. import Moments
from ...tests._testing_data import dataset1, sc1, props1
@pytest.mark.openfiles_ignore
def test_loading():
# Save the files.
props1.to_fits(save_name="dataset1", overwrite=True)
# Try loading the files.
# Set the scale to the assumed value.
test = Moments.from_fits(sc1, moments_prefix="dataset1",
moments_path=".",
scale=0.003031065017916262 * u.Unit(""))
npt.assert_allclose(test.moment0, dataset1["moment0"][0])
npt.assert_allclose(test.moment1, dataset1["centroid"][0])
npt.assert_allclose(test.linewidth, dataset1["linewidth"][0])
npt.assert_allclose(test.moment0_err, dataset1["moment0_error"][0])
npt.assert_allclose(test.moment1_err, dataset1["centroid_error"][0])
npt.assert_allclose(test.linewidth_err, dataset1["linewidth_error"][0])
# Clean-up the saved files
# moment_fits = glob("dataset1*.fits")
# for file in moment_fits:
# os.remove(file)
|
AstrouaREPO_NAMETurbuStatPATH_START.@TurbuStat_extracted@TurbuStat-master@turbustat@moments@tests@test_make_moments.py@.PATH_END.py
|
{
"filename": "maps.py",
"repo_name": "simonsobs/nemo",
"repo_path": "nemo_extracted/nemo-main/nemo/maps.py",
"type": "Python"
}
|
"""
This module contains tools for manipulating maps.
"""
from astLib import *
from scipy import ndimage
from scipy import interpolate
from scipy.signal import convolve as scipy_convolve
from scipy import optimize
import astropy.io.fits as pyfits
import astropy.table as atpy
import astropy.stats as apyStats
import mahotas
import colorcet
import numpy as np
import pylab as plt
import glob
import os
import sys
import math
import time
import shutil
import copy
import yaml
import pickle
from pixell import enmap, curvedsky, utils, powspec
import nemo
try:
import reproject
except:
pass
from . import catalogs
from . import signals
from . import photometry
from . import plotSettings
from . import pipelines
from . import completeness
np.random.seed()
#------------------------------------------------------------------------------------------------------------
class MapDict(dict):
"""A dictionary for managing a sky map (a 2d array with an associated WCS) within Nemo. Keys within the
dictionary can be set to values that control preprocessing of the map (usually done before filtering).
Many of the keys in the dictionary map to entries in the .yml config file used by Nemo.
Args:
inputDict (:obj:`dict`): Input dictionary (usually this mirrors the contents of `unfilteredMaps` in
Nemo .yml config files).
tileCoordsDict (:obj:`dict`, optional): A dictionary that describes the tiling of a large map, as
produced by :meth:`startUp.NemoConfig.getTileCoordsDict`.
Attributes:
tileCoordsDict (:obj:`dict`): A dictionary that describes the tiling of a large map, as
produced by :meth:`startUp.NemoConfig.getTileCoordsDict`.
validMapKeys (:obj:`list`): A list of keys that may contain a path to a map in FITS image format.
These are: ['mapFileName', 'weightsFileName', 'pointSourceMask', 'surveyMask', 'flagMask'].
"""
def __init__(self, inputDict, tileCoordsDict = None):
super(MapDict, self).__init__(inputDict)
self.tileCoordsDict=tileCoordsDict
self._maskKeys=['pointSourceMask', 'surveyMask', 'flagMask', 'extendedMask']
self.validMapKeys=['mapFileName', 'weightsFileName']+self._maskKeys
def copy(self):
"""Make a copy of this :class:`MapDict` object.
Returns:
A deep copy of the :class:`MapDict` object.
"""
return MapDict(self, tileCoordsDict = self.tileCoordsDict)
def loadTile(self, mapKey, tileName, returnWCS = False):
"""Given a key in the map dictionary that contains the path to a FITS image, return the map
as a 2d array and (optionally) the WCS.
Args:
mapKey (:obj:`str`): Name of a key in a map dictionary that contains the path to map FITS image.
See self.`validMapKeys` for a list.
tileName (:obj:`str`): The name of the tile to load.
Returns:
Map data as a 2d array (and optionally a WCS)
Note:
Tiles can be re-projected from CAR to TAN on the fly if the 'reprojectToTan' is set in the
Nemo config.
"""
if mapKey not in self.validMapKeys:
raise Exception("mapKey must be one of %s - given mapKey = '%s'." % (self.validMapKeys, mapKey))
pathToTileImages=self.get(mapKey)
if os.path.isdir(pathToTileImages) == True:
# Directory full of tile images (used by, e.g., on-the-fly extended source masking)
with pyfits.open(pathToTileImages+os.path.sep+tileName+".fits") as img:
extName=0
tileData=img[extName].data
if tileData is None:
for extName in img:
tileData=img[extName].data
if tileData is not None:
break
assert tileData is not None
if returnWCS == True or self['reprojectToTan'] == True:
# Zapping keywords in old ACT maps that confuse astropy.wcs
wcs=astWCS.WCS(img[extName].header, mode = 'pyfits', zapKeywords = ['PC1_1', 'PC1_2', 'PC2_1', 'PC2_2'])
data=tileData
elif type(pathToTileImages) == np.ndarray:
# We no longer want to support this kind of thing... clean this up later
raise Exception("Expected a path but got an array instead (image already loaded).")
else:
# On-the-fly tile clipping
with pyfits.open(pathToTileImages) as img:
for ext in img:
if img[ext].data is not None:
break
if returnWCS == True or self['reprojectToTan'] == True:
wcs=astWCS.WCS(self.tileCoordsDict[tileName]['header'], mode = 'pyfits')
minX, maxX, minY, maxY=self.tileCoordsDict[tileName]['clippedSection']
if img[ext].data.ndim == 3:
data=img[ext].data[0, minY:maxY, minX:maxX]
elif img[ext].data.ndim == 2:
data=img[ext].data[minY:maxY, minX:maxX]
else:
raise Exception("Map data has %d dimensions - only ndim = 2 or ndim = 3 are currently handled." % (img[ext].data.ndim))
# Avoiding potential for read-only weirdness
data=data[:]
data=data.copy()
# Convert any mask to 8-bit unsigned ints to save memory
if mapKey in self._maskKeys:
if data.dtype != np.uint8:
data=np.array(data, dtype = np.uint8)
# Survey masks are special: we need to zap the border overlap area or area calculations will be wrong
if mapKey == 'surveyMask':
minX, maxX, minY, maxY=self.tileCoordsDict[tileName]['areaMaskInClipSection']
data[:minY, :]=0
data[maxY:, :]=0
data[:, :minX]=0
data[:, maxX:]=0
# Optional TAN reprojection - may help avoid biases due to distortion at high dec in CAR
# WARNING: Probably introduces a new pixel window if we're not careful
if self['reprojectToTan'] == True:
if mapKey in self._maskKeys:
order=0
else:
order='bicubic'
tanWCS=_makeTanWCS(wcs)
ySizePix, xSizePix=tanWCS.header['NAXIS2'], tanWCS.header['NAXIS1']
tanData, footprint=reproject.reproject_interp((data, wcs.AWCS), tanWCS.AWCS, shape_out = [ySizePix, xSizePix],
order = order, return_footprint = True)
tanData[footprint == 0]=0 # get rid of nans which will be in borders anyway
# checkData=reproject.reproject_interp((tanData, tanWCS.AWCS), wcs.AWCS, shape_out = data.shape, order = 'bicubic',
# return_footprint = False)
wcs=tanWCS
data=tanData
if returnWCS == True:
return data, wcs
else:
return data
def preprocess(self, tileName = 'PRIMARY', diagnosticsDir = None):
"""Applies a number of pre-processing steps to the map described by this :class:`MapDict` object,
typically used before filtering.
The first step is to load the map itself and the associated weights. Some other operations that may be
applied are controlled by keys added to the MapDict. Some of these may be specified in the .yml
configuration file, while others are applied by particular filter objects or by routines that generate
simulated data. The following keys are understood:
surveyMask (:obj:`str`)
Path to a mask (.fits image; 1 = valid, 0 = masked) that defines the valid object search area.
pointSourceMask (:obj:`str`)
Path to a mask (.fits image; 1 = valid, 0 = masked) that contains holes at the locations of point
sources, defining regions that are excluded from the object search area.
RADecSection (:obj:`list`)
Defines a region to extract from the map. Use the format [RAMin, RAMax, decMin, decMax] (units:
decimal degrees).
CMBSimSeed (:obj:`int`)
If present, replace the map with a source-free simulated CMB realisation, generated using the given
seed number. Used by :meth:`estimateContaminationFromSkySim`.
applyBeamConvolution (:obj:`bool`)
If True, the map is convolved with the beam given in the beamFileName key. This should only be
needed when using preliminary y-maps made by tILe-C.
Args:
tileName (:obj:`str`): Name of the map tile (extension name) to operate on.
diagnosticsDir (:obj:`str`): Path to a directory where miscellaneous diagnostic data may be written.
Returns:
None - values in the map dictionary are updated in-place, and additional keys may be added.
"""
data, wcs=self.loadTile('mapFileName', tileName, returnWCS = True)
# Optional calibration factor
if 'calibFactor' in self.keys():
data=data*self['calibFactor']
if self['units'] == 'Jy/sr':
if self['obsFreqGHz'] == 148:
data=(data/1.072480e+09)*2.726*1e6
elif self['obsFreqGHz'] == 219:
data=(data/1.318837e+09)*2.726*1e6
else:
raise Exception("no code added to support conversion to uK from Jy/sr for freq = %.0f GHz" \
% (self['obsFreqGHz']))
# Load weight map if given
if 'weightsFileName' in list(self.keys()) and self['weightsFileName'] is not None:
weights=self.loadTile('weightsFileName', tileName)
# For Enki maps... take only I (temperature) for now, add options for this later
if weights.ndim == 3: # I, Q, U
weights=weights[0, :]
elif weights.ndim == 4: # I, Q, U and also a covariance matrix
weights=weights[0, 0, :]
else:
weights=np.ones(data.shape)
# We rely on pixels with zero weight having zero value in actual maps later (automated edge trimming)
# This might not be the case if the map has been filtered slightly before being fed into nemo
data[weights == 0]=0
# Load survey and point source masks, if given
if 'surveyMask' in list(self.keys()) and self['surveyMask'] is not None:
surveyMask=self.loadTile('surveyMask', tileName)
else:
surveyMask=np.ones(data.shape, dtype = np.uint8)
surveyMask[weights == 0]=0
# Some apodisation of the data outside the survey mask
# NOTE: should add adjustable parameter for this somewhere later
if 'apodizeUsingSurveyMask' in list(self.keys()) and self['apodizeUsingSurveyMask'] == True:
# We need to remain unapodized to at least noiseGridArcmin beyond the edge of the survey mask
# We'll need to make these adjustable parameters
apodMask=np.array(surveyMask, dtype = bool)
for i in range(120):
apodMask=mahotas.dilate(apodMask)
apodMask=ndimage.gaussian_filter(np.array(apodMask, dtype = float), 20)
data=data*apodMask
del apodMask
if 'pointSourceMask' in list(self.keys()) and self['pointSourceMask'] is not None:
psMask=self.loadTile('pointSourceMask', tileName)
else:
psMask=np.ones(data.shape, dtype = np.uint8)
# Use for tracking regions where subtraction/in-painting took place to make flags in catalog
# We can also supply a flag mask at the start, e.g., for marking dusty regions without zapping them
# NOTE: flag masks for each frequency map get combined within filter objects
if 'flagMask' in list(self.keys()) and self['flagMask'] is not None:
flagMask=self.loadTile('flagMask', tileName)*surveyMask
else:
flagMask=np.zeros(data.shape, dtype = np.uint8)
# Optional map clipping
if 'RADecSection' in list(self.keys()) and self['RADecSection'] is not None:
RAMin, RAMax, decMin, decMax=self['RADecSection']
clip=astImages.clipUsingRADecCoords(data, wcs, RAMin, RAMax, decMin, decMax)
data=clip['data']
whtClip=astImages.clipUsingRADecCoords(weights, wcs, RAMin, RAMax, decMin, decMax)
weights=whtClip['data']
psClip=astImages.clipUsingRADecCoords(psMask, wcs, RAMin, RAMax, decMin, decMax)
psMask=psClip['data']
surveyClip=astImages.clipUsingRADecCoords(surveyMask, wcs, RAMin, RAMax, decMin, decMax)
surveyMask=surveyClip['data']
flagClip=astImages.clipUsingRADecCoords(flagMask, wcs, RAMin, RAMax, decMin, decMax)
flagMask=flagClip['data']
wcs=clip['wcs']
if len(clip['data']) == 0:
raise Exception("Clipping using RADecSection returned empty array - check RADecSection in config .yml file is in map")
# For source-free simulations (contamination tests)
if 'CMBSimSeed' in list(self.keys()):
randMap=simCMBMap(data.shape, wcs, noiseLevel = 0, beam = self['beamFileName'],
seed = self['CMBSimSeed'])
randMap[np.equal(weights, 0)]=0
# Add white noise that varies according to inv var map...
# Noise needed is the extra noise we need to add to match the real data, scaled by inv var map
# This initial estimate is too high, so we use a grid search to get a better estimate
mask=np.nonzero(data)
dataSigma=data[mask].std()
whiteNoiseLevel=np.zeros(weights.shape)
whiteNoiseLevel[mask]=1/np.sqrt(weights[mask])
noiseNeeded=np.sqrt(data[mask].var()-randMap[mask].var()-np.median(whiteNoiseLevel[mask])**2)
noiseBoostFactor=noiseNeeded/np.median(whiteNoiseLevel[mask])
# NOTE: disabled finding boost factor below for now...
bestBoostFactor=1.
# --- disabled
#bestDiff=1e6
#bestBoostFactor=noiseBoostFactor
#simNoiseValues=simNoise[mask]
#for boostFactor in np.linspace(noiseBoostFactor*0.5, noiseBoostFactor, 10):
#diff=abs(dataSigma-(simNoiseValues+generatedNoise*boostFactor).std())
#if diff < bestDiff:
#bestBoostFactor=boostFactor
#bestDiff=diff
# ---
data[mask]=np.random.normal(randMap[mask], bestBoostFactor*whiteNoiseLevel[mask],
whiteNoiseLevel[mask].shape)
outFileName=diagnosticsDir+os.path.sep+"CMBSim_%d#%s.fits" % (self['obsFreqGHz'], tileName)
saveFITS(outFileName, data, wcs)
# For position recovery tests, completeness calculations
if 'injectSources' in list(self.keys()):
# NOTE: Need to add varying GNFWParams here
if 'GNFWParams' in self['injectSources'].keys():
GNFWParams=self['injectSources']['GNFWParams']
obsFreqGHz=self['obsFreqGHz']
else:
GNFWParams=None
obsFreqGHz=None
# Unsure if we actually want/need the below...
# source injection sim tiles are processed independently (so shouldn't be double counting in overlaps anyway)
validAreaSection=self.tileCoordsDict[tileName]['areaMaskInClipSection']
modelMap=makeModelImage(data.shape, wcs, self['injectSources']['catalog'],
self['beamFileName'], obsFreqGHz = obsFreqGHz,
GNFWParams = GNFWParams, profile = self['injectSources']['profile'],
validAreaSection = validAreaSection,
override = self['injectSources']['override'])
if modelMap is not None:
modelMap[weights == 0]=0
data=data+modelMap
# Should only be needed for handling preliminary tILe-C maps
if 'applyBeamConvolution' in self.keys() and self['applyBeamConvolution'] == True:
data=convolveMapWithBeam(data, wcs, self['beamFileName'], maxDistDegrees = 1.0)
if diagnosticsDir is not None:
saveFITS(diagnosticsDir+os.path.sep+"beamConvolved#%s.fits" % (tileName), data, wcs)
# Smoothing with some kernel (used, e.g., in PSF-matching between maps in nemoSpec)
if 'smoothKernel' in self.keys():
if 'smoothAttenuationFactor' in self.keys():
data=data*self['smoothAttenuationFactor']
data=convolveMapWithBeam(data, wcs, self['smoothKernel'], maxDistDegrees = 1.0)
# Check if we're going to need to fill holes - if so, set-up smooth background only once
# NOTE: If this needs changing, needs parametrizing as used in e.g. ACT DR5 results
holeFillingKeys=['maskPointSourcesFromCatalog', 'maskAndFillFromCatalog', 'extendedMask']
holeFilling=False
for h in holeFillingKeys:
if h in list(self.keys()):
holeFilling=True
break
if holeFilling == True:
pixRad=(10.0/60.0)/wcs.getPixelSizeDeg()
bckData=ndimage.median_filter(data, int(pixRad))
if 'extendedMask' in list(self.keys()):
# Filling with white noise + smooth large scale image
# WARNING: Assumes weights are ivar maps [true for Sigurd's maps]
extendedMask=self.loadTile('extendedMask', tileName = tileName)
mask=np.nonzero(weights)
whiteNoiseLevel=np.zeros(weights.shape)
whiteNoiseLevel[mask]=1/np.sqrt(weights[mask])
data[extendedMask == 1]=bckData[extendedMask == 1]+np.random.normal(0, whiteNoiseLevel[extendedMask == 1])
surveyMask=surveyMask*(1-extendedMask)
#flagMask=flagMask+extendedMask
# Optional masking of point sources from external catalog
# Especially needed if using Fourier-space matched filter (and maps not already point source subtracted)
if 'maskPointSourcesFromCatalog' in list(self.keys()) and self['maskPointSourcesFromCatalog'] is not None:
# This is fast enough if using small tiles and running in parallel...
# If our masking/filling is effective enough, we may not need to mask so much here...
if type(self['maskPointSourcesFromCatalog']) is not list:
self['maskPointSourcesFromCatalog']=[self['maskPointSourcesFromCatalog']]
psMask=np.ones(data.shape, dtype = np.uint8)
#pixRad=(10.0/60.0)/wcs.getPixelSizeDeg()
#bckData=ndimage.median_filter(data, int(pixRad))
rDegMap=np.ones(data.shape, dtype = float)*1e6
for catalogInfo in self['maskPointSourcesFromCatalog']:
if type(catalogInfo) == str:
catalogPath=catalogInfo
fluxCutJy=0.0
elif type(catalogInfo) == dict:
catalogPath=catalogInfo['path']
fluxCutJy=catalogInfo['fluxCutJy']
else:
raise Exception("Didn't understand contents of 'maskPointSourcesFromCatalog' - should be a path, or a dict with 'path' key.")
tab=atpy.Table().read(catalogPath)
if 'fluxJy' in tab.keys():
tab=tab[tab['fluxJy'] > fluxCutJy]
tab=catalogs.getCatalogWithinImage(tab, data.shape, wcs)
# If we're given a catalog that already has rArcmin in it, we use that to set hole size
# Otherwise, if we have shape measurements (ellipse_A at least), we can use that
for row in tab:
# Extended sources - identify by measured size > masking radius
# These will mess up noise term in filter, so add to psMask also and fill + smooth
# We won't fiddle with PA here, we'll just maximise based on x-pixel scale (because CAR)
if 'rArcmin' in tab.keys():
maskRadiusArcmin=row['rArcmin']
elif 'ellipse_A' in tab.keys():
xPixSizeArcmin=(wcs.getXPixelSizeDeg()/np.cos(np.radians(row['decDeg'])))*60
ASizeArcmin=row['ellipse_A']/xPixSizeArcmin
maskRadiusArcmin=ASizeArcmin/2
else:
raise Exception("To mask sources in a catalog, need either 'rArcmin' or 'ellipse_A' column to be present.")
rDegMap, xBounds, yBounds=makeDegreesDistanceMap(rDegMap, wcs,
row['RADeg'], row['decDeg'],
maskRadiusArcmin/60)
surveyMask[rDegMap < maskRadiusArcmin/60.0]=0
psMask[rDegMap < maskRadiusArcmin/60.0]=0
data[rDegMap < maskRadiusArcmin/60.0]=bckData[rDegMap < maskRadiusArcmin/60.0]
if 'subtractModelFromCatalog' in list(self.keys()) and self['subtractModelFromCatalog'] is not None:
if type(self['subtractModelFromCatalog']) is not list:
self['subtractModelFromCatalog']=[self['subtractModelFromCatalog']]
for tab in self['subtractModelFromCatalog']:
if type(tab) != atpy.Table:
                    tab=atpy.Table().read(tab)
tab=catalogs.getCatalogWithinImage(tab, data.shape, wcs)
model=makeModelImage(data.shape, wcs, tab, self['beamFileName'], obsFreqGHz = self['obsFreqGHz'])
if model is not None:
data=data-model
# Threshold of > 1 uK here should be made adjustable in config
flagMask=flagMask+np.greater(model, 1)
if 'maskAndFillFromCatalog' in list(self.keys()) and self['maskAndFillFromCatalog'] is not None:
if type(self['maskAndFillFromCatalog']) is not list:
self['maskAndFillFromCatalog']=[self['maskAndFillFromCatalog']]
for tab in self['maskAndFillFromCatalog']:
if type(tab) != atpy.Table:
                    tab=atpy.Table().read(tab)
tab=catalogs.getCatalogWithinImage(tab, data.shape, wcs)
if len(tab) > 0 and 'ellipse_A' not in tab.keys():
raise Exception("Need to set measureShapes: True to use maskAndFillFromCatalog")
for row in tab:
x, y=wcs.wcs2pix(row['RADeg'], row['decDeg'])
rArcminMap=np.ones(data.shape, dtype = float)*1e6
                    if 'ellipse_A' in tab.keys() and 'ellipse_B' in tab.keys():
xPixSizeArcmin=(wcs.getXPixelSizeDeg()/np.cos(np.radians(row['decDeg'])))*60
maskRadiusArcmin=(row['ellipse_A']/xPixSizeArcmin)/2
if 'maskHoleDilationFactor' in self.keys() and self['maskHoleDilationFactor'] is not None:
maskRadiusArcmin=maskRadiusArcmin*self['maskHoleDilationFactor']
rArcminMap, xBounds, yBounds=makeDegreesDistanceMap(rArcminMap, wcs,
row['RADeg'], row['decDeg'],
maskRadiusArcmin/60)
rArcminMap=rArcminMap*60
surveyMask[rArcminMap < maskRadiusArcmin]=0
psMask[rArcminMap < maskRadiusArcmin]=0
data[rArcminMap < maskRadiusArcmin]=bckData[rArcminMap < maskRadiusArcmin]
# Add the map data to the dict
self['data']=data
self['weights']=weights
self['wcs']=wcs
self['surveyMask']=surveyMask
self['pointSourceMask']=psMask
self['flagMask']=flagMask
self['tileName']=tileName
# No point continuing if masks are different shape to map (easier to tell user here)
if self['data'].shape != self['pointSourceMask'].shape:
raise Exception("Map and point source mask dimensions are not the same (they should also have same WCS)")
if self['data'].shape != self['surveyMask'].shape:
raise Exception("Map and survey mask dimensions are not the same (they should also have same WCS)")
#------------------------------------------------------------------------------------------------------------
class MapDictList(object):
"""Blah. We want this to iterate over the mapDictList and be indexable.
"""
def __init__(self, mapDictList, tileCoordsDict = None):
"""Blah.
"""
self.mapDictList=[]
for mapDict in mapDictList:
self.mapDictList.append(MapDict(mapDict, tileCoordsDict))
def __iter__(self):
yield from self.mapDictList
def __getitem__(self, item):
return self.mapDictList[item]
#------------------------------------------------------------------------------------------------------------
class TileDict(dict):
"""A dictionary for collecting tile images, for later saving as multi-extension FITS or outputting as a
single monolithic FITS image. Keys within the dictionary map to tile names. Handles on-the-fly
reprojection between TAN and CAR if specified in the Nemo config.
Args:
inputDict (:obj:`dict`): Input dictionary (keys map to tile names).
tileCoordsDict (:obj:`dict`, optional): A dictionary that describes the tiling of a large map, as
produced by :meth:`startUp.NemoConfig.getTileCoordsDict`.
Attributes:
tileCoordsDict (:obj:`dict`): A dictionary that describes the tiling of a large map, as
produced by :meth:`startUp.NemoConfig.getTileCoordsDict`.
"""
def __init__(self, inputDict, tileCoordsDict = None):
super(TileDict, self).__init__(inputDict)
self.tileCoordsDict=tileCoordsDict
def copy(self):
"""Make a copy of this :class:`TileDict` object.
Returns:
A deep copy of the :class:`TileDict` object.
"""
return TileDict(self, tileCoordsDict = self.tileCoordsDict)
def saveMEF(self, outFileName, compressionType = None):
"""Save the tile images as a multi-extension FITS file.
Args:
outFileName (:obj:`str`): Path where the MEF file will be written.
compressionType (:obj:`str`): If given, the data will be compressed using the given
method (as understood by :mod:`astropy.io.fits`). Use `PLIO_1` for masks,
and `RICE_1` for other image data that can stand lossy compression. If None,
the image data is not compressed.
Returns:
None
"""
newImg=pyfits.HDUList()
for tileName in self.keys():
if self.tileCoordsDict[tileName]['reprojectToTan'] == True:
wcs=astWCS.WCS(self.tileCoordsDict[tileName]['header'], mode = 'pyfits')
tanWCS=_makeTanWCS(wcs)
header=tanWCS.header
else:
header=self.tileCoordsDict[tileName]['header']
if compressionType is not None:
if compressionType == 'PLIO_1':
dtype=np.uint8
else:
dtype=np.float32
hdu=pyfits.CompImageHDU(np.array(self[tileName], dtype = dtype),
header, name = tileName,
compression_type = compressionType)
else:
hdu=pyfits.ImageHDU(self[tileName], header, name = tileName)
newImg.append(hdu)
newImg.writeto(outFileName, overwrite = True)
def saveStitchedFITS(self, outFileName, stitchedWCS, compressionType = None):
"""Stitch together the tiles into a monolithic image and save in a FITS file.
Args:
outFileName (:obj:`str`): Path where the stitched image FITS file will be written.
stitchedWCS (:obj:`astWCS.WCS`): WCS object corresponding to the stitched map
that will be produced.
compressionType (:obj:`str`): If given, the data will be compressed using the given
method (as understood by :mod:`astropy.io.fits`). Use `PLIO_1` for masks,
and `RICE_1` for other image data that can stand lossy compression. If None,
the image data is not compressed.
Returns:
None
"""
wcs=stitchedWCS
d=np.zeros([stitchedWCS.header['NAXIS2'], stitchedWCS.header['NAXIS1']], dtype = np.float32)
for tileName in self.keys():
if self.tileCoordsDict[tileName]['reprojectToTan'] == True:
carWCS=astWCS.WCS(self.tileCoordsDict[tileName]['header'], mode = 'pyfits')
tanWCS=_makeTanWCS(carWCS)
shape=[self.tileCoordsDict[tileName]['header']['NAXIS2'],
self.tileCoordsDict[tileName]['header']['NAXIS1']]
if compressionType == 'PLIO_1':
order=0
else:
order='bicubic'
carData, footprint=reproject.reproject_interp((self[tileName], tanWCS.AWCS), carWCS.AWCS, shape_out = shape, order = order,
return_footprint = True)
carData[footprint == 0]=0 # get rid of nans which will be in borders anyway
else:
carData=self[tileName]
minX, maxX, minY, maxY=self.tileCoordsDict[tileName]['clippedSection']
try:
d[minY:maxY, minX:maxX]=d[minY:maxY, minX:maxX]+carData.data
except:
raise Exception("Stitching error on tile %s" % (tileName))
saveFITS(outFileName, d, wcs, compressionType = compressionType)
#-------------------------------------------------------------------------------------------------------------
def _makeTanWCS(wcs, pixScale = 0.5/60.0):
"""Generate a TAN WCS.
Returns:
TAN WCS
"""
RADeg, decDeg=wcs.getCentreWCSCoords()
CRVAL1, CRVAL2=RADeg, decDeg
xSizeDeg, ySizeDeg=wcs.getFullSizeSkyDeg()
xSizePix, ySizePix=int(xSizeDeg/pixScale), int(ySizeDeg/pixScale)
xRefPix=xSizePix/2.0
yRefPix=ySizePix/2.0
xOutPixScale=xSizeDeg/xSizePix
yOutPixScale=ySizeDeg/ySizePix
newHead=pyfits.Header()
newHead['NAXIS']=2
newHead['NAXIS1']=xSizePix
newHead['NAXIS2']=ySizePix
newHead['CTYPE1']='RA---TAN'
newHead['CTYPE2']='DEC--TAN'
newHead['CRVAL1']=CRVAL1
newHead['CRVAL2']=CRVAL2
newHead['CRPIX1']=xRefPix+1
newHead['CRPIX2']=yRefPix+1
newHead['CDELT1']=-xOutPixScale
newHead['CDELT2']=xOutPixScale # Makes more sense to use same pix scale
newHead['CUNIT1']='DEG'
newHead['CUNIT2']='DEG'
tanWCS=astWCS.WCS(newHead, mode='pyfits')
return tanWCS
#-------------------------------------------------------------------------------------------------------------
def convertToY(mapData, obsFrequencyGHz = 148):
"""Converts an array (e.g., a map) in ΔTemperature (μK) with respect to the CMB to Compton y parameter
values at the given frequency.
Args:
mapData (:obj:`np.ndarray`): An array containing delta T (micro Kelvin, with respect to CMB) values.
obsFrequencyGHz (:obj:`float`): Frequency in GHz at which to do the conversion.
Returns:
An array of Compton y parameter values.
"""
fx=signals.fSZ(obsFrequencyGHz)
mapData=(mapData/(signals.TCMB*1e6))/fx # remember, map is in deltaT uK
return mapData
#-------------------------------------------------------------------------------------------------------------
def convertToDeltaT(mapData, obsFrequencyGHz = 148, TCMBAlpha = 0.0, z = None):
"""Converts an array (e.g., a map) of Compton y parameter values to ΔTemperature (μK) with respect to the
CMB at the given frequency.
Args:
mapData (:obj:`np.ndarray`): An array containing Compton y parameter values.
obsFrequencyGHz (:obj:`float`): Frequency in GHz at which to do the conversion.
TCMBAlpha (:obj:`float`, optional): This should always be zero unless you really do want to make a model
where CMB temperature evolves as T\ :sub:`0` * (1+z)\ :sup:`1-TCMBAlpha`.
z (:obj:`float`, optional): Redshift - needed only if TCMBAlpha is non-zero.
Returns:
An array of ΔT (μK) values.
"""
fx=signals.fSZ(obsFrequencyGHz, TCMBAlpha = TCMBAlpha, z = z)
mapData=mapData*fx*(signals.TCMB*1e6) # into uK
return mapData
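# Example (an illustrative round-trip sketch only; the input array is made up):
#
#   import numpy as np
#   deltaT=np.random.normal(0, 10, (100, 100))              # fake map in uK
#   yMap=convertToY(deltaT, obsFrequencyGHz = 148)          # uK -> Compton y
#   recovered=convertToDeltaT(yMap, obsFrequencyGHz = 148)  # Compton y -> uK
#   assert np.allclose(recovered, deltaT)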
#-------------------------------------------------------------------------------------------------------------
def autotiler(surveyMask, wcs, targetTileWidth, targetTileHeight):
"""Given a survey mask (where values > 0 indicate valid area, and 0 indicates area to be ignored),
figure out an optimal tiling strategy to accommodate tiles of the given dimensions. The survey mask need
not be contiguous (e.g., AdvACT and SO maps, using the default pixelization, can be segmented into three
or more different regions).
Args:
surveyMask (numpy.ndarray): Survey mask image (2d array). Values > 0 will be taken to define valid
area.
wcs (astWCS.WCS): WCS associated with survey mask image.
targetTileWidth (float): Desired tile width, in degrees (RA direction for CAR).
targetTileHeight (float): Desired tile height, in degrees (dec direction for CAR).
Returns:
Dictionary list defining tiles in same format as config file.
Note:
        While this routine will try to match the target tile sizes, it may not match exactly. Also,
:meth:`startUp.NemoConfig.getTileCoordsDict` will expand tiles by a user-specified amount such that
they overlap.
"""
# This deals with identifying boss vs. full AdvACT footprint maps
mapCentreRA, mapCentreDec=wcs.getCentreWCSCoords()
skyWidth, skyHeight=wcs.getFullSizeSkyDeg()
    if (mapCentreRA < 0.1 and skyWidth < 0.1) or skyWidth > 359.9:
handle180Wrap=True
else:
handle180Wrap=False
segMap=surveyMask
try:
numObjects=ndimage.label(segMap, output = segMap)
except:
raise Exception("surveyMask given for autotiler is probably too complicated (breaks into > 256 regions) - check your mask and/or config file.")
# More memory efficient than previous version
fieldIDs=np.arange(1, numObjects+1, dtype = segMap.dtype)
maskSections=ndimage.find_objects(segMap)
tileList=[]
for maskSection, f in zip(maskSections, fieldIDs):
yMin=maskSection[0].start
yMax=maskSection[0].stop-1
if yMax-yMin < 1000: # In case of stray individual pixels (e.g., combined with extended sources mask)
continue
xc=int((maskSection[1].start+(maskSection[1].stop-1))/2)
# Some people want to run on full sky CAR ... so we have to avoid that blowing up at the poles
decMin, decMax=np.nan, np.nan
deltaY=0
while np.isnan(decMin) and np.isnan(decMax):
RAc, decMin=wcs.pix2wcs(xc, yMin+deltaY)
RAc, decMax=wcs.pix2wcs(xc, yMax-deltaY)
deltaY=deltaY+0.01
# Adjusted post-nemo v0.7.3 - closer to actual size of supplied mask without going over the edge
numRows=int((decMax-decMin)/targetTileHeight)
if numRows == 0:
raise Exception("targetTileHeight is larger than the height of the map - edit your config file accordingly.")
tileHeight=np.round(((decMax-decMin)/numRows)*1000)/1000 # This rounds the height at 3 decimal places (was ceil at 2dp)
for i in range(numRows):
decBottom=decMin+i*tileHeight
decTop=decMin+(i+1)*tileHeight
xc, yBottom=wcs.wcs2pix(RAc, decBottom)
xc, yTop=wcs.wcs2pix(RAc, decTop)
yBottom=int(yBottom)
yTop=int(yTop)
yc=int((yTop+yBottom)/2)
strip=segMap[yBottom:yTop]
ys, xs=np.where(strip == f)
xMin=xs.min()
xMax=xs.max()
del ys, xs, strip
stripWidthDeg=(xMax-xMin)*wcs.getXPixelSizeDeg()
RAMax, decc=wcs.pix2wcs(xMin, yc)
RAMin, decc=wcs.pix2wcs(xMax, yc)
            stretchFactor=1/np.cos(np.radians(decTop))
            numCols=int(stripWidthDeg/(targetTileWidth*stretchFactor))
            tileWidth=np.ceil((stripWidthDeg/numCols)*100)/100
            #assert(tileWidth < targetTileWidth*1.1)
            for j in range(numCols):
RALeft=RAMax-j*tileWidth
RARight=RAMax-(j+1)*tileWidth
if RALeft < 0:
RALeft=RALeft+360
if RARight < 0:
RARight=RARight+360
# HACK: Edge-of-map handling
if handle180Wrap == True:
if RARight < 180.01 and RALeft < 180+tileWidth and RALeft > 180.01:
RARight=180.01
# NOTE: floats here to make tileDefinitions.yml readable
tileList.append({'tileName': '%d_%d_%d' % (f, i, j),
'RADecSection': [float(RARight), float(RALeft), float(decBottom), float(decTop)]})
return tileList
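# Illustrative usage sketch (the file name and tile sizes here are hypothetical):
#
#   surveyMask, wcs=chunkLoadMask("surveyMask.fits")
#   tileList=autotiler(surveyMask, wcs, targetTileWidth = 10.0, targetTileHeight = 5.0)
#   print("defined %d tiles" % (len(tileList)))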
#------------------------------------------------------------------------------------------------------------
def saveTilesDS9RegionsFile(parDict, DS9RegionFileName):
"""Writes a DS9 .reg file containing the locations of tiles defined in parDict.
Args:
parDict (:obj:`dict`): Dictionary containing the contents of the Nemo config file.
DS9RegionFileName (str): Path to DS9 regions file to be written.
"""
if type(parDict['tileDefinitions']) is not list:
raise Exception("parDict did not contain a list of tile definitions.")
tileNames=[]
coordsList=[]
for tileDict in parDict['tileDefinitions']:
ra0, ra1, dec0, dec1=tileDict['RADecSection']
coordsList.append([ra0, ra1, dec0, dec1])
tileNames.append(tileDict['tileName'])
with open(DS9RegionFileName, "w") as outFile:
outFile.write("# Region file format: DS9 version 4.1\n")
outFile.write('global color=blue dashlist=8 3 width=1 font="helvetica 10 normal roman" select=1 highlite=1 dash=0 fixed=0 edit=1 move=1 delete=1 include=1 source=1\n')
outFile.write("fk5\n")
for c, name in zip(coordsList, tileNames):
outFile.write('polygon(%.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f, %.6f) # text="%s"\n' % (c[0], c[2], c[0], c[3], c[1], c[3], c[1], c[2], name))
#-------------------------------------------------------------------------------------------------------------
def shrinkWCS(origShape, origWCS, scaleFactor):
"""Given an astWCS object and corresponding image shape, scale the WCS by scaleFactor. Used for making
downsampled quicklook images (using stitchMaps).
Args:
origShape (tuple): Shape of the original image.
origWCS (astWCS.WCS object): WCS for the original image.
scaleFactor (float): The factor by which to scale the image WCS.
Returns:
shape (tuple), WCS (astWCS.WCS object)
"""
scaledShape=[int(origShape[0]*scaleFactor), int(origShape[1]*scaleFactor)]
scaledData=np.zeros(scaledShape)
trueScaleFactor=np.array(scaledData.shape, dtype = float) / np.array(origShape, dtype = float)
offset=0.
imageWCS=origWCS.copy()
try:
oldCRPIX1=imageWCS.header['CRPIX1']
oldCRPIX2=imageWCS.header['CRPIX2']
CD11=imageWCS.header['CD1_1']
CD21=imageWCS.header['CD2_1']
CD12=imageWCS.header['CD1_2']
CD22=imageWCS.header['CD2_2']
except KeyError:
oldCRPIX1=imageWCS.header['CRPIX1']
oldCRPIX2=imageWCS.header['CRPIX2']
CD11=imageWCS.header['CDELT1']
CD21=0
CD12=0
CD22=imageWCS.header['CDELT2']
CDMatrix=np.array([[CD11, CD12], [CD21, CD22]], dtype=np.float64)
scaleFactorMatrix=np.array([[1.0/trueScaleFactor[1], 0], [0, 1.0/trueScaleFactor[0]]])
scaledCDMatrix=np.dot(scaleFactorMatrix, CDMatrix)
scaledWCS=imageWCS.copy()
scaledWCS.header['NAXIS1']=scaledData.shape[1]
scaledWCS.header['NAXIS2']=scaledData.shape[0]
scaledWCS.header['CRPIX1']=oldCRPIX1*trueScaleFactor[1]
scaledWCS.header['CRPIX2']=oldCRPIX2*trueScaleFactor[0]
scaledWCS.header['CD1_1']=scaledCDMatrix[0][0]
scaledWCS.header['CD2_1']=scaledCDMatrix[1][0]
scaledWCS.header['CD1_2']=scaledCDMatrix[0][1]
scaledWCS.header['CD2_2']=scaledCDMatrix[1][1]
scaledWCS.updateFromHeader()
return scaledShape, scaledWCS
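# Illustrative sketch of making the shape and WCS for a 4x-downsampled quicklook image
# (assumes data and wcs for a full-size map are already loaded):
#
#   quicklookShape, quicklookWCS=shrinkWCS(data.shape, wcs, 0.25)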
#-------------------------------------------------------------------------------------------------------------
def chunkLoadMask(fileName, numChunks = 8, dtype = np.uint8, returnWCS = True):
"""Load a FITS-format mask file (with default 8-bit integer values) in chunks, for memory efficiency,
at the expense of some speed. Masks in compressed format (see :meth:`saveFITS`) are supported.
Args:
fileName (:obj:`str`): Path to the FITS-format mask file.
        numChunks (:obj:`int`): Number of chunks in which to load the file. Larger numbers use less
            memory, but the mask takes a little longer to load.
        dtype (:obj:`type`, optional): Data type of the output array (default: 8-bit unsigned integer).
        returnWCS (:obj:`bool`, optional): If True, also return the WCS of the mask.
Returns:
Mask image (2d array of 8-bit unsigned integers), and optionally a WCS object.
Note:
This can also be used to load large compressed maps in a memory-efficient way by setting
``dtype = np.float32``.
"""
shape=None
with pyfits.open(fileName) as img:
for hdu in img:
if hdu.data is not None:
shape=hdu.data.shape
if returnWCS == True:
wcs=astWCS.WCS(hdu.header, mode = 'pyfits')
break
del img
height=shape[0]
chunkSize=int(height/numChunks)
maskArr=np.zeros(shape, dtype = dtype)
for i in range(numChunks):
with pyfits.open(fileName) as img:
for hdu in img:
if hdu.data is not None:
                    start=i*chunkSize
                    if i == numChunks-1:
                        # Last chunk takes all remaining rows, so nothing is dropped when
                        # height is not an exact multiple of numChunks
                        end=height
                    else:
                        end=(i+1)*chunkSize
chunk=hdu.data[start:end]
maskArr[start:end]=chunk
del chunk
del hdu.data
del img
if returnWCS == True:
return maskArr, wcs
else:
return maskArr
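# Example (an illustrative sketch; the file name is hypothetical):
#
#   mask, wcs=chunkLoadMask("surveyMask.fits", numChunks = 16)
#   print("valid area fraction: %.3f" % (mask.mean()))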
#-------------------------------------------------------------------------------------------------------------
def checkMask(fileName, numChunks = 8):
"""Checks whether a mask contains negative values (invalid) and throws an exception if this is the case.
Args:
        fileName (str): Name of the FITS format mask file to check.
        numChunks (int): Number of chunks in which to load the file (larger numbers use less memory).
    """
# This is now more horrid looking to save memory, at the expense of speed
height=None
with pyfits.open(fileName) as img:
for hdu in img:
if hdu.data is not None:
height=hdu.data.shape[0]
del img
chunkSize=int(height/numChunks)
for i in range(numChunks):
with pyfits.open(fileName) as img:
for hdu in img:
if hdu.data is not None:
start=i*chunkSize
end=(i+1)*chunkSize
                    if i == numChunks-1:
                        end=height
chunk=hdu.data[start:end].flatten()
if np.any(chunk < 0) == True:
raise Exception("Mask file '%s' contains negative values - please fix your mask." % (fileName))
del chunk
del hdu.data
del img
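# Illustrative usage sketch (hypothetical file name) - raises an exception if the mask contains
# negative values:
#   checkMask("surveyMask.fits", numChunks = 8)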
#-------------------------------------------------------------------------------------------------------------
def maskOutSources(mapData, wcs, catalog, radiusArcmin = 7.0, mask = 0.0, growMaskedArea = 1.0):
"""Given a mapData array and a catalog of source positions, replace the values at the object positions
in the map within radiusArcmin with replacement values. If mask == 'whiteNoise', this will be white
noise with mean and sigma set by the pixel values in an annulus of 1 < r < 2 * radiusArcmin.
growMaskedArea sets factor larger than radiusArcmin to set masked area to in returned mask. This can
avoid any weird artefacts making it into source lists.
Returns a dictionary with keys 'data' (mapData with mask applied), 'mask' (0-1 mask of areas masked).
"""
maskMap=np.zeros(mapData.shape)
maskedMapData=np.zeros(mapData.shape, dtype=np.float64)+mapData # otherwise, gets modified in place.
bckSubbed=subtractBackground(mapData, wcs, smoothScaleDeg = 1.4/60.0) # for source subtracting
mapInterpolator=interpolate.RectBivariateSpline(np.arange(mapData.shape[0]),
np.arange(mapData.shape[1]),
bckSubbed, kx = 1, ky = 1)
for obj in catalog:
if wcs.coordsAreInImage(obj['RADeg'], obj['decDeg']) == True:
degreesMap=np.ones(mapData.shape, dtype = float)*1e6
rRange, xBounds, yBounds=makeDegreesDistanceMap(degreesMap, wcs,
obj['RADeg'], obj['decDeg'],
20.0/60.0)
circleMask=np.less(rRange, radiusArcmin/60.0)
grownCircleMask=np.less(rRange, (radiusArcmin*growMaskedArea)/60.0)
maskMap[grownCircleMask]=1.0
if type(mask) == float or type(mask) == int:
maskedMapData[circleMask]=mask
elif mask == 'shuffle':
# How about copying random pixels from the vicinity into the area to be masked?
annulusMask=np.logical_and(np.greater(rRange, 5.0/60.0), \
np.less(rRange, 10.0/60.0))
annulusValues=mapData[annulusMask].flatten()
indices=np.random.randint(0, annulusValues.shape[0], circleMask.flatten().nonzero()[0].shape[0])
maskedMapData[circleMask]=annulusValues[indices]
elif mask == 'subtract':
peakValue=mapData[int(round(obj['y'])), int(round(obj['x']))]
sigmaDeg=(1.4/60.0)/np.sqrt(8.0*np.log(2.0))
profRDeg=np.linspace(0.0, 30.0/60.0, 5000)
profile1d=peakValue*np.exp(-((profRDeg**2)/(2*sigmaDeg**2)))
r2p=interpolate.interp1d(profRDeg, profile1d, bounds_error=False, fill_value=0.0)
profile2d=np.zeros(rRange.shape)
profMask=np.less(rRange, 1.0)
profile2d[profMask]=r2p(rRange[profMask])
maskedMapData[profMask]=maskedMapData[profMask]-profile2d[profMask]
elif mask == "whiteNoise":
# Get pedestal level and white noise level from average between radiusArcmin and 2*radiusArcmin
annulusMask=np.logical_and(np.greater(rRange, 2*radiusArcmin/60.0), \
np.less(rRange, 4*radiusArcmin/60.0))
maskedMapData[circleMask]=np.random.normal(mapData[annulusMask].mean(), \
mapData[annulusMask].std(), \
mapData[circleMask].shape)
return {'data': maskedMapData, 'mask': maskMap}
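# Illustrative usage sketch (assumes mapData, wcs, and a catalog with 'RADeg', 'decDeg' columns
# are already defined):
#   masked=maskOutSources(mapData, wcs, catalog, radiusArcmin = 7.0, mask = 'whiteNoise')
#   maskedMapData=masked['data']
#   maskMap=masked['mask']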
#-------------------------------------------------------------------------------------------------------------
def applyPointSourceMask(maskFileName, mapData, mapWCS, mask = 0.0, radiusArcmin = 2.8):
"""Given file name pointing to a point source mask (as made by maskOutSources), apply it to given mapData.
"""
img=pyfits.open(maskFileName)
maskData=img[0].data
maskedMapData=np.zeros(mapData.shape)+mapData # otherwise, gets modified in place.
# Thresholding to identify significant pixels
threshold=0
sigPix=np.array(np.greater(maskData, threshold), dtype=int)
sigPixMask=np.equal(sigPix, 1)
# Fast, simple segmentation - don't know about deblending, but doubt that's a problem for us
segmentationMap, numObjects=ndimage.label(sigPix)
# Get object positions, number of pixels etc.
objIDs=np.unique(segmentationMap)
objPositions=ndimage.center_of_mass(maskData, labels = segmentationMap, index = objIDs)
objNumPix=ndimage.sum(sigPixMask, labels = segmentationMap, index = objIDs)
for objID, pos, numPix in zip(objIDs, objPositions, objNumPix):
circleMask=np.equal(segmentationMap, objID)
if type(mask) == float or type(mask) == int:
maskedMapData[circleMask]=mask
elif mask == "subtract":
print("Add code to subtract point sources")
ipshell()
sys.exit()
elif mask == "whiteNoise":
RADeg, decDeg=mapWCS.pix2wcs(pos[1], pos[0])
if np.isnan(RADeg) == False and np.isnan(decDeg) == False:
degreesMap=np.ones(mapData.shape, dtype = float)*1e6
rRange, xBounds, yBounds=makeDegreesDistanceMap(degreesMap, mapWCS,
RADeg, decDeg,
(radiusArcmin*4)/60.0)
# Get pedestal level and white noise level from average between radiusArcmin and 2*radiusArcmin
annulusMask=np.logical_and(np.greater(rRange, radiusArcmin/60.0), \
np.less(rRange, 2*radiusArcmin/60.0))
# Below just does a quick sanity check - we don't bother masking if std == 0, because we're
# most likely applying this in the middle of a fake source sim with map set to zero for testing
sigma=mapData[annulusMask].std()
if sigma > 0:
maskedMapData[circleMask]=np.random.normal(mapData[annulusMask].mean(), \
sigma, \
mapData[circleMask].shape)
return maskedMapData
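# Illustrative usage sketch ('pointSourceMask.fits' is a hypothetical mask image on the same
# pixelisation as mapData):
#   maskedMapData=applyPointSourceMask("pointSourceMask.fits", mapData, wcs, mask = 'whiteNoise')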
#-------------------------------------------------------------------------------------------------------------
def addWhiteNoise(mapData, noisePerPix):
"""Adds Gaussian distributed white noise to mapData.
"""
noise=np.random.normal(0, noisePerPix, mapData.shape)
mapData=mapData+noise
return mapData
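# Illustrative usage sketch: add white noise with sigma = 20 map units per pixel.
#   noisyMap=addWhiteNoise(mapData, 20.0)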
#------------------------------------------------------------------------------------------------------------
def simCMBMap(shape, wcs, noiseLevel = None, beam = None, seed = None):
"""Generate a simulated CMB map, optionally convolved with the beam and with (white) noise added.
Args:
shape (:obj:`tuple`): A tuple describing the map (numpy array) shape in pixels (height, width).
wcs (:obj:`astWCS.WCS`): An astWCS object.
noiseLevel (:obj:`numpy.ndarray` or float): If a single number, this is taken as sigma (in map units,
usually uK) for generating white noise that is added across the whole map. Alternatively, an array
with the same dimensions as shape may be used, specifying sigma (in map units) per corresponding
pixel. Noise will only be added where non-zero values appear in noiseLevel.
beam (:obj:`str` or :obj:`signals.BeamProfile`): Either the file name of the text file that describes
the beam with which the map will be convolved, or a :obj:`signals.BeamProfile` object. If None,
no beam convolution is applied.
seed (:obj:`int`): The seed used for the random CMB realisation.
Returns:
A map (:obj:`numpy.ndarray`)
"""
# Power spectrum array ps here is indexed by ell, starting from 0
# i.e., each element corresponds to the power at ell = 0, 1, 2 ... etc.
ps=powspec.read_spectrum(nemo.__path__[0]+os.path.sep+"data"+os.path.sep+"planck_lensedCls.dat",
scale = True, expand = None)
ps=ps[0]
lps=np.arange(0, len(ps))
if beam is not None:
if type(beam) == str:
beam=signals.BeamProfile(beamFileName = beam)
assert(type(beam) == signals.BeamProfile)
lbeam=np.interp(lps, beam.ell, beam.Bell)
ps*=np.power(lbeam, 2)
randMap=curvedsky.rand_map(shape, wcs.AWCS, ps = ps, spin = [0,2], seed = seed)
if noiseLevel is not None:
randMap=randMap+simNoiseMap(shape, noiseLevel)
np.random.seed()
return randMap
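# Illustrative usage sketch (assumes shape, wcs are defined; 'beam.txt' is a hypothetical
# beam profile text file):
#   cmbMap=simCMBMap(shape, wcs, noiseLevel = 20.0, beam = "beam.txt", seed = 1234)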
#-------------------------------------------------------------------------------------------------------------
def simNoiseMap(shape, noiseLevel, wcs = None, lKnee = None, alpha = -3, noiseMode = 'perPixel', lmin = 100):
"""Generate a simulated noise map. This may contain just white noise, or optionally a 1/f noise component
can be generated.
Args:
shape (:obj:`tuple`): A tuple describing the map (numpy array) shape in pixels (height, width).
noiseLevel (:obj:`numpy.ndarray` or float): If a single number, this is taken as sigma (in map units,
usually uK) for generating white noise that is added across the whole map. Alternatively, an array
with the same dimensions as shape may be used, specifying sigma (in map units) per corresponding
pixel. Noise will only be added where non-zero values appear in noiseLevel.
wcs (:obj:`astWCS.WCS`, optional): WCS corresponding to the map shape.
        lKnee (:obj:`float`, optional): If given, 1/f noise will be generated using the power spectrum
            N_l = 1 + (l/lKnee)^alpha - see Appendix A of MacCrann et al. 2023.
        alpha (:obj:`float`, optional): Power-law exponent in the power spectrum used for generating 1/f noise. Has
            no effect unless lKnee is also given.
        noiseMode (:obj:`str`, optional): Either 'perPixel' or 'perSquareArcmin' - if the latter, constant noise in terms
            of surface brightness will be added (accounts for varying pixel scale, if present - which requires
            `wcs` to be supplied).
lmin (:obj:`float`, optional): If given, 1/f noise will be zeroed out below this multipole to avoid pathologically
high noise. Defaults to 100. Use None if you do not want to zero out the 1/f noise on large scales.
Returns:
A map (:obj:`numpy.ndarray`)
"""
np.random.seed()
assert(noiseMode in ['perPixel', 'perSquareArcmin'])
if noiseMode == 'perSquareArcmin' and lKnee is not None:
raise Exception("Adding 1/f noise when noiseMode != 'perPixel' is not supported yet")
if noiseMode == 'perSquareArcmin' and type(noiseLevel) == np.ndarray:
raise Exception("noiseLevel is a map - this is only currently supported if noiseMode = 'perPixel' (noiseMode = 'perSquareArcmin' given)")
if lKnee is None:
# White noise only
randMap=np.zeros(shape)
generatedNoise=np.zeros(randMap.shape)
if type(noiseLevel) == np.ndarray:
mask=np.nonzero(noiseLevel)
generatedNoise[mask]=np.random.normal(0, noiseLevel[mask], noiseLevel[mask].shape)
else:
if noiseLevel > 0:
if noiseMode == 'perPixel':
generatedNoise=np.random.normal(0, noiseLevel, randMap.shape)
else:
arcmin2Map=getPixelAreaArcmin2Map(shape, wcs)
generatedNoise=np.random.normal(0, noiseLevel/arcmin2Map, randMap.shape)
randMap=randMap+generatedNoise
else:
# 1/f noise + white noise, using Niall's routines
mlmax=6000 # following config in Niall's code, could be made a parameter
if wcs is None:
raise Exception("wcs is None - need to supply a wcs to generate a noise map with 1/f noise included.")
if type(noiseLevel) == np.ndarray:
mask=noiseLevel > 1e-07
ivarMap=np.zeros(shape)
ivarMap[mask]=1/noiseLevel[mask]**2
ivarMap=enmap.enmap(ivarMap, wcs.AWCS)
else:
ivarMap=enmap.enmap(np.ones(shape)*(1/noiseLevel**2), wcs.AWCS)
        def _mod_noise_map(ivar, Nl):
            # Start from unit-variance white noise per pixel
            map1 = enmap.rand_gauss(ivar.shape, ivar.wcs)
            lmax = len(Nl)-1
            ainfo = curvedsky.alm_info(lmax)
            alm = curvedsky.map2alm(map1, ainfo=ainfo)
            # Remove the band-limited (l <= lmax) part of the white noise...
            map2 = curvedsky.alm2map(alm, np.zeros_like(map1))
            map1 -= map2
            # ... then add it back with the power spectrum Nl applied
            ainfo.lmul(alm, Nl**0.5, alm)
            map2 = curvedsky.alm2map(alm, np.zeros_like(map1))
            map1 += map2
            # Scale by the per-pixel noise level (ivar is an inverse variance map)
            map1 *= ivar**-0.5
            # Zero out pixels with pathologically small weights
            ivar_nonzero = ivar>0.
            ivar_median = np.median(ivar[ivar_nonzero])
            valid_ivar = ivar_nonzero*(ivar>ivar_median/1.e6)
            map1[~(valid_ivar)] = 0.
            return map1
assert np.all(np.isfinite(ivarMap))
shape, wcs=ivarMap.shape, ivarMap.wcs
ells = np.arange(mlmax+1)
Nl_atm = (lKnee/ells)**-alpha + 1
Nl_atm[~np.isfinite(Nl_atm)] = 0.
if not(lmin is None): Nl_atm[ells<lmin] = 0.
assert np.all(np.isfinite(Nl_atm))
randMap = _mod_noise_map(ivarMap, Nl_atm)
assert np.all(np.isfinite(randMap))
return randMap
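# Illustrative usage sketches (assumes shape, wcs are defined):
#   whiteNoiseMap=simNoiseMap(shape, 20.0)                                    # white noise only
#   atmNoiseMap=simNoiseMap(shape, 20.0, wcs = wcs, lKnee = 2000, alpha = -3) # white + 1/f noise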
#-------------------------------------------------------------------------------------------------------------
def subtractBackground(data, wcs, RADeg = 'centre', decDeg = 'centre', smoothScaleDeg = 30.0/60.0):
"""Smoothes map with Gaussian of given scale and subtracts it, to get rid of large scale power.
If RADeg, decDeg = 'centre', then the pixel scales used to set the kernel shape will be set from that at the
centre of the WCS. Otherwise, they will be taken at the given coords.
Note that wcs is only used to figure out the pixel scales here.
"""
smoothedData=smoothMap(data, wcs, RADeg = RADeg, decDeg = decDeg, smoothScaleDeg = smoothScaleDeg)
data=data-smoothedData
return data
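# Illustrative usage sketch: remove power on scales larger than ~30 arcmin.
#   bckSubbed=subtractBackground(mapData, wcs, smoothScaleDeg = 30.0/60.0)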
#------------------------------------------------------------------------------------------------------------
def convolveMapWithBeam(data, wcs, beam, maxDistDegrees = 1.0):
"""Convolves map defined by data, wcs with the beam.
Args:
data (:obj:`numpy.ndarray`): Map to convolve, as 2d array.
wcs (:obj:`astWCS.WCS`): WCS corresponding to data (i.e., the map).
beam (:obj:`BeamProfile` or str): Either a BeamProfile object, or a string that gives the path to a
text file that describes the beam profile.
maxDistDegrees (float): Sets the size of the convolution kernel, for optimization purposes.
Returns:
Beam-convolved map (numpy array).
Note:
The pixel scale used to define the convolution kernel is evaluated at the central map pixel. So,
this routine should only be used with either pixelisations where the scale is constant or on
relatively small tiles.
"""
if type(beam) == str:
beam=signals.BeamProfile(beamFileName = beam)
# Pad the beam kernel to odd number of pixels (so we know shift to apply)
# We're only really using WCS info here for the pixel scale at the centre of the map
if data.shape[0] % 2 == 0:
yPad=1
else:
yPad=0
if data.shape[1] % 2 == 0:
xPad=1
else:
xPad=0
degreesMap=np.ones([data.shape[0]+yPad, data.shape[1]+xPad], dtype = float)*1e6
RADeg, decDeg=wcs.pix2wcs(int(degreesMap.shape[1]/2)+1, int(degreesMap.shape[0]/2)+1)
degreesMap, xBounds, yBounds=makeDegreesDistanceMap(degreesMap, wcs, RADeg, decDeg,
maxDistDegrees)
beamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, beam)
if (yBounds[1]-yBounds[0]) > beamMap.shape[1] and (yBounds[1]-yBounds[0]) % 2 == 0:
yBounds[0]=yBounds[0]-1
if (xBounds[1]-xBounds[0]) > beamMap.shape[0] and (xBounds[1]-xBounds[0]) % 2 == 0:
xBounds[0]=xBounds[0]-1
beamMap=beamMap[yBounds[0]:yBounds[1], xBounds[0]:xBounds[1]]
beamMap=beamMap/np.sum(beamMap)
# For testing for shift
# This shows we get (-1, -1) shift with scipy_convolve and odd-shaped kernel
#testMap=np.zeros([301, 301])
#yc1=151
#xc1=151
#testMap[yc1, xc1]=1.
#outMap=scipy_convolve(testMap, beamMap, mode = 'same')
#yc2, xc2=np.where(outMap == outMap.max())
#yc2=int(yc2)
#xc2=int(xc2)
#outMap=ndimage.shift(outMap, [yc1-yc2, xc1-xc2])
outMap=ndimage.shift(scipy_convolve(data, beamMap, mode = 'same'), [-1, -1])
return outMap
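# Illustrative usage sketch ('beam.txt' is a hypothetical beam profile text file):
#   beamConvolved=convolveMapWithBeam(mapData, wcs, "beam.txt", maxDistDegrees = 1.0)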
#-------------------------------------------------------------------------------------------------------------
def smoothMap(data, wcs, RADeg = 'centre', decDeg = 'centre', smoothScaleDeg = 5.0/60.0):
"""Smoothes map with Gaussian of given scale.
If RADeg, decDeg = 'centre', then the pixel scales used to set the kernel shape will be set from that at the
centre of the WCS. Otherwise, they will be taken at the given coords.
Note that wcs is only used to figure out the pixel scales here.
"""
ra0, dec0=wcs.getCentreWCSCoords()
if RADeg != 'centre':
ra0=float(RADeg)
if decDeg != 'centre':
dec0=float(decDeg)
x0, y0=wcs.wcs2pix(ra0, dec0)
x1=x0+1
y1=y0+1
ra1, dec1=wcs.pix2wcs(x1, y1)
xPixScale=astCoords.calcAngSepDeg(ra0, dec0, ra1, dec0)
yPixScale=astCoords.calcAngSepDeg(ra0, dec0, ra0, dec1)
xSmoothScalePix=smoothScaleDeg/xPixScale
ySmoothScalePix=smoothScaleDeg/yPixScale
smoothedData=ndimage.gaussian_filter(data, (ySmoothScalePix, xSmoothScalePix))
return smoothedData
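# Illustrative usage sketch: smooth with a Gaussian of scale 5 arcmin.
#   smoothedData=smoothMap(mapData, wcs, smoothScaleDeg = 5.0/60.0)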
#-------------------------------------------------------------------------------------------------------------
def getPixelAreaArcmin2Map(shape, wcs):
"""Returns a map of pixel area in arcmin2
"""
# Get pixel size as function of position
pixAreasDeg2=[]
RACentre, decCentre=wcs.getCentreWCSCoords()
x0, y0=wcs.wcs2pix(RACentre, decCentre)
x1=x0+1
for y0 in range(shape[0]):
y1=y0+1
ra0, dec0=wcs.pix2wcs(x0, y0)
ra1, dec1=wcs.pix2wcs(x1, y1)
xPixScale=astCoords.calcAngSepDeg(ra0, dec0, ra1, dec0)
yPixScale=astCoords.calcAngSepDeg(ra0, dec0, ra0, dec1)
pixAreasDeg2.append(xPixScale*yPixScale)
pixAreasDeg2=np.array(pixAreasDeg2)
pixAreasArcmin2=pixAreasDeg2*(60**2)
pixAreasArcmin2Map=np.array([pixAreasArcmin2]*shape[1]).transpose()
return pixAreasArcmin2Map
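# Illustrative usage sketch - e.g., for converting per-pixel quantities to per-square-arcmin:
#   pixAreaMap=getPixelAreaArcmin2Map(mapData.shape, wcs)
#   perArcmin2=mapData/pixAreaMap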
#-------------------------------------------------------------------------------------------------------------
def estimateContaminationFromSkySim(config, imageDict):
"""Estimate contamination by running on source-free sky simulations (CMB plus noise that we generate here
on the fly).
This uses the same kernels that were constructed and used on the real maps. The whole filtering and object
detection pipeline is run on the simulated maps repeatedly. The number of sky sims used (set by numSkySims
in the .yml config file) should be fairly large (~100) for the results to be robust (results on individual
sims can vary by a lot).
Args:
config (:obj:`startUp.NemoConfig`): Nemo configuration object.
imageDict (:obj:`dict`): A dictionary containing the output filtered maps and catalogs from running on
the real data (i.e., the output of pipelines.filterMapsAndMakeCatalogs). This will not be modified,
but is used for estimating the contamination rate by comparison to the source-free sims.
Returns:
A dictionary where each key points to an astropy Table object containing the average contamination
estimate corresponding to SNR (maximal estimate) and fixed_SNR (for the chosen reference filter
scale).
"""
simRootOutDir=config.diagnosticsDir+os.path.sep+"skySim_rank%d" % (config.rank)
SNRKeys=['fixed_SNR']
numSkySims=config.parDict['numSkySims']
resultsList=[]
for i in range(numSkySims):
# NOTE: we throw the first sim away on figuring out noiseBoostFactors
print(">>> Sky sim %d/%d [rank = %d] ..." % (i+1, numSkySims, config.rank))
t0=time.time()
# We don't copy this, because it's complicated due to containing MPI-related things (comm)
# So... we modify the config parameters in-place, and restore them before exiting this method
simConfig=config
# We use the seed here to keep the CMB sky the same across frequencies...
CMBSimSeed=np.random.randint(16777216)
# NOTE: This block below should be handled when parsing the config file - fix/remove
# Optional override of default GNFW parameters (used by Arnaud model), if used in filters given
if 'GNFWParams' not in list(simConfig.parDict.keys()):
simConfig.parDict['GNFWParams']='default'
for filtDict in simConfig.parDict['mapFilters']:
filtDict['params']['GNFWParams']=simConfig.parDict['GNFWParams']
# Delete all non-reference scale filters (otherwise we'd want to cache all filters for speed)
for filtDict in simConfig.parDict['mapFilters']:
if filtDict['label'] == simConfig.parDict['photFilter']:
break
simConfig.parDict['mapFilters']=[filtDict]
# Filling in with sim will be done when maps.preprocessMapDict is called by the filter object
for mapDict in simConfig.unfilteredMapsDictList:
mapDict['CMBSimSeed']=CMBSimSeed
# NOTE: we need to zap ONLY specific maps for when we are running in parallel
for tileName in simConfig.tileNames:
mapFileNames=glob.glob(simRootOutDir+os.path.sep+"filteredMaps"+os.path.sep+"*#%s_*.fits" % (tileName))
for m in mapFileNames:
os.remove(m)
simImageDict=pipelines.filterMapsAndMakeCatalogs(simConfig,
rootOutDir = simRootOutDir,
useCachedFilters = True)
# Write out the last sim map catalog for debugging
# NOTE: tileName here makes no sense - this should be happening in the pipeline call above
#optimalCatalogFileName=simRootOutDir+os.path.sep+"CMBSim_optimalCatalog#%s.csv" % (tileName)
#optimalCatalog=simImageDict['optimalCatalog']
#if len(optimalCatalog) > 0:
#catalogs.writeCatalog(optimalCatalog, optimalCatalogFileName.replace(".csv", ".fits"), constraintsList = ["SNR > 0.0"])
# Contamination estimate...
contaminTabDict=estimateContamination(simImageDict, imageDict, SNRKeys, 'skySim', config.diagnosticsDir)
resultsList.append(contaminTabDict)
t1=time.time()
print("... time taken for sky sim run = %.3f sec" % (t1-t0))
# Average results
avContaminTabDict={}
for k in list(resultsList[0].keys()):
avContaminTabDict[k]=atpy.Table()
for kk in list(resultsList[0][k].keys()):
avContaminTabDict[k].add_column(atpy.Column(np.zeros(len(resultsList[0][k])), kk))
for i in range(len(resultsList)):
avContaminTabDict[k][kk]=avContaminTabDict[k][kk]+resultsList[i][k][kk]
avContaminTabDict[k][kk]=avContaminTabDict[k][kk]/float(len(resultsList))
# For writing separate contamination .fits tables if running in parallel
# (if we're running in serial, then we'll get a giant file name with full tileNames list... fix later)
tileNamesLabel="#"+str(config.tileNames).replace("[", "").replace("]", "").replace("'", "").replace(", ", "#")
for k in list(avContaminTabDict.keys()):
fitsOutFileName=config.diagnosticsDir+os.path.sep+"%s_contaminationEstimate_%s.fits" % (k, tileNamesLabel)
contaminTab=avContaminTabDict[k]
contaminTab.meta['NEMOVER']=nemo.__version__
contaminTab.write(fitsOutFileName, overwrite = True)
# Restore the original config parameters (which we overrode to make the sims here)
config.restoreConfig()
return avContaminTabDict
#-------------------------------------------------------------------------------------------------------------
def estimateContaminationFromInvertedMaps(config, imageDict):
"""Run the whole filtering set up again, on inverted maps.
    Writes a DS9 .reg file, which contains only the highest SNR contaminants (since these
are most likely to be associated with artefacts in the map - e.g., point source masking).
Writes a plot and a .fits table to the diagnostics dir.
Runs over both SNR and fixed_SNR values.
Returns a dictionary containing the results
"""
invertedDict={}
ignoreKeys=['optimalCatalog', 'mergedCatalog']
for key in imageDict:
if key not in ignoreKeys:
invertedDict[key]=imageDict[key]
invertedDict=pipelines.filterMapsAndMakeCatalogs(config, measureFluxes = False, invertMap = True)
SNRKeys=['SNR', 'fixed_SNR']
contaminTabDict=estimateContamination(invertedDict, imageDict, SNRKeys, 'invertedMap', config.diagnosticsDir)
for k in list(contaminTabDict.keys()):
fitsOutFileName=config.diagnosticsDir+os.path.sep+"%s_contaminationEstimate.fits" % (k)
contaminTab=contaminTabDict[k]
contaminTab.write(fitsOutFileName, overwrite = True)
return contaminTabDict
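# Illustrative usage sketch (assumes config is a startUp.NemoConfig object and imageDict is the
# output of pipelines.filterMapsAndMakeCatalogs on the real maps):
#   contaminTabDict=estimateContaminationFromInvertedMaps(config, imageDict)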
#------------------------------------------------------------------------------------------------------------
def plotContamination(contaminTabDict, diagnosticsDir):
"""Makes contamination rate plots, output stored under diagnosticsDir
While we're at it, we write out a text file containing interpolated values for e.g., 5%, 10%
contamination levels
"""
plotSettings.update_rcParams()
for k in list(contaminTabDict.keys()):
if k.find('fixed') != -1:
SNRKey="fixed_SNR"
SNRLabel="SNR$_{\\rm 2.4}$"
else:
SNRKey="SNR"
SNRLabel="SNR"
binEdges=contaminTabDict[k][SNRKey]
cumContamination=contaminTabDict[k]['cumContamination']
plt.figure(figsize=(9,6.5))
ax=plt.axes([0.10, 0.11, 0.87, 0.87])
plt.plot(binEdges, cumContamination, 'k-')# % (l))#, label = legl)
plt.xlabel("%s" % (SNRLabel))#, fontdict = fontDict)
plt.ylabel("Contamination fraction > %s" % (SNRLabel))#, fontdict = fontDict)
allLabels=['4.0', '', '', '', '', '5.0', '', '', '', '', '6.0', '', '', '', '', '7.0', '', '', '', '', '8.0']
allTicks=np.arange(4.0, 8.2, 0.2)
plt.xticks(allTicks, allLabels)
plt.xlim(4, 8)
#plt.xlim(binMin, 10.01)#binMax)
plt.ylim(-0.05, 0.6)
#plt.legend()
plt.savefig(diagnosticsDir+os.path.sep+"%s_contaminationEstimate.pdf" % (k))
plt.close()
tck=interpolate.splrep(binEdges, contaminTabDict[k]['cumContamination'])
fineSNRs=np.linspace(binEdges.min(), binEdges.max(), 1000)
fineContamination=interpolate.splev(fineSNRs, tck, ext = 1)
with open(diagnosticsDir+os.path.sep+"%s_contaminationEstimate_usefulFractions.txt" % (k), "w") as outFile:
fracs=[0.4, 0.3, 0.2, 0.1, 0.05, 0.01]
for f in fracs:
SNRf=fineSNRs[np.argmin(abs(fineContamination-f))]
logStr="... contamination fraction = %.2f for %s > %.3f ..." % (f, SNRKey, SNRf)
print(logStr)
outFile.write(logStr+"\n")
#------------------------------------------------------------------------------------------------------------
def estimateContamination(contamSimDict, imageDict, SNRKeys, label, diagnosticsDir):
"""Performs the actual contamination estimate, makes output under diagnosticsDir.
Use label to set a prefix for output (plots / .fits tables), e.g., label = "skySim"
"""
invertedDict=contamSimDict
contaminTabDict={}
for SNRKey in SNRKeys:
#catalogs.catalog2DS9(invertedDict['optimalCatalog'], rootOutDir+os.path.sep+"skySimCatalog_%s_gtr_5.reg" % (SNRKey),
#constraintsList = ['%s > 5' % (SNRKey)])
invertedSNRs=[]
for obj in invertedDict['optimalCatalog']:
invertedSNRs.append(obj[SNRKey])
invertedSNRs=np.array(invertedSNRs)
invertedSNRs.sort()
numInverted=np.arange(len(invertedSNRs))+1
candidateSNRs=[]
for obj in imageDict['optimalCatalog']:
candidateSNRs.append(obj[SNRKey])
candidateSNRs=np.array(candidateSNRs)
candidateSNRs.sort()
numCandidates=np.arange(len(candidateSNRs))+1
binMin=4.0
binMax=20.0
binStep=0.2
binEdges=np.linspace(binMin, binMax, int((binMax-binMin)/binStep+1))
binCentres=(binEdges+binStep/2.0)[:-1]
candidateSNRHist=np.histogram(candidateSNRs, bins = binEdges)
invertedSNRHist=np.histogram(invertedSNRs, bins = binEdges)
cumSumCandidates=[]
cumSumInverted=[]
for i in range(binCentres.shape[0]):
cumSumCandidates.append(candidateSNRHist[0][i:].sum())
cumSumInverted.append(invertedSNRHist[0][i:].sum())
cumSumCandidates=np.array(cumSumCandidates, dtype = float)
cumSumInverted=np.array(cumSumInverted, dtype = float)
# Plot cumulative contamination estimate (this makes more sense than plotting purity, since we don't know
# that from what we're doing here, strictly speaking)
cumContamination=np.zeros(cumSumCandidates.shape)
mask=np.greater(cumSumCandidates, 0)
cumContamination[mask]=cumSumInverted[mask]/cumSumCandidates[mask]
# Remember, this is all cumulative (> SNR, so lower bin edges)
contaminDict={}
contaminDict['%s' % (SNRKey)]=binEdges[:-1]
contaminDict['cumSumRealCandidates']=cumSumCandidates
contaminDict['cumSumSimCandidates']=cumSumInverted
contaminDict['cumContamination']=cumContamination
# Convert to .fits table
contaminTab=atpy.Table()
for key in list(contaminDict.keys()):
contaminTab.add_column(atpy.Column(contaminDict[key], key))
contaminTabDict['%s_%s' % (label, SNRKey)]=contaminTab
return contaminTabDict
#------------------------------------------------------------------------------------------------------------
def makeModelImage(shape, wcs, catalog, beamFileName, obsFreqGHz = None, GNFWParams = 'default',\
profile = 'A10', cosmoModel = None, applyPixelWindow = True, override = None,\
validAreaSection = None, minSNR = -99, TCMBAlpha = 0, reportTimingInfo = False):
"""Make a map with the given dimensions (shape) and WCS, containing model clusters or point sources,
with properties as listed in the catalog. This can be used to either inject or subtract sources
from real maps.
Args:
shape (tuple): The dimensions of the output map (height, width) that will contain the model sources.
wcs (:obj:`astWCS.WCS`): A WCS object that defines the coordinate system of the map.
catalog (:obj:`astropy.table.Table` or str): An astropy Table object containing the catalog,
or a string containing the path to a catalog that astropy Table understands. The catalog must
include columns named 'RADeg', 'decDeg' that give object coordinates. For point sources, the
amplitude in uK must be given in a column named 'deltaT_c'. For clusters, either 'M500' (in
units of 10^14 MSun), 'z', and 'fixed_y_c' must be given (as in a mock catalog), OR the
catalog must contain a 'template' column, with templates named like, e.g., Arnaud_M1e14_z0p2
(for a z = 0.2, M500 = 1e14 MSun cluster; see the example .yml config files included with nemo).
        beamFileName (str): Path to a text file that describes the beam.
obsFreqGHz (float, optional): Used only by cluster catalogs - if given, the returned map will be
converted into delta T uK, assuming the given frequency. Otherwise, a y0 map is returned.
GNFWParams (str or dict, optional): Used only by cluster catalogs. If 'default', the Arnaud et al.
(2010) Universal Pressure Profile is assumed. Otherwise, a dictionary that specifies the profile
parameters can be given here (see gnfw.py).
        profile (str, optional): Used by cluster models only - sets the profile shape to use: 'A10'
            for Arnaud et al. (2010) UPP models, or 'B12' for Battaglia et al. (2012) models.
        cosmoModel (optional): Cosmology object, as used by :mod:`nemo.signals`. If None, the fiducial
            cosmological model defined there is used.
override (dict, optional): Used only by cluster catalogs. If a dictionary containing keys
{'M500', 'redshift'} is given, all objects in the model image are forced to have the
corresponding angular size. Used by :meth:`sourceInjectionTest`.
applyPixelWindow (bool, optional): If True, apply the pixel window function to the map.
validAreaSection (list, optional): Pixel coordinates within the wcs in the format
[xMin, xMax, yMin, yMax] that define valid area within the model map. Pixels outside this
region will be set to zero. Use this to remove overlaps between tile boundaries.
minSNR (float, optional): Only include objects with SNR (or fixed_SNR) > this value in the model.
If found, the 'SNR' column will be used, otherwise the 'fixed_SNR' column will be used. If
neither is present, no cuts on the catalog will be performed.
TCMBAlpha (float, optional): This should always be zero unless you really do want to make a
cluster model image where CMB temperature evolves as T0*(1+z)^{1-TCMBAlpha}.
reportTimingInfo (bool, optional): If True, report how long each step takes.
Returns:
Map containing injected sources, or None if there are no objects within the map dimensions.
"""
modelMap=enmap.zeros(shape, dtype = np.float32) #np.zeros(shape, dtype = np.float32)
if type(catalog) == str:
catalog=atpy.Table().read(catalog)
# This works per-tile, so throw out objects that aren't in it
t0=time.time()
catalog=catalogs.getCatalogWithinImage(catalog, shape, wcs)
t1=time.time()
if reportTimingInfo: print("makeModelImage - getting catalog within image - took %.3f sec" % (t1-t0))
# Optional SNR cuts
if 'SNR' in catalog.keys():
SNRKey='SNR'
elif 'fixed_SNR' in catalog.keys():
SNRKey='fixed_SNR'
else:
SNRKey=None
if SNRKey is not None:
catalog=catalog[catalog[SNRKey] > minSNR]
    # If we want to restrict painting to just the area mask within a tile
# (avoids double painting of objects in overlap areas)
if validAreaSection is not None and len(catalog) > 0:
t0=time.time()
x0, x1, y0, y1=validAreaSection
coords=wcs.wcs2pix(catalog['RADeg'], catalog['decDeg'])
x=np.array(coords)[:, 0]
y=np.array(coords)[:, 1]
xMask=np.logical_and(x >= x0, x < x1)
yMask=np.logical_and(y >= y0, y < y1)
cMask=np.logical_and(xMask, yMask)
catalog=catalog[cMask]
t1=time.time()
if reportTimingInfo: print("makeModelImage - cutting catalog to area mask - took %.3f sec" % (t1-t0))
if len(catalog) == 0:
return None
if cosmoModel is None:
cosmoModel=signals.fiducialCosmoModel
# Set initial max size in degrees from beam file (used for sources; clusters adjusted for each object)
t0=time.time()
numFWHM=5.0
beam=signals.BeamProfile(beamFileName = beamFileName)
maxSizeDeg=(beam.FWHMArcmin*numFWHM)/60
t1=time.time()
if reportTimingInfo: print("makeModelImage - set up beam - took %.3f sec" % (t1-t0))
t0=time.time()
if 'y_c' in catalog.keys() or 'true_y_c' in catalog.keys():
# Clusters - insert one at a time (with different scales etc.)
# We could use this to replace how GNFWParams are fed in also (easier for nemoModel script)
if profile == 'A10':
makeClusterSignalMap=signals.makeArnaudModelSignalMap
elif profile == 'B12':
makeClusterSignalMap=signals.makeBattagliaModelSignalMap
else:
raise Exception("Didn't understand profile - should be A10 or B12. This would be an excellent place\
to accept a string of GNFW parameters, but that is not implemented yet.")
count=0
# First bit here (override) is for doing injection sims faster
if override is not None:
z=override['redshift']
M500=override['M500']
y0ToInsert=catalog['y_c'].data*1e-4
RAs=catalog['RADeg'].data
decs=catalog['decDeg'].data
theta500Arcmin=signals.calcTheta500Arcmin(z, M500, cosmoModel)
maxSizeDeg=5*(theta500Arcmin/60)
modelMap=makeClusterSignalMap(z, M500, modelMap.shape, wcs, RADeg = RAs,
decDeg = decs, beam = beam,
GNFWParams = GNFWParams, amplitude = y0ToInsert,
maxSizeDeg = maxSizeDeg, convolveWithBeam = True,
cosmoModel = cosmoModel)
if obsFreqGHz is not None:
modelMap=convertToDeltaT(modelMap, obsFrequencyGHz = obsFreqGHz,
TCMBAlpha = TCMBAlpha, z = z)
else:
for row in catalog:
count=count+1
if 'true_M500c' in catalog.keys():
# This case is for when we're running from nemoMock output
# Since the idea of this is to create noise-free model images, we must use true values here
# (to avoid any extra scatter/selection effects after adding model clusters to noise maps).
M500=row['true_M500c']*1e14
z=row['redshift']
y0ToInsert=row['true_y_c']*1e-4
else:
# NOTE: This case is for running from nemo output
# We need to adapt this for when the template names are not in this format
if 'template' not in catalog.keys():
raise Exception("No M500, z, or template column found in catalog.")
bits=row['template'].split("#")[0].split("_")
M500=float(bits[1][1:].replace("p", "."))
z=float(bits[2][1:].replace("p", "."))
y0ToInsert=row['y_c']*1e-4 # or fixed_y_c...
theta500Arcmin=signals.calcTheta500Arcmin(z, M500, cosmoModel)
maxSizeDeg=5*(theta500Arcmin/60)
# Updated in place
makeClusterSignalMap(z, M500, modelMap.shape, wcs, RADeg = row['RADeg'],
decDeg = row['decDeg'], beam = beam,
GNFWParams = GNFWParams, amplitude = y0ToInsert,
maxSizeDeg = maxSizeDeg, convolveWithBeam = True,
cosmoModel = cosmoModel, omap = modelMap,
obsFrequencyGHz = obsFreqGHz, TCMBAlpha = TCMBAlpha)
else:
# Sources - slower but more accurate way
for row in catalog:
if validAreaSection is not None:
x0, x1, y0, y1=validAreaSection
x, y=wcs.wcs2pix(row['RADeg'], row['decDeg'])
if (x >= x0 and x < x1 and y >= y0 and y < y1) == False:
continue
degreesMap=np.ones(modelMap.shape, dtype = float)*1e6 # NOTE: never move this
degreesMap, xBounds, yBounds=makeDegreesDistanceMap(degreesMap, wcs,
row['RADeg'], row['decDeg'],
maxSizeDeg)
signalMap=signals.makeBeamModelSignalMap(degreesMap, wcs, beam)*row['deltaT_c']
modelMap=modelMap+signalMap
t1=time.time()
if reportTimingInfo: print("makeModelImage - painting objects - took %.3f sec" % (t1-t0))
# Optional: apply pixel window function - generally this should be True
# (because the source-insertion routines in signals.py interpolate onto the grid rather than average)
if applyPixelWindow == True:
t0=time.time()
modelMap=enmap.apply_window(modelMap, pow = 1.0)
t1=time.time()
if reportTimingInfo: print("makeModelImage - pix win application - took %.3f sec" % (t1-t0))
return modelMap
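# Illustrative usage sketch (assumes shape, wcs are defined; file names are hypothetical):
# paint model sources into a blank map, then subtract them from a real map.
#   modelMap=makeModelImage(shape, wcs, "catalog.fits", "beam.txt", obsFreqGHz = 150.0)
#   if modelMap is not None:
#       sourceSubtractedMap=mapData-modelMap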
#------------------------------------------------------------------------------------------------------------
def sourceInjectionTest(config):
"""Insert sources with known positions and properties into the map, apply the filter, and record their
offset with respect to the true location as a function of S/N (for the fixed reference scale only).
If the inserted sources are clusters, the Q function will be applied to the output fluxes, to account
for any mismatch between the reference filter scale and the inserted clusters.
Writes output to the diagnostics/ directory.
Args:
config (:obj:`nemo.startUp.NemoConfig`): Nemo configuration object.
Returns:
An astropy Table containing recovered position offsets and fluxes versus fixed_SNR for inserted
sources.
Note:
Injection tests for clusters use the reference filter only (set with the `photFilter` keyword
in the config). Input amplitudes for clusters are in `y_c`, while output is in `fixed_y_c`
(because the reference filter is used). Similarly, output SNR is `fixed_SNR`, although the
output column is labelled as `SNR`.
"""
# WARNING: For multi-pass mode, this has the desired behaviour IF this is called after a nemo run
# (i.e., the config is already set to the last filterSet)
# But, can we guarantee that? Probably not. But we could put a warning in the docs for now?
# This should perhaps be a config parameter
realExclusionRadiusArcmin=5.0
# This should make it quicker to generate test catalogs (especially when using tiles)
selFn=completeness.SelFn(config.selFnDir, 4.0, configFileName = config.configFileName,
enableCompletenessCalc = False, setUpAreaMask = True,
tileNames = config.allTileNames)
print(">>> Position recovery test [rank = %d] ..." % (config.rank))
if 'sourceInjectionIterations' not in config.parDict.keys():
numIterations=1
else:
numIterations=config.parDict['sourceInjectionIterations']
    # For backwards compatibility: if the config doesn't specify models to use, assume point sources
if 'sourceInjectionModels' in config.parDict.keys():
clusterMode=True
sourceInjectionModelList=config.parDict['sourceInjectionModels']
SNRCol='SNR'
fluxCol='y_c'
noiseLevelCol='err_y_c'
for sourceInjectionModel in sourceInjectionModelList:
theta500Arcmin=signals.calcTheta500Arcmin(sourceInjectionModel['redshift'],
sourceInjectionModel['M500'],
signals.fiducialCosmoModel)
label='%.2f' % (theta500Arcmin)
sourceInjectionModel['label']=label
sourceInjectionModel['theta500Arcmin']=theta500Arcmin
else:
# Sources
clusterMode=False
sourceInjectionModelList=[{'label': 'pointSource'}]
SNRCol='SNR'
fluxCol='deltaT_c'
noiseLevelCol='err_deltaT_c'
# This isn't really important as avoidance radius will stop us putting in too many sources
if 'sourcesPerTile' not in config.parDict.keys():
numSourcesPerTile=300
else:
numSourcesPerTile=config.parDict['sourcesPerTile']
# We need the actual catalog to throw out spurious 'recoveries'
# i.e., we only want to cross-match with objects we injected
catFileName=config.rootOutDir+os.path.sep+"%s_optimalCatalog.fits" % (os.path.split(config.rootOutDir)[-1])
if os.path.exists(catFileName) == False:
raise Exception("Catalog file '%s' not found - needed to do source injection test." % (catFileName))
realCatalog=atpy.Table().read(catFileName)
# Run each scale / model and then collect everything into one table afterwards
# NOTE: These dictionaries contain recovered measurements from running the finder
RADegDict={}
decDegDict={}
SNRDict={}
rArcminDict={}
inFluxDict={}
outFluxDict={}
noiseLevelDict={}
tileNamesDict={}
# NOTE: This list collects all the input catalogs
allInputCatalogs=[]
modelCount=0
for sourceInjectionModel in sourceInjectionModelList:
modelCount=modelCount+1
print(">>> Source injection model: %d/%d" % (modelCount, len(sourceInjectionModelList)))
RADegDict[sourceInjectionModel['label']]=[]
decDegDict[sourceInjectionModel['label']]=[]
SNRDict[sourceInjectionModel['label']]=[]
rArcminDict[sourceInjectionModel['label']]=[]
inFluxDict[sourceInjectionModel['label']]=[]
outFluxDict[sourceInjectionModel['label']]=[]
noiseLevelDict[sourceInjectionModel['label']]=[]
tileNamesDict[sourceInjectionModel['label']]=[]
for i in range(numIterations):
print(">>> Source injection and recovery test %d/%d [rank = %d]" % (i+1, numIterations, config.rank))
# NOTE: This block below should be handled when parsing the config file - fix/remove
# Optional override of default GNFW parameters (used by Arnaud model), if used in filters given
if 'GNFWParams' not in list(config.parDict.keys()):
config.parDict['GNFWParams']='default'
for filtDict in config.parDict['mapFilters']:
filtDict['params']['GNFWParams']=config.parDict['GNFWParams']
# We don't want to save/cache position recovery test maps
for filtDict in config.parDict['mapFilters']:
keysToFalsify=['saveFilteredMaps', 'savePlots']
for key in keysToFalsify:
filtDict['params'][key]=False
# Delete all non-reference scale filters (otherwise we'd want to cache all filters for speed)
# NOTE: As it stands, point-source only runs may not define photFilter - we need to handle that
# That should be obvious, as mapFilters will only have one entry
for filtDict in config.parDict['mapFilters']:
if filtDict['label'] == config.parDict['photFilter']:
break
config.parDict['mapFilters']=[filtDict]
# Filling maps with injected sources will be done when maps.preprocessMapDict is called by the filter object
# So, we only generate the catalog here
print("... generating mock catalog")
if config.rank == 0:
if filtDict['class'].find("ArnaudModel") != -1:
if 'sourceInjectionAmplitudeRange' not in config.parDict.keys():
amplitudeRange=[0.001, 10]
else:
amplitudeRange=config.parDict['sourceInjectionAmplitudeRange']
if amplitudeRange == 'auto':
amplitudeRange=[realCatalog['fixed_y_c'].min()*0.5, realCatalog['fixed_y_c'].max()]
if 'sourceInjectionDistribution' not in config.parDict.keys():
distribution='linear'
else:
distribution=config.parDict['sourceInjectionDistribution']
# Quick test catalog - takes < 1 sec to generate
mockCatalog=catalogs.generateTestCatalog(config, numSourcesPerTile,
amplitudeColumnName = fluxCol,
amplitudeRange = amplitudeRange,
amplitudeDistribution = distribution,
selFn = selFn, maskDilationPix = 20)
# Or... proper mock, but this takes ~24 sec for E-D56
#mockCatalog=pipelines.makeMockClusterCatalog(config, writeCatalogs = False, verbose = False)[0]
injectSources={'catalog': mockCatalog, 'GNFWParams': config.parDict['GNFWParams'],
'override': sourceInjectionModel, 'profile': 'A10'}
elif filtDict['class'].find("Beam") != -1:
if 'sourceInjectionAmplitudeRange' not in config.parDict.keys():
amplitudeRange=[1, 1000]
else:
amplitudeRange=config.parDict['sourceInjectionAmplitudeRange']
if 'sourceInjectionDistribution' not in config.parDict.keys():
distribution='log'
else:
distribution=config.parDict['sourceInjectionDistribution']
mockCatalog=catalogs.generateTestCatalog(config, numSourcesPerTile,
amplitudeColumnName = fluxCol,
amplitudeRange = amplitudeRange,
amplitudeDistribution = distribution,
selFn = selFn, maskDilationPix = 20)
injectSources={'catalog': mockCatalog, 'override': sourceInjectionModel, 'profile': None}
else:
raise Exception("Don't know how to generate injected source catalogs for filterClass '%s'" % (filtDict['class']))
if 'theta500Arcmin' in sourceInjectionModel.keys():
mockCatalog['theta500Arcmin']=sourceInjectionModel['theta500Arcmin']
allInputCatalogs.append(mockCatalog)
else:
injectSources=None
mockCatalog=None
if config.MPIEnabled == True:
bcastInjectSources=config.comm.bcast(injectSources, root = 0)
config.comm.barrier()
if config.rank > 0:
injectSources=bcastInjectSources
mockCatalog=bcastInjectSources['catalog']
for mapDict in config.unfilteredMapsDictList:
mapDict['injectSources']=injectSources
# Ideally we shouldn't have blank tiles... but if we do, skip
if len(mockCatalog) > 0:
# Uncomment line below if want to save filtered maps for quick and dirty debugging
# Overwrites the original filtered map, but can compare to 'stitched' map
# config.parDict['mapFilters'][0]['params']['saveFilteredMaps']=True
recCatalog=pipelines.filterMapsAndMakeCatalogs(config, useCachedFilters = True,
useCachedRMSMap = True, writeAreaMask = False,
writeFlagMask = False)
# NOTE: Below here only rank 0 really needed (could then broadcast result)
# We should be conservative in removing potential matches with real objects
# Because we have a huge sky area and there's no reason to risk contamination of this kind
# Effectively this is the same as using 5' circular holes in the survey mask on real objects
# (but actually adding the avoidance radius parameter to the test catalogs really solved this)
if len(recCatalog) > 0:
recCatalog=catalogs.removeCrossMatched(recCatalog, realCatalog,
radiusArcmin = realExclusionRadiusArcmin)
if len(recCatalog) > 0:
try:
x_mockCatalog, x_recCatalog, rDeg=catalogs.crossMatch(mockCatalog, recCatalog,
radiusArcmin = realExclusionRadiusArcmin)
                        except Exception:
raise Exception("Source injection test: cross match failed on tileNames = %s; mockCatalog length = %d; recCatalog length = %d" % (str(config.tileNames), len(mockCatalog), len(recCatalog)))
# Catching any crazy mismatches, writing output for debugging
if clusterMode == False and np.logical_and(rDeg > 1.5/60, x_recCatalog['SNR'] > 10).sum() > 0:
mask=np.logical_and(rDeg > 1.5/60, x_recCatalog['SNR'] > 10)
config.parDict['mapFilters'][0]['params']['saveFilteredMaps']=True
recCatalog2=pipelines.filterMapsAndMakeCatalogs(config, useCachedFilters = True,
writeAreaMask = False, writeFlagMask = False)
recCatalog2=catalogs.removeCrossMatched(recCatalog2, realCatalog,
radiusArcmin = realExclusionRadiusArcmin)
                            # NOTE: simRootOutDir/tileName are not defined in this routine - write debug
                            # output under the diagnostics directory instead
                            debugDir=config.diagnosticsDir+os.path.sep+"sourceInjection_rank%d" % (config.rank)
                            os.makedirs(debugDir, exist_ok = True)
                            catalogs.catalog2DS9(x_recCatalog[mask], debugDir+os.path.sep+"mismatch-rec.reg")
                            catalogs.catalog2DS9(x_mockCatalog[mask], debugDir+os.path.sep+"mismatch-input.reg",
                                                 color = 'red')
                            msg="Caught recovered source at large offset - check output under %s" % (debugDir)
if config.parDict['haltOnPositionRecoveryProblem'] == True:
raise Exception(msg)
else:
print("... Warning: %s ..." % (msg))
# Store everything - analyse later
RADegDict[sourceInjectionModel['label']]=RADegDict[sourceInjectionModel['label']]+x_recCatalog['RADeg'].tolist()
decDegDict[sourceInjectionModel['label']]=decDegDict[sourceInjectionModel['label']]+x_recCatalog['decDeg'].tolist()
SNRDict[sourceInjectionModel['label']]=SNRDict[sourceInjectionModel['label']]+x_recCatalog[SNRCol].tolist()
rArcminDict[sourceInjectionModel['label']]=rArcminDict[sourceInjectionModel['label']]+(rDeg*60).tolist()
inFluxDict[sourceInjectionModel['label']]=inFluxDict[sourceInjectionModel['label']]+x_mockCatalog[fluxCol].tolist()
outFluxDict[sourceInjectionModel['label']]=outFluxDict[sourceInjectionModel['label']]+x_recCatalog[fluxCol].tolist()
noiseLevelDict[sourceInjectionModel['label']]=noiseLevelDict[sourceInjectionModel['label']]+x_recCatalog[noiseLevelCol].tolist()
tileNamesDict[sourceInjectionModel['label']]=tileNamesDict[sourceInjectionModel['label']]+x_recCatalog['tileName'].tolist()
RADegDict[sourceInjectionModel['label']]=np.array(RADegDict[sourceInjectionModel['label']])
decDegDict[sourceInjectionModel['label']]=np.array(decDegDict[sourceInjectionModel['label']])
SNRDict[sourceInjectionModel['label']]=np.array(SNRDict[sourceInjectionModel['label']])
rArcminDict[sourceInjectionModel['label']]=np.array(rArcminDict[sourceInjectionModel['label']])
inFluxDict[sourceInjectionModel['label']]=np.array(inFluxDict[sourceInjectionModel['label']])
outFluxDict[sourceInjectionModel['label']]=np.array(outFluxDict[sourceInjectionModel['label']])
noiseLevelDict[sourceInjectionModel['label']]=np.array(noiseLevelDict[sourceInjectionModel['label']])
tileNamesDict[sourceInjectionModel['label']]=np.array(tileNamesDict[sourceInjectionModel['label']])
# Collecting all results into one giant table
models=[]
theta500s=[]
RAs=[]
decs=[]
SNRs=[]
rArcmin=[]
inFlux=[]
outFlux=[]
noiseLevel=[]
tileNames=[]
for sourceInjectionModel in sourceInjectionModelList:
label=sourceInjectionModel['label']
if 'theta500Arcmin' in sourceInjectionModel.keys():
theta500s=theta500s+[sourceInjectionModel['theta500Arcmin']]*len(SNRDict[label])
models=models+[label]*len(SNRDict[label])
RAs=RAs+RADegDict[label].tolist()
decs=decs+decDegDict[label].tolist()
SNRs=SNRs+SNRDict[label].tolist()
rArcmin=rArcmin+rArcminDict[label].tolist()
inFlux=inFlux+inFluxDict[label].tolist()
outFlux=outFlux+outFluxDict[label].tolist()
noiseLevel=noiseLevel+noiseLevelDict[label].tolist()
tileNames=tileNames+tileNamesDict[label].tolist()
resultsTable=atpy.Table()
resultsTable.add_column(atpy.Column(RAs, 'RADeg'))
resultsTable.add_column(atpy.Column(decs, 'decDeg'))
resultsTable.add_column(atpy.Column(models, 'sourceInjectionModel'))
if len(theta500s) == len(resultsTable):
resultsTable.add_column(atpy.Column(theta500s, 'theta500Arcmin'))
resultsTable.add_column(atpy.Column(SNRs, SNRCol))
resultsTable.add_column(atpy.Column(rArcmin, 'rArcmin'))
resultsTable.add_column(atpy.Column(inFlux, 'inFlux'))
resultsTable.add_column(atpy.Column(outFlux, 'outFlux'))
resultsTable.add_column(atpy.Column(noiseLevel, 'noiseLevel'))
resultsTable.add_column(atpy.Column(tileNames, 'tileName'))
# Store the giant combined input catalog as well, for completeness calculations
# NOTE: Not all objects in this may have been injected (masking, avoiding overlap etc.)
if config.rank == 0:
allInputTab=atpy.vstack(allInputCatalogs)
allInputTab.rename_column(fluxCol, "inFlux")
allInputTab=catalogs.removeCrossMatched(allInputTab, realCatalog, radiusArcmin = realExclusionRadiusArcmin)
allInputTab.write(config.selFnDir+os.path.sep+"sourceInjectionInputCatalog.fits", overwrite = True)
# Restore the original config parameters (which we overrode here)
config.restoreConfig()
return resultsTable
#------------------------------------------------------------------------------------------------------------
def positionRecoveryAnalysis(posRecTable, plotFileName, percentiles = [50, 95, 99.7],
sourceInjectionModel = None, plotRawData = True, rawDataAlpha = 1,
pickleFileName = None, selFnDir = None):
"""Estimate and plot position recovery accuracy as function of fixed filter scale S/N (fixed_SNR), using
the contents of posRecTable (see positionRecoveryTest).
Args:
posRecTable (:obj:`astropy.table.Table`): Table containing recovered position offsets versus SNR
or fixed_SNR for various cluster/source models (produced by sourceInjectionTest).
plotFileName (str): Path where the plot file will be written.
percentiles (list, optional): List of percentiles to plot (some interpolation will be done) and
for which corresponding model fit parameters will be saved (if selFnDir is not None).
sourceInjectionModel (str, optional): If given, select only objects matching the given source
injection model name from the input table. This can be used to get results for individual
cluster scales, for example.
        plotRawData (bool, optional): Plot the raw (fixed_SNR, positional offset) data in the background.
        rawDataAlpha (float, optional): Alpha (transparency) value used when plotting the raw data.
        pickleFileName (string, optional): Saves the percentile contours data as a pickle file if not None.
            This is saved as a dictionary with top-level keys named according to percentiles.
selFnDir (string, optional): If given, model fit parameters will be written to a file named
posRecModelParameters.txt under the given selFn directory path.
"""
# Sources or clusters table?
tab=posRecTable
if len(tab) == 0:
return None
if np.unique(tab['sourceInjectionModel'])[0] == 'pointSource':
SNRCol='SNR'
plotSNRLabel="SNR"
rArcminThreshold=np.linspace(0, 5, 201)
plotUnits="arcsec"
plotUnitsMultiplier=60
plotUnitsLabel="$^{\prime\prime}$"
else:
# Clusters - SNR is really fixed_SNR here, because injection sims use only ref filter
SNRCol='SNR'
plotSNRLabel="fixed_SNR"
rArcminThreshold=np.linspace(0, 10, 101)
plotUnits="arcmin"
plotUnitsMultiplier=1
plotUnitsLabel="$^\prime$"
# Optional cut on injected signal model
if sourceInjectionModel is not None:
tab=tab[tab['sourceInjectionModel'] == str(sourceInjectionModel)]
# Evaluate %-age of sample in bins of SNR within some rArcmin threshold
# No longer separating by input model (clusters are all shapes anyway)
SNREdges=np.linspace(3.0, 10.0, 36)#np.linspace(0, 10, 101)
SNRCentres=(SNREdges[1:]+SNREdges[:-1])/2.
grid=np.zeros([rArcminThreshold.shape[0], SNREdges.shape[0]-1])
totalGrid=np.zeros(grid.shape)
withinRGrid=np.zeros(grid.shape)
for i in range(SNREdges.shape[0]-1):
SNRMask=np.logical_and(tab[SNRCol] >= SNREdges[i], tab[SNRCol] < SNREdges[i+1])
for j in range(rArcminThreshold.shape[0]):
total=SNRMask.sum()
withinR=(tab['rArcmin'][SNRMask] < rArcminThreshold[j]).sum()
totalGrid[j, i]=total
withinRGrid[j, i]=withinR
if total > 0:
grid[j, i]=withinR/total
# What we want are contours of constant prob - easiest to get this via matplotlib
levelsList=np.array(percentiles)/100.
contours=plt.contour(SNRCentres, rArcminThreshold, grid, levels = levelsList)
minSNR=SNRCentres[np.sum(grid, axis = 0) > 0].min()
maxSNR=SNRCentres[np.sum(grid, axis = 0) > 0].max()
plt.close()
# We make our own plot so we use consistent colours, style (haven't fiddled with contour rc settings)
plotSettings.update_rcParams()
plt.figure(figsize=(9,6.5))
ax=plt.axes([0.11, 0.11, 0.88, 0.87])
if plotRawData == True:
plt.plot(posRecTable[SNRCol], posRecTable['rArcmin']*plotUnitsMultiplier,
'.', color = '#A0A0A0', alpha = rawDataAlpha)
contoursDict={}
for i in range(len(levelsList)):
vertices=contours.collections[i].get_paths()[0].vertices
SNRs=vertices[:, 0]
rArcminAtProb=vertices[:, 1]
labelStr="%.1f" % (percentiles[i]) + "%"
contoursDict[labelStr]={SNRCol: SNRs, 'rArcmin': rArcminAtProb}
plt.plot(SNRs, rArcminAtProb*plotUnitsMultiplier, label = labelStr, lw = 3)
plt.xlim(minSNR, maxSNR)
#plt.ylim(0, 5)
#plt.ylim(0,3)
plt.legend(loc = 'upper right')
plt.xlabel(plotSNRLabel)
plt.ylabel("Recovered Position Offset (%s)" % (plotUnitsLabel))
plt.savefig(plotFileName)
plt.close()
# Save %-ile contours in case we want to use them in some modelling later
if pickleFileName is not None:
with open(pickleFileName, "wb") as pickleFile:
pickler=pickle.Pickler(pickleFile)
pickler.dump(contoursDict)
# Fit and save a position recovery model under selFn directory
if selFnDir is not None:
# This extra plot isn't really necessary
outDir, fileName=os.path.split(os.path.abspath(plotFileName))
fitPlotFileName=outDir+os.path.sep+"modelFits_"+fileName
keys=contoursDict.keys()
fitParamsDict={}
plotSettings.update_rcParams()
plt.figure(figsize=(9,6.5), dpi = 300)
ax=plt.axes([0.11, 0.11, 0.88, 0.87])
if plotRawData == True:
posRecTable=tab
plt.plot(posRecTable[SNRCol], posRecTable['rArcmin']*plotUnitsMultiplier,
'.', color = '#A0A0A0', alpha = rawDataAlpha)
for key in keys:
a=contoursDict[key]
valid=np.where(a[SNRCol] >= 4.1)
snr=a[SNRCol][valid]
rArcmin=a['rArcmin'][valid]
try:
results=optimize.curve_fit(catalogs._posRecFitFunc, snr, rArcmin)
            except Exception:
print("... WARNING: curve_fit failed for key = %s ..." % (key))
continue
bestFitSNRFold, bestFitPedestal, bestFitNorm=results[0]
fitParamsDict[key]=np.array([bestFitSNRFold, bestFitPedestal, bestFitNorm])
fitSNRs=np.linspace(4, 10, 100)
plt.plot(fitSNRs,
catalogs._posRecFitFunc(fitSNRs, bestFitSNRFold, bestFitPedestal, bestFitNorm)*plotUnitsMultiplier,
'-', label = key)
#plt.ylim(0, 3)
plt.legend(loc = 'upper right')
plt.xlim(snr.min(), snr.max())
plt.xlabel(plotSNRLabel)
plt.ylabel("Recovered Position Offset (%s)" % (plotUnitsLabel))
plt.savefig(fitPlotFileName)
plt.close()
# Save the fits
outFileName=selFnDir+os.path.sep+"posRecModelFits.pkl"
with open(outFileName, "wb") as pickleFile:
pickler=pickle.Pickler(pickleFile)
pickler.dump(fitParamsDict)
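# Illustrative usage sketch (assumes posRecTable is the output of sourceInjectionTest; paths are
# hypothetical):
#   positionRecoveryAnalysis(posRecTable, "diagnostics/positionRecovery.pdf", selFnDir = "selFn")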
#------------------------------------------------------------------------------------------------------------
def noiseBiasAnalysis(sourceInjTable, plotFileName, sourceInjectionModel = None):
"""Estimate the noise bias from the ratio of input to recovered flux as a function of signal-to-noise.
    Args:
        sourceInjTable (:obj:`astropy.table.Table`): Table containing recovered fluxes and positions versus
            fixed_SNR for various cluster/source models (produced by sourceInjectionTest).
        plotFileName (str): Path where the plot file will be written.
        sourceInjectionModel (str, optional): If given, restrict analysis to only objects matching this.
Notes:
For clusters, bear in mind this only makes sense if any mismatch between the inserted cluster's
shape and the signal assumed by the filter is taken into account. This is done using the Q-function
in sourceInjectionTest.
"""
print("Work in progress - skipped")
return None
#---------------------------------------------------------------------------------------------------
def saveFITS(outputFileName, mapData, wcs, compressionType = None):
"""Writes a map (2d image array) to a new FITS file.
Args:
outputFileName (str): Filename of output FITS image.
mapData (:obj:`np.ndarray`): Map data array.
wcs (:obj:`astWCS.WCS`): Map WCS object.
compressionType (str, optional): If given, the data will be compressed using the given
method (as understood by :mod:`astropy.io.fits`). Use `PLIO_1` for masks,
and `RICE_1` for other image data that can stand lossy compression. If None,
the image data is not compressed.
"""
    if wcs is not None:
        wcs.header['NEMOVER']=nemo.__version__
    if os.path.exists(outputFileName):
        os.remove(outputFileName)
    if compressionType is None:
        if wcs is not None:
            hdu=pyfits.PrimaryHDU(mapData, wcs.header)
        else:
            hdu=pyfits.PrimaryHDU(mapData, None)
    else:
        # Pick the storage dtype before branching on the WCS, so it is defined
        # whether or not a WCS was given
        if compressionType == 'PLIO_1':
            dtype=np.uint8
        else:
            dtype=np.float32
        if wcs is not None:
            hdu=pyfits.CompImageHDU(np.array(mapData, dtype = dtype), wcs.header,
                                    compression_type = compressionType)
        else:
            hdu=pyfits.CompImageHDU(np.array(mapData, dtype = dtype), None,
                                    compression_type = compressionType)
newImg=pyfits.HDUList()
newImg.append(hdu)
newImg.writeto(outputFileName)
newImg.close()
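# Example usage (a sketch; the file names and arrays below are hypothetical):
#   saveFITS("mask.fits", maskData, wcs, compressionType = 'PLIO_1')   # lossless, good for masks
#   saveFITS("map.fits", mapData, wcs, compressionType = 'RICE_1')     # lossy, for other float maps
#   saveFITS("map.fits", mapData, wcs)                                 # uncompressed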
#---------------------------------------------------------------------------------------------------
def makeDegreesDistanceMap(degreesMap, wcs, RADeg, decDeg, maxDistDegrees):
"""Fills (in place) the 2d array degreesMap with distance in degrees from the given position,
out to some user-specified maximum distance.
Args:
degreesMap (:obj:`np.ndarray`): Map (2d array) that will be filled with angular distance
from the given coordinates. Probably you should feed in an array set to some extreme
initial value (e.g., 1e6 everywhere) to make it easy to filter for pixels near the
object coords afterwards.
wcs (:obj:`astWCS.WCS`): WCS corresponding to degreesMap.
RADeg (float): RA in decimal degrees of position of interest (e.g., object location).
decDeg (float): Declination in decimal degrees of position of interest (e.g., object
location).
        maxDistDegrees (float): The maximum radius out to which distance will be calculated.
Returns:
A map (2d array) of distance in degrees from the given position,
(min x, max x) pixel coords corresponding to maxDistDegrees box,
(min y, max y) pixel coords corresponding to maxDistDegrees box
Note:
This routine measures the pixel scale local to the given position, then assumes that it
does not change. So, this routine may only be accurate close to the given position,
depending upon the WCS projection used.
"""
x0, y0=wcs.wcs2pix(RADeg, decDeg)
ra0, dec0=RADeg, decDeg
ra1, dec1=wcs.pix2wcs(x0+1, y0+1)
xPixScale=astCoords.calcAngSepDeg(ra0, dec0, ra1, dec0)
yPixScale=astCoords.calcAngSepDeg(ra0, dec0, ra0, dec1)
xDistPix=int(round((maxDistDegrees)/xPixScale))
yDistPix=int(round((maxDistDegrees)/yPixScale))
Y=degreesMap.shape[0]
X=degreesMap.shape[1]
minX=int(round(x0))-xDistPix
maxX=int(round(x0))+xDistPix
minY=int(round(y0))-yDistPix
maxY=int(round(y0))+yDistPix
if minX < 0:
minX=0
if maxX > X:
maxX=X
if minY < 0:
minY=0
if maxY > Y:
maxY=Y
xDeg=(np.arange(degreesMap.shape[1])-x0)*xPixScale
yDeg=(np.arange(degreesMap.shape[0])-y0)*yPixScale
for i in range(minY, maxY):
degreesMap[i][minX:maxX]=np.sqrt(yDeg[i]**2+xDeg[minX:maxX]**2)
return degreesMap, [minX, maxX], [minY, maxY]
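# Example usage (a sketch; the shape, WCS object, and coordinates are hypothetical):
#   degreesMap=np.ones([600, 600])*1e6   # extreme initial value, as the docstring suggests
#   degreesMap, xBounds, yBounds=makeDegreesDistanceMap(degreesMap, wcs, 150.0, 2.0, 0.5)
#   nearMask=degreesMap < 0.5            # selects pixels within 0.5 degrees of the object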
#---------------------------------------------------------------------------------------------------
def makeExtendedSourceMask(config, tileName):
"""Find extended sources in all maps, adding an extended mask to the Nemo config. Each frequency
map will then have extended mask holes filled when preprocess is called.
"""
settings=config.parDict['findAndMaskExtended']
maskCube=[]
for mapDict in config.unfilteredMapsDictList:
data, wcs=mapDict.loadTile('mapFileName', tileName, returnWCS = True)
weights=mapDict.loadTile('weightsFileName', tileName)
validMask=np.nonzero(weights)
whiteNoiseLevel=np.zeros(weights.shape)
whiteNoiseLevel[validMask]=1/np.sqrt(weights[validMask]) # Assumed inverse variance
# Isolate a scale that's extended
s1=subtractBackground(data, wcs, smoothScaleDeg = settings['bigScaleDeg'])
s2=subtractBackground(data, wcs, smoothScaleDeg = settings['smallScaleDeg'])
s=s1-s2
del s1, s2
# Make a simple global 3-sigma clipped noise estimate from the filtered map
# Then scale that according to the white noise level map from the map maker
# Assume that median white noise level there should correspond with our global clipped noise estimate
# (we were using mean but that blows up in edge tiles)
mean=0
sigma=1e6
vals=s.flatten()
for i in range(10):
mask=np.less(abs(vals-mean), 3*sigma)
mean=np.mean(vals[mask])
sigma=np.std(vals[mask])
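        # `sigma` is now the 3-sigma-clipped global noise estimate of the filtered map;
        # rescaling the white noise map so that its median matches `sigma` gives a
        # per-pixel noise estimate consistent with that map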
scaleFactor=sigma/np.median(whiteNoiseLevel[validMask])
whiteNoiseLevel[validMask]=whiteNoiseLevel[validMask]*scaleFactor
snr=np.zeros(s.shape)
snr[validMask]=s[validMask]/whiteNoiseLevel[validMask]
# Mask set such that 1 = masked, 0 = not masked
extendedMask=np.array(np.greater(snr, settings['thresholdSigma']), dtype = np.uint8)
if 'dilationPix' in settings.keys() and settings['dilationPix'] > 0:
for i in range(settings['dilationPix']):
extendedMask=mahotas.dilate(extendedMask)
extendedMask[extendedMask > 0]=1
maskCube.append(extendedMask)
maskCube=np.array(maskCube, dtype = np.uint8)
extendedMask=maskCube.sum(axis = 0)
extendedMask[extendedMask > 0]=1
# Optionally cut any small objects
if 'minSizeArcmin2' in settings.keys() and settings['minSizeArcmin2'] > 0:
arcmin2Map=getPixelAreaArcmin2Map(extendedMask.shape, wcs)
segMap, numObjects=ndimage.label(extendedMask)
for i in range(1, numObjects+1):
if arcmin2Map[segMap == i].sum() < settings['minSizeArcmin2']:
extendedMask[segMap == i]=0
os.makedirs(config.diagnosticsDir+os.path.sep+"extendedMask", exist_ok = True)
outFileName=config.diagnosticsDir+os.path.sep+"extendedMask"+os.path.sep+tileName+".fits"
saveFITS(outFileName, extendedMask, wcs, compressionType = 'PLIO_1')
for mapDict in config.unfilteredMapsDictList:
mapDict['extendedMask']=config.diagnosticsDir+os.path.sep+"extendedMask"
#------------------------------------------------------------------------------------------------------------
def makeMaskFromDS9PolyRegionFile(regionFileName, shape, wcs):
"""Make a mask from a DS9 region file. The region file must have been created with RA, dec coordinates
given in decimal degrees, and the shapes defining the mask must consist of polygon regions only.
Args:
        regionFileName (:obj:`str`): Path to SAOImage DS9 region file.
        shape (:obj:`tuple`): Shape of the output mask.
        wcs (:obj:`astWCS.WCS`): WCS for the output mask.
Returns:
Mask (2d array)
"""
with open(regionFileName, "r") as inFile:
lines=inFile.readlines()
polyList=[]
for line in lines:
if line.find("polygon") != -1:
polyPoints=[]
coords=line.split("polygon(")[-1].split(") ")[0].split(",")
for i in range(0, len(coords), 2):
                try:
                    RADeg, decDeg=[float(coords[i]), float(coords[i+1])]
                except Exception:
                    raise Exception("failed to parse coords in region file %s - problem at: %s" % (regionFileName, coords))
x, y=wcs.wcs2pix(RADeg, decDeg)
polyPoints.append((int(round(y)), int(round(x))))
polyList.append(polyPoints)
surveyMask=np.zeros(shape, dtype = int)
for polyPoints in polyList:
mahotas.polygon.fill_polygon(polyPoints, surveyMask)
return surveyMask
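# Example (a sketch): a compatible DS9 region file contains polygon entries with
# decimal-degree coordinates, e.g.
#   polygon(150.10,2.20,150.30,2.20,150.30,2.40,150.10,2.40) # color=green
# which can then be turned into a mask with:
#   surveyMask=makeMaskFromDS9PolyRegionFile("survey.reg", shape, wcs)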
|
simonsobsREPO_NAMEnemoPATH_START.@nemo_extracted@nemo-main@nemo@maps.py@.PATH_END.py
|
{
"filename": "imagemaker.py",
"repo_name": "spacetelescope/hstaxe",
"repo_path": "hstaxe_extracted/hstaxe-main/hstaxe/axesrc/imagemaker.py",
"type": "Python"
}
|
"""
See LICENSE.txt
"""
import logging
import os
from astropy.io import fits
from hstaxe import axeerror
from hstaxe.config import get_random_filename
from . import configfile
# make sure there is a logger
_log = logging.getLogger(__name__)
class DummyImages:
"""
Small class to create dummy images for the simulations
This class creates empty dispersed and direct images as dummies.
Since all aXe C-executables need the dispersed image as an input,
    such an image must exist for making the simulations.
"""
def __init__(self, confname, griname=None, dirname=None, nx=None, ny=None):
"""
Initializes the class
@param confname: name of the aXe configuration file
@type confname: string
@param griname: name of the dispersed image
@type griname: string
@param dirname: name of the direct image
@type dirname: string
@param nx: image dimension in x
@type nx: int
@param ny: image dimension in y
@type ny: int
"""
# set the variable
self.WCSimage = None
self.WCSext = None
# load the aXe configuration file
self.conf = configfile.ConfigFile(confname)
# load the pre-defined image information
image_data = self._get_image_data(self.conf)
# check whether a grism image
# shall be created
if image_data['grism'] is not None and griname is not None:
self.griname = griname
self.gridata = image_data['grism']
self.WCSimage = griname
self.WCSext = '[SCI]'
else:
self.griname = None
self.gridata = None
# check whether a direct image
# shall be created
if dirname is not None:
self.dirname = dirname
self.WCSimage = dirname
self.WCSext = '[SCI]'
if image_data['direct'] is not None:
self.dirdata = image_data['direct']
else:
self.dirdata = image_data['grism']
else:
self.dirname = None
self.dirdata = None
# store the drizzle metadata
if 'drizzle' in image_data:
self.drzdata = image_data['drizzle']
else:
self.drzdata = None
# store the x-dimension
if nx is not None:
self.nx = nx
else:
self.nx = image_data['dimension'][0]
# store the y-dimension
if ny is not None:
self.ny = ny
else:
self.ny = image_data['dimension'][1]
def _get_image_data(self, conf):
"""
Determines the pre-defined image information
@param conf: aXe configuration object
@type conf: <ConfigFile>
@return: image information
@rtype: {}
"""
from . import WCSdata
# load the WCS for the various grism modes
if self.conf['CAMERA'] == 'HRC':
# in case that several orders exist OR
# the XOFFSET value is larger than -100, its HRG/G800L
if self.conf['B'] is not None or float(self.conf['A']['XOFF_'].split()[0]) > -100.0:
image_data = WCSdata.get_HRC_G800L_WCS()
else:
image_data = WCSdata.get_HRC_PR200L_WCS()
elif self.conf['CAMERA'] == 'SBC':
            # in case the singularity is more than
            # -90 away from the reference point, it's PR110L
if float(self.conf['A'].disp[0][0]) < -90.0:
image_data = WCSdata.get_SBC_PR110L_WCS()
else:
# otherwise, it is PR130L
image_data = WCSdata.get_SBC_PR130L_WCS()
# the WFC is fortunately unique
elif self.conf['CAMERA'] == 'WFC':
image_data = WCSdata.get_WFC_G800L_WCS()
# check whether the mode is WFC3/IR
elif self.conf['INSTRUMENT'] == 'WFC3' and self.conf['CAMERA'] == 'IR':
image_data = WCSdata.get_WFC3_IR_G102_WCS()
        # check whether the mode is WFC3/UV
elif self.conf['INSTRUMENT'] == 'WFC3' and self.conf['CAMERA'] == 'UV':
image_data = WCSdata.get_WFC3_UV_G280_WCS()
# check whether the mode is NICMOS/G141
elif self.conf['INSTRUMENT'] == 'NICMOS' and self.conf['CAMERA'] == 'NIC3':
image_data = WCSdata.get_NICMOS3_G141_WCS()
else:
# HRC/G800L is the dummy
image_data = WCSdata.get_HRC_G800L_WCS()
return image_data
def deleteImages(self):
"""
Deletes all images
The method deletes the dummy images of the class instance.
"""
# check whether there should exist a grism image
if self.griname is not None:
# check whether it exists and delete it
if os.path.isfile(self.griname):
os.unlink(self.griname)
# check whether there should exist a direct image
if self.dirname is not None:
# check whether it exists and delete it
if os.path.isfile(self.dirname):
os.unlink(self.dirname)
def makeImages(self):
"""
Makes all images
The method lets all images be generated.
"""
# check whether a grism image
# shall be created
if self.griname is not None:
# make the grism image
self.makeOneImage(self.griname, self.nx, self.ny, self.gridata, self.drzdata)
# check whether a direct image
# shall be created
if self.dirname is not None:
# make the direct image
self.makeOneImage(self.dirname, self.nx, self.ny, self.dirdata, self.drzdata)
def makeOneImage(self, imgname, nx, ny, metadata, drzmeta=None):
"""
Creates one dummy image
The method creates a dummy image with the given name and dimension.
A list of general metadata and a list of drizzle metadata is added
to the zero extension header of the image.
@param imgname: name of the image to be created
@type imgname: string
@param nx: image dimension in x
@type nx: int
@param ny: image dimension in y
@type ny: int
@param metadata: the list of general image metadata
@type metadata: []
@param drzmeta: the list of drizzle metadata
@type drzmeta: []
"""
from pyraf import iraf
from iraf import noao, artdata
# delete a previous version
if os.path.isfile(imgname):
os.unlink(imgname)
# open a HDU-list
mex_hdu = fits.HDUList()
# create a primary HDU,
# append it to the list
hdrpr = fits.PrimaryHDU()
mex_hdu.append(hdrpr)
        # go to the header and put
# the exposure time
hdr = mex_hdu[0].header
hdr['EXPTIME'] = (1.0, 'dummy exposure time')
if drzmeta is not None:
# update the header
for item in drzmeta:
hdr[item[0]] = (item[1], item[2])
# write the image and close it
mex_hdu.writeto(imgname)
mex_hdu.close()
# get a random filename
tmpfile = get_random_filename('t', '.fits')
# create a tmp-image with the right dimension
iraf.mkpattern(input=tmpfile, output="", pattern="constant",
option="replace", v1=0.0, v2=0.0, size=1,
title="aXeSIM simulation", pixtype="real", ndim=2, ncols=nx,
nlines=ny, n3=1, n4=1, n5=1, n6=1, n7=1, header="")
# copy the tmp-image to the empty dummy image
# as science extension
iraf.imcopy(input=tmpfile,
output=(imgname+'[SCI,1,append]'),
verbose='YES', Stdout=1)
# open the dummy image, go to the header
img = fits.open(imgname, 'update')
hdr = img[1].header
# update the header
for item in metadata:
hdr[item[0]] = (item[1], item[2])
# write to disk and close
img.flush()
        img.close()
# delete the tmp-image
if os.path.isfile(tmpfile):
os.unlink(tmpfile)
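# Example usage (a sketch; the configuration file and image names below are hypothetical):
#   dummies = DummyImages("axe.conf", griname="grism.fits", dirname="direct.fits")
#   dummies.makeImages()     # create the empty dispersed and direct dummy images
#   ...                      # run the simulation
#   dummies.deleteImages()   # remove the dummies again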
|
spacetelescopeREPO_NAMEhstaxePATH_START.@hstaxe_extracted@hstaxe-main@hstaxe@axesrc@imagemaker.py@.PATH_END.py
|
{
"filename": "rain_check_images.py",
"repo_name": "jsnguyen/rain",
"repo_path": "rain_extracted/rain-main/rain_check_images.py",
"type": "Python"
}
|
import os
import time
import numpy as np
from astropy.io import fits
from tqdm import tqdm
from rain import rain
def distortion(index_x, index_y, x_dist, y_dist):
'''
-- Description:
performs the distortion correction according to x_dist and y_dist
assumes that x_dist and y_dist are both functions of x and y
-- Returns:
the distortion corrected pixel coordinates
-- Arguments:
index_x: the x index of the pixel
index_y: the y index of the pixel
x_dist: distortion map in x
y_dist: distortion map in y
'''
# center on pixel
x_actual = index_x+0.5
y_actual = index_y+0.5
return x_actual+x_dist[index_y,index_x], y_actual+y_dist[index_y,index_x]
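# Worked example (a sketch; the zero-filled distortion maps are hypothetical):
# with no distortion the corrected coordinates are just the pixel centers, e.g.
#   distortion(10, 20, np.zeros((32, 32)), np.zeros((32, 32)))  # -> (10.5, 20.5)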
def main():
data_dir = './check_images'
n_images = 7
n_cpu = os.cpu_count()
#n_cpu = 32
#########################
# LANCZOS3 CHECK IMAGES #
#########################
# nominal values for lanczos3
kernel='lanczos3_lut'
pixel_frac = 1 # pixel fraction, fraction of pixel side length
n_div = 1 # number of divisions per pixel
n_pad = 0
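    # For reference, the Lanczos-3 kernel is L(x) = sinc(x) * sinc(x/3) for |x| < 3
    # and 0 otherwise; the '_lut' suffix presumably selects a lookup-table evaluation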
for i in range(n_images):
image_filepath = os.path.join(data_dir, 'check_image_{}.fits'.format(i))
dist_x_filepath = os.path.join(data_dir, 'check_image_{}_dist_x.fits'.format(i))
dist_y_filepath = os.path.join(data_dir, 'check_image_{}_dist_y.fits'.format(i))
basename = os.path.basename(image_filepath)
output_filename = 'rain_lanc3_{}'.format(basename)
output_path = os.path.join(data_dir, output_filename)
print('Correcting {} -> {}'.format(image_filepath, output_path))
image = fits.getdata(image_filepath)
iy,ix = image.shape
xs = np.arange(0,ix,1)
ys = np.arange(0,iy,1)
grid = np.meshgrid(xs, ys)
index_coords = np.stack(grid).T.reshape(-1,2)
bad_pixel_map = None
x_dist = fits.getdata(dist_x_filepath)
y_dist = fits.getdata(dist_y_filepath)
new_pc_coords = []
for index_x,index_y in tqdm(index_coords, desc='Coords'):
new_pc_coords.append(distortion(index_x, index_y, x_dist, y_dist))
time_start = time.time()
wet_image, missed_pixels = rain(image, pixel_frac, new_pc_coords, n_div, kernel=kernel, bad_pixel_map=bad_pixel_map, parallel=True, n_cpu=n_cpu, n_pad=n_pad)
time_end = time.time()
time_diff = time_end - time_start
print('Time : {:.3f}'.format(time_diff))
print('Original Image Sum : {:.6f}'.format(np.sum(image)))
print('Wet Image Sum : {:.6f}'.format(np.sum(wet_image)))
print('Missed Pixels : {:.6f}'.format(missed_pixels))
print('Missed + Wet Image : {:.6f}'.format(missed_pixels+np.sum(wet_image)))
print('Writing to {}...'.format(output_path))
hdu = fits.PrimaryHDU(wet_image)
hdul = fits.HDUList([hdu])
hdul.writeto(output_path, overwrite=True)
print()
#########################
# GAUSSIAN CHECK IMAGES #
#########################
# nominal values for gaussian
kernel='gaussian_lut'
pixel_frac = 0.5
n_div = 8
n_sigma = 4
n_pad = 0
for i in range(n_images):
image_filepath = os.path.join(data_dir, 'check_image_{}.fits'.format(i))
dist_x_filepath = os.path.join(data_dir, 'check_image_{}_dist_x.fits'.format(i))
dist_y_filepath = os.path.join(data_dir, 'check_image_{}_dist_y.fits'.format(i))
basename = os.path.basename(image_filepath)
output_filename = 'rain_gauss_{}'.format(basename)
output_path = os.path.join(data_dir, output_filename)
print('Correcting {} -> {}'.format(image_filepath, output_path))
image = fits.getdata(image_filepath)
iy,ix = image.shape
xs = np.arange(0,ix,1)
ys = np.arange(0,iy,1)
grid = np.meshgrid(xs, ys)
index_coords = np.stack(grid).T.reshape(-1,2)
bad_pixel_map = None
x_dist = fits.getdata(dist_x_filepath)
y_dist = fits.getdata(dist_y_filepath)
new_pc_coords = []
for index_x,index_y in tqdm(index_coords, desc='Coords'):
new_pc_coords.append(distortion(index_x, index_y, x_dist, y_dist))
time_start = time.time()
wet_image, missed_pixels = rain(image, pixel_frac, new_pc_coords, n_div, kernel=kernel, n_sigma=n_sigma, bad_pixel_map=bad_pixel_map, parallel=True, n_cpu=n_cpu)
time_end = time.time()
time_diff = time_end - time_start
print('Time : {:.3f}'.format(time_diff))
print('Original Image Sum : {:.6f}'.format(np.sum(image)))
print('Wet Image Sum : {:.6f}'.format(np.sum(wet_image)))
print('Missed Pixels : {:.6f}'.format(missed_pixels))
print('Missed + Wet Image : {:.6f}'.format(missed_pixels+np.sum(wet_image)))
print('Writing to {}...'.format(output_path))
hdu = fits.PrimaryHDU(wet_image)
hdul = fits.HDUList([hdu])
hdul.writeto(output_path, overwrite=True)
print()
#############################
# LANCZOS3 KECK CHECK IMAGE #
#############################
# nominal values for lanczos3
kernel='lanczos3_lut'
pixel_frac = 1 # pixel fraction, fraction of pixel side length
n_div = 1 # number of divisions per pixel
n_pad = 0
image_filepath = os.path.join(data_dir, 'r_n0060.fits')
dist_x_filepath = 'distortion/nirc2_distort_X_post20150413_v1.fits'
dist_y_filepath = 'distortion/nirc2_distort_Y_post20150413_v1.fits'
basename = os.path.basename(image_filepath)
output_filename = 'rain_lanc3_{}'.format(basename)
output_path = os.path.join(data_dir, output_filename)
print('Correcting {} -> {}'.format(image_filepath, output_path))
image = fits.getdata(image_filepath)
iy,ix = image.shape
xs = np.arange(0,ix,1)
ys = np.arange(0,iy,1)
grid = np.meshgrid(xs, ys)
index_coords = np.stack(grid).T.reshape(-1,2)
bad_pixel_map = None
x_dist = fits.getdata(dist_x_filepath)
y_dist = fits.getdata(dist_y_filepath)
new_pc_coords = []
for index_x,index_y in tqdm(index_coords, desc='Coords'):
new_pc_coords.append(distortion(index_x, index_y, x_dist, y_dist))
time_start = time.time()
wet_image, missed_pixels = rain(image, pixel_frac, new_pc_coords, n_div, kernel=kernel, bad_pixel_map=bad_pixel_map, parallel=True, n_cpu=n_cpu, n_pad=n_pad)
time_end = time.time()
time_diff = time_end - time_start
print('Time : {:.3f}'.format(time_diff))
print('Original Image Sum : {:.6f}'.format(np.sum(image)))
print('Wet Image Sum : {:.6f}'.format(np.sum(wet_image)))
print('Missed Pixels : {:.6f}'.format(missed_pixels))
print('Missed + Wet Image : {:.6f}'.format(missed_pixels+np.sum(wet_image)))
print('Writing to {}...'.format(output_path))
hdu = fits.PrimaryHDU(wet_image)
hdul = fits.HDUList([hdu])
hdul.writeto(output_path, overwrite=True)
print()
#############################
# GAUSSIAN KECK CHECK IMAGE #
#############################
# nominal values for gaussian
kernel='gaussian_lut'
pixel_frac = 0.5
n_div = 8
n_sigma = 4
n_pad = 0
image_filepath = os.path.join(data_dir, 'r_n0060.fits')
dist_x_filepath = 'distortion/nirc2_distort_X_post20150413_v1.fits'
dist_y_filepath = 'distortion/nirc2_distort_Y_post20150413_v1.fits'
basename = os.path.basename(image_filepath)
output_filename = 'rain_gauss_{}'.format(basename)
output_path = os.path.join(data_dir, output_filename)
print('Correcting {} -> {}'.format(image_filepath, output_path))
image = fits.getdata(image_filepath)
iy,ix = image.shape
xs = np.arange(0,ix,1)
ys = np.arange(0,iy,1)
grid = np.meshgrid(xs, ys)
index_coords = np.stack(grid).T.reshape(-1,2)
bad_pixel_map = None
x_dist = fits.getdata(dist_x_filepath)
y_dist = fits.getdata(dist_y_filepath)
new_pc_coords = []
for index_x,index_y in tqdm(index_coords, desc='Coords'):
new_pc_coords.append(distortion(index_x, index_y, x_dist, y_dist))
time_start = time.time()
wet_image, missed_pixels = rain(image, pixel_frac, new_pc_coords, n_div, kernel=kernel, bad_pixel_map=bad_pixel_map, parallel=True, n_cpu=n_cpu, n_pad=n_pad)
time_end = time.time()
time_diff = time_end - time_start
print('Time : {:.3f}'.format(time_diff))
print('Original Image Sum : {:.6f}'.format(np.sum(image)))
print('Wet Image Sum : {:.6f}'.format(np.sum(wet_image)))
print('Missed Pixels : {:.6f}'.format(missed_pixels))
print('Missed + Wet Image : {:.6f}'.format(missed_pixels+np.sum(wet_image)))
print('Writing to {}...'.format(output_path))
hdu = fits.PrimaryHDU(wet_image)
hdul = fits.HDUList([hdu])
hdul.writeto(output_path, overwrite=True)
print()
if __name__=='__main__':
main()
|
jsnguyenREPO_NAMErainPATH_START.@rain_extracted@rain-main@rain_check_images.py@.PATH_END.py
|
{
"filename": "local.py",
"repo_name": "ML4GW/hermes",
"repo_path": "hermes_extracted/hermes-main/hermes/quiver/io/local.py",
"type": "Python"
}
|
import glob
import os
import shutil
from dataclasses import dataclass
from pathlib import Path
from typing import TYPE_CHECKING, List, Optional
from hermes.quiver.io.exceptions import NoFilesFoundError
from hermes.quiver.io.file_system import FileSystem
if TYPE_CHECKING:
from hermes.types import IO_TYPE
@dataclass
class LocalFileSystem(FileSystem):
def __post_init__(self):
# TODO: switch to preferring Path as root
# and using pathlib apis instead of os.
if not isinstance(self.root, Path):
self.root = Path(self.root)
self.root = str(self.root.resolve())
self.soft_makedirs("")
def soft_makedirs(self, path: str):
path = self.join(self.root, path)
        # TODO: start using the exist_ok kwarg once
        # we know we have the appropriate version
if not os.path.exists(path):
os.makedirs(path)
return True
return False
def join(self, *args):
return os.path.join(*args)
def isdir(self, path: str) -> bool:
path = self.join(self.root, path)
return os.path.isdir(path)
def list(self, path: Optional[str] = None) -> List[str]:
if path is not None:
path = self.join(self.root, path)
else:
path = self.root
return os.listdir(path)
def glob(self, path: str):
files = glob.glob(self.join(self.root, path))
# get rid of the root to put everything
# relative to the fs root
if self.root.endswith(os.path.sep):
prefix = self.root
else:
prefix = self.root + os.path.sep
return [f.replace(prefix, "") for f in files]
def remove(self, path: str):
path = self.join(self.root, path)
if os.path.isdir(path):
shutil.rmtree(path)
elif os.path.isfile(path):
os.remove(path)
else:
paths = self.glob(path)
if len(paths) == 0:
raise NoFilesFoundError(path)
for path in paths:
self.remove(path)
def delete(self):
self.remove("")
def read(self, path: str, mode: str = "r") -> "IO_TYPE":
path = self.join(self.root, path)
with open(path, mode) as f:
return f.read()
def write(self, obj: "IO_TYPE", path: str) -> None:
path = self.join(self.root, path)
if isinstance(obj, str):
mode = "w"
elif isinstance(obj, bytes):
mode = "wb"
else:
raise TypeError(
"Expected object to be of type "
"str or bytes, found type {}".format(type(obj))
)
with open(path, mode) as f:
f.write(obj)
def __str__(self):
return self.root
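# Example usage (a sketch; the root path below is hypothetical):
#   fs = LocalFileSystem("/tmp/model-repo")
#   fs.write("hello", "config.pbtxt")          # writes /tmp/model-repo/config.pbtxt
#   assert fs.read("config.pbtxt") == "hello"
#   fs.delete()                                # removes everything under the root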
|
ML4GWREPO_NAMEhermesPATH_START.@hermes_extracted@hermes-main@hermes@quiver@io@local.py@.PATH_END.py
|
{
"filename": "script_example_model+fit_2_lines.ipynb",
"repo_name": "thomasorb/orcs",
"repo_path": "orcs_extracted/orcs-master/docs/_build/doctrees/nbsphinx/script_example_model+fit_2_lines.ipynb",
"type": "Jupyter Notebook"
}
|
# Modelling and fitting a spectrum with two resolved lines
Based on what we have seen in the example [Modelling and fitting one emission line](./script_example_model+fit_1_line.ipynb) we will model and fit a spectrum with two resolved lines. This example will then be used in [Modelling and fitting two unresolved emission lines with a Bayesian approach](./script_example_model+fit_2_lines_bayes.ipynb)
```python
import orb.fit
import pylab as pl
import numpy as np
from orb.core import Lines
```
## Second step: modelling and fitting a spectrum with two resolved lines
No particular difficulty here. A classical algorithm is good enough.
```python
halpha_cm1 = Lines().get_line_cm1('Halpha')
step = 2943
order = 8
step_nb = 840
axis_corr = 1.0374712062298759
theta = orb.utils.spectrum.corr2theta(axis_corr)
print('incident angle theta (in degrees):', theta)
zpd_index = 168
# model spectrum
velocity1 = 250
broadening1 = 15
spectrum_axis, spectrum1 = orb.fit.create_cm1_lines_model_raw([halpha_cm1], [1], step, order, step_nb, axis_corr, zpd_index=zpd_index, fmodel='sincgauss',
sigma=broadening1, vel=velocity1)
velocity2 = 10
broadening2 = 30
spectrum_axis, spectrum2 = orb.fit.create_cm1_lines_model_raw([halpha_cm1], [1], step, order, step_nb, axis_corr, zpd_index=zpd_index, fmodel='sincgauss',
sigma=broadening2, vel=velocity2)
spectrum = spectrum1 + spectrum2
# add noise
spectrum += np.random.standard_normal(spectrum.shape) * 0.02
spectrum_axis = orb.utils.spectrum.create_cm1_axis(np.size(spectrum), step, order, corr=axis_corr)
pl.plot(spectrum_axis, spectrum)
pl.xlim((15200, 15270))
```
incident angle theta (in degrees): 15.445939567249903
(15200, 15270)

```python
nm_laser = 543.5 # wavelength of the calibration laser, in fact it can be any real positive number (e.g. 1 is ok)
# pos_def must be given here because, by default all the lines are considered
# to share the same velocity. i.e. sigma_def = ['1', '1']. As the two lines do not have
# the same velocity we put them in two different velocity groups: sigma_def = ['1', '2']
#
# pos_cov is the velocity of the lines in km/s. It is a covarying parameter,
# because the reference position -i.e. the initial guess- of the lines is set
#
# sigma_guess is the initial guess on the broadening (in km/s)
fit = orb.fit.fit_lines_in_spectrum(spectrum, [halpha_cm1, halpha_cm1], step, order, nm_laser, theta, zpd_index,
wavenumber=True, apodization=1, fmodel='sincgauss',
pos_def=['1', '2'],
pos_cov=[velocity1, velocity2],
sigma_guess=[broadening1, broadening2])
print('velocity (in km/s): ', fit['velocity_gvar'])
print('broadening (in km/s): ', fit['broadening_gvar'])
print('flux (in the unit of the spectrum amplitude / unit of the axis fwhm): ', fit['flux_gvar'])
pl.plot(spectrum_axis, spectrum, label='real_spectrum')
pl.plot(spectrum_axis, fit['fitted_vector'], label='fit')
pl.xlim((15200, 15270))
pl.legend()
```
velocity (in km/s): [244.5(1.4) 10.51(86)]
broadening (in km/s): [20.7(2.2) 31.33(98)]
flux (in the unit of the spectrum amplitude / unit of the axis fwhm): [0.673(40) 1.663(51)]
<matplotlib.legend.Legend at 0x7f93b44b7310>

|
thomasorbREPO_NAMEorcsPATH_START.@orcs_extracted@orcs-master@docs@_build@doctrees@nbsphinx@script_example_model+fit_2_lines.ipynb@.PATH_END.py
|
{
"filename": "test_gcs.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/io/test_gcs.py",
"type": "Python"
}
|
from io import BytesIO
import os
import pathlib
import tarfile
import zipfile
import numpy as np
import pytest
from pandas.compat.pyarrow import pa_version_under17p0
from pandas import (
DataFrame,
Index,
date_range,
read_csv,
read_excel,
read_json,
read_parquet,
)
import pandas._testing as tm
from pandas.util import _test_decorators as td
pytestmark = pytest.mark.filterwarnings(
"ignore:Passing a BlockManager to DataFrame:DeprecationWarning"
)
@pytest.fixture
def gcs_buffer():
"""Emulate GCS using a binary buffer."""
pytest.importorskip("gcsfs")
fsspec = pytest.importorskip("fsspec")
gcs_buffer = BytesIO()
gcs_buffer.close = lambda: True
class MockGCSFileSystem(fsspec.AbstractFileSystem):
@staticmethod
def open(*args, **kwargs):
gcs_buffer.seek(0)
return gcs_buffer
def ls(self, path, **kwargs):
# needed for pyarrow
return [{"name": path, "type": "file"}]
# Overwrites the default implementation from gcsfs to our mock class
fsspec.register_implementation("gs", MockGCSFileSystem, clobber=True)
return gcs_buffer
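# With the mock registered above, any pandas call that targets a "gs://" URL
# (e.g. df.to_csv("gs://test/test.csv")) is routed through MockGCSFileSystem.open
# and reads/writes the shared BytesIO buffer instead of real GCS.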
# Patches pyarrow; other processes should not pick up change
@pytest.mark.single_cpu
@pytest.mark.parametrize("format", ["csv", "json", "parquet", "excel", "markdown"])
def test_to_read_gcs(gcs_buffer, format, monkeypatch, capsys, request):
"""
Test that many to/read functions support GCS.
GH 33987
"""
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
path = f"gs://test/test.{format}"
if format == "csv":
df1.to_csv(path, index=True)
df2 = read_csv(path, parse_dates=["dt"], index_col=0)
elif format == "excel":
path = "gs://test/test.xlsx"
df1.to_excel(path)
df2 = read_excel(path, parse_dates=["dt"], index_col=0)
elif format == "json":
msg = (
"The default 'epoch' date format is deprecated and will be removed "
"in a future version, please use 'iso' date format instead."
)
with tm.assert_produces_warning(FutureWarning, match=msg):
df1.to_json(path)
df2 = read_json(path, convert_dates=["dt"])
elif format == "parquet":
pytest.importorskip("pyarrow")
pa_fs = pytest.importorskip("pyarrow.fs")
class MockFileSystem(pa_fs.FileSystem):
@staticmethod
def from_uri(path):
print("Using pyarrow filesystem")
to_local = pathlib.Path(path.replace("gs://", "")).absolute().as_uri()
return pa_fs.LocalFileSystem(to_local)
request.applymarker(
pytest.mark.xfail(
not pa_version_under17p0,
raises=TypeError,
reason="pyarrow 17 broke the mocked filesystem",
)
)
with monkeypatch.context() as m:
m.setattr(pa_fs, "FileSystem", MockFileSystem)
df1.to_parquet(path)
df2 = read_parquet(path)
captured = capsys.readouterr()
assert captured.out == "Using pyarrow filesystem\nUsing pyarrow filesystem\n"
elif format == "markdown":
pytest.importorskip("tabulate")
df1.to_markdown(path)
df2 = df1
expected = df1[:]
if format in ["csv", "excel"]:
expected["dt"] = expected["dt"].dt.as_unit("s")
tm.assert_frame_equal(df2, expected)
def assert_equal_zip_safe(result: bytes, expected: bytes, compression: str):
"""
For zip compression, only compare the CRC-32 checksum of the file contents
to avoid checking the time-dependent last-modified timestamp which
in some CI builds is off-by-one
See https://en.wikipedia.org/wiki/ZIP_(file_format)#File_headers
"""
if compression == "zip":
# Only compare the CRC checksum of the file contents
with (
            zipfile.ZipFile(BytesIO(result)) as res,
            zipfile.ZipFile(BytesIO(expected)) as exp,
):
for res_info, exp_info in zip(res.infolist(), exp.infolist()):
assert res_info.CRC == exp_info.CRC
elif compression == "tar":
with (
            tarfile.open(fileobj=BytesIO(result)) as tar_res,
            tarfile.open(fileobj=BytesIO(expected)) as tar_exp,
):
for tar_res_info, tar_exp_info in zip(
tar_res.getmembers(), tar_exp.getmembers()
):
actual_file = tar_res.extractfile(tar_res_info)
expected_file = tar_exp.extractfile(tar_exp_info)
assert (actual_file is None) == (expected_file is None)
if actual_file is not None and expected_file is not None:
assert actual_file.read() == expected_file.read()
else:
assert result == expected
@pytest.mark.parametrize("encoding", ["utf-8", "cp1251"])
def test_to_csv_compression_encoding_gcs(
gcs_buffer, compression_only, encoding, compression_to_extension
):
"""
    Compression and encoding should work with GCS.
GH 35677 (to_csv, compression), GH 26124 (to_csv, encoding), and
GH 32392 (read_csv, encoding)
"""
df = DataFrame(
1.1 * np.arange(120).reshape((30, 4)),
columns=Index(list("ABCD")),
index=Index([f"i-{i}" for i in range(30)]),
)
# reference of compressed and encoded file
compression = {"method": compression_only}
if compression_only == "gzip":
compression["mtime"] = 1 # be reproducible
buffer = BytesIO()
df.to_csv(buffer, compression=compression, encoding=encoding, mode="wb")
# write compressed file with explicit compression
path_gcs = "gs://test/test.csv"
df.to_csv(path_gcs, compression=compression, encoding=encoding)
res = gcs_buffer.getvalue()
expected = buffer.getvalue()
assert_equal_zip_safe(res, expected, compression_only)
read_df = read_csv(
path_gcs, index_col=0, compression=compression_only, encoding=encoding
)
tm.assert_frame_equal(df, read_df)
# write compressed file with implicit compression
file_ext = compression_to_extension[compression_only]
compression["method"] = "infer"
path_gcs += f".{file_ext}"
df.to_csv(path_gcs, compression=compression, encoding=encoding)
res = gcs_buffer.getvalue()
expected = buffer.getvalue()
assert_equal_zip_safe(res, expected, compression_only)
read_df = read_csv(path_gcs, index_col=0, compression="infer", encoding=encoding)
tm.assert_frame_equal(df, read_df)
def test_to_parquet_gcs_new_file(monkeypatch, tmpdir):
"""Regression test for writing to a not-yet-existent GCS Parquet file."""
pytest.importorskip("fastparquet")
pytest.importorskip("gcsfs")
from fsspec import AbstractFileSystem
df1 = DataFrame(
{
"int": [1, 3],
"float": [2.0, np.nan],
"str": ["t", "s"],
"dt": date_range("2018-06-18", periods=2),
}
)
class MockGCSFileSystem(AbstractFileSystem):
def open(self, path, mode="r", *args):
if "w" not in mode:
raise FileNotFoundError
return open(os.path.join(tmpdir, "test.parquet"), mode, encoding="utf-8")
monkeypatch.setattr("gcsfs.GCSFileSystem", MockGCSFileSystem)
df1.to_parquet(
"gs://test/test.csv", index=True, engine="fastparquet", compression=None
)
@td.skip_if_installed("gcsfs")
def test_gcs_not_present_exception():
with tm.external_error_raised(ImportError):
read_csv("gs://test/test.csv")
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@io@test_gcs.py@.PATH_END.py
|
{
"filename": "test_h_voxel_volume.py",
"repo_name": "jrenaud90/TidalPy",
"repo_path": "TidalPy_extracted/TidalPy-main/Tests/Test_Old/Test_SetB_Package/test_h_voxel_volume.py",
"type": "Python"
}
|
""" Tests for spherical helper function to calculate the volume of spherical voxels. """
import numpy as np
import TidalPy
from TidalPy.utilities.spherical_helper.volume import calculate_voxel_volumes, calculate_voxel_volumes_npy
planet_radius = 6300.0e3
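# Analytic reference used by the checks below (a sketch of the arithmetic):
# V = (4/3) * pi * R^3 = (4/3) * pi * (6.3e6 m)^3 ≈ 1.047e21 m^3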
def test_voxel_volume_numba():
""" Test the voxel volume calculation using the numba version of the func.
Check that the results match expectations. """
radius_array = np.linspace(0., planet_radius, 50)
longitude_array_deg = np.linspace(0., 360., 20)
colatitude_array_deg = np.linspace(0., 180., 25)
longitude_array = np.radians(longitude_array_deg)
colatitude_array = np.radians(colatitude_array_deg)
voxel_volumes = calculate_voxel_volumes(radius_array, longitude_array, colatitude_array)
# Check shape
assert len(voxel_volumes.shape) == 3
assert voxel_volumes.shape[0] == len(radius_array)
assert voxel_volumes.shape[1] == len(longitude_array)
assert voxel_volumes.shape[2] == len(colatitude_array)
# See how results compare to expectations
real_total_volume = (4. / 3.) * np.pi * planet_radius**3
voxel_total_volume = np.sum(voxel_volumes)
percent_diff = np.abs(real_total_volume - voxel_total_volume) / real_total_volume
np.testing.assert_almost_equal(0., percent_diff, decimal=1)
def test_voxel_volume_numpy():
""" Test the voxel volume calculation using the numpy version of the func.
Check that the results match expectations. """
radius_array = np.linspace(0., planet_radius, 20)
longitude_array_deg = np.linspace(0., 360., 20)
colatitude_array_deg = np.linspace(0., 180., 20)
longitude_array = np.radians(longitude_array_deg)
colatitude_array = np.radians(colatitude_array_deg)
voxel_volumes = calculate_voxel_volumes_npy(radius_array, longitude_array, colatitude_array)
# Check shape
assert len(voxel_volumes.shape) == 3
assert voxel_volumes.shape[0] == len(radius_array)
assert voxel_volumes.shape[1] == len(longitude_array)
assert voxel_volumes.shape[2] == len(colatitude_array)
# See how results compare to expectations
real_total_volume = (4. / 3.) * np.pi * planet_radius**3
voxel_total_volume = np.sum(voxel_volumes)
percent_diff = np.abs(real_total_volume - voxel_total_volume) / real_total_volume
np.testing.assert_almost_equal(0., percent_diff, decimal=1)
def test_voxel_volume_numba_higherN():
""" See if using a higher N will result in better results. """
radius_array = np.linspace(0., planet_radius, 400)
longitude_array_deg = np.linspace(0., 360., 100)
colatitude_array_deg = np.linspace(0., 180., 100)
longitude_array = np.radians(longitude_array_deg)
colatitude_array = np.radians(colatitude_array_deg)
voxel_volumes = calculate_voxel_volumes(radius_array, longitude_array, colatitude_array)
# Check shape
assert len(voxel_volumes.shape) == 3
assert voxel_volumes.shape[0] == len(radius_array)
assert voxel_volumes.shape[1] == len(longitude_array)
assert voxel_volumes.shape[2] == len(colatitude_array)
# See how results compare to expectations
real_total_volume = (4. / 3.) * np.pi * planet_radius**3
voxel_total_volume = np.sum(voxel_volumes)
percent_diff = np.abs(real_total_volume - voxel_total_volume) / real_total_volume
np.testing.assert_almost_equal(0., percent_diff, decimal=2)
|
jrenaud90REPO_NAMETidalPyPATH_START.@TidalPy_extracted@TidalPy-main@Tests@Test_Old@Test_SetB_Package@test_h_voxel_volume.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergeo/marker/colorbar/title/font/__init__.py",
"type": "Python"
}
|
import sys
if sys.version_info < (3, 7):
from ._size import SizeValidator
from ._family import FamilyValidator
from ._color import ColorValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__,
[],
["._size.SizeValidator", "._family.FamilyValidator", "._color.ColorValidator"],
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergeo@marker@colorbar@title@font@__init__.py@.PATH_END.py
|
{
"filename": "test_classifier.py",
"repo_name": "morpheus-project/morpheus",
"repo_path": "morpheus_extracted/morpheus-master/morpheus/tests/test_classifier.py",
"type": "Python"
}
|
# MIT License
# Copyright 2018 Ryan Hausen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# ==============================================================================
"""Tests morpheus.classifier module."""
import os
import pytest
import numpy as np
from morpheus.classifier import Classifier
from morpheus.data import example
import morpheus.tests.data_helper as dh
@pytest.mark.unit
class TestClassifier:
"""Tests morpheus.classifier.Classifier"""
@staticmethod
def test_variables_not_none():
"""Tests _variables_not_none method."""
names = ["a", "b", "c"]
values = [1, 2, 3]
Classifier._variables_not_none(names, values)
@staticmethod
def test_variables_not_none_throws():
"""Tests _variables_not_none, throws ValueError."""
names = ["a", "b", "c"]
values = [1, 2, None]
with pytest.raises(ValueError):
Classifier._variables_not_none(names, values)
@staticmethod
def test_arrays_same_size():
"""Tests _arrays_same_size method."""
shape = (10, 10)
arrs = [np.zeros(shape) for _ in range(3)]
Classifier._arrays_same_size(arrs)
@staticmethod
def test_arrays_same_size_throws():
"""Tests _arrays_same_size, throws ValueError."""
shape = (10, 10)
arrs = [np.zeros(shape) for _ in range(3)]
arrs.append(np.zeros((20, 20)))
with pytest.raises(ValueError):
Classifier._arrays_same_size(arrs)
@staticmethod
def test_standardize_img():
"""Test _standardize_img method."""
img = np.random.normal(loc=1.0, scale=3.0, size=(10, 10, 10))
img = Classifier._standardize_img(img)
np.testing.assert_allclose(np.mean(img), 0, atol=1e-07)
np.testing.assert_allclose(np.var(img), 1, atol=1e-07)
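        # the assertions above imply standardization is (img - mean(img)) / std(img),
        # i.e. zero mean and unit variance afterwards (an inference from the test,
        # not taken from the implementation itself)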
@staticmethod
def test_make_runnable_file():
"""Test _make_runnable_file."""
local = os.path.dirname(os.path.abspath(__file__))
Classifier._make_runnable_file(local)
assert os.path.exists(os.path.join(local, "main.py"))
os.remove(os.path.join(local, "main.py"))
@staticmethod
def test_variables_not_none_raises():
"""Test _variables_not_none."""
with pytest.raises(ValueError):
Classifier._variables_not_none(["good", "bad"], [1, None])
@staticmethod
def test_validate_parallel_params_raises_cpus_gpus():
"""Test _validate_parallel_params.
        Throws ValueError for passing values for both cpus and gpus.
"""
gpus = [0]
cpus = 0
with pytest.raises(ValueError):
Classifier._validate_parallel_params(gpus=gpus, cpus=cpus)
@staticmethod
def test_validate_parallel_params_raises_single_gpu():
"""Test _validate_parallel_params.
Throws ValueError for passing a single gpu.
"""
gpus = [0]
with pytest.raises(ValueError):
Classifier._validate_parallel_params(gpus=gpus)
@staticmethod
def test_validate_parallel_params_raises_single_cpu():
"""Test _validate_parallel_params.
        Throws ValueError for passing a single cpu.
"""
cpus = 1
with pytest.raises(ValueError):
Classifier._validate_parallel_params(cpus=cpus)
@staticmethod
def test_segmap_from_classified():
"""Test the segmap_from_classified method."""
data = dh.get_expected_morpheus_output()
h, _, _, _ = example.get_sample()
        mask = np.zeros_like(h, dtype=int)
mask[5:-5, 5:-5] = 1
expected_segmap = dh.get_expected_segmap()["segmap"]
actual_segmap = Classifier.segmap_from_classified(data, h, mask=mask)
np.testing.assert_array_equal(expected_segmap, actual_segmap)
@staticmethod
def test_segmap_from_classified_fails():
"""Test the segmap_from_classified method."""
data = dh.get_expected_morpheus_output()
h, _, _, _ = example.get_sample()
        mask = np.zeros_like(h, dtype=int)
mask[5:-5, 5:-5] = 1
with pytest.raises(ValueError):
Classifier.segmap_from_classified(data, h, mask=mask, bkg_src_threshold=1.0)
@staticmethod
def test_catalog_from_classified():
"""Test the catalog_from_classified method."""
classified = dh.get_expected_morpheus_output()
h, _, _, _ = example.get_sample()
segmap = dh.get_expected_segmap()["segmap"]
expected_catalog = dh.get_expected_catalog()["catalog"]
actual_catalog = Classifier.catalog_from_classified(classified, h, segmap)
assert expected_catalog == actual_catalog
@staticmethod
def test_colorize_classified():
"""Test colorize_classified."""
data = dh.get_expected_morpheus_output()
expected_color = dh.get_expected_colorized_pngs()["no_hidden"]
actual_color = Classifier.colorize_classified(data, hide_unclassified=False)
actual_color = (actual_color * 255).astype(np.uint8)
np.testing.assert_array_almost_equal(expected_color, actual_color)
@staticmethod
def test_colorize_classified_hidden():
"""Test colorize_classified with hidden."""
classified = dh.get_expected_morpheus_output()
expected_color = dh.get_expected_colorized_pngs()["hidden"]
actual_color = Classifier.colorize_classified(
classified, hide_unclassified=True
)
actual_color = (actual_color * 255).astype(np.uint8)
np.testing.assert_array_almost_equal(expected_color, actual_color)
@staticmethod
def test_valid_input_types_is_str_ndarray():
"""Test _valid_input_types_is_str."""
h, j, v, z = [np.zeros([10]) for _ in range(4)]
assert not Classifier._valid_input_types_is_str(h, j, v, z)
@staticmethod
def test_valid_input_types_is_str_str():
"""Test _valid_input_types_is_str."""
h, j, v, z = ["" for _ in range(4)]
assert Classifier._valid_input_types_is_str(h, j, v, z)
@staticmethod
def test_valid_input_types_is_str_throws_mixed():
"""Test _valid_input_types_is_str."""
h, j = ["" for _ in range(2)]
v, z = [np.zeros([10]) for _ in range(2)]
with pytest.raises(ValueError):
Classifier._valid_input_types_is_str(h, j, v, z)
@staticmethod
def test_valid_input_types_is_str_throws_wrong_type():
"""Test _valid_input_types_is_str."""
h, j, v, z = [1 for _ in range(4)]
with pytest.raises(ValueError):
Classifier._valid_input_types_is_str(h, j, v, z)
|
morpheus-projectREPO_NAMEmorpheusPATH_START.@morpheus_extracted@morpheus-master@morpheus@tests@test_classifier.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmapbox/stream/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._token import TokenValidator
from ._maxpoints import MaxpointsValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmapbox@stream@__init__.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.