metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "export.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/packages/vaex-core/vaex/export.py",
"type": "Python"
}
|
__author__ = 'maartenbreddels'
import os
import sys
import collections
import logging
import concurrent.futures
import threading
import numpy as np
import pyarrow as pa
import vaex
import vaex.utils
import vaex.execution
from vaex.column import ColumnStringArrow, _to_string_sequence
max_length = int(1e5)
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
try:
import h5py
except:
if not on_rtd:
raise
# from vaex.dataset import DatasetLocal
logger = logging.getLogger("vaex.export")
progress_lock = threading.Lock()
class ProgressStatus(object):
    """Mutable holder for cross-thread export progress state.

    The export driver sets two attributes on an instance:
    ``value`` — number of rows processed so far (updated under
    ``progress_lock`` by the worker), and ``cancelled`` — set to True when
    the progress callback asks to stop.
    """
    pass
def _export(dataset_input, dataset_output, random_index_column, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True, parallel=True):
    """Copy columns from dataset_input into the pre-allocated dataset_output.

    :param DatasetLocal dataset_input: dataset to export
    :param DatasetLocal dataset_output: output dataset whose columns get written
    :param str random_index_column: column in dataset_output holding the shuffle order
    :param str path: path for file
    :param list[str] column_names: list of column names to export or None for all columns
    :param str byteorder: = for native, < for little endian and > for big endian
    :param bool shuffle: export rows in random order
    :param bool selection: export selection or not
    :param progress: progress callback that gets a progress fraction as argument and should return True to continue
    :param sort: expression to sort by (mutually exclusive with shuffle)
    :param bool ascending: sort ascending (True) or descending (False)
    :param bool parallel: evaluate columns in parallel
    :return: the exported column names
    """
    if selection:
        if selection == True:  # easier to work with the name
            selection = "default"
    N = len(dataset_input) if not selection else dataset_input.selected_length(selection)
    if N == 0:
        raise ValueError("Cannot export empty table")
    if shuffle and sort:
        raise ValueError("Cannot shuffle and sort at the same time")
    if shuffle:
        shuffle_array = dataset_output.columns[random_index_column]
    partial_shuffle = shuffle and len(dataset_input) != N
    order_array = None
    order_array_inverse = None
    # for strings we also need the inverse order_array, keep track of that
    has_strings = any([dataset_input.is_string(k) for k in column_names])
    if partial_shuffle:
        # if we only export a portion, we need to create the full length random_index array, and
        shuffle_array_full = np.random.choice(len(dataset_input), len(dataset_input), replace=False)
        # then take a section of it
        shuffle_array[:] = shuffle_array_full[shuffle_array_full < N]
        del shuffle_array_full
        order_array = shuffle_array
    elif shuffle:
        # better to do this in memory
        shuffle_array_memory = np.random.choice(N, N, replace=False)
        shuffle_array[:] = shuffle_array_memory
        order_array = shuffle_array
    if order_array is not None:
        indices_r = np.zeros_like(order_array)
        indices_r[order_array] = np.arange(len(order_array))
        order_array_inverse = indices_r
        del indices_r
    if sort:
        if selection:
            raise ValueError("sorting selections not yet supported")
        # these indices sort the input array, but we evaluate the input in sequential order and write it out in sorted order
        # e.g., not b[:] = a[indices]
        # but b[indices_r] = a
        logger.info("sorting...")
        indices = np.argsort(dataset_input.evaluate(sort))
        indices_r = np.zeros_like(indices)
        indices_r[indices] = np.arange(len(indices))
        if has_strings:
            # in this case we already have the inverse ready
            # BUGFIX: was ``indices[:--1]`` which equals ``indices[:1]`` (a one
            # element slice); descending order must reverse the whole array,
            # mirroring the ``indices_r[::-1]`` branch below
            order_array_inverse = indices if ascending else indices[::-1]
        else:
            del indices
        order_array = indices_r if ascending else indices_r[::-1]
        logger.info("sorting done")
    if progress == True:
        progress = vaex.utils.progressbar_callable(title="exporting")
    progress = progress or (lambda value: True)
    progress_total = len(column_names) * len(dataset_input)
    progress_status = ProgressStatus()
    progress_status.cancelled = False
    progress_status.value = 0
    if selection:
        dataset_input.count(selection=selection)  # fill cache for filter and selection
    else:
        len(dataset_input)  # fill filter cache
    sparse_groups = collections.defaultdict(list)
    sparse_matrices = {}  # alternative to a set of matrices, since they are not hashable
    futures = []
    thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=1)
    if True:
        for column_name in column_names:
            sparse_matrix = dataset_output._sparse_matrix(column_name)
            if sparse_matrix is not None:
                # sparse columns are written differently (grouped per matrix)
                sparse_groups[id(sparse_matrix)].append(column_name)
                sparse_matrices[id(sparse_matrix)] = sparse_matrix
                continue
            logger.debug(" exporting column: %s " % column_name)
            future = thread_pool.submit(_export_column, dataset_input, dataset_output, column_name,
                                        shuffle, sort, selection, N, order_array, order_array_inverse, progress_status, parallel=parallel)
            futures.append(future)
        done = False
        progress(0)
        # poll the worker futures with a short timeout so the progress callback
        # keeps being invoked (and can request cancellation) while they run
        while not done:
            done = True
            for future in futures:
                try:
                    future.result(0.1/4)
                except concurrent.futures.TimeoutError:
                    done = False
                    break
            if not done:
                if not progress(progress_status.value / float(progress_total)):
                    progress_status.cancelled = True
    if not progress_status.cancelled:
        progress(1)
    for sparse_matrix_id, column_names in sparse_groups.items():
        sparse_matrix = sparse_matrices[sparse_matrix_id]
        for column_name in column_names:
            # sparse export does not support shuffling or selections
            assert not shuffle
            assert selection in [None, False]
            column = dataset_output.columns[column_name]
            column.matrix.data[:] = dataset_input.columns[column_name].matrix.data
            column.matrix.indptr[:] = dataset_input.columns[column_name].matrix.indptr
            column.matrix.indices[:] = dataset_input.columns[column_name].matrix.indices
    return column_names
def _export_column(dataset_input, dataset_output, column_name, shuffle, sort, selection, N,
                   order_array, order_array_inverse, progress_status, parallel=True):
    """Export one column from dataset_input into dataset_output.

    Evaluates the column in chunks of ``max_length`` rows and writes them into
    the pre-allocated output column. When shuffling/sorting, writes go to an
    in-memory copy first and are flushed to disk in one pass at the end.
    Updates ``progress_status.value`` under ``progress_lock`` and stops early
    when ``progress_status.cancelled`` is set by the driver.
    """
    if 1:
        to_array = dataset_output.columns[column_name]
        dtype = dataset_input.data_type(column_name)
        is_string = dtype.is_string
        if is_string:
            # assert isinstance(to_array, pa.Array) # we don't support chunked arrays here
            # TODO legacy: we still use ColumnStringArrow to write, find a way to do this with arrow
            # this is the case with hdf5 and remote storage
            if not isinstance(to_array, ColumnStringArrow):
                to_array = ColumnStringArrow.from_arrow(to_array)
        if shuffle or sort:  # we need to create a in memory copy, otherwise we will do random writes which is VERY inefficient
            to_array_disk = to_array
            if np.ma.isMaskedArray(to_array):
                to_array = np.empty_like(to_array_disk)
            else:
                if vaex.array_types.is_string_type(dtype):
                    # we create an empty column copy
                    to_array = to_array._zeros_like()
                else:
                    to_array = np.zeros_like(to_array_disk)
        to_offset = 0  # we need this for selections
        to_offset_unselected = 0  # we need this for filtering
        count = len(dataset_input)  # if not selection else dataset_input.length_unfiltered()
        # TODO: if no filter, selection or mask, we can choose the quick path for str
        string_byte_offset = 0
        for i1, i2, values in dataset_input.evaluate(column_name, chunk_size=max_length, filtered=True, parallel=parallel, selection=selection, array_type='numpy-arrow'):
            logger.debug("from %d to %d (total length: %d, output length: %d)", i1, i2, len(dataset_input), N)
            no_values = len(values)
            if no_values:
                if is_string:
                    # for strings, we don't take sorting/shuffling into account when building the structure
                    to_column = to_array
                    from_sequence = _to_string_sequence(values)
                    to_sequence = to_column.string_sequence.slice(to_offset, to_offset+no_values, string_byte_offset)
                    string_byte_offset += to_sequence.fill_from(from_sequence)
                    to_offset += no_values
                else:
                    # fill value for masked entries written into an unmasked target
                    fill_value = np.nan if dtype.kind == "f" else None
                    # assert np.ma.isMaskedArray(to_array) == np.ma.isMaskedArray(values), "to (%s) and from (%s) array are not of both masked or unmasked (%s)" %\
                    # (np.ma.isMaskedArray(to_array), np.ma.isMaskedArray(values), column_name)
                    if shuffle or sort:
                        # scatter-write according to the precomputed order
                        target_set_item = order_array[i1:i2]
                    else:
                        target_set_item = slice(to_offset, to_offset + no_values)
                    if dtype.is_datetime:
                        # datetimes are stored as their int64 representation
                        values = values.view(np.int64)
                    if np.ma.isMaskedArray(to_array) and np.ma.isMaskedArray(values):
                        to_array.data[target_set_item] = values.filled(fill_value)
                        to_array.mask[target_set_item] = values.mask
                    elif not np.ma.isMaskedArray(to_array) and np.ma.isMaskedArray(values):
                        to_array[target_set_item] = values.filled(fill_value)
                    else:
                        to_array[target_set_item] = values
                    to_offset += no_values
            with progress_lock:
                progress_status.value += i2 - i1
            if progress_status.cancelled:
                break
            #if not progress(progress_value / float(progress_total)):
            # break
        if is_string:  # write out the last index
            to_column = to_array
            if selection:
                to_column.indices[to_offset] = string_byte_offset
            else:
                to_column.indices[count] = string_byte_offset
        if shuffle or sort:  # write to disk in one go
            if is_string:  # strings are sorted afterwards
                view = to_array.string_sequence.lazy_index(order_array_inverse)
                to_array_disk.string_sequence.fill_from(view)
            else:
                if np.ma.isMaskedArray(to_array) and np.ma.isMaskedArray(to_array_disk):
                    to_array_disk.data[:] = to_array.data
                    to_array_disk.mask[:] = to_array.mask
                else:
                    to_array_disk[:] = to_array
def export_hdf5_v1(dataset, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True):
    """Legacy hdf5 (v1) export; forwards every argument to ``vaex.hdf5.export``."""
    import vaex.hdf5.export
    vaex.hdf5.export.export_hdf5_v1(
        dataset=dataset,
        path=path,
        column_names=column_names,
        byteorder=byteorder,
        shuffle=shuffle,
        selection=selection,
        progress=progress,
        virtual=virtual,
    )
def export_hdf5(dataset, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=True, sort=None, ascending=True, parallel=True):
    """Export ``dataset`` to hdf5; forwards every argument to ``vaex.hdf5.export``."""
    import vaex.hdf5.export
    vaex.hdf5.export.export_hdf5(
        dataset=dataset,
        path=path,
        column_names=column_names,
        byteorder=byteorder,
        shuffle=shuffle,
        selection=selection,
        progress=progress,
        virtual=virtual,
        sort=sort,
        ascending=ascending,
        parallel=parallel,
    )
if __name__ == "__main__":
    # NOTE(review): ``main`` is not defined anywhere in this module and is not
    # imported above — running this file as a script raises NameError.
    # Confirm whether a ``main`` entry point was removed or should be imported.
    sys.exit(main(sys.argv))
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@packages@vaex-core@vaex@export.py@.PATH_END.py
|
{
"filename": "DiffTreeWidget.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/widgets/DiffTreeWidget.py",
"type": "Python"
}
|
import numpy as np
from .. import functions as fn
from ..Qt import QtWidgets
from .DataTreeWidget import DataTreeWidget
__all__ = ['DiffTreeWidget']
class DiffTreeWidget(QtWidgets.QWidget):
    """
    Widget for displaying differences between hierarchical python data structures
    (eg, nested dicts, lists, and arrays)
    """

    def __init__(self, parent=None, a=None, b=None):
        QtWidgets.QWidget.__init__(self, parent)
        self.layout = QtWidgets.QHBoxLayout()
        self.setLayout(self.layout)
        # two side-by-side trees: trees[0] shows *a*, trees[1] shows *b*
        self.trees = [DataTreeWidget(self), DataTreeWidget(self)]
        for t in self.trees:
            self.layout.addWidget(t)
        if a is not None:
            self.setData(a, b)

    def setData(self, a, b):
        """
        Set the data to be compared in this widget.
        """
        self.data = (a, b)
        self.trees[0].setData(a)
        self.trees[1].setData(b)
        return self.compare(a, b)

    def compare(self, a, b, path=()):
        """
        Compare data structure *a* to structure *b*.
        Return True if the objects match completely.
        Otherwise, return a structure that describes the differences:

            { 'type': bool
              'len': bool,
              'str': bool,
              'shape': bool,
              'dtype': bool,
              'mask': array,
            }
        """
        bad = (255, 200, 200)
        # generate typestr, desc, childs for each object
        typeA, descA, childsA, _ = self.trees[0].parse(a)
        typeB, descB, childsB, _ = self.trees[1].parse(b)
        if typeA != typeB:
            self.setColor(path, 1, bad)
        if descA != descB:
            self.setColor(path, 2, bad)
        if isinstance(a, dict) and isinstance(b, dict):
            keysA = set(a.keys())
            keysB = set(b.keys())
            # keys present in only one structure are highlighted in that tree
            for key in keysA - keysB:
                self.setColor(path+(key,), 0, bad, tree=0)
            for key in keysB - keysA:
                self.setColor(path+(key,), 0, bad, tree=1)
            for key in keysA & keysB:
                self.compare(a[key], b[key], path+(key,))
        elif isinstance(a, (list, tuple)) and isinstance(b, (list, tuple)):
            for i in range(max(len(a), len(b))):
                if len(a) <= i:
                    self.setColor(path+(i,), 0, bad, tree=1)
                elif len(b) <= i:
                    self.setColor(path+(i,), 0, bad, tree=0)
                else:
                    self.compare(a[i], b[i], path+(i,))
        elif isinstance(a, np.ndarray) and isinstance(b, np.ndarray) and a.shape == b.shape:
            tableNodes = [tree.nodes[path].child(0) for tree in self.trees]
            if a.dtype.fields is None and b.dtype.fields is None:
                eq = self.compareArrays(a, b)
                if not np.all(eq):
                    for n in tableNodes:
                        n.setBackground(0, fn.mkBrush(bad))
                #for i in np.argwhere(~eq):
            else:
                if a.dtype == b.dtype:
                    for i, k in enumerate(a.dtype.fields.keys()):
                        eq = self.compareArrays(a[k], b[k])
                        if not np.all(eq):
                            for n in tableNodes:
                                n.setBackground(0, fn.mkBrush(bad))
                        #for j in np.argwhere(~eq):
        # dict: compare keys, then values where keys match
        # list:
        # array: compare elementwise for same shape

    def compareArrays(self, a, b):
        """Return an elementwise boolean mask of where *a* and *b* agree.

        NaN (and the int sentinel NaN maps to when cast) entries are treated
        as equal when present in both arrays at the same positions.
        """
        intnan = -9223372036854775808  # happens when np.nan is cast to int
        anans = np.isnan(a) | (a == intnan)
        bnans = np.isnan(b) | (b == intnan)
        eq = anans == bnans
        mask = ~anans
        # BUGFIX: np.allclose returns a single scalar bool, which marked every
        # non-nan element identically; np.isclose gives the elementwise mask
        # this method is documented to return.
        eq[mask] = np.isclose(a[mask], b[mask])
        return eq

    def setColor(self, path, column, color, tree=None):
        """Paint the background of the item at *path*/*column* in one or both trees."""
        brush = fn.mkBrush(color)
        # Color only one tree if specified.
        if tree is None:
            trees = self.trees
        else:
            trees = [self.trees[tree]]
        for tree in trees:
            item = tree.nodes[path]
            item.setBackground(column, brush)

    def _compare(self, a, b):
        """
        Recursively assert that structures *a* and *b* are identical.

        Raises AssertionError on the first mismatch, or NotImplementedError
        for leaf values that cannot be compared with ``==``.
        """
        # BUGFIX: this body previously referenced the undefined names ``info``
        # and ``expect`` and a nonexistent ``compare_results`` method, so any
        # call raised NameError.
        info, expect = a, b
        # Check test structures are the same
        assert type(info) is type(expect)
        if hasattr(info, '__len__'):
            assert len(info) == len(expect)
        if isinstance(info, dict):
            for k in info:
                assert k in expect
            for k in expect:
                assert k in info
                self._compare(info[k], expect[k])
        elif isinstance(info, list):
            for i in range(len(info)):
                self._compare(info[i], expect[i])
        elif isinstance(info, np.ndarray):
            assert info.shape == expect.shape
            assert info.dtype == expect.dtype
            if info.dtype.fields is None:
                intnan = -9223372036854775808  # happens when np.nan is cast to int
                inans = np.isnan(info) | (info == intnan)
                enans = np.isnan(expect) | (expect == intnan)
                assert np.all(inans == enans)
                mask = ~inans
                assert np.allclose(info[mask], expect[mask])
            else:
                for k in info.dtype.fields.keys():
                    self._compare(info[k], expect[k])
        else:
            try:
                assert info == expect
            except Exception:
                raise NotImplementedError("Cannot compare objects of type %s" % type(info))
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@widgets@DiffTreeWidget.py@.PATH_END.py
|
{
"filename": "test_history_aware_retriever.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/tests/unit_tests/chains/test_history_aware_retriever.py",
"type": "Python"
}
|
from langchain_core.documents import Document
from langchain_core.language_models import FakeListLLM
from langchain_core.prompts import PromptTemplate
from langchain.chains import create_history_aware_retriever
from tests.unit_tests.retrievers.parrot_retriever import FakeParrotRetriever
def test_create() -> None:
    """The history-aware retriever passes the raw question through when the
    chat history is empty/absent, and retrieves on the LLM-rephrased query
    when history is present."""
    llm_answer = "I know the answer!"
    fake_llm = FakeListLLM(responses=[llm_answer])
    parrot = FakeParrotRetriever()
    rephrase_prompt = PromptTemplate.from_template("hi! {input} {chat_history}")
    chain = create_history_aware_retriever(fake_llm, parrot, rephrase_prompt)

    # No history: the input question goes straight to the retriever.
    expected_output = [Document(page_content="What is the answer?")]
    assert chain.invoke({"input": "What is the answer?", "chat_history": []}) == expected_output
    assert chain.invoke({"input": "What is the answer?"}) == expected_output

    # With history: the LLM-generated query is retrieved instead.
    expected_output = [Document(page_content=llm_answer)]
    assert (
        chain.invoke({"input": "What is the answer?", "chat_history": ["hi", "hi"]})
        == expected_output
    )
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@tests@unit_tests@chains@test_history_aware_retriever.py@.PATH_END.py
|
{
"filename": "Results.py",
"repo_name": "TommasoRonconi/galapy",
"repo_path": "galapy_extracted/galapy-main/galapy/sampling/Results.py",
"type": "Python"
}
|
""" Implements the class used for storing sampling results.
"""
#############################################################################################
# External imports
import warnings
import numpy
import pickle
from collections.abc import MutableMapping as MM
#############################################################################################
# Internal imports
import galapy
from galapy.Galaxy import GXY, PhotoGXY
from galapy.Noise import Noise, CalibrationError
from galapy.Handlers import ModelParameters, GXYParameters, NoiseParameters
from galapy.sampling.Observation import Observation
from galapy.sampling.Sampler import Sampler
from galapy.internal.utils import now_string, func_scalar_or_array, quantile_weighted, get_credible_interval, find_nearest
from galapy.io.hdf5 import write_to_hdf5, load_from_hdf5
#############################################################################################
def generate_output_base ( out_dir = '', name = '' ) :
    """Build (and if necessary create) the output path prefix for a run.

    Parameters
    ----------
    out_dir : string
        Position in the filesystem where the results will be stored.
        Default to the directory where the command has been called.
    name : string
        A string identifying the run that will be saved.
        By default it will use a string with the current date+time

    Returns
    -------
    outbase : string
        ``os.path.join(out_dir, name)`` after defaults are applied.
    """
    import os
    # If no output directory is passed, set it to the current working directory.
    # BUGFIX: test for None *before* calling len() — the original evaluated
    # ``len(out_dir)`` first and raised TypeError for ``out_dir=None``.
    if out_dir is None or len( out_dir ) == 0 :
        out_dir = os.getcwd()
    # First check whether the required output directory exists,
    # if not it will be created in the correct position of the file-system
    if not os.path.isdir( out_dir ) :
        try :
            # creates multi-level subdirs. similarly to the *Nix command `mkdir -p`
            # (while os.mkdir() only allows to create the highest level directory)
            os.makedirs( out_dir )
        except OSError:
            # best-effort: creation failure is reported but not fatal here
            print ( f"Creation of the directory {out_dir} failed" )
    # If no name for the current run has been provided, set it to
    # a string with the current date+time: 'year+month+day+hour+minute'
    if name is None or len(name) == 0 :
        name = now_string()
    # Set the string with the output base name
    outbase = os.path.join( out_dir, name )
    return outbase
def dump_results ( model, handler, data, sampler,
                   noise = None, outbase = '',
                   method = 'hdf5', lightweight = False ) :
    """Validate the run's components and store the sampling results to disk.

    Parameters
    ----------
    model : galapy.Galaxy.GXY
        model architecture used in the run
    handler : galapy.Handlers.ModelParameters
        parameterisation used in the run
    data : galapy.sampling.Observation.Observation
        observed fluxes used in the run
    sampler : galapy.sampling.Sampler.Sampler
        sampler holding the samples/loglikelihoods/weights
    noise : galapy.Noise.Noise, optional
        eventual noise model
    outbase : string
        output prefix; defaults to a date+time string when empty
    method : string
        'hdf5'/'h5' (default) or 'pickle'
    lightweight : bool
        when True (hdf5 only) store the raw samples instead of a fully
        processed ``Results`` instance

    Returns
    -------
    outbase : string
    """
    from time import time
    if len(outbase) == 0 :
        # BUGFIX: was ``nowstring()`` (undefined name -> NameError); the helper
        # imported at module level is ``now_string``
        outbase = now_string()
    if not isinstance( sampler, Sampler ) :
        raise ValueError( "Argument ``sampler`` should be an instance of type Sampler" )
    sample_res, sample_logl, sample_weights = sampler.return_samples_logl_weights()
    if not isinstance( model, GXY ) :
        raise ValueError( "Argument ``model`` should be an instance of type GXY" )
    if not isinstance( handler, ModelParameters ) :
        raise ValueError( "Argument ``handler`` should be an instance of type ModelParameters" )
    if not isinstance( data, Observation ) :
        raise ValueError( "Argument ``data`` should be an instance of type Observation" )
    if noise is not None :
        if not isinstance( noise, Noise ) :
            raise ValueError( "Argument ``noise`` should be an instance of type Noise" )
    if lightweight :
        if method in { 'hdf5', 'h5' } :
            outfile = '_'.join( [ outbase, sampler.which_sampler, 'results_light.galapy.hdf5' ] )
            write_to_hdf5(
                outfile,
                metadata = dict(
                    storage_method = 'light',
                    galapy_version = galapy.__version__,
                ),
                hard = True,
                results = {
                    'model' : model.dump(),
                    'handler' : handler.dump(),
                    'sample_res' : sample_res,
                    'sample_logl' : sample_logl,
                    'sample_weights' : sample_weights,
                    'data' : None if data is None else data.dump(),
                    'noise' : None if noise is None else noise.dump(),
                    'sampler_name' : sampler.which_sampler,
                }
            )
            return outbase
        else :
            warnings.warn(
                "Lightweight (`lightweight=True`) dumping only available with `method = 'hdf5'`. "
                "Falling back to HDF5 output."
            )
    print( 'Now processing the sampling results, this might require some time ...' )
    tstart = time()
    results = Results( model, handler,
                       sample_res, sample_logl,
                       sample_weights = sample_weights,
                       data = data, noise = noise,
                       sampler_name = sampler.which_sampler )
    ndur = time() - tstart
    print( f'... done in {ndur} seconds.' )
    if method == 'pickle' :
        # Pickle the Results instance
        with open( '_'.join( [ outbase, sampler.which_sampler,
                               'results.galapy.pickle' ] ), 'wb' ) as pfw :
            pickle.dump( results, pfw )
    if method in { 'hdf5', 'h5' } :
        outfile = '_'.join( [ outbase, sampler.which_sampler, 'results.galapy.hdf5' ] )
        write_to_hdf5(
            outfile,
            metadata = dict(
                storage_method = 'heavy',
                galapy_version = galapy.__version__,
            ),
            hard = True,
            results = results.dump()
        )
    print( f'Results stored in files with prefix: {outbase}' )
    return outbase
def load_results ( infile, method = None, lightweight = None ) :
    """Load a ``Results`` instance stored by ``dump_results``.

    Parameters
    ----------
    infile : str
        path of the stored file
    method : str, optional
        'h5'/'hdf5' or 'pickle'; inferred from the file extension when None
    lightweight : bool, optional
        whether the file used the 'light' storage method; when None it is read
        from the file's metadata

    Returns
    -------
    res : Results or None
        ``None`` when ``method`` matches neither storage format.
    """
    if method is None :
        # infer the storage method from the file extension
        method = infile.split('.')[-1]
    res = None
    if method in { 'h5', 'hdf5' } :
        res_dict = load_from_hdf5( infile )
        if lightweight is None :
            lightweight = res_dict['metadata']['storage_method'] == 'light'
        if lightweight :
            from time import time
            print( 'Now processing the sampling results, this might require some time ...' )
            tstart = time()
            res = Results(
                # a PhotoGXY dump is recognised by its 'pms_kwargs' entry
                model = (
                    PhotoGXY.load(res_dict['results']['model'])
                    if 'pms_kwargs' in res_dict['results']['model']
                    else GXY.load(res_dict['results']['model'])
                ),
                handler = ModelParameters.load( res_dict['results']['handler'] ),
                sample_res = res_dict['results']['sample_res'],
                sample_logl = res_dict['results']['sample_logl'],
                # older light dumps may lack weights/data entries
                sample_weights = (
                    res_dict['results']['sample_weights']
                    if 'sample_weights' in res_dict['results']
                    else None
                ),
                data = (
                    Observation.load( res_dict['results']['data'] )
                    if 'data' in res_dict['results']
                    else None
                ),
                noise = (
                    CalibrationError.load( res_dict['results']['noise'] )
                    if res_dict['results']['noise'] is not None
                    else None
                ),
                sampler_name = res_dict['results']['sampler_name']
            )
            ndur = time() - tstart
            print( f'... done in {ndur} seconds.' )
        else :
            res = Results.load( res_dict['results'] )
        return res
    if method == 'pickle' :
        # NOTE(review): pickle.load on untrusted files can execute arbitrary
        # code — only load files you produced yourself.
        with open( infile, 'rb' ) as pfr :
            res = pickle.load( pfr )
    return res
#############################################################################################
class Results () :
def __init__ ( self, model, handler, sample_res, sample_logl,
               sample_weights = None, data = None, noise = None,
               sampler_name = 'dynesty' ) :
    """ A class for storing the results of a sampling run.
    Parameters
    ----------
    model : galapy.Galaxy.GXY
        An instance of type GXY (or derived). It stores the model's architecture
        used for running sampling.
    handler : galapy.Handlers.ModelParameters
        An instance ot type ModelParameters with the parameterisation used in
        the sampling run.
    sample_res : numpy.ndarray
        the matrix containing all the samples of the run
    sample_logl : numpy.ndarray
        1D array with the loglikelihood values corresponding to the samples
    sample_weights : numpy.ndarray
        (Optional) 1D array with the weight of each sample in the run.
        Default is ``None``, in which case the array will be padded with ones
        (i.e. all samples have the same weight)
    data : galapy.sampling.Observation.Observation
        (Optional) An instance of type ``Observation`` with the fluxes
        measurements used in the sampling run
    noise : galapy.Noise.Noise
        (Optional) An instance of type ``Noise`` with
        the eventual noise model used in the sampling run
    sampler_name : str
        Which sampler has been used in the sampling run (i.e. 'dynesty' or 'emcee',
        default is 'dynesty')
    """
    # Store the model architecture
    # (each component may be passed either as a live instance or as the
    #  dict-like dump produced by its ``dump()`` method)
    if not isinstance(model, GXY) and not isinstance( model, MM ) :
        raise AttributeError(
            'Attribute "model" should be an instance of type ``GXY``'
        )
    if isinstance( model, MM ) :
        self._mod = model
        model = self.get_model()
    else :
        self._mod = model.dump()
    # Store the parameters specs
    if not isinstance(handler, ModelParameters) and not isinstance( handler, MM ) :
        raise AttributeError(
            'Attribute "handler" should be an instance of type ``ModelParameters``'
        )
    if isinstance( handler, MM ) :
        self._han = handler
        handler = self.get_handler()
    else :
        self._han = handler.dump()
    if sample_weights is None :
        # no weights provided: all samples count equally
        sample_weights = numpy.ones_like( sample_logl )
    # Store the observation
    self.Ndof = 1
    self._obs = None
    if data is not None :
        if not isinstance(data,Observation) and not isinstance( data, MM ) :
            raise AttributeError(
                'Attribute "data" should be an instance of type ``Observation``'
            )
        if isinstance( data, MM ) :
            self._obs = data
            data = self.get_observation()
        else :
            self._obs = data.dump()
        # degrees of freedom = number of bands minus free parameters
        self.Ndof = len(data.pms) - len(handler.par_free)
    # Store the noise model
    self._noise = None
    if noise is not None :
        if not isinstance( noise, Noise ) and not isinstance( noise, MM ) :
            raise AttributeError(
                'Attribute "noise" should be an instance of type ``Noise``'
            )
        if isinstance( noise, MM ) :
            self._noise = noise
            noise = self.get_noise()
        else :
            self._noise = noise.dump()
    # Store the sampler's name and specs
    self.sampler = sampler_name
    self.ndim = len( handler.par_free )
    self.size = len(sample_res)
    if self.size != len(sample_logl) or self.size != len(sample_weights):
        raise RuntimeError(
            'Arguments sample_res, sample_logl and sample_weights should have same length'
        )
    self.params = []
    self.logl = numpy.asarray( sample_logl )
    self.samples = numpy.asarray( sample_res )
    self.weights = numpy.asarray( sample_weights )
    # boolean mask selecting samples with non-zero weight
    self.wnot0 = ( self.weights > 0. )
    # per-sample derived quantities, filled in the loop below
    self.SED = numpy.empty(shape=(self.size,
                                  *model.wl().shape))
    self.Mstar = numpy.empty(shape=(self.size,))
    self.Mdust = numpy.empty(shape=(self.size,))
    self.Mgas = numpy.empty(shape=(self.size,))
    self.Zstar = numpy.empty(shape=(self.size,))
    self.Zgas = numpy.empty(shape=(self.size,))
    self.SFR = numpy.empty(shape=(self.size,))
    self.TMC = numpy.empty(shape=(self.size,))
    self.TDD = numpy.empty(shape=(self.size,))
    for i, par in enumerate(sample_res) :
        self.params += [handler.return_nested(par)['galaxy']]
        try :
            model.set_parameters( **self.params[-1] )
        except RuntimeError :
            # invalid parameter combination: flag every derived quantity
            # for this sample with -inf and move on
            self.SED[i] = -numpy.inf * numpy.ones_like(model.wl())
            self.Mstar[i] = -numpy.inf
            self.Mdust[i] = -numpy.inf
            self.Mgas[i] = -numpy.inf
            self.Zstar[i] = -numpy.inf
            self.Zgas[i] = -numpy.inf
            self.SFR[i] = -numpy.inf
            self.TMC[i] = -numpy.inf
            self.TDD[i] = -numpy.inf
            continue
        age = model.age
        self.SED[i] = model.get_SED()
        self.Mstar[i] = model.sfh.Mstar(age)
        self.Mdust[i] = model.sfh.Mdust(age)
        self.Mgas[i] = model.sfh.Mgas(age)
        self.Zstar[i] = model.sfh.Zstar(age)
        self.Zgas[i] = model.sfh.Zgas(age)
        self.SFR[i] = model.sfh(age)
        self.TMC[i] = model.ism.mc.T
        self.TDD[i] = model.ism.dd.T
def dump ( self ) :
    """ Returns a dictionary representation of the instance, suitable for
    serialisation (``Results.load`` is the inverse operation).
    """
    return dict(
        # Models' architecture
        model = self._mod,
        handler = self._han,
        data = self._obs,
        noise = self._noise,
        # Sampling run hyperparameters
        sampler_name = self.sampler,
        size = self.size, Ndof = self.Ndof,
        # Sampling run stored quantities
        logl = self.logl,
        samples = self.samples,
        weights = self.weights,
        wnot0 = self.wnot0,
        # Derived quantities
        SED = self.SED,
        Mstar = self.Mstar,
        Mdust = self.Mdust,
        Mgas = self.Mgas,
        Zstar = self.Zstar,
        Zgas = self.Zgas,
        SFR = self.SFR,
        TMC = self.TMC,
        TDD = self.TDD,
    )
@classmethod
def load ( cls, dictionary ) :
    """Re-build a ``Results`` instance from a ``Results.dump`` dictionary.

    Parameters
    ----------
    dictionary : dict
        the mapping returned by ``Results.dump``

    Returns
    -------
    ret : Results
    """
    # build object (samples are injected afterwards, so pass empty lists)
    ret = cls(
        model = dict( dictionary['model'] ),
        handler = dict( dictionary['handler'] ),
        # BUGFIX: ``dump`` stores ``data = None`` when no observation was
        # provided, and ``dict(None)`` raises TypeError; guard it like 'noise'
        data = ( dict( dictionary['data'] )
                 if dictionary['data'] is not None
                 else None ),
        noise = ( dict( dictionary['noise'] )
                  if dictionary['noise'] is not None
                  else None ),
        sampler_name = dictionary['sampler_name'],
        sample_res = [],
        sample_logl = [],
        sample_weights = []
    )
    # Sampling run stored quantities
    ret.logl = dictionary['logl']
    ret.samples = dictionary['samples']
    ret.weights = dictionary['weights']
    ret.wnot0 = dictionary['wnot0']
    # Additional hyperparameters
    ret.Ndof = dictionary['Ndof']
    ret.size = dictionary['size']
    # Derived quantities
    ret.SED = dictionary['SED']
    ret.Mstar = dictionary['Mstar']
    ret.Mdust = dictionary['Mdust']
    ret.Mgas = dictionary['Mgas']
    ret.Zstar = dictionary['Zstar']
    ret.Zgas = dictionary['Zgas']
    ret.SFR = dictionary['SFR']
    ret.TMC = dictionary['TMC']
    ret.TDD = dictionary['TDD']
    # Compute parameters' dictionaries
    handler = ret.get_handler()
    for i, par in enumerate(ret.samples) :
        ret.params += [handler.return_nested(par)['galaxy']]
    return ret
def get_stored_quantities ( self ) :
    """ Returns a list with all the quantities stored in the instance
    """
    # iterating the instance dict yields its attribute names
    return [ attribute for attribute in self.__dict__ ]
def get_residuals ( self, which_model = 'bestfit', standardised = True ) :
    """Compute data-minus-model residuals for a chosen summary model.

    Parameters
    ----------
    which_model : str
        one among ('bestfit', 'mean', 'median'); invalid choices fall back
        to 'bestfit' with a warning
    standardised : bool
        when True divide the residuals by the (noise-adjusted) errors

    Returns
    -------
    : numpy.ndarray
        the (optionally standardised) residuals
    """
    _gxy = self.get_model()
    _obs = self.get_observation()
    _han = self.get_handler()
    if which_model == 'bestfit' :
        params = self.get_bestfit( 'samples' )
    elif which_model == 'mean' :
        params = self.get_mean( 'samples' )
    elif which_model == 'median' :
        # NOTE(review): uses get_quantile's default (0.5), i.e. the median
        params = self.get_quantile( 'samples' )
    else :
        warnings.warn( 'Choice invalid, falling back to default choice (="bestfit")' )
        params = self.get_bestfit( 'samples' )
    nested = _han.return_nested(params)
    _gxy.set_parameters( **nested['galaxy'] )
    model = _gxy.photoSED()
    data = _obs.fluxes
    if standardised :
        error = _obs.errors
        if hasattr(self, '_noise') and self._noise is not None :
            # apply the stored noise model to the nominal errors
            _noi = self.get_noise()
            _noi.set_parameters( **nested['noise'] )
            error = _noi.apply( error, model )
        return ( data - model ) / error
    return data - model
def get_chi2 ( self, which_model = 'bestfit', reduced = True ) :
    """Return the chi-squared of the chosen model, optionally reduced by Ndof.
    """
    standardised_residuals = self.get_residuals( which_model, standardised = True )
    scale = 1.
    if reduced :
        scale /= self.Ndof
    return numpy.sum( standardised_residuals**2 ) * scale
def get_bestfit ( self, key ) :
    """ Returns the bestfit value of the stored quantity corresponding
    to the input key
    Parameters
    ----------
    key : str or str-sequence
        if a string, it should name one of the stored quantities
        if a list of strings, all the strings in the list will be
        matched.
    Returns
    -------
    : scalar or ndarray
        depending on the input ``key``.
    """
    # bestfit = sample with the maximum loglikelihood (argmax takes the
    # first occurrence in case of ties)
    idmax = self.logl.argmax()
    return func_scalar_or_array(
        var = key,
        function = lambda k : self.__dict__[k][idmax]
    )
def get_mean ( self, key ) :
    """ Returns the weighted mean value of the stored quantity
    corresponding to the input key
    Parameters
    ----------
    key : str or str-sequence
        if a string, it should name one of the stored quantities
        if a list of strings, all the strings in the list will be
        matched.
    Returns
    -------
    : scalar or ndarray
        depending on the input ``key``.
    """
    # average only over samples with non-zero weight (mask ``wnot0``)
    return func_scalar_or_array(
        var = key,
        function = lambda k : numpy.average( self.__dict__[k][self.wnot0],
                                             weights = self.weights[self.wnot0],
                                             axis = 0 )
    )
def get_std ( self, key ) :
    """ Returns the weighted standard deviation value of the stored
    quantity corresponding to the input key
    Parameters
    ----------
    key : str or str-sequence
        if a string, it should name one of the stored quantities
        if a list of strings, all the strings in the list will be
        matched.
    Returns
    -------
    : scalar or ndarray
        depending on the input ``key``.
    """
    # sqrt of the weighted mean squared deviation about the weighted mean,
    # restricted to samples with non-zero weight
    return func_scalar_or_array(
        var = key,
        function = lambda k : numpy.sqrt(
            numpy.average( ( self.__dict__[k][self.wnot0] - self.get_mean( k ) )**2,
                           weights = self.weights[self.wnot0], axis = 0 )
        )
    )
def get_quantile ( self, key, quantile = 0.5 ) :
    """ Returns the weighted quantile of the stored quantity
    corresponding to the input key
    Parameters
    ----------
    key : str or str-sequence
        if a string, it should name one of the stored quantities
        if a list of strings, all the strings in the list will be
        matched.
    quantile : float or sequence of floats
        which quantile(s) to compute (default 0.5, i.e. the median)
    Returns
    -------
    : scalar or ndarray
        depending on the input ``key``.
    """
    # weighted quantile over the non-zero-weight samples
    return func_scalar_or_array(
        var = key,
        function = lambda k : quantile_weighted( self.__dict__[ k ][self.wnot0],
                                                 quantile,
                                                 weights = self.weights[self.wnot0],
                                                 axis = 0 )
    )
def get_median ( self, key ) :
    """Returns the median of a stored quantity.
    Shortcut for ``Results.get_quantile( key, quantile=0.5 )``.
    """
    median_quantile = 0.5
    return self.get_quantile( key, quantile = median_quantile )
def get_credible_interval ( self, key, percent = 0.68, centre = 'bestfit' ) :
    """Returns the credible interval around some position enclosing a user-defined
    probability integral.
    Automatically accounts for upper or lower limits on some parameter.
    If an interval can be defined it returns the lower and upper distances from
    the centre of the interval.
    If only upper/lower limits can be defined, it returns the position of the given limit.

    Parameters
    ----------
    key : str
        the name of one of the stored quantities.
    percent : float
        (Optional, default = 0.68) probability enclosed by the interval
    centre : str
        (Optional, default = 'bestfit') one among ('bestfit', 'mean', 'median'),
        where to define the centre of the interval.
        Note that, if this is set to 'median', and percent=0.5, this is equivalent
        to call ``Results.get_quantile`` with the argument ``quantile=(0.25, 0.75)``

    Returns
    -------
    low, upp : tuple
        If the interval is completely defined by the samples, returns the credible
        limits around the centre so that the integral of the posterior between
        ``centre-low`` and ``centre+upp`` is equal to ``percent``.
        In the case only upper/lower limits can be defined it returns either
        ``(-numpy.inf, upp)`` for an upper limit, so that the integral between ``-inf``
        and ``upp`` is equal to ``percent``, or ``(low, +numpy.inf)`` for a lower limit,
        so that the integral between ``-inf`` and ``low`` is equal to ``1-percent``.

    Raises
    ------
    RuntimeError
        if ``centre`` is not one of ('bestfit', 'mean', 'median').
    """
    # Samples of the requested quantity restricted to the wnot0 selection
    # (presumably the non-zero-weight samples — TODO confirm).
    samples = self.__dict__[key][self.wnot0]
    # Pick the index of the interval centre within ``samples``.
    if centre == 'bestfit' :
        idcentre = self.logl[self.wnot0].argmax()
    elif centre == 'mean' :
        idcentre = find_nearest( samples, self.get_mean( key ) )
    elif centre == 'median' :
        idcentre = find_nearest( samples, self.get_median( key ) )
    else :
        raise RuntimeError(
            "``centre`` argument should be one among ('bestfit', 'mean', 'median')"
        )
    # get limits from the weighted distribution
    # (module-level helper sharing this method's name)
    low, upp = get_credible_interval(
        samples, idcentre, percent, self.weights[self.wnot0]
    )
    # if upper-limit is found, return upper-limited indefinite interval
    if low is None :
        return -numpy.inf, upp
    # if lower-limit is found, return lower-limited indefinite interval
    if upp is None :
        return low, +numpy.inf
    # return lower and upper credibility distances from centre
    return samples[idcentre]-low, upp-samples[idcentre]
def get_model ( self ) :
    """ Instantiates a model corresponding to the one used for sampling.

    Returns
    -------
    : galapy.Galaxy.GXY
        a ``PhotoGXY`` when the stored model dump contains photometric-system
        keyword arguments, a plain ``GXY`` otherwise.
    """
    # A 'pms_kwargs' entry marks a photometric-model dump.
    loader = PhotoGXY.load if 'pms_kwargs' in self._mod else GXY.load
    return loader( self._mod )
def get_handler ( self ) :
    """ Instantiates a handler corresponding to the one used for sampling.

    Returns
    -------
    : galapy.GalaxyParameters.ModelParameters
        rebuilt from the stored handler dump ``self._han``.
    """
    return ModelParameters.load( self._han )
def get_observation ( self ) :
    """ Instantiates a dataset corresponding to the one used for sampling.

    Returns
    -------
    : galapy.sampling.Observation.Observation
        or ``None`` (with a warning) when no observation was stored.
    """
    # Guard clause: nothing stored, warn and bail out.
    if self._obs is None :
        warnings.warn( "The current instance has no Observation stored, passing None" )
        return None
    return Observation.load( self._obs )
def get_noise ( self ) :
    """ Instantiates a noise model corresponding to the one used for sampling.

    Returns
    -------
    : galapy.sampling.Noise.Noise
        or ``None`` (with a warning) when no noise model was stored.
    """
    # hasattr guard: presumably for result files saved before the
    # ``_noise`` attribute was introduced — TODO confirm.
    if hasattr(self, '_noise') and self._noise is not None :
        return CalibrationError.load( self._noise )
    else :
        warnings.warn( "The current instance has no Noise stored, passing None" )
        return None
def get_sampling_params ( self ) :
    """ Instantiates a handler corresponding to the one used for sampling.

    Returns
    -------
    : galapy.GalaxyParameters.GXYParameters
    """
    # NOTE(review): delegates to ``get_handler``, which is documented as
    # returning a ModelParameters instance — confirm the return type
    # documented here.
    return self.get_handler()
#############################################################################################
|
TommasoRonconiREPO_NAMEgalapyPATH_START.@galapy_extracted@galapy-main@galapy@sampling@Results.py@.PATH_END.py
|
{
"filename": "test_launchpad_244811.py",
"repo_name": "brandon-rhodes/pyephem",
"repo_path": "pyephem_extracted/pyephem-master/ephem/tests/test_launchpad_244811.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import ephem
# Make sure that a series of next-risings does not keep returning the
# same time over and over again.
class Launchpad244811Tests(unittest.TestCase):
    """Regression test: successive next-rising calls must advance the date
    instead of returning the same time over and over again."""

    def runTest(self):
        observer = ephem.city("Boston")
        observer.pressure = 1010.0  # undo pressure auto-adjustment
        mars = ephem.Mars()
        cur_date = ephem.Date("2009/6/29 07:00:00")
        # Each rising must land on the next day's expected time.
        for expected in ('2009/6/30 06:17:37',
                         '2009/7/1 06:15:45',
                         '2009/7/2 06:13:53'):
            cur_date = observer.next_rising(mars, start=cur_date)
            self.assertEqual(str(cur_date), expected)
|
brandon-rhodesREPO_NAMEpyephemPATH_START.@pyephem_extracted@pyephem-master@ephem@tests@test_launchpad_244811.py@.PATH_END.py
|
{
"filename": "_showlegend.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/pie/_showlegend.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowlegendValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``pie.showlegend`` property."""

    def __init__(self, plotly_name="showlegend", parent_name="pie", **kwargs):
        # Pull the edit type out of kwargs first so a caller-supplied value
        # wins over the "style" default.
        edit_type = kwargs.pop("edit_type", "style")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@pie@_showlegend.py@.PATH_END.py
|
{
"filename": "_textcasesrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnelarea/insidetextfont/_textcasesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcasesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Src validator for ``funnelarea.insidetextfont.textcasesrc``."""

    def __init__(
        self,
        plotly_name="textcasesrc",
        parent_name="funnelarea.insidetextfont",
        **kwargs,
    ):
        # Caller-supplied edit_type takes precedence over the "none" default.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnelarea@insidetextfont@_textcasesrc.py@.PATH_END.py
|
{
"filename": "copy_injection_recovery.py",
"repo_name": "ThibeauWouters/TurboPE-BNS",
"repo_path": "TurboPE-BNS_extracted/TurboPE-BNS-main/injections/outdir_NRTv2/injection_77/copy_injection_recovery.py",
"type": "Python"
}
|
"""
Idea: try different learning rate schemes to try and fix the injections
"""
import psutil
p = psutil.Process()
p.cpu_affinity([0])
import os
os.environ['CUDA_VISIBLE_DEVICES'] = "2"
os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.10"
import numpy as np
import argparse
# Regular imports
import argparse
import copy
import numpy as np
from astropy.time import Time
import time
import shutil
import json
import jax
jax.config.update("jax_enable_x64", True)
import jax.numpy as jnp
from jimgw.jim import Jim
from jimgw.single_event.detector import H1, L1, V1
from jimgw.single_event.likelihood import HeterodynedTransientLikelihoodFD, TransientLikelihoodFD
from jimgw.single_event.waveform import RippleTaylorF2, RippleIMRPhenomD_NRTidalv2
from jimgw.prior import Uniform, Composite
import utils # our plotting and postprocessing utilities script
import optax
# Names of the parameters and their ranges for sampling parameters for the injection
NAMING = ['M_c', 'q', 's1_z', 's2_z', 'lambda_1', 'lambda_2', 'd_L', 't_c', 'phase_c', 'cos_iota', 'psi', 'ra', 'sin_dec']
# Uniform prior bounds [low, high] for each parameter in NAMING.
# Angles (phase_c, psi, ra) are in radians; iota and dec are sampled through
# cos_iota and sin_dec; d_L presumably in Mpc — TODO confirm units.
PRIOR = {
    "M_c": [0.8759659737275101, 2.6060030916165484],
    "q": [0.5, 1.0],
    "s1_z": [-0.05, 0.05],
    "s2_z": [-0.05, 0.05],
    "lambda_1": [0.0, 5000.0],
    "lambda_2": [0.0, 5000.0],
    "d_L": [30.0, 300.0],
    "t_c": [-0.1, 0.1],
    "phase_c": [0.0, 2 * jnp.pi],
    "cos_iota": [-1.0, 1.0],
    "psi": [0.0, jnp.pi],
    "ra": [0.0, 2 * jnp.pi],
    "sin_dec": [-1, 1]
}
################
### ARGPARSE ###
################
# TODO save these into a new file
def get_parser(**kwargs):
    """Build the argument parser for the injection-recovery script.

    Parameters
    ----------
    **kwargs :
        Only ``add_help`` is read (default True); it is forwarded to
        ``argparse.ArgumentParser``.

    Returns
    -------
    argparse.ArgumentParser
        Parser holding all the script's command-line options.
    """
    add_help = kwargs.get("add_help", True)
    parser = argparse.ArgumentParser(
        description="Perform an injection recovery.",
        add_help=add_help,
    )
    # TODO os does not use them
    # parser.add_argument(
    #     "--GPU-device",
    #     type=int,
    #     default=0,
    #     help="Select GPU index to use.",
    # )
    # parser.add_argument(
    #     "--GPU-memory-fraction",
    #     type=float,
    #     default=0.5,
    #     help="Select percentage of GPU memory to use.",
    # )
    parser.add_argument(
        "--outdir",
        type=str,
        default="./outdir/",
        help="Output directory for the injection.",
    )
    # NOTE(review): argparse ``type=bool`` makes any non-empty string truthy
    # (e.g. "--load-existing-config False" is True) — confirm intended usage.
    parser.add_argument(
        "--load-existing-config",
        type=bool,
        default=False,
        help="Whether to load and redo an existing injection (True) or to generate a new set of parameters (False).",
    )
    parser.add_argument(
        "--N",
        type=str,
        default="",
        help="Number (or generically, a custom identifier) of this injection, used to locate the output directory. If an empty string is passed (default), we generate a new injection.",
    )
    parser.add_argument(
        "--SNR-threshold",
        type=float,
        default=12,
        help="Skip injections with SNR below this threshold.",
    )
    parser.add_argument(
        "--waveform-approximant",
        type=str,
        default="TaylorF2",
        help="Which waveform approximant to use. Recommended to use TaylorF2 for now, NRTidalv2 might still be a bit unstable.",
    )
    parser.add_argument(
        "--relative-binning-binsize",
        type=int,
        default=100,
        help="Number of bins for the relative binning.",
    )
    parser.add_argument(
        "--relative-binning-ref-params-equal-true-params",
        type=bool,
        default=True,
        help="Whether to set the reference parameters in the relative binning code to injection parameters.",
    )
    parser.add_argument(
        "--save-training-chains",
        type=bool,
        default=False,
        help="Whether to save training chains or not (can be very large!)",
    )
    parser.add_argument(
        "--eps-mass-matrix",
        type=float,
        default=1e-6,
        help="Overall scale factor to rescale the step size of the local sampler.",
    )
    parser.add_argument(
        "--which-local-sampler",
        type=str,
        default="MALA",
        help="Which local sampler to use.",
    )
    parser.add_argument(
        "--smart-initial-guess",
        type=bool,
        default=False,
        help="Distribute the walkers around the injected parameters. TODO change this to reference parameters found by the relative binning code.",
    )
    parser.add_argument(
        "--use-scheduler",
        type=bool,
        default=True,
        help="Use a learning rate scheduler instead of a fixed learning rate.",
    )
    parser.add_argument(
        "--stopping-criterion-global-acc",
        type=float,
        default=1.0,
        help="Stop the run once we reach this global acceptance rate.",
    )
    parser.add_argument(
        "--save-likelihood",
        type=bool,
        default=False,
        help="Whether to save the likelihood object",
    )
    parser.add_argument(
        "--tight-Mc-prior",
        type=bool,
        default=False,
        help="Whether to use a tight prior on the Mc values or not",
    )
    # # TODO this has to be implemented
    # parser.add_argument(
    #     "--autotune_local_sampler",
    #     type=bool,
    #     default=False,
    #     help="TODO Still has to be implemented! Specify whether to use autotuning for the local sampler.",
    # )
    return parser
####################
### Script setup ###
####################
def body(args):
    """
    Run an injection and recovery. To get an explanation of the hyperparameters, go to:
        - jim hyperparameters: https://github.com/ThibeauWouters/jim/blob/8cb4ef09fefe9b353bfb89273a4bc0ee52060d72/src/jimgw/jim.py#L26
        - flowMC hyperparameters: https://github.com/ThibeauWouters/flowMC/blob/ad1a32dcb6984b2e178d7204a53d5da54b578073/src/flowMC/sampler/Sampler.py#L40

    Parameters
    ----------
    args : argparse.Namespace
        Parsed script arguments (see ``get_parser``). Any attribute whose
        name matches a hyperparameter key overrides the defaults below.

    Side effects: injects a signal into H1/L1/V1, runs the sampler, and
    writes results, plots, the NF model and bookkeeping files to the
    ``{args.outdir}injection_{args.N}/`` directory.
    """
    start_time = time.time()
    # TODO move and get these as arguments
    # Deal with the hyperparameters
    naming = NAMING
    HYPERPARAMETERS = {
        "flowmc":
            {
                "n_loop_training": 400,
                "n_loop_production": 50,
                "n_local_steps": 5,
                "n_global_steps": 400,
                "n_epochs": 50,
                "n_chains": 1000,
                "learning_rate": 0.001,  # using a scheduler below
                "max_samples": 50000,
                "momentum": 0.9,
                "batch_size": 50000,
                "use_global": True,
                "logging": True,
                "keep_quantile": 0.0,
                "local_autotune": None,
                "train_thinning": 10,
                "output_thinning": 30,
                "n_sample_max": 10000,
                "precompile": False,
                "verbose": False,
                "outdir": args.outdir,
                "stopping_criterion_global_acc": args.stopping_criterion_global_acc,
                "which_local_sampler": "MALA"
            },
        "jim":
            {
                "seed": 0,
                "n_chains": 1000,
                "num_layers": 10,
                "hidden_size": [128, 128],
                "num_bins": 8,
            }
    }
    flowmc_hyperparameters = HYPERPARAMETERS["flowmc"]
    jim_hyperparameters = HYPERPARAMETERS["jim"]
    hyperparameters = {**flowmc_hyperparameters, **jim_hyperparameters}
    # TODO can I just replace this with update dict?
    # Command-line arguments override matching hyperparameter defaults.
    for key, value in args.__dict__.items():
        if key in hyperparameters:
            hyperparameters[key] = value
    ### POLYNOMIAL SCHEDULER
    if args.use_scheduler:
        print("Using polynomial learning rate scheduler")
        total_epochs = hyperparameters["n_epochs"] * hyperparameters["n_loop_training"]
        start = int(total_epochs / 10)
        start_lr = 1e-3
        end_lr = 1e-5
        power = 4.0
        schedule_fn = optax.polynomial_schedule(start_lr, end_lr, power, total_epochs-start, transition_begin=start)
        hyperparameters["learning_rate"] = schedule_fn
    print(f"Saving output to {args.outdir}")
    # Fetch waveform used
    supported_waveforms = ["TaylorF2", "NRTidalv2", "IMRPhenomD_NRTidalv2"]
    if args.waveform_approximant not in supported_waveforms:
        print(f"Waveform approximant {args.waveform_approximant} not supported. Supported waveforms are {supported_waveforms}. Changing to TaylorF2.")
        args.waveform_approximant = "TaylorF2"
    if args.waveform_approximant == "TaylorF2":
        ripple_waveform_fn = RippleTaylorF2
    elif args.waveform_approximant in ["IMRPhenomD_NRTidalv2", "NRTv2", "NRTidalv2"]:
        ripple_waveform_fn = RippleIMRPhenomD_NRTidalv2
    else:
        raise ValueError(f"Waveform approximant {args.waveform_approximant} not supported.")
    # Before main code, check if outdir is correct dir format TODO improve with sys?
    if args.outdir[-1] != "/":
        args.outdir += "/"
    outdir = f"{args.outdir}injection_{args.N}/"
    # Get the prior bounds, both as 1D and 2D arrays
    prior_ranges = jnp.array([PRIOR[name] for name in naming])
    prior_low, prior_high = prior_ranges[:, 0], prior_ranges[:, 1]
    bounds = np.array(list(PRIOR.values()))
    # Now go over to creating parameters, and potentially check SNR cutoff
    network_snr = 0.0
    print(f"The SNR threshold parameter is set to {args.SNR_threshold}")
    # Retry the injection until the network SNR passes the threshold.
    while network_snr < args.SNR_threshold:
        # Generate the parameters or load them from an existing file
        if args.load_existing_config:
            config_path = f"{outdir}config.json"
            print(f"Loading existing config, path: {config_path}")
            config = json.load(open(config_path))
        else:
            print(f"Generating new config")
            config = utils.generate_config(prior_low, prior_high, naming, args.N, args.outdir)
        key = jax.random.PRNGKey(config["seed"])
        # Save the given script hyperparams
        with open(f"{outdir}script_args.json", 'w') as json_file:
            json.dump(args.__dict__, json_file)
        # Start injections
        print("Injecting signals . . .")
        waveform = ripple_waveform_fn(f_ref=config["fref"])
        # Create frequency grid
        freqs = jnp.arange(
            config["fmin"],
            config["f_sampling"] / 2,  # maximum frequency being halved of sampling frequency
            1. / config["duration"]
        )
        # convert injected mass ratio to eta, and apply arccos and arcsin
        q = config["q"]
        eta = q / (1 + q) ** 2
        iota = float(jnp.arccos(config["cos_iota"]))
        dec = float(jnp.arcsin(config["sin_dec"]))
        # Setup the timing setting for the injection
        epoch = config["duration"] - config["post_trigger_duration"]
        gmst = Time(config["trigger_time"], format='gps').sidereal_time('apparent', 'greenwich').rad
        # Array of injection parameters
        true_param = {
            'M_c': config["M_c"],  # chirp mass
            'eta': eta,  # symmetric mass ratio 0 < eta <= 0.25
            's1_z': config["s1_z"],  # aligned spin of priminary component s1_z.
            's2_z': config["s2_z"],  # aligned spin of secondary component s2_z.
            'lambda_1': config["lambda_1"],  # tidal deformability of priminary component lambda_1.
            'lambda_2': config["lambda_2"],  # tidal deformability of secondary component lambda_2.
            'd_L': config["d_L"],  # luminosity distance
            't_c': config["t_c"],  # timeshift w.r.t. trigger time
            'phase_c': config["phase_c"],  # merging phase
            'iota': iota,  # inclination angle
            'psi': config["psi"],  # polarization angle
            'ra': config["ra"],  # right ascension
            'dec': dec  # declination
        }
        # Get the true parameter values for the plots
        truths = copy.deepcopy(true_param)
        truths["eta"] = q
        truths = np.fromiter(truths.values(), dtype=float)
        detector_param = {
            'ra': config["ra"],
            'dec': dec,
            'gmst': gmst,
            'psi': config["psi"],
            'epoch': epoch,
            't_c': config["t_c"],
        }
        print(f"The injected parameters are {true_param}")
        # Generating the geocenter waveform
        h_sky = waveform(freqs, true_param)
        # Setup interferometers
        ifos = [H1, L1, V1]
        psd_files = ["./psds/psd.txt", "./psds/psd.txt", "./psds/psd_virgo.txt"]
        # inject signal into ifos
        for idx, ifo in enumerate(ifos):
            key, subkey = jax.random.split(key)
            ifo.inject_signal(
                subkey,
                freqs,
                h_sky,
                detector_param,
                psd_file=psd_files[idx]  # note: the function load_psd actaully loads the asd
            )
        print("Signal injected")
        # Compute the SNR
        h1_snr = utils.compute_snr(H1, h_sky, detector_param)
        l1_snr = utils.compute_snr(L1, h_sky, detector_param)
        v1_snr = utils.compute_snr(V1, h_sky, detector_param)
        network_snr = np.sqrt(h1_snr**2 + l1_snr**2 + v1_snr**2)
        # If the SNR is too low, we need to generate new parameters
        if network_snr < args.SNR_threshold:
            print(f"Network SNR is less than {args.SNR_threshold}, generating new parameters")
            if args.load_existing_config:
                raise ValueError("SNR is less than threshold, but loading existing config. This should not happen!")
    print("H1 SNR:", h1_snr)
    print("L1 SNR:", l1_snr)
    print("V1 SNR:", v1_snr)
    print("Network SNR:", network_snr)
    print(f"Saving network SNR")
    with open(outdir + 'network_snr.txt', 'w') as file:
        file.write(str(network_snr))
    print("Start prior setup")
    # Priors without transformation
    if args.tight_Mc_prior:
        print("INFO: Using a tight chirp mass prior")
        true_mc = true_param["M_c"]
        Mc_prior = Uniform(true_mc - 0.1, true_mc + 0.1, naming=['M_c'])
    else:
        Mc_prior = Uniform(prior_low[0], prior_high[0], naming=['M_c'])
    q_prior = Uniform(prior_low[1], prior_high[1], naming=['q'],
                      transforms={
                          'q': (
                              'eta',
                              lambda params: params['q'] / (1 + params['q']) ** 2
                          )
                      }
                      )
    s1z_prior = Uniform(prior_low[2], prior_high[2], naming=['s1_z'])
    s2z_prior = Uniform(prior_low[3], prior_high[3], naming=['s2_z'])
    lambda_1_prior = Uniform(prior_low[4], prior_high[4], naming=['lambda_1'])
    lambda_2_prior = Uniform(prior_low[5], prior_high[5], naming=['lambda_2'])
    dL_prior = Uniform(prior_low[6], prior_high[6], naming=['d_L'])
    tc_prior = Uniform(prior_low[7], prior_high[7], naming=['t_c'])
    phic_prior = Uniform(prior_low[8], prior_high[8], naming=['phase_c'])
    # NOTE(review): the arccos(arcsin(sin(...))) composition looks like a
    # periodic wrapping of cos_iota before taking arccos — confirm against
    # the jimgw prior conventions.
    cos_iota_prior = Uniform(prior_low[9], prior_high[9], naming=["cos_iota"],
                             transforms={
                                 "cos_iota": (
                                     "iota",
                                     lambda params: jnp.arccos(
                                         jnp.arcsin(jnp.sin(params["cos_iota"] / 2 * jnp.pi)) * 2 / jnp.pi
                                     ),
                                 )
                             },
                             )
    psi_prior = Uniform(prior_low[10], prior_high[10], naming=["psi"])
    ra_prior = Uniform(prior_low[11], prior_high[11], naming=["ra"])
    sin_dec_prior = Uniform(prior_low[12], prior_high[12], naming=["sin_dec"],
                            transforms={
                                "sin_dec": (
                                    "dec",
                                    lambda params: jnp.arcsin(
                                        jnp.arcsin(jnp.sin(params["sin_dec"] / 2 * jnp.pi)) * 2 / jnp.pi
                                    ),
                                )
                            },
                            )
    # Save the prior bounds
    print("Saving prior bounds")
    utils.save_prior_bounds(prior_low, prior_high, outdir)
    # Compose the prior
    prior_list = [
        Mc_prior,
        q_prior,
        s1z_prior,
        s2z_prior,
        lambda_1_prior,
        lambda_2_prior,
        dL_prior,
        tc_prior,
        phic_prior,
        cos_iota_prior,
        psi_prior,
        ra_prior,
        sin_dec_prior,
    ]
    complete_prior = Composite(prior_list)
    # NOTE(review): overwrites the numpy ``bounds`` built from PRIOR above;
    # the earlier value is never used afterwards.
    bounds = jnp.array([[p.xmin, p.xmax] for p in complete_prior.priors])
    print("Finished prior setup")
    print("Initializing likelihood")
    if args.relative_binning_ref_params_equal_true_params:
        ref_params = true_param
        print("Using the true parameters as reference parameters for the relative binning")
    else:
        ref_params = None
        print("Will search for reference waveform for relative binning")
    ### TODO remove
    # Explicitly fix relative binning for NRTidalv2
    if args.waveform_approximant in ["IMRPhenomD_NRTidalv2", "NRTidalv2"]:
        # ## TODO this might be broken?
        # # # Explicitly set the f_min and f_max used there
        # # relbin_kwargs = {"f_min": config["fmin"], "f_max": config["f_sampling"] / 2}
        # relbin_kwargs = {}
        # # Set the reference parameters at the ideal location for not breaking relative binning
        # print("Setting the reference parameters to not break the relative binning for NRTidalv2")
        # ref_params = true_param
        # ref_params["lambda_1"] = 1.0
        # ref_params["lambda_2"] = 1.0
        print("Now, the reference parameters are: ")
        print(ref_params)
    else:
        relbin_kwargs = {}
    # NOTE(review): this unconditionally resets relbin_kwargs, making the
    # else-branch assignment above redundant.
    relbin_kwargs = {}
    likelihood = HeterodynedTransientLikelihoodFD(
        ifos,
        prior=complete_prior,
        bounds=bounds,
        n_bins = args.relative_binning_binsize,
        waveform=waveform,
        trigger_time=config["trigger_time"],
        duration=config["duration"],
        post_trigger_duration=config["post_trigger_duration"],
        ref_params=ref_params,
        **relbin_kwargs
    )
    if args.save_likelihood:
        print(f"INFO: Saving the likelihood to {outdir}")
        import pickle
        with open(f'{outdir}likelihood.pickle', 'wb') as handle:
            pickle.dump(likelihood, handle, protocol=pickle.HIGHEST_PROTOCOL)
    # Save the ref params
    utils.save_relative_binning_ref_params(likelihood, outdir)
    # Generate arguments for the local samplercd
    mass_matrix = jnp.eye(len(prior_list))
    for idx, prior in enumerate(prior_list):
        mass_matrix = mass_matrix.at[idx, idx].set(prior.xmax - prior.xmin)  # fetch the prior range
    local_sampler_arg = {'step_size': mass_matrix * args.eps_mass_matrix}  # set the overall step size
    hyperparameters["local_sampler_arg"] = local_sampler_arg
    # Create jim object
    jim = Jim(
        likelihood,
        complete_prior,
        **hyperparameters
    )
    if args.smart_initial_guess:
        n_chains = hyperparameters["n_chains"]
        n_dim = len(prior_list)
        initial_guess = utils.generate_smart_initial_guess(gmst, [H1, L1, V1], true_param, n_chains, n_dim, prior_low, prior_high)
        # Plot it
        utils.plot_chains(initial_guess, "initial_guess", outdir, truths = truths)
    else:
        initial_guess = jnp.array([])
    ### Finally, do the sampling
    jim.sample(jax.random.PRNGKey(23), initial_guess = initial_guess)
    # === Show results, save output ===
    # Print a summary to screen:
    jim.print_summary()
    # Save and plot the results of the run
    # - training phase
    name = outdir + f'results_training.npz'
    print(f"Saving samples to {name}")
    state = jim.Sampler.get_sampler_state(training = True)
    chains, log_prob, local_accs, global_accs, loss_vals = state["chains"], state["log_prob"], state["local_accs"], state["global_accs"], state["loss_vals"]
    local_accs = jnp.mean(local_accs, axis=0)
    global_accs = jnp.mean(global_accs, axis=0)
    if args.save_training_chains:
        np.savez(name, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs, loss_vals=loss_vals, chains=chains)
    else:
        np.savez(name, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs, loss_vals=loss_vals)
    utils.plot_accs(local_accs, "Local accs (training)", "local_accs_training", outdir)
    utils.plot_accs(global_accs, "Global accs (training)", "global_accs_training", outdir)
    utils.plot_loss_vals(loss_vals, "Loss", "loss_vals", outdir)
    utils.plot_log_prob(log_prob, "Log probability (training)", "log_prob_training", outdir)
    # - production phase
    name = outdir + f'results_production.npz'
    state = jim.Sampler.get_sampler_state(training = False)
    chains, log_prob, local_accs, global_accs = state["chains"], state["log_prob"], state["local_accs"], state["global_accs"]
    local_accs = jnp.mean(local_accs, axis=0)
    global_accs = jnp.mean(global_accs, axis=0)
    np.savez(name, chains=chains, log_prob=log_prob, local_accs=local_accs, global_accs=global_accs)
    utils.plot_accs(local_accs, "Local accs (production)", "local_accs_production", outdir)
    utils.plot_accs(global_accs, "Global accs (production)", "global_accs_production", outdir)
    utils.plot_log_prob(log_prob, "Log probability (production)", "log_prob_production", outdir)
    # Plot the chains as corner plots
    utils.plot_chains(chains, "chains_production", outdir, truths = truths)
    # Save the NF and show a plot of samples from the flow
    print("Saving the NF")
    jim.Sampler.save_flow(outdir + "nf_model")
    name = outdir + 'results_NF.npz'
    chains = jim.Sampler.sample_flow(10_000)
    np.savez(name, chains = chains)
    # Finally, copy over this script to the outdir for reproducibility
    shutil.copy2(__file__, outdir + "copy_injection_recovery.py")
    print("Saving the jim hyperparameters")
    jim.save_hyperparameters(outdir = outdir)
    end_time = time.time()
    runtime = end_time - start_time
    print(f"Time taken: {runtime} seconds ({(runtime)/60} minutes)")
    print(f"Saving runtime")
    with open(outdir + 'runtime.txt', 'w') as file:
        file.write(str(runtime))
    print("Finished injection recovery successfully!")
############
### MAIN ###
############
def main(given_args = None):
    """Parse command-line arguments, apply optional overrides, and run the
    injection recovery.

    Parameters
    ----------
    given_args : dict, optional
        Mapping of argument names to values overriding the parsed
        command-line arguments (default None).
    """
    args = get_parser().parse_args()
    print(given_args)
    if given_args is not None:
        args.__dict__.update(given_args)
    # An existing config can only be located when an identifier N is given.
    if args.load_existing_config and args.N == "":
        raise ValueError("If load_existing_config is True, you need to specify the N argument to locate the existing injection. ")
    separator = "------------------------------------"
    print(separator)
    print("Arguments script:")
    for arg_name, arg_value in vars(args).items():
        print(f"{arg_name}: {arg_value}")
    print(separator)
    print("Starting main code")
    # No identifier given: derive it from the structure of the output directory.
    if not args.N:
        args.N = utils.get_N(args.outdir)
    # TODO fix that os uses these
    # import os
    # os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = str(args.GPU_memory_fraction)
    # os.environ['CUDA_VISIBLE_DEVICES'] = str(args.GPU_device)
    # print(f"Running on GPU {args.GPU_device}")
    # Execute the script
    body(args)
# Script entry point.
if __name__ == "__main__":
    main()
|
ThibeauWoutersREPO_NAMETurboPE-BNSPATH_START.@TurboPE-BNS_extracted@TurboPE-BNS-main@injections@outdir_NRTv2@injection_77@copy_injection_recovery.py@.PATH_END.py
|
{
"filename": "parsnip.py",
"repo_name": "LSSTDESC/parsnip",
"repo_path": "parsnip_extracted/parsnip-main/parsnip/parsnip.py",
"type": "Python"
}
|
from tqdm import tqdm
import functools
import multiprocessing
import numpy as np
import os
import sys
from astropy.cosmology import Planck18
import astropy.table
import extinction
import sncosmo
import pkg_resources
import lcdata
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torch.utils.data import DataLoader
from .light_curve import preprocess_light_curve, grid_to_time, time_to_grid, \
SIDEREAL_SCALE
from .utils import frac_to_mag, parse_device, replace_nan_grads
from .settings import parse_settings, default_model
from .sncosmo import ParsnipSncosmoSource
class ResidualBlock(nn.Module):
    """1D residual convolutional neural network block.

    Operates on 1D sequences; the convolutions are padded so that the
    sequence length is left unchanged.

    Parameters
    ----------
    in_channels : int
        Number of channels for the input
    out_channels : int
        Number of channels for the output (must be >= ``in_channels``)
    dilation : int
        Dilation to use in the convolutions
    """
    def __init__(self, in_channels, out_channels, dilation):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        if self.out_channels < self.in_channels:
            raise Exception("out_channels must be >= in_channels.")
        # Two 3-tap dilated convolutions; padding=dilation preserves length.
        self.conv1 = nn.Conv1d(in_channels, out_channels, 3, dilation=dilation,
                               padding=dilation)
        self.conv2 = nn.Conv1d(out_channels, out_channels, 3,
                               dilation=dilation, padding=dilation)

    def forward(self, x):
        y = F.relu(self.conv1(x))
        y = self.conv2(y)
        # Residual connection: zero-pad the input channels when the output
        # has more channels than the input.
        extra_channels = self.out_channels - self.in_channels
        shortcut = F.pad(x, (0, 0, 0, extra_channels)) if extra_channels > 0 else x
        return F.relu(y + shortcut)
class Conv1dBlock(nn.Module):
    """1D convolutional neural network block (convolution followed by ReLU).

    Operates on 1D sequences; the convolution is padded so that the
    sequence length is left unchanged.

    Parameters
    ----------
    in_channels : int
        Number of channels for the input
    out_channels : int
        Number of channels for the output
    dilation : int
        Dilation to use in the convolution
    """
    def __init__(self, in_channels, out_channels, dilation):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        # 5-tap dilated convolution; padding=2*dilation preserves length.
        self.conv = nn.Conv1d(in_channels, out_channels, 5, dilation=dilation,
                              padding=2*dilation)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        return self.relu(self.conv(x))
class GlobalMaxPoolingTime(nn.Module):
    """Global max pooling over the time axis for 1D sequences.

    For Conv1d-style input of shape (batch, channels, time), this applies a
    global max over the last (time) dimension, eliminating the time
    dimension while preserving the channel dimension.

    Note: the previous docstring had channel and time swapped —
    ``torch.max(x, 2)`` reduces dimension 2, which is the time axis here.
    """
    def forward(self, x):
        # torch.max over a dimension returns (values, indices); only the
        # pooled values are needed.
        out, _ = torch.max(x, 2)
        return out
class ParsnipModel(nn.Module):
"""Generative model of transient light curves
This class represents a generative model of transient light curves. Given a set of
latent variables representing a transient, it can predict the full spectral time
series of that transient. It can also use variational inference to predict the
posterior distribution over the latent variables for a given light curve.
Parameters
----------
path : str
Path to where the model should be stored on disk.
bands : List[str]
Bands that the model uses as input for variational inference
device : str
PyTorch device to use for the model
threads : int
Number of threads to use
settings : dict
Settings for the model. Any settings specified here will override the defaults
set in settings.py
ignore_unknown_settings : bool
If True, ignore any settings that are specified that are unknown. Otherwise,
raise a KeyError if an unknown setting is specified. By default False.
"""
def __init__(self, path, bands, device='cpu', threads=8, settings={},
             ignore_unknown_settings=False):
    """Set up the model: parse settings, build band weights and color law,
    construct the network and optimizer, and move everything to *device*.

    See the class docstring for parameter descriptions.

    NOTE(review): ``settings={}`` is a mutable default argument — safe only
    if it is never mutated downstream; consider ``settings=None``.
    """
    super().__init__()
    # Parse settings
    self.settings = parse_settings(bands, settings,
                                   ignore_unknown_settings=ignore_unknown_settings)
    self.path = path
    self.threads = threads
    # Setup the device
    self.device = parse_device(device)
    torch.set_num_threads(self.threads)
    # Setup the bands (also defines the model wavelength grid used below)
    self._setup_band_weights()
    # Setup the color law. We scale this so that the color law has a B-V color of 1,
    # meaning that a coefficient multiplying the color law is the b-v color.
    color_law = extinction.fm07(self.model_wave, 3.1)
    self.color_law = torch.FloatTensor(color_law).to(self.device)
    # Setup the timing: integer offsets centred on the middle of the window.
    self.input_times = (torch.arange(self.settings['time_window'],
                                     device=self.device)
                        - self.settings['time_window'] // 2)
    # Build the model
    self._build_model()
    # Set up the training
    self.epoch = 0
    optim_kwargs = {
        'params': self.parameters(),
        'lr': self.settings['learning_rate'],
    }
    if self.settings['optimizer'].lower() == 'adam':
        self.optimizer = optim.Adam(**optim_kwargs)
    elif self.settings['optimizer'].lower() == 'sgd':
        self.optimizer = optim.SGD(momentum=self.settings['sgd_momentum'], **optim_kwargs)
    else:
        raise ValueError('Unknown optimizer "{}"'.format(self.settings['optimizer']))
    self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        self.optimizer, factor=self.settings['scheduler_factor'], verbose=True
    )
    # Send the model weights to the desired device
    self.to(self.device, force=True)
def to(self, device, force=False):
    """Send the model to the specified device

    Parameters
    ----------
    device : str
        PyTorch device
    force : bool, optional
        If True, force the model to be sent to the device even if it is there
        already (useful if only parts of the model are there), by default False
    """
    target_device = parse_device(device)
    if not force and self.device == target_device:
        # Already on that device — nothing to do.
        return
    self.device = target_device
    # Move the registered module weights.
    super().to(self.device)
    # Tensors created by hand are not tracked by nn.Module, so move each
    # of them explicitly.
    for attr in ('color_law', 'input_times',
                 'band_interpolate_locations', 'band_interpolate_weights'):
        setattr(self, attr, getattr(self, attr).to(self.device))
def save(self):
    """Save the model settings and weights to ``self.path``."""
    # Make sure the destination directory exists before writing.
    os.makedirs(os.path.dirname(self.path), exist_ok=True)
    # Persist both the settings dict and the network state dict together.
    torch.save([self.settings, self.state_dict()], self.path)
    def _setup_band_weights(self):
        """Setup the interpolation for the band weights used for photometry

        For every band in ``self.settings['bands']`` we precompute a weight
        vector that maps a model spectrum on the internal log-wavelength grid
        to a band flux. The grid is oversampled in log-wavelength so that
        redshifting can later be applied as a simple fractional index shift
        (see `_calculate_band_weights`).
        """
        # Build the model in log wavelength
        model_log_wave = np.linspace(np.log10(self.settings['min_wave']),
                                     np.log10(self.settings['max_wave']),
                                     self.settings['spectrum_bins'])
        model_spacing = model_log_wave[1] - model_log_wave[0]
        band_spacing = model_spacing / self.settings['band_oversampling']
        # Extend the grid far enough to cover the reddest observer-frame
        # wavelength at the maximum supported redshift.
        band_max_log_wave = (
            np.log10(self.settings['max_wave'] * (1 + self.settings['max_redshift']))
            + band_spacing
        )
        # Oversampling must be odd.
        assert self.settings['band_oversampling'] % 2 == 1
        pad = (self.settings['band_oversampling'] - 1) // 2
        band_log_wave = np.arange(np.log10(self.settings['min_wave']),
                                  band_max_log_wave, band_spacing)
        band_wave = 10**(band_log_wave)
        # Padded grid so that the 'valid' convolution below lines up with
        # band_log_wave.
        band_pad_log_wave = np.arange(
            np.log10(self.settings['min_wave']) - band_spacing * pad,
            band_max_log_wave + band_spacing * pad,
            band_spacing
        )
        # Width of each log-wavelength bin in linear wavelength.
        band_pad_dwave = (
            10**(band_pad_log_wave + band_spacing / 2.)
            - 10**(band_pad_log_wave - band_spacing / 2.)
        )
        ref = sncosmo.get_magsystem(self.settings['magsys'])
        band_weights = []
        for band_name in self.settings['bands']:
            band = sncosmo.get_bandpass(band_name)
            band_transmission = band(10**(band_pad_log_wave))
            # Convolve the bands to match the sampling of the spectrum.
            band_conv_transmission = np.convolve(
                band_transmission * band_pad_dwave,
                np.ones(self.settings['band_oversampling']),
                mode='valid'
            )
            # Band weight normalized to the magnitude system zeropoint. The
            # 10**(0.4 * -20.) factor applies a zeropoint of -20 mag, matching
            # the zp=-20 used in `_test_band_weights`.
            band_weight = (
                band_wave
                * band_conv_transmission
                / sncosmo.constants.HC_ERG_AA
                / ref.zpbandflux(band)
                * 10**(0.4 * -20.)
            )
            band_weights.append(band_weight)
        # Get the locations that should be sampled at redshift 0. We can scale these to
        # get the locations at any redshift.
        band_interpolate_locations = torch.arange(
            0,
            self.settings['spectrum_bins'] * self.settings['band_oversampling'],
            self.settings['band_oversampling']
        )
        # Save the variables that we need to do interpolation.
        self.band_interpolate_locations = band_interpolate_locations.to(self.device)
        self.band_interpolate_spacing = band_spacing
        self.band_interpolate_weights = torch.FloatTensor(band_weights).to(self.device)
        self.model_wave = 10**(model_log_wave)
    def _calculate_band_weights(self, redshifts):
        """Calculate the band weights for a given set of redshifts

        We have precomputed the weights for each bandpass, so we simply
        interpolate those weights at the desired redshifts. We are working in
        log-wavelength, so a change in redshift just gives us a shift in
        indices.

        Parameters
        ----------
        redshifts : List[float]
            Redshifts to calculate the band weights at

        Returns
        -------
        `~numpy.ndarray`
            Band weights for each redshift/band combination
        """
        # Figure out the locations to sample at for each redshift. A redshift
        # z shifts the spectrum by log10(1 + z) in log-wavelength, i.e. by
        # log10(1 + z) / band_interpolate_spacing (fractional) grid indices.
        locs = (
            self.band_interpolate_locations
            + torch.log10(1 + redshifts)[:, None] / self.band_interpolate_spacing
        )
        flat_locs = locs.flatten()
        # Linear interpolation between the two neighboring grid points.
        int_locs = flat_locs.long()
        remainders = flat_locs - int_locs
        start = self.band_interpolate_weights[..., int_locs]
        end = self.band_interpolate_weights[..., int_locs + 1]
        flat_result = remainders * end + (1 - remainders) * start
        # Reshape from (band, redshift*bin) to (redshift, bin, band).
        result = flat_result.reshape((-1,) + locs.shape).permute(1, 2, 0)
        # We need an extra term of 1 + z from the filter contraction.
        result /= (1 + redshifts)[:, None, None]
        return result
def _test_band_weights(self, redshift, source='salt2-extended'):
"""Test the accuracy of the band weights
We compare sncosmo photometry to the photometry calculated by this class.
Parameters
----------
redshift : float
Redshift to evaluate the model at
source : str, optional
SNCosmo source to use, by default 'salt2-extended'
"""
model = sncosmo.Model(source=source)
# sncosmo photometry
model.set(z=redshift)
sncosmo_photometry = model.bandflux(self.settings['bands'], 0., zp=-20.,
zpsys=self.settings['magsys'])
# parsnip photometry
model.set(z=0.)
model_flux = model._flux(0., self.model_wave)[0]
band_weights = self._calculate_band_weights(
torch.FloatTensor([redshift]))[0].numpy()
parsnip_photometry = np.sum(model_flux[:, None] * band_weights, axis=0)
print(f"z = {redshift}")
print(f"sncosmo photometry: {sncosmo_photometry}")
print(f"parsnip photometry: {parsnip_photometry}")
print(f"ratio: {parsnip_photometry / sncosmo_photometry}")
def preprocess(self, dataset, chunksize=64, verbose=True):
"""Preprocess an lcdata dataset
The preprocessing will be done over multiple threads. Set `ParsnipModel.threads`
to change how many are used. If the dataset is already preprocessed, then
nothing will be done and it will be returned as is.
Parameters
----------
dataset : `~lcdata.Dataset`
Dataset to preprocess
chunksize : int, optional
Number of light curves to process at a time, by default 64
verbose : bool, optional
Whether to show a progress bar, by default True
Returns
-------
`~lcdata.Dataset`
Preprocessed dataset
"""
import lcdata
# Check if we were given a preprocessed dataset. We store our preprocessed data
# as the parsnip_data variable.
if ('parsnip_preprocessed' in dataset.meta.keys()
and np.all(dataset.meta['parsnip_preprocessed'])):
return dataset
if self.threads == 1:
iterator = dataset.light_curves
if verbose:
iterator = tqdm(dataset.light_curves, file=sys.stdout,
desc="Preprocessing dataset")
# Run on a single core without multiprocessing
preprocessed_light_curves = []
for lc in iterator:
preprocessed_light_curves.append(
preprocess_light_curve(lc, self.settings, raise_on_invalid=False)
)
else:
# Run with multiprocessing in multiple threads.
func = functools.partial(preprocess_light_curve, settings=self.settings,
raise_on_invalid=False)
with multiprocessing.Pool(self.threads) as p:
iterator = p.imap(func, dataset.light_curves, chunksize=chunksize)
if verbose:
iterator = tqdm(iterator, total=len(dataset.light_curves),
file=sys.stdout, desc="Preprocessing dataset")
preprocessed_light_curves = list(iterator)
# Check if any light curves failed to process
none_count = 0
for lc in preprocessed_light_curves:
if lc is None:
none_count += 1
if none_count > 0:
print(f"WARNING: Rejecting {none_count}/{len(preprocessed_light_curves)} "
"light curves. Consider using 'parsnip.load_dataset()' or "
"'parsnip.parse_dataset()' to load/parse the dataset and hopefully "
"avoid this.")
preprocessed_light_curves = [i for i in preprocessed_light_curves if i is
not None]
dataset = lcdata.from_light_curves(preprocessed_light_curves)
return dataset
    def augment_light_curves(self, light_curves, as_table=True):
        """Augment a set of light curves

        Each light curve is randomly degraded: observations are dropped, the
        time axis is shifted, noise may be added, and the amplitude scale is
        jittered. NOTE: the sequence of `np.random` draws below is fixed;
        reordering them would change results for a given seed.

        Parameters
        ----------
        light_curves : List[`~astropy.table.Table`]
            List of light curves to augment
        as_table : bool, optional
            Whether to return the light curves as astropy Tables, by default True.
            Constructing new tables is relatively slow, so internally we skip this step
            when training the ParSNIP model.

        Returns
        -------
        List
            Augmented light curves
        """
        # Check if we have a list of light curves or a single one and handle it
        # appropriately.
        if isinstance(light_curves, astropy.table.Table):
            # Single object. Wrap it so that we can process it as an array. We'll unwrap
            # it at the end.
            single = True
            light_curves = [light_curves]
        else:
            single = False
        new_light_curves = np.empty(shape=len(light_curves), dtype=object)
        for idx, lc in enumerate(light_curves):
            # Convert the table to a numpy recarray. This is much faster to work with.
            data = lc.as_array()
            # Make a copy of the metadata to work off of. When not returning
            # tables (training path) the cached copy from
            # `get_data_loader` is reused for speed.
            meta = lc.meta.copy(use_cache=not as_table)
            # Randomly drop observations and make a copy of the light curve.
            drop_frac = np.random.uniform(0, 0.5)
            mask = np.random.rand(len(data)) > drop_frac
            data = data[mask]
            # Shift the time randomly. The reference time moves forward while
            # the gridded times move back so the observations stay consistent.
            time_shift = np.round(
                np.random.normal(0., self.settings['time_sigma'])
            ).astype(int)
            meta['parsnip_reference_time'] += time_shift / SIDEREAL_SCALE
            data['grid_time'] -= time_shift
            data['time_index'] -= time_shift
            # Add noise to the observations (only half of the time, and only
            # if any observations survived the drop above).
            if np.random.rand() < 0.5 and len(data) > 0:
                # Choose an overall scale for the noise from a lognormal
                # distribution.
                noise_scale = np.random.lognormal(-4., 1.) * meta['parsnip_scale']
                # Choose the noise levels for each observation from a lognormal
                # distribution.
                noise_sigmas = np.random.lognormal(np.log(noise_scale), 1., len(data))
                # Add the noise to the observations.
                noise = np.random.normal(0., noise_sigmas)
                data['flux'] += noise
                data['fluxerr'] = np.sqrt(data['fluxerr']**2 + noise_sigmas**2)
            # Scale the amplitude that we input to the model randomly.
            amp_scale = np.exp(np.random.normal(0, 0.5))
            meta['parsnip_scale'] *= amp_scale
            # Convert back to an astropy Table if desired. This is somewhat slow, so we
            # skip it internally when training the model.
            if as_table:
                new_lc = astropy.table.Table(data, meta=meta)
            else:
                new_lc = (data, meta)
            new_light_curves[idx] = new_lc
        if single:
            return new_light_curves[0]
        else:
            return new_light_curves
def _get_data(self, light_curves):
"""Extract data needed by ParSNIP from a set of light curves.
Parameters
----------
light_curves : List[`~astropy.table.Table`]
Light curves to extract data from
Returns
-------
data : dict
A dictionary with the following keys:
- 'input_data' : A `~torch.FloatTensor` that is used as input to the ParSNIP
encoder.
- 'compare_data' : A `~torch.FloatTensor` containing data that is used for
comparisons with the output of the ParSNIP decoder.
- 'redshift' : A `~torch.FloatTensor` containing the redshifts of each
light curve.
- 'band_indices' : A `~torch.LongTensor` containing the band indices for
each observation that will be compared
- 'photoz' : A `~torch.FloatTensor` containing the photozs of each
observation. Only available if the 'predict_redshift' model setting is
True.
- 'photoz_error' : A `~torch.FloatTensor` containing the photoz errors of
each observation. Only available if the 'predict_redshift' model setting
is True.
"""
redshifts = []
if self.settings['predict_redshift']:
photozs = []
photoz_errors = []
compare_data = []
compare_band_indices = []
# Build a grid for the input
grid_flux = np.zeros((len(light_curves), len(self.settings['bands']),
self.settings['time_window']))
grid_weights = np.zeros_like(grid_flux)
for idx, lc in enumerate(light_curves):
# Convert the table to a numpy recarray. This is much faster to work with.
# For augmentation, we skip creating a Table because that is slow and just
# keep the recarray. Handle that too.
if isinstance(lc, astropy.table.Table):
lc_data = lc.as_array()
lc_meta = lc.meta
else:
lc_data, lc_meta = lc
# Extract the redshift.
if self.settings['predict_redshift']:
# Note: this uses the keys for PLAsTiCC and should be adapted to handle
# more general surveys.
redshifts.append(lc_meta['hostgal_specz'])
photozs.append(lc_meta['hostgal_photoz'])
photoz_errors.append(lc_meta['hostgal_photoz_err'])
else:
redshifts.append(lc_meta['redshift'])
# Mask out observations that are outside of our window.
mask = (lc_data['time_index'] >= 0) & (lc_data['time_index'] <
self.settings['time_window'])
lc_data = lc_data[mask]
# Scale the flux and fluxerr appropriately. Note that applying the mask
# makes a copy of the array, so this won't affect the original data.
lc_data['flux'] /= lc_meta['parsnip_scale']
lc_data['fluxerr'] /= lc_meta['parsnip_scale']
# Calculate weights with an error floor included. Note that this typically a
# very large number. For the comparison this doesn't matter, but for the
# input we scale it by the error floor so that it becomes a number between 0
# and 1.
weights = 1 / (lc_data['fluxerr']**2 + self.settings['error_floor']**2)
# Fill in the input array.
grid_flux[idx, lc_data['band_index'], lc_data['time_index']] = \
lc_data['flux']
grid_weights[idx, lc_data['band_index'], lc_data['time_index']] = \
self.settings['error_floor']**2 * weights
# Stack all of the data that will be used for comparisons and convert it to
# a torch tensor.
obj_compare_data = torch.FloatTensor(np.vstack([
lc_data['grid_time'],
lc_data['flux'],
lc_data['fluxerr'],
weights,
]))
compare_data.append(obj_compare_data.T)
compare_band_indices.append(torch.LongTensor(lc_data['band_index'].copy()))
# Gather the input data.
redshifts = np.array(redshifts)
if self.settings['predict_redshift']:
photozs = np.array(photozs)
photoz_errors = np.array(photoz_errors)
# Add extra features to the input.
if self.settings['input_redshift']:
if self.settings['predict_redshift']:
extra_input_data = [photozs, photoz_errors]
else:
extra_input_data = [redshifts]
# Stack everything together.
input_data = np.concatenate(
[i[:, None, None].repeat(self.settings['time_window'], axis=2) for i in
extra_input_data]
+ [grid_flux, grid_weights],
axis=1
)
# Convert to torch tensors
input_data = torch.FloatTensor(input_data).to(self.device)
redshifts = torch.FloatTensor(redshifts).to(self.device)
# Pad all of the compare data to have the same shape.
compare_data = nn.utils.rnn.pad_sequence(compare_data, batch_first=True)
compare_data = compare_data.permute(0, 2, 1)
compare_band_indices = nn.utils.rnn.pad_sequence(compare_band_indices,
batch_first=True)
compare_data = compare_data.to(self.device)
compare_band_indices = compare_band_indices.to(self.device)
data = {
'input_data': input_data,
'compare_data': compare_data,
'redshift': redshifts,
'band_indices': compare_band_indices,
}
if self.settings['predict_redshift']:
data['photoz'] = torch.FloatTensor(photozs).to(self.device)
data['photoz_error'] = torch.FloatTensor(photoz_errors).to(self.device)
return data
    def _build_model(self):
        """Build the model

        Constructs the encoder (conv trunk + time branch + latent branch) and
        the MLP decoder, and registers them as submodules.
        """
        # Two input channels (flux and weight) per band, plus optional
        # redshift features.
        input_size = len(self.settings['bands']) * 2
        if self.settings['input_redshift']:
            if self.settings['predict_redshift']:
                # Photometric redshift and its uncertainty.
                input_size += 2
            else:
                # A single (spectroscopic) redshift channel.
                input_size += 1
        if self.settings['encode_block'] == 'conv1d':
            encode_block = Conv1dBlock
        elif self.settings['encode_block'] == 'residual':
            encode_block = ResidualBlock
        else:
            raise Exception(f"Unknown block {self.settings['encode_block']}.")
        # Encoder architecture. We start with an input of size input_size x
        # time_window We apply a series of convolutional blocks to this that produce
        # outputs that are the same size. The type of block is specified by
        # settings['encode_block']. Each convolutional block has a dilation that is
        # given by settings['encode_conv_dilations'].
        if (len(self.settings['encode_conv_architecture']) !=
                len(self.settings['encode_conv_dilations'])):
            raise Exception("Layer sizes and dilations must have the same length!")
        encode_layers = []
        # Convolutional layers.
        last_size = input_size
        for layer_size, dilation in zip(self.settings['encode_conv_architecture'],
                                        self.settings['encode_conv_dilations']):
            encode_layers.append(
                encode_block(last_size, layer_size, dilation)
            )
            last_size = layer_size
        # Fully connected layers for the encoder following the convolution blocks.
        # These are Conv1D layers with a kernel size of 1 that mix within the time
        # indexes.
        for layer_size in self.settings['encode_fc_architecture']:
            encode_layers.append(nn.Conv1d(last_size, layer_size, 1))
            encode_layers.append(nn.ReLU())
            last_size = layer_size
        self.encode_layers = nn.Sequential(*encode_layers)
        # Fully connected layers for the time-indexing layer. These are Conv1D layers
        # with a kernel size of 1 that mix within time indexes.
        time_last_size = last_size
        encode_time_layers = []
        for layer_size in self.settings['encode_time_architecture']:
            encode_time_layers.append(nn.Conv1d(time_last_size, layer_size, 1))
            encode_time_layers.append(nn.ReLU())
            time_last_size = layer_size
        # Final layer, go down to a single channel with no activation function.
        encode_time_layers.append(nn.Conv1d(time_last_size, 1, 1))
        self.encode_time_layers = nn.Sequential(*encode_time_layers)
        # Fully connected layers to calculate the latent space parameters for the VAE.
        encode_latent_layers = []
        latent_last_size = last_size
        for layer_size in self.settings['encode_latent_prepool_architecture']:
            encode_latent_layers.append(nn.Conv1d(latent_last_size, layer_size, 1))
            encode_latent_layers.append(nn.ReLU())
            latent_last_size = layer_size
        # Apply a global max pooling over the time channels.
        encode_latent_layers.append(GlobalMaxPoolingTime())
        # Apply fully connected layers to get the embedding.
        for layer_size in self.settings['encode_latent_postpool_architecture']:
            encode_latent_layers.append(nn.Linear(latent_last_size, layer_size))
            encode_latent_layers.append(nn.ReLU())
            latent_last_size = layer_size
        self.encode_latent_layers = nn.Sequential(*encode_latent_layers)
        # Finally, use a last FC layer to get mu and logvar. The mu layer
        # outputs the color plus the intrinsic latent variables (the reference
        # time mu is prepended separately in `encode`), while the logvar layer
        # also outputs the reference time log-variance — hence the +1 / +2.
        mu_size = self.settings['latent_size'] + 1
        logvar_size = self.settings['latent_size'] + 2
        if self.settings['predict_redshift']:
            # Predict the redshift
            mu_size += 1
            logvar_size += 1
        self.encode_mu_layer = nn.Linear(latent_last_size, mu_size)
        self.encode_logvar_layer = nn.Linear(latent_last_size, logvar_size)
        # MLP decoder. We start with an input that is the intrinsic latent space + one
        # dimension for time, and output a spectrum of size
        # self.settings['spectrum_bins']. We also have hidden layers with sizes given
        # by self.settings['decode_layers']. We implement this using a Conv1D layer
        # with a kernel size of 1 for computational reasons so that it decodes multiple
        # spectra for each transient all at the same time, but the decodes are all done
        # independently so this is really an MLP.
        decode_last_size = self.settings['latent_size'] + 1
        decode_layers = []
        for layer_size in self.settings['decode_architecture']:
            decode_layers.append(nn.Conv1d(decode_last_size, layer_size, 1))
            decode_layers.append(nn.Tanh())
            decode_last_size = layer_size
        # Final layer. Use a FC layer to get us to the correct number of bins, and use
        # a softplus activation function to get positive flux.
        decode_layers.append(nn.Conv1d(decode_last_size,
                                       self.settings['spectrum_bins'], 1))
        decode_layers.append(nn.Softplus())
        self.decode_layers = nn.Sequential(*decode_layers)
def get_data_loader(self, dataset, augment=False, **kwargs):
"""Get a PyTorch DataLoader for an lcdata Dataset
Parameters
----------
dataset : `~lcdata.Dataset`
Dataset to load
augment : bool, optional
Whether to augment the dataset, by default False
Returns
-------
`~torch.utils.data.DataLoader`
PyTorch DataLoader for the dataset
"""
# Preprocess the dataset if it isn't already.
dataset = self.preprocess(dataset)
if augment:
# Reset the metadata caches that we use to speed up augmenting.
for lc in dataset.light_curves:
lc.meta.copy(update_cache=True)
# To speed things up, don't create new astropy.Table objects for the
# augmented light curves. The `forward` method can handle the result that
# is returned by `augment_light_curves`.
collate_fn = functools.partial(self.augment_light_curves, as_table=False)
else:
collate_fn = list
return DataLoader(dataset.light_curves, batch_size=self.settings['batch_size'],
collate_fn=collate_fn, **kwargs)
def encode(self, input_data):
"""Predict the latent variables for a set of light curves
We use variational inference, and predict the parameters of a posterior
distribution over the latent space.
Parameters
----------
input_data : `~torch.FloatTensor`
Input data representing a set of gridded light curves
Returns
-------
`~torch.FloatTensor`
Mean predictions for each latent variable
`~torch.FloatTensor`
Log-variance predictions for each latent variable
"""
# Apply common encoder blocks
e = self.encode_layers(input_data)
# Reference time branch. First, apply additional FC layers to get to an output
# that has a single channel.
e_time = self.encode_time_layers(e)
# Apply the time-indexing layer to calculate the reference time. This is a
# special layer that is invariant to translations of the input.
t_vec = torch.nn.functional.softmax(torch.squeeze(e_time, 1), dim=1)
ref_time_mu = (
torch.sum(t_vec * self.input_times, 1)
/ self.settings['time_sigma']
)
# Latent space branch.
e_latent = self.encode_latent_layers(e)
# Predict mu and logvar
encoding_mu = self.encode_mu_layer(e_latent)
encoding_logvar = self.encode_logvar_layer(e_latent)
# Prepend the time mu value to get the full encoding.
encoding_mu = torch.cat([ref_time_mu[:, None], encoding_mu], 1)
# Constrain the logvar so that it doesn't go to crazy values and throw
# everything off with floating point precision errors. This will not be a
# concern for a properly trained model, but things can go wrong early in the
# training at high learning rates.
encoding_logvar = torch.clamp(encoding_logvar, None, 5.)
return encoding_mu, encoding_logvar
def decode_spectra(self, encoding, phases, color, amplitude=None):
"""Predict the spectra at a given set of latent variables
Parameters
----------
encoding : `~torch.FloatTensor`
Coordinates in the ParSNIP intrinsic latent space for each light curve
phases : `~torch.FloatTensor`
Phases to decode each light curve at
color : `~torch.FloatTensor`
Color of each light curve
amplitude : `~torch.FloatTensor`, optional
Amplitude to scale each light curve by, by default no scaling will be
applied.
Returns
-------
`~torch.FloatTensor`
Predicted spectra
"""
scale_phases = phases / (self.settings['time_window'] // 2)
repeat_encoding = encoding[:, :, None].expand((-1, -1, scale_phases.shape[1]))
stack_encoding = torch.cat([repeat_encoding, scale_phases[:, None, :]], 1)
# Apply intrinsic decoder
model_spectra = self.decode_layers(stack_encoding)
if color is not None:
# Apply colors
apply_colors = 10**(-0.4 * color[:, None] * self.color_law[None, :])
model_spectra = model_spectra * apply_colors[..., None]
if amplitude is not None:
# Apply amplitude
model_spectra = model_spectra * amplitude[:, None, None]
return model_spectra
    def decode(self, encoding, ref_times, color, times, redshifts, band_indices,
               amplitude=None):
        """Predict the light curves for a given set of latent variables

        Parameters
        ----------
        encoding : `~torch.FloatTensor`
            Coordinates in the ParSNIP intrinsic latent space for each light curve
        ref_times : `~torch.FloatTensor`
            Reference time for each light curve
        color : `~torch.FloatTensor`
            Color of each light curve
        times : `~torch.FloatTensor`
            Times to predict each light curve at
        redshifts : `~torch.FloatTensor`
            Redshift of each light curve
        band_indices : `~torch.LongTensor`
            Band indices for each observation
        amplitude : `~torch.FloatTensor`, optional
            Amplitude to scale each light curve by, by default no scaling will be
            applied

        Returns
        -------
        `~torch.FloatTensor`
            Model spectra
        `~torch.FloatTensor`
            Model photometry
        """
        # Restframe phase: shift by the reference time and apply time dilation.
        phases = (
            (times - ref_times[:, None])
            / (1 + redshifts[:, None])
        )
        # Generate the restframe spectra
        model_spectra = self.decode_spectra(encoding, phases, color, amplitude)
        # Figure out the weights for each band
        band_weights = self._calculate_band_weights(redshifts)
        num_batches = band_indices.shape[0]
        num_observations = band_indices.shape[1]
        # Gather, for every observation, the weight vector of the band it was
        # taken in: index band_weights (batch, bin, band) by (batch, band)
        # pairs, then reshape back to (batch, bin, observation).
        batch_indices = (
            torch.arange(num_batches, device=encoding.device)
            .repeat_interleave(num_observations)
        )
        obs_band_weights = (
            band_weights[batch_indices, :, band_indices.flatten()]
            .reshape((num_batches, num_observations, -1))
            .permute(0, 2, 1)
        )
        # Sum over each filter.
        model_flux = torch.sum(model_spectra * obs_band_weights, axis=1)
        return model_spectra, model_flux
def _reparameterize(self, mu, logvar, sample=True):
if sample:
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + eps*std
else:
return mu
def _sample(self, encoding_mu, encoding_logvar, sample=True):
sample_encoding = self._reparameterize(encoding_mu, encoding_logvar,
sample=sample)
time_sigma = self.settings['time_sigma']
color_sigma = self.settings['color_sigma']
if self.settings['predict_redshift']:
redshift = torch.exp(sample_encoding[:, -1] - 1)
sample_encoding = sample_encoding[:, :-1]
else:
redshift = torch.zeros_like(sample_encoding[:, 0])
# Rescale variables
ref_times = sample_encoding[:, 0] * time_sigma
color = sample_encoding[:, 1] * color_sigma
encoding = sample_encoding[:, 2:]
# Constrain the color and reference time so that things don't go to crazy values
# and throw everything off with floating point precision errors. This will not
# be a concern for a properly trained model, but things can go wrong early in
# the training at high learning rates.
ref_times = torch.clamp(ref_times, -10. * time_sigma, 10. * time_sigma)
color = torch.clamp(color, -10. * color_sigma, 10. * color_sigma)
redshift = torch.clamp(redshift, 0., self.settings['max_redshift'])
return redshift, ref_times, color, encoding
    def forward(self, light_curves, sample=True, to_numpy=False):
        """Run a set of light curves through the full ParSNIP model

        We use variational inference to predict the latent representation of each light
        curve, and we then use the generative model to predict the light curves for
        those representations.

        Parameters
        ----------
        light_curves : List[`~astropy.table.Table`]
            List of light curves
        sample : bool, optional
            If True (default), sample from the posterior distribution. If False, use the
            MAP.
        to_numpy : bool, optional
            Whether to convert the outputs to numpy arrays, by default False

        Returns
        -------
        dict
            Result dictionary. If to_numpy is True, all of the elements will be numpy
            arrays. Otherwise, they will be PyTorch tensors on the model's device.
        """
        # Extract the data that we need and move it to the right device.
        data = self._get_data(light_curves)
        # Encode the light curves.
        encoding_mu, encoding_logvar = self.encode(data['input_data'])
        # Sample from the latent space.
        predicted_redshifts, ref_times, color, encoding = self._sample(
            encoding_mu, encoding_logvar, sample=sample
        )
        # Use the model's own redshift prediction only when configured to;
        # otherwise use the known redshift from the data.
        if self.settings['predict_redshift']:
            use_redshifts = predicted_redshifts
        else:
            use_redshifts = data['redshift']
        # Unpack the comparison rows built in `_get_data`.
        time = data['compare_data'][:, 0]
        obs_flux = data['compare_data'][:, 1]
        obs_fluxerr = data['compare_data'][:, 2]
        obs_weight = data['compare_data'][:, 3]
        # Decode the light curves
        model_spectra, model_flux = self.decode(
            encoding, ref_times, color, time, use_redshifts, data['band_indices']
        )
        # Analytically evaluate the conditional distribution for the amplitude and
        # sample from it.
        amplitude_mu, amplitude_logvar = self._compute_amplitude(obs_weight, model_flux,
                                                                 obs_flux)
        amplitude = self._reparameterize(amplitude_mu, amplitude_logvar, sample=sample)
        model_flux = model_flux * amplitude[:, None]
        model_spectra = model_spectra * amplitude[:, None, None]
        result = {
            'ref_times': ref_times,
            'color': color,
            'encoding': encoding,
            'amplitude': amplitude,
            'redshift': data['redshift'],
            'predicted_redshift': predicted_redshifts,
            'time': time,
            'obs_flux': obs_flux,
            'obs_fluxerr': obs_fluxerr,
            'obs_weight': obs_weight,
            'band_indices': data['band_indices'],
            'model_flux': model_flux,
            'model_spectra': model_spectra,
            'encoding_mu': encoding_mu,
            'encoding_logvar': encoding_logvar,
            'amplitude_mu': amplitude_mu,
            'amplitude_logvar': amplitude_logvar,
        }
        if self.settings['predict_redshift']:
            result['photoz'] = data['photoz']
            result['photoz_error'] = data['photoz_error']
        if to_numpy:
            result = {k: v.detach().cpu().numpy() for k, v in result.items()}
        return result
def _compute_amplitude(self, weight, model_flux, flux):
num = torch.sum(weight * model_flux * flux, axis=1)
denom = torch.sum(weight * model_flux * model_flux, axis=1)
# With augmentation, can very rarely end up with no light curve points. Handle
# that gracefully by setting the amplitude to 0 with a very large uncertainty.
denom[denom == 0.] = 1e-5
amplitude_mu = num / denom
amplitude_logvar = torch.log(1. / denom)
return amplitude_mu, amplitude_logvar
    def loss_function(self, result, return_components=False, return_individual=False):
        """Compute the loss function for a set of light curves

        Parameters
        ----------
        result : dict
            Output of `~ParsnipModel.forward`
        return_components : bool, optional
            Whether to return the individual parts of the loss function, by default
            False.
        return_individual : bool, optional
            Whether to return the loss function for each light curve individually, by
            default False.

        Returns
        -------
        float or `~torch.FloatTensor`
            If return_components and return_individual are False, return a single value
            representing the loss function for a set of light curves.
            If return_components is True, then we return a set of four values
            representing the negative log likelihood, the KL divergence, the
            regularization penalty, and the amplitude probability.
            If return_individual is True, then we return the loss function for each
            light curve individually.
        """
        # Reconstruction likelihood (Gaussian NLL up to a constant).
        nll = (0.5 * result['obs_weight']
               * (result['obs_flux'] - result['model_flux'])**2)
        # KL divergence of the latent posterior against a unit Gaussian prior.
        kld = -0.5 * (1 + result['encoding_logvar']
                      - result['encoding_mu'].pow(2)
                      - result['encoding_logvar'].exp())
        # Regularization of spectra: penalize large fractional bin-to-bin
        # differences to keep the decoded spectra smooth.
        diff = (
            (result['model_spectra'][:, 1:, :] - result['model_spectra'][:, :-1, :])
            / (result['model_spectra'][:, 1:, :] + result['model_spectra'][:, :-1, :])
        )
        penalty = self.settings['penalty'] * diff**2
        # Amplitude probability for the importance sampling integral
        amp_prob = -0.5 * ((result['amplitude'] - result['amplitude_mu'])**2
                           / result['amplitude_logvar'].exp())
        # Redshift error
        if self.settings['predict_redshift']:
            # Prior from photoz estimate
            photoz_diff = result['predicted_redshift'] - result['photoz']
            redshift_nll = 0.5 * photoz_diff**2 / result['photoz_error']**2
            # Prior from true redshift — only added (in place) for light
            # curves whose spectroscopic redshift is known (non-NaN).
            mask = ~torch.isnan(result['redshift'])
            diff_redshifts = (result['predicted_redshift'][mask]
                              - result['redshift'][mask])
            redshift_nll[mask] += (
                0.5 * diff_redshifts**2 / self.settings['specz_error']**2
            )
        else:
            redshift_nll = torch.zeros_like(amp_prob)
        if return_individual:
            # Reduce over observations/latent dims but keep the batch axis.
            nll = torch.sum(nll, axis=1)
            kld = torch.sum(kld, axis=1)
            penalty = torch.sum(torch.sum(penalty, axis=2), axis=1)
        else:
            nll = torch.sum(nll)
            kld = torch.sum(kld)
            penalty = torch.sum(penalty)
            amp_prob = torch.sum(amp_prob)
            redshift_nll = torch.sum(redshift_nll)
        if return_components:
            return torch.stack([nll, kld, penalty, amp_prob, redshift_nll])
        else:
            return nll + kld + penalty + amp_prob + redshift_nll
def score(self, dataset, rounds=1, return_components=False, sample=True):
"""Evaluate the loss function on a given dataset.
Parameters
----------
dataset : `~lcdata.Dataset`
Dataset to run on
rounds : int, optional
Number of rounds to use for evaluation. VAEs are stochastic, so the loss
function is not deterministic. By running for multiple rounds, the
uncertainty on the loss function can be decreased. Default 1.
return_components : bool, optional
Whether to return the individual parts of the loss function, by default
False. See `~ParsnipModel.loss_function` for details.
Returns
-------
loss
Computed loss function
"""
self.eval()
total_loss = 0
total_count = 0
loader = self.get_data_loader(dataset)
# Compute the loss
for round in range(rounds):
for batch_lcs in loader:
result = self.forward(batch_lcs, sample=sample)
loss = self.loss_function(result, return_components)
if return_components:
total_loss += loss.detach().cpu().numpy()
else:
total_loss += loss.item()
total_count += len(batch_lcs)
loss = total_loss / total_count
return loss
    def fit(self, dataset, max_epochs=1000, augment=True, test_dataset=None):
        """Fit the model to a dataset

        Parameters
        ----------
        dataset : `~lcdata.Dataset`
            Dataset to fit to
        max_epochs : int, optional
            Maximum number of epochs, by default 1000
        augment : bool, optional
            Whether to use augmentation, by default True
        test_dataset : `~lcdata.Dataset`, optional
            Test dataset that will be scored at the end of each epoch, by default None
        """
        # The model is stochastic, so the loss function will have a fair bit of noise.
        # If the dataset is small, we run through several augmentations of it every
        # epoch to get the noise down.
        repeats = int(np.ceil(25000 / len(dataset)))
        loader = self.get_data_loader(dataset, augment=augment, shuffle=True)
        if test_dataset is not None:
            test_dataset = self.preprocess(test_dataset)
        while self.epoch < max_epochs:
            self.train()
            train_loss = 0
            train_count = 0
            with tqdm(range(len(loader) * repeats), file=sys.stdout) as pbar:
                for repeat in range(repeats):
                    # Training step
                    for batch_lcs in loader:
                        self.optimizer.zero_grad()
                        result = self.forward(batch_lcs)
                        loss = self.loss_function(result)
                        loss.backward()
                        # Scrub any NaN gradients before stepping so a single
                        # bad batch can't poison the weights.
                        replace_nan_grads(self.parameters())
                        train_loss += loss.item()
                        self.optimizer.step()
                        train_count += len(batch_lcs)
                        total_loss = train_loss / train_count
                        batch_loss = loss.item() / len(batch_lcs)
                        pbar.set_description(
                            f'Epoch {self.epoch:4d}: Loss: {total_loss:8.4f} '
                            f'({batch_loss:8.4f})',
                            refresh=False
                        )
                        pbar.update()
                if test_dataset is not None:
                    # Calculate the test loss
                    test_loss = self.score(test_dataset)
                    pbar.set_description(
                        f'Epoch {self.epoch:4d}: Loss: {total_loss:8.4f}, '
                        f'Test loss: {test_loss:8.4f}',
                    )
                else:
                    pbar.set_description(
                        f'Epoch {self.epoch:4d}: Loss: {total_loss:8.4f}'
                    )
            # NOTE(review): the scheduler is stepped on the summed (not
            # per-light-curve) training loss — confirm this is intentional.
            self.scheduler.step(train_loss)
            # Checkpoint and save the model
            self.save()
            # Check if the learning rate is below our threshold, and exit if it is.
            lr = self.optimizer.param_groups[0]['lr']
            if lr < self.settings['min_learning_rate']:
                break
            self.epoch += 1
def predict(self, light_curves, augment=False):
    """Generate predictions for a light curve or set of light curves.

    Parameters
    ----------
    light_curves : `~astropy.table.Table` or List[`~astropy.table.Table`]
        Light curve(s) to generate predictions for.
    augment : bool, optional
        Whether to augment the light curve(s), by default False

    Returns
    -------
    `~astropy.table.Table` or dict
        Table (for multiple light curves) or dict (for a single light curve)
        containing the predictions.
    """
    # A bare Table means a single light curve. Remember that so the result can
    # be unwrapped at the end, and wrap it in a list for uniform processing.
    single = isinstance(light_curves, astropy.table.Table)
    if single:
        light_curves = [light_curves]

    # Everything goes through the dataset-based prediction path.
    dataset = lcdata.from_light_curves(light_curves)
    predictions = self.predict_dataset(dataset, augment=augment)

    if not single:
        return predictions

    # Unwrap the single result row into a plain dict.
    first_row = predictions[0]
    return dict(zip(first_row.keys(), first_row.values()))
def predict_dataset(self, dataset, augment=False):
    """Generate predictions for a dataset

    Parameters
    ----------
    dataset : `~lcdata.Dataset`
        Dataset to generate predictions for.
    augment : bool, optional
        Whether to perform augmentation, False by default.

    Returns
    -------
    predictions : `~astropy.table.Table`
        astropy Table with one row for each light curve and columns with each of the
        predicted values.
    """
    predictions = []

    # Only show preprocessing progress for datasets of nontrivial size.
    dataset = self.preprocess(dataset, verbose=len(dataset) > 100)

    loader = self.get_data_loader(dataset, augment=augment)

    for batch_lcs in loader:
        # Run the data through the model.
        result = self.forward(batch_lcs, to_numpy=True, sample=False)

        # Pull out the reference time and reference scale. Note that if we are
        # working with an augmented dataset, get_data_loader doesn't construct a
        # full astropy Table to save time. Handle either case.
        parsnip_reference_time = []
        parsnip_scale = []
        for lc in batch_lcs:
            if isinstance(lc, astropy.table.Table):
                lc_meta = lc.meta
            else:
                # Augmented entries arrive as (data, meta) pairs.
                lc_data, lc_meta = lc
            parsnip_reference_time.append(lc_meta['parsnip_reference_time'])
            parsnip_scale.append(lc_meta['parsnip_scale'])

        parsnip_reference_time = np.array(parsnip_reference_time)
        parsnip_scale = np.array(parsnip_scale)

        encoding_mu = result['encoding_mu']
        # Convert predicted log-variances to standard deviations.
        encoding_err = np.sqrt(np.exp(result['encoding_logvar']))

        # Update the reference time.
        reference_time_offset = (
            encoding_mu[:, 0] * self.settings['time_sigma'] / SIDEREAL_SCALE
        )
        reference_time = parsnip_reference_time + reference_time_offset
        reference_time_error = (
            encoding_err[:, 0] * self.settings['time_sigma'] / SIDEREAL_SCALE
        )

        # Rescale the amplitude back to the original light curve scale.
        amplitude_mu = result['amplitude_mu'] * parsnip_scale
        amplitude_error = (
            np.sqrt(np.exp(result['amplitude_logvar'])) * parsnip_scale
        )

        # Pull out the keys that we care about saving.
        batch_predictions = {
            'reference_time': reference_time,
            'reference_time_error': reference_time_error,
            'color': encoding_mu[:, 1] * self.settings['color_sigma'],
            'color_error': encoding_err[:, 1] * self.settings['color_sigma'],
            'amplitude': amplitude_mu,
            'amplitude_error': amplitude_error,
        }

        # One (value, error) column pair per latent variable.
        for idx in range(self.settings['latent_size']):
            batch_predictions[f's{idx+1}'] = encoding_mu[:, 2 + idx]
            batch_predictions[f's{idx+1}_error'] = encoding_err[:, 2 + idx]

        if self.settings['predict_redshift']:
            # NOTE(review): the redshift appears to be parameterized as
            # exp(mu - 1) in the last latent slot — confirm against the
            # encoder's parameterization.
            pred_redshift = np.clip(
                np.exp(encoding_mu[:, -1] - 1),
                0, self.settings['max_redshift']
            )
            # Symmetrize the +/- 1 sigma interval into a single error estimate.
            pred_redshift_pos = np.exp(encoding_mu[:, -1] + encoding_err[:, -1] - 1)
            pred_redshift_neg = np.exp(encoding_mu[:, -1] - encoding_err[:, -1] - 1)
            pred_redshift_error = (pred_redshift_pos - pred_redshift_neg) / 2.

            batch_predictions['predicted_redshift'] = pred_redshift
            batch_predictions['predicted_redshift_error'] = pred_redshift_error

        # Calculate other useful features.
        time = result['time']
        obs_flux = result['obs_flux']
        obs_fluxerr = result['obs_fluxerr']
        model_flux = result['model_flux']

        # Entries with obs_fluxerr == 0 are treated as padding/invalid; flag
        # them and substitute a dummy error so the divisions below are safe.
        fluxerr_mask = obs_fluxerr == 0
        obs_fluxerr[fluxerr_mask] = -1.

        # Signal-to-noise
        s2n = obs_flux / obs_fluxerr
        s2n[fluxerr_mask] = 0.
        batch_predictions['total_s2n'] = np.sqrt(np.sum(s2n**2, axis=1))

        # Number of observations
        batch_predictions['count'] = np.sum(~fluxerr_mask, axis=1)

        # Number of observations with signal-to-noise above some threshold.
        batch_predictions['count_s2n_3'] = np.sum(s2n > 3, axis=1)
        batch_predictions['count_s2n_5'] = np.sum(s2n > 5, axis=1)

        # Number of observations with signal-to-noise above some threshold in
        # different time windows (50-day windows around the predicted
        # reference-time offset).
        compare_time = reference_time_offset[:, None]
        mask_pre = time < compare_time - 50.
        mask_rise = (time >= compare_time - 50.) & (time < compare_time)
        mask_fall = (time >= compare_time) & (time < compare_time + 50.)
        mask_post = (time >= compare_time + 50.)
        mask_s2n = s2n > 3

        batch_predictions['count_s2n_3_pre'] = np.sum(mask_pre & mask_s2n, axis=1)
        batch_predictions['count_s2n_3_rise'] = np.sum(mask_rise & mask_s2n, axis=1)
        batch_predictions['count_s2n_3_fall'] = np.sum(mask_fall & mask_s2n, axis=1)
        batch_predictions['count_s2n_3_post'] = np.sum(mask_post & mask_s2n, axis=1)

        # Chi-square of the model fit; flagged (invalid) points contribute zero.
        all_chisq = (obs_flux - model_flux)**2 / obs_fluxerr**2
        all_chisq[fluxerr_mask] = 0.
        batch_predictions['model_chisq'] = np.sum(all_chisq, axis=1)
        batch_predictions['model_dof'] = (
            batch_predictions['count']
            - 3  # amplitude, color, reference time
            - self.settings['latent_size']
        )

        predictions.append(astropy.table.Table(batch_predictions))

    predictions = astropy.table.vstack(predictions, 'exact')

    # Drop any old predictions from the metadata, and merge it in.
    meta = dataset.meta.copy()
    common_columns = set(predictions.colnames) & set(meta.colnames)
    meta.remove_columns(common_columns)
    predictions = astropy.table.hstack([meta, predictions], 'exact')

    # Estimate the absolute luminosity.
    # Figure out which light curves we can calculate the luminosity for.
    amplitudes = predictions['amplitude'].copy()
    amplitude_mask = amplitudes > 0.
    if self.settings['predict_redshift']:
        redshifts = predictions['predicted_redshift'].copy()
    else:
        redshifts = predictions['redshift'].copy()
    redshift_mask = redshifts > 0.
    # Require a reasonably well-measured amplitude (error < 50% of value).
    amplitude_error_mask = predictions['amplitude_error'] < 0.5 * amplitudes
    luminosity_mask = amplitude_mask & redshift_mask & amplitude_error_mask

    # Mask out invalid data for luminosities. Dummy values keep the vectorized
    # math finite; the affected rows are overwritten with NaN below.
    redshifts[~luminosity_mask] = 1.
    amplitudes[~luminosity_mask] = 1.
    frac_diff = predictions['amplitude_error'] / amplitudes
    frac_diff[~luminosity_mask] = 0.5

    luminosity = (
        -2.5*np.log10(amplitudes)
        + self.settings['zeropoint']
        - Planck18.distmod(redshifts).value
    )
    luminosity[~luminosity_mask] = np.nan
    predictions['luminosity'] = luminosity

    # Luminosity uncertainty
    int_mag_err = frac_to_mag(frac_diff)
    int_mag_err[~luminosity_mask] = np.nan
    predictions['luminosity_error'] = int_mag_err

    # Remove the processing flag.
    del predictions['parsnip_preprocessed']

    return predictions
def predict_dataset_augmented(self, dataset, augments=10):
    """Generate predictions for a dataset with augmentation

    This will first generate predictions for the dataset without augmentation,
    and will then generate predictions for the dataset with augmentation the
    given number of times. This returns a dataframe in the same format as
    `~predict_dataset`, but with the following additional columns:
    - original_object_id: the original object_id for each augmentation.
    - augmented: True for augmented light curves, False for original ones.

    Parameters
    ----------
    dataset : `~lcdata.Dataset`
        Dataset to generate predictions for.
    augments : int, optional
        Number of times to augment the dataset, by default 10

    Returns
    -------
    predictions : `~astropy.table.Table`
        astropy Table with one row for each light curve and columns with each of the
        predicted values.
    """
    # Unaugmented predictions come first.
    base = self.predict_dataset(dataset)
    base['original_object_id'] = base['object_id']
    base['augmented'] = False

    all_predictions = [base]

    # Then run `augments` augmented passes, each tagged with a unique suffix
    # on the object id so rows stay distinguishable after stacking.
    for aug_idx in tqdm(range(augments), file=sys.stdout):
        pred = self.predict_dataset(dataset, augment=True)
        pred['original_object_id'] = pred['object_id']
        pred['augmented'] = True
        pred['object_id'] = [
            object_id + f'_aug_{aug_idx+1}' for object_id in pred['object_id']
        ]
        all_predictions.append(pred)

    return astropy.table.vstack(all_predictions, 'exact')
def _predict_time_series(self, light_curve, pred_times, pred_bands, sample, count):
    """Evaluate the model for one light curve at the given times and bands.

    Parameters
    ----------
    light_curve : `~astropy.table.Table`
        Light curve to evaluate the model on.
    pred_times : array-like
        Times (in the light curve's time frame) to evaluate the model at.
    pred_bands : array-like
        Band index for each entry of ``pred_times``.
    sample : bool
        If True, sample from the latent variable posteriors; otherwise use the MAP.
    count : int or None
        Number of realizations to predict; None for a single prediction
        (the batch axis is then dropped from the flux/spectra outputs).

    Returns
    -------
    model_flux, model_spectra, cpu_result
        Model fluxes and spectra scaled back to the original light curve scale,
        plus the full forward-pass result moved to numpy arrays on the CPU.
    """
    # Preprocess the light curve if it wasn't already.
    light_curve = preprocess_light_curve(light_curve, self.settings)

    # Convert given times to our internal times.
    grid_times = time_to_grid(pred_times,
                              light_curve.meta['parsnip_reference_time'])

    # Add a leading batch axis and move to the model's device.
    grid_times = torch.FloatTensor(grid_times)[None, :].to(self.device)
    pred_bands = torch.LongTensor(pred_bands)[None, :].to(self.device)

    if count is not None:
        # Predict multiple light curves: replicate the input `count` times
        # along the batch dimension.
        light_curves = [light_curve] * count
        grid_times = grid_times.repeat(count, 1)
        pred_bands = pred_bands.repeat(count, 1)
    else:
        light_curves = [light_curve]

    # Sample VAE parameters
    result = self.forward(light_curves, sample)

    # Do the predictions
    if self.settings['predict_redshift']:
        redshifts = result['predicted_redshift']
    else:
        redshifts = result['redshift']
    model_spectra, model_flux = self.decode(
        result['encoding'],
        result['ref_times'],
        result['color'],
        grid_times,
        redshifts,
        pred_bands,
        result['amplitude'],
    )

    model_flux = model_flux.cpu().detach().numpy()
    model_spectra = model_spectra.cpu().detach().numpy()

    if count is None:
        # Get rid of the batch index
        model_flux = model_flux[0]
        model_spectra = model_spectra[0]

    # Move the rest of the forward-pass outputs to numpy for the caller.
    cpu_result = {k: v.detach().cpu().numpy() for k, v in result.items()}

    # Scale everything to the original light curve scale.
    model_flux *= light_curve.meta['parsnip_scale']
    model_spectra *= light_curve.meta['parsnip_scale']

    return model_flux, model_spectra, cpu_result
def predict_light_curve(self, light_curve, sample=False, count=None, sampling=1.,
                        pad=50.):
    """Predict the flux of a light curve on a grid

    Parameters
    ----------
    light_curve : `~astropy.table.Table`
        Light curve to predict
    sample : bool, optional
        If True, sample from the latent variable posteriors. Otherwise,
        use the MAP. By default False.
    count : int, optional
        Number of light curves to predict, by default None (single prediction)
    sampling : int, optional
        Grid sampling in days, by default 1.
    pad : int, optional
        Number of days before and after the light curve observations to predict the
        light curve at, by default 50.

    Returns
    -------
    `~numpy.ndarray`
        Times that the model was sampled at
    `~numpy.ndarray`
        Flux of the model in each band: shape (band, time) for a single
        prediction (count=None), (batch, band, time) otherwise.
    `~numpy.ndarray`
        Model result from ParsnipModel.forward
    """
    # Figure out where to sample the light curve
    min_time = np.min(light_curve['time']) - pad
    max_time = np.max(light_curve['time']) + pad
    model_times = np.arange(min_time, max_time + sampling, sampling)

    # Evaluate every band at every grid time.
    band_indices = np.arange(len(self.settings['bands']))
    pred_times = np.tile(model_times, len(band_indices))
    pred_bands = np.repeat(band_indices, len(model_times))

    model_flux, model_spectra, model_result = self._predict_time_series(
        light_curve, pred_times, pred_bands, sample, count
    )

    # Reshape model_flux so that it has the shape (batch, band, time)
    model_flux = model_flux.reshape((-1, len(band_indices), len(model_times)))

    if count is None:
        # Get rid of the batch index.
        # BUG FIX: this previously tested `count == 0`, which can never be true
        # for the documented single-prediction case (count=None), so the
        # spurious leading batch axis was never removed. `_predict_time_series`
        # uses the same `count is None` convention.
        model_flux = model_flux[0]

    return model_times, model_flux, model_result
def predict_spectrum(self, light_curve, time, sample=False, count=None):
    """Predict the spectrum of a light curve at a given time

    Parameters
    ----------
    light_curve : `~astropy.table.Table`
        Light curve
    time : float
        Time to predict the spectrum at
    sample : bool, optional
        If True, sample from the latent variable posteriors. Otherwise,
        use the MAP. By default False.
    count : int, optional
        Number of spectra to predict, by default None (single prediction)

    Returns
    -------
    `~numpy.ndarray`
        Predicted spectrum at the wavelengths specified by
        `~ParsnipModel.model_wave`
    """
    # A single epoch: evaluate the model at one time. The band index is a
    # placeholder since only the spectrum output is used here.
    _, model_spectra, _ = self._predict_time_series(
        light_curve, [time], [0], sample, count
    )
    # Drop the trailing (single-epoch) time axis.
    return model_spectra[..., 0]
def predict_sncosmo(self, light_curve, sample=False):
    """Package the predictions for a light curve as an sncosmo model

    This method performs variational inference on a light curve to predict its
    latent representation. It then initializes an SNCosmo model with that
    representation.

    Parameters
    ----------
    light_curve : `~astropy.table.Table`
        Light curve
    sample : bool, optional
        If True, sample from the latent variable posteriors. Otherwise,
        use the MAP. By default False.

    Returns
    -------
    `~ParsnipSncosmoModel`
        SNCosmo model initialized with the light curve's predicted latent
        representation
    """
    light_curve = preprocess_light_curve(light_curve, self.settings)

    # Run through the model to predict parameters.
    result = self.forward([light_curve], sample=sample, to_numpy=True)

    # Build the sncosmo model.
    model = sncosmo.Model(source=ParsnipSncosmoSource(self))

    meta = light_curve.meta
    # Redshift: model prediction if enabled, otherwise the catalog value.
    if self.settings['predict_redshift']:
        model['z'] = result['predicted_redshift'][0]
    else:
        model['z'] = meta['redshift']
    # Convert the internal grid time back to the light curve's time frame.
    model['t0'] = grid_to_time(result['ref_times'][0],
                               meta['parsnip_reference_time'])
    model['color'] = result['color'][0]

    # Note: ZP of amplitude is 25, and we use an internal offset of 20 for building
    # the model so that things are close to 1. Combined, that means that we need to
    # apply an offset of 45 mag when calculating the amplitude for sncosmo.
    model['amplitude'] = (
        light_curve.meta['parsnip_scale'] * result['amplitude'][0]
        * 10**(-0.4 * (20 + self.settings['zeropoint']))
    )
    # Copy over the latent variables one by one.
    for i in range(self.settings['latent_size']):
        model[f's{i+1}'] = result['encoding'][0, i]

    return model
def predict_redshift_distribution(self, light_curve, min_redshift=0.,
                                  max_redshift=None, sampling=0.01):
    """Predict the redshift distribution for a light curve.

    Given observations y, and latent variables s, we want to compute the redshift
    distribution p(z|y) marginalized over the latent variables. Working this out
    with Bayes' theorem::

        p(z|y) = Integral[p(y|s,z) p(s,z) ds] / p(y)

    p(y|s,z) is the term that we compute as the negative log-likelihood in our loss
    function. We assume that p(s,z) is constant. p(y) just contributes an overall
    normalization term and can be ignored.

    The correct way to evaluate this function would be to perform a Monte Carlo
    integration p(y|s,z) like we currently do to marginalize over the amplitude.
    However, that procedure is stochastic and requires many computations to average
    out. Here we instead approximate p(z|y) by simply evaluating p(y|s,z) at the MAP
    value of the model parameters. This is not correct, but should provide a
    reasonable approximation to the integral in most cases and only requires a
    single evaluation per redshift.

    We evaluate this approximate redshift distribution on a prespecified grid of
    redshifts and normalize so that the distribution sums to 1.

    Parameters
    ----------
    light_curve : `~astropy.table.Table`
        Light curve
    min_redshift : float, optional
        Minimum redshift to consider, by default 0.
    max_redshift : float, optional
        Maximum redshift to consider, by default specified by
        settings['max_redshift'].
    sampling : float, optional
        Sampling to use, by default 0.01.

    Returns
    -------
    `~numpy.ndarray`
        Redshifts that the probability distribution was evaluated at
    `~numpy.ndarray`
        Redshift probability distribution
    """
    if max_redshift is None:
        max_redshift = self.settings['max_redshift']

    # The + sampling/100 nudges arange to include the endpoint despite float
    # roundoff.
    sample_redshifts = np.arange(min_redshift, max_redshift + sampling / 100.,
                                 sampling)
    if min_redshift == 0:
        # Having the redshift be equal to zero can cause problems. Handle this
        # gracefully.
        sample_redshifts[0] = 0.0001

    light_curve = preprocess_light_curve(light_curve, self.settings,
                                         ignore_missing_redshift=True)

    # Build one copy of the light curve per trial redshift so the whole grid
    # can be evaluated in a single batched forward pass.
    lcs = []
    for redshift in sample_redshifts:
        redshift_lc = light_curve.copy()
        redshift_lc.meta['redshift'] = redshift
        lcs.append(redshift_lc)

    result = self.forward(lcs, sample=False)
    # Per-object negative log-likelihoods (first element of the components).
    nll = self.loss_function(result, return_individual=True,
                             return_components=True)[0]
    nll = nll.detach().cpu().numpy()

    # Normalize the probability distribution. Subtracting the minimum NLL
    # before exponentiating avoids numerical overflow.
    prob = np.exp(-(nll - np.min(nll)))
    prob = prob / np.sum(prob) / sampling

    return sample_redshifts, prob
def predict_redshift(self, light_curve, min_redshift=0., max_redshift=None,
                     sampling=0.01):
    """Predict the redshift of a light curve.

    This evaluates the MAP estimate of the redshift.

    Parameters
    ----------
    light_curve : `~astropy.table.Table`
        Light curve

    Returns
    -------
    float
        MAP estimate of the redshift
    """
    grid, distribution = self.predict_redshift_distribution(
        light_curve,
        min_redshift=min_redshift,
        max_redshift=max_redshift,
        sampling=sampling
    )
    # The MAP estimate is the grid point with the highest probability.
    best_index = np.argmax(distribution)
    return grid[best_index]
def load_model(path=None, device='cpu', threads=8):
    """Load a ParSNIP model.

    Parameters
    ----------
    path : str, optional
        Path to the model on disk, or name of a model. If not specified, the
        default_model specified in settings.py is loaded.
    device : str, optional
        Torch device to load the model to, by default 'cpu'
    threads : int, optional
        Number of threads to use, by default 8

    Returns
    -------
    `~ParsnipModel`
        Loaded model
    """
    if path is None:
        # Fall back to the packaged default model.
        path = default_model
        print(f"Loading default ParSNIP model '{path}'")

    if '.' not in path:
        # A bare name (no extension) refers to a model bundled with the
        # package; resolve it to a file inside the package data.
        resource_path = f'models/{path}.pt'
        bundled_path = pkg_resources.resource_filename('parsnip', resource_path)
        if not os.path.exists(bundled_path):
            raise ValueError(f"No built-in model named '{path}'")
        path = bundled_path

    # Load the serialized settings and weights.
    target_device = parse_device(device)
    settings, state_dict = torch.load(path, target_device)

    # Rebuild the model and restore its weights.
    model = ParsnipModel(path, settings['bands'], target_device, threads, settings)
    model.load_state_dict(state_dict)

    return model
|
LSSTDESCREPO_NAMEparsnipPATH_START.@parsnip_extracted@parsnip-main@parsnip@parsnip.py@.PATH_END.py
|
{
"filename": "generate_shift_script.py",
"repo_name": "samuelyeewl/specmatch-emp",
"repo_path": "specmatch-emp_extracted/specmatch-emp-master/specmatchemp/buildlib/generate_shift_script.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
@filename generate_shift_script.py
Generate script lines for shifting spectra
"""
from __future__ import print_function
import os
import pandas as pd
from argparse import ArgumentParser
from specmatchemp import SPECMATCHDIR
if __name__ == '__main__':
    # Command-line interface: read a library star list and emit one
    # "smemp shift" command per star into a shell script.
    parser = ArgumentParser(description="Generate script for shifting")
    parser.add_argument('-l', '--libpath', type=str,
                        default=os.path.join(SPECMATCHDIR, 'libstars.csv'),
                        help="Path to parameters csv file")
    parser.add_argument('-o', '--outpath', type=str,
                        default='./shift_script.sh',
                        help="Path to output shift script")
    parser.add_argument('-s', '--suffix', type=str, default="",
                        help="Suffix to append to shift results")
    args = parser.parse_args()

    library = pd.read_csv(args.libpath)

    with open(args.outpath, 'w') as outfile:
        for _, row in library.iterrows():
            # NOTE(review): the leading character of lib_obs is dropped —
            # presumably a prefix marker in the library format; confirm.
            obs = row['lib_obs'][1:]
            name = row['cps_name']
            command = ("smemp shift " + obs + " "
                       + "-pp "
                       + "-o /home/syee/specmatchemp-working/specmatchemp/results/ "
                       + "-n " + name
                       + "\n")
            outfile.write(command)
|
samuelyeewlREPO_NAMEspecmatch-empPATH_START.@specmatch-emp_extracted@specmatch-emp-master@specmatchemp@buildlib@generate_shift_script.py@.PATH_END.py
|
{
"filename": "uvotplot.py",
"repo_name": "PaulKuin/uvotpy",
"repo_path": "uvotpy_extracted/uvotpy-master/uvotpy/uvotplot.py",
"type": "Python"
}
|
'''
These are function for making plots of UVOT grism stuff.
binplot(*args, **kwargs)
Bin up the arrays with the keyword bin=<number>
waveAccPlot( wave_obs,pix_obs, wave_zmx, pix_zmx, disp_coef, acc, order=None)
display a figure of the accuracy of the wavelength solution
contourpk(x,y,f, levels=None,xb=None,xe=None,yb=None,ye=None):
contour plot with 1-D array inputs for X, Y and F , limits [xb,xe], [yb,ye]
'''
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from builtins import str
from builtins import range
from past.utils import old_div
try:
from uvotpy import uvotplot,uvotmisc,uvotwcs,rationalfit,mpfit,uvotio
except:
pass
#from . import uvotgetspec as uvotgrism
import numpy as N
try:
import pyfits
except:
from astropy.io import fits as pyfits
pass
from pylab import ioff,ion,arange, plot, subplot, xlim, ylim, title, xlabel, \
ylabel, polyval, figure, contour, plt, legend, polyval, polyfit, savefig, \
text , grid, clf, gca
import os
from . import uvotmisc, uvotgetspec
def binplot(*args, **kwargs):
    '''Bin up the arrays with the keyword bin=<number>

    Accepts the same positional/keyword parameters as pyplot.plot(); the extra
    keyword ``bin`` gives the number of samples per bin. With one positional
    array the binned sums are plotted; with two or more, the first array is
    averaged per bin and the second is summed per bin (extra args pass through).
    Without ``bin`` this is a plain pass-through to plot().
    '''
    if 'bin' in list(kwargs.keys()):
        nbin = kwargs['bin']
        del kwargs['bin']
        print('uvotplot nbin = ', nbin)
        nargs = len(args)
        print('uvotplot nargs = ', nargs)
        if nargs == 1:
            x = args[0]
            # number of bins (over-allocate one; trimmed below if empty)
            m = int(old_div((len(x) + 0.5 * nbin), nbin)) + 1
            print('uvotplot m = ', m)
            xx = 0.0 * x[0:m].copy()
            for i in range(len(x)):
                j = int(old_div(i, nbin))
                xx[j] = xx[j] + x[i]
            # drop a trailing empty bin
            if xx[m - 1] == 0.0:
                xx = xx[0:m - 1]
            # BUG FIX: was `args = (xx)`, which is NOT a tuple — plot(*args)
            # then unpacked the array elementwise. A one-element tuple needs
            # the trailing comma.
            # NOTE(review): unlike the multi-arg branch, xx is not divided by
            # nbin here (sums, not means) — confirm whether that is intended.
            args = (xx,)
        else:
            x = args[0]
            y = args[1]
            m = int(old_div((len(x) + 0.5 * nbin), nbin)) + 1
            print('uvotplot m = ', m, ' len(x) = ', len(x))
            xx = 0.0 * x[0:m].copy()
            yy = xx.copy()
            for i in range(len(x)):
                j = int(old_div(i, nbin))
                xx[j] = xx[j] + x[i]
                yy[j] = yy[j] + y[i]
            # drop a trailing empty bin
            if xx[m - 1] == 0.0:
                xx = xx[0:m - 1]
                yy = yy[0:m - 1]
            # mean x position of each bin; y stays a per-bin sum
            xx = old_div(xx, nbin)
            if nargs == 2:
                args = xx, yy
            elif nargs == 3:
                z1 = args[2]
                args = xx, yy, z1
            elif nargs == 4:
                z1 = args[2]
                z2 = args[3]
                args = xx, yy, z1, z2
            else:
                print('cannot handle more than 4 arguments')
                args = xx, yy
        plot(*args, **kwargs)
    else:
        plot(*args, **kwargs)
    return
def zmxCoefOnAxis():
    '''
    Zemax dispersion coefficients for the on-axis UV grism (nominal mode),
    taken from ReportZemaxUVGrism.v1.3. They are stored highest order first
    so the array can be passed directly to polyval.
    '''
    coefficients = [8.14e-10, -1.634e-6, 1.366e-3, 3.206, 2597.9]
    return N.array(coefficients)
def plot_ellipsoid_regions(Xim,Yim,Xa,Yb,Thet,b2mag,matched,ondetector,
        img_pivot,img_pivot_ori,img_size,limitMag,img_angle=0.0,lmap=False,
        makeplot=True,color='k',annulusmag=13.0,ax=None,chatter=1):
    '''
    This routine is to plot ellipsoid regions on the grism image/graph, which
    may be a rotated, cropped part of the detector image

    Parameters ellipse
    ------------------
    Xim, Yim : ndarray
        center ellipse: Xim,Yim,
    Xa, Xb : ndarray
        length X-axis Xa, length Y-axis Xb,
    Thet : float
        angle ellipse orientation major axis Thet
    b2mag : ndarray
        B magnitude b2mag
    matched : ndarray, bool
        indicating a match between USNO-B1 and uvotdetected
    ondetector : ndarray, bool
        indicating center is on the detector image

    Parameters image
    ----------------
    img_angle : float
        rotation of the detector image (to left)
    img_pivot_ori : list,ndarray[2]
        the original X,Y detector coordinate of the center of rotation
    img_pivot : list, ndarray[2]
        the coordinate of the center of rotation in the rotated image
    img_size : list, ndarray[2]
        the size of the image

    Parameters map
    --------------
    lmap : bool
        if lmap=True, produce a truth map excluding the selected ellipses

    Returns
    -------
    None or boolean map image, plots an ellipse on the current figure
    '''
    from .uvotmisc import uvotrotvec
    from numpy import where, sin, cos, ones, asarray, outer
    # fixed radius (pixels) of the annulus drawn around saturated sources
    ann_size = 49.0
    # validate the input (TBD)
    if (chatter > 1) & makeplot: print("plotting ellipsoid regions on image for zeroth orders")
    if chatter > 2:
        print('plot_ellipsoid_regions input data: shape Xim, etc ', Xim.shape)
        print('Yim ',Yim.shape,' Xa ',Xa.shape,' Yb ',Yb.shape,' Thet ',Thet.shape)
        print('img_pivot = ',img_pivot)
        print('omg_pivot_ori = ',img_pivot_ori)
        print('img_size = ',img_size)
        print('limitMag = ',limitMag)
        print('img_angle = ',img_angle)
        print('lmap = ',lmap)
        print('annulusmag = ',annulusmag)
    if chatter > 3:
        print('B2mag :',b2mag)
    img_size = asarray(img_size)
    if len(img_size) != 2:
        print("error img_size must be the x and y dimensions of the image")
        return
    # rotate the ellipse data and place on the coordinate system of the image
    if img_angle != 0.0:
        X,Y = uvotrotvec( Xim -img_pivot_ori[0], Yim -img_pivot_ori[1], -img_angle )
        # NOTE(review): only Y is shifted by the pivot after rotation; the X
        # shift is commented out below — confirm this asymmetry is intended.
        #X += img_pivot[0]
        Y = Y + img_pivot[1]
    else:
        X = Xim -img_pivot_ori[0] + img_pivot[0]
        Y = Yim -img_pivot_ori[1] + img_pivot[1]
    # select items on the image with B2mag > limitMag
    if ax == None:
        # no axes given: use the whole image extent
        xmin = 0 #-img_pivot[0]
        xmax = img_size[0] # img_size[0] + xmin
        ymin = 0 # -img_pivot[1]
        ymax = img_size[1] # img_size[1] + ymin
    else: # select the image slice
        xlimits = ax.get_xlim()
        ylimits = ax.get_ylim()
        xmin = xlimits[0] -img_pivot[0]
        xmax = xlimits[1] # img_size[0] + xmin
        ymin = ylimits[0] # -img_pivot[1]
        ymax = ylimits[1] # img_size[1] + ymin
    if chatter > 2:
        print("Plot_ellipsoid_regions center limits to X:", xmin, xmax," Y:",ymin,ymax)
    # sources bright enough to mark, with centers inside the view
    q = where((b2mag < limitMag) & (X > xmin) & (X < xmax) & (Y > ymin) & (Y < ymax))
    nq = len(q[0])
    # saturated source with annulus
    qsat = where((b2mag < annulusmag) & (X > xmin) & (X < xmax) & (Y > ymin) & (Y < ymax))
    nqsat = len(qsat[0])
    if chatter > 4:
        print('xmin, xmax, ymin, ymax = ', xmin,xmax, ymin, ymax)
        print('normal selection q = ',q)
        print('len(q[0]) ', nq)
        print('saturated selection qsat = ',qsat)
        print('len(qsat[0]) ', nqsat)
    if nq == 0:
        if chatter > 2: print("no zeroth order regions within magnitude bounds found ")
        makeplot = False
    if chatter > 1:
        print("found ",nqsat," bright source(s) which may have a bright annulus on the image")
    # scale the ellipse axes according to Bmag
    # calibrate to some function of Bmag, limitMag,
    # probably length depends on sptype brightness, width is limited.
    Xa1 = 14.0 + 0.* Xa.copy() + 1.5*(19 - b2mag)
    Yb1 = 5.5 + 0.* Yb.copy()
    # plot the ellipses on the current image
    if makeplot:
        for i in range(nq):
            qq = q[0][i]
            ang = Thet[qq]-img_angle
            if chatter>4:
                print('plotting ellipse number ',qq)
                print('angle = ',ang)
            Ellipse( (X[qq],Y[qq]), (Xa1[qq], Yb1[qq]), ang, lw=1, color=color )
        # plot saturated annulus on the current image
        if nqsat > 0:
            for i in range(nqsat):
                qq = qsat[0][i]
                ang = Thet[qq]-img_angle
                if chatter>4:
                    print('plotting annulus number ',qq)
                    print('angle = ',ang)
                Ellipse( (X[qq],Y[qq]), (ann_size, ann_size), ang, lw=1, color=color )
    if lmap:
        # create a truth map for the image excluding the ellipses
        mapimg = ones(img_size, dtype=bool)
        if nq == 0:
            if chatter > 1:
                print('no zeroth orders to put on map. mapimg.shape = ',mapimg.shape)
            return mapimg
        else:
            for i in range(nq):
                qq = q[0][i]
                x,y,a,b,th = X[qq],Y[qq], Xa1[qq], Yb1[qq], Thet[qq]-img_angle
                mapimg = maskEllipse(mapimg, x,y,a,b,th)
        if nqsat > 0:
            # update the truth map for bright annulus excluding a circular region
            for i in range(nqsat):
                qq = qsat[0][i]
                x,y,a,b,th = X[qq],Y[qq], ann_size, ann_size, Thet[qq]-img_angle
                mapimg = maskEllipse(mapimg, x,y,a,b,th)
                if chatter > 1: print("masked bright source annulus at position [",x,",",y,"]")
        return mapimg
def maskEllipse(maskimg, x,y,a,b,theta, test=0, chatter=1):
    '''update a mask excluding ellipse region

    Parameters
    ----------
    maskimg : ndarray, 2D, bool
        boolean array to apply mask to (i.e., numpy.ones( array([200,400]),dtype=bool) )
    x,y : int, float
        ellipse center coordinate x,y
    a,b : float
        ellipse major axis a; minor axis b;
    theta : float
        rotation angle theta counterclockwise in deg.
    test : int, optional
        if 1, operate only on the ellipse bounding-box subimage instead of
        the full image
    chatter : int, optional
        verbosity level (debug output for chatter > 2)

    Returns
    -------
    maskimg with all pixels inside the ellipse are set to False

    note
    ----
    x and y , a and b are interchanged
    '''
    # BUG FIX: `int` was previously imported from numpy; the `numpy.int`
    # alias was removed in NumPy >= 1.24, which made this import fail.
    # The builtin int behaves identically for the scalar conversions below.
    from numpy import sin, cos, abs, arange, ones, where, outer, asarray, pi
    maskimg = asarray(maskimg)
    # inverse squared half-axes for the ellipse equation (plain / is true
    # division in Python 3, equivalent to the former old_div here)
    ca = 1. / (a * a)
    cb = 1. / (b * b)
    th = theta / 180. * pi
    # rotation matrix elements
    m11 = cos(th)
    m12 = sin(th)
    m21 = -sin(th)
    m22 = cos(th)
    # locate coordinates (xmin, ymin) (xmax, ymax)
    # and operate on the subset
    xmin, xmax = x - abs(a), x + abs(a) + 1
    ymin, ymax = y - abs(a), y + abs(a) + 1
    x8, x9 = int(xmin), int(xmax)
    y8, y9 = int(ymin), int(ymax)
    # if ellipse (x,y,a,b,theta) outside maskimg, then return
    if not ( (xmin < x) & (x < xmax) & (ymin < y) & (y < ymax) ):
        return maskimg
    subimsize = maskimg[x8:x9, y8:y9].shape
    x7 = outer(arange(subimsize[0]) - abs(a), ones(subimsize[1]))
    y7 = outer(ones(subimsize[0]), arange(subimsize[1]) - abs(a))
    # rotate the coordinate grid into the ellipse frame
    zx6 = m11 * x7 + m12 * y7
    zy6 = m21 * x7 + m22 * y7
    if test == 1:
        # mask only within the bounding-box subimage
        maskimg[x8:x9, y8:y9][where(ca * zx6 * zx6 + cb * zy6 * zy6 <= 1.0)] = False
    else:
        # mask using a full-image coordinate grid centered on (x, y)
        img_size = maskimg.shape
        x1 = outer(arange(img_size[0]) - x, ones(img_size[1]))
        y1 = outer(ones(img_size[0]), arange(img_size[1]) - y)
        zx = m11 * x1 + m12 * y1
        zy = m21 * x1 + m22 * y1
        maskimg[where(ca * zx * zx + cb * zy * zy <= 1.0)] = False
    if chatter > 2:
        # NOTE(review): when test==1 this debug block references x1,y1 which
        # are undefined in that branch — debug output only; confirm/clean up.
        print('center (',x,',',y,')')
        print('ellipse a = ',a,' b = ',b,' theta = ',theta)
        print(ca,cb,m11,m12,m21,m22)
        print(xmin,xmax,ymin,ymax)
        print(x8,x9,y8,y9)
        print(subimsize)
        print(x7)
        print(y7)
        print(x1,y1)
        print(maskimg.shape)
    return maskimg
def Ellipse(xxx_todo_changeme, xxx_todo_changeme1, angle=0.0, resolution=200, **kwargs):
    '''
    Draw one ellipse as an N-sided polygon on the current figure.

    Parameters
    ----------
    (x,y) : float
        centre of the ellipse
    (rx,ry) : float
        half axes of the ellipse
    angle : float
        orientation angle in degrees
    resolution : int
        number of polygon vertices used to approximate the ellipse
    kwargs : dict
        passed through to pyplot.plot()

    Note
    ----
    Can only plot one ellipse at a time.
    '''
    from numpy import arange, cos, sin, pi
    from matplotlib.pylab import plot
    from .uvotmisc import uvotrotvec

    (x, y) = xxx_todo_changeme
    (rx, ry) = xxx_todo_changeme1

    # sample the parametric angle uniformly around the full circle
    phase = 2.0 * pi / resolution * arange(resolution)
    xs = rx * cos(phase)
    ys = ry * sin(phase)
    # rotate into place, then translate to the center
    if angle != 0.0:
        xs, ys = uvotrotvec(xs, ys, angle)
    xs = xs + x
    ys = ys + y
    return plot(xs, ys, '-', **kwargs)
def contourpk(x,y,f, levels=None,xb=None,xe=None,yb=None,ye=None,s=60,kx=1,ky=1,dolabels=True, **kwargs):
    '''Make a contour plot with 1-D array inputs for X, Y and F. This is a
    wrapper to convert lists of points (X,Y,Z) in 2-D arrays, then calls contour()

    Parameters
    ----------
    X, Y: ndarrays[:], 1D on a 2D plane
        coordinates X, Y
    Z : ndarray[:], 1D function on X,Y

    kwargs : dict
    -------------
    - **xb,xe,yb,ye** : float
        limits x,y for bispline interpolation valid region
    - **s** : float
        smoothing parameter for bisplrep
    - **kx, ky** : int
        order for the interpolation
    - **dolabels** : bool
        labels on the contours if true
    - **levels** : list
        contour levels

    Note
    ----
    warning: X, Y axis may have been interchanged
    '''
    import numpy
    from scipy import interpolate
    from pylab import contour, plt
    # bounding box of the scattered input points
    x1, x2, y1, y2 = min(x), max(x), min(y), max(y)
    # regular grid (default 50 points per axis) to evaluate the spline on
    xx = numpy.linspace(x1, x2)
    yy = numpy.linspace(y1, y2)
    X, Y = numpy.meshgrid(xx, yy)
    # fit a smoothing bivariate spline to the scattered samples, then
    # evaluate it on the regular grid
    tck = interpolate.bisplrep(x,y,f,kx=kx,ky=ky,s=s,xb=xb,xe=xe,yb=yb,ye=ye)
    Z = interpolate.bisplev(xx, yy, tck)
    # BUG FIX: identity comparison `levels == None` replaced with `is None`
    # (also removed the unused locals `shp` and `task`).
    # Note the X/Y swap in the contour call (see docstring warning).
    if levels is None:
        C = contour(Y, X, Z,**kwargs)
    else:
        C = contour(Y, X, Z, levels=levels,**kwargs)
    if dolabels:
        plt.clabel(C, inline=1,fontsize=10)
    return Y,X,Z,tck, C
def waveAccPlot(wave_obs,pix_obs, wave_zmx, pix_zmx, disp_coef, obsid=None,
      acc=None, order=None, wheelpos=200, figureno=1,legloc=[1,2]):
    r'''Plots of the accuracy of the wavelength solution from zemax compared to
    the observed wavelengths.

    Parameters
    ----------
    wave_obs, pix_obs : ndarray
       observed wavelengths points (green circles)
    wave_zmx, pix_zmx : ndarray
       calculated zemax points (or the interpolated solution) (red crosses)
    disp_coef : list
       dispersion coefficients in reverse order: if p is of length N, the
       polynomial is y(x) = p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-1]
    obsid : str, optional
       if given, appended to the plot title
    acc : float, optional
       accuracy in wavelength; computed from the residuals when None
    order : int, optional
       order of the polynomial fit (default len(disp_coef))
    wheelpos : int
       filter wheel position; selects UV (<500) or V grism reference values
    figureno : int
       matplotlib figure number to draw into
    legloc : list
       legend locations for the bottom and top panel (read-only here, so the
       mutable default is safe)

    Returns
    -------
    acc, zero_offset : float
       standard deviation of the residuals and the mean wavelength offset

    Notes
    -----
    Top panel: lambda - lambda_linear vs pix.
    Bottom panel: residuals wave - wave(pix) for observed (circles) and
    zemax (crosses) points, with 1-sigma limits.
    '''
    # reference wavelength and plot annotations depend on the grism
    if wheelpos < 500:
        ref_wave = 2600.
        titl = 'Wavelength accuracy UV grism - '
        textstart = 1600
    else:
        ref_wave = 4200.
        titl = 'Wavelength accuracy V grism - '
        textstart = 2700
    # zero of pix_obs forced to ref_wave (2600. or 4200.) for initial plot
    if order is None:        # was `order == None`: use identity test for None
        order = len(disp_coef)
    # fit pixel position as function of wavelength to locate the anchor pixel
    dcoef = polyfit(wave_obs,pix_obs,order)
    doff = polyval(dcoef,ref_wave)
    pix_obs = pix_obs - doff
    print("fit through observations pixel position of anchor = ",doff)
    n1, n2 = len(pix_obs), len(pix_zmx)
    pix1 = N.zeros( (n1+n2) )
    pix1[0:n1,] = pix_obs
    pix1[n1:(n1+n2),] = pix_zmx
    pix2 = pix1.min()
    pix3 = pix1.max()
    pix = N.arange(pix2,pix3)
    wav = polyval(disp_coef, pix)
    # subtract the linear term of the dispersion from all wavelengths
    w_obs = wave_obs - (disp_coef[-1]+disp_coef[-2]*pix_obs)
    w_zmx = wave_zmx - (disp_coef[-1]+disp_coef[-2]*pix_zmx)
    wavlin = wav - (disp_coef[-1]+disp_coef[-2]*pix)
    zero_offset = (wave_obs-polyval(disp_coef, pix_obs+doff) ).mean()
    zo = zero_offset
    if acc is None:          # was `acc == None`
        wave_off = (wave_obs-polyval(disp_coef, pix_obs+doff) )
        acc = wave_off.std()
        print(' initial acc (all points) = ',acc)
        # remove outliers beyond 3 sigma and recompute the accuracy
        q_in = N.where(abs(wave_off-zo) < 3.* acc)
        acc = (wave_off[q_in]).std()
        print(' after removing outliers: acc = ', acc)
    print('accuracy of the fit = ',acc, ' angstrom')
    # raw strings below: '\A', '\D', '\l', '\s' are not valid escapes, so the
    # old plain strings kept the backslash only by accident; r'' is explicit
    # and byte-identical at runtime
    stracc = str(old_div(((10*acc+0.5).__int__()),10.)) + r'$\AA$'
    zero_offset = old_div(((10*zero_offset+0.5).__int__()),10.)
    txt = r'<$\Delta\lambda$> = '+str(zero_offset)+r'$\AA\ \ \ \sigma_{observed-model}$ = '+stracc
    figure( num=figureno )
    subplot(211)
    plot(pix, wavlin, '-')
    plot(pix_obs,w_obs,'ob')
    plot(pix_zmx,w_zmx,'+r')
    ylabel(r'$\lambda$ - $\lambda_{linear}$ ($\AA$)')
    xlabel('pixels')
    if order == 4:
        sord = 'fourth '
    elif order == 3:
        sord = 'third '
    elif order == 2:
        sord = 'second '
    elif order == 1:
        sord = 'first '
    else:
        sord = 'unknown '
    legend((sord+'order fit','observed data','model'),loc=legloc[1])
    if obsid is None:        # was `obsid == None`
        obsid = ''
    title(titl+obsid)
    subplot(212)
    # residuals against the full polynomial dispersion relation
    w1 = wave_obs-polyval(disp_coef, pix_obs+doff)
    w2 = wave_zmx-polyval(disp_coef, pix_zmx)
    plot(wave_obs,w1, 'ob',label='_nolegend_')
    plot(wave_zmx,w2, '+r',label='_nolegend_')
    p0 = pix*0.
    p1 = p0 - acc+zo
    p2 = p0 + acc+zo
    plot(wav,p0,'-r',label='_nolegend_')
    plot(wav, p1,'--b',label=r'1-$\sigma$ limits')
    plot(wav, p2,'--b',label='_nolegend_' )
    ylabel(r'$\Delta\lambda$ ($\AA$)')
    xlabel(r'$\lambda$ ($\AA$)')
    a = gca()
    ylim = a.get_ylim()
    # clamp the y-range to +/- 2.1 sigma around the zero offset
    ylim=(zo-2.1*acc,zo+2.1*acc)
    a.set_ylim(ylim)
    legend(loc=legloc[0])
    text(textstart,ylim[0]*0.9,txt)
    savefig('accuracy.png')
    return acc, zero_offset
def make_spec_plot(nspec=10, parmfile='plotparm.par',wheelpos=160):
    '''
    Reads parameters from a comma delimited file
    Each line is for one plot
    nspec is the number of plots on a page.
    Note: this program has not been used since 2010, so probably needs updating
    '''
    # read plot parameter file in list
    f = open(parmfile,"r")
    plines = f.readlines()
    f.close()
    nfig = len(plines)
    # number of pages needed to show nfig spectra, nspec per page
    nplot = old_div((nfig+1),nspec)
    pwd = os.getcwd()
    NN = nspec*3000
    # grism clocking depends on the filter wheel position
    # NOTE(review): any other wheelpos leaves `clocked` undefined -> NameError below
    if wheelpos == 160: clocked = True
    if wheelpos == 200: clocked = False
    for kp in range(nplot):
        # per-page buffers: wavelengths, spectra, lengths, exposures, labels
        xwa = N.zeros(NN).reshape(3000,nspec)
        xsp = N.zeros(NN).reshape(3000,nspec)
        speclen = N.zeros(nspec)
        texp= N.zeros(nspec)
        id = N.empty(nspec,dtype='|S40')  # NOTE(review): shadows builtin id()
        nsubplot = 0
        for kf in range(nspec): # process the data
            print('length list ' , len(xwa), len(xsp))
            nfig -= 1
            if nfig < 0: break
            nsubplot += 1
            k = kf+nspec*kp # specific plot
            # one comma-separated record per observation to extract
            dir_,filestub,ra,dec,ext1,lfilt1,ext2,filt2,wpixscale,spextwid = (plines[k]).split(',')
            ra = float(ra)
            dec = float(dec)
            print("procesing: ",dir_,filestub,ra,dec,ext1,lfilt1,ext2,filt2,wpixscale,spextwid)
            print("filestub = ", filestub)
            print("extension= ", ext1)
            print("width spectral extraction = ",spextwid)
            print("changing directory . . .")
            os.chdir(dir_)
            print("new directory = ", os.getcwd())
            print("processing figure ",k," . . . ")
            if filt2 == "None" : filt2 = None
            # extract the spectrum for this observation (heavy call)
            out = uvotgetspec.getSpec(ra,dec,filestub,int(ext1),lfilter=lfilt1, lfilt2=filt2,chatter=1,lfilt2_ext=int(ext2), spextwidth=int(spextwid), clocked=clocked)
            ( (dis, spnet, angle, anker, anker2, anker_field, ank_c), \
              (bg, bg1, bg2, extimg, spimg, spnetimg, offset) , \
              (C_1,C_2, img, H_lines, WC_lines), hdr ) = out
            exposure = hdr['EXPOSURE']
            pos = ank_c[1] # anchor in extracted image
            ll = max( (pos-350,0) )
            #ul = min( (pos+1900, len(dis)-pos) )
            print('ul' , (pos+1900, len(dis)))
            ul = pos+1900
            print("exposure time = ", exposure)
            print("spectrum pixel range = ",ll," -- ",ul)
            print("saving spectrum . . . number ", kf)
            # convert pixel positions to wavelengths with the dispersion C_1
            wav = (polyval(C_1,dis[ll:ul]))
            spe = (spnet[ll:ul])
            speclen[kf] = len(wav)
            figure(4+kp); plot(wav,old_div(spe,exposure)); xlim(1700,6500)
            # store the count-rate spectrum and wavelengths for the page plot
            xsp[:speclen[kf],kf] = old_div(spe,exposure)
            xwa[:speclen[kf],kf] = wav
            texp[kf] = exposure
            id[kf] = filestub+'['+str(ext1)+']'
        #
        # calculate # plots left
        grid  # NOTE(review): not called (missing parentheses) -- this line has no effect
        xlim(1750,6500)
        ylim(0,8)
        savefig('/Volumes/users/Users/kuin/caldata/specplot_sums'+str(kp)+'.png')
        spmax = 0.7*xsp.max()
        print("plotting spectra . . .")
        clf()
        for kf in range(nsubplot): # make the plots
            subplot(nspec,1,kf)
            #k = kf+nspec*kp ; specific plot
            #
            wl = xwa[:speclen[kf],kf]
            sp = xsp[:speclen[kf],kf]
            texpo = texp[kf]
            plot(wl, sp, 'k', ls='steps')
            xlim(1700,6000)
            ylim(0,spmax)
            #
            text(1800,0.85*spmax,id[kf]+' '+str(texp[kf])+'s')
            grid  # NOTE(review): not called -- no effect
            xlabel('wavelength')
            ylabel('countrate')
        savefig('/Volumes/users/Users/kuin/caldata/specplot_'+str(kp)+'.png')
    # perhaps make here a summed spectrum
    os.chdir(pwd)
    return None
#####################################################################################
|
PaulKuinREPO_NAMEuvotpyPATH_START.@uvotpy_extracted@uvotpy-master@uvotpy@uvotplot.py@.PATH_END.py
|
{
"filename": "_pad.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/slider/_pad.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Pad(_BaseLayoutHierarchyType):

    # class properties
    # --------------------
    _parent_path_str = "layout.slider"
    _path_str = "layout.slider.pad"
    _valid_props = {"b", "l", "r", "t"}

    # b
    # -
    @property
    def b(self):
        """
        Padding (in px) along the bottom edge of the component.

        The 'b' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["b"]

    @b.setter
    def b(self, val):
        self["b"] = val

    # l
    # -
    @property
    def l(self):
        """
        Padding (in px) along the left edge of the component.

        The 'l' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["l"]

    @l.setter
    def l(self, val):
        self["l"] = val

    # r
    # -
    @property
    def r(self):
        """
        Padding (in px) along the right edge of the component.

        The 'r' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["r"]

    @r.setter
    def r(self, val):
        self["r"] = val

    # t
    # -
    @property
    def t(self):
        """
        Padding (in px) along the top edge of the component.

        The 't' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["t"]

    @t.setter
    def t(self, val):
        self["t"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        b
            The amount of padding (in px) along the bottom of the
            component.
        l
            The amount of padding (in px) on the left side of the
            component.
        r
            The amount of padding (in px) on the right side of the
            component.
        t
            The amount of padding (in px) along the top of the
            component.
        """

    def __init__(self, arg=None, b=None, l=None, r=None, t=None, **kwargs):
        """
        Construct a new Pad object

        Set the padding of the slider component along each side.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.slider.Pad`
        b
            The amount of padding (in px) along the bottom of the
            component.
        l
            The amount of padding (in px) on the left side of the
            component.
        r
            The amount of padding (in px) on the right side of the
            component.
        t
            The amount of padding (in px) along the top of the
            component.

        Returns
        -------
        Pad
        """
        super(Pad, self).__init__("pad")

        # Re-parenting shortcut used internally by the graph-object tree.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise `arg` into a plain dict we can pop entries from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.slider.Pad
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.slider.Pad`"""
            )

        # Handle skip_invalid / validation flags.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments win over the corresponding `arg` entries.
        for prop_name, explicit in (("b", b), ("l", l), ("r", r), ("t", t)):
            from_arg = arg.pop(prop_name, None)
            chosen = explicit if explicit is not None else from_arg
            if chosen is not None:
                self[prop_name] = chosen

        # Forward anything left over (plus unknown kwargs) for validation.
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid.
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@layout@slider@_pad.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "kpenev/poet",
"repo_path": "poet_extracted/poet-master/documentation/sphinx/source/conf.py",
"type": "Python"
}
|
"""Configuration for SPHINX to generate documentation."""

import sys
import inspect

# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os

# Make the project's python package importable for autodoc.
sys.path.insert(
    0,
    os.path.abspath(
        os.path.join(
            os.path.dirname(__file__),
            '../../../PythonPackage'
        )
    )
)

# -- Project information -----------------------------------------------------

project = '(P)lanetary (O)rbital (E)volution due to (T)ides'
#pylint: disable=redefined-builtin
copyright = '2019, Kaloyan Penev'
#pylint: enable=redefined-builtin
author = 'Kaloyan Penev'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.ifconfig',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',
    'sphinx.ext.inheritance_diagram',
    'nbsphinx',
    # 'breathe',
    # 'exhale'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): Sphinx >= 5 warns on `language = None`; 'en' is the modern
# default -- confirm target Sphinx version before changing.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'PlanetaryOrbitalEvolutionduetoTidesdoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        'PlanetaryOrbitalEvolutionduetoTides.tex',
        '(P)lanetary (O)rbital (E)volution due to (T)ides Documentation',
        'Kaloyan Penev',
        'manual'
    ),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        'planetaryorbitalevolutionduetotides',
        '(P)lanetary (O)rbital (E)volution due to (T)ides Documentation',
        [author],
        1
    )
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        'PlanetaryOrbitalEvolutionduetoTides',
        '(P)lanetary (O)rbital (E)volution due to (T)ides Documentation',
        author,
        'PlanetaryOrbitalEvolutionduetoTides',
        'One line description of project.',
        'Miscellaneous'
    ),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------

# -- Options for intersphinx extension ---------------------------------------

# Example configuration for intersphinx: refer to the Python standard library.
# NOTE(review): newer Sphinx requires named mappings, e.g.
# {'python': ('https://docs.python.org/', None)} -- confirm Sphinx version.
intersphinx_mapping = {'https://docs.python.org/': None}

# -- Options for todo extension ----------------------------------------------

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True

# -- Options for autodoc extension -------------------------------------------
# NOTE(review): autodoc_default_flags is deprecated in Sphinx >= 1.8 in favour
# of autodoc_default_options -- confirm Sphinx version before modernising.
autodoc_default_flags = ['members',
                         'undoc-members',
                         'show-inheritance']

#Napolean extension defined names.
#pylint: disable=invalid-name
napoleon_include_private_with_doc = True
napoleon_include_special_with_doc = True
napoleon_include_init_with_doc = True
#pylint: enable=invalid-name

# Layout of the generated inheritance diagrams (graphviz attributes).
inheritance_graph_attrs = dict(rankdir="TB",
                               fontsize="24",
                               ratio='auto',
                               size='120')

#Call signature defined by SPHINX autodoc plugin.
#pylint: disable=too-many-arguments
#pylint: disable=unused-argument
def add_inheritance_diagram(app, what, name, obj, options, lines):
    """Prepend an inheritance-diagram directive to class/module docstrings.

    Connected to the ``autodoc-process-docstring`` event; mutates ``lines``
    in place and leaves it untouched for anything that is not a module with
    classes or a class.
    """

    if what == 'class':
        lines.insert(0, '')
        lines.insert(0, '.. inheritance-diagram:: ' + name)
    elif what == 'module':
        members = inspect.getmembers(sys.modules[name], inspect.isclass)
        class_names = [member_name for member_name, _ in members]
        if class_names:
            # Build a titled section listing every class in the module.
            lines.insert(0, '')
            lines.insert(0, '.. inheritance-diagram:: ' + ' '.join(class_names))
            lines.insert(0, '=========================')
            lines.insert(0, 'Class Inheritance Diagram')
#pylint: enable=too-many-arguments
def setup(app):
    """Register the custom stylesheet and the docstring handler with Sphinx."""
    handler = add_inheritance_diagram
    app.add_stylesheet('unlimited_width.css')
    app.connect('autodoc-process-docstring', handler)
# Location of the doxygen-generated XML; only consumed by the breathe/exhale
# configuration below if those extensions are re-enabled.
doxygen_xml = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__),
        '../../doxygen/build/xml'
    )
)

#breathe_projects = {
#    'C++ library': doxygen_xml
#}

#breathe_default_project = "C++ library"

#breathe_default_members = ('members',
#                           'protected-members',
#                           'private-members',
#                           'undoc-members')

## Setup the exhale extension
#exhale_args = {
#    # These arguments are required
#    "containmentFolder": "./cpp_api",
#    "rootFileName": "library_root.rst",
#    "rootFileTitle": "C++ API",
#    "doxygenStripFromPath": "..",
#    # Suggested optional arguments
#    "createTreeView": True
#}
|
kpenevREPO_NAMEpoetPATH_START.@poet_extracted@poet-master@documentation@sphinx@source@conf.py@.PATH_END.py
|
{
"filename": "test_qnm.py",
"repo_name": "duetosymmetry/qnm",
"repo_path": "qnm_extracted/qnm-master/test/test_qnm.py",
"type": "Python"
}
|
import pytest
import qnm
import numpy as np
# Prefer the stdlib pathlib (Python 3); fall back to the pathlib2 backport
# so the suite also runs under Python 2.
try:
    from pathlib import Path # py 3
except ImportError:
    from pathlib2 import Path # py 2
class QnmTestDownload(object):
    """
    Base class so that each test will automatically download_data
    """

    @classmethod
    def setup_class(cls):
        """Fetch the qnm data files once before any test in the class runs."""
        qnm.download_data()
class TestQnmFileOps(QnmTestDownload):

    def test_cache_file_operations(self):
        """Exercise downloading, clearing, and decompressing the disk cache."""
        print("Downloading with overwrite=True")
        qnm.cached.download_data(overwrite=True)

        print("Clearing disk cache but not tarball")
        qnm.cached._clear_disk_cache(delete_tarball=False)

        print("Decompressing tarball")
        qnm.cached._decompress_data()
class TestQnmOneMode(QnmTestDownload):

    def test_one_mode(self):
        """Solve the fundamental s=-2, l=m=2 mode at a=0.68 and check omega."""
        mode_seq = qnm.modes_cache(s=-2, l=2, m=2, n=0)
        omega, A, C = mode_seq(a=0.68)
        assert np.allclose(omega, (0.5239751042900845 - 0.08151262363119974j))
class TestQnmNewLeaverSolver(QnmTestDownload):

    def test_compare_old_new_Leaver(self):
        """ Check consistency between old and new Leaver solvers """
        from qnm.radial import leaver_cf_inv_lentz_old, leaver_cf_inv_lentz
        # Identical parameters fed to both implementations.
        params = dict(omega=.4 - 0.2j, a=0.02, s=-2, m=2, A=4.+0.j, n_inv=0)
        old = leaver_cf_inv_lentz_old(**params)
        new = leaver_cf_inv_lentz(**params)
        assert np.all([old[i] == new[i] for i in range(3)])
class TestQnmSolveInterface(QnmTestDownload):
    """
    Test the various interface options for solving
    """

    def test_interp_only(self):
        """Interpolated values are close to, but not identical with,
        freshly solved ones."""
        seq = qnm.modes_cache(s=-2, l=2, m=2, n=0)
        spin = 0.68
        assert spin not in seq.a
        omega_i, A_i, C_i = seq(a=spin, interp_only=True)
        omega_s, A_s, C_s = seq(a=spin, interp_only=False, store=False)
        assert np.allclose(omega_i, omega_s) and not np.equal(omega_i, omega_s)
        assert np.allclose(A_i, A_s) and not np.equal(A_i, A_s)
        assert np.allclose(C_i, C_s) and not all(np.equal(C_i, C_s))

    def test_store_a(self):
        """store=True appends the new spin to the sequence; store=False
        leaves it unchanged."""
        seq = qnm.modes_cache(s=-2, l=2, m=2, n=0)
        n_before = len(seq.a)
        mid = int(n_before / 2)
        # A spin strictly between two tabulated values, so it's new.
        a_new = 0.5 * (seq.a[mid] + seq.a[mid + 1])
        assert a_new not in seq.a
        _, _, _ = seq(a_new, store=False)
        assert len(seq.a) == n_before
        _, _, _ = seq(a_new, store=True)
        assert len(seq.a) == n_before + 1

    def test_resolve(self):
        """resolve_if_found=True must trigger an actual new solve."""
        seq = qnm.modes_cache(s=-2, l=2, m=2, n=0)
        a = seq.a[int(len(seq.a) / 2)]
        seq.solver.solved = False
        omega_0, A_0, C_0 = seq(a=a, resolve_if_found=False)
        solved_after_lookup = seq.solver.solved
        omega_1, A_1, C_1 = seq(a=a, resolve_if_found=True)
        solved_after_resolve = seq.solver.solved
        assert (solved_after_lookup is False) and (solved_after_resolve is True)
        assert np.allclose(omega_1, omega_0)
        assert np.allclose(A_1, A_0)
        assert np.allclose(C_1, C_0)
class TestMirrorModeTransformation(QnmTestDownload):

    @pytest.mark.parametrize( "s, l, m, n, a",
                              [(-2, 2, 2, 0, 0.1), # Low spin
                               (-2, 2, 2, 0, 0.9), # High spin
                               (-2, 2, 2, 4, 0.7), # Different overtone
                               (-2, 3, 2, 0, 0.7), # l odd
                               (-2, 3, 1, 0, 0.7), # l and m odd
                               (-1, 3, 1, 0, 0.7), # s, l, and m odd
                              ])
    def test_mirror_mode_transformation(self, s, l, m, n, a):
        """A mode maps onto its mirror under omega -> -conj(omega),
        A -> conj(A), with a parity sign on the C coefficients."""
        import copy
        mode = qnm.modes_cache(s=s, l=l, m=m, n=n)
        om, A, C = mode(a=a)
        # Deep-copy the solver so the cached mode itself stays untouched.
        mirror = copy.deepcopy(mode.solver)
        mirror.clear_results()
        mirror.set_params(a=a, m=-m, A_closest_to=A.conj(),
                          omega_guess=-om.conj())
        mirror.do_solve()
        assert np.allclose(-om.conj(), mirror.omega)
        assert np.allclose(A.conj(), mirror.A)
        parity_sign = (-1)**(l + qnm.angular.ells(s, m, mode.l_max))
        assert np.allclose(parity_sign * C.conj(), mirror.C)
@pytest.mark.slow
class TestQnmBuildCache(QnmTestDownload):

    def test_build_cache(self):
        """Check the default cache-building functionality"""
        qnm.cached._clear_disk_cache(delete_tarball=False)
        qnm.modes_cache.seq_dict = {}
        qnm.cached.build_package_default_cache(qnm.modes_cache)
        # Magic number: the package default cache holds 860 modes.
        assert len(qnm.modes_cache.seq_dict.keys()) == 860
        qnm.modes_cache.write_all()
        cache_data_dir = qnm.cached.get_cachedir() / 'data'
        assert len(list(cache_data_dir.glob('*.pickle'))) == 860
|
duetosymmetryREPO_NAMEqnmPATH_START.@qnm_extracted@qnm-master@test@test_qnm.py@.PATH_END.py
|
{
"filename": "_stepdefaults.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/slider/_stepdefaults.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StepdefaultsValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for ``layout.slider.stepdefaults``."""

    def __init__(
        self, plotly_name="stepdefaults", parent_name="layout.slider", **kwargs
    ):
        # Allow callers to override the wrapped data class and its docs.
        data_class = kwargs.pop("data_class_str", "Step")
        docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super(StepdefaultsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class,
            data_docs=docs,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@slider@_stepdefaults.py@.PATH_END.py
|
{
"filename": "2112.09586.ipynb",
"repo_name": "DMGW-Goethe/imripy",
"repo_path": "imripy_extracted/imripy-main/examples/2112.09586.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from scipy.interpolate import interp1d
from scipy.integrate import quad
from scipy.special import gamma
from imripy import halo, constants as c, merger_system as ms, inspiral, waveform, detector, plot_utils as pu, kepler
from imripy.inspiral import forces
import plot_utils
```
## Dynamical Friction with and without DM phase space
### Compare to https://arxiv.org/pdf/1908.10241.pdf
```python
m1 = 1e3 * c.solar_mass_to_pc
m2 = 10 * c.solar_mass_to_pc
# The luminosity distance to the system
D = 5e8 # in pc
hs = ms.HostSystem(m1, D_l=D)
```
```python
rho_spike = 226 * c.solar_mass_to_pc
r_spike = 0.54
spike_1 = halo.Spike(rho_spike, r_spike, 1.5, m1)
spike_2 = halo.Spike(rho_spike, r_spike, 2., m1)
spike_3 = halo.Spike(rho_spike, r_spike, 7./3., m1)
```
```python
def compareModels(hs, spike, a0, e0, lnLambda1, lnLambda2, ax_ea, label="", acc=1e-10, verbose=1, color=None):
    """Evolve one inspiral twice -- without and with the DM phase-space
    description of dynamical friction -- and plot eccentricity vs p/m1.

    assumes `hs` is an imripy HostSystem and `spike` an imripy halo -- TODO confirm
    NOTE(review): relies on the notebook-global `m2` for the secondary mass.
    """
    # evolve down to the ISCO of the central object
    afin = hs.r_isco
    # static-halo dynamical friction (no phase-space description)
    opt = inspiral.Classic.EvolutionOptions(dissipativeForces=[forces.GWLoss(), forces.DynamicalFriction(halo=spike,haloPhaseSpaceDescription=False, ln_Lambda=lnLambda1)],
                                           verbose=verbose, accuracy=acc)
    ko = kepler.KeplerOrbit(hs, m2, a0, e0)
    ev_stat = inspiral.Classic.Evolve(hs, ko, a_fin=afin, opt=opt)
    # plot e against the semilatus rectum in units of m1
    l, = ax_ea.plot( ev_stat.a*(1.-ev_stat.e**2)/hs.m1, ev_stat.e, label=label+r", stat", linestyle='-.', alpha=0.6, color=color)
    # same system with the phase-space description switched on
    opt=inspiral.Classic.EvolutionOptions(dissipativeForces=[forces.GWLoss(), forces.DynamicalFriction(halo=spike, haloPhaseSpaceDescription=True, ln_Lambda=lnLambda2, includeHigherVelocities=False)],
                                          verbose=verbose, accuracy=acc)
    ev_dyn = inspiral.Classic.Evolve(hs, ko, a_fin=afin, opt=opt)
    # reuse the first line's colour so the two curves pair up visually
    ax_ea.plot(ev_dyn.a*(1.-ev_dyn.e**2)/hs.m1, ev_dyn.e, color=l.get_c(), label=label+r", psd")
```
```python
fig = plt.figure(figsize=(10,6)); ax_ea = fig.gca()
p0 = 5000 * m1
e0 = 0.3
a0 = p0 /(1-e0**2)
compareModels(hs, spike_1, a0, e0, 10., -1, ax_ea, color='red', label=r"$\alpha_\text{spike}=1.5$")
compareModels(hs, spike_2, a0, e0, 10., -1, ax_ea, color='blue', label=r"$\alpha_\text{spike}=2$")
compareModels(hs, spike_3, a0, e0, 10., -1, ax_ea, color='green', label=r"$\alpha_\text{spike}=7/3$")
e0 = 0.6
a0 = p0 /(1-e0**2)
compareModels(hs, spike_1, a0, e0, 10., -1, ax_ea, color='red', label=r"$\alpha_\text{spike}=1.5$")
compareModels(hs, spike_2, a0, e0, 10., -1, ax_ea, color='blue', label=r"$\alpha_\text{spike}=2$")
compareModels(hs, spike_3, a0, e0, 10., -1, ax_ea, color='green', label=r"$\alpha_\text{spike}=7/3$")
ax_ea.set_xscale('log')
ax_ea.set_xlim(left=3e1, right=p0/m1);
ax_ea.set_ylim(bottom=0., top=1.)
ax_ea.set_xlabel("semilatus rectum")
ax_ea.set_ylabel("eccentricity")
ax_ea.grid()
#fig.savefig("plots/xiEffects_pe.pdf")
```
Evolving from 915.7509157509156 to 1.0 r_isco with initial eccentricity 0.3 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 1.7944s real time
Evolving from 915.7509157509156 to 1.0 r_isco with initial eccentricity 0.3 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 18.7522s real time
Evolving from 915.7509157509156 to 1.0 r_isco with initial eccentricity 0.3 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 2.0736s real time
Evolving from 915.7509157509156 to 1.0 r_isco with initial eccentricity 0.3 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 14.6814s real time
Evolving from 915.7509157509156 to 1.0 r_isco with initial eccentricity 0.3 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
/media/data/Documents/PhD/imripy/src/imripy/kepler.py:240: RuntimeWarning: invalid value encountered in sqrt
v = np.sqrt(self.m_tot *(2./r - 1./self.a))
/media/data/Documents/PhD/imripy/src/imripy/halo/dm.py:147: RuntimeWarning: invalid value encountered in double_scalars
return np.where(r > self.r_min, self.rho_spike * (self.r_spike/r)**self.alpha, 0.)
/media/data/Documents/PhD/imripy/src/imripy/inspiral/forces.py:241: IntegrationWarning: The occurrence of roundoff error is detected, which prevents
the requested tolerance from being achieved. The error may be
underestimated.
return -(1.-e**2)**(3./2.)/2./np.pi * quad(integrand, 0., 2.*np.pi, limit = 100)[0]
/media/data/Documents/PhD/imripy/src/imripy/inspiral/forces.py:370: RuntimeWarning: invalid value encountered in double_scalars
return -32./5. * ko.m_red**2 * ko.m_tot**(5./2.) / ko.a**(7./2.) / (1. - ko.e**2)**2 * (1. + 7./8.*ko.e**2)
/media/data/Documents/PhD/imripy/src/imripy/inspiral/forces.py:264: RuntimeWarning: invalid value encountered in sqrt
return -(1.-e**2)**(3./2.)/2./np.pi *np.sqrt(ko.m_tot * a*(1.-e**2))* quad(integrand, 0., 2.*np.pi, limit = 100)[0]
/media/data/Documents/PhD/imripy/src/imripy/inspiral/forces.py:264: IntegrationWarning: The occurrence of roundoff error is detected, which prevents
the requested tolerance from being achieved. The error may be
underestimated.
return -(1.-e**2)**(3./2.)/2./np.pi *np.sqrt(ko.m_tot * a*(1.-e**2))* quad(integrand, 0., 2.*np.pi, limit = 100)[0]
/media/data/Documents/PhD/imripy/src/imripy/inspiral/classic.py:139: RuntimeWarning: invalid value encountered in sqrt
L = np.sqrt(ko.a * (1-ko.e**2) * ko.m_tot * ko.m_red**2 )
A termination event occurred.
-> Ended at 2.669e+00r_isco. Evolution took 2.1549s real time
Evolving from 915.7509157509156 to 1.0 r_isco with initial eccentricity 0.3 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 13.3883s real time
Evolving from 1302.0833333333333 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 2.6508s real time
Evolving from 1302.0833333333333 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 19.4004s real time
Evolving from 1302.0833333333333 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 2.8966s real time
Evolving from 1302.0833333333333 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 18.2055s real time
Evolving from 1302.0833333333333 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
/media/data/Documents/PhD/imripy/src/imripy/inspiral/forces.py:353: RuntimeWarning: invalid value encountered in double_scalars
return -32./5. * ko.m_red**2 * ko.m_tot**3 / ko.a**5 / (1. - ko.e**2)**(7./2.) * (1. + 73./24. * ko.e**2 + 37./96. * ko.e**4)
/media/data/Documents/PhD/imripy/src/imripy/inspiral/forces.py:241: RuntimeWarning: invalid value encountered in double_scalars
return -(1.-e**2)**(3./2.)/2./np.pi * quad(integrand, 0., 2.*np.pi, limit = 100)[0]
/media/data/Documents/PhD/imripy/src/imripy/inspiral/forces.py:264: RuntimeWarning: invalid value encountered in double_scalars
return -(1.-e**2)**(3./2.)/2./np.pi *np.sqrt(ko.m_tot * a*(1.-e**2))* quad(integrand, 0., 2.*np.pi, limit = 100)[0]
A termination event occurred.
-> Ended at 2.683e+00r_isco. Evolution took 3.5389s real time
Evolving from 1302.0833333333333 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 16.8322s real time

### Compare dephasing effects
```python
def compareN2(hs, k0, spike, lnLambda, ax_dN2, label="", acc=1e-10, verbose=1, color=None):
    """Plot the ratio of DM-induced dephasing with/without the phase-space description.

    Evolves the system (hs, k0) three times -- vacuum (GW loss only), with
    'static' dynamical friction (no halo phase-space description), and with
    the 'dynamic' phase-space description -- and plots
    |Delta N^(2)_dyn| / |Delta N^(2)_stat| over GW frequency on ax_dN2.

    Parameters
    ----------
    hs : host system (project type)
    k0 : initial Kepler orbit (project type)
    spike : dark-matter spike halo (project type)
    lnLambda : Coulomb logarithm forwarded to DynamicalFriction
    ax_dN2 : matplotlib axis the ratio curve is drawn on
    label : legend label of the curve
    acc : solver accuracy
    verbose : verbosity level forwarded to the evolution options
    color : matplotlib color for the curve (None -> default color cycle)
    """
    # Baseline: vacuum inspiral with gravitational-wave losses only.
    opt_0 = inspiral.Classic.EvolutionOptions(dissipativeForces=[forces.GWLoss()], verbose=verbose, accuracy=acc)
    ev_0 = inspiral.Classic.Evolve(hs, k0, opt=opt_0)
    f_gw_0, N2_0 = waveform.N_cycles_n(2, hs, ev_0)
    # Interpolate the vacuum cycle count so it can be evaluated on the other
    # runs' frequency grids; fill with 0 outside the sampled range.
    N2_0_interp = interp1d(f_gw_0, N2_0, kind='cubic', bounds_error=False, fill_value=(0.,0.))

    # 'Static' dynamical friction: no halo phase-space description.
    df_stat = forces.DynamicalFriction(halo=spike, haloPhaseSpaceDescription=False, ln_Lambda=lnLambda)
    opt_stat = inspiral.Classic.EvolutionOptions(dissipativeForces=[forces.GWLoss(), df_stat], verbose=verbose, accuracy=acc)
    ev_stat = inspiral.Classic.Evolve(hs, k0, opt=opt_stat)
    f_gw_stat, N2_stat = waveform.N_cycles_n(2, hs, ev_stat)

    # 'Dynamic' dynamical friction: with halo phase-space description.
    df_dyn = forces.DynamicalFriction(halo=spike, haloPhaseSpaceDescription=True, ln_Lambda=lnLambda, includeHigherVelocities=False)
    opt_dyn = inspiral.Classic.EvolutionOptions(dissipativeForces=[forces.GWLoss(), df_dyn], verbose=verbose, accuracy=acc)
    ev_dyn = inspiral.Classic.Evolve(hs, k0, opt=opt_dyn)
    f_gw_dyn, N2_dyn = waveform.N_cycles_n(2, hs, ev_dyn)

    # Dephasing of each DM run relative to the vacuum baseline.
    dN2_stat = np.abs(N2_stat - N2_0_interp(f_gw_stat))
    dN2_stat_interp = interp1d(f_gw_stat, dN2_stat, kind='cubic', bounds_error=False, fill_value=(0.,0.))
    dN2_dyn = np.abs(N2_dyn - N2_0_interp(f_gw_dyn))
    # Fix: the 'color' parameter was previously accepted but never used --
    # forward it to plot() (color=None keeps the default color cycle).
    ax_dN2.plot(f_gw_dyn/c.hz_to_invpc, dN2_dyn/dN2_stat_interp(f_gw_dyn),
                label=label, color=color)
```
```python
# System parameters for the intermediate-mass-ratio inspiral.
# Masses are expressed in geometric units of parsec via the project's
# conversion constants (c.solar_mass_to_pc).
m1 = 1e3 * c.solar_mass_to_pc
m2 = 1 * c.solar_mass_to_pc
D = 5e8 # in pc
# Host system: central object m1 at luminosity distance D.
hs = ms.HostSystem(m1, D_l=D)
# Dark-matter spike parameters: normalization density, spike radius and
# power-law slope alpha = 7/3.
rho_spike = 226 * c.solar_mass_to_pc
r_spike = 0.54
alpha = 7/3
spike = halo.Spike(rho_spike, r_spike, alpha, m1)
```
```python
# Plot the dephasing ratio dN2_dyn/dN2_stat for three initial eccentricities.
fig = plt.figure(figsize=(10,6)); ax_dN2 = fig.gca()
# Initial orbit: a0 = 200 r_isco; the eccentricity is varied below by
# mutating k0.e before each compareN2 call.
a0 = 200 * hs.r_isco
k0 = kepler.KeplerOrbit(hs, m2, a0)
k0.e = 1e-4
compareN2(hs, k0, spike, -1, ax_dN2, label=r"$e_0=10^{-4}$")
k0.e = 0.2
compareN2(hs, k0, spike, -1, ax_dN2, label=r"$e_0=0.2$")
k0.e = 0.9
compareN2(hs, k0, spike, -1, ax_dN2, label=r"$e_0=0.9$")
plt.grid();
plt.xscale('log'); plt.ylim(bottom = 0., top=1.1)
plt.xlim(right=1)
plt.xlabel("frequency / Hz")
# NOTE(review): label uses "\D" escapes in a non-raw string; consider a raw
# string (r"...") to avoid a DeprecationWarning on newer Python versions.
plt.ylabel("$\Delta N^{(2)}_{psd}/\Delta N^{(2)}_{stat}$")
# Reference line at 0.58 (the approximately constant ratio observed).
plt.axhline(0.58, color='black')
plt.legend(loc='upper right')
#fig.savefig("plots/xi_dephasing.pdf")
```
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.0001 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0521s real time
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.0001 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 1.0352s real time
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.0001 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.3840s real time
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.2 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0544s real time
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.2 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 1.1620s real time
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.2 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.5047s real time
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.9 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0467s real time
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.9 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
<ipython-input-6-6b04a9b21574>:27: RuntimeWarning: divide by zero encountered in divide
ax_dN2.plot(f_gw_dyn/c.hz_to_invpc, dN2_dyn/dN2_stat_interp(f_gw_dyn),
A termination event occurred.
-> Ended at 2.671e+00r_isco. Evolution took 2.3260s real time
Evolving from 200.0 to 1.0 r_isco with initial eccentricity 0.9 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.668e+00r_isco. Evolution took 24.2699s real time
<matplotlib.legend.Legend at 0x7f63b8b04880>

## GWsignal effects
```python
# The masses of the primary and secondary object
m1 = 1e3 * c.solar_mass_to_pc
m2 = 1. * c.solar_mass_to_pc
# The luminosity distance to the system
D = 1e8 # in pc
hs = ms.HostSystem(m1, D_l=D)
# The dark matter spike according to https://arxiv.org/pdf/2108.04154.pdf
rho6 = 5.448e15 * c.solar_mass_to_pc # in 1/pc^2
# Three spike slopes with their LaTeX legend labels.
alphas = [(7./3., r'$\alpha_{spike}=7/3$'), (9./4., r'$\alpha_{spike}=9/4$'), (7./4., r'$\alpha_{spike}=7/4$')]
# Construct one Spike per slope from the rho6 normalization.
spikes = [halo.Spike.FromRho6(rho6, m1, alpha) for alpha, label in alphas]
```
```python
# Plots for comparison
# Plot the spike density profiles on twin x-axes: physical radius (pc) on
# the bottom axis and radius in units of r_isco on the top axis.
plt.rcParams['axes.prop_cycle'] = mpl.cycler(color=["#3f90da", "#ffa90e", "#bd1f01", "#94a4a2"])
plt.figure(figsize=(10, 8))
ax_pc = plt.gca()
ax_risco = ax_pc.twiny()
r_grid = np.geomspace(hs.r_isco, 1e6*hs.r_isco)
for i, sp in enumerate(spikes):
    # Same curve on both axes; reuse the color of the first line.
    l, = ax_pc.loglog(r_grid, sp.density(r_grid)/c.solar_mass_to_pc, label=alphas[i][1])
    ax_risco.loglog(r_grid/hs.r_isco, sp.density(r_grid)/c.solar_mass_to_pc, color=l.get_c())
ax_pc.set_xlabel("r / pc"); ax_risco.set_xlabel("r / $r_{isco}$"); ax_pc.set_ylabel(r"Density / $M_\odot/pc^3$")
ax_pc.legend(); ax_pc.grid()
#plt.savefig("plots/rho_dm.pdf")
```

```python
def compareModels(hs, spikes, k0, ax_a=None, ax_e=None, ax_ae=None, ax_h=None, ax_dN2=None, ax_m=None, ax_n=None,
                  label="", acc=1e-10, verbose=1, fgw5year_line=False):
    """Compare the inspiral evolution and GW signal for several DM spikes.

    Evolves (hs, k0) once in vacuum (GW losses only) and once per spike with
    dynamical friction added, plotting orbital evolution, characteristic
    strain and dephasing on whichever axes are provided (None axes are
    skipped).

    Parameters
    ----------
    hs : host system (project type)
    spikes : iterable of (halo, label) pairs
    k0 : initial Kepler orbit (project type)
    ax_a, ax_e, ax_ae, ax_h, ax_dN2, ax_n : optional matplotlib axes
    ax_m : currently unused, kept for interface compatibility
    label : prefix for legend labels
    acc : solver accuracy
    verbose : verbosity forwarded to the evolution options
    fgw5year_line : if True, mark on ax_dN2 the GW frequency reached five
        years before the vacuum merger
    """
    # Baseline run without dark matter.
    opt_0 = inspiral.Classic.EvolutionOptions(dissipativeForces=[forces.GWLoss()], verbose=verbose, accuracy=acc)
    ev_0 = inspiral.Classic.Evolve(hs, k0, opt=opt_0)
    pu.plotEvolution(hs, ev_0, ax_a=ax_a, ax_e=ax_e, ax_ae=ax_ae, ax_n=ax_n, label=label + r'\text{vacuum}')
    if ax_h is not None:
        pu.plotGWcharacteristicStrain(hs, ev_0, ax_h, label=label + r'\text{vacuum}', harmonics=[2])
    fgw_0, N2_0 = waveform.N_cycles_n(2, hs, ev_0)
    if fgw5year_line:
        # Frequency of the second harmonic five years before vacuum merger.
        f_gw5yr = interp1d(ev_0.t, fgw_0, kind='cubic', bounds_error=True)(ev_0.t[-1] - 5.*c.year_to_pc)
        ax_dN2.axvline(f_gw5yr/c.hz_to_invpc, linestyle='--')
    # Upper time limit for the DM runs, scaled with the initial separation.
    # Fix: use k0.a instead of relying on a module-level global 'a0', which
    # could be stale when callers change the orbit between cells.
    t_fin = ev_0.t[-1] * 10**(-2.*(np.log10(k0.a/hs.r_isco) - 2.))
    for spike, label_dm in spikes:
        # Dynamical friction with the halo phase-space description.
        df = forces.DynamicalFriction(halo=spike, haloPhaseSpaceDescription=True, includeHigherVelocities=False)
        opt_dm = inspiral.Classic.EvolutionOptions(dissipativeForces=[forces.GWLoss(), df], verbose=verbose, accuracy=acc)
        ev_dm = inspiral.Classic.Evolve(hs, k0, t_fin=t_fin, opt=opt_dm)
        l_dm = pu.plotEvolution(hs, ev_dm, ax_a=ax_a, ax_e=ax_e, ax_ae=ax_ae, ax_n=ax_n, label=label + label_dm)
        # Plot the characteristic strain of the second and third harmonic.
        if ax_h is not None:
            pu.plotGWcharacteristicStrain(hs, ev_dm, ax_h, harmonics=[2,3])
        # Plot Delta N of the second harmonic relative to the vacuum run,
        # reusing the evolution curve's color where available.
        if ax_dN2 is not None:
            pu.plotDeltaN(hs, ev_0, ev_dm, ax_dN2, label=r"$\Delta N^{(2)}$,"+label_dm, color=(l_dm.get_c() if l_dm is not None else None))
```
```python
def preparePlots(figF, axesF, axes_ae=None, ax_m=None, axes_a=None, e_min=1e-5, f_min=8e-4):
    """Apply common labels, limits, legends and the LISA noise curve.

    Parameters
    ----------
    figF : frequency-domain figure (strain + dephasing panels)
    axesF : 2D array of axes; row 0 = characteristic strain, row 1 = dephasing
    axes_ae : optional axes showing eccentricity vs semimajor axis
    ax_m : currently unused, kept for interface compatibility
    axes_a : optional axes showing semimajor axis vs time
    e_min : currently unused, kept for interface compatibility
    f_min : left x-limit (Hz) for the dephasing row
    """
    figF.subplots_adjust(hspace=0, wspace=0)
    if axes_ae is not None:
        for ax in axes_ae:
            ax.set_xlabel('semimajor axis / $r_{isco}$')
            ax.grid()
        axes_ae[0].set_ylabel('eccentricity')
        # Put the legend on the a-e panel only when no a(t) panel carries it.
        if axes_a is None:
            axes_ae[-1].legend(loc='upper left', bbox_to_anchor=(0.9, 1.))
    if axes_a is not None:
        for ax in axes_a:
            ax.set_xlabel('time / yr')
            ax.grid()
        axes_a[0].set_ylabel('semimajor axis / $r_{isco}$')
        axes_a[-1].legend(loc='upper left', bbox_to_anchor=(0.9, 1.))
    axesF[0,0].set_ylabel('characteristic strain')
    axesF[1,0].set_ylabel(r'$|\Delta N^{(2)}|$')
    # LISA sensitivity curve on every strain panel. The detector object is
    # constructed once and reused instead of rebuilt per call.
    # ('Bandwith' is the project API's spelling.)
    lisa = detector.Lisa()
    f_gw = np.geomspace(lisa.Bandwith()[0], lisa.Bandwith()[1], 100)
    for ax in axesF[0,:]:
        ax.loglog(f_gw/c.hz_to_invpc, lisa.NoiseStrain(f_gw), label='LISA')
    for ax in axesF[1,:]:
        ax.set_xlabel('frequency / Hz')
    axesF[0,0].set_ylim(bottom=1e-23)
    # Fix: removed the unused 'from matplotlib.lines import Line2D' import;
    # the proxy artists for the custom legend use mpl.lines.Line2D directly.
    explanatory_lines = [mpl.lines.Line2D([0], [0], color='black', linestyle='-'),
                         mpl.lines.Line2D([0], [0], color='black', linestyle='--'),
                         mpl.lines.Line2D([0], [0], color="#832db6")]
    axesF[0,-1].legend(explanatory_lines, ["$h^{(2)}_{c,+}$", "$h^{(3)}_{c,+}$", "LISA"],
                       loc='upper left', bbox_to_anchor=(0.9, 1.))
    # Fall back to a legend on the dephasing panel when no time-domain axes
    # carry one.
    if axes_a is None and axes_ae is None:
        axesF[1,-1].legend(loc='upper left', bbox_to_anchor=(0.9, 1.))
    axesF[-1,0].set_ylim(bottom=1., top=1e8)
    axesF[-1,0].set_xlim(left=f_min)
    for ax in axesF.flatten():
        ax.grid()
```
```python
# An example case
# Single example evolution: a-e plane + a(t) in figT, strain/dephasing/n
# harmonics in figF.
plt.rcParams['axes.prop_cycle'] = mpl.cycler(color=["#94a4a2", "#3f90da", "#ffa90e", "#bd1f01", "#832db6"])
figT, axes_ae = plt.subplots(2, 1, figsize=(6,10))
figF, axes_gw = plt.subplots(3, 1, sharex='col', figsize=(6,15))
# Set initial conditions for orbital evolution
a0 = 300 * hs.r_isco
e0 = 0.1
k0 = kepler.KeplerOrbit(hs, m2, a0, e0)
# Pair each spike with its LaTeX label for the legends.
spikes_and_labels = [(spike, alphas[i][1]) for i, spike in enumerate(spikes)]
compareModels(hs, spikes_and_labels, k0, ax_a=axes_ae[0],
              ax_ae=axes_ae[1], ax_h=axes_gw[0], ax_dN2=axes_gw[1], ax_n=axes_gw[2], fgw5year_line=True)
# preparePlots expects a 2D axes array: wrap the strain/dephasing axes.
preparePlots(figF, np.array([[axes_gw[0]], [axes_gw[1]]]), axes_ae=[axes_ae[1]], axes_a=[axes_ae[0]])
axes_gw[2].grid()
#figT.savefig("plots/evolution_example.pdf", bbox_inches="tight"); figF.savefig("plots/gwSignal_example.pdf", bbox_inches="tight")
```
Evolving from 300.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0542s real time
/media/data/Documents/PhD/imripy/src/imripy/waveform.py:142: IntegrationWarning: The occurrence of roundoff error is detected, which prevents
the requested tolerance from being achieved. The error may be
underestimated.
mean_anomaly = 2.*np.pi* np.cumsum([quad(F_interp, ev.t[i-1], ev.t[i], epsabs=acc, epsrel=acc, limit=200)[0] if i > 0 else 0. for i in range(len(ev.t))])
Evolving from 300.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.4112s real time
Evolving from 300.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.2024s real time
Evolving from 300.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 14.6228s real time


```python
# Look at different initial eccentricities
# Scan e0 in {1e-4, 0.1, 0.6} at fixed a0 = 100 r_isco; one column per e0.
figT, axesT = plt.subplots(2, 3, figsize=(16, 10), sharex='col')
figF, axesF = plt.subplots(2, 3, figsize=(16,10), sharex='all', sharey='row')
# Set initial conditions for orbital evolution
a0 = 100 * hs.r_isco
k0 = kepler.KeplerOrbit(hs, m2, a0)
# The eccentricity is varied by mutating k0.e between the calls below.
k0.e = 1e-4
axesT[0,0].set_title(r"$e_0=10^{-4}$");# axesF[0,0].set_title(r"$e_0=10^{-4}$")
compareModels(hs, spikes_and_labels, k0, ax_a=axesT[0,0], ax_ae=axesT[1,0], ax_h=axesF[0,0], ax_dN2=axesF[1,0])
k0.e = 1e-1
axesT[0,1].set_title(r"$e_0=0.1$");# axesF[0,1].set_title(r"$e_0=0.1$")
compareModels(hs, spikes_and_labels, k0, ax_a=axesT[0,1], ax_ae=axesT[1,1], ax_h=axesF[0,1], ax_dN2=axesF[1,1])
k0.e = 0.6
axesT[0,2].set_title(r"$e_0=0.6$"); #axesF[0,2].set_title(r"$e_0=0.6$")
compareModels(hs, spikes_and_labels, k0, ax_a=axesT[0,2], ax_ae=axesT[1,2], ax_h=axesF[0,2], ax_dN2=axesF[1,2])
preparePlots( figF, axesF, axes_ae = axesT[1,:], axes_a=axesT[0,:])
figT.subplots_adjust(wspace=0)
axesT[0,0].set_yscale('log')
#figT.savefig("plots/evolution_e0.pdf", bbox_inches="tight"); figF.savefig("plots/gwSignal_e0.pdf", bbox_inches="tight")
```
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.0001 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0495s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.0001 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 11.3467s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.0001 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.2748s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.0001 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 14.3939s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0437s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.0474s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 13.0133s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 14.9653s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0451s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 14.0373s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 14.8610s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.6 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 16.1484s real time


```python
# Look at different initial separation
# Scan a0 in {1e2, 1e3, 1e4} r_isco at fixed e0 = 0.1; one column per a0.
figT, axesT = plt.subplots(2, 3, figsize=(16,10), sharey='row')
figF, axesF = plt.subplots(2, 3, figsize=(16,10), sharex='all', sharey='row')
# The orbit object from the previous cell is mutated in place.
k0.e = 1e-1
k0.a = 1e2*hs.r_isco
axesT[0,0].set_title(r"$a_0=10^2r_{isco}$");# axesF[0,0].set_title(r"$a_0=10^2r_{isco}$")
compareModels(hs, spikes_and_labels, k0, ax_a=axesT[0,0], ax_ae=axesT[1,0], ax_h=axesF[0,0], ax_dN2=axesF[1,0])
k0.a = 1e3*hs.r_isco
axesT[0,1].set_title(r"$a_0=10^3r_{isco}$");# axesF[0,1].set_title(r"$a_0=10^3r_{isco}$")
# Tighter accuracy for the longer 1e3 r_isco evolution.
compareModels(hs, spikes_and_labels, k0, ax_a=axesT[0,1], ax_ae=axesT[1,1], ax_h=axesF[0,1], ax_dN2=axesF[1,1], acc=1e-11)
k0.a = 1e4*hs.r_isco
axesT[0,2].set_title(r"$a_0=10^4r_{isco}$"); #axesF[0,2].set_title(r"$a_0=10^4r_{isco}$")
compareModels(hs, spikes_and_labels, k0, ax_a=axesT[0,2], ax_ae=axesT[1,2], ax_h=axesF[0,2], ax_dN2=axesF[1,2])
preparePlots( figF, axesF, f_min=1e-4, axes_ae = axesT[1,:], axes_a=axesT[0,:])
figT.subplots_adjust(wspace=0)
#figT.savefig("plots/evolution_a0.pdf", bbox_inches="tight"); figF.savefig("plots/gwSignal_a0.pdf", bbox_inches="tight")
```
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0490s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.0046s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 13.1148s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 14.8762s real time
Evolving from 1000.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0705s real time
/media/data/Documents/PhD/imripy/src/imripy/waveform.py:142: IntegrationWarning: The maximum number of subdivisions (200) has been achieved.
If increasing the limit yields no improvement it is advised to analyze
the integrand in order to determine the difficulties. If the position of a
local difficulty can be determined (singularity, discontinuity) one will
probably gain from splitting up the interval and calling the integrator
on the subranges. Perhaps a special-purpose integrator should be used.
mean_anomaly = 2.*np.pi* np.cumsum([quad(F_interp, ev.t[i-1], ev.t[i], epsabs=acc, epsrel=acc, limit=200)[0] if i > 0 else 0. for i in range(len(ev.t))])
Evolving from 1000.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 16.1833s real time
Evolving from 1000.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 16.1568s real time
Evolving from 1000.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 18.7692s real time
Evolving from 10000.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.696e+00r_isco. Evolution took 0.0728s real time
/media/data/Documents/PhD/imripy/src/imripy/waveform.py:142: IntegrationWarning: Extremely bad integrand behavior occurs at some points of the
integration interval.
mean_anomaly = 2.*np.pi* np.cumsum([quad(F_interp, ev.t[i-1], ev.t[i], epsabs=acc, epsrel=acc, limit=200)[0] if i > 0 else 0. for i in range(len(ev.t))])
Evolving from 10000.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.673e+00r_isco. Evolution took 12.6166s real time
Evolving from 10000.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.654e+00r_isco. Evolution took 12.7055s real time
Evolving from 10000.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.665e+00r_isco. Evolution took 14.2823s real time


```python
# Look at different Dark Matter Densities
# Scan the spike normalization rho6 over {5.448e13, 5.448e15, 5.448e17}
# solar masses / pc^3 at fixed a0 = 100 r_isco, e0 = 0.1; one column each.
figT, axesT = plt.subplots(2, 3, figsize=(16,10), sharey='row')
figF, axesF = plt.subplots(2, 3, figsize=(16,10), sharex='all', sharey='row')
k0.a = 100 * hs.r_isco
k0.e = 0.1
rho6 = 5.448e13 * c.solar_mass_to_pc # in 1/pc^2
spikes_and_labels_1 = [(halo.Spike.FromRho6(rho6, m1, alpha), label) for alpha,label in alphas]
axesT[0,0].set_title(r"$\rho_6=" + plot_utils.latex_float(rho6/c.solar_mass_to_pc) + "M_\odot/pc^3$")
compareModels(hs, spikes_and_labels_1, k0, ax_a=axesT[0,0], ax_ae=axesT[1,0], ax_h=axesF[0,0], ax_dN2=axesF[1,0])
rho6 = 5.448e15 * c.solar_mass_to_pc # in 1/pc^2
spikes_and_labels_2 = [(halo.Spike.FromRho6(rho6, m1, alpha), label) for alpha,label in alphas]
axesT[0,1].set_title(r"$\rho_6=" + plot_utils.latex_float(rho6/c.solar_mass_to_pc) + "M_\odot/pc^3$")
compareModels(hs, spikes_and_labels_2, k0, ax_a=axesT[0,1], ax_ae=axesT[1,1], ax_h=axesF[0,1], ax_dN2=axesF[1,1])
rho6 = 5.448e17 * c.solar_mass_to_pc # in 1/pc^2
spikes_and_labels_3 = [(halo.Spike.FromRho6(rho6, m1, alpha), label) for alpha,label in alphas]
axesT[0,2].set_title(r"$\rho_6=" + plot_utils.latex_float(rho6/c.solar_mass_to_pc) + "M_\odot/pc^3$")
compareModels(hs, spikes_and_labels_3, k0, ax_a=axesT[0,2], ax_ae=axesT[1,2], ax_h=axesF[0,2], ax_dN2=axesF[1,2])
preparePlots( figF, axesF, axes_ae = axesT[1,:], axes_a=axesT[0,:])
figT.subplots_adjust(wspace=0)
#figT.savefig("plots/evolution_rho6.pdf", bbox_inches="tight"); figF.savefig("plots/gwSignal_rho6.pdf", bbox_inches="tight")
```
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0480s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 16.3628s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 17.0556s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 15.3994s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0445s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.1807s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.4697s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 15.4402s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0443s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 7.9190s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 8.1410s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 9.8947s real time


```python
# Look at different m1
# Scan the central mass m1 over {1e3, 1e4, 1e5} solar masses at fixed
# rho6 and e0; a0 is rescaled to 100 r_isco of each new host system.
figT, axesT = plt.subplots(2, 3, figsize=(16,10), sharey='row')
figF, axesF = plt.subplots(2, 3, figsize=(16,10), sharex='all', sharey='row')
rho6 = 5.448e15 * c.solar_mass_to_pc # in 1/pc^2
e0 = 0.1
# 1
m1 = 1e3 * c.solar_mass_to_pc
hs_1 = ms.HostSystem(m1, D_l=D)
spikes_and_labels_1 = [(halo.Spike.FromRho6(rho6, m1, alpha), label) for alpha,label in alphas]
a0 = 1e2*hs_1.r_isco
k0_1 = kepler.KeplerOrbit(hs_1, m2, a0, e0)
axesT[0,0].set_title(r"$m_1 = 10^3 M_{\odot}$"); #axesF[0,0].set_title(r"$m_1 = 10^3 M_{\odot}$")
compareModels(hs_1, spikes_and_labels_1, k0_1, ax_a=axesT[0,0], ax_ae=axesT[1,0], ax_h=axesF[0,0], ax_dN2=axesF[1,0], fgw5year_line=True)
# 2
m1 = 1e4 * c.solar_mass_to_pc
hs_2 = ms.HostSystem(m1, D_l=D)
spikes_and_labels_2 = [(halo.Spike.FromRho6(rho6, m1, alpha), label) for alpha,label in alphas]
a0 = 1e2*hs_2.r_isco
k0_2 = kepler.KeplerOrbit(hs_2, m2, a0, e0)
axesT[0,1].set_title(r"$m_1 = 10^4 M_{\odot}$"); #axesF[0,1].set_title(r"$m_1 = 10^4 M_{\odot}$")
# Tighter accuracy for the heavier systems.
compareModels(hs_2, spikes_and_labels_2, k0_2, ax_a=axesT[0,1], ax_ae=axesT[1,1], ax_h=axesF[0,1], ax_dN2=axesF[1,1], acc=1e-11, fgw5year_line=True)
# 3
m1 = 1e5 * c.solar_mass_to_pc
hs_3 = ms.HostSystem(m1, D_l=D)
spikes_and_labels_3 = [(halo.Spike.FromRho6(rho6, m1, alpha), label) for alpha,label in alphas]
a0 = 1e2*hs_3.r_isco
k0_3 = kepler.KeplerOrbit(hs_3, m2, a0, e0)
axesT[0,2].set_title(r"$m_1 = 10^5 M_{\odot}$"); #axesF[0,2].set_title(r"$m_1 = 10^5 M_{\odot}$")
compareModels(hs_3, spikes_and_labels_3, k0_3, ax_a=axesT[0,2], ax_ae=axesT[1,2], ax_h=axesF[0,2], ax_dN2=axesF[1,2], acc=1e-11, fgw5year_line=True)
preparePlots(figF, axesF, f_min=1e-4, axes_ae = axesT[1,:], axes_a=axesT[0,:])
figT.subplots_adjust(wspace=0)
#figT.savefig("plots/evolution_m1.pdf", bbox_inches="tight"); figF.savefig("plots/gwSignal_m1.pdf", bbox_inches="tight")
```
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0450s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.4085s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.9127s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-10
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 15.0252s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0400s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 11.6532s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.1932s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.5406s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 0.0422s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.1971s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.0275s real time
Evolving from 100.0 to 1.0 r_isco with initial eccentricity 0.1 with Options: dissipative forces employed {GWLoss, DynamicalFriction, }, accuracy = 1.0e-11
A termination event occurred.
-> Ended at 2.667e+00r_isco. Evolution took 12.4035s real time


```python
```
|
DMGW-GoetheREPO_NAMEimripyPATH_START.@imripy_extracted@imripy-main@examples@2112.09586.ipynb@.PATH_END.py
|
{
"filename": "line_flux.py",
"repo_name": "psheehan/pdspy",
"repo_path": "pdspy_extracted/pdspy-master/pdspy/spectroscopy/line_flux.py",
"type": "Python"
}
|
from ..constants.physics import c
from ..constants.math import pi
from ..constants.astronomy import Jy
from numpy import arange,ones,concatenate,sqrt,exp,where,array,mat,sin,log
from numpy import abs as absv
from scipy.optimize import leastsq
import matplotlib.pyplot as plt
def line_flux(data,lines,nleft=8,nright=8,plotout=None,quiet=False, \
        fixed_width=False,fringing=False):
    """Fit Gaussian profiles (plus a linear baseline and, optionally, a
    sinusoidal fringe term) to a set of spectral lines and return their
    integrated fluxes.

    Parameters
    ----------
    data : object with .wave, .flux, .unc arrays (wavelength in microns,
        flux density presumably in Jy -- TODO confirm against callers)
    lines : sequence of line-center wavelengths (same units as data.wave)
    nleft, nright : number of extra pixels to include on either side of
        the first/last line when selecting the fit window
    plotout : optional filename for saving the diagnostic plot
    quiet : suppress console output and interactive plotting when True
    fixed_width : hold the Gaussian widths fixed during the fit
    fringing : include a sinusoidal fringe component in the model

    Returns
    -------
    (Results, chisq) where Results is an (nlines, 4) array of
    [center, flux, flux uncertainty, FWHM] and chisq is the reduced
    chi-squared of the fit.

    NOTE(review): this function calls ``mpfit``, which is not imported
    anywhere in this module -- it will raise NameError unless mpfit is
    injected into the namespace elsewhere; verify before use.
    """
    # Fixed fractional resolution used only in the uncertainty estimate.
    B = 1.0/1500
    lines = array(lines)
    wave = data.wave
    flux = data.flux
    unc = data.unc
    nlines = lines.size
    ind = arange(nlines)
    # Pixel indices closest to the first and last requested line centers.
    left = where(absv(wave-lines[0]) == absv(wave-lines[0]).min())
    right = where(absv(wave-lines[lines.size-1]) == absv(wave- \
        lines[lines.size-1]).min())
    # Fit window: span between first and last line, padded by nleft/nright.
    prange = arange(right[0]-left[0]+nleft+nright+1)-nleft+left[0]
    wave_fit = wave[prange]
    flux_fit = flux[prange]
    unc_fit = unc[prange]
    # Initial guesses: unit amplitudes, empirical width-vs-wavelength law.
    flux_in = ones(lines.size)
    #sigma_in = lines*B
    fwhm_in = lines/(888.488-9.553*lines)
    sigma_in = fwhm_in/(2*sqrt(2*log(2)))
    # Fringe-term starting values (amplitude, angular frequency, phase).
    omega_in = array([2*pi/(4*sigma_in[0])])
    amp_in = array([0.1])
    phi_in = array([pi/2])
    # Linear baseline from the window endpoints.
    slope = array([(flux_fit[flux_fit.size-1]-flux_fit[0])/(wave_fit.max()- \
        wave_fit.min())])
    yint = array([flux_fit[0]-slope[0]*wave_fit.min()])
    # Parameter vector layout: [fluxes, centers, sigmas, yint, slope,
    # (amp, omega, phi when fringing)] -- must match gauss().
    if fringing:
        A = concatenate((flux_in,lines,sigma_in,yint,slope, \
                amp_in,omega_in,phi_in))
    else:
        A = concatenate((flux_in,lines,sigma_in,yint,slope))
    # mpfit-style parameter constraints.
    parinfo = []
    for i in arange(A.size):
        parinfo.append({"limited":[False,False], "limits":[0.0,0.0], \
                "mpside":2, "fixed":False})
    for i in arange(lines.size):
        # Line fluxes must be positive.
        parinfo[i]["limited"] = [True,False]
        parinfo[i]["limits"] = [0.001,0.0]
        # Centers constrained to +/- 0.025 micron of the initial guess.
        parinfo[lines.size+i]["limited"] = [True,True]
        parinfo[lines.size+i]["limits"] = [lines[i]-0.025,lines[i]+0.025]
        # Widths must stay positive.
        parinfo[2*lines.size+i]["limited"] = [True,False]
        parinfo[2*lines.size+i]["limits"] = [1.0e-3,0.0]
        if fixed_width:
            parinfo[2*lines.size+i]["fixed"] = True
    if fringing:
        # Fringe phase restricted to one period.
        parinfo[-1]["limited"] = [True,True]
        parinfo[-1]["limits"] = [0.0,2*pi]
    fa = {"x":wave_fit, "y":flux_fit, "err":unc_fit, "fringing":fringing}
    # NOTE(review): mpfit is undefined in this module -- see docstring.
    mfit = mpfit(gauss,xall=A,functkw=fa,parinfo=parinfo,quiet=1)
    A=mfit.params
    # Reconstruct the model from the weighted residuals returned by gauss:
    # residual = (y - model)/err  =>  model = y - residual*err.
    fit = gauss(A,x=wave_fit,y=flux_fit,err=unc_fit,fringing=fringing)[1]* \
            unc_fit*(-1)+flux_fit
    chisq = ((fit - flux_fit)**2/unc_fit**2).sum()/(wave_fit.size-A.size)
    # Calculate the flux and uncertainty.
    # Integrated Gaussian flux: sqrt(2*pi)*amplitude*sigma, converted from
    # Jy*micron to cgs via c/lambda^2 (the 1e7 converts erg -> W scaling).
    F = sqrt(2*pi)*A[ind]*Jy*(c*A[ind+2*nlines]*1.0e-4)/(A[ind+nlines]* \
            1.0e-4)**2/1.0e7
    deltaF = ones(nlines)
    for i in arange(nlines):
        deltaF[i] = sqrt(((unc_fit*Jy*c*B/(wave_fit*1.0e-4)*exp(-1.0*( \
                wave_fit-A[i+nlines])**2/(2*A[i+2*nlines]**2)))**2).sum())/1.0e7
    # Output the results.
    # Columns: center, flux, flux uncertainty, FWHM (= 2*sqrt(2*ln 2)*sigma).
    Results = concatenate((array(mat(A[ind+nlines]).T),array(mat(F).T), \
            array(mat(deltaF).T),array(mat(A[ind+2*nlines]*2*sqrt(2*log(2))).T)), \
            axis=1)
    if quiet == False:
        print("")
        for i in arange(lines.size):
            print("   {0:>6.3f}   {1:>9.3e}   {2:>9.3e}   {3:>6.4f}".format( \
                    Results[i,0],Results[i,1],Results[i,2],Results[i,3]))
        print("")
        print("Reduced chi-squared of the fit: ",chisq)
        print("")
    # Plot the results.
    if (plotout != None) or (quiet == False):
        plt.errorbar(wave_fit,flux_fit,fmt="b",yerr=unc_fit)
        plt.plot(wave_fit,fit,"r")
        plt.xlabel("$\lambda$ [$\mu$"+"m]")
        plt.ylabel(r"F$_{\nu}$ [Jy]")
        if plotout != None:
            plt.savefig(plotout)
        elif quiet == False:
            plt.show()
        plt.clf()
    return Results, chisq
def gauss(p, fjac=None, x=None, y=None, err=None, fringing=False):
    """Model function in mpfit's calling convention: a linear baseline
    (plus an optional sinusoidal fringe) and a sum of Gaussian lines.

    Parameter packing (must match line_flux):
      without fringing: [fluxes, centers, sigmas, yint, slope]
                        -> p.size == 3*nlines + 2
      with fringing:    [..., amp, omega, phi]
                        -> p.size == 3*nlines + 5

    Returns [status, (y - model)/err] as mpfit expects.
    """
    # BUG FIX: the original used p.size/3, which is float division on
    # Python 3 and crashes numpy integer indexing; use // instead.
    if fringing:
        nlines = p.size // 3 - 1
        # Baseline + fringe: yint + slope*x + amp*sin(omega*x + phi).
        model = p[p.size-5] + p[p.size-4]*x + \
            p[p.size-3]*sin(p[p.size-2]*x + p[p.size-1])
    else:
        nlines = p.size // 3
        # Linear baseline: yint + slope*x.
        model = p[p.size-2] + p[p.size-1]*x
    # Add one Gaussian per line: flux p[i], center p[nlines+i],
    # width p[2*nlines+i].
    for i in range(nlines):
        model += p[i]*exp(-1*(x - p[nlines+i])**2 /
                          (2*p[2*nlines+i]**2))
    status = 0
    return [status, (y - model)/err]
|
psheehanREPO_NAMEpdspyPATH_START.@pdspy_extracted@pdspy-master@pdspy@spectroscopy@line_flux.py@.PATH_END.py
|
{
"filename": "structure2feff.py",
"repo_name": "xraypy/xraylarch",
"repo_path": "xraylarch_extracted/xraylarch-master/larch/xrd/structure2feff.py",
"type": "Python"
}
|
import os
from random import Random
from xraydb import atomic_symbol, atomic_number, xray_edge
from larch.utils.strutils import fix_varname, strict_ascii
from larixite.amcsd_utils import (SpacegroupAnalyzer, Molecule,
IMolecule, IStructure)
rng = Random()
def get_atom_map(structure):
    """generalization of pymatgen atom map

    Assign each unique element symbol in the structure (in first-seen
    order over all sites) a 1-based Feff potential index.

    Arguments
    ---------
      structure: iterable of sites, each with `.species.elements`
                 providing objects with a `.symbol` attribute.

    Returns:
       dict of ipots: {element symbol: potential index}
    """
    # Collect element symbols in order of first appearance (removed the
    # unused `all_sites` accumulator that the original carried).
    unique_pot_atoms = []
    for site in structure:
        for elem in site.species.elements:
            if elem.symbol not in unique_pot_atoms:
                unique_pot_atoms.append(elem.symbol)
    # ipot indices are 1-based (0 is reserved for the absorber in Feff).
    return {atom: i + 1 for i, atom in enumerate(unique_pot_atoms)}
def read_structure(structure_text, fmt="cif"):
    """read structure from text

    Arguments
    ---------
      structure_text (string):  text of structure file, or a path to one
    fmt (string): format of structure file (cif, poscar, etc)

    Returns
    -------
      pymatgen Structure object or Molecule object

    Raises
    ------
      ImportError if pymatgen is unavailable, FileNotFoundError if the
      argument looks like a missing path, ValueError if parsing fails.
    """
    if Molecule is None:
        raise ImportError("pymatgen required. Try 'pip install pymatgen'.")
    # Formats parsed as periodic structures; everything else as a molecule.
    periodic_fmts = ('cif', 'poscar', 'contcar', 'chgcar', 'locpot',
                     'cssr', 'vasprun.xml')
    try:
        if fmt.lower() in periodic_fmts:
            struct = IStructure.from_str(structure_text, fmt, merge_tol=5.e-4)
        else:
            struct = IMolecule.from_str(structure_text, fmt)
        parse_ok = True
        file_found = True
    # was a bare `except:` -- narrowed so Ctrl-C/SystemExit still propagate
    except Exception:
        parse_ok = False
        file_found = False
        # The argument may be a filename rather than file contents.
        if os.path.exists(structure_text):
            file_found = True
            # BUG FIX: splitext keeps the leading dot ('.cif'), which never
            # matched the entries in periodic_fmts -- strip it.
            fmt = os.path.splitext(structure_text)[-1].lower().lstrip('.')
            try:
                if fmt.lower() in periodic_fmts:
                    struct = IStructure.from_file(structure_text, merge_tol=5.e-4)
                else:
                    struct = IMolecule.from_file(structure_text)
                parse_ok = True
            except Exception:
                parse_ok = False
    if not parse_ok:
        if not file_found:
            raise FileNotFoundError(f'file {structure_text:s} not found')
        else:
            raise ValueError('invalid text of structure file')
    return struct
def structure_sites(structure_text, absorber=None, fmt='cif'):
    """return list of sites for the structure

    When `absorber` is given, only sites occupied (fully or partially)
    by that element are returned; if none match, the first site is
    returned as a one-element list.
    """
    struct = read_structure(structure_text, fmt=fmt)
    out = struct.sites
    if absorber is not None:
        abname = absorber.lower()
        out = []
        for site in struct.sites:
            species = site.species_string.lower()
            if ',' in species and ':' in species:   # multi-occupancy site
                # species string looks like "fe:0.5, ni:0.5"
                for siteocc in species.split(','):
                    sname, occ = siteocc.split(':')
                    if sname.strip() == abname:
                        out.append(site)
            elif species == abname:
                out.append(site)
        if len(out) == 0:
            # BUG FIX: previously returned a bare site object here,
            # contradicting the documented "list of sites" return type.
            out = [struct.sites[0]]
    return out
def parse_structure(structure_text, fmt='cif', fname="default.filename"):
    """Parse a structure and summarize it as a dict with keys
    'formula', 'sites', 'structure_text', 'fmt' and 'fname'.
    Returns an error string if the structure cannot be read."""
    try:
        struct = read_structure(structure_text, fmt=fmt)
    except ValueError:
        return '# could not read structure file'
    return {'formula': struct.composition.reduced_formula,
            'sites': struct.sites,
            'structure_text': structure_text,
            'fmt': fmt,
            'fname': fname}
def structure2feffinp(structure_text, absorber, edge=None, cluster_size=8.0,
                      absorber_site=1, site_index=None, extra_titles=None,
                      with_h=False, version8=True, fmt='cif', rng_seed=None):
    """convert structure text to Feff8 or Feff6l input file

    Arguments
    ---------
      structure_text (string): text of CIF file or name of the CIF file.
      absorber (string or int): atomic symbol or atomic number of absorbing element
                           (see Note 1)
      edge (string or None): edge for calculation (see Note 2)     [None]
      cluster_size (float): size of cluster, in Angstroms          [8.0]
      absorber_site (int): index of site for absorber (see Note 3) [1]
      site_index (int or None): index of site for absorber (see Note 4) [None]
      extra_titles (list of str or None): extra title lines to include [None]
      with_h (bool): whether to include H atoms                    [False]
      version8 (bool):  whether to write Feff8l input (see Note 5) [True]
      fmt (string): format of structure file (cif, poscar, etc)    [cif]
      rng_seed (int or None): seed for RNG to get reproducible occupancy selections [None]

    Returns
    -------
      text of Feff input file

    Notes
    -----
     1. absorber is the atomic symbol or number of the absorbing element, and
        must be an element in the CIF structure.
     2. If edge is a string, it must be one of 'K', 'L', 'M', or 'N' edges (note
        Feff6 supports only 'K', 'L3', 'L2', and 'L1' edges).  If edge is None,
        it will be assigned to be 'K' for absorbers with Z < 58 (Ce, with an
        edge energy < 40 keV), and 'L3' for absorbers with Z >= 58.
     3. for structures with multiple sites for the absorbing atom, the site
        can be selected by the order in which they are listed in the sites
        list. This depends on the details of the CIF structure, which can be
        found with `cif_sites(ciftext)`, starting counting by 1.
     4. to explicitly state the index of the site in the sites list, use
        site_index (starting at 1!)
     5. if version8 is False, outputs will be written for Feff6l
    """
    try:
        struct = read_structure(structure_text, fmt=fmt)
    except ValueError:
        return '# could not read structure file'
    # module-level RNG so occupancy draws are reproducible with rng_seed
    global rng
    if rng_seed is not None:
        rng.seed(rng_seed)
    is_molecule = False
    if isinstance(struct, IStructure):
        sgroup = SpacegroupAnalyzer(struct).get_symmetry_dataset()
        space_group = sgroup["international"]
    else:
        space_group = 'Molecule'
        is_molecule = True

    if isinstance(absorber, int):
        # BUG FIX: the original called atomic_symbol(absorber_z) here,
        # referencing absorber_z before it was assigned (NameError).
        absorber = atomic_symbol(absorber)
    absorber_z = atomic_number(absorber)

    if edge is None:
        # heuristic: K edge below Ce (Z=58), L3 above (keeps edge < ~40 keV)
        edge = 'K' if absorber_z < 58 else 'L3'
    edge_energy = xray_edge(absorber, edge).energy
    edge_comment = f'{absorber:s} {edge:s} edge, around {edge_energy:.0f} eV'

    # unique elements, in first-seen order, mapped to 1-based ipots
    unique_pot_atoms = []
    for site in struct:
        for elem in site.species.elements:
            if elem.symbol not in unique_pot_atoms:
                unique_pot_atoms.append(elem.symbol)

    atoms_map = {}
    for i, atom in enumerate(unique_pot_atoms):
        atoms_map[atom] = i + 1
    if absorber not in atoms_map:
        atlist = ', '.join(atoms_map.keys())
        raise ValueError(f'atomic symbol {absorber:s} not listed in structure data: ({atlist})')

    site_atoms = {}  # map xtal site with list of atoms occupying that site
    site_tags = {}
    absorber_count = 0
    for sindex, site in enumerate(struct.sites):
        site_species = [e.symbol for e in site.species]
        if len(site_species) > 1:
            # partial occupancy: pre-draw 1000 weighted occupants to pop from
            s_els = [s.symbol for s in site.species.keys()]
            s_wts = [s for s in site.species.values()]
            site_atoms[sindex] = rng.choices(s_els, weights=s_wts, k=1000)
            site_tags[sindex] = f'({site.species_string:s})_{1+sindex:d}'
        else:
            site_atoms[sindex] = [site_species[0]] * 1000
            site_tags[sindex] = f'{site.species_string:s}_{1+sindex:d}'
        if absorber in site_species:
            absorber_count += 1
            if absorber_count == absorber_site:
                absorber_index = sindex
    if site_index is not None:
        absorber_index = site_index - 1

    # cluster of neighbors around the chosen absorber site
    center = struct[absorber_index].coords
    sphere = struct.get_neighbors(struct[absorber_index], cluster_size)

    symbols = [absorber]
    coords = [[0, 0, 0]]
    tags = [f'{absorber:s}_{1+absorber_index:d}']
    for i, site_dist in enumerate(sphere):
        s_index = site_dist[0].index
        site_symbol = site_atoms[s_index].pop()
        tags.append(site_tags[s_index])
        symbols.append(site_symbol)
        coords.append(site_dist[0].coords - center)
    cluster = Molecule(symbols, coords)

    out_text = ['*** feff input generated by xraylarch structure2feff using pymatgen ***']
    if extra_titles is not None:
        for etitle in extra_titles[:]:
            if not etitle.startswith('TITLE '):
                etitle = 'TITLE ' + etitle
            out_text.append(etitle)
    out_text.append(f'TITLE Formula:    {struct.composition.reduced_formula:s}')
    out_text.append(f'TITLE SpaceGroup: {space_group:s}')
    out_text.append(f'TITLE # sites:    {struct.num_sites}')
    out_text.append('* crystallographics sites: note that these sites may not be unique!')
    out_text.append(f'*     using absorber at site {1+absorber_index:d} in the list below')
    out_text.append(f'*     selected as absorber="{absorber:s}", absorber_site={absorber_site:d}')
    out_text.append('* index   X        Y        Z      species')
    for i, site in enumerate(struct):
        # The method of obtaining the cooridanates depends on whether the structure is a molecule or not
        if is_molecule:
            fc = site.coords
        else:
            fc = site.frac_coords
        species_string = fix_varname(site.species_string.strip())
        marker = '  <- absorber' if  (i == absorber_index) else ''
        out_text.append(f'* {i+1:3d}   {fc[0]:.6f} {fc[1]:.6f} {fc[2]:.6f}  {species_string:s} {marker:s}')
    out_text.extend(['* ', '', ''])

    if version8:
        out_text.append(f'EDGE    {edge:s}')
        out_text.append('S02     1.0')
        out_text.append('CONTROL 1 1 1 1 1 1')
        out_text.append('PRINT   1 0 0 0 0 3')
        out_text.append('EXAFS   20.0')
        out_text.append('NLEG    6')
        out_text.append(f'RPATH   {cluster_size:.2f}')
        out_text.append('*SCF    5.0')
    else:
        # Feff6l uses a numeric HOLE index instead of the EDGE card
        edge_index = {'K': 1, 'L1': 2, 'L2': 3, 'L3': 4}[edge]
        out_text.append(f'HOLE    {edge_index:d}  1.0  * {edge_comment:s} (2nd number is S02)')
        out_text.append('CONTROL 1 1 1 0 * phase, paths, feff, chi')
        out_text.append('PRINT   1 0 0 0')
        out_text.append(f'RMAX    {cluster_size:.2f}')

    out_text.extend(['', 'EXCHANGE 0', '',
                     '*  POLARIZATION  0 0 0', '',
                     'POTENTIALS',  '*    IPOT  Z   Tag'])

    # loop to find atoms actually in cluster, in case some atom
    # (maybe fractional occupation) is not included
    at_lines = [(0, cluster[0].x, cluster[0].y, cluster[0].z, 0, absorber, tags[0])]
    ipot_map = {}
    next_ipot = 0
    for i, site in enumerate(cluster[1:]):
        sym = site.species_string
        if sym == 'H' and not with_h:
            continue
        if sym in ipot_map:
            ipot = ipot_map[sym]
        else:
            next_ipot += 1
            ipot_map[sym] = ipot = next_ipot
        dist = cluster.get_distance(0, i+1)
        at_lines.append((dist, site.x, site.y, site.z, ipot, sym, tags[i+1]))

    # absorber is always ipot 0
    ipot, z = 0, absorber_z
    out_text.append(f'   {ipot:4d}  {z:4d}   {absorber:s}')
    for sym, ipot in ipot_map.items():
        z = atomic_number(sym)
        out_text.append(f'   {ipot:4d}  {z:4d}   {sym:s}')

    out_text.append('')
    out_text.append('ATOMS')
    out_text.append(f'*    x         y         z       ipot  tag distance  site_info')

    # Feff6l limit: write at most 500 atoms, nearest first
    acount = 0
    for dist, x, y, z, ipot, sym, tag in sorted(at_lines, key=lambda x: x[0]):
        acount += 1
        if acount > 500:
            break
        sym = (sym + ' ')[:2]
        out_text.append(f'   {x: .5f}  {y: .5f}  {z: .5f} {ipot:4d} {sym:s} {dist:.5f}  * {tag:s}')

    out_text.append('')
    out_text.append('* END')
    out_text.append('')
    return strict_ascii('\n'.join(out_text))
|
xraypyREPO_NAMExraylarchPATH_START.@xraylarch_extracted@xraylarch-master@larch@xrd@structure2feff.py@.PATH_END.py
|
{
"filename": "get_fink_data.py",
"repo_name": "nikhil-sarin/redback",
"repo_path": "redback_extracted/redback-master/examples/get_fink_data.py",
"type": "Python"
}
|
# An example to get data from FINK and plot it.
import redback

# ZTF identifier of the transient whose photometry we want.
name = 'ZTF22abdjqlm'
# Download the light curve from the FINK broker, classified as a supernova.
data = redback.get_data.get_fink_data(transient=name, transient_type='supernova')
# FINK and OAC have the same data format, so we can just use the OAC class method to load this data
supernova = redback.supernova.Supernova.from_open_access_catalogue(name=name,
                                                                   data_mode='flux')
# Plot the data, adjusting the y-axis limits (flux units).
supernova.plot_data(ylim_high=1e-12, ylim_low=1e-14)
|
nikhil-sarinREPO_NAMEredbackPATH_START.@redback_extracted@redback-master@examples@get_fink_data.py@.PATH_END.py
|
{
"filename": "testUVSub.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/testScripts/testUVSub.py",
"type": "Python"
}
|
# Test interferometric model subtraction /division script
# (Python 2 syntax -- uses print statements.)
# The argument, if given, is the data directory, defaults to "../testIt"
# Output UVdata should have ~zero phase and ~unit amplitude.
# Need some automated uv data comparison
import Obit, OSystem, OErr, sys

# Optional command-line override of the test-data directory.
if len(sys.argv)>=2:
    dataDir = sys.argv[1]
else:
    dataDir = "../testIt/"

# Init Obit
err=OErr.OErr()
ObitSys=OSystem.OSystem ("UVSub", 1, 100, 1, ["../AIPSdata/"], 1, [dataDir], 1, 0, err)
OErr.printErrMsg(err, "Error with Obit startup")

# Allow multiple threads
OSystem.PAllowThreads(2)  # 2 threads

import UV, UVImager, Image, ImageMosaic, SkyModel
from Obit import Bomb

# Files (FITS): input visibilities, input model image, output visibilities,
# and a "master" reference dataset for regression comparison.
inDisk = 1
outDisk = 1
inFile = 'UVSubTestIn.uvtab'
inModel = 'UVSubTestModIn.fits'
outFile = 'UVSubTestOut.uvtab'
masterDisk = 1
masterFile = 'UVSubTestMaster.uvtab'

# Bombs away
#Bomb()

# Set data
print "Set data"
inData = UV.newPFUV("Input uv data", inFile, inDisk, True, err)
inImage = Image.newPFImage("Input image",inModel, inDisk,  True, err)
outData = UV.newPFUV("Output uv data", outFile, outDisk, False, err)
OErr.printErrMsg(err, "Error initializing")

# Make Mosaic (two fields, both pointing at the same model image)
mosaic = ImageMosaic.newObit("Mosaic", 2, err)
#mosaic = ImageMosaic.newObit("Mosaic", 1, err)
OErr.printErrMsg(err, "Error making mosaic")
# Add image
ImageMosaic.PSetImage(mosaic, 0, inImage)
ImageMosaic.PSetImage(mosaic, 1, inImage)

# Make SkyModel model
model = SkyModel.PCreate("SkyModel", mosaic)
OErr.printErrMsg(err, "Error making SkyModel")

# control parameters -- note later assignments override earlier ones
# (Factor ends up 0.5, Mode ends up 1 = DFT).
Input = SkyModel.UVSubInput
Input['InData'] = inData
Input['SkyModel'] = model
Input['OutData'] = outData
Input['doCalSelect'] = False
Input['Stokes'] = '    '
Input['CCVer'] = [2]
Input['BChan'] = 0
Input['EChan'] = 0
Input['BIF'] = 0
Input['EIF'] = 0
Input['Factor'] = 1.0
Input['Factor'] = 0.5
Input['Mode'] = 0  # Fastest
Input['Mode'] = 1  # DFT
#Input['Mode'] = 2  # Grid
#Input['Type'] = 1  # Image
Input['Type'] = 0  # CC

#replace data with model?
#Input['REPLACE'] = True

# Subtract
#print "Subtract"
#print "Replace"
#SkyModel.PSubUV(err, Input)
#OErr.printErrMsg(err, "Error subtracting")
# Divide
print "Divide"
SkyModel.PDivUV(err, Input)
OErr.printErrMsg(err, "Error dividing")

# Compare with master lie [rms diff]
masterData = UV.newPFUV("Master UVData", masterFile, masterDisk, True, err)
diff = UV.PUtilVisCompare(outData, masterData, err);
print "Comparison with master lie, fractional RMS R,I difference",diff

# Say something
#print "Subtracted",inModel,"From", inFile,"to",outFile
print "Divided FT of",inModel,"into", inFile,"to",outFile

#def PUtilUVUtilVisCompare (in1UV, in2UV, err):
#    returns RMS

# Shutdown Obit
OErr.printErr(err)
OSystem.Shutdown(ObitSys)
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@testScripts@testUVSub.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/ned/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is the old namespace for querying the NASA Extragalactic Database (NED).
Please update your imports and use it from astroquery.ipac.ned

.. deprecated:: 0.4.4
"""
import warnings

# Emit the deprecation warning at import time; stacklevel=2 points the
# warning at the importing module rather than at this shim.
warnings.warn("the ``ned`` module has been moved to astroquery.ipac.ned, "
              "please update your imports.", DeprecationWarning, stacklevel=2)

# Re-export the public API from its new location for backward compatibility.
from astroquery.ipac.ned import Ned, NedClass, Conf, conf

__all__ = ['Ned', 'NedClass', 'Conf', 'conf']
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@ned@__init__.py@.PATH_END.py
|
{
"filename": "stagger.py",
"repo_name": "andycasey/smhr",
"repo_path": "smhr_extracted/smhr-master/smh/photospheres/stagger.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Functions for dealing with the Stagger model photospheres. """
from __future__ import division, absolute_import, print_function
__author__ = "Andy Casey <arc@ast.cam.ac.uk>"
import logging
import numpy as np
from .interpolator import BaseInterpolator
logger = logging.getLogger(__name__)
class Interpolator(BaseInterpolator):
    """Interpolator for Stagger model photospheres."""

    # Stagger photospheres are tabulated on a logarithmic optical depth scale.
    opacity_scale = "logtau"

    def __init__(self, filename, **kwargs):
        """Initialize from a pickled Stagger grid file; keyword arguments
        are forwarded to BaseInterpolator."""
        # BUG FIX: the original used super(self.__class__, self), which
        # recurses infinitely if this class is ever subclassed; also
        # dropped the meaningless `return` from __init__.
        super(Interpolator, self).__init__(filename, **kwargs)
def pickle_from_tsv_file(filename, depth_scale="optical", skiprows=72,
    delimiter=";"):
    """
    Pickle the Stagger-grid models from TSV-formatted filename.

    :param filename:
        The path of the TSV-formatted file.

    :type filename:
        str

    :param depth_scale: [optional, optical assumed]
        Which horizontal averaging method to use. Available options are:
        optical, mass density, Rosseland, or geometric height

    :type depth_scale:
        str

    :param skiprows: [optional]
        The number of rows at the top of the file before the header information.

    :type skiprows:
        int

    :param delimiter: [optional]
        The delimiting character between columns.

    :type delimiter:
        str
    """

    depth_scale_hint = depth_scale.lower()[0] # work it out from first letter
    if depth_scale_hint not in ("o", "m", "r", "z", "g", "h"): # zgh are same
        raise ValueError(
            "depth scale expected to be 'optical', 'mass density', "
            "Rosseland, or geometric height")
    if depth_scale_hint in ("g", "h"):
        depth_scale_hint = "z"
    elif depth_scale_hint == "r":
        depth_scale_hint = "R"
    depth_scale = {
        "o": "optical",
        "m": "mass density",
        "R": "Rossland opacity",
        "z": "geometric height",
    }[depth_scale_hint]

    with open(filename, "r") as fp:
        contents = fp.readlines()[skiprows + 1:]
    if contents[-1] == "\n": contents.pop(-1)

    # Number of extra columns in each row.
    n = 4

    # First three lines are for headers
    names = contents[0].strip().split(delimiter)
    units = contents[1].strip().split(delimiter)
    contents = contents[3:]

    # Column n-1 holds a per-model identifier; its cardinality gives the
    # number of distinct models in the file.
    num_models = len(set([row.split(delimiter)[n - 1] for row in contents]))
    parameters = np.nan * np.ones((num_models, n - 1))

    # Assume they all have the same number of depth points.
    assert (len(contents) % num_models) == 0
    num_depth_points = int(len(contents) / num_models)
    num_photospheric_quantitites = len(names) - n
    photospheres = np.nan * np.ones(
        (num_models, num_depth_points, num_photospheric_quantitites))

    for i in range(num_models):
        # BUG FIX: on Python 3 `map` returns a lazy iterator, which numpy
        # cannot assign into an array slice -- materialize the values.
        # The first n-1 columns are the model parameters.
        parameters[i, :] = [
            float(v) for v in
            contents[i*num_depth_points].split(delimiter)[:n-1]]
        # Columns n: of each row are the photospheric quantities.
        photospheres[i, :, :] = np.array(
            [[float(value.strip()) for value in row.split(delimiter)[n:]]
             for row in contents[i*num_depth_points:(i + 1)*num_depth_points]])

    names, units = names[n:], units[n:]

    # Which depth scale do we want? Keep column 0 plus every column tagged
    # with the requested scale hint, e.g. "tau(o)".
    indices = np.array([0] + [i for i, name in enumerate(names) \
        if name.endswith("({})".format(depth_scale_hint))])
    names = [names[0]] + [names[i][:-3] for i in indices[1:]]
    # Replace dimensionless columns with "" for astropy.
    units = [units[i].replace("[-]", "") for i in indices]
    photospheres = photospheres[:, :, indices]

    meta = {
        "kind": "Stagger",
        "source_path": filename,
        "horizontal_averaging": depth_scale,
        "photospheric_units": units
    }

    assert np.all(np.isfinite(parameters))
    assert np.all(np.isfinite(photospheres))

    # np.rec is the public alias of np.core.records (the latter is
    # deprecated/private in modern numpy).
    parameters = np.rec.fromarrays(parameters.T,
        names=("effective_temperature", "surface_gravity", "metallicity"))
    return (parameters, photospheres, names, meta)
|
andycaseyREPO_NAMEsmhrPATH_START.@smhr_extracted@smhr-master@smh@photospheres@stagger.py@.PATH_END.py
|
{
"filename": "wheel.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/pip/vendor/distlib/wheel.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
from __future__ import unicode_literals
import base64
import codecs
import datetime
import distutils.util
from email import message_from_file
import hashlib
import imp
import json
import logging
import os
import posixpath
import re
import shutil
import sys
import tempfile
import zipfile
from . import DistlibException
from .compat import sysconfig, ZipFile, fsdecode, text_type, filter
from .database import DistributionPath, InstalledDistribution
from .metadata import Metadata
from .scripts import ScriptMaker
from .util import (FileOperator, convert_path, CSVReader, CSVWriter,
cached_property, get_cache_base)
logger = logging.getLogger(__name__)
# Implementation prefix for PEP 425 compatibility tags:
# pp=PyPy, jy=Jython, ip=IronPython, cp=CPython.
if hasattr(sys, 'pypy_version_info'):
    IMP_PREFIX = 'pp'
elif sys.platform.startswith('java'):
    IMP_PREFIX = 'jy'
elif sys.platform == 'cli':
    IMP_PREFIX = 'ip'
else:
    IMP_PREFIX = 'cp'

# Dotless Python version, e.g. '27' or '34'; fall back to building it from
# sys.version_info when sysconfig does not provide it.
VER_SUFFIX = sysconfig.get_config_var('py_version_nodot')
if not VER_SUFFIX:   # pragma: no cover
    VER_SUFFIX = '%s%s' % sys.version_info[:2]
PYVER = 'py' + VER_SUFFIX
IMPVER = IMP_PREFIX + VER_SUFFIX

# Platform tag, normalized to use underscores (e.g. 'linux_x86_64').
ARCH = distutils.util.get_platform().replace('-', '_').replace('.', '_')

# ABI tag derived from SOABI (e.g. 'cp34m'); 'none' when unavailable.
ABI = sysconfig.get_config_var('SOABI')
if ABI and ABI.startswith('cpython-'):
    ABI = ABI.replace('cpython-', 'cp')
else:
    ABI = 'none'

# Full wheel filename: name-version(-build)?-pytag-abitag-archtag.whl
FILENAME_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?
-(?P<py>\w+\d+(\.\w+\d+)*)
-(?P<bi>\w+)
-(?P<ar>\w+)
\.whl$
''', re.IGNORECASE | re.VERBOSE)

# Just the name-version(-build)? prefix, for constructing filenames.
NAME_VERSION_RE = re.compile(r'''
(?P<nm>[^-]+)
-(?P<vn>\d+[^-]*)
(-(?P<bn>\d+[^-]*))?$
''', re.IGNORECASE | re.VERBOSE)

# Matches an existing '#!...' shebang line in script bytes.
SHEBANG_RE = re.compile(br'\s*#![^\r\n]*')

# Convert OS paths to the forward-slash form used inside zip archives.
if os.sep == '/':
    to_posix = lambda o: o
else:
    to_posix = lambda o: o.replace(os.sep, '/')
class Mounter(object):
    """Import hook mapping C-extension module names to shared-library
    paths contributed by mounted wheels (legacy find_module/load_module
    protocol)."""

    def __init__(self):
        # wheel pathname -> the (name, libpath) extensions it contributed
        self.impure_wheels = {}
        # extension module name -> shared library path
        self.libs = {}

    def add(self, pathname, extensions):
        """Register the extensions supplied by the wheel at *pathname*."""
        self.impure_wheels[pathname] = extensions
        self.libs.update(extensions)

    def remove(self, pathname):
        """Forget the extensions contributed by the wheel at *pathname*."""
        for name, _libpath in self.impure_wheels.pop(pathname):
            self.libs.pop(name, None)

    def find_module(self, fullname, path=None):
        # We only claim modules we have a registered library for.
        return self if fullname in self.libs else None

    def load_module(self, fullname):
        try:
            # Already imported: return the cached module.
            return sys.modules[fullname]
        except KeyError:
            pass
        if fullname not in self.libs:
            raise ImportError('unable to find extension for %s' % fullname)
        mod = imp.load_dynamic(fullname, self.libs[fullname])
        mod.__loader__ = self
        # The parent package is everything before the last dot.
        mod.__package__, _ = fullname.rsplit('.', 1)
        return mod
# Singleton import hook shared by all Wheel.mount/unmount operations.
_hook = Mounter()
class Wheel(object):
    """
    Class to build and install from Wheel files (PEP 427).
    """
    # Wheel-Version value written to (and expected in) the WHEEL metadata.
    wheel_version = (1, 0)
    # Default digest algorithm used for RECORD entries.
    hash_kind = 'sha256'
    def __init__(self, filename=None, sign=False, verify=False):
        """
        Initialise an instance using a (valid) filename.

        *filename* may be None (a dummy wheel), a name-version(-build)
        prefix, or a full wheel filename whose tags are parsed out.
        """
        self.sign = sign
        self.verify = verify
        self.buildver = ''
        # Defaults describe a pure-Python, universal wheel.
        self.pyver = [PYVER]
        self.abi = ['none']
        self.arch = ['any']
        self.dirname = os.getcwd()
        if filename is None:
            # Placeholder identity; attributes can be set later.
            self.name = 'dummy'
            self.version = '0.1'
            self._filename = self.filename
        else:
            # First try to interpret it as a bare name-version(-build).
            m = NAME_VERSION_RE.match(filename)
            if m:
                info = m.groupdict('')
                self.name = info['nm']
                self.version = info['vn']
                self.buildver = info['bn']
                self._filename = self.filename
            else:
                # Otherwise it must be a full wheel filename, possibly
                # with a directory component.
                dirname, filename = os.path.split(filename)
                m = FILENAME_RE.match(filename)
                if not m:
                    raise DistlibException('Invalid name or '
                                           'filename: %r' % filename)
                if dirname:
                    self.dirname = os.path.abspath(dirname)
                self._filename = filename
                info = m.groupdict('')
                self.name = info['nm']
                self.version = info['vn']
                self.buildver = info['bn']
                # Compressed tags: each part may hold dot-separated values.
                self.pyver = info['py'].split('.')
                self.abi = info['bi'].split('.')
                self.arch = info['ar'].split('.')
@property
def filename(self):
"""
Build and return a filename from the various components.
"""
if self.buildver:
buildver = '-' + self.buildver
else:
buildver = ''
pyver = '.'.join(self.pyver)
abi = '.'.join(self.abi)
arch = '.'.join(self.arch)
return '%s-%s%s-%s-%s-%s.whl' % (self.name, self.version, buildver,
pyver, abi, arch)
@property
def tags(self):
for pyver in self.pyver:
for abi in self.abi:
for arch in self.arch:
yield pyver, abi, arch
    @cached_property
    def metadata(self):
        # Lazily read <name>-<version>.dist-info/METADATA from the wheel
        # archive and parse it into a Metadata instance.
        pathname = os.path.join(self.dirname, self.filename)
        name_ver = '%s-%s' % (self.name, self.version)
        info_dir = '%s.dist-info' % name_ver
        metadata_filename = posixpath.join(info_dir, 'METADATA')
        # Zip members open in binary mode; wrap the stream to decode UTF-8.
        wrapper = codecs.getreader('utf-8')
        with ZipFile(pathname, 'r') as zf:
            with zf.open(metadata_filename) as bf:
                wf = wrapper(bf)
                result = Metadata()
                result.read_file(wf)
        return result
@cached_property
def info(self):
pathname = os.path.join(self.dirname, self.filename)
name_ver = '%s-%s' % (self.name, self.version)
info_dir = '%s.dist-info' % name_ver
metadata_filename = posixpath.join(info_dir, 'WHEEL')
wrapper = codecs.getreader('utf-8')
with ZipFile(pathname, 'r') as zf:
with zf.open(metadata_filename) as bf:
wf = wrapper(bf)
message = message_from_file(wf)
result = dict(message)
return result
    def process_shebang(self, data):
        # Normalise a script's shebang to the placeholder '#!python' so the
        # installer can substitute the real interpreter path later.
        m = SHEBANG_RE.match(data)
        if m:
            # Replace the existing shebang line, keep the rest verbatim.
            data = b'#!python' + data[m.end():]
        else:
            # No shebang: prepend one, matching the file's existing
            # line-terminator convention (\n, \r\n, or bare \r).
            cr = data.find(b'\r')
            lf = data.find(b'\n')
            if cr < 0 or cr > lf:
                # No CR at all, or the first CR comes after the first LF.
                term = b'\n'
            else:
                if data[cr:cr + 2] == b'\r\n':
                    term = b'\r\n'
                else:
                    term = b'\r'
            data = b'#!python' + term + data
        return data
def get_hash(self, data, hash_kind=None):
if hash_kind is None:
hash_kind = self.hash_kind
try:
hasher = getattr(hashlib, hash_kind)
except AttributeError:
raise DistlibException('Unsupported hash algorithm: %r' % hash_kind)
result = hasher(data).digest()
result = base64.urlsafe_b64encode(result).rstrip(b'=').decode('ascii')
return hash_kind, result
    def write_record(self, records, record_path, base):
        # Write the RECORD manifest (archive-path, hash, size rows), then
        # append a row for RECORD itself with empty hash/size fields, as
        # PEP 427 specifies.
        with CSVWriter(record_path) as writer:
            for row in records:
                writer.writerow(row)
            p = to_posix(os.path.relpath(record_path, base))
            writer.writerow((p, '', ''))
    def build(self, paths, tags=None):
        """
        Build a wheel from files in specified paths, and use any specified tags
        when determining the name of the wheel.

        *paths* maps location keys ('purelib' or 'platlib', plus optional
        'data', 'headers', 'scripts') to directories on disk; exactly one
        of purelib/platlib must be present.  Returns the path of the
        written .whl file.
        """
        if tags is None:
            tags = {}
        # Which site-packages flavour is being packaged decides purity and
        # the default compatibility tags.
        libkey = list(filter(lambda o: o in paths, ('purelib', 'platlib')))[0]
        if libkey == 'platlib':
            is_pure = 'false'
            default_pyver = [IMPVER]
            default_abi = [ABI]
            default_arch = [ARCH]
        else:
            is_pure = 'true'
            default_pyver = [PYVER]
            default_abi = ['none']
            default_arch = ['any']
        self.pyver = tags.get('pyver', default_pyver)
        self.abi = tags.get('abi', default_abi)
        self.arch = tags.get('arch', default_arch)

        libdir = paths[libkey]

        name_ver = '%s-%s' % (self.name, self.version)
        data_dir = '%s.data' % name_ver
        info_dir = '%s.dist-info' % name_ver

        # (archive path, filesystem path) pairs to be written to the zip.
        archive_paths = []

        # First, stuff which is not in site-packages
        for key in ('data', 'headers', 'scripts'):
            if key not in paths:
                continue
            path = paths[key]
            if os.path.isdir(path):
                for root, dirs, files in os.walk(path):
                    for fn in files:
                        p = fsdecode(os.path.join(root, fn))
                        rp = os.path.relpath(p, path)
                        ap = to_posix(os.path.join(data_dir, key, rp))
                        archive_paths.append((ap, p))
                        # Normalise script shebangs in place (not .exe stubs).
                        if key == 'scripts' and not p.endswith('.exe'):
                            with open(p, 'rb') as f:
                                data = f.read()
                            data = self.process_shebang(data)
                            with open(p, 'wb') as f:
                                f.write(data)

        # Now, stuff which is in site-packages, other than the
        # distinfo stuff.
        path = libdir
        distinfo = None
        for root, dirs, files in os.walk(path):
            if root == path:
                # At the top level only, save distinfo for later
                # and skip it for now
                for i, dn in enumerate(dirs):
                    dn = fsdecode(dn)
                    if dn.endswith('.dist-info'):
                        distinfo = os.path.join(root, dn)
                        del dirs[i]
                        break
                assert distinfo, '.dist-info directory expected, not found'

            for fn in files:
                # comment out next suite to leave .pyc files in
                if fsdecode(fn).endswith(('.pyc', '.pyo')):
                    continue
                p = os.path.join(root, fn)
                rp = to_posix(os.path.relpath(p, path))
                archive_paths.append((rp, p))

        # Now distinfo. Assumed to be flat, i.e. os.listdir is enough.
        # Installer-generated files are excluded from the wheel.
        files = os.listdir(distinfo)
        for fn in files:
            if fn not in ('RECORD', 'INSTALLER', 'SHARED'):
                p = fsdecode(os.path.join(distinfo, fn))
                ap = to_posix(os.path.join(info_dir, fn))
                archive_paths.append((ap, p))

        # Generate the WHEEL metadata file with version, generator,
        # purity flag and one Tag line per compatibility tag.
        import distlib
        wheel_metadata = [
            'Wheel-Version: %d.%d' % self.wheel_version,
            'Generator: distlib %s' % distlib.__version__,
            'Root-Is-Purelib: %s' % is_pure,
        ]
        for pyver, abi, arch in self.tags:
            wheel_metadata.append('Tag: %s-%s-%s' % (pyver, abi, arch))
        p = os.path.join(distinfo, 'WHEEL')
        with open(p, 'w') as f:
            f.write('\n'.join(wheel_metadata))
        ap = to_posix(os.path.join(info_dir, 'WHEEL'))
        archive_paths.append((ap, p))

        # Now, at last, RECORD.
        # Paths in here are archive paths - nothing else makes sense.
        records = []
        hasher = getattr(hashlib, self.hash_kind)
        for ap, p in archive_paths:
            with open(p, 'rb') as f:
                data = f.read()
            digest = '%s=%s' % self.get_hash(data)
            size = os.path.getsize(p)
            records.append((ap, digest, size))

        p = os.path.join(distinfo, 'RECORD')
        self.write_record(records, p, libdir)
        ap = to_posix(os.path.join(info_dir, 'RECORD'))
        archive_paths.append((ap, p))
        # Now, ready to build the zip file
        pathname = os.path.join(self.dirname, self.filename)
        with ZipFile(pathname, 'w', zipfile.ZIP_DEFLATED) as zf:
            for ap, p in archive_paths:
                logger.debug('Wrote %s to %s in wheel', p, ap)
                zf.write(p, ap)
        return pathname
def install(self, paths, dry_run=False, executable=None, warner=None):
    """
    Install a wheel to the specified paths. If ``executable`` is specified,
    it should be the Unicode absolute path to the executable written
    into the shebang lines of any scripts installed. If ``warner`` is
    specified, it should be a callable, which will be called with two
    tuples indicating the wheel version of this software and the wheel
    version in the file, if there is a discrepancy in the versions.
    This can be used to issue any warnings or raise any exceptions.
    """
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    data_dir = '%s.data' % name_ver
    info_dir = '%s.dist-info' % name_ver
    wheel_metadata_name = posixpath.join(info_dir, 'WHEEL')
    record_name = posixpath.join(info_dir, 'RECORD')
    wrapper = codecs.getreader('utf-8')
    with ZipFile(pathname, 'r') as zf:
        # Read the WHEEL metadata and warn the caller if the wheel format
        # version differs from the one this code was written for.
        with zf.open(wheel_metadata_name) as bwf:
            wf = wrapper(bwf)
            message = message_from_file(wf)
        wv = message['Wheel-Version'].split('.', 1)
        file_version = tuple([int(i) for i in wv])
        if (file_version != self.wheel_version) and warner:
            warner(self.wheel_version, file_version)
        if message['Root-Is-Purelib'] == 'true':
            libdir = paths['purelib']
        else:
            libdir = paths['platlib']
        # Load RECORD so that per-file sizes and digests can be verified
        # while installing.
        records = {}
        with zf.open(record_name) as bf:
            with CSVReader(record_name, stream=bf) as reader:
                for row in reader:
                    p = row[0]
                    records[p] = row
        data_pfx = posixpath.join(data_dir, '')
        script_pfx = posixpath.join(data_dir, 'scripts', '')
        fileop = FileOperator(dry_run=dry_run)
        fileop.record = True    # so we can rollback if needed
        bc = not sys.dont_write_bytecode    # Double negatives. Lovely!
        outfiles = []   # for RECORD writing
        # for script copying/shebang processing
        workdir = tempfile.mkdtemp()
        # set target dir later
        # we default add_launchers to False, as the
        # Python Launcher should be used instead
        maker = ScriptMaker(workdir, None, fileop=fileop,
                            add_launchers=False)
        maker.executable = executable
    try:
        for zinfo in zf.infolist():
            arcname = zinfo.filename
            if isinstance(arcname, text_type):
                u_arcname = arcname
            else:
                u_arcname = arcname.decode('utf-8')
            row = records[u_arcname]
            # Verify the archived size against RECORD, if recorded.
            if row[2] and str(zinfo.file_size) != row[2]:
                raise DistlibException('size mismatch for '
                                       '%s' % u_arcname)
            if row[1]:
                # Verify the archived digest against RECORD.
                kind, value = row[1].split('=', 1)
                with zf.open(arcname) as bf:
                    data = bf.read()
                _, digest = self.get_hash(data, kind)
                if digest != value:
                    raise DistlibException('digest mismatch for '
                                           '%s' % arcname)
            is_script = (u_arcname.startswith(script_pfx)
                         and not u_arcname.endswith('.exe'))
            if u_arcname.startswith(data_pfx):
                # Entry inside the .data directory: its second path
                # component names the install scheme (scripts, headers, ...).
                _, where, rp = u_arcname.split('/', 2)
                outfile = os.path.join(paths[where], convert_path(rp))
            else:
                # meant for site-packages.
                if u_arcname in (wheel_metadata_name, record_name):
                    continue
                outfile = os.path.join(libdir, convert_path(u_arcname))
            if not is_script:
                with zf.open(arcname) as bf:
                    fileop.copy_stream(bf, outfile)
                outfiles.append(outfile)
                # Double check the digest of the written file
                if not dry_run and row[1]:
                    with open(outfile, 'rb') as bf:
                        data = bf.read()
                        _, newdigest = self.get_hash(data, kind)
                        if newdigest != digest:
                            raise DistlibException('digest mismatch '
                                                   'on write for '
                                                   '%s' % outfile)
                if bc and outfile.endswith('.py'):
                    try:
                        pyc = fileop.byte_compile(outfile)
                        outfiles.append(pyc)
                    except Exception:
                        # Don't give up if byte-compilation fails,
                        # but log it and perhaps warn the user
                        logger.warning('Byte-compilation failed',
                                       exc_info=True)
            else:
                # Scripts are staged in workdir, then rewritten (shebang
                # processing) by ScriptMaker into their final location.
                fn = os.path.basename(convert_path(arcname))
                workname = os.path.join(workdir, fn)
                with zf.open(arcname) as bf:
                    fileop.copy_stream(bf, workname)
                dn, fn = os.path.split(outfile)
                maker.target_dir = dn
                filenames = maker.make(fn)
                fileop.set_executable_mode(filenames)
                outfiles.extend(filenames)
        p = os.path.join(libdir, info_dir)
        dist = InstalledDistribution(p)
        # Write SHARED
        paths = dict(paths)     # don't change passed in dict
        del paths['purelib']
        del paths['platlib']
        paths['lib'] = libdir
        p = dist.write_shared_locations(paths, dry_run)
        outfiles.append(p)
        # Write RECORD
        dist.write_installed_files(outfiles, paths['prefix'],
                                   dry_run)
        return dist
    except Exception as e:  # pragma: no cover
        # Anything that went wrong is rolled back so no partial install
        # is left behind.
        logger.exception('installation failed.')
        fileop.rollback()
        raise
    finally:
        shutil.rmtree(workdir)
def _get_dylib_cache(self):
    """Return the directory used to cache extracted native extensions,
    creating it on first use."""
    cache_dir = os.path.join(get_cache_base(), 'dylib-cache')
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    return cache_dir
def _get_extensions(self):
    """Return a list of (name, absolute_path) pairs for the wheel's native
    extensions, extracting each one into the dylib cache when the cached
    copy is missing or older than the copy inside the wheel."""
    pathname = os.path.join(self.dirname, self.filename)
    name_ver = '%s-%s' % (self.name, self.version)
    info_dir = '%s.dist-info' % name_ver
    arcname = posixpath.join(info_dir, 'EXTENSIONS')
    wrapper = codecs.getreader('utf-8')
    result = []
    with ZipFile(pathname, 'r') as zf:
        try:
            # EXTENSIONS is a JSON mapping of extension name -> archive path.
            with zf.open(arcname) as bf:
                wf = wrapper(bf)
                extensions = json.load(wf)
            cache_base = self._get_dylib_cache()
            for name, relpath in extensions.items():
                dest = os.path.join(cache_base, convert_path(relpath))
                if not os.path.exists(dest):
                    extract = True
                else:
                    # Re-extract only when the wheel's copy is newer than
                    # the cached file on disk.
                    file_time = os.stat(dest).st_mtime
                    file_time = datetime.datetime.fromtimestamp(file_time)
                    info = zf.getinfo(relpath)
                    wheel_time = datetime.datetime(*info.date_time)
                    extract = wheel_time > file_time
                if extract:
                    zf.extract(relpath, cache_base)
                result.append((name, dest))
        except KeyError:
            # No EXTENSIONS metadata in this wheel - nothing to extract.
            pass
    return result
def mount(self, append=False):
    """Add this wheel to sys.path so its contents become importable.

    Raises DistlibException when the wheel is not compatible with the
    running interpreter. With ``append`` true the wheel is appended to
    sys.path rather than prepended. Any native extensions recorded in
    the wheel are registered with the module import hook.
    """
    wheel_path = os.path.abspath(os.path.join(self.dirname, self.filename))
    if not is_compatible(self):
        raise DistlibException(
            'Wheel %s not mountable in this Python.' % wheel_path)
    if wheel_path in sys.path:
        logger.debug('%s already in path', wheel_path)
        return
    if append:
        sys.path.append(wheel_path)
    else:
        sys.path.insert(0, wheel_path)
    native_extensions = self._get_extensions()
    if native_extensions:
        if _hook not in sys.meta_path:
            sys.meta_path.append(_hook)
        _hook.add(wheel_path, native_extensions)
def unmount(self):
    """Remove this wheel from sys.path and drop its extensions from the
    import hook; the hook itself is removed once no wheels remain."""
    wheel_path = os.path.abspath(os.path.join(self.dirname, self.filename))
    if wheel_path not in sys.path:
        logger.debug('%s not in path', wheel_path)
    else:
        sys.path.remove(wheel_path)
    if wheel_path in _hook.impure_wheels:
        _hook.remove(wheel_path)
    if not _hook.impure_wheels and _hook in sys.meta_path:
        sys.meta_path.remove(_hook)
def compatible_tags():
    """
    Return (pyver, abi, arch) tuples compatible with this Python.
    """
    # All version tags from the current minor version down to X.0.
    versions = [VER_SUFFIX]
    major = VER_SUFFIX[0]
    for minor in range(sys.version_info[1] - 1, - 1, -1):
        versions.append(''.join([major, str(minor)]))
    # ABI tags derived from the extension-module suffixes (e.g. '.abi3.so').
    # NOTE(review): 'imp' is deprecated on Python 3 - presumably retained
    # here for Python 2 compatibility in this vendored copy; verify.
    abis = []
    for suffix, _, _ in imp.get_suffixes():
        if suffix.startswith('.abi'):
            abis.append(suffix.split('.', 2)[1])
    abis.sort()
    if ABI != 'none':
        abis.insert(0, ABI)
    abis.append('none')
    result = []
    # Most specific - our Python version, ABI and arch
    for abi in abis:
        result.append((''.join((IMP_PREFIX, versions[0])), abi, ARCH))
    # where no ABI / arch dependency, but IMP_PREFIX dependency
    for i, version in enumerate(versions):
        result.append((''.join((IMP_PREFIX, version)), 'none', 'any'))
        if i == 0:
            # Also the major-only tag, e.g. 'cp3' alongside 'cp33'.
            result.append((''.join((IMP_PREFIX, version[0])), 'none', 'any'))
    # no IMP_PREFIX, ABI or arch dependency
    for i, version in enumerate(versions):
        result.append((''.join(('py', version)), 'none', 'any'))
        if i == 0:
            result.append((''.join(('py', version[0])), 'none', 'any'))
    return result
# Tags supported by the running interpreter, computed once at import time.
COMPATIBLE_TAGS = compatible_tags()
# The builder is only needed once, so remove it from the module namespace.
del compatible_tags
def is_compatible(wheel, tags=None):
    """Return True when *wheel* (a Wheel or a wheel filename) carries at
    least one (pyver, abi, arch) combination found in *tags* (defaults to
    the tags of the running interpreter)."""
    if not isinstance(wheel, Wheel):
        wheel = Wheel(wheel)    # assume it's a filename
    if tags is None:
        tags = COMPATIBLE_TAGS
    return any(ver in wheel.pyver and abi in wheel.abi and arch in wheel.arch
               for ver, abi, arch in tags)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@pip@vendor@distlib@wheel.py@.PATH_END.py
|
{
"filename": "_dtickrange.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/carpet/baxis/tickformatstop/_dtickrange.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickrangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for the ``dtickrange`` property of
    carpet.baxis.tickformatstop."""

    def __init__(
        self,
        plotly_name="dtickrange",
        parent_name="carpet.baxis.tickformatstop",
        **kwargs
    ):
        # Two-element info array describing [min, max] of the dtick range.
        default_items = [
            {"valType": "any", "editType": "calc"},
            {"valType": "any", "editType": "calc"},
        ]
        super(DtickrangeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            items=kwargs.pop("items", default_items),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@carpet@baxis@tickformatstop@_dtickrange.py@.PATH_END.py
|
{
"filename": "spec.md",
"repo_name": "EranOfek/AstroPack",
"repo_path": "AstroPack_extracted/AstroPack-main/matlab/astro/+astro/+spec/spec.md",
"type": "Markdown"
}
|
# Overview
# List of Subpackages
# Subpackages
# Usage
# Notes
# Known Issues
# See Also
|
EranOfekREPO_NAMEAstroPackPATH_START.@AstroPack_extracted@AstroPack-main@matlab@astro@+astro@+spec@spec.md@.PATH_END.py
|
{
"filename": "property_modifiers.py",
"repo_name": "CaymanUnterborn/ExoPlex",
"repo_path": "ExoPlex_extracted/ExoPlex-master/ExoPlex/burnman/eos/property_modifiers.py",
"type": "Python"
}
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
import numpy as np
import scipy.optimize as opt
from ..constants import gas_constant
"""
Functions for modifying the thermodynamic properties of minerals
Currently includes modifications for:
- second order transitions (landau, landau_hp),
- order-disorder (bragg_williams),
- magnetism (magnetic_chs),
- and a linear modification (linear).
"""
def _landau_excesses(pressure, temperature, params):
    """
    Applies a tricritical Landau correction to the properties
    of an endmember which undergoes a displacive phase transition.
    This correction follows Putnis (1992), and is done relative to
    the completely *ordered* state (at 0 K).
    It therefore differs in implementation from both
    Stixrude and Lithgow-Bertelloni (2011) and
    Holland and Powell (2011), who compute properties relative to
    the completely disordered state and standard states respectively.
    The current implementation is preferred, as the excess
    entropy (and heat capacity) terms are equal to zero at 0 K.

    N.B. The excesses are for a *completely relaxed* mineral;
    for example, seismic wave propagation is *slow* compared to the
    rate of reaction.

    Required ``params`` keys: 'Tc_0', 'S_D', 'V_D'.
    Returns (excesses dict, {'Q': order parameter}).
    """
    # Pressure-dependent critical temperature.
    Tc = params['Tc_0'] + params['V_D'] * pressure / params['S_D']
    G_disordered = -params['S_D'] * ((temperature - Tc) + params['Tc_0'] / 3.)
    dGdT_disordered = -params['S_D']
    dGdP_disordered = params['V_D']
    if temperature < Tc:
        # Wolfram input to check partial differentials
        # x = T, y = P, a = S, c = Tc0, d = V
        # D[D[a ((x - c - d*y/a)*(1 - x/(c + d*y/a))^0.5 + c/3*(1 - x/(c +
        # d*y/a))^1.5), x], x]
        # Q2 is the squared order parameter: Q^4 = 1 - T/Tc (tricritical),
        # consistent with the np.sqrt(Q2) in the return value below.
        Q2 = np.sqrt(1. - temperature / Tc)
        G = (params['S_D']
             * ((temperature - Tc) * Q2
                + params['Tc_0'] * Q2 * Q2 * Q2 / 3.) + G_disordered)
        dGdP = (-params['V_D'] * Q2 * (1. + 0.5 * temperature / Tc
                                       * (1. - params['Tc_0'] / Tc))
                + dGdP_disordered)
        dGdT = (params['S_D'] * Q2 * (1.5 - 0.5 * params['Tc_0'] / Tc)
                + dGdT_disordered)
        d2GdP2 = (params['V_D'] * params['V_D'] * temperature
                  / (params['S_D'] * Tc * Tc * Q2)
                  * (temperature * (1. + params['Tc_0'] / Tc) / (4. * Tc)
                     + Q2 * Q2 * (1. - params['Tc_0'] / Tc) - 1.))
        d2GdT2 = -params['S_D'] / (Tc * Q2) * (0.75 - 0.25 * params['Tc_0']
                                               / Tc)
        d2GdPdT = (params['V_D'] / (2. * Tc * Q2)
                   * (1. + (temperature / (2. * Tc) - Q2 * Q2)
                      * (1. - params['Tc_0'] / Tc)))
    else:
        # Above Tc the phase is fully disordered (Q = 0); the excess
        # second derivatives vanish.
        Q2 = 0.
        G = G_disordered
        dGdT = dGdT_disordered
        dGdP = dGdP_disordered
        d2GdT2 = 0.
        d2GdP2 = 0.
        d2GdPdT = 0.
    excesses = {'G': G, 'dGdT': dGdT, 'dGdP': dGdP,
                'd2GdT2': d2GdT2, 'd2GdP2': d2GdP2, 'd2GdPdT': d2GdPdT}
    return (excesses, {'Q': np.sqrt(Q2)})
def _landau_hp_excesses(pressure, temperature, params):
    """
    Applies a tricritical Landau correction to the properties
    of an endmember which undergoes a displacive phase transition.
    This correction is done relative to the standard state, as per
    Holland and Powell (1998).
    Includes the correction published within landaunote.pdf
    (Holland, pers. comm), which 'corrects' the terms involving
    the critical temperature Tc / Tc*

    Note that this formalism is still inconsistent, as it predicts that
    the order parameter can be greater than one. For this reason
    _landau_excesses is preferred.

    N.B. The excesses are for a *completely relaxed* mineral;
    i.e. the seismic wave propagation is *slow* compared to the
    rate of reaction.

    Required ``params`` keys: 'T_0', 'P_0', 'Tc_0', 'S_D', 'V_D'.
    Returns (excesses dict, {'Q': order parameter}).
    """
    P = pressure
    T = temperature
    # Order parameter at the reference state (T_0, P_0).
    if params['T_0'] < params['Tc_0']:
        Q_0 = np.power((params['Tc_0'] - params['T_0']) / params['Tc_0'], 0.25)
    else:
        Q_0 = 0.
    # Pressure-dependent critical temperature.
    Tc = params['Tc_0'] + params['V_D'] * (P - params['P_0']) / params['S_D']
    if T < Tc:
        Q = np.power((Tc - T) / params['Tc_0'], 0.25)
    else:
        Q = 0.
    # Gibbs
    G = (params['Tc_0'] * params['S_D'] * (Q_0 * Q_0 - np.power(Q_0, 6.) / 3.)
         - params['S_D'] * (Tc * Q * Q - params['Tc_0'] * np.power(Q, 6.) / 3.)
         - T * params['S_D'] * (Q_0 * Q_0 - Q * Q)
         + (P - params['P_0']) * params['V_D'] * Q_0 * Q_0)
    dGdT = params['S_D'] * (Q * Q - Q_0 * Q_0)
    dGdP = -params['V_D'] * (Q * Q - Q_0 * Q_0)
    # Second derivatives diverge as Q -> 0; guard against division by zero.
    if Q > 1.e-12:
        d2GdT2 = -params['S_D'] / (2. * params['Tc_0'] * Q * Q)
        d2GdP2 = (-params['V_D'] * params['V_D']
                  / (2. * params['S_D'] * params['Tc_0'] * Q * Q))
        d2GdPdT = params['V_D'] / (2. * params['Tc_0'] * Q * Q)
    else:
        d2GdT2 = 0.
        d2GdP2 = 0.
        d2GdPdT = 0.
    excesses = {'G': G, 'dGdT': dGdT, 'dGdP': dGdP,
                'd2GdT2': d2GdT2, 'd2GdP2': d2GdP2, 'd2GdPdT': d2GdPdT}
    return (excesses, {'Q': Q})
def _linear_excesses(pressure, temperature, params):
"""
Applies a 'Darken's quadratic formalism' correction (Powell, 1987)
to the thermodynamic properties of a mineral endmember.
This correction is relative to P = 0 and T = 0 and linear in P and T
and therefore corresponds to a constant volume and entropy correction.
Applying either a volume or entropy term will generally break
equations of state (i.e. the properties of the mineral will
no longer obey the equation of state defined in the
params dictionary. However, this form of excess is extremely
useful as a first order tweak to free energies
(especially in solid solution calculations)
"""
G = params['delta_E'] \
- (temperature) * params['delta_S'] \
+ (pressure) * params['delta_V']
dGdT = -params['delta_S']
dGdP = params['delta_V']
d2GdT2 = 0.
d2GdP2 = 0.
d2GdPdT = 0.
excesses = {'G': G, 'dGdT': dGdT, 'dGdP': dGdP,
'd2GdT2': d2GdT2, 'd2GdP2': d2GdP2, 'd2GdPdT': d2GdPdT}
return (excesses, None)
def _bragg_williams_excesses(pressure, temperature, params):
    """
    Applies a Bragg-Williams type correction to the thermodynamic
    properties of a mineral endmember. Used for modelling
    order-disorder processes.
    Expressions are from Holland and Powell (1996).

    N.B. The excesses are for a *completely relaxed* mineral;
    i.e. the seismic wave propagation is *slow* compared to the
    rate of reaction.
    This may not be reasonable for order-disorder, especially
    for slow or coupled diffusers (Si-Al, for example).
    The completely *unrelaxed* mineral (in terms of order-disorder)
    can be calculated with a solid solution model.

    Required ``params`` keys: 'n', 'factor', 'Wh', 'Wv', 'deltaH',
    'deltaV'. Returns (excesses dict, {'Q': order parameter}).
    """
    R = gas_constant
    n = params['n']
    # A positive 'factor' scales both site multiplicities equally; a
    # negative one is interpreted as the (negated) second multiplicity.
    if params['factor'] > 0.:
        f = [params['factor'], params['factor']]
    else:
        f = [1., -params['factor']]
    # Equation A2-2
    def flnarxn(n, Q, f):
        return (n / (n + 1.) * (f[0] * np.log(n*(1. - Q))
                                + f[1] * np.log(1. - Q)
                                - f[0] * np.log(1. + n*Q)
                                - f[1] * np.log(n + Q)))
    # Equation A2-4
    # Can be derived from A2-2 noting that
    # delta_H + f*R*T*lnarxn = delta_G + f*R*T*(lnadisord - lnaord)
    def reaction_bragg_williams(Q, delta_H, temperature, n, f, W):
        return (delta_H + R * temperature * flnarxn(n, Q, f)
                + (2. * Q - 1.) * W)
    def order_gibbs(pressure, temperature, params):
        # Solve for the equilibrium order parameter Q at (P, T) and
        # return it with the corresponding Gibbs energy excess.
        W = params['Wh'] + pressure * params['Wv']
        H_disord = (params['deltaH']
                    + pressure * params['deltaV'])
        # We can use brentq, but don't let the lower bracket = 0
        try:
            Q = opt.brentq(reaction_bragg_williams, 1.e-12, 1. - 1.e-12,
                           args=(H_disord, temperature, n, f, W))
        except ValueError:
            # No root in the bracket: treat as fully disordered.
            Q = 0.
        S = - R * (f[0] * ((1. + n*Q)*np.log((1. + n * Q)/(n + 1.))
                           + n * (1. - Q) * np.log(n * (1. - Q) / (n + 1.)))
                   + f[1] * (n * (1. - Q) * np.log((1. - Q) / (n + 1.))
                             + n * (n + Q) * np.log((n + Q) / (n + 1.)))
                   ) / (n + 1.)
        G = (1. - Q)*H_disord + (1. - Q)*Q*W - temperature*S
        return Q, G
    # Calculating partial differentials with respect to P and T
    # are complicated by the fact that Q changes with P and T
    # Since there's no analytical solution for Q(P, T), we are
    # unfortunately driven to numerical differentiation. Schade.
    dT = 0.1
    dP = 1000.
    # NOTE(review): Q is overwritten by every finite-difference evaluation
    # below, so the Q returned at the end corresponds to (P, T + dT) rather
    # than (P, T) - presumably unintended; verify against upstream.
    Q, G = order_gibbs(pressure, temperature, params)
    Q, GsubPsubT = order_gibbs(pressure - dP, temperature - dT, params)
    Q, GsubPaddT = order_gibbs(pressure - dP, temperature + dT, params)
    Q, GaddPsubT = order_gibbs(pressure + dP, temperature - dT, params)
    Q, GaddPaddT = order_gibbs(pressure + dP, temperature + dT, params)
    Q, GsubP = order_gibbs(pressure - dP, temperature, params)
    Q, GaddP = order_gibbs(pressure + dP, temperature, params)
    Q, GsubT = order_gibbs(pressure, temperature - dT, params)
    Q, GaddT = order_gibbs(pressure, temperature + dT, params)
    # Central finite differences for the first and second derivatives.
    dGdT = (GaddT - GsubT) / (2. * dT)
    dGdP = (GaddP - GsubP) / (2. * dP)
    d2GdT2 = (GaddT + GsubT - 2. * G) / (dT * dT)
    d2GdP2 = (GaddP + GsubP - 2. * G) / (dP * dP)
    d2GdPdT = (GaddPaddT - GsubPaddT - GaddPsubT + GsubPsubT) / (4. * dT * dP)
    excesses = {'G': G, 'dGdT': dGdT, 'dGdP': dGdP,
                'd2GdT2': d2GdT2, 'd2GdP2': d2GdP2, 'd2GdPdT': d2GdPdT}
    return (excesses, {'Q': Q})
def _magnetic_excesses_chs(pressure, temperature, params):
    """
    Applies a magnetic contribution to the thermodynamic
    properties of a mineral endmember.
    The expression for the gibbs energy contribution is that
    used by Chin, Hertzman and Sundman (1987) as reported
    in the Journal of Phase Equilibria (Sundman, 1991).

    Required ``params`` keys: 'structural_parameter',
    'curie_temperature' (2-element: value and pressure derivative),
    'magnetic_moment' (2-element: value and pressure derivative).
    Returns (excesses dict, None).
    """
    structural_parameter = params['structural_parameter']
    # Curie temperature and reduced temperature tau = T / Tc, both with
    # their pressure/temperature derivatives.
    curie_temperature = params['curie_temperature'][
        0] + pressure * params['curie_temperature'][1]
    tau = temperature / curie_temperature
    dtaudT = 1. / curie_temperature
    dtaudP = -(temperature * params['curie_temperature'][1]) / (
        curie_temperature * curie_temperature)
    d2taudPdT = params['curie_temperature'][
        1] / (curie_temperature * curie_temperature)
    d2taudP2 = (2. * temperature * params['curie_temperature'][1]
                * params['curie_temperature'][1]
                / (curie_temperature * curie_temperature * curie_temperature))
    magnetic_moment = params['magnetic_moment'][
        0] + pressure * params['magnetic_moment'][1]
    dmagnetic_momentdP = params['magnetic_moment'][1]
    A = (518. / 1125.) + (11692. / 15975.) * ((1. / structural_parameter) - 1.)
    if tau < 1:
        # Below the Curie temperature: polynomial series in tau.
        f = 1. - (1. / A) * (79. / (140. * structural_parameter * tau)
                             + (474. / 497.) * (1. / structural_parameter - 1.)
                             * (np.power(tau, 3.) / 6.
                                + np.power(tau, 9.) / 135.
                                + np.power(tau, 15.) / 600.))
        dfdtau = -(1. / A) * (-79. / (140. * structural_parameter * tau * tau)
                              + (474. / 497.) * (1. / structural_parameter
                                                 - 1.)
                              * (tau * tau / 2.
                                 + np.power(tau, 8.) / 15.
                                 + np.power(tau, 14.) / 40.))
        d2fdtau2 = -(1. / A) * (2. * 79. / (140. * structural_parameter
                                            * np.power(tau, 3.))
                                + (474. / 497.) * (1. / structural_parameter
                                                   - 1.)
                                * (tau
                                   + 8. * np.power(tau, 7.) / 15.
                                   + 14. * np.power(tau, 13.) / 40.))
    else:
        # At or above the Curie temperature: series in inverse powers of tau.
        f = - (1. / A) * (np.power(tau, -5.) / 10. + np.power(
            tau, -15.) / 315. + np.power(tau, -25.) / 1500.)
        dfdtau = (1. / A) * (np.power(tau, -6.) / 2.
                             + np.power(tau, -16.) / 21.
                             + np.power(tau, -26.)
                             / 60.)
        d2fdtau2 = - (1. / A) * (6. * np.power(tau, -7.) / 2. + 16.
                                 * np.power(tau, -17.) / 21. + 26.
                                 * np.power(tau, -27.)
                                 / 60.)
    # Chain rule: convert tau derivatives into P and T derivatives.
    dfdT = dfdtau * dtaudT
    d2fdT2 = d2fdtau2 * dtaudT * dtaudT
    dfdP = dfdtau * dtaudP
    d2fdP2 = d2fdtau2 * dtaudP * dtaudP + dfdtau * d2taudP2
    d2fdPdT = d2fdtau2 * dtaudT * dtaudP - dfdtau * d2taudPdT
    # G_mag = R * T * ln(B0 + 1) * f(tau), plus its derivatives.
    G = gas_constant * temperature * np.log(magnetic_moment + 1.) * f
    dGdT = gas_constant * \
        np.log(magnetic_moment + 1.) * (f + temperature * dfdT)
    d2GdT2 = gas_constant * \
        np.log(magnetic_moment + 1.) * (2. * dfdT + temperature * d2fdT2)
    dGdP = gas_constant * temperature * (f * dmagnetic_momentdP
                                         / (magnetic_moment + 1.)
                                         + dfdP * np.log(magnetic_moment + 1.))
    d2GdP2 = gas_constant * temperature * (-f * np.power(dmagnetic_momentdP
                                                         / (magnetic_moment + 1.), 2.)
                                           + 2 * dfdP * dmagnetic_momentdP
                                           / (magnetic_moment + 1.)
                                           + d2fdP2 * np.log(magnetic_moment + 1.))
    d2GdPdT = dGdP / temperature + (gas_constant * temperature
                                    * np.log(magnetic_moment + 1.)
                                    * d2fdPdT
                                    + gas_constant * temperature
                                    * dmagnetic_momentdP
                                    / (magnetic_moment + 1.) * dfdT)
    excesses = {'G': G, 'dGdT': dGdT, 'dGdP': dGdP,
                'd2GdT2': d2GdT2, 'd2GdP2': d2GdP2, 'd2GdPdT': d2GdPdT}
    return (excesses, None)
def calculate_property_modifications(mineral):
    """
    Sums the excesses from all the modifiers.
    To calculate thermodynamic properties from the outputs,
    the following functions should be used
    (the _o suffix stands for original value):
    gibbs = gibbs_o + excesses['G']
    S = S_o - excesses['dGdT']
    V = V_o + excesses['dGdP']
    K_T = V / ((V_o / K_T_o) - excesses['d2GdP2'])
    C_p = C_p_o - temperature*excesses['d2GdT2']
    alpha = ((alpha_o*V_o) + excesses['d2GdPdT']) / V
    H = gibbs + temperature*S
    helmholtz = gibbs - pressure*V
    C_v = C_p - V*temperature*alpha*alpha*K_T
    gr = alpha*K_T*V/C_v
    K_S = K_T*C_p/C_v
    """
    # Dispatch table instead of a chain of independent 'if's: the original
    # chain silently reused the previous iteration's function (or raised an
    # UnboundLocalError) when a modifier name was not recognised.
    xs_functions = {'landau': _landau_excesses,
                    'landau_hp': _landau_hp_excesses,
                    'linear': _linear_excesses,
                    'bragg_williams': _bragg_williams_excesses,
                    'magnetic_chs': _magnetic_excesses_chs}
    excesses = {'G': 0., 'dGdT': 0., 'dGdP': 0.,
                'd2GdT2': 0., 'd2GdP2': 0., 'd2GdPdT': 0.}
    mineral.property_modifier_properties = []
    for modifier in mineral.property_modifiers:
        try:
            xs_function = xs_functions[modifier[0]]
        except KeyError:
            raise NotImplementedError(
                'Property modifier type %s not recognised; should be one of '
                '%s.' % (modifier[0], sorted(xs_functions)))
        xs_component, properties = xs_function(
            mineral.pressure, mineral.temperature, modifier[1])
        mineral.property_modifier_properties.append(properties)
        # Accumulate each excess term over all modifiers.
        for key in xs_component:
            excesses[key] += xs_component[key]
    return excesses
|
CaymanUnterbornREPO_NAMEExoPlexPATH_START.@ExoPlex_extracted@ExoPlex-master@ExoPlex@burnman@eos@property_modifiers.py@.PATH_END.py
|
{
"filename": "cosmology_fields.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/fields/cosmology_fields.py",
"type": "Python"
}
|
from .derived_field import ValidateParameter
from .field_exceptions import NeedsConfiguration, NeedsParameter
from .field_plugin_registry import register_field_plugin
@register_field_plugin
def setup_cosmology_fields(registry, ftype="gas", slice_info=None):
    """Register cosmology-related derived fields on *registry*.

    Adds matter density/mass, several overdensity fields, a virial
    radius fraction, and a weak lensing convergence field, all attached
    to the field type *ftype*.
    """
    unit_system = registry.ds.unit_system
    # slice_info would be the left, the right, and the factor.
    # For example, with the old Enzo-ZEUS fields, this would be:
    # slice(None, -2, None)
    # slice(1, -1, None)
    # 1.0
    # Otherwise, we default to a centered difference.
    # NOTE(review): sl_left / sl_right / div_fac are computed but never used
    # in this plugin - presumably retained for parity with the other field
    # plugins; verify before removing.
    if slice_info is None:
        sl_left = slice(None, -2, None)
        sl_right = slice(2, None, None)
        div_fac = 2.0
    else:
        sl_left, sl_right, div_fac = slice_info
    def _matter_density(field, data):
        # Total matter = baryonic gas + dark matter.
        return data[ftype, "density"] + data[ftype, "dark_matter_density"]
    registry.add_field(
        (ftype, "matter_density"),
        sampling_type="local",
        function=_matter_density,
        units=unit_system["density"],
    )
    def _matter_mass(field, data):
        return data[ftype, "matter_density"] * data["index", "cell_volume"]
    registry.add_field(
        (ftype, "matter_mass"),
        sampling_type="local",
        function=_matter_mass,
        units=unit_system["mass"],
    )
    # rho_total / rho_cr(z).
    def _overdensity(field, data):
        if (
            not hasattr(data.ds, "cosmological_simulation")
            or not data.ds.cosmological_simulation
        ):
            raise NeedsConfiguration("cosmological_simulation", 1)
        co = data.ds.cosmology
        return data[ftype, "matter_density"] / co.critical_density(
            data.ds.current_redshift
        )
    registry.add_field(
        (ftype, "overdensity"), sampling_type="local", function=_overdensity, units=""
    )
    # rho_baryon / <rho_baryon>
    def _baryon_overdensity(field, data):
        if (
            not hasattr(data.ds, "cosmological_simulation")
            or not data.ds.cosmological_simulation
        ):
            raise NeedsConfiguration("cosmological_simulation", 1)
        omega_baryon = data.get_field_parameter("omega_baryon")
        if omega_baryon is None:
            raise NeedsParameter("omega_baryon")
        co = data.ds.cosmology
        # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
        # mean matter density(z) ~ omega_matter * (1 + z)^3
        return (
            data[ftype, "density"]
            / omega_baryon
            / co.critical_density(0.0)
            / (1.0 + data.ds.current_redshift) ** 3
        )
    registry.add_field(
        (ftype, "baryon_overdensity"),
        sampling_type="local",
        function=_baryon_overdensity,
        units="",
        validators=[ValidateParameter("omega_baryon")],
    )
    # rho_matter / <rho_matter>
    def _matter_overdensity(field, data):
        if (
            not hasattr(data.ds, "cosmological_simulation")
            or not data.ds.cosmological_simulation
        ):
            raise NeedsConfiguration("cosmological_simulation", 1)
        co = data.ds.cosmology
        # critical_density(z) ~ omega_lambda + omega_matter * (1 + z)^3
        # mean density(z) ~ omega_matter * (1 + z)^3
        return (
            data[ftype, "matter_density"]
            / data.ds.omega_matter
            / co.critical_density(0.0)
            / (1.0 + data.ds.current_redshift) ** 3
        )
    registry.add_field(
        (ftype, "matter_overdensity"),
        sampling_type="local",
        function=_matter_overdensity,
        units="",
    )
    # r / r_vir
    def _virial_radius_fraction(field, data):
        virial_radius = data.get_field_parameter("virial_radius")
        if virial_radius == 0.0:
            ret = 0.0
        else:
            ret = data["index", "radius"] / virial_radius
        return ret
    registry.add_field(
        ("index", "virial_radius_fraction"),
        sampling_type="local",
        function=_virial_radius_fraction,
        validators=[ValidateParameter("virial_radius")],
        units="",
    )
    # Weak lensing convergence.
    # Eqn 4 of Metzler, White, & Loken (2001, ApJ, 547, 560).
    # This needs to be checked for accuracy.
    def _weak_lensing_convergence(field, data):
        if (
            not hasattr(data.ds, "cosmological_simulation")
            or not data.ds.cosmological_simulation
        ):
            raise NeedsConfiguration("cosmological_simulation", 1)
        co = data.ds.cosmology
        pc = data.ds.units.physical_constants
        observer_redshift = data.get_field_parameter("observer_redshift")
        source_redshift = data.get_field_parameter("source_redshift")
        # observer to lens
        dl = co.angular_diameter_distance(observer_redshift, data.ds.current_redshift)
        # observer to source
        ds = co.angular_diameter_distance(observer_redshift, source_redshift)
        # lens to source
        dls = co.angular_diameter_distance(data.ds.current_redshift, source_redshift)
        # removed the factor of 1 / a to account for the fact that we are projecting
        # with a proper distance.
        return (
            1.5
            * (co.hubble_constant / pc.clight) ** 2
            * (dl * dls / ds)
            * data[ftype, "matter_overdensity"]
        ).in_units("1/cm")
    registry.add_field(
        (ftype, "weak_lensing_convergence"),
        sampling_type="local",
        function=_weak_lensing_convergence,
        units=unit_system["length"] ** -1,
        validators=[
            ValidateParameter("observer_redshift"),
            ValidateParameter("source_redshift"),
        ],
    )
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@fields@cosmology_fields.py@.PATH_END.py
|
{
"filename": "galsim_config.py",
"repo_name": "gbernstein/pixmappy",
"repo_path": "pixmappy_extracted/pixmappy-master/pixmappy/galsim_config.py",
"type": "Python"
}
|
# This class makes the pixmappy.GalSimWCS usable as a GalSim config wcs type.
# To use it, just add pixmappy to the modules section of a config file.
# Then in the config file, the wcs type is called 'Pixmappy'.
try:
    import galsim
    from .GalSimWCS import GalSimWCS

    class PixmappyBuilder(galsim.config.WCSBuilder):
        """GalSim config WCS builder that constructs a pixmappy GalSimWCS."""

        def buildWCS(self, config, base, logger):
            # req = GalSimWCS.{ "file_name" : str,
            #                   "exp" : int,
            #                   "ccdnum" : int
            #                 }
            # opt = { "dir" : str,
            #         "exposure_file" : str, "resids_file" : str, "affine_file" : str
            #       }
            kwargs, safe = galsim.config.GetAllParams(config, base, opt=GalSimWCS._opt_params,
                                                      single=GalSimWCS._single_params)
            if 'exp' in kwargs:
                logger.info('Loading WCS for exposure %s ccd %s', kwargs['exp'], kwargs['ccdnum'])
            else:
                # Bug fix: the original message had no '%s' placeholder, so the
                # extra argument made logging raise a formatting error and the
                # message was dropped.
                logger.info('Loading WCS for map %s', kwargs['wcs_name'])
            wcs = GalSimWCS(**kwargs)
            logger.info('Done loading pixmappy WCS')
            return wcs

    galsim.config.RegisterWCSType('Pixmappy', PixmappyBuilder())

except ImportError:
    # Don't fail if galsim isn't available. Only fail if they try to use
    # the PixmappyBuilder and galsim isn't available.
    pass
|
gbernsteinREPO_NAMEpixmappyPATH_START.@pixmappy_extracted@pixmappy-master@pixmappy@galsim_config.py@.PATH_END.py
|
{
"filename": "plot_demo_distributions.py",
"repo_name": "andrewmbuchan4/PyllutedWD_Public",
"repo_path": "PyllutedWD_Public_extracted/PyllutedWD_Public-main/src/plot_demo_distributions.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from enum import Enum
import csv
import geology_info as gi
import model_parameters as mp
import numpy as np
def load_hollands_distributions():
    """Load Teff and log(g) histograms from the Hollands WD input data.

    Reads rows 1-201 of ../data/WDInputData.csv, bins the effective
    temperatures and surface gravities, and stores the resulting
    (bin_edge_pairs, counts) tuples in the module-level
    ``predefined_distributions`` dict under the keys 'HollandsTeffs'
    and 'HollandsLoggs'. Returns that dict.

    Raises FileNotFoundError if the CSV file is not present relative
    to the working directory.
    """
    # Bug fix: 'predefined_distributions' was never defined anywhere in this
    # module, so the assignments at the end raised a NameError. Create the
    # module-level dict on first use.
    global predefined_distributions
    if 'predefined_distributions' not in globals():
        predefined_distributions = {}
    teffs = list()
    loggs = list()
    teff_bins = np.linspace(4000, 9000, 11)
    logg_bins = np.linspace(7.1, 8.7, 9)  # important to have a bin centred on 8: a lot of log(g)s are equal to 8 exactly
    file_name = 'WDInputData'
    min_row = 1
    max_row = 201
    with open('../data/' + file_name + '.csv', encoding='utf-8') as config_csv:
        row_count = 0
        for row in csv.reader(config_csv, delimiter=','):
            if min_row <= row_count <= max_row:
                # Non-numeric cells (missing measurements) are skipped.
                try:
                    teff = int(row[3])
                except ValueError:
                    teff = None
                if teff is not None:
                    teffs.append(teff)
                try:
                    logg = float(row[4])
                except ValueError:
                    logg = None
                if logg is not None:
                    loggs.append(logg)
            row_count += 1
    teff_counts, teff_edges = np.histogram(teffs, bins=teff_bins)
    logg_counts, logg_edges = np.histogram(loggs, bins=logg_bins)
    # Convert edge arrays into (lower, upper) tuples per bin.
    teff_tuples = [(teff_edges[i], teff_edges[i+1]) for i in range(0, len(teff_edges) - 1)]
    logg_tuples = [(logg_edges[i], logg_edges[i+1]) for i in range(0, len(logg_edges) - 1)]
    predefined_distributions['HollandsTeffs'] = (teff_tuples, teff_counts)
    predefined_distributions['HollandsLoggs'] = (logg_tuples, logg_counts)
    return predefined_distributions
def plot_demo_distributions():
    """Render three demo histograms of fragment core number fraction.

    All three examples share the same bin centres, bar width and axis
    label; only the bar heights and the output suffix differ, so the
    three copy-pasted calls are folded into a single loop.
    """
    import graph_factory as gf
    graph_fac = gf.GraphFactory()
    # Bin centres for ten equal-width bins spanning [0, 1].
    xbar = [0.05, 0.15, 0.25, 0.35, 0.45, 0.55, 0.65, 0.75, 0.85, 0.95]
    demo_heights = [
        ('1', [1, 1, 0, 0, 0, 0, 0, 0, 0, 1]),
        ('2', [10, 9, 8, 7, 7, 6, 3, 3, 3, 5]),
        ('3', [10, 11, 12, 20, 15, 4, 3, 3, 3, 3]),
    ]
    for suffix, heights in demo_heights:
        graph_fac.make_histogram(
            xbar,
            [heights],
            ['Example ' + suffix],
            'DemoDist',
            0.1,
            1,
            'Fragment Core Number Fraction',
            suffix,
            dict(),
            None
        )
def main():
    """Script entry point: draw the demo distribution histograms."""
    plot_demo_distributions()
if __name__ == '__main__':
    main()
|
andrewmbuchan4REPO_NAMEPyllutedWD_PublicPATH_START.@PyllutedWD_Public_extracted@PyllutedWD_Public-main@src@plot_demo_distributions.py@.PATH_END.py
|
{
"filename": "ldmodel.py",
"repo_name": "hpparvi/ldtk",
"repo_path": "ldtk_extracted/ldtk-master/ldtk/ldmodel.py",
"type": "Python"
}
|
"""
Limb darkening toolkit
Copyright (C) 2015 Hannu Parviainen <hpparvi@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from numba import njit
from numpy import ndarray, sqrt, zeros, power, exp, log, log2
# Model implementations
# =====================
# shape = [ipv, ipb, icf]
@njit
def evaluate_ld(ldm, mu, pvo):
    """Evaluate limb darkening model ``ldm`` for every parameter set and
    passband in ``pvo``, returning an array of shape (npv, npb, mu.size)."""
    # Normalise the parameter array to shape (n_pv_sets, n_passbands, n_coeffs).
    if pvo.ndim == 1:
        pv = pvo.reshape((1, 1, -1))
    elif pvo.ndim == 2:
        pv = pvo.reshape((1, pvo.shape[0], pvo.shape[1]))
    else:
        pv = pvo
    npv = pv.shape[0]
    npb = pv.shape[1]
    ldp = zeros((npv, npb, mu.size))
    # Evaluate the model once per (parameter set, passband) pair.
    for ipv in range(npv):
        for ipb in range(npb):
            ldp[ipv, ipb, :] = ldm(mu, pv[ipv, ipb])
    return ldp
@njit(fastmath=True)
def ld_linear(mu, pv):
    """Linear limb darkening law: I(mu) = 1 - u*(1 - mu)."""
    shading = pv[0] * (1. - mu)
    return 1. - shading
@njit(fastmath=True)
def ld_quadratic(mu, pv):
    """Quadratic limb darkening law with coefficients (u, v)."""
    t = 1. - mu
    return 1. - pv[0] * t - pv[1] * t ** 2
@njit(fastmath=True)
def ld_quadratic_tri(mu, pv):
    """Quadratic law with the Kipping (2013) triangular parametrisation (q1, q2)."""
    # Map the sampling parameters (q1, q2) back to the quadratic (u, v).
    a = sqrt(pv[0])
    b = 2 * pv[1]
    coeff_u = a * b
    coeff_v = a * (1. - b)
    t = 1. - mu
    return 1. - coeff_u * t - coeff_v * t ** 2
@njit(fastmath=True)
def ld_nonlinear(mu, pv):
    """Four-parameter nonlinear limb darkening law (Claret 2000)."""
    sqrt_mu = sqrt(mu)
    return (1. - pv[0] * (1. - sqrt_mu)
               - pv[1] * (1. - mu)
               - pv[2] * (1. - power(mu, 1.5))
               - pv[3] * (1. - mu ** 2))
@njit(fastmath=True)
def ld_general(mu, pv):
    """General limb darkening law (Gimenez 2006) with an arbitrary number of coefficients."""
    profile = zeros(mu.size)
    for k in range(pv.size):
        profile += pv[k] * (1.0 - mu ** (k + 1))
    return profile
@njit(fastmath=True)
def ld_square_root(mu, pv):
    """Square-root limb darkening law (van Hamme 1993)."""
    sqrt_mu = sqrt(mu)
    return 1. - pv[0] * (1. - mu) - pv[1] * (1. - sqrt_mu)
@njit(fastmath=True)
def ld_logarithmic(mu, pv):
    """Logarithmic limb darkening law."""
    linear_term = pv[0] * (1. - mu)
    log_term = pv[1] * mu * log(mu)
    return 1. - linear_term - log_term
@njit(fastmath=True)
def ld_exponential(mu, pv):
    """Exponential limb darkening law."""
    linear_term = pv[0] * (1. - mu)
    exp_term = pv[1] / (1. - exp(mu))
    return 1. - linear_term - exp_term
@njit(fastmath=True)
def ld_power_2(mu, pv):
    """Power-2 limb darkening law (Morello et al. 2017): 1 - c*(1 - mu**alpha)."""
    c = pv[0]
    alpha = pv[1]
    return 1. - c * (1. - mu ** alpha)
@njit(fastmath=True)
def ld_power_2_pm(mu, pv):
    """Power-2 law in the (h1, h2) parametrisation of Maxted (2018)."""
    # Recover the native power-2 coefficients from (h1, h2).
    c = 1 - pv[0] + pv[1]
    alpha = log2(c / pv[1])
    return 1. - c * (1. - mu ** alpha)
# Model Classes
# =============
class LDModel(object):
    """Abstract base class for the limb darkening models."""
    # Number of model coefficients (None when the count is variable).
    npar = None
    # Long model name, used as a registry key.
    name = None
    # Short model abbreviation.
    abbr = None
    def __init__(self):
        raise NotImplementedError
    def __call__(self, mu, pv):
        raise NotImplementedError
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the model at `mu` for coefficients `pv`; implemented by subclasses."""
        raise NotImplementedError
class LinearModel(LDModel):
    """Linear limb darkening model (Mandel & Agol, 2001)."""
    npar = 1
    name = 'linear'
    abbr = 'ln'
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the linear law; returns an (npv, npb, nmu) array."""
        return evaluate_ld(ld_linear, mu, pv)
class QuadraticModel(LDModel):
    """Quadratic limb darkening model (Kopal, 1950)."""
    npar = 2
    name = 'quadratic'
    abbr = 'qd'
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the quadratic law; returns an (npv, npb, nmu) array."""
        return evaluate_ld(ld_quadratic, mu, pv)
class TriangularQuadraticModel(LDModel):
    """Quadratic limb darkening model with the parametrisation described by Kipping (MNRAS 435, 2013)."""
    npar = 2
    name = 'triangular_quadratic'
    abbr = 'tq'
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the triangular-quadratic law; returns an (npv, npb, nmu) array."""
        return evaluate_ld(ld_quadratic_tri, mu, pv)
class SquareRootModel(LDModel):
    """Square root limb darkening model (van Hamme, 1993)."""
    npar = 2
    name = 'sqrt'
    abbr = 'sq'
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the square-root law; returns an (npv, npb, nmu) array."""
        return evaluate_ld(ld_square_root, mu, pv)
class NonlinearModel(LDModel):
    """Nonlinear limb darkening model (Claret, 2000)."""
    npar = 4
    name = 'nonlinear'
    abbr = 'nl'
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the four-parameter nonlinear law; returns an (npv, npb, nmu) array."""
        return evaluate_ld(ld_nonlinear, mu, pv)
class GeneralModel(LDModel):
    """General limb darkening model (Gimenez, 2006)"""
    # npar is None: the law accepts any number of coefficients.
    npar = None
    name = 'general'
    abbr = 'ge'
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the general law; returns an (npv, npb, nmu) array."""
        return evaluate_ld(ld_general, mu, pv)
class Power2Model(LDModel):
    """Power-2 limb darkening model (Morello et al, 2017)."""
    npar = 2
    name = 'power2'
    abbr = 'p2'
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the power-2 law; returns an (npv, npb, nmu) array."""
        return evaluate_ld(ld_power_2, mu, pv)
class Power2MPModel(LDModel):
    """Power-2 limb darkening model with an alternative parametrisation (Maxted, P.F.L., 2018)."""
    npar = 2
    name = 'power2mp'
    abbr = 'p2mp'
    @classmethod
    def evaluate(cls, mu: ndarray, pv: ndarray) -> ndarray:
        """Evaluate the (h1, h2)-parametrised power-2 law; returns an (npv, npb, nmu) array."""
        return evaluate_ld(ld_power_2_pm, mu, pv)
# Registry mapping model-name keys to model classes.
# NOTE(review): the triangular quadratic model is registered under the key
# 'triquadratic' although its class declares name = 'triangular_quadratic' —
# confirm which spelling callers rely on.
models = {'linear': LinearModel,
          'quadratic': QuadraticModel,
          'triquadratic': TriangularQuadraticModel,
          'sqrt': SquareRootModel,
          'nonlinear': NonlinearModel,
          'general': GeneralModel,
          'power2': Power2Model,
          'power2mp': Power2MPModel}
|
hpparviREPO_NAMEldtkPATH_START.@ldtk_extracted@ldtk-master@ldtk@ldmodel.py@.PATH_END.py
|
{
"filename": "caltables.py",
"repo_name": "caracal-pipeline/caracal",
"repo_path": "caracal_extracted/caracal-master/caracal/dispatch_crew/caltables.py",
"type": "Python"
}
|
import caracal.dispatch_crew.catalog_parser as cp
import caracal
import os
# Paths to the calibrator catalogue files shipped with the package.
__DB_FILENAME = os.path.join(
    caracal.pckgdir, "data/southern_calibrators.txt")
__DB_CASA_FILENAME = os.path.join(
    caracal.pckgdir, "data/casa_calibrators.txt")
# Lazily-populated module-level caches for the parsed catalogues.
__CALIBRATOR_DB = None
__CASA_CALIBRATOR_DB = None
def calibrator_database():
    """ Return the Southern standard calibrator database """
    global __CALIBRATOR_DB
    # Lazy initialisation: parse the catalogue file only on first use and
    # cache it at module level. There isn't a Southern standard in CASA, so
    # a little database of these calibrators is kept for reference.
    if __CALIBRATOR_DB is None:
        caracal.log.info("Obtaining divine knowledge from %s" % __DB_FILENAME)
        __CALIBRATOR_DB = cp.catalog_parser(__DB_FILENAME)
    return __CALIBRATOR_DB
def casa_calibrator_database():
    """ Return the CASA standard calibrator database """
    global __CASA_CALIBRATOR_DB
    # Same lazy-load pattern as calibrator_database.
    if __CASA_CALIBRATOR_DB is None:
        caracal.log.info("Obtaining divine knowledge from %s" % __DB_CASA_FILENAME)
        __CASA_CALIBRATOR_DB = cp.catalog_parser(__DB_CASA_FILENAME)
    return __CASA_CALIBRATOR_DB
|
caracal-pipelineREPO_NAMEcaracalPATH_START.@caracal_extracted@caracal-master@caracal@dispatch_crew@caltables.py@.PATH_END.py
|
{
"filename": "ClassSmearSM.py",
"repo_name": "saopicc/DDFacet",
"repo_path": "DDFacet_extracted/DDFacet-master/DDFacet/Imager/SSD/GA/ClassSmearSM.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DDFacet.compatibility import range
import numpy as np
import scipy.signal
from DDFacet.Imager.SSD import ClassConvMachine
from DDFacet.Other import logger
log=logger.getLogger("ClassSmearSM")
import multiprocessing
import queue
from DDFacet.Array import NpShared
from DDFacet.Array import ModLinAlg
import time
from DDFacet.Other.progressbar import ProgressBar
class ClassSmearSM():
    """Smear an SSD model image: each nonzero model pixel is replaced by the
    widest Gaussian from a fixed library that still reproduces the dirty
    image within a chi^2 tolerance.

    Large arrays are exchanged with worker processes through NpShared under
    the `IdSharedMem` prefix; call CleanUpSHM() when done to release them.
    """
    def __init__(self,MeanResidual,MeanModelImage,PSFServer,DeltaChi2=4.,IdSharedMem="",NCPU=6):
        # Namespace all shared-memory arrays under a "SmearSM." prefix and
        # clear leftovers from any previous run.
        IdSharedMem+="SmearSM."
        NpShared.DelAll(IdSharedMem)
        self.IdSharedMem=IdSharedMem
        self.NCPU=NCPU
        self.MeanModelImage=NpShared.ToShared("%sMeanModelImage"%self.IdSharedMem,MeanModelImage)
        self.MeanResidual=NpShared.ToShared("%sMeanResidual"%self.IdSharedMem,MeanResidual)
        # Estimate the residual RMS from a random subset of pixels.
        NPixStats=10000
        RandomInd=np.int64(np.random.rand(NPixStats)*(MeanResidual.size))
        self.RMS=np.std(np.real(self.MeanResidual.ravel()[RandomInd]))
        self.FWHMMin=3.
        self.PSFServer=PSFServer
        self.DeltaChi2=DeltaChi2
        self.Var=self.RMS**2
        # Side (pixels) of the square stamp used for the Gaussian fits.
        self.NImGauss=31
        self.CubeMeanVariablePSF=NpShared.ToShared("%sCubeMeanVariablePSF"%self.IdSharedMem,self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'])
        self.DicoConvMachine={}
        N=self.NImGauss
        # Pixel coordinate grids centred on the stamp.
        dx,dy=np.mgrid[-(N//2):N//2:1j*N,-(N//2):N//2:1j*N]
        ListPixParms=[(int(dx.ravel()[i]),int(dy.ravel()[i])) for i in range(dx.size)]
        ListPixData=ListPixParms
        ConvMode="Matrix"
        N=self.NImGauss
        #stop
        #for
        #ClassConvMachine():
        #def __init__(self,PSF,ListPixParms,ListPixData,ConvMode):
        # Library of NGauss candidate profiles of increasing width; the first
        # entry is a unit delta function, the rest are normalised Gaussians.
        d=np.sqrt(dx**2+dy**2)
        self.dist=d
        self.NGauss=10
        GSig=np.linspace(0.,2,self.NGauss)
        self.GSig=GSig
        ListGauss=[]
        One=np.zeros_like(d)
        One[N//2,N//2]=1.
        ListGauss.append(One)
        for sig in GSig[1::]:
            v=np.exp(-d**2/(2.*sig**2))
            Sv=np.sum(v)
            v/=Sv
            ListGauss.append(v)
        self.ListGauss=ListGauss
        # One convolution machine per facet, built from the central patch of
        # that facet's mean PSF; its matrix is pushed to shared memory for
        # the workers.
        print("Declare convolution machines", file=log)
        NJobs=self.PSFServer.NFacets
        pBAR= ProgressBar(Title=" Declare ")
        #pBAR.disable()
        pBAR.render(0, '%4i/%i' % (0,NJobs))
        for iFacet in range(self.PSFServer.NFacets):
            #print iFacet,"/",self.PSFServer.NFacets
            PSF=self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'][iFacet]#[0,0]
            _,_,NPixPSF,_=PSF.shape
            PSF=PSF[:,:,NPixPSF//2-N:NPixPSF//2+N+1,NPixPSF//2-N:NPixPSF//2+N+1]
            #print PSF.shape
            #sig=1
            #PSF=(np.exp(-self.dist**2/(2.*sig**2))).reshape(1,1,N,N)
            self.DicoConvMachine[iFacet]=ClassConvMachine.ClassConvMachine(PSF,ListPixParms,ListPixData,ConvMode)
            CM=self.DicoConvMachine[iFacet].CM
            NpShared.ToShared("%sCM_Facet%4.4i"%(self.IdSharedMem,iFacet),CM)
            #invCM=ModLinAlg.invSVD(np.float64(CM[0,0]))/self.Var
            #NpShared.ToShared("%sInvCov_Facet%4.4i"%(self.IdSharedMem,iFacet),invCM)
            NDone=iFacet+1
            intPercent=int(100* NDone / float(NJobs))
            pBAR.render(intPercent, '%4i/%i' % (NDone,NJobs))
        # Mean-PSF machine and its regularised inverse covariance, shared
        # with the workers for the chi^2 computation.
        PSFMean=np.mean(self.PSFServer.DicoVariablePSF['CubeMeanVariablePSF'],axis=0)
        self.ConvMachineMeanPSF=ClassConvMachine.ClassConvMachine(PSFMean,ListPixParms,ListPixData,ConvMode)
        CM=self.ConvMachineMeanPSF.CM
        invCM=ModLinAlg.invSVD(np.float64(CM[0,0]),Cut=1e-8)/self.Var
        NpShared.ToShared("%sInvCov_AllFacet"%(self.IdSharedMem),invCM)
        self.FindSupport()
    def CleanUpSHM(self):
        # Release every shared-memory array created under this prefix.
        NpShared.DelAll(self.IdSharedMem)
    def FindSupport(self):
        """Measure the FWHM of the mean-PSF deconvolution response and build
        the candidate Gaussians convolved with the matching restoring beam."""
        ConvMachine=self.CurrentConvMachine=self.ConvMachineMeanPSF
        N=self.NImGauss
        # Dirty image of a unit point source at the stamp centre.
        Dirty=np.zeros((N,N),dtype=np.float32)
        Dirty[N//2,N//2]=1.
        Dirty=self.CurrentConvMachine.Convolve(Dirty.reshape(1,Dirty.size)).reshape((N,N))
        InvCov=ConvMachine.GiveInvertCov(1.)
        Sol=np.dot(InvCov,Dirty.reshape((Dirty.size,1))).reshape((N,N))
        # 1-D profile from the centre outwards (sum of the two central cuts).
        Profile=(Sol[N//2,:]+Sol[:,N//2])[N//2:]
        xp=np.arange(N//2+1)
        Val=np.max(Profile)/2.
        xx=np.linspace(0,N//2,1000)
        a=np.interp(xx,xp,Profile-Val)
        # Index where the interpolated profile crosses half maximum.
        ind=np.where(np.abs(a)==np.min(np.abs(a)))[0]
        # NOTE(review): this doubles the profile *value* at the crossing, not
        # the crossing *position* (xx[ind[0]]*2) — looks suspicious; in
        # practice FWHM is then clipped to FWHMMin below. Confirm intent.
        FWHM=a[ind[0]]*2
        if FWHM<self.FWHMMin: FWHM=self.FWHMMin
        self.RestoreFWHM=FWHM
        # Convert FWHM to Gaussian sigma.
        self.SigMin=(FWHM/2.)/np.sqrt(2.*np.log(2.))
        RestoringBeam=(np.exp(-self.dist**2/(2.*self.SigMin**2))).reshape(N,N)
        # Pre-convolve every candidate Gaussian with the restoring beam.
        ListRestoredGauss=[]
        for Func in self.ListGauss:
            v=scipy.signal.fftconvolve(Func, RestoringBeam, mode='same')
            ListRestoredGauss.append(v)
        self.ListRestoredGauss=ListRestoredGauss
        print("Support for restoring beam: %5.2f pixels (sigma = %5.2f pixels)"%(FWHM,self.SigMin), file=log)
        # import pylab
        # pylab.clf()
        # pylab.plot(xp,Profile)
        # pylab.scatter(xx,a)
        # # pylab.subplot(1,2,1)
        # # pylab.imshow(Dirty,interpolation="nearest")
        # # pylab.colorbar()
        # # vmax=Sol.max()
        # # pylab.subplot(1,2,2)
        # # pylab.imshow(Sol,interpolation="nearest",vmax=vmax,vmin=-0.1*vmax)
        # # pylab.colorbar()
        # pylab.draw()
        # pylab.show(False)
        # pylab.pause(0.1)
        # stop
    def Smear(self,Parallel=True):
        """Fit a Gaussian to every nonzero model pixel (optionally in
        parallel worker processes) and return the smeared model image."""
        if Parallel:
            NCPU=self.NCPU
        else:
            NCPU=1
        StopWhenQueueEmpty=True
        print("Building queue", file=log)
        self.ModelOut=np.zeros_like(self.MeanModelImage)
        indx,indy=np.where(self.MeanModelImage[0,0]!=0)
        #indx,indy=np.where(self.MeanModelImage==np.max(self.MeanModelImage))
        work_queue = multiprocessing.Queue()
        result_queue=multiprocessing.Queue()
        # Batch the pixels into shared-memory queues of at most SizeMax jobs.
        SizeMax=int(indx.size/float(NCPU)/100.)
        SizeMax=np.max([SizeMax,1])
        iPix=0
        iQueue=0
        Queue=[]
        while iPix<indx.size:
            xc,yc=indx[iPix],indy[iPix]
            FacetID=self.PSFServer.giveFacetID2(xc,yc)
            Queue.append([xc,yc,FacetID])
            iPix+=1
            if (len(Queue)==SizeMax)|(iPix==indx.size):
                NpShared.ToShared("%sQueue_%3.3i"%(self.IdSharedMem,iQueue),np.array(Queue))
                work_queue.put(iQueue)
                Queue=[]
                iQueue+=1
        NJobs=indx.size
        workerlist=[]
        pBAR= ProgressBar(Title=" Find gaussian")
        pBAR.render(0, '%4i/%i' % (0,NJobs))
        for ii in range(NCPU):
            W=WorkerSmear(work_queue,
                          result_queue,
                          IdSharedMem=self.IdSharedMem,
                          StopWhenQueueEmpty=StopWhenQueueEmpty,
                          NImGauss=self.NImGauss,
                          DeltaChi2=self.DeltaChi2,
                          ListGauss=self.ListGauss,
                          GSig=self.GSig,
                          Var=self.Var,
                          SigMin=self.SigMin)
            workerlist.append(W)
            if Parallel:
                workerlist[ii].start()
            else:
                # Serial mode: execute the worker loop in this process.
                workerlist[ii].run()
        N=self.NImGauss
        iResult=0
        success = True
        try:
            # Collect per-queue results; each queue entry now carries the
            # fitted Gaussian index in its third column.
            while iResult < NJobs:
                DicoResult=None
                try:
                    DicoResult=result_queue.get_nowait()
                except queue.Empty:
                    time.sleep(.1)
                    continue
                except Exception as e:
                    print("The following unhandled exception occured.", file=log)
                    import traceback
                    traceback.print_tb(e.__traceback__, file=log)
                    success = False
                    break
                if DicoResult is not None and DicoResult["Success"]:
                    iQueue=DicoResult["iQueue"]
                    Queue=NpShared.GiveArray("%sQueue_%3.3i"%(self.IdSharedMem,iQueue))
                    for iJob in range(Queue.shape[0]):
                        x0,y0,iGauss=Queue[iJob]
                        SMax=self.MeanModelImage[0,0,x0,y0]
                        SubModelOut=self.ModelOut[0,0][x0-N//2:x0+N//2+1,y0-N//2:y0+N//2+1]
                        # NOTE(review): both the restored and the raw Gaussian
                        # are accumulated here, so the component flux appears
                        # to be added twice — confirm this is intentional.
                        SubModelOut+=self.ListRestoredGauss[iGauss]*SMax
                        SubModelOut+=self.ListGauss[iGauss]*SMax
                        iResult+=1
                    NDone=iResult
                    intPercent=int(100* NDone / float(NJobs))
                    pBAR.render(intPercent, '%4i/%i' % (NDone,NJobs))
        finally:
            # Always tear the workers down, even on failure.
            for ii in range(NCPU):
                try:
                    workerlist[ii].shutdown()
                    workerlist[ii].terminate()
                    workerlist[ii].join()
                except Exception as e:
                    print("The following unhandled exception occured.", file=log)
                    import traceback
                    traceback.print_tb(e.__traceback__, file=log)
        if not success:
            raise RuntimeError("Some parallel jobs have failed. Check your log and report the issue if "
                               "not a memory issue. Bus errors indicate memory allocation errors")
        return self.ModelOut
# ##############################################################################################################
# ##############################################################################################################
# ##############################################################################################################
# ##############################################################################################################
class WorkerSmear(multiprocessing.Process):
    """Worker process that, for each queued model pixel, finds the widest
    Gaussian from the shared library compatible with the dirty image.

    All large arrays are read from NpShared shared memory under the
    `IdSharedMem` prefix; results are written back into the shared queue
    arrays (third column = fitted Gaussian index).
    """
    def __init__(self,
                 work_queue,
                 result_queue,
                 IdSharedMem=None,
                 StopWhenQueueEmpty=False,
                 NImGauss=31,
                 DeltaChi2=4.,
                 ListGauss=None,
                 GSig=None,
                 SigMin=None,
                 Var=None):
        multiprocessing.Process.__init__(self)
        self.work_queue = work_queue
        self.result_queue = result_queue
        self.kill_received = False
        self.exit = multiprocessing.Event()
        self.IdSharedMem=IdSharedMem
        self.StopWhenQueueEmpty=StopWhenQueueEmpty
        # Attach to the arrays published by ClassSmearSM.
        self.CubeMeanVariablePSF=NpShared.GiveArray("%sCubeMeanVariablePSF"%self.IdSharedMem)
        self.MeanModelImage=NpShared.GiveArray("%sMeanModelImage"%self.IdSharedMem)
        self.MeanResidual=NpShared.GiveArray("%sMeanResidual"%self.IdSharedMem)
        self.NImGauss=NImGauss
        self.DeltaChi2=DeltaChi2
        self.ListGauss=ListGauss
        self.NGauss=len(ListGauss)
        self.GSig=GSig
        self.SigMin=SigMin
        self.Var=Var
    def shutdown(self):
        # Signal the process to exit (checked via the multiprocessing Event).
        self.exit.set()
    def CondContinue(self):
        """Return True while the worker should keep polling the work queue."""
        if self.StopWhenQueueEmpty:
            return not(self.work_queue.qsize()==0)
        else:
            return True
    def GiveChi2(self,Resid):
        """Return the chi^2 of a residual stamp assuming uncorrelated noise
        of variance `self.Var`.

        FIX: the original carried an unreachable inverse-covariance quadratic
        form after the return statement; that dead code has been removed.
        """
        Chi2=np.sum(Resid**2)/self.Var
        return Chi2
    def GiveConv(self,SubModelOrig):
        """Convolve a flattened N*N model stamp with the current facet's
        convolution matrix and return the (N, N) result."""
        N=self.NImGauss
        ConvModel=np.dot(self.CurrentCM[0,0],SubModelOrig.reshape((SubModelOrig.size,1))).reshape((N,N))
        return ConvModel
    def SmearThisComp(self,x0,y0):
        """Return the index of the widest library Gaussian that, substituted
        for the model pixel at (x0, y0), keeps the local chi^2 within
        DeltaChi2 of the best value."""
        FacetID=self.CurrentFacetID
        PSF=self.CubeMeanVariablePSF[FacetID][0,0]
        N=self.NImGauss
        SubResid=self.MeanResidual[0,0][x0-N//2:x0+N//2+1,y0-N//2:y0+N//2+1]
        SubModelOrig=self.MeanModelImage[0,0][x0-N//2:x0+N//2+1,y0-N//2:y0+N//2+1].copy()
        xc=yc=N//2
        NPSF,_=PSF.shape
        xcPSF=ycPSF=NPSF//2
        # NOTE: SubPSF is extracted for reference but not used below.
        SubPSF=PSF[xcPSF-N//2:xcPSF+N//2+1,ycPSF-N//2:ycPSF+N//2+1]
        # Reconstruct the local dirty image: residual + model convolved by PSF.
        ConvModel=self.GiveConv(SubModelOrig)
        Dirty=SubResid+ConvModel
        DeltaChi2=self.DeltaChi2
        Chi2Min=self.GiveChi2(SubResid)
        SMax=SubModelOrig[xc,yc]
        # Zero the central pixel; candidate Gaussians are scaled by its flux.
        SubModel0=SubModelOrig.copy()
        SubModel0[xc,yc]=0
        iGauss=0
        Chi2=Chi2Min
        while True:
            if iGauss==self.NGauss-1:
                break
            v=self.ListGauss[iGauss]
            Add=v*SMax
            ModifiedSubModel=SubModel0+Add
            ConvModel=self.GiveConv(ModifiedSubModel)
            ThisDirty=ConvModel
            ThisResid=Dirty-ThisDirty
            Chi2=self.GiveChi2(ThisResid)#/Chi2Min
            if Chi2/Chi2Min> DeltaChi2:#Chi2Min+DeltaChi2:
                break
            iGauss+=1
        # Reject widths below the restoring-beam sigma: fall back to a delta.
        if self.GSig[iGauss]<self.SigMin:
            iGauss=0
        return iGauss
    def run(self):
        """Main worker loop: pull queue indices, fit each pixel, write the
        Gaussian index back into the shared queue array."""
        success = True
        while not self.kill_received and self.CondContinue():
            try:
                iQueue = self.work_queue.get_nowait()
            except queue.Empty:
                time.sleep(.1)
                continue
            except Exception as e:
                print("The following unhandled exception occured.", file=log)
                import traceback
                traceback.print_tb(e.__traceback__, file=log)
                success = False
                break
            Queue=NpShared.GiveArray("%sQueue_%3.3i"%(self.IdSharedMem,iQueue))
            self.CurrentInvCov=NpShared.GiveArray("%sInvCov_AllFacet"%(self.IdSharedMem))
            for iJob in range(Queue.shape[0]):
                x0,y0,FacetID=Queue[iJob]
                iFacet=FacetID
                self.CurrentFacetID=FacetID
                self.CurrentCM=NpShared.GiveArray("%sCM_Facet%4.4i"%(self.IdSharedMem,iFacet))
                iGauss=self.SmearThisComp(x0,y0)
                Queue[iJob,2]=iGauss
            self.result_queue.put({"Success":True,"iQueue":iQueue})
        if not success:
            raise RuntimeError("Some parallel jobs have failed. Check your log and report the issue if "
                               "not a memory issue. Bus errors indicate memory allocation errors")
|
saopiccREPO_NAMEDDFacetPATH_START.@DDFacet_extracted@DDFacet-master@DDFacet@Imager@SSD@GA@ClassSmearSM.py@.PATH_END.py
|
{
"filename": "test_langsmith.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/tests/unit_tests/document_loaders/test_langsmith.py",
"type": "Python"
}
|
import datetime
import uuid
from unittest.mock import MagicMock, patch
from langsmith.schemas import Example
from langchain_core.document_loaders import LangSmithLoader
from langchain_core.documents import Document
def test_init() -> None:
    """Smoke test: constructing a LangSmithLoader with only an API key must not raise."""
    LangSmithLoader(api_key="secret")
# Shared fixture: three dataset examples with a nested "first.second" input
# key, consumed by test_lazy_load via the mocked list_examples call.
EXAMPLES = [
    Example(
        inputs={"first": {"second": "foo"}},
        outputs={"res": "a"},
        dataset_id=uuid.uuid4(),
        id=uuid.uuid4(),
        created_at=datetime.datetime.now(),
    ),
    Example(
        inputs={"first": {"second": "bar"}},
        outputs={"res": "b"},
        dataset_id=uuid.uuid4(),
        id=uuid.uuid4(),
        created_at=datetime.datetime.now(),
    ),
    Example(
        inputs={"first": {"second": "baz"}},
        outputs={"res": "c"},
        dataset_id=uuid.uuid4(),
        id=uuid.uuid4(),
        created_at=datetime.datetime.now(),
    ),
]
@patch("langsmith.Client.list_examples", MagicMock(return_value=iter(EXAMPLES)))
def test_lazy_load() -> None:
    """lazy_load must extract the nested content key, apply the formatter,
    and carry the stringified example fields as metadata."""
    loader = LangSmithLoader(
        api_key="dummy",
        dataset_id="mock",
        content_key="first.second",
        format_content=(lambda x: x.upper()),
    )

    def _expected_metadata(example):
        # Mirror the loader: keep dicts and falsy values, stringify the rest
        # (UUIDs, datetimes).
        return {
            k: v if not v or isinstance(v, dict) else str(v)
            for k, v in example.dict().items()
        }

    expected = [
        Document(
            ex.inputs["first"]["second"].upper(),
            metadata=_expected_metadata(ex),
        )
        for ex in EXAMPLES
    ]
    assert expected == list(loader.lazy_load())
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@tests@unit_tests@document_loaders@test_langsmith.py@.PATH_END.py
|
{
"filename": "subtitle.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/document_loaders/subtitle.ipynb",
"type": "Jupyter Notebook"
}
|
# Subtitle
>[The SubRip file format](https://en.wikipedia.org/wiki/SubRip#SubRip_file_format) is described on the `Matroska` multimedia container format website as "perhaps the most basic of all subtitle formats." `SubRip (SubRip Text)` files are named with the extension `.srt`, and contain formatted lines of plain text in groups separated by a blank line. Subtitles are numbered sequentially, starting at 1. The timecode format used is hours:minutes:seconds,milliseconds with time units fixed to two zero-padded digits and fractions fixed to three zero-padded digits (00:00:00,000). The fractional separator used is the comma, since the program was written in France.
How to load data from subtitle (`.srt`) files
Please, download the [example .srt file from here](https://www.opensubtitles.org/en/subtitles/5575150/star-wars-the-clone-wars-crisis-at-the-heart-en).
```python
%pip install --upgrade --quiet pysrt
```
```python
from langchain_community.document_loaders import SRTLoader
```
```python
loader = SRTLoader(
"example_data/Star_Wars_The_Clone_Wars_S06E07_Crisis_at_the_Heart.srt"
)
```
```python
docs = loader.load()
```
```python
docs[0].page_content[:100]
```
'<i>Corruption discovered\nat the core of the Banking Clan!</i> <i>Reunited, Rush Clovis\nand Senator A'
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@document_loaders@subtitle.ipynb@.PATH_END.py
|
{
"filename": "HIPfromSimbad.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/EXOSIMS/StarCatalog/HIPfromSimbad.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from EXOSIMS.Prototypes.StarCatalog import StarCatalog
from astroquery.simbad import Simbad
from astroquery.vizier import Vizier
# Vizier query restricted to the Hipparcos-2 catalogue columns used below.
v = Vizier(columns=["Plx", "B-V", "Hpmag"], catalog="I/311/hip2")
# Simbad.reset_votable_fields()
Simbad.add_votable_fields(
    "typed_id",  # queries value (i.e. HP)
    "flux(V)",  # m_V
    "flux(B)",  # m_B
    "flux(R)",  # m_R
    "flux(I)",  # m_I
    "flux(H)",  # m_H
    "flux(J)",  # m_J
    "flux(K)",  # m_K
    "distance",  # parsecs
    "flux_bibcode(V)",  # flux citation
    "flux_error(V)",  # v-band uncertainty
    "sp",  # spectral type
)
class HIPfromSimbad(StarCatalog):
    """
    Catalog generator class that uses astroquery to get stellar properties from SIMBAD

    Args:
        HIP (list or string):
            List of Hipparcos identifiers (HIP numbers) or path to text file.
        **specs:
            :ref:`sec:inputspec`

    Example file format:
        ```HIP 37279```
        ```HIP 97649```
    """

    def __init__(self, catalogpath=None, **specs):
        if catalogpath is None:
            raise ValueError("catalogpath keyword must be specified for HIPfromSimbad")
        # classpath = os.path.split(inspect.getfile(self.__class__))[0]
        # filename = 'hip.csv'
        # catalogpath = os.path.join(classpath, filename)
        if isinstance(catalogpath, str):
            # Text file: one "HIP NNNN" identifier per line.
            HIP = np.loadtxt(catalogpath, delimiter=",", dtype="str")
            if HIP[0][:3] != "HIP":
                raise ValueError(
                    "First value in list is not explicitly an HIP Identifier"
                )
            HIP_names = [HIP[i] for i in range(len(HIP))]
        elif isinstance(catalogpath, list):
            # List of HIP numbers: build the identifier strings.
            HIP_names = ["HIP " + str(catalogpath[i]) for i in range(len(catalogpath))]
        else:
            raise ValueError(
                (
                    "Input is neither a list of integers nor a path to a list of "
                    "HIP identifier strings"
                )
            )
        print(HIP_names)
        StarCatalog.__init__(self, ntargs=len(HIP_names), **specs)
        simbad_list = Simbad.query_objects(HIP_names)
        BV = []
        # fill in distances
        for i, targ in enumerate(simbad_list["Distance_distance"]):
            try:
                # Prefer the Hipparcos-2 parallax distance and Hp magnitude
                # from Vizier over the SIMBAD default values.
                result = v.query_object(simbad_list["TYPED_ID"][i])["I/311/hip2"]
                d = 1000 / result["Plx"]
                simbad_list["Distance_distance"][i] = d.data.data[0]
                simbad_list["Distance_method"][i] = "hip2"
                BV.append(result["B-V"].data.data[0])
                simbad_list["FLUX_V"][i] = result["Hpmag"].data.data[0]
            except Exception as err:
                print("simbad_list" + simbad_list["TYPED_ID"][i])
                print("Exception returned in Vizier Query for query:")
                print(err)
                # NOTE(review): d is assigned here but never used afterwards,
                # and no placeholder is appended to BV, so self.BV can end up
                # shorter than the target list when a query fails — confirm.
                d = np.nan
        data = simbad_list
        # Distance to the planetary system in units of parsecs
        self.dist = simbad_list["Distance_distance"].data.data * u.pc
        # print(simbad_list['RA'].data.data)
        self.coords = SkyCoord(
            ra=simbad_list["RA"].data.data,
            dec=simbad_list["DEC"].data.data,
            distance=self.dist,
            unit=(u.hourangle, u.deg, u.pc),
        )
        # Right Ascension of the planetary system in decimal degrees
        # Declination of the planetary system in decimal degrees
        # self.pmra = data['st_pmra'].data*u.mas/u.yr
        # Angular change in right ascension over time as seen from the center of
        # mass of the Solar System, units (mas/yr)
        # self.pmdec = data['st_pmdec'].data*u.mas/u.yr #Angular change in declination
        # over time as seen from the center of mass of the Solar System, units (mas/yr)
        # Luminosity placeholder: filled with NaN (no bolometric data queried).
        self.L = np.empty(data["SP_TYPE"].size)
        self.L[:] = np.nan
        # data['st_lbol'].data
        # Amount of energy emitted by a star per unit time, measured in units of solar
        # luminosities. The bolometric corrections are derived from V-K or B-V colors,
        # units [log(solar)]
        # list of non-astropy attributes
        self.Name = np.array(
            HIP_names
        )  # Name of the star as given by the Hipparcos Catalog.
        self.Spec = np.array(data["SP_TYPE"]).astype(str)
        # Classification of the star based on their spectral characteristics following
        # the Morgan-Keenan system
        self.Vmag = np.array(data["FLUX_V"].data.data)  # V mag
        self.Jmag = np.array(data["FLUX_J"].data.data)  # Stellar J Magnitude Value
        self.Hmag = np.array(data["FLUX_H"].data.data)  # Stellar H Magnitude Value
        self.Imag = np.array(data["FLUX_I"].data.data)  # Stellar I Magnitude Value
        self.Bmag = np.array(data["FLUX_B"].data.data)
        self.Kmag = np.array(data["FLUX_K"].data.data)
        self.BV = np.array(BV)
        # data['BV'] #Color of the star as measured by the difference between B and V
        # bands, units of [mag]
        # absolute V mag
        self.MV = self.Vmag - 5.0 * (np.log10(self.dist.to("pc").value) - 1.0)
        # self.Teff = data['st_teff']
        # st_mbol Apparent magnitude of the star at a distance of 10 parsec
        # units of [mag]
        # self.BC = -self.Vmag + data['st_mbol'] # bolometric correction
        # self.stellar_diameters = data['st_rad']*2.*R_sun # stellar_diameters
        # in solar diameters
        # self.Binary_Cut = ~data['wds_sep'].mask #WDS (Washington Double Star) C
        # atalog separation (arcsecs)
        # save original data
        self.data = np.array(data)
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@EXOSIMS@StarCatalog@HIPfromSimbad.py@.PATH_END.py
|
{
"filename": "V1_7_Samples.md",
"repo_name": "hpc4cmb/toast",
"repo_path": "toast_extracted/toast-main/src/libtoast/gtest/googletest/docs/V1_7_Samples.md",
"type": "Markdown"
}
|
If you're like us, you'd like to look at some Google Test sample code. The
[samples folder](../samples) has a number of well-commented samples showing how to use a
variety of Google Test features.
* [Sample #1](../samples/sample1_unittest.cc) shows the basic steps of using Google Test to test C++ functions.
* [Sample #2](../samples/sample2_unittest.cc) shows a more complex unit test for a class with multiple member functions.
* [Sample #3](../samples/sample3_unittest.cc) uses a test fixture.
* [Sample #4](../samples/sample4_unittest.cc) is another basic example of using Google Test.
* [Sample #5](../samples/sample5_unittest.cc) teaches how to reuse a test fixture in multiple test cases by deriving sub-fixtures from it.
* [Sample #6](../samples/sample6_unittest.cc) demonstrates type-parameterized tests.
* [Sample #7](../samples/sample7_unittest.cc) teaches the basics of value-parameterized tests.
* [Sample #8](../samples/sample8_unittest.cc) shows using `Combine()` in value-parameterized tests.
* [Sample #9](../samples/sample9_unittest.cc) shows use of the listener API to modify Google Test's console output and the use of its reflection API to inspect test results.
* [Sample #10](../samples/sample10_unittest.cc) shows use of the listener API to implement a primitive memory leak checker.
|
hpc4cmbREPO_NAMEtoastPATH_START.@toast_extracted@toast-main@src@libtoast@gtest@googletest@docs@V1_7_Samples.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergeo/unselected/marker/__init__.py",
"type": "Python"
}
|
import sys
# Plotly uses eager imports on Python < 3.7 and lazy, PEP 562 module-level
# __getattr__ based imports on 3.7+ to keep import time down.
if sys.version_info < (3, 7):
    from ._size import SizeValidator
    from ._opacity import OpacityValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._size.SizeValidator",
            "._opacity.OpacityValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergeo@unselected@marker@__init__.py@.PATH_END.py
|
{
"filename": "util.py",
"repo_name": "jpierel14/space_phot",
"repo_path": "space_phot_extracted/space_phot-main/space_phot/util.py",
"type": "Python"
}
|
import warnings,os,sys,time,glob,shutil
import numpy as np
import urllib.request
import scipy
import webbpsf
import sncosmo
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import astropy
from astropy import wcs
from astropy.io import fits
from astropy.table import Table,vstack
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.stats import sigma_clipped_stats
from astropy.time import Time
from astropy.wcs.utils import skycoord_to_pixel,pixel_to_skycoord
from astropy.nddata import extract_array
import photutils
from photutils import CircularAperture, CircularAnnulus, aperture_photometry
from photutils.psf import EPSFModel
warnings.simplefilter('ignore')
import jwst
from jwst import datamodels
from jwst import source_catalog
from jwst.source_catalog import reference_data
import os
from jwst.datamodels import RampModel, ImageModel
from jwst.pipeline import Detector1Pipeline, Image2Pipeline, Image3Pipeline
from jwst.associations import asn_from_list
from jwst.associations.lib.rules_level3_base import DMS_Level3_Base
from .wfc3_photometry.psf_tools.PSFUtils import make_models
from .wfc3_photometry.psf_tools.PSFPhot import get_standard_psf
# Public names exported by `from space_phot.util import *`; the functions are
# defined further down this module.
__all__ = ['get_jwst_psf','get_hst_psf','get_jwst3_psf','get_hst3_psf','get_jwst_psf_grid',
           'get_jwst_psf_from_grid']
def fancy_background_sub(st_obs,sky_locations=None,pixel_locations=None,width=13,
                         bkg_mode='polynomial',combine_fits=True,do_fit=True,
                         degree=2,h_wht_s = 1,v_wht_s=1,h_wht_p=1,v_wht_p=1,
                         show_plot=False,minval=-np.inf,fudge_center_pre=False,
                         fudge_center_post=False,
                         finalmin=-np.inf):
    """Subtract an interpolated local background around a source in each
    exposure of `st_obs` using the MIRIMBkgInterp machinery, writing the
    background-subtracted cutouts back into the observation in place.

    Either `sky_locations` (SkyCoord, one per exposure or a single one) or
    `pixel_locations` ((x, y) pairs) must be given. Returns the modified
    `st_obs`, the list of nested-sampling fit results (when do_fit=True),
    and the MIRIMBkgInterp instance.

    NOTE(review): the `combine_fits` parameter is accepted but ignored —
    mbi.combine_fits is hard-coded to True below; confirm intent.
    """
    assert sky_locations is not None or pixel_locations is not None, "Must give skycoord or pixel."
    # NOTE(review): hard-coded local path hack for a private module — this
    # will fail on other machines; should be packaged properly.
    sys.path.append('/Users/jpierel/CodeBase/manuscript_jupyter/pearls_sn/background_sub')
    import MIRIMBkgInterp
    mbi = MIRIMBkgInterp.MIRIMBkgInterp()
    # Source sits at the centre of the (width+2)-sized working cutout.
    mbi.src_x = (width+2-1)/2
    mbi.src_y = (width+2-1)/2
    mbi.aper_rad = 3 # radius of aperture around source
    mbi.ann_width = 3 # width of annulus to compute interpolation from
    mbi.bkg_mode=bkg_mode # type of interpolation. Options "none","simple","polynomial"
    mbi.combine_fits = True # use the simple model to attenuate the polynomial model
    mbi.degree = degree # degree of polynomial fit
    mbi.h_wht_s = h_wht_s # horizontal weight of simple model
    mbi.v_wht_s = v_wht_s # vertical weight of simple model
    mbi.h_wht_p = h_wht_p # horizontal weight of polynomial model
    mbi.v_wht_p = v_wht_p # vertical weight of simple model
    # Broadcast a single location to all exposures.
    if pixel_locations is None and not isinstance(sky_locations,(list,tuple,np.ndarray)):
        sky_locations = [sky_locations]*st_obs.n_exposures
    elif isinstance(pixel_locations[0],(int,float)):
        pixel_locations = [pixel_locations]*st_obs.n_exposures
    final_pixels = []
    nests = []
    for i in range(st_obs.n_exposures):
        # Resolve the source position to integer pixel coordinates.
        if pixel_locations is None:
            if st_obs.n_exposures==1:
                wcs = st_obs.wcs
            else:
                wcs = st_obs.wcs_list[i]
            x,y = wcs.world_to_pixel(sky_locations[i])
            x = int(x)
            y = int(y)
        else:
            x,y = pixel_locations[i]
            x = int(x)
            y = int(y)
        #print(x,y)
        # Work on a cutout padded by one pixel on each side (width+2).
        width+=2
        if st_obs.n_exposures==1:
            cutout = st_obs.data[y-int((width-1)/2):y+int((width-1)/2)+1,
                                 x-int((width-1)/2):x+int((width-1)/2)+1]
        else:
            cutout = st_obs.data_arr_pam[i][y-int((width-1)/2):y+int((width-1)/2)+1,
                                            x-int((width-1)/2):x+int((width-1)/2)+1]
        # Mask unphysical pixels below the threshold.
        cutout[cutout<minval] = np.nan
        if fudge_center_pre:
            # Recentre on the brightest pixel in the central 3x3 before
            # fitting, then re-extract the cutout.
            init_center = int((width-1)/2)
            #plt.imshow(cutout)
            #plt.show()
            #plt.imshow(cutout[init_center-1:init_center+2,init_center-1:init_center+2])
            #plt.show()
            maxcell = np.argmax(cutout[init_center-1:init_center+2,init_center-1:init_center+2])
            max_ind = np.unravel_index(maxcell,(3,3))
            x,y = np.array([x,y]) + (np.flip(max_ind)-np.array([1,1]))
            #print(x,y)
            if st_obs.n_exposures==1:
                cutout = st_obs.data[y-int((width-1)/2):y+int((width-1)/2)+1,
                                     x-int((width-1)/2):x+int((width-1)/2)+1]
            else:
                cutout = st_obs.data_arr_pam[i][y-int((width-1)/2):y+int((width-1)/2)+1,
                                                x-int((width-1)/2):x+int((width-1)/2)+1]
        final_pixels.append([y,x])
        # run interpolation
        if not do_fit:
            diff, bkg, mask = mbi.run(cutout)
        else:
            (diff, bkg, mask), result_nest = mbi.run_opt(cutout)
            nests.append(result_nest)
        if fudge_center_post:
            # Recentre on the brightest pixel of the *subtracted* image and
            # redo the extraction and interpolation, discarding the first try.
            width-=2
            nests = nests[:-1]
            final_pixels = final_pixels[:-1]
            init_center = int((width-1)/2)
            maxcell = np.argmax(diff[0][init_center-1:init_center+2,init_center-1:init_center+2])
            max_ind = np.unravel_index(maxcell,(3,3))
            x,y = np.array([x,y]) + (np.flip(max_ind)-np.array([1,1]))
            width+=2
            if st_obs.n_exposures==1:
                cutout = st_obs.data[y-int((width-1)/2):y+int((width-1)/2)+1,
                                     x-int((width-1)/2):x+int((width-1)/2)+1]
            else:
                cutout = st_obs.data_arr_pam[i][y-int((width-1)/2):y+int((width-1)/2)+1,
                                                x-int((width-1)/2):x+int((width-1)/2)+1]
            final_pixels.append([y,x])
            if not do_fit:
                diff, bkg, mask = mbi.run(cutout)
            else:
                (diff, bkg, mask), result_nest = mbi.run_opt(cutout)
                nests.append(result_nest)
        # Drop the one-pixel padding again for the write-back.
        width-=2
        if show_plot:
            norm = astropy.visualization.simple_norm(cutout[1:-1,1:-1],invalid=0)
            fig, axes = plt.subplots(1,4,figsize=(12,5))
            axes[0].set_title('image')
            axes[0].imshow(cutout[1:-1,1:-1],origin='lower',norm=norm,cmap='viridis')
            #print(np.nanmedian(diff[0]))
            #print(np.nanmedian(bkg[0]))
            #print(np.nanmedian(mask[0]))
            #print()
            #print()
        # Clip over-subtracted pixels in the central 3x3 to zero.
        for n in range(int((width-1)/2)-1,int((width-1)/2)+2):
            for j in range(int((width-1)/2)-1,int((width-1)/2)+2):
                if diff[0][n][j]<finalmin:
                    diff[0][n][j] = 0
        # Write the background-subtracted stamp back into the observation.
        if st_obs.n_exposures==1:
            st_obs.data[y-int((width-1)/2):y+int((width-1)/2)+1,
                        x-int((width-1)/2):x+int((width-1)/2)+1] = diff[0]
        else:
            st_obs.data_arr_pam[i][y-int((width-1)/2):y+int((width-1)/2)+1,
                                   x-int((width-1)/2):x+int((width-1)/2)+1] = diff[0]
        if show_plot:
            axes[1].set_title('masked')
            axes[1].imshow(mask[0],origin='lower',norm=norm,cmap='viridis')
            axes[2].set_title('bkg')
            im1 = axes[2].imshow(bkg[0],origin='lower',norm=norm,cmap='viridis')
            divider = make_axes_locatable(axes[2])
            cax = divider.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im1, cax=cax, orientation='vertical')
            axes[3].set_title('bkg sub')
            norm = astropy.visualization.simple_norm(diff[0],invalid=0)
            im2 = axes[3].imshow(diff[0],origin='lower',norm=norm,cmap='seismic')
            divider2 = make_axes_locatable(axes[3])
            cax2 = divider2.append_axes('right', size='5%', pad=0.05)
            fig.colorbar(im2, cax=cax2, orientation='vertical')
            plt.show()
    # NOTE(review): these attributes keep only the *last* exposure's
    # mask/bkg difference — confirm that is the intended behaviour.
    st_obs.fancy_background_centers = final_pixels
    st_obs.fancy_bkg_diff = mask[0]-bkg[0]
    return st_obs,nests,mbi
def mjd_dict_from_list(filelist,tolerance=0):
    """Group FITS filenames by (rounded) observation MJD.

    Parameters
    ----------
    filelist : iterable of str
        Paths to FITS files.
    tolerance : int, optional
        Number of decimals the MJD is rounded to (via ``np.round``) before
        being used as a grouping key; 0 groups by whole day.

    Returns
    -------
    dict
        Mapping of rounded MJD -> list of filenames sharing that MJD.
    """
    mjd_dict = {}
    for f in filelist:
        # Context manager ensures the FITS handle is closed even on error
        # (the original leaked one open file per list entry).
        with astropy.io.fits.open(f) as dat:
            try:
                mjd = dat[0].header['MJD-AVG']
            except KeyError:
                # Fall back to the exposure start time when MJD-AVG is absent.
                mjd = dat[0].header['EXPSTART']
        mjd_key = np.round(mjd,tolerance)
        mjd_dict.setdefault(mjd_key, []).append(f)
    return mjd_dict
def filter_dict_from_list(filelist,sky_location=None,ext=1):
    """Group FITS filenames by filter name.

    Parameters
    ----------
    filelist : iterable of str
        Paths to FITS files.
    sky_location : astropy.coordinates.SkyCoord, optional
        If given, files whose extension ``ext`` does not contain this
        position on the detector are skipped.
    ext : int, optional
        Extension index used for the WCS/bounds check and as a fallback
        location for the FILTER keyword.

    Returns
    -------
    dict or None
        Mapping of filter name -> list of filenames, or None if a file has
        no recognizable FILTER keyword.
    """
    filt_dict = {}
    for f in filelist:
        # Close each FITS handle deterministically (was leaked before).
        with astropy.io.fits.open(f) as dat:
            if sky_location is not None:
                imwcs = astropy.wcs.WCS(dat[ext],dat)
                # NOTE(review): WCS.world_to_pixel returns (x, y); unpacking
                # as (y, x) looks swapped relative to the shape check below
                # -- preserved as-is, confirm against callers.
                y,x = imwcs.world_to_pixel(sky_location)
                if not (0<x<dat[ext].data.shape[1] and 0<y<dat[ext].data.shape[0]):
                    continue
            if 'FILTER' in dat[0].header:
                filt = dat[0].header['FILTER']
            elif 'FILTER1' in dat[0].header:
                # Dual filter-wheel instruments: the active filter is the
                # wheel that is not CLEAR.
                if 'CLEAR' in dat[0].header['FILTER1']:
                    filt = dat[0].header['FILTER2']
                else:
                    filt = dat[0].header['FILTER1']
            elif 'FILTER' in dat[ext].header:
                filt = dat[ext].header['FILTER']
            else:
                print('Cannot find FILTER keyword')
                return None
        filt_dict.setdefault(filt, []).append(f)
    return filt_dict
def webbpsf_setup_sim_to_match_file(filename_or_HDUList, verbose=True, plot=False,dateobs=None):
    """Set up a webbpsf Instrument instance matched to a given JWST file.

    Parameters
    ----------
    filename_or_HDUList : str or astropy.io.fits.HDUList
        Calibrated JWST product (path or already-open HDUList); its primary
        header supplies INSTRUME, FILTER, APERNAME and date keywords.
    verbose : bool, optional
        Print progress messages and a final configuration summary.
    plot : bool, optional
        Passed through to ``load_wss_opd_by_date``.
    dateobs : astropy.time.Time, optional
        Date for the wavefront (OPD) lookup; defaults to the file's
        DATE-OBS/TIME-OBS keywords.

    Returns
    -------
    A webbpsf instrument instance configured to match the file.
    """
    if isinstance(filename_or_HDUList,str):
        if verbose:
            print(f"Setting up sim to match {filename_or_HDUList}")
        header = fits.getheader(filename_or_HDUList)
    else:
        header = filename_or_HDUList[0].header
        if verbose:
            print(f"Setting up sim to match provided FITS HDUList object")
    inst = webbpsf.instrument(header['INSTRUME'])
    if inst.name=='MIRI' and header['FILTER']=='P750L':
        # webbpsf doesn't model the MIRI LRS prism spectral response
        print("Please note, webbpsf does not currently model the LRS spectral response. Setting filter to F770W instead.")
        inst.filter='F770W'
    else:
        # FITS header lookups are case-insensitive: 'filter' == 'FILTER'.
        inst.filter=header['filter']
    inst.set_position_from_aperture_name(header['APERNAME'])
    if dateobs is None:
        dateobs = astropy.time.Time(header['DATE-OBS']+"T"+header['TIME-OBS'])
    # Load the measured telescope wavefront nearest the observation date.
    inst.load_wss_opd_by_date(dateobs, verbose=verbose, plot=plot)
    # per-instrument specializations
    if inst.name == 'NIRCam':
        if header['PUPIL'].startswith('MASK'):
            inst.pupil_mask = header['PUPIL']
            inst.image_mask = header['CORONMSK'].replace('MASKA', 'MASK') # note, have to modify the value slightly for
            # consistency with the labels used in webbpsf
    elif inst.name == 'MIRI':
        if inst.filter in ['F1065C', 'F1140C', 'F1550C']:
            inst.image_mask = 'FQPM'+inst.filter[1:5]
        elif inst.filter == 'F2300C':
            inst.image_mask = 'LYOT2300'
        elif header['FILTER'] == 'P750L':
            inst.pupil_mask = 'P750L'
            if header['APERNAME'] == 'MIRIM_SLIT':
                inst.image_mask = 'LRS slit'
    # TODO add other per-instrument keyword checks
    if verbose:
        print(f"""
Configured simulation instrument for:
Instrument: {inst.name}
Filter: {inst.filter}
Detector: {inst.detector}
Apername: {inst.aperturename}
Det. Pos.: {inst.detector_position} {'in subarray' if "FULL" not in inst.aperturename else ""}
Image plane mask: {inst.image_mask}
Pupil plane mask: {inst.pupil_mask}
""")
    return inst
def get_jwst_psf_grid(st_obs,num_psfs=16,fname=None,dateobs=None):
    """Build a gridded webbpsf PSF library for this observation's detector.

    Uses the first exposure (or ``fname`` if given) as the reference file to
    configure the instrument simulation, then computes ``num_psfs`` PSFs
    across the detector.
    """
    reference = st_obs.exposure_fnames[0] if fname is None else fname
    inst = webbpsf_setup_sim_to_match_file(reference, dateobs=dateobs, verbose=False)
    library = webbpsf.gridded_library.CreatePSFLibrary(inst, inst.filter,
                                                      num_psfs=num_psfs,
                                                      detectors=st_obs.detector,
                                                      verbose=False)
    return library.create_grid()
def get_jwst_psf_from_grid(st_obs,sky_location,grid,psf_width=101):
    """Evaluate a gridded PSF model at a sky position for each exposure.

    Returns a list of ``photutils.psf.FittableImageModel`` objects (one per
    exposure, oversampling 4), each normalized to unit flux scaled by 16
    (the 4x4 oversampled pixel area).
    """
    grid.oversampling = (1,1)
    models = []
    # Pixel offsets spanning the stamp at 4x sampling, shared by all exposures.
    span = np.arange(-4*psf_width/2, psf_width/2*4+1, 1).astype(int)
    for exp_idx in range(st_obs.n_exposures):
        exp_wcs = st_obs.wcs_list[exp_idx]
        x, y = astropy.wcs.utils.skycoord_to_pixel(sky_location, exp_wcs)
        grid.x_0 = x
        grid.y_0 = y
        xf, yf = np.meshgrid(span + int(x+.5), span + int(y+.5))
        stamp = np.array(grid(xf, yf)).astype(float)
        stamp /= np.sum(stamp)
        stamp *= 16
        models.append(photutils.psf.FittableImageModel(stamp, normalize=False,
                                                       oversampling=4))
    return models
def get_jwst_psf(st_obs,sky_location,psf_width=61,pipeline_level=2,fname=None,dateobs=None):
    """Compute one webbpsf PSF model per exposure at a sky position.

    A single PSF is simulated at the target's detector position for every
    exposure in ``st_obs`` and wrapped as a photutils model.

    Parameters
    ----------
    st_obs : observation
        Supplies exposure filenames, per-exposure WCS and the detector name.
    sky_location : astropy.coordinates.SkyCoord
        Position at which to evaluate the PSF.
    psf_width : int, optional
        Field of view of each PSF stamp in detector pixels.
    pipeline_level : int, optional
        3 -> detector-sampled PSF (oversampling 1); otherwise 4x oversampled.
    fname : str, optional
        Alternate reference file for configuring the simulation.
    dateobs : astropy.time.Time, optional
        Passed to the instrument setup for the OPD lookup.

    Returns
    -------
    list of photutils.psf.FittableImageModel
    """
    reference = st_obs.exposure_fnames[0] if fname is None else fname
    inst = webbpsf_setup_sim_to_match_file(reference, dateobs=dateobs, verbose=False)
    oversampling = 1 if pipeline_level == 3 else 4
    models = []
    for exp_idx in range(st_obs.n_exposures):
        exp_wcs = st_obs.wcs_list[exp_idx]
        x, y = astropy.wcs.utils.skycoord_to_pixel(sky_location, exp_wcs)
        maker = webbpsf.gridded_library.CreatePSFLibrary(
            inst, inst.filter, num_psfs=1, psf_location=(x, y),
            fov_pixels=psf_width, detectors=st_obs.detector,
            save=False, verbose=False,
            use_detsampled_psf=(oversampling == 1))
        grid = maker.create_grid()
        models.append(photutils.psf.FittableImageModel(grid.data[0,:,:],
                                                       normalize=False,
                                                       oversampling=oversampling))
    return models
def get_jwst3_psf(st_obs,st_obs3,sky_location,num_psfs=16,psf_width=101,temp_outdir='.'):
    """Build a level-3 (drizzled) JWST PSF model by resampling level-2 PSFs.

    A webbpsf PSF is injected into a copy of every level-2 exposure, the set
    is run through the JWST ``Image3Pipeline`` with only the resample step
    active, and the drizzled PSF stamp is returned as a photutils model.

    Parameters
    ----------
    st_obs : observation
        Level-2 observation set (exposure filenames, WCS, detector).
    st_obs3 : observation
        Level-3 observation whose output shape and pixel scale are matched.
    sky_location : astropy.coordinates.SkyCoord
        Position at which to build the PSF.
    num_psfs : int, optional
        Unused; retained for interface compatibility.
    psf_width : int, optional
        Width in pixels of the level-2 PSF stamps.
    temp_outdir : str, optional
        Parent directory for the temporary working directory.

    Returns
    -------
    photutils.psf.FittableImageModel
        Drizzled PSF model (oversampling 1).

    Raises
    ------
    RuntimeError
        Re-raised (after cleanup) if the pipeline fails.
    """
    # Temporary stpipe config that routes pipeline logging to /dev/null.
    with open('./stpipe-log.cfg','w') as f:
        f.write('[*]\nhandler = file:/dev/null\nlevel = INFO\n')
    psfs = get_jwst_psf(st_obs,sky_location,psf_width=psf_width,pipeline_level=3)
    outdir = os.path.join(temp_outdir,'temp_psf_dir')
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    level2_sums = []
    try:
        out_fnames = []
        for i,f in enumerate(st_obs.exposure_fnames):
            dat = fits.open(f)
            # Replace the science array with the PSF evaluated at the target
            # position; WCS and headers are left untouched so resampling
            # treats the synthetic data exactly like the real exposure.
            imwcs = wcs.WCS(dat['SCI',1])
            y,x = skycoord_to_pixel(sky_location,imwcs)
            xf, yf = np.mgrid[0:dat['SCI',1].data.shape[0],0:dat['SCI',1].data.shape[1]].astype(int)
            psfs[i].x_0 = x
            psfs[i].y_0 = y
            dat['SCI',1].data = psfs[i](xf,yf)
            level2_sums.append(np.sum(dat['SCI',1].data))
            dat.writeto(os.path.join(outdir,os.path.basename(f)),overwrite=True)
            out_fnames.append(os.path.basename(f))
        # Build a level-3 association for the synthetic PSF exposures.
        asn = asn_from_list.asn_from_list(out_fnames, rule=DMS_Level3_Base,
                        product_name='temp_psf_cals')
        with open(os.path.join(outdir,'cal_data_asn.json'),"w") as outfile:
            name, serialized = asn.dump(format='json')
            outfile.write(serialized)
        # Run only the resample step; all other level-3 steps are skipped.
        pipe3 = Image3Pipeline()
        pipe3.output_dir = outdir
        pipe3.save_results = True
        pipe3.tweakreg.skip = True
        pipe3.outlier_detection.skip = True
        pipe3.skymatch.skip = True
        pipe3.source_catalog.skip = True
        pipe3.resample.output_shape = st_obs3.data.shape
        pipe3.outlier_detection.save_results = False
        pipe3.resample.pixel_scale = st_obs3.pixel_scale
        pipe3.run(os.path.join(outdir,'cal_data_asn.json'))
        with fits.open(os.path.join(outdir,'temp_psf_cals_i2d.fits')) as dat:
            imwcs = wcs.WCS(dat['SCI',1])
            level3 = dat[1].data
            level3[np.isnan(level3)] = 0
            level3[level3<0] = 0
            # Cut a psf_width stamp (4x sampling span) centered on the target.
            y,x = astropy.wcs.utils.skycoord_to_pixel(sky_location,imwcs)
            mx,my = np.meshgrid(np.arange(-4*psf_width/2,psf_width/2*4+1,1).astype(int)+int(x+.5),
                                np.arange(-4*psf_width/2,psf_width/2*4+1,1).astype(int)+int(y+.5))
            level3_psf = photutils.psf.FittableImageModel(level3[mx,my],normalize=False,
                                                          oversampling=1)
        # Remove the scratch directory and the temporary logging config.
        for tmp in glob.glob(os.path.join(outdir,'*')):
            os.remove(tmp)
        shutil.rmtree(outdir, ignore_errors=True)
        os.remove('stpipe-log.cfg')
    except RuntimeError as e:
        # Bug fix: the handler previously read "except RuntimeError:" yet
        # printed an unbound name ``e`` (NameError) and then fell through to
        # return an undefined ``level3_psf``. Bind the exception, clean up,
        # and re-raise so callers see the real failure.
        print('Failed to create PSF model')
        print(e)
        for tmp in glob.glob(os.path.join(outdir,'*')):
            os.remove(tmp)
        shutil.rmtree(outdir, ignore_errors=True)
        os.remove('stpipe-log.cfg')
        raise
    return level3_psf
def get_hst_psf_grid(st_obs):
    """Load the standard WFC3 gridded PSF model for this filter/detector."""
    psf_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'wfc3_photometry/psfs')
    return make_models(get_standard_psf(psf_dir, st_obs.filter, st_obs.detector))[0]
def get_hst_psf(st_obs,sky_location,psf_width=25,pipeline_level=2):
    """Build one HST PSF model per exposure from the standard PSF grids.

    The gridded library PSF is interpolated at the target's pixel position
    in each exposure, normalized within a 5.6-pixel (4x-sampled) aperture,
    scaled by the 16x oversampling area and the encircled-energy correction,
    and wrapped as a photutils model centered at the target.
    """
    psf_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                           'wfc3_photometry/psfs')
    grid = make_models(get_standard_psf(psf_dir, st_obs.filter, st_obs.detector))[0]
    models = []
    for exp_idx in range(st_obs.n_exposures):
        exp_wcs = st_obs.wcs_list[exp_idx]
        y, x = astropy.wcs.utils.skycoord_to_pixel(sky_location, exp_wcs)
        interpolator = grid._calc_interpolator(int(x), int(y))
        stamp = interpolator(grid._xidx, grid._yidx)
        # Normalize to the 5.6-px aperture sum, then apply the 4x4
        # oversampling area factor and the aperture correction.
        stamp /= simple_aperture_sum(stamp, [[stamp.shape[0]/2, stamp.shape[0]/2]], 5.6*4)
        stamp *= 16
        stamp *= (hst_apcorr(5.6*st_obs.px_scale, st_obs.filter, st_obs.instrument))
        if pipeline_level == 2:
            model = photutils.psf.FittableImageModel(stamp,
                                                     oversampling=grid.oversampling)
        else:
            model = photutils.psf.FittableImageModel(stamp, oversampling=1)
        model.x_0 = x
        model.y_0 = y
        models.append(model)
    return models
def get_hst3_psf(st_obs,st_obs3,sky_location,psf_width=25):
    """Build a drizzled (level-3) HST PSF model from per-exposure PSFs.

    A detector-sampled PSF is injected into a copy of every level-2
    exposure, the set is drizzled with AstroDrizzle onto the level-3 pixel
    scale, and the resulting PSF stamp is returned as a photutils model
    (oversampling 4).

    Parameters
    ----------
    st_obs : observation
        Level-2 observation set (exposure filenames, sci extension).
    st_obs3 : observation
        Level-3 observation supplying the output pixel scale and filter.
    sky_location : astropy.coordinates.SkyCoord
        Position at which to build the PSF.
    psf_width : int, optional
        Width in pixels of the extracted PSF stamp.

    Returns
    -------
    photutils.psf.FittableImageModel

    Raises
    ------
    RuntimeError
        Re-raised (after cleanup) if drizzling fails.
    """
    from drizzlepac import astrodrizzle
    psfs = get_hst_psf(st_obs,sky_location,psf_width=psf_width,pipeline_level=3)
    outdir = os.path.join(os.path.abspath(os.path.dirname(__file__)),'temp_%i'%np.random.randint(0,1000))
    os.mkdir(outdir)
    level2_sums = []
    try:
        out_fnames = []
        for i,f in enumerate(st_obs.exposure_fnames):
            dat = fits.open(f)
            newx = dat['SCI',st_obs.sci_ext].data.shape[0]
            newy = dat['SCI',st_obs.sci_ext].data.shape[1]
            imwcs = wcs.WCS(dat['SCI',st_obs.sci_ext],dat)
            y,x = skycoord_to_pixel(sky_location,imwcs)
            xf, yf = np.mgrid[0:dat['SCI',st_obs.sci_ext].data.shape[0],0:dat['SCI',st_obs.sci_ext].data.shape[1]].astype(int)
            # Center the PSF on the pixel containing the target.
            psfs[i].x_0 = int(x)+.5
            psfs[i].y_0 = int(y)+.5
            dat['SCI',st_obs.sci_ext].data = psfs[i](xf,yf)
            dat['SCI',st_obs.sci_ext].data[dat['SCI',st_obs.sci_ext].data<0] = 0
            # Neutral DQ/ERR arrays so drizzle weighting does not distort
            # the synthetic PSF.
            dat['DQ',st_obs.sci_ext].data = np.zeros((newx,newy)).astype(int)
            dat['ERR',st_obs.sci_ext].data = np.ones((newx,newy))
            level2_sums.append(simple_aperture_sum(dat['SCI',st_obs.sci_ext].data,[[y,x]],5.6*4))
            dat.writeto(os.path.join(outdir,os.path.basename(f)),overwrite=True)
            out_fnames.append(os.path.join(outdir,os.path.basename(f)))
        astrodrizzle.AstroDrizzle(','.join(out_fnames),output=os.path.join(outdir,'temp_psf'),
                                  build=True,median=False,skysub=False,sky_bits=None,
                                  driz_cr_corr=False,final_wht_type='ERR',driz_separate=False,
                                  driz_cr=False,blot=False,clean=True,group='sci,'+str(st_obs.sci_ext),
                                  final_scale=st_obs3.pixel_scale
                                  )
        # Drizzled output is *_drz.fits (or *_drc.fits for CTE-corrected
        # inputs); try both.
        try:
            dat = fits.open(glob.glob(os.path.join(outdir,'temp_psf_drz.fits'))[0])
        except (IndexError, OSError):
            dat = fits.open(glob.glob(os.path.join(outdir,'temp_psf_drc.fits'))[0])
        imwcs = wcs.WCS(dat[1],dat)
        level3 = dat[1].data
        level3[np.isnan(level3)] = 0
        level3[level3<0] = 0
        y,x = astropy.wcs.utils.skycoord_to_pixel(sky_location,imwcs)
        mx,my = np.meshgrid(np.arange(-4*psf_width/2,psf_width/2*4+1,1).astype(int)+int(x+.5),
                            np.arange(-4*psf_width/2,psf_width/2*4+1,1).astype(int)+int(y+.5))
        mx2,my2 = np.meshgrid(np.arange(-1*psf_width/2,psf_width/2*1+1,1).astype(int)+int(x+.5),
                              np.arange(-1*psf_width/2,psf_width/2*1+1,1).astype(int)+int(y+.5))
        # NOTE(review): diagnostic plot left in from debugging; it blocks
        # under interactive matplotlib backends -- confirm whether it
        # should be removed.
        plt.imshow(level3[mx2,my2])
        plt.scatter(12.5,12.5)
        plt.show()
        # Normalize within the standard aperture, apply oversampling area
        # and the level-3 aperture correction.
        level3_sum = simple_aperture_sum(level3,[y,x],5.6*4)
        level3[mx,my]/=level3_sum
        level3[mx,my]*=16
        level3[mx,my]*=(hst_apcorr(5.6*st_obs3.px_scale,st_obs3.filter,st_obs3.instrument))
        level3_psf = photutils.psf.FittableImageModel(level3[mx,my],normalize=False,
                                                      oversampling=4)
        shutil.rmtree(outdir)
    except RuntimeError:
        # Bug fix: on failure the function previously fell through to
        # "return level3_psf" with the name unbound, raising NameError.
        # Clean up, report, and re-raise the original error instead.
        print('Failed to create PSF model')
        shutil.rmtree(outdir)
        raise
    return level3_psf
def jwst_apcorr_interp(fname,radius,alternate_ref=None):
    """Interpolate the JWST aperture correction at an arbitrary radius.

    Queries the pipeline aperture-correction reference data over encircled
    energies 10-90% and linearly interpolates both the correction and the
    EE value at ``radius`` (pixels).

    Parameters
    ----------
    fname : str
        JWST datamodel file used to resolve the reference files.
    radius : float
        Aperture radius in pixels.
    alternate_ref : str, optional
        Use this file for the reference lookup instead of ``fname``.

    Returns
    -------
    tuple or None
        (ee_percent, apcorr, bkg_inner_radius, bkg_outer_radius), or None
        if ``radius`` exceeds the largest tabulated radius.
    """
    import scipy
    # Temporary stpipe config to silence pipeline logging during step setup.
    with open('stpipe-log.cfg','w') as f:
        f.write('[*]\nhandler = file:/dev/null\nlevel = INFO\n')
    sc = source_catalog.source_catalog_step.SourceCatalogStep()
    os.remove('stpipe-log.cfg')
    if alternate_ref is not None:
        fname = alternate_ref
    all_ees = []
    all_corr = []
    all_radius = []
    with datamodels.open(fname) as model:
        reffile_paths = sc._get_reffile_paths(model)
        for ees in [(10,20,30),(40,50,60),(70,80,90)]:
            refdata = reference_data.ReferenceData(model, reffile_paths,
                                                   ees)
            try:
                aperture_params = refdata.aperture_params
                all_ees = np.append(all_ees,ees)
                all_radius = np.append(all_radius,aperture_params['aperture_radii'])
                all_corr = np.append(all_corr,aperture_params['aperture_corrections'])
            except Exception:
                # Some instrument/filter combinations lack reference data at
                # the extreme EE triples; retry with a shifted triple and
                # keep only the EEs belonging to the original request.
                if ees[0]==10:
                    tempees = np.append(ees[1:],[40]).ravel()
                    refdata = reference_data.ReferenceData(model, reffile_paths,
                                                           tempees)
                    aperture_params = refdata.aperture_params
                    all_ees = np.append(all_ees,ees[1:])
                    all_radius = np.append(all_radius,aperture_params['aperture_radii'][:2])
                    all_corr = np.append(all_corr,aperture_params['aperture_corrections'][:2])
                elif ees[-1]==90:
                    tempees = np.append([60],ees[:2]).ravel()
                    refdata = reference_data.ReferenceData(model, reffile_paths,
                                                           tempees)
                    aperture_params = refdata.aperture_params
                    all_ees = np.append(all_ees,ees[1:])
                    all_radius = np.append(all_radius,aperture_params['aperture_radii'][1:])
                    all_corr = np.append(all_corr,aperture_params['aperture_corrections'][1:])
    if radius>np.max(all_radius):
        print('Your radius is larger than the largest allowed radius of %f pixels'%np.max(all_radius))
        return None
    apcorr = scipy.interpolate.interp1d(all_radius,all_corr)(radius)
    ee_interp = scipy.interpolate.interp1d(all_radius,all_ees)(radius)
    return float(ee_interp), apcorr, aperture_params['bkg_aperture_inner_radius'], aperture_params['bkg_aperture_outer_radius']
def jwst_apcorr(fname,ee=70,alternate_ref=None):
    """Look up JWST aperture-correction parameters for an encircled energy.

    Parameters
    ----------
    fname : str
        JWST datamodel file used to resolve the reference files.
    ee : int, optional
        Requested encircled energy percentage (default 70).
    alternate_ref : str, optional
        Use this file for the reference lookup instead of ``fname``.

    Returns
    -------
    list
        [aperture_radius, aperture_correction, bkg_inner_radius,
        bkg_outer_radius] for the requested encircled energy.
    """
    # Temporary stpipe config to silence pipeline logging during step setup.
    with open('./stpipe-log.cfg','w') as f:
        f.write('[*]\nhandler = file:/dev/null\nlevel = INFO\n')
    sc = source_catalog.source_catalog_step.SourceCatalogStep()
    os.remove('stpipe-log.cfg')
    if alternate_ref is not None:
        fname = alternate_ref
    # ReferenceData takes a triple of EE values. Choose a triple containing
    # the requested EE and remember its index within the triple; this
    # replaces four copy-pasted branches with identical bodies.
    if ee==10:
        ees, idx = (10,20,30), 0
    elif ee==20:
        ees, idx = (20,30,40), 0
    elif ee==30:
        ees, idx = (20,30,40), 1
    else:
        ees, idx = (20,30,ee), -1
    with datamodels.open(fname) as model:
        reffile_paths = sc._get_reffile_paths(model)
        refdata = reference_data.ReferenceData(model, reffile_paths,
                                               ees)
        aperture_params = refdata.aperture_params
        return [aperture_params['aperture_radii'][idx],
                aperture_params['aperture_corrections'][idx],
                aperture_params['bkg_aperture_inner_radius'],
                aperture_params['bkg_aperture_outer_radius']]
def estimate_bkg(data,position,inner, outer,model_psf=None,corr=None):
    """Diagnostic helper comparing annulus background pixels to the PSF.

    WARNING(review): this routine appears to be unfinished debugging code.
    It prints the PSF flux fraction in the annulus, shows two plots, and
    then calls ``sys.exit()``, terminating the interpreter -- it never
    returns a background estimate. The ``corr`` argument is accepted but
    never used, and ``model_psf`` is dereferenced unconditionally even
    though the leading assert allows ``corr`` in its place.

    Parameters
    ----------
    data : 2-D array
        Image to examine.
    position : (y, x) pair
        Target position; flipped to (x, y) for the photutils annulus.
    inner, outer : float
        Background annulus radii in pixels (inner must be smaller).
    model_psf : photutils model, optional
        PSF model evaluated over the full image.
    corr : optional
        Unused -- presumably a precomputed correction; TODO confirm intent.
    """
    assert model_psf is not None or corr is not None, 'Must supply model_psf or corr'
    assert inner<outer
    annulus_aperture = CircularAnnulus(np.flip(position), r_in=inner, r_out=outer)
    annulus_mask = annulus_aperture.to_mask(method='center')
    annulus_data = annulus_mask.multiply(data)
    import matplotlib.pyplot as plt
    model_psf.x_0 = position[1]
    model_psf.y_0 = position[0]
    yf, xf = np.mgrid[0:data.shape[0],0:data.shape[1]].astype(int)
    psf = np.array(model_psf(xf,yf)).astype(float)
    annulus_psf = annulus_mask.multiply(psf)
    # Fraction of total PSF flux falling inside the background annulus.
    print(np.sum(annulus_psf)/np.sum(psf))
    plt.imshow(annulus_data)
    plt.show()
    plt.imshow(annulus_psf)
    plt.show()
    # NOTE(review): hard exit left in from debugging -- the function never
    # returns normally.
    sys.exit()
def generic_aperture_phot(data,positions,radius,sky,epadu=1,error=None):
    """Aperture photometry with sigma-clipped annulus background subtraction.

    Parameters
    ----------
    data : 2-D array
        Image to measure.
    positions : array-like
        (x, y) source positions.
    radius : float
        Photometry aperture radius in pixels.
    sky : dict
        Background annulus radii, keys 'sky_in' and 'sky_out'.
    epadu : float, optional
        Electrons per ADU; used only for the Poisson error term.
    error : array, optional
        Per-pixel error image; if given, photutils propagates it and the
        analytic error terms below are skipped.

    Returns
    -------
    astropy table with background-subtracted sums and uncertainties.
    """
    aperture = CircularAperture(positions, r=radius)
    annulus_aperture = CircularAnnulus(positions, r_in=sky["sky_in"], r_out=sky["sky_out"])
    bkg_median = []
    bkg_stdev = []
    # Sigma-clipped background statistics from each source's annulus.
    for mask in annulus_aperture.to_mask(method='center'):
        annulus_pixels = mask.multiply(data)[mask.data > 0]
        _, clipped_median, clipped_stdev = sigma_clipped_stats(annulus_pixels)
        bkg_median.append(clipped_median)
        bkg_stdev.append(clipped_stdev)
    bkg_median = np.array(bkg_median)
    bkg_stdev = np.array(bkg_stdev)
    phot = aperture_photometry(data, aperture, method='exact', error=error)
    phot['annulus_median'] = bkg_median
    phot['aper_bkg'] = bkg_median * aperture.area
    phot['aper_sum_bkgsub'] = phot['aperture_sum'] - phot['aper_bkg']
    if error is None:
        # Combine Poisson, sky-scatter and mean-sky uncertainty terms.
        error_poisson = np.sqrt(phot['aper_sum_bkgsub'])
        error_scatter_sky = aperture.area * bkg_stdev**2
        error_mean_sky = bkg_stdev**2 * aperture.area**2 / annulus_aperture.area
        phot['aperture_sum_err'] = np.sqrt(error_poisson**2/epadu + error_scatter_sky + error_mean_sky)
    return phot
def jwst_aperture_phot(fname,ra,dec,
                       filt,ee='r70'):
    """Aperture photometry on a JWST level-3 image at (ra, dec).

    Parameters
    ----------
    fname : str
        Level-3 (i2d) image filename.
    ra, dec : float or str
        Target coordinates (degrees, or sexagesimal strings).
    filt : str
        Filter name (currently unused here; kept for interface parity with
        ``hst_aperture_phot``).
    ee : str or float, optional
        'rNN' for NN% encircled energy, or a numeric aperture radius in
        pixels (then no aperture correction is applied).

    Returns
    -------
    astropy table with background-subtracted flux, AB magnitude and errors.
    """
    try:
        force_ra = float(ra)
        force_dec = float(dec)
        unit = u.deg
    except (TypeError, ValueError):
        # Non-numeric input: assume sexagesimal hour-angle / degree strings.
        unit = (u.hourangle, u.deg)
    if isinstance(ee,str):
        # NOTE(review): get_apcorr_params is not defined in the visible part
        # of this module -- confirm it exists (jwst_apcorr has the same
        # signature and return shape).
        radius,apcorr,skyan_in,skyan_out = get_apcorr_params(fname,int(ee[1:]))
    else:
        radius,apcorr,skyan_in,skyan_out = ee,1,ee+1,ee+3
    image = fits.open(fname)
    data = image['SCI',1].data
    err = image['ERR',1].data
    imwcs = wcs.WCS(image[1])
    positions = np.atleast_2d(astropy.wcs.utils.skycoord_to_pixel(SkyCoord(ra, dec,unit=unit),imwcs))
    # Clip negative pixels to zero. Vectorized boolean mask replaces the
    # original Python loop over np.argwhere(data < 0).
    data[data < 0] = 0
    sky = {'sky_in':skyan_in,'sky_out':skyan_out}
    phot = generic_aperture_phot(data,positions,radius,sky,error=err)
    phot['aper_sum_corrected'] = phot['aper_sum_bkgsub'] * apcorr
    phot['aperture_sum_err']*=apcorr
    phot['magerr'] = 2.5 * np.log10(1.0 + (phot['aperture_sum_err']/phot['aper_sum_bkgsub']))
    # Convert surface brightness (MJy/sr) to AB magnitude via the pixel area.
    pixel_scale = wcs.utils.proj_plane_pixel_scales(imwcs)[0] * imwcs.wcs.cunit[0].to('arcsec')
    flux_units = u.MJy / u.sr * (pixel_scale * u.arcsec)**2
    flux = phot['aper_sum_corrected']*flux_units
    phot['mag'] = flux.to(u.ABmag).value
    return phot
def hst_apcorr(ap,filt,inst):
    """Encircled-energy aperture correction for HST WFC3 photometry.

    Downloads (once, cached in the working directory) the STScI
    encircled-energy table for the IR or UVIS channel and interpolates the
    correction at the filter's effective wavelength and aperture radius.

    Parameters
    ----------
    ap : float
        Aperture radius in arcsec.
    filt : str
        Filter name understood by sncosmo (UVIS filters are retried with a
        'uv' prefix).
    inst : str
        'ir' for WFC3/IR; anything else selects the UVIS table.

    Returns
    -------
    Interpolated encircled-energy fraction at (wavelength, aperture).
    """
    if inst=='ir':
        if not os.path.exists('ir_ee_corrections.csv'):
            urllib.request.urlretrieve('https://www.stsci.edu/files/live/sites/www/files/home/hst/'+\
                                       'instrumentation/wfc3/data-analysis/photometric-calibration/'+\
                                       'ir-encircled-energy/_documents/ir_ee_corrections.csv',
                                       'ir_ee_corrections.csv')
        ee = Table.read('ir_ee_corrections.csv',format='ascii')
        ee.remove_column('FILTER')
        waves = ee['PIVOT']
        ee.remove_column('PIVOT')
    else:
        if not os.path.exists('wfc3uvis2_aper_007_syn.csv'):
            urllib.request.urlretrieve('https://www.stsci.edu/files/live/sites/www/files/home/hst/'+\
                                       'instrumentation/wfc3/data-analysis/photometric-calibration/'+\
                                       'uvis-encircled-energy/_documents/wfc3uvis2_aper_007_syn.csv','wfc3uvis2_aper_007_syn.csv')
        ee = Table.read('wfc3uvis2_aper_007_syn.csv',format='ascii')
        ee.remove_column('FILTER')
        waves = ee['WAVELENGTH']
        ee.remove_column('WAVELENGTH')
    # Table columns are named like 'APER#0.10'; build a 2-D spline over
    # (wavelength, aperture radius).
    ee_arr = np.array([ee[col] for col in ee.colnames])
    apps = [float(x.split('#')[1]) for x in ee.colnames]
    interp = scipy.interpolate.RectBivariateSpline(waves,apps,ee_arr.T)
    try:
        filt_wave = sncosmo.get_bandpass(filt).wave_eff
    except Exception:
        # UVIS bandpasses are registered in sncosmo with a 'uv' prefix.
        filt_wave = sncosmo.get_bandpass('uv'+filt).wave_eff
    return(interp(filt_wave,ap))
def hst_get_zp(filt,zpsys='ab'):
    """Return the WFC3/IR zeropoint for a filter in the AB or Vega system.

    Prints a warning and returns None for an unrecognized ``zpsys``;
    raises KeyError for a filter outside the tabulated IR set.
    """
    ab_zps = {'F098M': 25.666, 'F105W': 26.264, 'F110W': 26.819,
              'F125W': 26.232, 'F140W': 26.450, 'F160W': 25.936}
    vega_zps = {'F098M': 25.090, 'F105W': 25.603, 'F110W': 26.042,
                'F125W': 25.312, 'F140W': 25.353, 'F160W': 24.662}
    system = zpsys.lower()
    if system == 'ab':
        return ab_zps[filt]
    if system == 'vega':
        return vega_zps[filt]
    print('unknown zpsys')
    return None
def hst_aperture_phot(fname,force_ra,force_dec,filt,radius=3,
                      skyan_in=4,skyan_out=8):
    """Aperture photometry on an HST image at a forced sky position.

    Parameters
    ----------
    fname : str
        HST drizzled/calibrated image filename.
    force_ra, force_dec : float or str
        Target coordinates (degrees, or sexagesimal strings).
    filt : str
        Filter name; selects IR (tabulated zeropoints) vs UVIS (header
        PHOTFLAM/PHOTPLAM) calibration.
    radius : float, optional
        Aperture radius in pixels.
    skyan_in, skyan_out : float, optional
        Background annulus radii in pixels.

    Returns
    -------
    astropy table with background-subtracted flux, magnitude and errors.
    """
    data_file = fits.open(fname)
    drc_dat = data_file['SCI',1]
    # Data already in electrons are gain-corrected; otherwise counts/s need
    # the exposure time for the Poisson error term.
    if data_file[1].header['BUNIT']=='ELECTRON':
        epadu = 1
    else:
        epadu = data_file[0].header['EXPTIME']
    try:
        force_ra = float(force_ra)
        force_dec = float(force_dec)
        unit = u.deg
    except (TypeError, ValueError):
        unit = (u.hourangle, u.deg)
    sky_location = SkyCoord(force_ra,force_dec,unit=unit)
    imwcs = wcs.WCS(drc_dat.header,data_file)
    x,y = astropy.wcs.utils.skycoord_to_pixel(sky_location,imwcs)
    px_scale = wcs.utils.proj_plane_pixel_scales(imwcs)[0] * imwcs.wcs.cunit[0].to('arcsec')
    try:
        # IR filters have tabulated zeropoints; a KeyError means UVIS.
        zp = hst_get_zp(filt,'ab')
        inst = 'ir'
    except KeyError:
        inst = 'uvis'
    phot = generic_aperture_phot(drc_dat.data,np.atleast_2d([x,y]),
                                 radius,{'sky_in':skyan_in,'sky_out':skyan_out},epadu=epadu)
    phot['magerr'] = 1.086 * phot['aperture_sum_err']/phot['aper_sum_bkgsub']
    # Bug fix: this previously called hst_get_ee_corr, which is not defined
    # in this module; hst_apcorr implements the encircled-energy lookup.
    apcorr = hst_apcorr(radius*px_scale,filt,inst)
    if inst=='ir':
        zp = hst_get_zp(filt,'ab')
        phot['aper_sum_corrected'] = phot['aper_sum_bkgsub']/apcorr
        phot['mag'] = -2.5*np.log10(phot['aper_sum_corrected'])+zp
    else:
        try:
            photflam = drc_dat.header['PHOTFLAM']
        except KeyError:
            # Bug fix: previously fits.open(data_file) was called on an
            # already-open HDUList; read the primary header directly.
            photflam = data_file[0].header['PHOTFLAM']
        photplam = drc_dat.header['PHOTPLAM']
        # ST -> AB zeropoint from PHOTFLAM/PHOTPLAM.
        zp = -2.5*np.log10(photflam)-5*np.log10(photplam)-2.408
        phot['aper_sum_corrected'] = phot['aper_sum_bkgsub'] / apcorr
        phot['aperture_sum_err']/=apcorr
        phot['mag'] = -2.5*np.log10(phot['aper_sum_corrected']) + zp
    return(phot)
def simple_aperture_sum(data, positions, radius):
    """Sum pixel values inside circular aperture(s) using exact overlap.

    ``data`` is the image array, ``positions`` the aperture centre(s) in
    pixel coordinates, and ``radius`` the aperture radius in pixels.
    Returns the 'aperture_sum' column of the photutils photometry table.
    """
    circ_aperture = CircularAperture(positions, r=radius)
    photometry_table = aperture_photometry(data, circ_aperture, method='exact')
    return photometry_table['aperture_sum']
|
jpierel14REPO_NAMEspace_photPATH_START.@space_phot_extracted@space_phot-main@space_phot@util.py@.PATH_END.py
|
{
"filename": "BoundingBox.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_support/task/processor/BoundingBox.md",
"type": "Markdown"
}
|
page_type: reference
description: An integer bounding box, axis aligned.
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_support.task.processor.BoundingBox" />
<meta itemprop="path" content="Stable" />
<meta itemprop="property" content="__eq__"/>
<meta itemprop="property" content="__init__"/>
</div>
# tflite_support.task.processor.BoundingBox
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/python/task/processor/proto/bounding_box.proto">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
An integer bounding box, axis aligned.
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>tflite_support.task.processor.BoundingBox(
origin_x: int, origin_y: int, width: int, height: int
)
</code></pre>
<!-- Placeholder for "Used in" -->
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2"><h2 class="add-link">Attributes</h2></th></tr>
<tr>
<td>
`origin_x`<a id="origin_x"></a>
</td>
<td>
The X coordinate of the top-left corner, in pixels.
</td>
</tr><tr>
<td>
`origin_y`<a id="origin_y"></a>
</td>
<td>
The Y coordinate of the top-left corner, in pixels.
</td>
</tr><tr>
<td>
`width`<a id="width"></a>
</td>
<td>
The width of the bounding box, in pixels.
</td>
</tr><tr>
<td>
`height`<a id="height"></a>
</td>
<td>
The height of the bounding box, in pixels.
</td>
</tr>
</table>
## Methods
<h3 id="__eq__"><code>__eq__</code></h3>
<a target="_blank" class="external" href="https://github.com/tensorflow/tflite-support/blob/v0.4.4/tensorflow_lite_support/python/task/processor/proto/bounding_box.proto">View source</a>
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>__eq__(
other: Any
) -> bool
</code></pre>
Checks if this object is equal to the given object.
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2">Args</th></tr>
<tr>
<td>
`other`
</td>
<td>
The object to be compared with.
</td>
</tr>
</table>
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2">Returns</th></tr>
<tr class="alt">
<td colspan="2">
True if the objects are equal.
</td>
</tr>
</table>
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_support@task@processor@BoundingBox.md@.PATH_END.py
|
{
"filename": "DDsmu_mocks.py",
"repo_name": "manodeep/Corrfunc",
"repo_path": "Corrfunc_extracted/Corrfunc-master/Corrfunc/mocks/DDsmu_mocks.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python wrapper around the C extension for the pair counter in
``mocks/DDsmu``. This python wrapper is :py:mod:`Corrfunc.mocks.DDsmu_mocks`
"""
from __future__ import (division, print_function, absolute_import,
unicode_literals)
import warnings
__author__ = ('Manodeep Sinha', 'Nick Hand')
__all__ = ('DDsmu_mocks', )
def DDsmu_mocks(autocorr, cosmology, nthreads, mu_max, nmu_bins, binfile,
                RA1, DEC1, CZ1, weights1=None,
                RA2=None, DEC2=None, CZ2=None, weights2=None,
                is_comoving_dist=False,
                verbose=False, output_savg=False,
                fast_divide_and_NR_steps=0,
                xbin_refine_factor=2, ybin_refine_factor=2,
                zbin_refine_factor=1, max_cells_per_dim=100,
                copy_particles=True, enable_min_sep_opt=True,
                c_api_timer=False, isa='fastest', weight_type=None):
    """
    Calculate the 2-D pair-counts corresponding to the correlation
    function, :math:`\\xi(s, \\mu)`. The pairs are counted in bins of
    radial separation and cosine of angle to the line-of-sight (LOS). The
    input positions are expected to be on-sky co-ordinates. This module is
    suitable for calculating correlation functions for mock catalogs.

    If ``weights`` are provided, the resulting pair counts are weighted. The
    weighting scheme depends on ``weight_type``.

    Returns a numpy structured array containing the pair counts for the
    specified bins.

    .. note:: This module only returns pair counts and not the actual
       correlation function :math:`\\xi(s, \\mu)`. See the
       utilities :py:mod:`Corrfunc.utils.convert_3d_counts_to_cf`
       for computing :math:`\\xi(s, \\mu)` from the pair counts.

    .. versionadded:: 2.1.0

    Parameters
    ----------
    autocorr: boolean, required
        Boolean flag for auto/cross-correlation. If autocorr is set to 1,
        then the second set of particle positions are not required.

    cosmology: integer, required
        Integer choice for setting cosmology. Valid values are 1->LasDamas
        cosmology and 2->Planck cosmology. If you need arbitrary cosmology,
        easiest way is to convert the ``CZ`` values into co-moving distance,
        based on your preferred cosmology. Set ``is_comoving_dist=True``, to
        indicate that the co-moving distance conversion has already been done.

        Choices:
        1. LasDamas cosmology. :math:`\\Omega_m=0.25`, :math:`\\Omega_\\Lambda=0.75`
        2. Planck cosmology. :math:`\\Omega_m=0.302`, :math:`\\Omega_\\Lambda=0.698`

        To setup a new cosmology, add an entry to the function,
        ``init_cosmology`` in ``ROOT/utils/cosmology_params.c`` and re-install
        the entire package.

    nthreads: integer
        The number of OpenMP threads to use. Has no effect if OpenMP was not
        enabled during library compilation.

    mu_max: double. Must be in range [0.0, 1.0]
        A double-precision value for the maximum cosine of the angular
        separation from the line of sight (LOS). Here, ``mu`` is defined as
        the angle between ``s`` and ``l``. If :math:`v_1` and :math:`v_2`
        represent the vectors to each point constituting the pair, then
        :math:`s := v_1 - v_2` and :math:`l := 1/2 (v_1 + v_2)`.

        Note: Only pairs with :math:`0 <= \\cos(\\theta_{LOS}) < \\mu_{max}`
        are counted (no equality).

    nmu_bins: int
        The number of linear ``mu`` bins, with the bins ranging from
        from (0, :math:`\\mu_{max}`)

    binfile: string or an list/array of floats
        For string input: filename specifying the ``s`` bins for
        ``DDsmu_mocks``. The file should contain white-space separated values
        of (smin, smax) specifying each ``s`` bin wanted. The bins
        need to be contiguous and sorted in increasing order (smallest bins
        come first).

        For array-like input: A sequence of ``s`` values that provides the
        bin-edges. For example,
        ``np.logspace(np.log10(0.1), np.log10(10.0), 15)`` is a valid
        input specifying **14** (logarithmic) bins between 0.1 and 10.0. This
        array does not need to be sorted.

    RA1: array-like, real (float/double)
        The array of Right Ascensions for the first set of points. RA's
        are expected to be in [0.0, 360.0], but the code will try to fix cases
        where the RA's are in [-180, 180.0]. For peace of mind, always supply
        RA's in [0.0, 360.0].

        Calculations are done in the precision of the supplied arrays.

    DEC1: array-like, real (float/double)
        Array of Declinations for the first set of points. DEC's are expected
        to be in the [-90.0, 90.0], but the code will try to fix cases where
        the DEC's are in [0.0, 180.0]. Again, for peace of mind, always supply
        DEC's in [-90.0, 90.0].

        Must be of same precision type as RA1.

    CZ1: array-like, real (float/double)
        Array of (Speed Of Light * Redshift) values for the first set of
        points. Code will try to detect cases where ``redshifts`` have been
        passed and multiply the entire array with the ``speed of light``.

        If is_comoving_dist is set, then ``CZ1`` is interpreted as the
        co-moving distance, rather than `cz`.

    weights1: array_like, real (float/double), optional
        A scalar, or an array of weights of shape (n_weights, n_positions)
        or (n_positions,). `weight_type` specifies how these weights are used;
        results are returned in the `weightavg` field. If only one of
        ``weights1`` or ``weights2`` is specified, the other will be set
        to uniform weights.

    RA2: array-like, real (float/double)
        The array of Right Ascensions for the second set of points. RA's
        are expected to be in [0.0, 360.0], but the code will try to fix cases
        where the RA's are in [-180, 180.0]. For peace of mind, always supply
        RA's in [0.0, 360.0].

        Must be of same precision type as RA1/DEC1/CZ1.

    DEC2: array-like, real (float/double)
        Array of Declinations for the second set of points. DEC's are expected
        to be in the [-90.0, 90.0], but the code will try to fix cases where
        the DEC's are in [0.0, 180.0]. Again, for peace of mind, always supply
        DEC's in [-90.0, 90.0].

        Must be of same precision type as RA1/DEC1/CZ1.

    CZ2: array-like, real (float/double)
        Array of (Speed Of Light * Redshift) values for the second set of
        points. Code will try to detect cases where ``redshifts`` have been
        passed and multiply the entire array with the ``speed of light``.

        If is_comoving_dist is set, then ``CZ2`` is interpreted as the
        co-moving distance, rather than `cz`.

        Must be of same precision type as RA1/DEC1/CZ1.

    weights2: array-like, real (float/double), optional
        Same as weights1, but for the second set of positions

    is_comoving_dist: boolean (default false)
        Boolean flag to indicate that ``cz`` values have already been
        converted into co-moving distances. This flag allows arbitrary
        cosmologies to be used in ``Corrfunc``.

    verbose: boolean (default false)
        Boolean flag to control output of informational messages

    output_savg: boolean (default false)
        Boolean flag to output the average ``s`` for each bin. Code will
        run slower if you set this flag. Also, note, if you are calculating
        in single-precision, ``savg`` will suffer from numerical loss of
        precision and can not be trusted. If you need accurate ``savg``
        values, then pass in double precision arrays for the particle
        positions.

    fast_divide_and_NR_steps: integer (default 0)
        Replaces the division in ``AVX`` implementation with an approximate
        reciprocal, followed by ``fast_divide_and_NR_steps`` of Newton-Raphson.
        Can improve runtime by ~15-20% on older computers. Value of 0 uses
        the standard division operation.

    (xyz)bin_refine_factor: integer, default is (2,2,1); typically within [1-3]
        Controls the refinement on the cell sizes. Can have up to a 20% impact
        on runtime.

    max_cells_per_dim: integer, default is 100, typical values in [50-300]
        Controls the maximum number of cells per dimension. Total number of
        cells can be up to (max_cells_per_dim)^3. Only increase if ``rpmax`` is
        too small relative to the boxsize (and increasing helps the runtime).

    copy_particles: boolean (default True)
        Boolean flag to make a copy of the particle positions
        If set to False, the particles will be re-ordered in-place

        .. versionadded:: 2.3.0

    enable_min_sep_opt: boolean (default true)
        Boolean flag to allow optimizations based on min. separation between
        pairs of cells. Here to allow for comparison studies.

        .. versionadded:: 2.3.0

    c_api_timer: boolean (default false)
        Boolean flag to measure actual time spent in the C libraries. Here
        to allow for benchmarking and scaling studies.

    isa: string, case-insensitive (default ``fastest``)
        Controls the runtime dispatch for the instruction set to use. Options
        are: [``fastest``, ``avx512f``, ``avx``, ``sse42``, ``fallback``]

        Setting isa to ``fastest`` will pick the fastest available instruction
        set on the current computer. However, if you set ``isa`` to, say,
        ``avx`` and ``avx`` is not available on the computer, then the code will
        revert to using ``fallback`` (even though ``sse42`` might be available).

        Unless you are benchmarking the different instruction sets, you should
        always leave ``isa`` to the default value. And if you *are*
        benchmarking, then the string supplied here gets translated into an
        ``enum`` for the instruction set defined in ``utils/defs.h``.

    weight_type: string, optional (default None)
        The type of weighting to apply. One of ["pair_product", None].

    Returns
    --------
    results: Numpy structured array
        A numpy structured array containing [smin, smax, savg, mumax,
        npairs, weightavg]. There are a total of ``nmu_bins`` in ``mu``
        for each separation bin specified in the ``binfile``, with ``mumax``
        being the upper limit of the ``mu`` bin. If ``output_savg`` is not
        set, then ``savg`` will be set to 0.0 for all bins; similarly for
        ``weightavg``. ``npairs`` contains the number of pairs in that bin
        and can be used to compute the actual :math:`\\xi(s, \\mu)` by
        combining with (DR, RR) counts.

    api_time: float, optional
        Only returned if ``c_api_timer`` is set. ``api_time`` measures only
        the time spent within the C library and ignores all python overhead.
    """
    # Import lazily so a missing compiled extension fails at call time with
    # a clear message rather than at package import.
    try:
        from Corrfunc._countpairs_mocks import countpairs_s_mu_mocks as\
            DDsmu_extn
    except ImportError:
        # NOTE(review): adjacent string literals concatenate without a
        # space, so the message reads "on-skypair counter" — upstream typo,
        # left unchanged here (runtime string).
        msg = "Could not import the C extension for the on-sky"\
              "pair counter."
        raise ImportError(msg)

    import numpy as np
    from Corrfunc.utils import translate_isa_string_to_enum, fix_ra_dec,\
        return_file_with_rbins, convert_to_native_endian,\
        sys_pipes, process_weights
    from future.utils import bytes_to_native_str

    # Check if mu_max is scalar
    if not np.isscalar(mu_max):
        # NOTE(review): message contains a duplicated "not not" — upstream
        # typo in a runtime string, left unchanged.
        msg = "The parameter `mu_max` = {0}, has size = {1}. "\
              "The code is expecting a scalar quantity (and not "\
              "not a list, array)".format(mu_max, np.size(mu_max))
        raise TypeError(msg)

    # Check that mu_max is within (0.0, 1.0]
    if mu_max <= 0.0 or mu_max > 1.0:
        msg = "The parameter `mu_max` = {0}, is the max. of cosine of an "\
              "angle and should be within (0.0, 1.0]".format(mu_max)
        raise ValueError(msg)

    if not autocorr:
        if RA2 is None or DEC2 is None or CZ2 is None:
            msg = "Must pass valid arrays for RA2/DEC2/CZ2 for "\
                  "computing cross-correlation"
            raise ValueError(msg)
    else:
        # Dummy one-element arrays keep the C extension's argument parsing
        # happy when the second dataset is unused (autocorrelation).
        RA2 = np.empty(1)
        DEC2 = np.empty(1)
        CZ2 = np.empty(1)

    weights1, weights2 = process_weights(weights1, weights2, RA1, RA2, weight_type, autocorr)

    # Ensure all input arrays are native endian
    RA1, DEC1, CZ1, weights1, RA2, DEC2, CZ2, weights2 = [
        convert_to_native_endian(arr, warn=True) for arr in
        [RA1, DEC1, CZ1, weights1, RA2, DEC2, CZ2, weights2]]

    # Normalise RA/DEC conventions in place (see parameter docs above).
    fix_ra_dec(RA1, DEC1)
    if autocorr == 0:
        fix_ra_dec(RA2, DEC2)

    # Passing None parameters breaks the parsing code, so avoid this.
    # The locals() lookup depends on the names bound above — do not rename.
    kwargs = {}
    for k in ['weights1', 'weights2', 'weight_type', 'RA2', 'DEC2', 'CZ2']:
        v = locals()[k]
        if v is not None:
            kwargs[k] = v

    integer_isa = translate_isa_string_to_enum(isa)
    sbinfile, delete_after_use = return_file_with_rbins(binfile)

    # Warn about float32 precision loss when mu_max is close to 1
    # (see GitHub issue #296).
    warn_large_mu(mu_max,
                  # RA and DEC are checked to be the same dtype
                  dtype=RA1.dtype,
                  )

    # sys_pipes() forwards the C library's stdout/stderr through Python
    # (needed e.g. in Jupyter).
    with sys_pipes():
        extn_results = DDsmu_extn(autocorr, cosmology, nthreads,
                                  mu_max, nmu_bins, sbinfile,
                                  RA1, DEC1, CZ1,
                                  is_comoving_dist=is_comoving_dist,
                                  verbose=verbose,
                                  output_savg=output_savg,
                                  fast_divide_and_NR_steps=fast_divide_and_NR_steps,
                                  xbin_refine_factor=xbin_refine_factor,
                                  ybin_refine_factor=ybin_refine_factor,
                                  zbin_refine_factor=zbin_refine_factor,
                                  max_cells_per_dim=max_cells_per_dim,
                                  copy_particles=copy_particles,
                                  enable_min_sep_opt=enable_min_sep_opt,
                                  c_api_timer=c_api_timer,
                                  isa=integer_isa, **kwargs)
    if extn_results is None:
        msg = "RuntimeError occurred"
        raise RuntimeError(msg)
    else:
        extn_results, api_time = extn_results

    # Clean up the temporary bin file created for array-like binfile input.
    if delete_after_use:
        import os
        os.remove(sbinfile)

    # Repackage the list of tuples from the C extension as a structured
    # array (field order matches the tuple order from the extension).
    results_dtype = np.dtype([(bytes_to_native_str(b'smin'), np.float64),
                              (bytes_to_native_str(b'smax'), np.float64),
                              (bytes_to_native_str(b'savg'), np.float64),
                              (bytes_to_native_str(b'mumax'), np.float64),
                              (bytes_to_native_str(b'npairs'), np.uint64),
                              (bytes_to_native_str(b'weightavg'), np.float64)])
    nbin = len(extn_results)
    results = np.zeros(nbin, dtype=results_dtype)
    for ii, r in enumerate(extn_results):
        results['smin'][ii] = r[0]
        results['smax'][ii] = r[1]
        results['savg'][ii] = r[2]
        results['mumax'][ii] = r[3]
        results['npairs'][ii] = r[4]
        results['weightavg'][ii] = r[5]

    if not c_api_timer:
        return results
    else:
        return results, api_time
def warn_large_mu(mu_max, dtype):
    '''
    Warn when mu_max is close to 1 and the input data is single precision.

    Small theta values (large mu) underflow float32.
    Context: https://github.com/manodeep/Corrfunc/issues/296 (see also #297)
    '''
    # Only warn for <= 4-byte (float32 or narrower) data with mu_max above
    # the threshold (0.9800666 ~ cos(0.2 rad) — matches the original
    # "cos(0.2)" comment; TODO confirm the radians/degrees intent upstream).
    single_precision = dtype.itemsize <= 4
    if single_precision and mu_max >= 0.9800666:
        warnings.warn("""
Be aware that small angular pair separations (mu near 1) will suffer from loss
of floating-point precision, as the input data is in float32 precision or
lower. In float32, the loss of precision is 1% in mu at separations of 0.2
degrees, and larger at smaller separations.
For more information, see:
https://github.com/manodeep/Corrfunc/issues/296 (see also #297)
"""
                      )
# Run this module's doctests when executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
manodeepREPO_NAMECorrfuncPATH_START.@Corrfunc_extracted@Corrfunc-master@Corrfunc@mocks@DDsmu_mocks.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "davidharvey1986/pyRRG",
"repo_path": "pyRRG_extracted/pyRRG-master/unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_vendor/distlib/_backport/__init__.py",
"type": "Python"
}
|
"""Modules copied from Python 3 standard libraries, for internal use only.
Individual classes and functions are found in d2._backport.misc. Intended
usage is to always import things missing from 3.1 from that module: the
built-in/stdlib objects will be used if found.
"""
|
davidharvey1986REPO_NAMEpyRRGPATH_START.@pyRRG_extracted@pyRRG-master@unittests@bugFixPyRRG@lib@python3.7@site-packages@pip@_vendor@distlib@_backport@__init__.py@.PATH_END.py
|
{
"filename": "unbatch_benchmark.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/data/experimental/benchmarks/unbatch_benchmark.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for `tf.data.Dataset.unbatch()`."""
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.data.ops import dataset_ops
class UnbatchBenchmark(benchmark_base.DatasetBenchmarkBase):
  """Benchmarks for `tf.data.Dataset.unbatch()`."""

  # Shared benchmark configuration.
  _BATCH_SIZES = [1, 2, 5, 10, 20, 50]
  _NUM_ELEMENTS = 10000

  def _run_unbatch_benchmark(self, unbatch_fn, model_name, name_template):
    """Benchmarks `unbatch_fn` applied to a batched string dataset.

    Args:
      unbatch_fn: Callable mapping a batched dataset to its unbatched form.
      model_name: Value for the "model_name" benchmark extra.
      name_template: Benchmark name pattern with a %d slot for batch size.
    """
    for batch_size in self._BATCH_SIZES:
      dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
      dataset = dataset.batch(batch_size)
      dataset = unbatch_fn(dataset)
      self.run_and_report_benchmark(
          dataset=dataset,
          num_elements=self._NUM_ELEMENTS,
          iters=5,
          extras={
              "model_name": model_name,
              "parameters": "%d" % batch_size,
          },
          name=name_template % batch_size)

  def benchmark_native_unbatch(self):
    """Benchmarks the fused native `unbatch()` implementation."""
    self._run_unbatch_benchmark(
        lambda dataset: dataset.unbatch(),
        model_name="unbatch.benchmark.1",
        name_template="native_batch_size_%d")

  # Include a benchmark of the previous `unbatch()` implementation that uses
  # a composition of more primitive ops. Eventually we'd hope to generate code
  # that is as good in both cases.
  def benchmark_old_unbatch_implementation(self):
    """Benchmarks unbatching via `flat_map(from_tensor_slices)`."""
    self._run_unbatch_benchmark(
        lambda dataset: dataset.flat_map(
            dataset_ops.Dataset.from_tensor_slices),
        model_name="unbatch.benchmark.2",
        name_template="unfused_batch_size_%d")
# Run the benchmarks when executed directly.
if __name__ == "__main__":
  benchmark_base.test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@data@experimental@benchmarks@unbatch_benchmark.py@.PATH_END.py
|
{
"filename": "residuals_grid_CA.py",
"repo_name": "annayqho/TheCannon",
"repo_path": "TheCannon_extracted/TheCannon-master/code/lamost/xcalib_5labels/paper_plots/residuals_grid_CA.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib.gridspec as gridspec
from matplotlib.colors import LogNorm
from math import log10, floor
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
import numpy as np
from pylab import rcParams
def round_sig(x, sig=1):
    """Round `x` to `sig` significant figures (default 1).

    Handles negative values symmetrically. Returns 0 unchanged — the
    original raised a math domain error on log10(0), and x can plausibly
    be 0 here since it is fed np.mean/np.std of residuals.
    """
    if x == 0:
        return 0
    if x < 0:
        return -round(-x, sig - int(floor(log10(-x))) - 1)
    return round(x, sig - int(floor(log10(x))) - 1)
# LaTeX-formatted label names, units, and per-panel plot ranges for the
# three labels shown (Teff, log g, [Fe/H]).
names = ['\mbox{T}_{\mbox{eff}},', '\mbox{log g}', '\mbox{[Fe/H]}']
units = ['\mbox{K}', '\mbox{dex}', '\mbox{dex}']
snr_str = r'SNR $\textgreater$ 100'
y_highs = [300, 0.5, 0.3]
# NOTE(review): x_lows/x_highs carry four entries but only three panels
# are drawn, so the final entry of each is unused — confirm intent.
x_lows = [4000, 1.1, -2.0, -0.08]
x_highs = [5300, 3.8, 0.5, 0.4]

# Load LAMOST IDs, signal-to-noise ratios, APOGEE reference labels and
# the Cannon-derived labels from the label-transfer output directory.
direc = "/users/annaho/Data/LAMOST/Label_Transfer"
tr_id = np.load("%s/tr_id.npz" %direc)['arr_0']
tr_id = np.array([val.decode('utf-8') for val in tr_id])
snr = np.load("%s/tr_snr.npz" %direc)['arr_0']
apogee = np.load("%s/tr_label.npz" %direc)['arr_0']
cannon = np.load("%s/all_cannon_labels.npz" %direc)['arr_0']

# One panel per label: residuals (Cannon - APOGEE) against APOGEE value.
fig,axarr = plt.subplots(1,3)#,figsize=(17,7))
props = dict(boxstyle='round', facecolor='white', alpha=0.3)
for i in range(0, len(names)):
    name = names[i]
    unit = units[i]
    ax = axarr[i]
    ax.axhline(y=0, c='k')
    # Keep only high signal-to-noise objects.
    choose = snr > 100
    diff = (cannon[:,i] - apogee[:,i])[choose]
    # Quote scatter and bias to one significant figure.
    scatter = round_sig(np.std(diff), 1)
    bias = round_sig(np.mean(diff), 1)
    # 2-D histogram of residuals on a logarithmic colour scale.
    im = ax.hist2d(
        apogee[:,i][choose], diff,
        range=[[x_lows[i], x_highs[i]], [-y_highs[i], y_highs[i]]],
        bins=30, norm=LogNorm(), cmap="gray_r")
    ax.locator_params(nbins=5)
    ax.tick_params(axis='y', labelsize=20)
    ax.tick_params(axis='x', labelsize=20)
    ax.set_title(r"$\Delta %s_{\mbox{C/L-A}}$ [%s]" %(name, unit), fontsize=20)
    ax.set_xlabel("$%s$ [%s] from APOGEE" %(name, unit), fontsize=20)
    textstr1 = snr_str
    ax.text(0.05, 0.95, textstr1, transform=ax.transAxes,
            fontsize=20, verticalalignment='top', bbox=props)
    textstr2 = 'Scatter: %s \nBias: %s' %(scatter, bias)
    ax.text(0.05, 0.05, textstr2, transform=ax.transAxes,
            fontsize=20, verticalalignment='bottom', bbox=props)

# Single shared colorbar on the right-hand side of the figure.
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.1, 0.02, 0.8])
cbar = plt.colorbar(im[3], cax=cbar_ax)
cbar.ax.tick_params(labelsize=16)
cbar.set_label("Number of Objects", size=16)
rcParams['figure.figsize'] = 8,3
#plt.savefig("residuals_grid_5label.png")
plt.show()
|
annayqhoREPO_NAMETheCannonPATH_START.@TheCannon_extracted@TheCannon-master@code@lamost@xcalib_5labels@paper_plots@residuals_grid_CA.py@.PATH_END.py
|
{
"filename": "test_massfunction.py",
"repo_name": "LSSTDESC/CCL",
"repo_path": "CCL_extracted/CCL-master/pyccl/tests/test_massfunction.py",
"type": "Python"
}
|
import numpy as np
import pytest
import pyccl as ccl
# Fiducial cosmology shared by the smoke tests in this module; the BBKS
# transfer function + halofit keep the power spectrum cheap to evaluate.
COSMO = ccl.Cosmology(
    Omega_c=0.27, Omega_b=0.045, h=0.67, sigma8=0.8, n_s=0.96,
    transfer_function='bbks', matter_power_spectrum='halofit')
@pytest.mark.parametrize('m', [
    1e14,
    int(1e14),
    [1e14, 1e15],
    np.array([1e14, 1e15])])
def test_massfunc_smoke(m):
    """Mass function is finite and preserves the shape of its input."""
    scale_factor = 0.8
    mass_func = ccl.halos.MassFuncTinker10()
    nm = mass_func(COSMO, m, scale_factor)
    assert np.isfinite(nm).all()
    assert np.shape(nm) == np.shape(m)
@pytest.mark.parametrize('m', [
    1e14,
    int(1e14),
    [1e14, 1e15],
    np.array([1e14, 1e15])])
def test_sigmaM_smoke(m):
    """sigma(M) is finite and preserves the shape of its input."""
    scale_factor = 0.8
    sigma = ccl.sigmaM(COSMO, m, scale_factor)
    assert np.isfinite(sigma).all()
    assert np.shape(sigma) == np.shape(m)
def test_deltac():
    """get_delta_c reproduces the EdS and Mead16 critical overdensities."""
    cosmo = ccl.Cosmology(Omega_c=0.25, Omega_b=0.05,
                          h=0.7, n_s=0.96, sigma8=0.8,
                          transfer_function='bbks')
    # Einstein-de Sitter value: (3/20) * (12*pi)^(2/3)
    expected = 3 * (12 * np.pi) ** (2 / 3) / 20
    computed = ccl.halos.get_delta_c(cosmo, 1.0, kind='EdS')
    assert np.fabs(computed / expected - 1) < 1E-5
    # Mead et al. fitting formula evaluated at this cosmology's
    # sigma8=0.8 and Omega_m=0.3.
    expected = (1.59 + 0.0314 * np.log(0.8)) * (1 + 0.0123 * np.log10(0.3))
    computed = ccl.halos.get_delta_c(cosmo, 1.0, kind='Mead16')
    assert np.fabs(computed / expected - 1) < 1E-5
|
LSSTDESCREPO_NAMECCLPATH_START.@CCL_extracted@CCL-master@pyccl@tests@test_massfunction.py@.PATH_END.py
|
{
"filename": "version.py",
"repo_name": "cpiaulet/smint",
"repo_path": "smint_extracted/smint-master/smint/version.py",
"type": "Python"
}
|
__version__ = "1.0.0"
|
cpiauletREPO_NAMEsmintPATH_START.@smint_extracted@smint-master@smint@version.py@.PATH_END.py
|
{
"filename": "browserless.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/document_loaders/browserless.ipynb",
"type": "Jupyter Notebook"
}
|
# Browserless
Browserless is a service that allows you to run headless Chrome instances in the cloud. It's a great way to run browser-based automation at scale without having to worry about managing your own infrastructure.
To use Browserless as a document loader, initialize a `BrowserlessLoader` instance as shown in this notebook. Note that by default, `BrowserlessLoader` returns the `innerText` of the page's `body` element. To disable this and get the raw HTML, set `text_content` to `False`.
```python
from langchain_community.document_loaders import BrowserlessLoader
```
```python
BROWSERLESS_API_TOKEN = "YOUR_BROWSERLESS_API_TOKEN"
```
```python
loader = BrowserlessLoader(
api_token=BROWSERLESS_API_TOKEN,
urls=[
"https://en.wikipedia.org/wiki/Document_classification",
],
text_content=True,
)
documents = loader.load()
print(documents[0].page_content[:1000])
```
Jump to content
Main menu
Search
Create account
Log in
Personal tools
Toggle the table of contents
Document classification
17 languages
Article
Talk
Read
Edit
View history
Tools
From Wikipedia, the free encyclopedia
Document classification or document categorization is a problem in library science, information science and computer science. The task is to assign a document to one or more classes or categories. This may be done "manually" (or "intellectually") or algorithmically. The intellectual classification of documents has mostly been the province of library science, while the algorithmic classification of documents is mainly in information science and computer science. The problems are overlapping, however, and there is therefore interdisciplinary research on document classification.
The documents to be classified may be texts, images, music, etc. Each kind of document possesses its special classification problems. When not otherwise specified, text classification is implied.
Do
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@document_loaders@browserless.ipynb@.PATH_END.py
|
{
"filename": "server.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/xmlrpc/server.py",
"type": "Python"
}
|
r"""XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
The Doc* classes can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the sys functions available through sys.func_name
import sys
self.sys = sys
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the sys methods
return list_public_methods(self) + \
['sys.' + method for method in list_public_methods(self.sys)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise ValueError('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode
from http.server import BaseHTTPRequestHandler
from functools import partial
from inspect import signature
import html
import http.server
import socketserver
import sys
import os
import re
import pydoc
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolves a dotted attribute name to an object. Raises
    an AttributeError if any attribute in the chain starts with a '_'.

    If the optional allow_dotted_names argument is false, dots are not
    supported and this function operates similar to getattr(obj, attr).
    """
    # With dotted names disabled, the whole string is one attribute name.
    parts = attr.split('.') if allow_dotted_names else [attr]
    for name in parts:
        # Reject private attributes anywhere along the chain.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
                )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""
    public = []
    for name in dir(obj):
        # Skip private/dunder names and non-callable attributes.
        if name.startswith('_'):
            continue
        if callable(getattr(obj, name)):
            public.append(name)
    return public
class SimpleXMLRPCDispatcher:
"""Mix-in class that dispatches XML-RPC requests.
This class is used to register XML-RPC method handlers
and then to dispatch them. This class doesn't need to be
instanced directly when used by SimpleXMLRPCServer but it
can be instanced when used by the MultiPathXMLRPCServer
"""
    def __init__(self, allow_none=False, encoding=None,
                 use_builtin_types=False):
        # Mapping of registered name -> callable, filled by
        # register_function() and the register_*_functions() helpers.
        self.funcs = {}
        # Optional object installed via register_instance(); at most one.
        self.instance = None
        # Whether None may be marshalled in responses.
        self.allow_none = allow_none
        # Encoding used for XML responses; defaults to UTF-8.
        self.encoding = encoding or 'utf-8'
        # If true, unmarshal XML-RPC dates/binary into built-in Python
        # types rather than xmlrpc.client wrapper classes.
        self.use_builtin_types = use_builtin_types
def register_instance(self, instance, allow_dotted_names=False):
"""Registers an instance to respond to XML-RPC requests.
Only one instance can be installed at a time.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called. Methods beginning with an '_'
are considered private and will not be called by
SimpleXMLRPCServer.
If a registered function matches an XML-RPC request, then it
will be called instead of the registered instance.
If the optional allow_dotted_names argument is true and the
instance does not have a _dispatch method, method names
containing dots are supported and resolved, as long as none of
the name segments start with an '_'.
*** SECURITY WARNING: ***
Enabling the allow_dotted_names options allows intruders
to access your module's global variables and may allow
intruders to execute arbitrary code on your machine. Only
use this option on a secure, closed network.
"""
self.instance = instance
self.allow_dotted_names = allow_dotted_names
def register_function(self, function=None, name=None):
"""Registers a function to respond to XML-RPC requests.
The optional name argument can be used to set a Unicode name
for the function.
"""
# decorator factory
if function is None:
return partial(self.register_function, name=name)
if name is None:
name = function.__name__
self.funcs[name] = function
return function
def register_introspection_functions(self):
"""Registers the XML-RPC introspection methods in the system
namespace.
see http://xmlrpc.usefulinc.com/doc/reserved.html
"""
self.funcs.update({'system.listMethods' : self.system_listMethods,
'system.methodSignature' : self.system_methodSignature,
'system.methodHelp' : self.system_methodHelp})
def register_multicall_functions(self):
"""Registers the XML-RPC multicall method in the system
namespace.
see http://www.xmlrpc.com/discuss/msgReader$1208"""
self.funcs.update({'system.multicall' : self.system_multicall})
def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
"""Dispatches an XML-RPC method from marshalled (XML) data.
XML-RPC methods are dispatched from the marshalled (XML) data
using the _dispatch method and the result is returned as
marshalled data. For backwards compatibility, a dispatch
function can be provided as an argument (see comment in
SimpleXMLRPCRequestHandler.do_POST) but overriding the
existing method through subclassing is the preferred means
of changing method dispatch behavior.
"""
try:
params, method = loads(data, use_builtin_types=self.use_builtin_types)
# generate response
if dispatch_method is not None:
response = dispatch_method(method, params)
else:
response = self._dispatch(method, params)
# wrap response in a singleton tuple
response = (response,)
response = dumps(response, methodresponse=1,
allow_none=self.allow_none, encoding=self.encoding)
except Fault as fault:
response = dumps(fault, allow_none=self.allow_none,
encoding=self.encoding)
except BaseException as exc:
response = dumps(
Fault(1, "%s:%s" % (type(exc), exc)),
encoding=self.encoding, allow_none=self.allow_none,
)
return response.encode(self.encoding, 'xmlcharrefreplace')
def system_listMethods(self):
"""system.listMethods() => ['add', 'subtract', 'multiple']
Returns a list of the methods supported by the server."""
methods = set(self.funcs.keys())
if self.instance is not None:
# Instance can implement _listMethod to return a list of
# methods
if hasattr(self.instance, '_listMethods'):
methods |= set(self.instance._listMethods())
# if the instance has a _dispatch method then we
# don't have enough information to provide a list
# of methods
elif not hasattr(self.instance, '_dispatch'):
methods |= set(list_public_methods(self.instance))
return sorted(methods)
def system_methodSignature(self, method_name):
"""system.methodSignature('add') => [double, int, int]
Returns a list describing the signature of the method. In the
above example, the add method takes two integers as arguments
and returns a double result.
This server does NOT support system.methodSignature."""
# See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
return 'signatures not supported'
def system_methodHelp(self, method_name):
"""system.methodHelp('add') => "Adds two integers together"
Returns a string containing documentation for the specified method."""
method = None
if method_name in self.funcs:
method = self.funcs[method_name]
elif self.instance is not None:
# Instance can implement _methodHelp to return help for a method
if hasattr(self.instance, '_methodHelp'):
return self.instance._methodHelp(method_name)
# if the instance has a _dispatch method then we
# don't have enough information to provide help
elif not hasattr(self.instance, '_dispatch'):
try:
method = resolve_dotted_attribute(
self.instance,
method_name,
self.allow_dotted_names
)
except AttributeError:
pass
# Note that we aren't checking that the method actually
# be a callable object of some kind
if method is None:
return ""
else:
return pydoc.getdoc(method)
def system_multicall(self, call_list):
"""system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
Allows the caller to package multiple XML-RPC calls into a single
request.
See http://www.xmlrpc.com/discuss/msgReader$1208
"""
results = []
for call in call_list:
method_name = call['methodName']
params = call['params']
try:
# XXX A marshalling error in any response will fail the entire
# multicall. If someone cares they should fix this.
results.append([self._dispatch(method_name, params)])
except Fault as fault:
results.append(
{'faultCode' : fault.faultCode,
'faultString' : fault.faultString}
)
except BaseException as exc:
results.append(
{'faultCode' : 1,
'faultString' : "%s:%s" % (type(exc), exc)}
)
return results
def _dispatch(self, method, params):
"""Dispatches the XML-RPC method.
XML-RPC calls are forwarded to a registered function that
matches the called XML-RPC method name. If no such function
exists then the call is forwarded to the registered instance,
if available.
If the registered instance has a _dispatch method then that
method will be called with the name of the XML-RPC method and
its parameters as a tuple
e.g. instance._dispatch('add',(2,3))
If the registered instance does not have a _dispatch method
then the instance will be searched to find a matching method
and, if found, will be called.
Methods beginning with an '_' are considered private and will
not be called.
"""
try:
# call the matching registered function
func = self.funcs[method]
except KeyError:
pass
else:
if func is not None:
return func(*params)
raise Exception('method "%s" is not supported' % method)
if self.instance is not None:
if hasattr(self.instance, '_dispatch'):
# call the `_dispatch` method on the instance
return self.instance._dispatch(method, params)
# call the instance's method directly
try:
func = resolve_dotted_attribute(
self.instance,
method,
self.allow_dotted_names
)
except AttributeError:
pass
else:
if func is not None:
return func(*params)
raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """
    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2', '/pydoc.css')
    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU
    #Override form StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True
    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)
    def accept_encodings(self):
        # Parse the Accept-Encoding header into a {coding: q-value} dict;
        # a missing q defaults to 1.0.
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r
    def is_rpc_path_valid(self):
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True
    def do_POST(self):
        """Handles the HTTP POST request.
        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                chunk = self.rfile.read(chunk_size)
                if not chunk:
                    break
                L.append(chunk)
                size_remaining -= len(L[-1])
            data = b''.join(L)
            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent
            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                data, getattr(self, '_dispatch', None), self.path
            )
        except Exception as e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)
            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                trace = traceback.format_exc()
                trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
                self.send_header("X-traceback", trace)
            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            # Optionally gzip-compress large responses when the client
            # advertised gzip support in Accept-Encoding.
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)
    def decode_request_content(self, data):
        #support gzip encoding of request
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        # Reached only on error: returning None tells do_POST that the
        # error response has already been sent.
        self.send_header("Content-length", "0")
        self.end_headers()
    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = b'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        # Honors the server's logRequests flag set in __init__.
        if self.server.logRequests:
            BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(socketserver.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.
    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inherited
    from SimpleXMLRPCDispatcher to change this behavior.
    """
    allow_reuse_address = True
    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):
        # logRequests is read by SimpleXMLRPCRequestHandler.log_request().
        self.logRequests = logRequests
        # Initialize both bases explicitly: the dispatcher first, then the
        # TCP server (which may bind/activate the socket immediately).
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
        socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server
    This specialization of SimpleXMLRPCServer allows the user to create
    multiple Dispatcher instances and assign them to different
    HTTP request paths. This makes it possible to run two or more
    'virtual XML-RPC servers' at the same port.
    Make sure that the requestHandler accepts the paths in question.
    """
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
                                    encoding, bind_and_activate, use_builtin_types)
        # Maps HTTP request path -> SimpleXMLRPCDispatcher instance.
        self.dispatchers = {}
        # Kept locally as well because _marshaled_dispatch() below needs
        # them when marshalling its own error Fault.
        self.allow_none = allow_none
        self.encoding = encoding or 'utf-8'
    def add_dispatcher(self, path, dispatcher):
        self.dispatchers[path] = dispatcher
        return dispatcher
    def get_dispatcher(self, path):
        return self.dispatchers[path]
    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        # Route the request to the dispatcher registered for this path.
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
               data, dispatch_method, path)
        except BaseException as exc:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            response = dumps(
                Fault(1, "%s:%s" % (type(exc), exc)),
                encoding=self.encoding, allow_none=self.allow_none)
            response = response.encode(self.encoding, 'xmlcharrefreplace')
        return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""
    def __init__(self, allow_none=False, encoding=None, use_builtin_types=False):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding, use_builtin_types)
    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""
        response = self._marshaled_dispatch(request_text)
        # CGI protocol: headers go through the text stream, the body is
        # written to the underlying binary buffer after a flush.
        print('Content-Type: text/xml')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def handle_get(self):
        """Handle a single HTTP GET request.
        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """
        code = 400
        message, explain = BaseHTTPRequestHandler.responses[code]
        response = http.server.DEFAULT_ERROR_MESSAGE % \
            {
             'code' : code,
             'message' : message,
             'explain' : explain
            }
        response = response.encode('utf-8')
        print('Status: %d %s' % (code, message))
        print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE)
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.
        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """
        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                # int(None) raises TypeError when CONTENT_LENGTH is unset.
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (ValueError, TypeError):
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)
            self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server.
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        # NOTE: the dict defaults are only read here, never mutated, so
        # sharing them across calls is safe.
        escape = escape or self.escape
        results = []
        here = 0
        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|https|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while match := pattern.search(text, here):
            start, end = match.span()
            # Emit the escaped literal text between matches, then the
            # hyperlinked form of the match itself.
            results.append(escape(text[here:start]))
            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'https://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'https://peps.python.org/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)
    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))
        if callable(object):
            argspec = str(signature(object))
        else:
            argspec = '(...)'
        # A tuple stands for a pre-computed (argspec, docstring) pair, as
        # built by XMLRPCDocGenerator.generate_html_documentation().
        if isinstance(object, tuple):
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)
        decl = title + argspec + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))
        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)
    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""
        # Anchor map: both the method name and the method object point at
        # the same '#-name' fragment so markup() can link either.
        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]
        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head)
        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc
        contents = []
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', 'functions', ''.join(contents))
        return result
    def page(self, title, contents):
        """Format an HTML page."""
        # The stylesheet is served by DocXMLRPCRequestHandler at /pydoc.css.
        css_path = "/pydoc.css"
        css_link = (
            '<link rel="stylesheet" type="text/css" href="%s">' %
            css_path)
        return '''\
<!DOCTYPE>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Python: %s</title>
%s</head><body>%s</body></html>''' % (title, css_link, contents)
class XMLRPCDocGenerator:
    """Generates documentation for an XML-RPC server.
    This class is designed as mix-in and should not
    be constructed directly.
    """
    def __init__(self):
        # setup variables used for HTML documentation
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'
    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title
    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name
    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation
    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server
        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        methods = {}
        # Resolve each advertised method name to either a callable or an
        # (argspec, docstring) tuple, which docroutine() knows how to render.
        for method_name in self.system_listMethods():
            if method_name in self.funcs:
                method = self.funcs[method_name]
            elif self.instance is not None:
                method_info = [None, None] # argspec, documentation
                if hasattr(self.instance, '_get_method_argstring'):
                    method_info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    method_info[1] = self.instance._methodHelp(method_name)
                method_info = tuple(method_info)
                if method_info != (None, None):
                    method = method_info
                elif not hasattr(self.instance, '_dispatch'):
                    try:
                        method = resolve_dotted_attribute(
                                    self.instance,
                                    method_name
                                    )
                    except AttributeError:
                        method = method_info
                else:
                    method = method_info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"
            methods[method_name] = method
        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                                self.server_name,
                                self.server_documentation,
                                methods
                            )
        return documenter.page(html.escape(self.server_title), documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    Handles all HTTP GET requests and interprets them as requests
    for documentation.
    """
    def _get_css(self, url):
        # The stylesheet ships with pydoc's data files, one directory up
        # from this module.
        path_here = os.path.dirname(os.path.realpath(__file__))
        css_path = os.path.join(path_here, "..", "pydoc_data", "_pydoc.css")
        with open(css_path, mode="rb") as fp:
            return fp.read()
    def do_GET(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        # Serve the stylesheet directly; everything else gets the
        # generated HTML documentation page.
        if self.path.endswith('.css'):
            content_type = 'text/css'
            response = self._get_css(self.path)
        else:
            content_type = 'text/html'
            response = self.server.generate_html_documentation().encode('utf-8')
        self.send_response(200)
        self.send_header('Content-Type', '%s; charset=UTF-8' % content_type)
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
class DocXMLRPCServer(  SimpleXMLRPCServer,
                        XMLRPCDocGenerator):
    """XML-RPC and HTML documentation server.
    Adds the ability to serve server documentation to the capabilities
    of SimpleXMLRPCServer.
    """
    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True, use_builtin_types=False):
        # Initialize both bases: the XML-RPC server machinery and the
        # documentation-generation state (server_name/title/documentation).
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate,
                                    use_builtin_types)
        XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler(   CGIXMLRPCRequestHandler,
                                    XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""
    def handle_get(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Unlike the base class, GET is answered with the generated HTML
        # documentation instead of a 400 error.
        response = self.generate_html_documentation().encode('utf-8')
        print('Content-Type: text/html')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def __init__(self):
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)
# Demo: run a small XML-RPC server exposing pow, an 'add' lambda and an
# instance with dotted-name access (ExampleService.currentTime.getCurrentTime).
if __name__ == '__main__':
    import datetime
    class ExampleService:
        def getData(self):
            return '42'
        class currentTime:
            @staticmethod
            def getCurrentTime():
                return datetime.datetime.now()
    with SimpleXMLRPCServer(("localhost", 8000)) as server:
        server.register_function(pow)
        server.register_function(lambda x,y: x+y, 'add')
        # allow_dotted_names=True enables the nested currentTime access;
        # see the security warning on register_instance().
        server.register_instance(ExampleService(), allow_dotted_names=True)
        server.register_multicall_functions()
        print('Serving XML-RPC on localhost port 8000')
        print('It is advisable to run this example server within a secure, closed network.')
        try:
            server.serve_forever()
        except KeyboardInterrupt:
            print("\nKeyboard interrupt received, exiting.")
            sys.exit(0)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@xmlrpc@server.py@.PATH_END.py
|
{
"filename": "agnvar_example-checkpoint.ipynb",
"repo_name": "scotthgn/AGNvar",
"repo_path": "AGNvar_extracted/AGNvar-main/.ipynb_checkpoints/agnvar_example-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
A quick tutorial using agnvar. We will be using the AGNsed model (AGNsed_var), consisting of an outer standard disc (i.e. a multi-colour black-body), followed by a warm Comptonizing region, and then the hot Comptonizing corona. See Kubota & Done (2018) for details on the SED model.
```python
#Generic imports
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as cols
#Adding src directory to python path - needed as agnvar uses pyNTHCOMP, this is easiest way of NOT breaking code...
import os
import sys
sys.path.append(os.path.abspath('src'))
#Doing example with AGNsed_var (as my preferred model) - however the syntax is the same for the others too
from agnvar import AGNsed_var
```
```python
#Defining accretion params
M = 1e8 #BH mass, Msol
D = 100 #Distance, co-moving, Mpc
log_mdot = -1.2 #log mass accretion rate, in Eddington units (Mdot/Mdot_edd)
a_star = 0.7 #BH spin, dimensionless
cosi = 0.9 #cos inclination, measured from z-axis (disc in x-y plane)
kTe_h = 100 #electron temp of hot Comptonizing region, keV
kTe_w = 0.2 #electron temp of warm Comptonizing region, keV
gamma_h = 1.7 #Photon index of hot Comptonizing region
gamma_w = 2.7 #Photon index of warm Comptonizing region
r_h = 10 #Outer radius of corona, Rg
r_w = 50 #Outer radius of warm Comptonizing region, Rg
log_rout = 5 #log of outer disc radius, Rg
hmax = 10 #max height of corona - used to set lamppost height for irradiation approx. Rg
z = 0.025 #Redshift at D for whatever cosmology you are assuming
```
There are a couple of switching parameters here. If you set log_rout = -1, then it will use the self-gravity radius from Laor & Netzer (1989) assuming viscous param \alpha=0.1
If you set r_h or r_w = -1, then it will use the innermost stable circular orbit, r_isco.
If r_w > r_out, then it re-sets to r_w = r_out
```python
#Now initiating the model object
agnmod = AGNsed_var(M, D, log_mdot, a_star, cosi, kTe_h, kTe_w, gamma_h, gamma_w, r_h, r_w, log_rout, hmax, z)
agnmod.new_ear(np.geomspace(1e-5, 1e3, 1000)) #Extending energy grid. The default is Emin=1e-4keV, Emax=1e4keV, numE=1000
agnmod.set_counts()
agnmod.set_flux() #Telling model I want the SED units in photons/s/cm^2/keV
```
```python
#As a start, we'll extract time-averaged SED
Es = agnmod.E_obs #Observed energy (redshift now taken into account)
ftot = agnmod.mean_spec() #Total SED
fd = agnmod.Lnu_d #Disc component of SED. SED components are added as properties AFTER calling .mean_spec()
fw = agnmod.Lnu_w #Warm Compton component
fh = agnmod.Lnu_h #hot Compton component
#Plotting
plt.loglog(Es, Es**2 * fd, ls='dashed', color='red')
plt.loglog(Es, Es**2 * fw, ls='dashed', color='green')
plt.loglog(Es, Es**2 * fh, ls='dashed', color='blue')
plt.loglog(Es, Es**2 * ftot, color='k')
plt.ylim(1e-4, 1)
plt.xlabel('Energy (keV)')
plt.ylabel(r'EF(E) keV$^{2}$ (Photons s$^{-1}$ cm$^{-2}$ keV$^{-1}$)')
plt.show()
```

We'll now go on to exploring the system response to a brief X-ray flash. To do this we'll simply give it a flat X-ray light-curve, modulated with a narrow Gaussian.
If you wanted to use the code to create response function, you would do something like this
```python
#Defining the gaussian flash
def G_flash(t):
tmid = 0.5 #Time for flash peak
sigma = 0.15 #flash width (ish)
return 1 * np.exp(-((t - tmid)**2 /( 2 * sigma**2)))
```
```python
#Generating input x-ray light-curve
ts = np.linspace(0, 10, 500) #time-steps (DAYS!)
fx_in = G_flash(ts) + 1 #light-curves normalized by mean, so 1 implies NO modulation
#Plotting input
plt.plot(ts, fx_in)
plt.xlabel('Time (days)')
plt.ylabel(r'$F/\langle F \rangle$')
plt.show()
```

```python
#Now calculating response throughout the SED - Note this may take a couple of mins
allSEDs = agnmod.evolve_spec(fx_in, ts)
print('Completed')
```
Completed
The output from this will be a 2D numpy array, with shape (len(Es), len(ts)). So each column within the array contains the SED at time t
```python
resp_sed = allSEDs/ftot[:, np.newaxis] #Normalising by the time-averaged SED. So units F/Fmean
#Plotting figure
fig_tsed = plt.figure(figsize=(5, 8))
ax = fig_tsed.add_subplot(111)
Eall = np.tile(Es, (len(ts), 1))
Eall = np.transpose(Eall)
tall = np.tile(ts, (len(Es), 1))
normc = cols.PowerNorm(0.3, 1, 1.1)
pcm = ax.pcolormesh(tall, Eall, resp_sed, cmap='plasma', norm=normc, shading='auto')
ax.set_yscale('log')
ax.set_ylim(1e-4, 1e-1)
ax.set_xlabel('Time (days)')
ax.set_ylabel('Energy (keV)')
plt.colorbar(pcm, ax=ax).set_label(r'F/$\langle F \rangle$')
plt.show()
```

The above figure shows the SED response to the input light-curve. This highlights the energy dependence of the response to a change in X-ray flux, as the flash propagates across the disc; giving an increase in the smearing and mean time delay. You'll also notice that most of the SED begins responding at a similar time. This is due to the continuum nature of the calculation. Since we calculate the spectral component from each annulus (essentially annular spectra - so continuum) and since the response from the inner disc is overwhelmingly strong (see Fig. 3 in Hagen & Done (in prep.)), lower energies will start responding at similar times. However, their mean delay time will mostly follow the general $\tau \propto \lambda^{4/3}$ relation, due to an increase in the response width at lower energies (see Fig. 5 in Hagen & Done (in prep.))
If you wanted to use this to generate a response function $\Psi(E, t)$, you would simply slice the output 2D SED array at the relevant energy (and then subtract the mean, etc.). E.g. at $10^{-3}$ and $3 \times 10^{-3}$ keV, the response would be:
```python
Ersp = [1e-3, 3e-3]
for e in Ersp:
idx_e = np.abs(e - Es).argmin()
rsp_e = resp_sed[idx_e, :]
rsp_e -= 1
plt.plot(ts, rsp_e, label=f'E = {e} kev')
plt.legend(frameon=False)
plt.xlabel('Time (Days)')
plt.ylabel(r'$\Psi(t)$ $F/\langle F \rangle - 1$')
```
Text(0, 0.5, '$\\Psi(t)$ $F/\\langle F \\rangle - 1$')

This shows clearly that the response reduces in amplitude, and increases in width at lower energies. The increase in width is what gives the lag-energy relation
However, the code can also be used to directly simulate a light-curve without going through all the hassle of generating response functions first, and then convolving.
So take an input light-curve of, perhaps, 200 days long with cadence 0.5 days.
```python
ts_lc = np.arange(0, 200.5, 0.5)
np.random.seed(123)
fx_lc = 0.9 * np.sin(ts_lc/10) + 1 + np.random.rand(len(ts_lc))*0.5
fx_lc /= np.mean(fx_lc)
plt.plot(ts_lc, fx_lc)
plt.xlabel('Time (days)')
plt.ylabel(r'$F/\langle F \rangle$')
plt.show()
```

To generate model light-curves we can do the same as when we looked at the response
```python
SED_lc = agnmod.evolve_spec(fx_lc, ts_lc)
print('Completed')
```
Completed
```python
#We use the .generate_lightcurve method to extract model light-curves in whatever bandpass you wish
mod_lc1 = agnmod.generate_lightcurve(1e-2, 5e-3, as_frac=True) #Extracts light-curve centered on Emid=1e-2 keV with width dE=5e-3 keV
plt.plot(ts_lc, fx_lc)
plt.plot(ts_lc, mod_lc1)
plt.ylabel(r'$F/\langle F \rangle$')
plt.xlabel('Time (days)')
plt.show()
plt.plot(ts_lc, mod_lc1, color='C1')
plt.ylabel(r'$F/\langle F \rangle$')
plt.xlabel('Time (days)')
plt.show()
```


The top shows the input light-curve (in blue) along with the output (orange), centered on whatever you pass to .generate_lightcurve(). The bottom panel shows just the model (so zoomed in, essentially). We see both that the amount of response is reduced and that some of the fast random variability appears to have been smoothed out a little (at least if you squint it appears that way - it becomes more clear if you change your inner disc radius)
You can also use the generate-lightcurve method to generate responses for a band-pass with finite width. e.g if you wanted to see the response for one of the swift-UVOT filters, you would feed the code a narrow X-ray flash (as done above for the response section), and then use .generate_lightcurve with the relevant mid-point energy and width (also ensuring as_frac=True, such that output normalised by mean SED).
This is a brief summary of the main functionality the code is aimed at. Any questions (or bug discoveries!!) please email me at scott.hagen@durham.ac.uk.
```python
```
|
scotthgnREPO_NAMEAGNvarPATH_START.@AGNvar_extracted@AGNvar-main@.ipynb_checkpoints@agnvar_example-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "test_hilbert.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/yt/frontends/ramses/tests/test_hilbert.py",
"type": "Python"
}
|
import numpy as np
from numpy.testing import assert_equal
import yt
from yt.frontends.ramses.hilbert import get_cpu_list_cuboid, hilbert3d
from yt.testing import requires_file
def test_hilbert3d():
    """Check hilbert3d on all 8 unit-cube corners against RAMSES' reference values."""
    # Expected Hilbert indices at bit depth 3, checked against RAMSES' own
    # implementation.
    expected = {
        (0, 0, 0): 0,
        (1, 0, 0): 1,
        (0, 1, 0): 7,
        (1, 1, 0): 6,
        (0, 0, 1): 3,
        (1, 0, 1): 2,
        (0, 1, 1): 4,
        (1, 1, 1): 5,
    }
    for point, index in expected.items():
        assert_equal(hilbert3d(list(point), 3).item(), index)
output_00080 = "output_00080/info_00080.txt"
@requires_file(output_00080)
def test_get_cpu_list():
    """Check get_cpu_list_cuboid against RAMSES' own domain decomposition."""
    ds = yt.load(output_00080)
    np.random.seed(16091992)
    # Randomly generated bounding boxes, checked against RAMSES' own implementation
    bboxes = (
        [[0.27747276, 0.30018937, 0.17916189], [0.42656026, 0.40509483, 0.29927838]],
        [[0.90660856, 0.44201328, 0.22770587], [1.09175462, 0.58017918, 0.2836648]],
        [[0.98542323, 0.58543376, 0.45858327], [1.04441105, 0.62079207, 0.58919283]],
        [[0.42274841, 0.44887745, 0.87793679], [0.52066634, 0.58936331, 1.00666222]],
        [[0.69964803, 0.65893669, 0.03660775], [0.80565696, 0.67409752, 0.11434604]],
    )
    expected_lists = ([0, 15], [0, 15], [0, 1, 15], [0, 13, 14, 15], [0])
    ncpu = ds.parameters["ncpu"]
    # Lower Hilbert key of every domain, plus the upper key of the last one.
    lower_keys = [ds.hilbert_indices[icpu][0] for icpu in range(1, ncpu + 1)]
    upper_key = ds.hilbert_indices[ds.parameters["ncpu"]][1]
    bound_keys = np.array(lower_keys + [upper_key], dtype="float64")
    for bbox, expected in zip(bboxes, expected_lists, strict=True):
        cpu_list = list(get_cpu_list_cuboid(ds, bbox, bound_keys=bound_keys))
        assert len(cpu_list) > 0
        assert all(np.array(expected) == np.array(cpu_list))
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@yt@frontends@ramses@tests@test_hilbert.py@.PATH_END.py
|
{
"filename": "run_dyn_single.py",
"repo_name": "dynamics-of-stellar-systems/dynamite",
"repo_path": "dynamite_extracted/dynamite-master/dev_tests/run_dyn_single.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 08:54:48 2021
@author: sabine

Smoke-test script: build and run a single DYNAMITE model for FCC047
(two kinematic data sets) and save one kinematic-map plot per data set.
"""
import os
import dynamite as dyn
from dynamite import physical_system as physys
# Report which DYNAMITE installation is being exercised.
print('DYNAMITE')
print(' version', dyn.__version__)
print(' installed at ', dyn.__path__)
# Configuration for the FCC047 galaxy with two kinematic data sets.
fname = 'FCC047_2kin/FCC047_config.yaml'
c = dyn.config_reader.Configuration(fname, reset_logging=True)
# Start from a clean slate: drop previously computed orbit libraries and
# the all-models bookkeeping file.
c.remove_existing_orblibs()
c.remove_existing_all_models_file()
plotdir = c.settings.io_settings['plot_directory']
print(type(c.system))
print(type(c.settings))
# Single parameter set taken straight from the parameter-space definition.
parset = c.parspace.get_parset()
print(parset)
nkin = c.system.n_kin
print(f'{nkin} kinematics data sets in system')
# Build one model: orbit library first, then orbit weights (sets model.chi2).
model = dyn.model.Model(config=c, parset=parset)
model.setup_directories()
orblib=model.get_orblib()
model.get_weights(orblib=orblib)
print(model.chi2)
# Plot one kinematic map per kinematic data set, replacing any existing file.
plotter = dyn.plotter.Plotter(config=c)
stars = c.system.get_component_from_class(physys.TriaxialVisibleComponent)
for kinset in range(nkin):
    plotfile = f'{plotdir}kin_map_{stars.kinematic_data[kinset].name}.png'
    if os.path.isfile(plotfile):
        os.remove(plotfile)
    figure = plotter.plot_kinematic_maps(model,kin_set=kinset)
    figure.savefig(plotfile)
    print(f'Look at {plotfile}')
|
dynamics-of-stellar-systemsREPO_NAMEdynamitePATH_START.@dynamite_extracted@dynamite-master@dev_tests@run_dyn_single.py@.PATH_END.py
|
{
"filename": "scipy_stats_test.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/tests/scipy_stats_test.py",
"type": "Python"
}
|
# Copyright 2018 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import itertools
import unittest
from absl.testing import absltest
import numpy as np
import scipy.stats as osp_stats
import scipy.version
import jax
import jax.numpy as jnp
from jax._src import dtypes, test_util as jtu
from jax.scipy import stats as lsp_stats
from jax.scipy.special import expit
# Parse absl flags (e.g. --jax_num_generated_cases) when run under absltest.
jax.config.parse_flags_with_absl()
# SciPy version tuple, used to skip tests that require newer scipy fixes
# (see testBernoulliPpf below).
scipy_version = jtu.parse_version(scipy.version.version)
# Argument-shape grids shared by the parameterized tests below.
all_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4)]
one_and_two_dim_shapes = [(4,), (3, 4), (3, 1), (1, 4)]
def genNamedParametersNArgs(n):
  """Build a jtu.sample_product decorator for an n-argument distribution test.

  Samples every combination-with-replacement of the shared shape grid and
  the floating dtypes for the n arguments.
  """
  shape_combos = itertools.combinations_with_replacement(all_shapes, n)
  dtype_combos = itertools.combinations_with_replacement(jtu.dtypes.floating, n)
  return jtu.sample_product(shapes=shape_combos, dtypes=dtype_combos)
# Allow implicit rank promotion in these tests, as virtually every test exercises it.
@jtu.with_config(jax_numpy_rank_promotion="allow")
class LaxBackedScipyStatsTests(jtu.JaxTestCase):
"""Tests for LAX-backed scipy.stats implementations"""
  # vonmises.pdf vs scipy, with concentration kappa forced non-negative.
  @genNamedParametersNArgs(2)
  def testVonMisesPdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.vonmises.pdf
    lax_fun = lsp_stats.vonmises.pdf
    def args_maker():
      x, kappa = map(rng, shapes, dtypes)
      # kappa must be >= 0 for the von Mises distribution.
      kappa = np.where(kappa < 0, kappa * -1, kappa).astype(kappa.dtype)
      return [x, kappa]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
@genNamedParametersNArgs(2)
def testVonMisesLogPdf(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
scipy_fun = osp_stats.vonmises.pdf
lax_fun = lsp_stats.vonmises.pdf
def args_maker():
x, kappa = map(rng, shapes, dtypes)
kappa = np.where(kappa < 0, kappa * -1, kappa).astype(kappa.dtype)
return [x, kappa]
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker)
  # wrapcauchy.pdf vs scipy; shape c is drawn strictly inside (0, 1).
  @genNamedParametersNArgs(2)
  def testWrappedCauchyPdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    rng_uniform = jtu.rand_uniform(self.rng(), low=1e-3, high=1 - 1e-3)
    scipy_fun = osp_stats.wrapcauchy.pdf
    lax_fun = lsp_stats.wrapcauchy.pdf
    def args_maker():
      x = rng(shapes[0], dtypes[0])
      c = rng_uniform(shapes[1], dtypes[1])
      return [x, c]
    # Looser float32 tolerance on TPU.
    tol = {
        np.float32: 1e-4 if jtu.test_device_matches(["tpu"]) else 1e-5,
        np.float64: 1e-11,
    }
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker,
                              check_dtypes=False, tol=tol)
      self._CompileAndCheck(lax_fun, args_maker, tol=tol)
  # wrapcauchy.logpdf vs scipy; same sampling and tolerances as the pdf test.
  @genNamedParametersNArgs(2)
  def testWrappedCauchyLogPdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    rng_uniform = jtu.rand_uniform(self.rng(), low=1e-3, high=1 - 1e-3)
    scipy_fun = osp_stats.wrapcauchy.logpdf
    lax_fun = lsp_stats.wrapcauchy.logpdf
    def args_maker():
      x = rng(shapes[0], dtypes[0])
      c = rng_uniform(shapes[1], dtypes[1])
      return [x, c]
    tol = {
        np.float32: 1e-4 if jtu.test_device_matches(["tpu"]) else 1e-5,
        np.float64: 1e-11,
    }
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker,
                              check_dtypes=False, tol=tol)
      self._CompileAndCheck(lax_fun, args_maker, tol=tol)
  # poisson.logpmf vs scipy; rate mu clipped positive, loc floored to integer.
  @genNamedParametersNArgs(3)
  def testPoissonLogPmf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.poisson.logpmf
    lax_fun = lsp_stats.poisson.logpmf
    def args_maker():
      k, mu, loc = map(rng, shapes, dtypes)
      # clipping to ensure that rate parameter is strictly positive
      mu = np.clip(np.abs(mu), a_min=0.1, a_max=None).astype(mu.dtype)
      loc = np.floor(loc)
      return [k, mu, loc]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker, rtol={np.float64: 1e-14})
  # poisson.pmf vs scipy; same argument construction as the logpmf test.
  @genNamedParametersNArgs(3)
  def testPoissonPmf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.poisson.pmf
    lax_fun = lsp_stats.poisson.pmf
    def args_maker():
      k, mu, loc = map(rng, shapes, dtypes)
      # clipping to ensure that rate parameter is strictly positive
      mu = np.clip(np.abs(mu), a_min=0.1, a_max=None).astype(mu.dtype)
      loc = np.floor(loc)
      return [k, mu, loc]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
  # poisson.cdf vs scipy.
  # NOTE(review): unlike the pmf tests above, loc is NOT floored here —
  # presumably poisson.cdf tolerates non-integer loc; confirm intentional.
  @genNamedParametersNArgs(3)
  def testPoissonCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.poisson.cdf
    lax_fun = lsp_stats.poisson.cdf
    def args_maker():
      k, mu, loc = map(rng, shapes, dtypes)
      # clipping to ensure that rate parameter is strictly positive
      mu = np.clip(np.abs(mu), a_min=0.1, a_max=None).astype(mu.dtype)
      return [k, mu, loc]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
  # bernoulli.logpmf vs scipy; p derived via expit so it lies in (0, 1).
  @genNamedParametersNArgs(3)
  def testBernoulliLogPmf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.bernoulli.logpmf
    lax_fun = lsp_stats.bernoulli.logpmf
    def args_maker():
      x, logit, loc = map(rng, shapes, dtypes)
      x = np.floor(x)
      p = expit(logit)
      loc = np.floor(loc)
      return [x, p, loc]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # bernoulli.cdf vs scipy on integer x and uniform p in (0, 1).
  @genNamedParametersNArgs(2)
  def testBernoulliCdf(self, shapes, dtypes):
    rng_int = jtu.rand_int(self.rng(), -100, 100)
    rng_uniform = jtu.rand_uniform(self.rng())
    scipy_fun = osp_stats.bernoulli.cdf
    lax_fun = lsp_stats.bernoulli.cdf
    def args_maker():
      x = rng_int(shapes[0], dtypes[0])
      p = rng_uniform(shapes[1], dtypes[1])
      return [x, p]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=5e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # bernoulli.ppf vs scipy; requires the scipy >= 1.9.2 ppf fix.
  @genNamedParametersNArgs(2)
  def testBernoulliPpf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.bernoulli.ppf
    lax_fun = lsp_stats.bernoulli.ppf
    if scipy_version < (1, 9, 2):
      self.skipTest("Scipy 1.9.2 needed for fix https://github.com/scipy/scipy/pull/17166.")
    def args_maker():
      q, p = map(rng, shapes, dtypes)
      # expit maps both q and p into (0, 1).
      q = expit(q)
      p = expit(p)
      return [q, p]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=5e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol=3e-4)
  # geom.logpmf vs scipy; same construction as the Bernoulli logpmf test.
  @genNamedParametersNArgs(3)
  def testGeomLogPmf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.geom.logpmf
    lax_fun = lsp_stats.geom.logpmf
    def args_maker():
      x, logit, loc = map(rng, shapes, dtypes)
      x = np.floor(x)
      p = expit(logit)
      loc = np.floor(loc)
      return [x, p, loc]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # beta.logpdf vs scipy on unconstrained random arguments.
  @genNamedParametersNArgs(5)
  def testBetaLogPdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.beta.logpdf
    lax_fun = lsp_stats.beta.logpdf
    def args_maker():
      x, a, b, loc, scale = map(rng, shapes, dtypes)
      return [x, a, b, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker,
                            rtol={np.float32: 2e-3, np.float64: 1e-4})
  # beta.logcdf vs scipy on strictly positive random arguments.
  @genNamedParametersNArgs(5)
  def testBetaLogCdf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.beta.logcdf
    lax_fun = lsp_stats.beta.logcdf
    def args_maker():
      x, a, b, loc, scale = map(rng, shapes, dtypes)
      return [x, a, b, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker,
                            rtol={np.float32: 2e-3, np.float64: 1e-4})
  # beta.sf vs scipy.
  @genNamedParametersNArgs(5)
  def testBetaSf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.beta.sf
    lax_fun = lsp_stats.beta.sf
    def args_maker():
      x, a, b, loc, scale = map(rng, shapes, dtypes)
      return [x, a, b, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker,
                            rtol={np.float32: 2e-3, np.float64: 1e-4})
  # beta.logsf vs scipy.
  @genNamedParametersNArgs(5)
  def testBetaLogSf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.beta.logsf
    lax_fun = lsp_stats.beta.logsf
    def args_maker():
      x, a, b, loc, scale = map(rng, shapes, dtypes)
      return [x, a, b, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker,
                            rtol={np.float32: 2e-3, np.float64: 1e-4})
  # Edge case: pdf at the support boundaries x = 0 and x = 1.
  def testBetaLogPdfZero(self):
    # Regression test for https://github.com/jax-ml/jax/issues/7645
    a = b = 1.
    x = np.array([0., 1.])
    self.assertAllClose(
      osp_stats.beta.pdf(x, a, b), lsp_stats.beta.pdf(x, a, b), atol=1e-5,
      rtol=2e-5)
  # Edge case: negative shape parameters a, b.
  def testBetaLogPdfNegativeConstants(self):
    a = b = -1.1
    x = jnp.array([0., 0.5, 1.])
    self.assertAllClose(
      osp_stats.beta.pdf(x, a, b), lsp_stats.beta.pdf(x, a, b), atol=1e-5,
      rtol=2e-5)
  # Edge case: negative scale parameter.
  def testBetaLogPdfNegativeScale(self):
    a = b = 1.
    x = jnp.array([0., 0.5, 1.])
    loc = 0
    scale = -1
    self.assertAllClose(
      osp_stats.beta.pdf(x, a, b, loc, scale),
      lsp_stats.beta.pdf(x, a, b, loc, scale), atol=1e-5,
      rtol=2e-5)
  # cauchy.logpdf vs scipy; scale clipped away from zero.
  @genNamedParametersNArgs(3)
  def testCauchyLogPdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.cauchy.logpdf
    lax_fun = lsp_stats.cauchy.logpdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker, tol={np.float64: 1E-14})
  # cauchy.logcdf vs scipy.
  @genNamedParametersNArgs(3)
  def testCauchyLogCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.cauchy.logcdf
    lax_fun = lsp_stats.cauchy.logcdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol={np.float64: 1e-14},
                            atol={np.float64: 1e-14})
  # cauchy.cdf vs scipy.
  @genNamedParametersNArgs(3)
  def testCauchyCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.cauchy.cdf
    lax_fun = lsp_stats.cauchy.cdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol={np.float64: 1e-14},
                            atol={np.float64: 1e-14})
  # cauchy.logsf vs scipy.
  @genNamedParametersNArgs(3)
  def testCauchyLogSf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.cauchy.logsf
    lax_fun = lsp_stats.cauchy.logsf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol={np.float64: 1e-14},
                            atol={np.float64: 1e-14})
  # cauchy.sf vs scipy.
  @genNamedParametersNArgs(3)
  def testCauchySf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.cauchy.sf
    lax_fun = lsp_stats.cauchy.sf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol={np.float64: 1e-14},
                            atol={np.float64: 1e-14})
  # cauchy.isf vs scipy; q clipped away from 0/1 where tan diverges.
  @genNamedParametersNArgs(3)
  def testCauchyIsf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.cauchy.isf
    lax_fun = lsp_stats.cauchy.isf
    def args_maker():
      q, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that q is in desired range
      # since lax.tan and numpy.tan work different near divergence points
      q = np.clip(q, 5e-3, 1 - 5e-3).astype(q.dtype)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [q, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=2e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol=3e-4)
  # cauchy.ppf vs scipy; same q clipping as the isf test.
  @genNamedParametersNArgs(3)
  def testCauchyPpf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.cauchy.ppf
    lax_fun = lsp_stats.cauchy.ppf
    def args_maker():
      q, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that q is in desired
      # since lax.tan and numpy.tan work different near divergence points
      q = np.clip(q, 5e-3, 1 - 5e-3).astype(q.dtype)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [q, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=2e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol=3e-4)
  # dirichlet.logpdf vs scipy, covering both matching and mismatched
  # x/alpha lengths (the latter exercises unnormalized-x handling).
  @jtu.sample_product(
    shapes=[
      [x_shape, alpha_shape]
      for x_shape in one_and_two_dim_shapes
      for alpha_shape in [(x_shape[0],), (x_shape[0] + 1,)]
    ],
    dtypes=itertools.combinations_with_replacement(jtu.dtypes.floating, 2),
  )
  def testDirichletLogPdf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    def _normalize(x, alpha):
      # When alpha has one more entry than x has rows, the 0.1 padding leaves
      # x deliberately not summing to 1.
      x_norm = x.sum(0) + (0.0 if x.shape[0] == alpha.shape[0] else 0.1)
      return (x / x_norm).astype(x.dtype), alpha
    def lax_fun(x, alpha):
      return lsp_stats.dirichlet.logpdf(*_normalize(x, alpha))
    def scipy_fun(x, alpha):
      # scipy validates the x normalization using float64 arithmetic, so we must
      # cast x to float64 before normalization to ensure this passes.
      x, alpha = _normalize(x.astype('float64'), alpha)
      result = osp_stats.dirichlet.logpdf(x, alpha)
      # if x.shape is (N, 1), scipy flattens the output, while JAX returns arrays
      # of a consistent rank. This check ensures the results have the same shape.
      return result if x.ndim == 1 else np.atleast_1d(result)
    def args_maker():
      # Don't normalize here, because we want normalization to happen at 64-bit
      # precision in the scipy version.
      x, alpha = map(rng, shapes, dtypes)
      return x, alpha
    tol = {np.float32: 1E-3, np.float64: 1e-5}
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=tol)
      self._CompileAndCheck(lax_fun, args_maker, atol=tol, rtol=tol)
  # expon.logpdf vs scipy on strictly positive arguments.
  @genNamedParametersNArgs(3)
  def testExponLogPdf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.expon.logpdf
    lax_fun = lsp_stats.expon.logpdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # gamma.logpdf vs scipy on strictly positive arguments.
  @genNamedParametersNArgs(4)
  def testGammaLogPdf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.gamma.logpdf
    lax_fun = lsp_stats.gamma.logpdf
    def args_maker():
      x, a, loc, scale = map(rng, shapes, dtypes)
      return [x, a, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=5e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # Edge case: gamma.pdf at x = 0.
  def testGammaLogPdfZero(self):
    # Regression test for https://github.com/jax-ml/jax/issues/7256
    self.assertAllClose(
      osp_stats.gamma.pdf(0.0, 1.0), lsp_stats.gamma.pdf(0.0, 1.0), atol=1E-6)
  # Edge case: gamma.pdf under jax.debug_nans must not trip the NaN checker.
  def testGammaDebugNans(self):
    # Regression test for https://github.com/jax-ml/jax/issues/24939
    with jax.debug_nans(True):
      self.assertAllClose(
        osp_stats.gamma.pdf(0.0, 1.0, 1.0), lsp_stats.gamma.pdf(0.0, 1.0, 1.0)
      )
  # gamma.logcdf vs scipy; x clipped non-negative.
  @genNamedParametersNArgs(4)
  def testGammaLogCdf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.gamma.logcdf
    lax_fun = lsp_stats.gamma.logcdf
    def args_maker():
      x, a, loc, scale = map(rng, shapes, dtypes)
      x = np.clip(x, 0, None).astype(x.dtype)
      return [x, a, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=5e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # gamma.logsf vs scipy.
  @genNamedParametersNArgs(4)
  def testGammaLogSf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.gamma.logsf
    lax_fun = lsp_stats.gamma.logsf
    def args_maker():
      x, a, loc, scale = map(rng, shapes, dtypes)
      return [x, a, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=5e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # gamma.sf vs scipy.
  @genNamedParametersNArgs(4)
  def testGammaSf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.gamma.sf
    lax_fun = lsp_stats.gamma.sf
    def args_maker():
      x, a, loc, scale = map(rng, shapes, dtypes)
      return [x, a, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=5e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # gennorm.logpdf vs scipy on unconstrained random arguments.
  @genNamedParametersNArgs(2)
  def testGenNormLogPdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.gennorm.logpdf
    lax_fun = lsp_stats.gennorm.logpdf
    def args_maker():
      x, p = map(rng, shapes, dtypes)
      return [x, p]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4, rtol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
  # gennorm.cdf vs scipy.
  @genNamedParametersNArgs(2)
  def testGenNormCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.gennorm.cdf
    lax_fun = lsp_stats.gennorm.cdf
    def args_maker():
      x, p = map(rng, shapes, dtypes)
      return [x, p]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4, rtol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker, atol={np.float32: 3e-5},
                            rtol={np.float32: 3e-5})
  # nbinom.logpmf vs scipy; k floored, n ceiled, p from expit, loc floored.
  # The per-dtype tol dict is applied only to the compile check; the
  # scipy comparison uses the looser 5e-4.
  @genNamedParametersNArgs(4)
  def testNBinomLogPmf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.nbinom.logpmf
    lax_fun = lsp_stats.nbinom.logpmf
    def args_maker():
      k, n, logit, loc = map(rng, shapes, dtypes)
      k = np.floor(np.abs(k))
      n = np.ceil(np.abs(n))
      p = expit(logit)
      loc = np.floor(loc)
      return [k, n, p, loc]
    tol = {np.float32: 1e-6, np.float64: 1e-8}
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=5e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol=tol, atol=tol)
  # laplace.logpdf vs scipy; scale clipped away from zero.
  @genNamedParametersNArgs(3)
  def testLaplaceLogPdf(self, shapes, dtypes):
    rng = jtu.rand_positive(self.rng())
    scipy_fun = osp_stats.laplace.logpdf
    lax_fun = lsp_stats.laplace.logpdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(scale, a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # laplace.cdf vs scipy.
  @genNamedParametersNArgs(3)
  def testLaplaceCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.laplace.cdf
    lax_fun = lsp_stats.laplace.cdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # ensure that scale is not too low
      scale = np.clip(scale, a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol={np.float32: 1e-5, np.float64: 1e-6})
      self._CompileAndCheck(lax_fun, args_maker)
  # logistic.cdf vs scipy; scale clipped away from zero.
  @genNamedParametersNArgs(3)
  def testLogisticCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.logistic.cdf
    lax_fun = lsp_stats.logistic.cdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # ensure that scale is not too low
      scale = np.clip(scale, a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=3e-5)
      self._CompileAndCheck(lax_fun, args_maker)
  # logistic.logpdf vs scipy.
  @genNamedParametersNArgs(3)
  def testLogisticLogpdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.logistic.logpdf
    lax_fun = lsp_stats.logistic.logpdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # ensure that scale is not too low
      scale = np.clip(scale, a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
  # Edge case: logpdf must not overflow for large-magnitude inputs.
  def testLogisticLogpdfOverflow(self):
    # Regression test for https://github.com/jax-ml/jax/issues/10219
    self.assertAllClose(
      np.array([-100, -100], np.float32),
      lsp_stats.logistic.logpdf(np.array([-100, 100], np.float32)),
      check_dtypes=False)
  # logistic.ppf vs scipy.
  @genNamedParametersNArgs(3)
  def testLogisticPpf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.logistic.ppf
    lax_fun = lsp_stats.logistic.ppf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # ensure that scale is not too low
      scale = np.clip(scale, a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              atol=1e-3, rtol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker, rtol=3e-4)
  # logistic.sf vs scipy.
  @genNamedParametersNArgs(3)
  def testLogisticSf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.logistic.sf
    lax_fun = lsp_stats.logistic.sf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # ensure that scale is not too low
      scale = np.clip(scale, a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=2e-5)
      self._CompileAndCheck(lax_fun, args_maker)
  # logistic.isf vs scipy.
  @genNamedParametersNArgs(3)
  def testLogisticIsf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.logistic.isf
    lax_fun = lsp_stats.logistic.isf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # ensure that scale is not too low
      scale = np.clip(scale, a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol=3e-4)
  # norm.logpdf vs scipy; scale clipped away from zero.
  @genNamedParametersNArgs(3)
  def testNormLogPdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.norm.logpdf
    lax_fun = lsp_stats.norm.logpdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
  # norm.logcdf vs scipy.
  @genNamedParametersNArgs(3)
  def testNormLogCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.norm.logcdf
    lax_fun = lsp_stats.norm.logcdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # norm.cdf vs scipy.
  @genNamedParametersNArgs(3)
  def testNormCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.norm.cdf
    lax_fun = lsp_stats.norm.cdf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-6)
      self._CompileAndCheck(lax_fun, args_maker)
  # norm.logsf vs scipy.
  @genNamedParametersNArgs(3)
  def testNormLogSf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.norm.logsf
    lax_fun = lsp_stats.norm.logsf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker)
  # norm.sf vs scipy.
  @genNamedParametersNArgs(3)
  def testNormSf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.norm.sf
    lax_fun = lsp_stats.norm.sf
    def args_maker():
      x, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-6)
      self._CompileAndCheck(lax_fun, args_maker)
  # Edge case: sf in the far tail must keep relative precision.
  def testNormSfNearZero(self):
    # Regression test for https://github.com/jax-ml/jax/issues/17199
    value = np.array(10, np.float32)
    self.assertAllClose(osp_stats.norm.sf(value).astype('float32'),
                        lsp_stats.norm.sf(value),
                        atol=0, rtol=1E-5)
  # norm.ppf vs scipy; q mapped into [0, 1].
  @genNamedParametersNArgs(3)
  def testNormPpf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.norm.ppf
    lax_fun = lsp_stats.norm.ppf
    def args_maker():
      q, loc, scale = map(rng, shapes, dtypes)
      # ensure probability is between 0 and 1:
      q = np.clip(np.abs(q / 3), a_min=None, a_max=1).astype(q.dtype)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [q, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol=3e-4)
  # norm.isf vs scipy; same q construction as the ppf test.
  @genNamedParametersNArgs(3)
  def testNormIsf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.norm.isf
    lax_fun = lsp_stats.norm.isf
    def args_maker():
      q, loc, scale = map(rng, shapes, dtypes)
      # ensure probability is between 0 and 1:
      q = np.clip(np.abs(q / 3), a_min=None, a_max=1).astype(q.dtype)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [q, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, tol=1e-4)
      self._CompileAndCheck(lax_fun, args_maker, rtol=3e-4, atol=3e-4)
  # truncnorm.logpdf vs scipy; scale clipped away from zero.
  @genNamedParametersNArgs(5)
  def testTruncnormLogPdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.truncnorm.logpdf
    lax_fun = lsp_stats.truncnorm.logpdf
    def args_maker():
      x, a, b, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, a, b, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
  # truncnorm.pdf vs scipy; currently skipped on CPU (see b/282695039).
  @genNamedParametersNArgs(5)
  def testTruncnormPdf(self, shapes, dtypes):
    if jtu.test_device_matches(["cpu"]):
      raise unittest.SkipTest("TODO(b/282695039): test fails at LLVM head")
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.truncnorm.pdf
    lax_fun = lsp_stats.truncnorm.pdf
    def args_maker():
      x, a, b, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, a, b, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
  # truncnorm.logcdf vs scipy.
  @genNamedParametersNArgs(5)
  def testTruncnormLogCdf(self, shapes, dtypes):
    rng = jtu.rand_default(self.rng())
    scipy_fun = osp_stats.truncnorm.logcdf
    lax_fun = lsp_stats.truncnorm.logcdf
    def args_maker():
      x, a, b, loc, scale = map(rng, shapes, dtypes)
      # clipping to ensure that scale is not too low
      scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
      return [x, a, b, loc, scale]
    with jtu.strict_promotion_if_dtypes_match(dtypes):
      self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                              tol=1e-3)
      self._CompileAndCheck(lax_fun, args_maker)
@genNamedParametersNArgs(5)
def testTruncnormCdf(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
scipy_fun = osp_stats.truncnorm.cdf
lax_fun = lsp_stats.truncnorm.cdf
def args_maker():
x, a, b, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
return [x, a, b, loc, scale]
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker, rtol={np.float32: 1e-5},
atol={np.float32: 1e-5})
@genNamedParametersNArgs(5)
def testTruncnormLogSf(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
scipy_fun = osp_stats.truncnorm.logsf
lax_fun = lsp_stats.truncnorm.logsf
def args_maker():
x, a, b, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
return [x, a, b, loc, scale]
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker)
@genNamedParametersNArgs(5)
def testTruncnormSf(self, shapes, dtypes):
rng = jtu.rand_default(self.rng())
scipy_fun = osp_stats.truncnorm.sf
lax_fun = lsp_stats.truncnorm.sf
def args_maker():
x, a, b, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
return [x, a, b, loc, scale]
with jtu.strict_promotion_if_dtypes_match(dtypes):
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker)
@genNamedParametersNArgs(4)
def testParetoLogPdf(self, shapes, dtypes):
  # pareto.logpdf vs scipy; rand_positive keeps x, b, loc, scale > 0.
  rng = jtu.rand_positive(self.rng())
  scipy_fun = osp_stats.pareto.logpdf
  lax_fun = lsp_stats.pareto.logpdf

  def args_maker():
    x, b, loc, scale = map(rng, shapes, dtypes)
    return [x, b, loc, scale]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=1e-3)
    self._CompileAndCheck(lax_fun, args_maker)

@genNamedParametersNArgs(4)
def testTLogPdf(self, shapes, dtypes):
  # Student's t logpdf vs scipy on random (x, df, loc, scale).
  rng = jtu.rand_default(self.rng())
  scipy_fun = osp_stats.t.logpdf
  lax_fun = lsp_stats.t.logpdf

  def args_maker():
    x, df, loc, scale = map(rng, shapes, dtypes)
    # clipping to ensure that scale is not too low
    scale = np.clip(np.abs(scale), a_min=0.1, a_max=None).astype(scale.dtype)
    return [x, df, loc, scale]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=1e-3)
    self._CompileAndCheck(lax_fun, args_maker,
                          rtol={np.float64: 1e-14}, atol={np.float64: 1e-14})
@genNamedParametersNArgs(3)
def testUniformLogPdf(self, shapes, dtypes):
  # uniform.logpdf vs scipy; scale is forced non-negative via abs().
  rng = jtu.rand_default(self.rng())
  scipy_fun = osp_stats.uniform.logpdf
  lax_fun = lsp_stats.uniform.logpdf

  def args_maker():
    x, loc, scale = map(rng, shapes, dtypes)
    return [x, loc, np.abs(scale)]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=1e-4)
    self._CompileAndCheck(lax_fun, args_maker)

@genNamedParametersNArgs(3)
def testUniformCdf(self, shapes, dtypes):
  # uniform.cdf vs scipy.
  rng = jtu.rand_default(self.rng())
  scipy_fun = osp_stats.uniform.cdf
  lax_fun = lsp_stats.uniform.cdf

  def args_maker():
    x, loc, scale = map(rng, shapes, dtypes)
    return [x, loc, np.abs(scale)]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=1e-5)
    self._CompileAndCheck(lax_fun, args_maker)

@genNamedParametersNArgs(3)
def testUniformPpf(self, shapes, dtypes):
  # uniform.ppf vs scipy.
  rng = jtu.rand_default(self.rng())
  scipy_fun = osp_stats.uniform.ppf
  lax_fun = lsp_stats.uniform.ppf

  def args_maker():
    q, loc, scale = map(rng, shapes, dtypes)
    return [q, loc, np.abs(scale)]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=1e-5)
    self._CompileAndCheck(lax_fun, args_maker)
@genNamedParametersNArgs(4)
def testChi2LogPdf(self, shapes, dtypes):
  # chi2.logpdf vs scipy; rand_positive keeps all args strictly positive.
  rng = jtu.rand_positive(self.rng())
  scipy_fun = osp_stats.chi2.logpdf
  lax_fun = lsp_stats.chi2.logpdf

  def args_maker():
    x, df, loc, scale = map(rng, shapes, dtypes)
    return [x, df, loc, scale]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=5e-4)
    self._CompileAndCheck(lax_fun, args_maker)

@genNamedParametersNArgs(4)
def testChi2LogCdf(self, shapes, dtypes):
  # chi2.logcdf vs scipy.
  rng = jtu.rand_positive(self.rng())
  scipy_fun = osp_stats.chi2.logcdf
  lax_fun = lsp_stats.chi2.logcdf

  def args_maker():
    x, df, loc, scale = map(rng, shapes, dtypes)
    return [x, df, loc, scale]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=5e-4)
    self._CompileAndCheck(lax_fun, args_maker)

@genNamedParametersNArgs(4)
def testChi2Cdf(self, shapes, dtypes):
  # chi2.cdf vs scipy.
  rng = jtu.rand_positive(self.rng())
  scipy_fun = osp_stats.chi2.cdf
  lax_fun = lsp_stats.chi2.cdf

  def args_maker():
    x, df, loc, scale = map(rng, shapes, dtypes)
    return [x, df, loc, scale]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=5e-4)
    self._CompileAndCheck(lax_fun, args_maker)

@genNamedParametersNArgs(4)
def testChi2Sf(self, shapes, dtypes):
  # chi2.sf vs scipy.
  rng = jtu.rand_positive(self.rng())
  scipy_fun = osp_stats.chi2.sf
  lax_fun = lsp_stats.chi2.sf

  def args_maker():
    x, df, loc, scale = map(rng, shapes, dtypes)
    return [x, df, loc, scale]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=5e-4)
    self._CompileAndCheck(lax_fun, args_maker)

@genNamedParametersNArgs(4)
def testChi2LogSf(self, shapes, dtypes):
  # chi2.logsf vs scipy.
  rng = jtu.rand_positive(self.rng())
  scipy_fun = osp_stats.chi2.logsf
  lax_fun = lsp_stats.chi2.logsf

  def args_maker():
    x, df, loc, scale = map(rng, shapes, dtypes)
    return [x, df, loc, scale]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=5e-4)
    self._CompileAndCheck(lax_fun, args_maker)
@genNamedParametersNArgs(5)
def testBetaBinomLogPmf(self, shapes, dtypes):
  # betabinom.logpmf vs scipy; k/n/loc are rounded to integral values.
  rng = jtu.rand_positive(self.rng())
  lax_fun = lsp_stats.betabinom.logpmf

  def args_maker():
    k, n, a, b, loc = map(rng, shapes, dtypes)
    k = np.floor(k)
    n = np.ceil(n)
    loc = np.floor(loc)
    return [k, n, a, b, loc]

  with jtu.strict_promotion_if_dtypes_match(dtypes):
    scipy_fun = osp_stats.betabinom.logpmf
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=5e-4)
    self._CompileAndCheck(lax_fun, args_maker, rtol=1e-5, atol=1e-5)

def testBetaBinomLogPmfZerokZeron(self):
  # Edge case: k = 0 trials of n = 0 must agree exactly with scipy.
  self.assertEqual(lsp_stats.betabinom.logpmf(0, 0, 10, 5, 0),
                   osp_stats.betabinom.logpmf(0, 0, 10, 5, 0))

@genNamedParametersNArgs(4)
def testBinomLogPmf(self, shapes, dtypes):
  # binom.logpmf vs scipy; p is generated via expit() so 0 < p < 1.
  rng = jtu.rand_positive(self.rng())
  scipy_fun = osp_stats.binom.logpmf
  lax_fun = lsp_stats.binom.logpmf

  def args_maker():
    k, n, logit, loc = map(rng, shapes, dtypes)
    k = np.floor(k)
    n = np.ceil(n)
    p = expit(logit)
    loc = np.floor(loc)
    return [k, n, p, loc]

  # Tight per-dtype tolerance used for the compile check only; the
  # scipy comparison below uses a flat 5e-4.
  tol = {np.float32: 1e-6, np.float64: 1e-8}
  with jtu.strict_promotion_if_dtypes_match(dtypes):
    self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                            tol=5e-4)
    self._CompileAndCheck(lax_fun, args_maker, rtol=tol, atol=tol)

def testBinomPmfOutOfRange(self):
  # Regression test for https://github.com/jax-ml/jax/issues/19150
  # Non-integral / out-of-support k must give probability 0, not NaN.
  self.assertEqual(lsp_stats.binom.pmf(k=6.5, n=5, p=0.8), 0.0)

def testBinomLogPmfZerokZeron(self):
  # Edge case: k = 0 of n = 0 must agree exactly with scipy.
  self.assertEqual(lsp_stats.binom.logpmf(0, 0, 0.8, 0),
                   osp_stats.binom.logpmf(0, 0, 0.8, 0))
def testIssue972(self):
  # Regression test: norm.cdf(+inf) must be exactly 1, not NaN.
  self.assertAllClose(
      np.ones((4,), np.float32),
      lsp_stats.norm.cdf(np.full((4,), np.inf, np.float32)),
      check_dtypes=False)
@jtu.sample_product(
    [dict(x_dtype=x_dtype, p_dtype=p_dtype)
     for x_dtype, p_dtype in itertools.product(jtu.dtypes.integer, jtu.dtypes.floating)
    ],
    # Fix: the first entry was written ``(2)``, which is the plain int 2
    # (parentheses around a scalar), not the 1-tuple ``(2,)`` that the
    # sibling entries use.
    shape=[(2,), (4,), (1, 5)],
)
def testMultinomialLogPmf(self, shape, x_dtype, p_dtype):
  # multinomial.logpmf vs scipy: counts x, total n = sum(x), and a
  # probability vector p normalized to sum to 1.
  rng = jtu.rand_positive(self.rng())
  scipy_fun = osp_stats.multinomial.logpmf
  lax_fun = lsp_stats.multinomial.logpmf

  def args_maker():
    x = rng(shape, x_dtype)
    n = np.sum(x, dtype=x.dtype)
    p = rng(shape, p_dtype)
    # Normalize p so its entries sum to 1 (or close enough to it).
    p = p / np.sum(p)
    return [x, n, p]

  self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                          tol=5e-4)
  self._CompileAndCheck(lax_fun, args_maker, rtol=1e-5, atol=1e-5)
@jtu.sample_product(
  [dict(x_shape=x_shape, mean_shape=mean_shape, cov_shape=cov_shape)
   for x_shape, mean_shape, cov_shape in [
       # # These test cases cover default values for mean/cov, but we don't
       # # support those yet (and they seem not very valuable).
       # [(), None, None],
       # [(), (), None],
       # [(2,), None, None],
       # [(2,), (), None],
       # [(2,), (2,), None],
       # [(3, 2), (3, 2,), None],
       # [(5, 3, 2), (5, 3, 2,), None],
       [(), (), ()],
       [(3,), (), ()],
       [(3,), (3,), ()],
       [(3,), (3,), (3, 3)],
       [(3, 4), (4,), (4, 4)],
       [(2, 3, 4), (4,), (4, 4)],
   ]
  ],
  [dict(x_dtype=x_dtype, mean_dtype=mean_dtype, cov_dtype=cov_dtype)
   for x_dtype, mean_dtype, cov_dtype in itertools.combinations_with_replacement(jtu.dtypes.floating, 3)
  ],
  # if (mean_shape is not None or mean_dtype == np.float32)
  # and (cov_shape is not None or cov_dtype == np.float32)))
)
def testMultivariateNormalLogpdf(self, x_shape, x_dtype, mean_shape,
                                 mean_dtype, cov_shape, cov_dtype):
  # multivariate_normal.logpdf vs scipy over a mix of scalar / vector /
  # batched shapes for x, mean and covariance.
  rng = jtu.rand_default(self.rng())

  def args_maker():
    args = [rng(x_shape, x_dtype)]
    if mean_shape is not None:
      args.append(5 * rng(mean_shape, mean_dtype))
    if cov_shape is not None:
      if cov_shape == ():
        # scalar "covariance": keep it strictly positive
        args.append(0.1 + rng(cov_shape, cov_dtype) ** 2)
      else:
        # build a positive semi-definite matrix as F @ F.T
        factor_shape = (*cov_shape[:-1], 2 * cov_shape[-1])
        factor = rng(factor_shape, cov_dtype)
        args.append(np.matmul(factor, np.swapaxes(factor, -1, -2)))
    return [a.astype(x_dtype) for a in args]

  self._CheckAgainstNumpy(osp_stats.multivariate_normal.logpdf,
                          lsp_stats.multivariate_normal.logpdf,
                          args_maker, tol=1e-3, check_dtypes=False)
  self._CompileAndCheck(lsp_stats.multivariate_normal.logpdf, args_maker,
                        rtol=1e-4, atol=1e-4)

@jtu.sample_product(
  [dict(x_shape=x_shape, mean_shape=mean_shape, cov_shape=cov_shape)
   for x_shape, mean_shape, cov_shape in [
       # These test cases are where scipy flattens things, which has
       # different batch semantics than some might expect, so we manually
       # vectorize scipy's outputs for the sake of testing.
       [(5, 3, 2), (5, 3, 2), (5, 3, 2, 2)],
       [(2,), (5, 3, 2), (5, 3, 2, 2)],
       [(5, 3, 2), (2,), (5, 3, 2, 2)],
       [(5, 3, 2), (5, 3, 2,), (2, 2)],
       [(1, 3, 2), (3, 2,), (5, 1, 2, 2)],
       [(5, 3, 2), (1, 2,), (2, 2)],
   ]
  ],
  [dict(x_dtype=x_dtype, mean_dtype=mean_dtype, cov_dtype=cov_dtype)
   for x_dtype, mean_dtype, cov_dtype in itertools.combinations_with_replacement(jtu.dtypes.floating, 3)
  ],
)
def testMultivariateNormalLogpdfBroadcasted(self, x_shape, x_dtype, mean_shape,
                                            mean_dtype, cov_shape, cov_dtype):
  # Same as above but with broadcasting batch shapes; scipy is wrapped in
  # np.vectorize to get NumPy-style batch semantics for the comparison.
  rng = jtu.rand_default(self.rng())

  def args_maker():
    args = [rng(x_shape, x_dtype)]
    if mean_shape is not None:
      args.append(5 * rng(mean_shape, mean_dtype))
    if cov_shape is not None:
      if cov_shape == ():
        args.append(0.1 + rng(cov_shape, cov_dtype) ** 2)
      else:
        factor_shape = (*cov_shape[:-1], 2 * cov_shape[-1])
        factor = rng(factor_shape, cov_dtype)
        args.append(np.matmul(factor, np.swapaxes(factor, -1, -2)))
    return [a.astype(x_dtype) for a in args]

  osp_fun = np.vectorize(osp_stats.multivariate_normal.logpdf,
                         signature="(n),(n),(n,n)->()")
  self._CheckAgainstNumpy(osp_fun, lsp_stats.multivariate_normal.logpdf,
                          args_maker, tol=1e-3, check_dtypes=False)
  self._CompileAndCheck(lsp_stats.multivariate_normal.logpdf, args_maker,
                        rtol=1e-4, atol=1e-4)

@jtu.sample_product(
  ndim=[2, 3],
  nbatch=[1, 3, 5],
  dtype=jtu.dtypes.floating,
)
def testMultivariateNormalLogpdfBatch(self, ndim, nbatch, dtype):
  # Regression test for #5570
  # Batched evaluation must agree with a vmap over single evaluations.
  rng = jtu.rand_default(self.rng())
  x = rng((nbatch, ndim), dtype)
  mean = 5 * rng((nbatch, ndim), dtype)
  factor = rng((nbatch, ndim, 2 * ndim), dtype)
  # positive semi-definite covariance per batch element
  cov = factor @ factor.transpose(0, 2, 1)

  result1 = lsp_stats.multivariate_normal.logpdf(x, mean, cov)
  result2 = jax.vmap(lsp_stats.multivariate_normal.logpdf)(x, mean, cov)
  self.assertArraysAllClose(result1, result2, check_dtypes=False)
@jtu.sample_product(
  inshape=[(50,), (3, 50), (2, 12)],
  dtype=jtu.dtypes.floating,
  outsize=[None, 10],
  weights=[False, True],
  method=[None, "scott", "silverman", 1.5, "callable"],
  func=[None, "evaluate", "logpdf", "pdf"],
)
@jax.default_matmul_precision("float32")
def testKde(self, inshape, dtype, outsize, weights, method, func):
  # gaussian_kde vs scipy across bandwidth methods and evaluation funcs.
  if method == "callable":
    # NOTE(review): due to ** binding tighter than /, this computes
    # (neff ** -1.) / (d + 4), NOT Scott's factor neff ** (-1./(d+4)).
    # The comparison is still valid because scipy and JAX receive the
    # same callable — but confirm whether Scott's rule was intended.
    method = lambda kde: kde.neff ** -1./(kde.d+4)

  def scipy_fun(dataset, points, w):
    w = np.abs(w) if weights else None
    kde = osp_stats.gaussian_kde(dataset, bw_method=method, weights=w)
    if func is None:
      result = kde(points)
    else:
      result = getattr(kde, func)(points)
    # Note: the scipy implementation _always_ returns float64
    return result.astype(dtype)

  def lax_fun(dataset, points, w):
    w = jax.numpy.abs(w) if weights else None
    kde = lsp_stats.gaussian_kde(dataset, bw_method=method, weights=w)
    if func is None:
      result = kde(points)
    else:
      result = getattr(kde, func)(points)
    return result

  if outsize is None:
    outshape = inshape
  else:
    outshape = inshape[:-1] + (outsize,)
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [
      rng(inshape, dtype), rng(outshape, dtype), rng(inshape[-1:], dtype)]
  self._CheckAgainstNumpy(
      scipy_fun, lax_fun, args_maker, tol={
          np.float32: 2e-2 if jtu.test_device_matches(["tpu"]) else 1e-3,
          np.float64: 3e-14
      })
  self._CompileAndCheck(
      lax_fun, args_maker, rtol={np.float32: 3e-5, np.float64: 3e-14},
      atol={np.float32: 3e-4, np.float64: 3e-14})
@jtu.sample_product(
  shape=[(15,), (3, 15), (1, 12)],
  dtype=jtu.dtypes.floating,
)
def testKdeIntegrateGaussian(self, shape, dtype):
  # gaussian_kde.integrate_gaussian vs scipy against a fixed random
  # Gaussian (mean, covariance) built outside args_maker.

  def scipy_fun(dataset, weights):
    kde = osp_stats.gaussian_kde(dataset, weights=np.abs(weights))
    # Note: the scipy implementation _always_ returns float64
    return kde.integrate_gaussian(mean, covariance).astype(dtype)

  def lax_fun(dataset, weights):
    kde = lsp_stats.gaussian_kde(dataset, weights=jax.numpy.abs(weights))
    return kde.integrate_gaussian(mean, covariance)

  # Construct a random mean and positive definite covariance matrix
  rng = jtu.rand_default(self.rng())
  ndim = shape[0] if len(shape) > 1 else 1
  mean = rng(ndim, dtype)
  L = rng((ndim, ndim), dtype)
  # make L lower triangular with a strictly positive diagonal, so that
  # covariance = L @ L.T is positive definite
  L[np.triu_indices(ndim, 1)] = 0.0
  L[np.diag_indices(ndim)] = np.exp(np.diag(L)) + 0.01
  covariance = L @ L.T

  args_maker = lambda: [
      rng(shape, dtype), rng(shape[-1:], dtype)]
  self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker,
                          tol={np.float32: 1e-3, np.float64: 1e-14})
  self._CompileAndCheck(
      lax_fun, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15})

@jtu.sample_product(
  shape=[(15,), (12,)],
  dtype=jtu.dtypes.floating,
)
def testKdeIntegrateBox1d(self, shape, dtype):
  # gaussian_kde.integrate_box_1d vs scipy over the fixed box [-0.5, 1.5].

  def scipy_fun(dataset, weights):
    kde = osp_stats.gaussian_kde(dataset, weights=np.abs(weights))
    # Note: the scipy implementation _always_ returns float64
    return kde.integrate_box_1d(-0.5, 1.5).astype(dtype)

  def lax_fun(dataset, weights):
    kde = lsp_stats.gaussian_kde(dataset, weights=jax.numpy.abs(weights))
    return kde.integrate_box_1d(-0.5, 1.5)

  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [
      rng(shape, dtype), rng(shape[-1:], dtype)]
  self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker,
                          tol={np.float32: 1e-3, np.float64: 1e-14})
  self._CompileAndCheck(
      lax_fun, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15})

@jtu.sample_product(
  shape=[(15,), (3, 15), (1, 12)],
  dtype=jtu.dtypes.floating,
)
def testKdeIntegrateKde(self, shape, dtype):
  # gaussian_kde.integrate_kde vs scipy; the "other" KDE is a shifted
  # subset of the same dataset so the two overlap.

  def scipy_fun(dataset, weights):
    kde = osp_stats.gaussian_kde(dataset, weights=np.abs(weights))
    other = osp_stats.gaussian_kde(
        dataset[..., :-3] + 0.1, weights=np.abs(weights[:-3]))
    # Note: the scipy implementation _always_ returns float64
    return kde.integrate_kde(other).astype(dtype)

  def lax_fun(dataset, weights):
    kde = lsp_stats.gaussian_kde(dataset, weights=jax.numpy.abs(weights))
    other = lsp_stats.gaussian_kde(
        dataset[..., :-3] + 0.1, weights=jax.numpy.abs(weights[:-3]))
    return kde.integrate_kde(other)

  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [
      rng(shape, dtype), rng(shape[-1:], dtype)]
  self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker,
                          tol={np.float32: 1e-3, np.float64: 1e-14})
  self._CompileAndCheck(
      lax_fun, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15})
@jtu.sample_product(
  shape=[(15,), (3, 15), (1, 12)],
  dtype=jtu.dtypes.floating,
)
@jax.legacy_prng_key('allow')
def testKdeResampleShape(self, shape, dtype):
  # Check the output shapes of gaussian_kde.resample for scalar and
  # non-scalar sample shapes.
  def resample(key, dataset, weights, *, shape):
    kde = lsp_stats.gaussian_kde(dataset, weights=jax.numpy.abs(weights))
    return kde.resample(key, shape=shape)

  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [
      jax.random.PRNGKey(0), rng(shape, dtype), rng(shape[-1:], dtype)]

  ndim = shape[0] if len(shape) > 1 else 1

  # shape=() -> a single sample of dimension ndim
  func = partial(resample, shape=())
  with jax.debug_key_reuse(False):
    self._CompileAndCheck(
        func, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15})
    result = func(*args_maker())
  assert result.shape == (ndim,)

  # shape=(4,) -> four samples stacked along the trailing axis
  func = partial(resample, shape=(4,))
  with jax.debug_key_reuse(False):
    self._CompileAndCheck(
        func, args_maker, rtol={np.float32: 3e-07, np.float64: 4e-15})
    result = func(*args_maker())
  assert result.shape == (ndim, 4)

@jtu.sample_product(
  shape=[(15,), (1, 12)],
  dtype=jtu.dtypes.floating,
)
@jax.legacy_prng_key('allow')
def testKdeResample1d(self, shape, dtype):
  # Statistical check: resampled draws should follow the KDE's own
  # distribution (Kolmogorov-Smirnov test against its 1-D CDF).
  rng = jtu.rand_default(self.rng())
  dataset = rng(shape, dtype)
  weights = jax.numpy.abs(rng(shape[-1:], dtype))
  kde = lsp_stats.gaussian_kde(dataset, weights=weights)
  samples = jax.numpy.squeeze(kde.resample(jax.random.PRNGKey(5), shape=(1000,)))

  def cdf(x):
    result = jax.vmap(partial(kde.integrate_box_1d, -np.inf))(x)
    # Manually casting to numpy in order to avoid type promotion error
    return np.array(result)

  self.assertGreater(osp_stats.kstest(samples, cdf).pvalue, 0.01)

def testKdePyTree(self):
  # gaussian_kde must round-trip through jax.tree flatten/unflatten and
  # remain usable inside a jitted function.
  @jax.jit
  def evaluate_kde(kde, x):
    return kde.evaluate(x)

  dtype = np.float32
  rng = jtu.rand_default(self.rng())
  dataset = rng((3, 15), dtype)
  x = rng((3, 12), dtype)
  kde = lsp_stats.gaussian_kde(dataset)
  leaves, treedef = jax.tree.flatten(kde)
  kde2 = jax.tree.unflatten(treedef, leaves)
  jax.tree.map(lambda a, b: self.assertAllClose(a, b), kde, kde2)
  self.assertAllClose(evaluate_kde(kde, x), kde.evaluate(x))
@jtu.sample_product(
  [dict(shape=shape, axis=axis)
   for shape, axis in (
       ((0,), None),
       ((0,), 0),
       ((7,), None),
       ((7,), 0),
       ((47, 8), None),
       ((47, 8), 0),
       ((47, 8), 1),
       ((0, 2, 3), None),
       ((0, 2, 3), 0),
       ((0, 2, 3), 1),
       ((0, 2, 3), 2),
       ((10, 5, 21), None),
       ((10, 5, 21), 0),
       ((10, 5, 21), 1),
       ((10, 5, 21), 2),
   )
  ],
  dtype=jtu.dtypes.integer + jtu.dtypes.floating,
  contains_nans=[True, False],
  keepdims=[True, False]
)
@jtu.ignore_warning(
    category=RuntimeWarning,
    message="One or more sample arguments is too small; all returned values will be NaN"
)
@jtu.ignore_warning(
    category=RuntimeWarning,
    message="All axis-slices of one or more sample arguments are too small",
)
def testMode(self, shape, dtype, axis, contains_nans, keepdims):
  # lsp_stats.mode vs scipy, including empty inputs and NaN handling.
  # scipy's mode() changed shape/keepdims semantics across 1.9 and 1.11,
  # so the scipy side is wrapped to present one consistent contract.
  if scipy_version < (1, 9, 0) and keepdims != True:
    self.skipTest("scipy < 1.9.0 only support keepdims == True")
  if contains_nans:
    rng = jtu.rand_some_nan(self.rng())
  else:
    rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]

  def scipy_mode_wrapper(a, axis=0, nan_policy='propagate', keepdims=None):
    """Wrapper to manage the shape discrepancies between scipy and jax"""
    # scipy < 1.11 can't handle empty inputs: synthesize the (NaN mode,
    # zero count) result with the shape the new contract prescribes.
    if scipy_version < (1, 11, 0) and a.size == 0:
      if keepdims:
        if axis == None:
          output_shape = tuple(1 for _ in a.shape)
        else:
          output_shape = tuple(1 if i == axis else s for i, s in enumerate(a.shape))
      else:
        if axis == None:
          output_shape = ()
        else:
          output_shape = np.delete(np.array(a.shape, dtype=np.int64), axis)
      t = dtypes.canonicalize_dtype(jax.numpy.float_)
      return (np.full(output_shape, np.nan, dtype=t),
              np.zeros(output_shape, dtype=t))
    # scipy < 1.9 has no keepdims argument
    if scipy_version < (1, 9, 0):
      result = osp_stats.mode(a, axis=axis, nan_policy=nan_policy)
    else:
      result = osp_stats.mode(a, axis=axis, nan_policy=nan_policy, keepdims=keepdims)
    # reshape axis=None results to all-ones shape when keepdims is set
    if a.size != 0 and axis == None and keepdims == True:
      output_shape = tuple(1 for _ in a.shape)
      return (result.mode.reshape(output_shape), result.count.reshape(output_shape))
    return result

  scipy_fun = partial(scipy_mode_wrapper, axis=axis, keepdims=keepdims)
  scipy_fun = jtu.ignore_warning(category=RuntimeWarning,
                                 message="Mean of empty slice.*")(scipy_fun)
  scipy_fun = jtu.ignore_warning(category=RuntimeWarning,
                                 message="invalid value encountered.*")(scipy_fun)
  lax_fun = partial(lsp_stats.mode, axis=axis, keepdims=keepdims)
  tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
  tol = jtu.tolerance(dtype, tol_spec)
  self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(lax_fun, args_maker, rtol=tol)
@jtu.sample_product(
  [dict(shape=shape, axis=axis)
   for shape in [(0,), (7,), (47, 8), (0, 2, 3), (10, 5, 21)]
   for axis in [None, *range(len(shape))
  ]],
  dtype=jtu.dtypes.integer + jtu.dtypes.floating,
  method=['average', 'min', 'max', 'dense', 'ordinal']
)
def testRankData(self, shape, dtype, axis, method):
  # lsp_stats.rankdata vs scipy across all tie-breaking methods,
  # including empty inputs.
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  scipy_fun = partial(osp_stats.rankdata, method=method, axis=axis)
  lax_fun = partial(lsp_stats.rankdata, method=method, axis=axis)
  tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
  tol = jtu.tolerance(dtype, tol_spec)
  self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                          tol=tol)
  self._CompileAndCheck(lax_fun, args_maker, rtol=tol)
@jtu.sample_product(
  [dict(shape=shape, axis=axis, ddof=ddof, nan_policy=nan_policy, keepdims=keepdims)
   for shape in [(5,), (5, 6), (5, 6, 7)]
   for axis in [None, *range(len(shape))]
   for ddof in [0, 1, 2, 3]
   for nan_policy in ["propagate", "omit"]
   for keepdims in [True, False]
  ],
  dtype=jtu.dtypes.integer + jtu.dtypes.floating,
)
def testSEM(self, shape, dtype, axis, ddof, nan_policy, keepdims):
  # lsp_stats.sem (standard error of the mean) vs scipy.
  rng = jtu.rand_default(self.rng())
  args_maker = lambda: [rng(shape, dtype)]
  # keepdims only reached scipy.stats.sem in 1.11; omit it for older scipy
  kwds = {} if scipy_version < (1, 11) else {'keepdims': keepdims}
  scipy_fun = partial(osp_stats.sem, axis=axis, ddof=ddof, nan_policy=nan_policy,
                      **kwds)
  lax_fun = partial(lsp_stats.sem, axis=axis, ddof=ddof, nan_policy=nan_policy,
                    **kwds)
  tol_spec = {np.float32: 2e-4, np.float64: 5e-6}
  tol = jtu.tolerance(dtype, tol_spec)
  self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
                          atol=tol)
  self._CompileAndCheck(lax_fun, args_maker, atol=tol)
if __name__ == "__main__":
  # Run the suite under absltest with JAX's custom test loader.
  absltest.main(testLoader=jtu.JaxTestLoader())
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@scipy_stats_test.py@.PATH_END.py
|
{
"filename": "sql.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/table/io/sql.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Scott Coughlin (2017-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Utilities for database queries
"""
from astropy.table import Table
from ..filter import (OPERATORS, parse_column_filters)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
def format_db_selection(selection, engine=None):
    """Format a column filter selection as a SQL database WHERE string

    Parameters
    ----------
    selection
        column filter selection(s) understood by
        `parse_column_filters`, or `None` for no filtering

    engine : `sqlalchemy.engine.Engine`, optional
        the database engine; for PostgreSQL, column names are
        double-quoted

    Returns
    -------
    wherestr : `str`
        a ``'WHERE ...'`` clause joining all selections with ``AND``,
        or the empty string if there are none

    Raises
    ------
    ValueError
        if a parsed operator function has no entry in ``OPERATORS``
    """
    # parse selection for SQL query
    if selection is None:
        return ''
    selections = []
    for col, op_, value in parse_column_filters(selection):
        if engine and engine.name == 'postgresql':
            col = '"%s"' % col
        # map the operator function back to its string form;
        # fix: an unknown operator makes the [0] index fail with
        # IndexError (not KeyError), so catch that to actually raise
        # the intended ValueError
        try:
            opstr = [key for key in OPERATORS if OPERATORS[key] is op_][0]
        except IndexError:
            raise ValueError("Cannot format database 'WHERE' command with "
                             "selection operator %r" % op_)
        # NOTE: value is interpolated via repr(), not parameterized;
        # only use with trusted selection inputs
        selections.append('{0} {1} {2!r}'.format(col, opstr, value))
    if selections:
        return 'WHERE %s' % ' AND '.join(selections)
    return ''
def fetch(engine, tablename, columns=None, selection=None, **kwargs):
    """Fetch data from an SQL table into an `EventTable`

    Parameters
    ----------
    engine : `sqlalchemy.engine.Engine`
        the database engine to use when connecting

    tablename : `str`
        The name of table you are attempting to receive triggers
        from.

    columns : `list` of `str`, optional
        the columns to fetch; default: all columns (``SELECT *``)

    selection
        other filters you would like to supply
        underlying reader method for the given format

    **kwargs
        extra keyword arguments are passed to :func:`pandas.read_sql`

    .. note::

       For now it will attempt to automatically connect you
       to a specific DB. In the future, this may be an input
       argument.

    Returns
    -------
    table : `GravitySpyTable`
    """
    import pandas as pd

    # parse columns for SQL query
    if columns is None:
        columnstr = '*'
    else:
        columnstr = ', '.join('"%s"' % c for c in columns)

    # parse selection for SQL query
    selectionstr = format_db_selection(selection, engine=engine)

    # build SQL query
    # NOTE: table and column names are interpolated directly into the
    # query (not parameterized); only use with trusted inputs
    qstr = 'SELECT %s FROM %s %s' % (columnstr, tablename, selectionstr)

    # perform query
    tab = pd.read_sql(qstr, engine, **kwargs)

    # Convert unicode columns to string
    # (only inspect dtypes when there are rows to inspect)
    if not tab.empty:
        types = tab.apply(lambda x: pd.api.types.infer_dtype(x.values))
        for col in types[types == 'unicode'].index:
            tab[col] = tab[col].astype(str)

    return Table.from_pandas(tab).filled()
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@table@io@sql.py@.PATH_END.py
|
{
"filename": "rvs.py",
"repo_name": "tamarervin/SolAster",
"repo_path": "SolAster_extracted/SolAster-main/SolAster/tools/rvs.py",
"type": "Python"
}
|
"""
Tamar Ervin
Date: October 7, 2021
pipeline function to calculate 'sun-as-a-star' RVs
"""
import os
import datetime
import numpy as np
import sunpy.map
from sunpy.net import Fido
from sunpy.net import attrs as a
from sunpy.coordinates import frames
import SolAster.tools.calculation_funcs as sfuncs
import SolAster.tools.lbc_funcs as lbfuncs
import SolAster.tools.coord_funcs as ctfuncs
import SolAster.tools.utilities as utils
from SolAster.tools.settings import *
from SolAster.tools.plotting_funcs import hmi_plot
def calc_model(inst, v_conv, v_phot):
    """
    Calculate the model 'sun-as-a-star' RV from velocity components.

    The model is the linear combination
    ``RV = A * v_phot + B * v_conv + RV0`` with instrument-specific
    coefficients taken from the settings classes.

    Parameters
    ----------
    inst: str
        instrument for ground-based RVs ('HARPS-N' or 'NEID')
    v_conv: arr, float
        convective velocity values
    v_phot: arr, float
        photometric velocity values

    Returns
    -------
    RV: arr, float
        model RV values

    Raises
    ------
    ValueError
        if ``inst`` is not a supported instrument
    """
    if inst == 'HARPS-N':
        A = HARPSN.A
        B = HARPSN.B
        RV0 = HARPSN.RV0
    elif inst == 'NEID':
        A = NEID.A
        B = NEID.B
        RV0 = NEID.RV0
    else:
        # fix: raise a single formatted message instead of
        # Exception('...', inst, '...'), which builds an exception whose
        # args are a tuple and prints awkwardly; ValueError is a subclass
        # of Exception, so existing handlers still catch it
        raise ValueError(
            "The instrument %r is not currently supported by SolAster. "
            "Choose either 'HARPS-N', or 'NEID'." % inst)

    RV = A * v_phot + B * v_conv + RV0
    return RV
def calc_rvs(start_date, end_date, cadence, inst='NEID', csv_name=None, diagnostic_plots=False, save_fig=None):
    """
    Calculate 'sun-as-a-star' RV components from SDO/HMI images.

    Calculation pipeline described in Ervin et al. (2021) - Submitted. and based on
    Haywood et al. (2016), Milbourne et al. (2019) using the technique
    developed by Meunier, Lagrange & Desort (2010) for SoHO/MDI images.

    Results are appended row-by-row to a CSV file; dates with missing or
    unusable HMI data are logged to a companion "bad dates" CSV.

    Parameters
    ----------
    start_date: datetime
        start date of RV calculations (datetime object)
    end_date: datetime
        end date of RV calculations (datetime object)
    cadence: int
        how often to calculate RV components, in seconds
    inst: str
        instrument to use to fit for RVs ('NEID' or 'HARPS-N')
    csv_name: str
        name of file to store calculations in
    diagnostic_plots: bool
        whether or not to create diagnostic plots showing HMI images and active region detection
    save_fig: str
        path to save diagnostic plot or None if not saving

    Returns
    -------
    None
    """
    # check input formats
    start_date, end_date, cadence, csv_name = utils.check_inputs(CsvDir.CALC, start_date, end_date, cadence, csv_name)
    # BUG FIX: derive the bad-dates path from the base name *before* csv_name
    # becomes a full path; the original joined an absolute path a second time,
    # yielding a '...name.csv_bad_dates.csv' file.
    bad_dates_csv = os.path.join(CsvDir.CALC, csv_name + '_bad_dates.csv')
    csv_name = os.path.join(CsvDir.CALC, csv_name + '.csv')
    # print out csv title
    print("Beginning calculation of values for csv file: " + csv_name)
    # List of header strings
    row_contents = ['date_obs', 'date_jd', 'rv_model', 'v_quiet', 'v_disc', 'v_phot', 'v_conv', 'f_bright', 'f_spot',
                    'f', 'Bobs', 'vphot_bright', 'vphot_spot', 'f_small', 'f_large', 'f_network', 'f_plage',
                    'quiet_flux', 'ar_flux', 'conv_flux', 'pol_flux', 'pol_conv_flux', 'vconv_quiet', 'vconv_large',
                    'vconv_small']
    # Append a list as new line to an old csv file
    utils.append_list_as_row(csv_name, row_contents)
    # get hmi data products: search +/- 22 s around each target time
    time_range = datetime.timedelta(seconds=22)
    physobs_list = [a.Physobs.los_velocity, a.Physobs.los_magnetic_field, a.Physobs.intensity]
    # get dates list (whole seconds spanned by the requested interval)
    xy = (end_date - start_date).seconds + (end_date - start_date).days * 24 * 3600
    dates_list = [start_date + datetime.timedelta(seconds=cadence * x) for x in range(0, int(xy / cadence))]
    for i, date in enumerate(dates_list):
        # convert the date to a string -- required for use in csv file
        date_str, date_obj, date_jd = utils.get_dates(date)
        # pull image within specified time range
        result = Fido.search(a.Time(str(date_obj - time_range), str(date_obj + time_range)),
                             a.Instrument.hmi, physobs_list[0] | physobs_list[1] | physobs_list[2])
        # add file to list
        file_download = Fido.fetch(result)
        # remove unusable file types, keeping only FITS downloads
        good_files = [f for f in file_download if os.path.splitext(f)[1] == '.fits']
        if len(good_files) != 3:
            # log the bad date and skip it (BUG FIX: was 'pass', which fell
            # through harmlessly only because the work sat in an else branch)
            utils.append_list_as_row(bad_dates_csv, [date_str, 'not three good files'])
            print('\nNot three good files: ' + date_str + ' index: ' + str(i))
            continue
        # convert to map sequence
        map_seq = sunpy.map.Map(sorted(good_files))
        # split into data types, flagging anything unexpected
        missing_map = False
        vmap = mmap = imap = None
        for map_obj in map_seq:
            if map_obj.meta['content'] == 'DOPPLERGRAM':
                vmap = map_obj
            elif map_obj.meta['content'] == 'MAGNETOGRAM':
                mmap = map_obj
            elif map_obj.meta['content'] == 'CONTINUUM INTENSITY':
                imap = map_obj
            else:
                missing_map = True
        if missing_map:
            print("Missing a data product for " + date_str)
            # append these values to the csv file
            utils.append_list_as_row(bad_dates_csv, [date_str, 'missing data product'])
            continue
        # coordinate transformation for maps
        x, y, pd, r, d, mu = ctfuncs.coordinates(vmap)
        wij, nij, rij = ctfuncs.vel_coords(x, y, pd, r, vmap)
        # remove bad mu values
        vmap, mmap, imap = ctfuncs.fix_mu(mu, [vmap, mmap, imap], mu_cutoff=Parameters.mu_cutoff)
        # calculate relative positions
        deltaw, deltan, deltar, dij = sfuncs.rel_positions(wij, nij, rij, vmap)
        # calculate spacecraft velocity
        vsc = sfuncs.spacecraft_vel(deltaw, deltan, deltar, dij, vmap)
        # optimized solar rotation parameters
        a_parameters = [Parameters.a1, Parameters.a2, Parameters.a3]
        # calculation of solar rotation velocity
        vrot = sfuncs.solar_rot_vel(wij, nij, rij, deltaw, deltan, deltar, dij, vmap, a_parameters)
        # calculate corrected velocity
        corrected_vel = vmap.data - np.real(vsc) - np.real(vrot)
        # corrected velocity maps
        map_vel_cor = sfuncs.corrected_map(corrected_vel, vmap, map_type='Corrected-Dopplergram',
                                           frame=frames.HeliographicCarrington)
        # limb brightening
        Lij = lbfuncs.limb_polynomial(imap)
        # calculate corrected data
        Iflat = imap.data / Lij
        # corrected intensity maps
        map_int_cor = sfuncs.corrected_map(Iflat, imap, map_type='Corrected-Intensitygram',
                                           frame=frames.HeliographicCarrington)
        # calculate unsigned field strength
        Bobs, Br = sfuncs.mag_field(mu, mmap, Parameters.B_noise, mu_cutoff=Parameters.mu_cutoff)
        # corrected observed magnetic data map
        map_mag_obs = sfuncs.corrected_map(Bobs, mmap, map_type='Corrected-Magnetogram',
                                           frame=frames.HeliographicCarrington)
        # calculate magnetic threshold
        active, quiet = sfuncs.mag_thresh(mu, mmap, Br_cutoff=Parameters.Br_cutoff,
                                          mu_cutoff=Parameters.mu_cutoff)
        # calculate intensity threshold
        fac_inds, spot_inds = sfuncs.int_thresh(map_int_cor, active, quiet)
        # create diagnostic plots (first date only)
        if i == 0 and diagnostic_plots:
            hmi_plot(map_int_cor, map_mag_obs, map_vel_cor, fac_inds, spot_inds, mu, save_fig)
        ### velocity contribution due to convective motion of quiet-Sun
        # BUG FIX: v_quiet/v_phot/v_disc were called as bare names that the
        # assignment itself shadowed (NameError); these helpers live in
        # calculation_funcs (sfuncs) -- TODO confirm against the SolAster API.
        v_quiet = sfuncs.v_quiet(map_vel_cor, imap, quiet)
        ### velocity contribution due to rotational Doppler imbalance of active regions (faculae/sunspots)
        v_phot, vphot_bright, vphot_spot = sfuncs.v_phot(quiet, active, Lij, vrot, imap, mu, fac_inds,
                                                         spot_inds, mu_cutoff=Parameters.mu_cutoff)
        ### velocity contribution due to suppression of convective blueshift by active regions
        v_disc = sfuncs.v_disc(map_vel_cor, imap)
        # calculate convective velocity
        v_conv = v_disc - v_quiet
        ### filling factor
        f_bright, f_spot, f = sfuncs.filling_factor(mu, mmap, active, fac_inds, spot_inds,
                                                    mu_cutoff=Parameters.mu_cutoff)
        ### unsigned magnetic flux
        unsigned_obs_flux = sfuncs.unsigned_flux(map_mag_obs, imap)
        ### calculate the area filling factor
        pixA_hem = ctfuncs.pix_area_hem(wij, nij, rij, vmap)
        area = sfuncs.area_calc(active, pixA_hem)
        f_small, f_large, f_network, f_plage, f_nonconv = sfuncs.area_filling_factor(
            active, area, mu, mmap, fac_inds,
            athresh=Parameters.athresh, mu_cutoff=Parameters.mu_cutoff)
        ### get the unsigned flux
        quiet_flux, ar_flux, conv_flux, pol_flux, pol_conv_flux = sfuncs.area_unsigned_flux(
            map_mag_obs, imap, area, active, athresh=Parameters.athresh)
        ### get area weighted convective velocities
        vconv_quiet, vconv_large, vconv_small = sfuncs.area_vconv(
            map_vel_cor, imap, active, area, athresh=Parameters.athresh)
        ### calculate model RV
        # BUG FIX: the original called rvs.calc_model, but this *is* the rvs
        # module; call the local function directly.
        rv_model = calc_model(inst, v_conv, v_phot)
        # make array of what we want to save
        save_vals = [rv_model, v_quiet, v_disc, v_phot, v_conv, f_bright, f_spot, f, unsigned_obs_flux,
                     vphot_bright, vphot_spot, f_small, f_large, f_network, f_plage, quiet_flux, ar_flux,
                     conv_flux, pol_flux, pol_conv_flux, vconv_quiet, vconv_large, vconv_small]
        # round the numeric values and prepend the date columns
        round_vals = [date_str, date_jd] + list(np.around(save_vals, 3))
        # append these values to the csv file
        utils.append_list_as_row(csv_name, round_vals)
        # print that the date is completed
        print('\nCalculations and save to file complete for ' + date_str + ' index: ' + str(i))
    print('Calculation complete for dates:', start_date, 'to', end_date)
|
tamarervinREPO_NAMESolAsterPATH_START.@SolAster_extracted@SolAster-main@SolAster@tools@rvs.py@.PATH_END.py
|
{
"filename": "paperfigs.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/docs/macro/paperfigs.py",
"type": "Python"
}
|
# Copyright (C) 2022, the ixpeobssim team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function, division
import matplotlib
import numpy
from astropy.visualization.wcsaxes import WCSAxes
from ixpeobssim.binning.misc import xBinnedMap
from ixpeobssim.binning.polarization import xBinnedPolarizationMapCube
from ixpeobssim.core.fitsio import xFITSImageBase
from ixpeobssim.core.hist import xHistogram2d
import ixpeobssim.core.pipeline as pipeline
import ixpeobssim.evt.xspec_ as xspec_
from ixpeobssim.instrument import DU_IDS
from ixpeobssim.instrument.sc import dithering_pattern
from ixpeobssim.irf import load_arf, load_psf
from ixpeobssim.utils.matplotlib_ import plt, setup_gca, _set_rc_param
from ixpeobssim.utils.units_ import degrees_to_arcmin
from ixpeobssim.config.crab_pulsar import ROI_MODEL as CRAB_PRS_ROI
# Make the font slightly bigger.
_set_rc_param('font.size', 14.0)
def plot_count_spectrum(energy=6., phase=0.6):
    """Plot the Crab pulsar count spectrum and its percent point function.

    Two figures are produced: the phase--energy count-spectrum map with inset
    slices at the given *phase* (energy profile) and *energy* (phase profile),
    and the corresponding ppf map with a slice at *phase*.

    Arguments
    ---------
    energy : float
        Energy in keV at which the vertical (phase) slice is taken.
    phase : float
        Pulse phase at which the horizontal (energy) slice is taken.
    """
    psr = CRAB_PRS_ROI['Crab pulsar']
    aeff = load_arf()
    time_grid = numpy.linspace(0., 1., 100)
    count_spectrum = psr.create_count_spectrum(aeff, time_grid)
    count_spectrum.zlabel = 'Count spectrum [s$^{-1}$ keV$^{-1}$]'
    plt.figure('Crab count spectrum', figsize=(8, 7.2))
    plt.subplots_adjust(top=0.80, bottom=0.1)
    count_spectrum.plot(logz=True, vmin=1.1e-6, num_contours=100)
    fmt = matplotlib.ticker.LogFormatterMathtext()
    fmt.create_dummy_axis()
    count_spectrum.plot_contours(logz=True, cfmt=fmt)
    setup_gca(ylabel='Pulse phase')
    # Shared geometry for the inset axes in both figures.
    y = 0.78
    dx = 0.42
    dy = 0.40
    # Left inset: energy slice at the selected phase.
    axins1 = plt.gca().inset_axes([-0.08, y, dx, dy])
    slice1 = count_spectrum.hslice(phase)
    axins1.plot(slice1.x, slice1.y, color='black')
    axins1.set_yscale('log')
    axins1.set_xlim(1., 12.)
    axins1.set_xlabel('Energy [keV]')
    axins1.set_xticks(numpy.linspace(2., 10., 5))
    axins1.xaxis.tick_top()
    axins1.xaxis.set_label_position('top')
    axins1.grid(which='major')
    plt.axhline(phase, color='white', ls='dashed')
    plt.gca().annotate('', xy=(2., phase), xycoords='data', xytext=(0, 65),
                       textcoords='offset points', size=20,
                       arrowprops=dict(arrowstyle="wedge,tail_width=0.7", fc="white", ec="white"))
    # Right inset: phase slice at the selected energy.
    axins2 = plt.gca().inset_axes([0.80, y, dx, dy])
    slice2 = count_spectrum.vslice(energy)
    axins2.plot(slice2.x, slice2.y, color='black')
    axins2.set_xlim(0., 1.)
    axins2.set_xlabel('Pulse phase')
    axins2.xaxis.tick_top()
    axins2.xaxis.set_label_position('top')
    axins2.yaxis.tick_right()
    axins2.yaxis.set_label_position('right')
    axins2.grid(which='major')
    plt.axvline(energy, color='white', ls='dashed')
    plt.gca().annotate('', xy=(energy, 0.9), xycoords='data', xytext=(130, 0),
                       textcoords='offset points', size=20,
                       arrowprops=dict(arrowstyle="wedge,tail_width=0.7", fc="white", ec="white"))
    # Second figure: the percent point function used for energy sampling.
    plt.figure('Crab ppf', figsize=(8, 7.2))
    ppf = count_spectrum.ppf
    ppf.zlabel = 'Percent point function [keV]'
    plt.subplots_adjust(top=0.80, bottom=0.1)
    ppf.plot(logz=True, vmin=1., vmax=12., num_contours=100)
    ppf.plot_contours(num_contours=[2, 3, 4, 5])
    setup_gca(ylabel='Pulse phase', xlabel='$\\xi$')
    axins = plt.gca().inset_axes([-0.08, y, dx, dy])
    # CLEANUP: renamed from `slice`, which shadowed the builtin; also dropped
    # an unused `logz = True` local from the original.
    ppf_slice = ppf.hslice(phase)
    axins.plot(ppf_slice.x, ppf_slice.y, color='black')
    axins.set_ylim(1., 12.)
    axins.set_yscale('log')
    axins.set_xlabel('$\\xi$')
    axins.set_xticks(numpy.linspace(0., 1., 6))
    yticks = [1, 2, 3, 4, 5, 10]
    axins.set_yticks(yticks)
    axins.set_yticklabels(yticks)
    axins.xaxis.tick_top()
    axins.xaxis.set_label_position('top')
    axins.grid(which='major')
    plt.axhline(phase, color='white', ls='dashed')
    plt.gca().annotate('', xy=(0.2, phase), xycoords='data', xytext=(0, 65),
                       textcoords='offset points', size=20,
                       arrowprops=dict(arrowstyle="wedge,tail_width=0.7", fc="white", ec="white"))
def plot_dithering_path(start_met, duration, step=1, ylabel='y [arcmin]'):
    """Draw the spacecraft dithering path on the current axes.

    The path is sampled from *start_met* for *duration* seconds at *step*-second
    intervals; the elapsed time is annotated in the top-right corner.
    """
    pattern = dithering_pattern()
    times = numpy.arange(start_met, duration + 0.5 * step, step)
    xpos, ypos = pattern(times)
    plt.plot(xpos, ypos, color='black')
    plt.gca().set_aspect('equal')
    setup_gca(xmin=-2., xmax=2., ymin=-2., ymax=2., grids=True,
              xlabel='x [arcmin]', ylabel=ylabel)
    plt.text(1.8, 1.6, '%d s' % times[-1], fontsize=12, ha='right')
def plot_dithering():
    """Plot the PSF-smeared dithered exposure map plus three path panels.

    The bottom panel shows a 2D histogram of dithered, PSF-convolved photon
    positions over 50 ks; the three top panels show the bare dithering path
    after 500 s, 5 ks and 50 ks.
    """
    t = numpy.linspace(0., 50000., 10000000)
    x, y = dithering_pattern()(t)
    psf = load_psf(du_id=1)
    # Smear each sample by the PSF and convert the offsets to arcmin.
    dx, dy = psf.delta(len(t))
    x += degrees_to_arcmin(dx)
    y += degrees_to_arcmin(dy)
    binning = numpy.linspace(-2., 2., 200)
    h = xHistogram2d(binning, binning, zlabel='Scaled counts [a. u.]').fill(x, y)
    h.content /= h.content.max()
    fig = plt.figure('Dithering', (8, 10))
    # CLEANUP: the original reused the name `ax1` for both the gridspec handle
    # panel and a top panel, silently clobbering the first reference.
    gs = fig.add_gridspec(3, 3, hspace=0.35, bottom=0.075, top=0.975)
    ax_map = fig.add_subplot(gs[1:3, 0:3])
    h.plot()
    ax_map.set_aspect('equal')
    setup_gca(xmin=-2., xmax=2., ymin=-2., ymax=2., grids=True,
              xlabel='x [arcmin]', ylabel='y [arcmin]')
    # Top row: dithering path at three increasing durations.
    fig.add_subplot(gs[0, 0])
    plot_dithering_path(0, 500)
    fig.add_subplot(gs[0, 1])
    plot_dithering_path(0, 5000, ylabel=None)
    fig.add_subplot(gs[0, 2])
    plot_dithering_path(0, 50000, ylabel=None)
def plot_xspec(duration=100000., rebin=2):
    """Simulate a toy point source and plot the XSPEC I/Q/U spectral fits.

    Runs the full ixpeobssim pipeline (simulate, bin into PHA1/PHA1Q/PHA1U,
    group channels, fit with a pollin * powerlaw model) and draws, for each
    Stokes spectrum, the normalized counts for the three detector units with
    a residual panel underneath.

    Arguments
    ---------
    duration : float
        Simulated observation duration in seconds.
    rebin : int
        Channel grouping factor passed to xpgrppha ('GROUP 0 275 <rebin>').
    """
    pipeline.reset('toy_point_source')
    pipeline.xpobssim(duration=duration, saa=False, occult=False)
    # Bin into the three Stokes spectra and group the channels for fitting.
    for algorithm in ['PHA1', 'PHA1Q', 'PHA1U']:
        file_list = pipeline.xpbin(*pipeline.file_list(), algorithm=algorithm)
        pipeline.xpgrppha(*file_list, comm='GROUP 0 275 %d' % rebin)
    file_list = pipeline.file_list('pha1*', 'grppha')
    fit_output = pipeline.xpxspec(*file_list, model='pollin * powerlaw', plot=False)
    #xspec_.plot()
    # Six stacked panels: spectrum + residuals for each of I, Q, U.
    fig, axs = plt.subplots(6, 1, figsize=(8, 10), sharex=True,
        gridspec_kw=dict(bottom=0.06, top=0.98, height_ratios=[1., 0.4, 1., 0.4, 1., 0.4]))
    # Remove horizontal space between axes
    fig.subplots_adjust(hspace=0.15)
    label_dict = {'PHA1': 'I', 'PHA1Q': 'Q', 'PHA1U': 'U'}
    for i, alg in enumerate(['PHA1', 'PHA1Q', 'PHA1U']):
        # NOTE(review): original indentation was lost; this nesting plots the
        # spectrum and residuals for every DU -- confirm against upstream.
        for du_id in DU_IDS:
            data = xspec_.retrieve_plot_data(alg, du_id)
            plt.sca(axs[i * 2])
            xspec_.plot_normalized_counts(data, du_id)
            label = '%s [s$^{-1}$ keV$^{-1}$]' % label_dict[alg]
            setup_gca(grids=True, logy=True, ylabel=label)
            if i == 0:
                fit_data = xspec_.current_fit_output()
                #fit_data.stat_box(position='lower left').plot()
                plt.legend()
            plt.sca(axs[i * 2 + 1])
            xspec_.plot_residuals(data)
            if i != 2:
                plt.gca().set_xlabel('')
    fig.align_ylabels()
def plot_polarization_maps(duration=2000000.):
    """Simulate Cas A and plot the Stokes I count map with polarization arrows,
    alongside smaller Stokes Q and U panels.

    Arguments
    ---------
    duration : float
        Simulated observation duration in seconds.
    """
    pipeline.reset('casa')
    pipeline.xpobssim(duration=duration, saa=False, occult=False)
    # Fine-binned count map for the I panel; coarser polarization map cube.
    pipeline.xpbin(*pipeline.file_list(), algorithm='CMAP', npix=250, pixsize=2)
    pipeline.xpbin(*pipeline.file_list(), algorithm='PMAP', npix=50, pixsize=10)
    cmap = xBinnedMap.from_file_list(pipeline.file_list('cmap'))
    pmap = xBinnedPolarizationMapCube.from_file_list(pipeline.file_list('pmap'))
    fig = plt.figure('Polarization map', (8, 10))
    ax = fig.add_gridspec(2, 2, bottom=0.075, top=0.975, height_ratios=(1, 2.45), hspace=0.025)
    # Large bottom panel: Stokes I with significance-masked polarization arrows.
    ax1 = fig.add_subplot(ax[1, 0:2], axes_class=WCSAxes)
    ax1.reset_wcs(cmap.fits_image.wcs)
    ax1.imshow(cmap.fits_image.data)
    ax1.grid(color='gray')
    ax1.set_xlabel('Right Ascension (J2000)')
    ax1.set_ylabel('Declination (J2000)')
    # Only draw arrows where the detection is above 3 sigma.
    mask = pmap.calculate_significance_mask(3., 0.)
    pmap._overlay_arrows(0, mask)
    plt.text(0.05, 0.92, 'Stokes I', color='white', transform = ax1.transAxes)
    # Top-left panel: Stokes Q (tick labels hidden to reduce clutter).
    ax2 = fig.add_subplot(ax[0, 0], axes_class=WCSAxes)
    ax2.reset_wcs(pmap.wcs, slices=('x', 'y', 0))
    ax2.imshow(pmap.Q[0])
    for axis in ('x', 'y'):
        ax2.tick_params(axis, labelsize=0.)
    ax2.set_xlabel(' ')
    ax2.set_ylabel(' ')
    ax2.grid(color='gray')
    plt.text(0.1, 0.85, 'Stokes Q', color='white', transform = ax2.transAxes)
    # Top-right panel: Stokes U.
    ax3 = fig.add_subplot(ax[0, 1], axes_class=WCSAxes)
    ax3.reset_wcs(pmap.wcs, slices=('x', 'y', 0))
    ax3.imshow(pmap.U[0])
    for axis in ('x', 'y'):
        ax3.tick_params(axis, labelsize=0.)
    ax3.set_xlabel(' ')
    ax3.set_ylabel(' ')
    ax3.grid(color='gray')
    plt.text(0.1, 0.85, 'Stokes U', color='white', transform = ax3.transAxes)
if __name__ == '__main__':
    # Only the count-spectrum figure is produced by default; the other paper
    # figures run full simulations and are left commented out.
    plot_count_spectrum()
    #plot_dithering()
    #plot_xspec()
    #plot_polarization_maps()
    plt.show()
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@docs@macro@paperfigs.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/polar/__init__.py",
"type": "Python"
}
|
# Auto-generated plotly module: exposes the validator classes for the
# layout.polar namespace. On Python < 3.7 (or while type checking) the
# classes are imported eagerly; otherwise they are loaded lazily via
# relative_import to keep plotly's import time low.
import sys
from typing import TYPE_CHECKING

if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._uirevision import UirevisionValidator
    from ._sector import SectorValidator
    from ._radialaxis import RadialaxisValidator
    from ._hole import HoleValidator
    from ._gridshape import GridshapeValidator
    from ._domain import DomainValidator
    from ._bgcolor import BgcolorValidator
    from ._barmode import BarmodeValidator
    from ._bargap import BargapValidator
    from ._angularaxis import AngularaxisValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns the lazy __getattr__/__dir__ hooks (PEP 562).
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._uirevision.UirevisionValidator",
            "._sector.SectorValidator",
            "._radialaxis.RadialaxisValidator",
            "._hole.HoleValidator",
            "._gridshape.GridshapeValidator",
            "._domain.DomainValidator",
            "._bgcolor.BgcolorValidator",
            "._barmode.BarmodeValidator",
            "._bargap.BargapValidator",
            "._angularaxis.AngularaxisValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@polar@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "rat-pac/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Tool/packaging/__init__.py",
"type": "Python"
}
|
"""SCons.Tool.Packaging
SCons Packaging Tool.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/packaging/__init__.py 4043 2009/02/23 09:06:45 scons"
import SCons.Environment
from SCons.Variables import *
from SCons.Errors import *
from SCons.Util import is_List, make_path_relative
from SCons.Warnings import warn, Warning
import os, imp
import SCons.Defaults
__all__ = [ 'src_targz', 'src_tarbz2', 'src_zip', 'tarbz2', 'targz', 'zip', 'rpm', 'msi', 'ipk' ]
#
# Utility and Builder function
#
def Tag(env, target, source, *more_tags, **kw_tags):
    """ Tag a file with the given arguments, just sets the accordingly named
    attribute on the file object.

    Python 2 SCons builder. When no target is given, `source` is the file to
    tag; otherwise `source` is taken as the first tag name. Extra positional
    tags become empty-valued attributes; keyword tags carry their value.

    TODO: FIXME
    """
    if not target:
        target=source
        first_tag=None
    else:
        first_tag=source
    if first_tag:
        kw_tags[first_tag[0]] = ''
    if len(kw_tags) == 0 and len(more_tags) == 0:
        raise UserError, "No tags given."
    # XXX: sanity checks
    for x in more_tags:
        kw_tags[x] = ''
    if not SCons.Util.is_List(target):
        target=[target]
    else:
        # hmm, sometimes the target list, is a list of a list
        # make sure it is flattened prior to processing.
        # TODO: perhaps some bug ?!?
        target=env.Flatten(target)
    for t in target:
        for (k,v) in kw_tags.items():
            # all file tags have to start with PACKAGING_, so we can later
            # differentiate between "normal" object attributes and the
            # packaging attributes. As the user should not be bothered with
            # that, the prefix will be added here if missing.
            #if not k.startswith('PACKAGING_'):
            if k[:10] != 'PACKAGING_':
                k='PACKAGING_'+k
            setattr(t, k, v)
def Package(env, target=None, source=None, **kw):
    """ Entry point for the package tool.

    Python 2 SCons builder: resolves the package type(s), loads the matching
    packager submodule(s), and wires up the package targets. Returns the list
    of generated targets, aliased under 'package'.
    """
    # check if we need to find the source files ourself
    if not source:
        source = env.FindInstalledFiles()
    if len(source)==0:
        raise UserError, "No source for Package() given"
    # decide which types of packages shall be built. Can be defined through
    # four mechanisms: command line argument, keyword argument,
    # environment argument and default selection( zip or tar.gz ) in that
    # order.
    try: kw['PACKAGETYPE']=env['PACKAGETYPE']
    except KeyError: pass
    if not kw.get('PACKAGETYPE'):
        from SCons.Script import GetOption
        kw['PACKAGETYPE'] = GetOption('package_type')
    if kw['PACKAGETYPE'] == None:
        if env['BUILDERS'].has_key('Tar'):
            kw['PACKAGETYPE']='targz'
        elif env['BUILDERS'].has_key('Zip'):
            kw['PACKAGETYPE']='zip'
        else:
            raise UserError, "No type for Package() given"
    PACKAGETYPE=kw['PACKAGETYPE']
    if not is_List(PACKAGETYPE):
        # NOTE(review): `string` is not in the visible import block; presumably
        # pulled in by one of the star imports above -- verify.
        PACKAGETYPE=string.split(PACKAGETYPE, ',')
    # load the needed packagers.
    def load_packager(type):
        # Each package type maps to a submodule of this package.
        try:
            file,path,desc=imp.find_module(type, __path__)
            return imp.load_module(type, file, path, desc)
        except ImportError, e:
            raise EnvironmentError("packager %s not available: %s"%(type,str(e)))
    packagers=map(load_packager, PACKAGETYPE)
    # set up targets and the PACKAGEROOT
    try:
        # fill up the target list with a default target name until the PACKAGETYPE
        # list is of the same size as the target list.
        if not target: target = []
        size_diff = len(PACKAGETYPE)-len(target)
        default_name = "%(NAME)s-%(VERSION)s"
        if size_diff>0:
            default_target = default_name%kw
            target.extend( [default_target]*size_diff )
        if not kw.has_key('PACKAGEROOT'):
            kw['PACKAGEROOT'] = default_name%kw
    except KeyError, e:
        raise SCons.Errors.UserError( "Missing Packagetag '%s'"%e.args[0] )
    # setup the source files
    source=env.arg2nodes(source, env.fs.Entry)
    # call the packager to setup the dependencies.
    targets=[]
    try:
        # Each packager consumes one target name from the front of the list.
        for packager in packagers:
            t=[target.pop(0)]
            t=apply(packager.package, [env,t,source], kw)
            targets.extend(t)
        assert( len(target) == 0 )
    except KeyError, e:
        raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
                                      % (e.args[0],packager.__name__) )
    except TypeError, e:
        # this exception means that a needed argument for the packager is
        # missing. As our packagers get their "tags" as named function
        # arguments we need to find out which one is missing.
        from inspect import getargspec
        args,varargs,varkw,defaults=getargspec(packager.package)
        if defaults!=None:
            args=args[:-len(defaults)] # throw away arguments with default values
        args.remove('env')
        args.remove('target')
        args.remove('source')
        # now remove any args for which we have a value in kw.
        #args=[x for x in args if not kw.has_key(x)]
        args=filter(lambda x, kw=kw: not kw.has_key(x), args)
        if len(args)==0:
            raise # must be a different error, so reraise
        elif len(args)==1:
            raise SCons.Errors.UserError( "Missing Packagetag '%s' for %s packager"\
                                          % (args[0],packager.__name__) )
        else:
            raise SCons.Errors.UserError( "Missing Packagetags '%s' for %s packager"\
                                          % (", ".join(args),packager.__name__) )
    target=env.arg2nodes(target, env.fs.Entry)
    targets.extend(env.Alias( 'package', targets ))
    return targets
#
# SCons tool initialization functions
#
# Module-level guard so the --package-type option is only registered once.
added = None

def generate(env):
    """ SCons tool hook: register the --package-type command-line option and
    install the Package and Tag builders into *env* (Python 2).
    """
    from SCons.Script import AddOption
    global added
    if not added:
        added = 1
        AddOption('--package-type',
                  dest='package_type',
                  default=None,
                  type="string",
                  action="store",
                  help='The type of package to create.')
    # Only install the builders if they are not already present.
    try:
        env['BUILDERS']['Package']
        env['BUILDERS']['Tag']
    except KeyError:
        env['BUILDERS']['Package'] = Package
        env['BUILDERS']['Tag'] = Tag
def exists(env):
    """ SCons tool hook: the packaging tool is always considered available. """
    return 1
# XXX
def options(opts):
    """ Add the PACKAGETYPE enum variable (one of __all__) to *opts*. """
    opts.AddVariables(
        EnumVariable( 'PACKAGETYPE',
                      'the type of package to create.',
                      None, allowed_values=map( str, __all__ ),
                      ignorecase=2
                    )
    )
#
# Internal utility functions
#
def copy_attr(f1, f2):
    """ copies the special packaging file attributes from f1 to f2.

    Only attributes with the PACKAGING_ prefix are considered packaging
    metadata; attributes already present on f2 are left untouched.
    """
    #pattrs = [x for x in dir(f1) if not hasattr(f2, x) and\
    #          x.startswith('PACKAGING_')]
    copyit = lambda x, f2=f2: not hasattr(f2, x) and x[:10] == 'PACKAGING_'
    pattrs = filter(copyit, dir(f1))
    for attr in pattrs:
        setattr(f2, attr, getattr(f1, attr))
def putintopackageroot(target, source, env, pkgroot, honor_install_location=1):
    """ Uses the CopyAs builder to copy all source files to the directory given
    in pkgroot.

    If honor_install_location is set and the copied source file has an
    PACKAGING_INSTALL_LOCATION attribute, the PACKAGING_INSTALL_LOCATION is
    used as the new name of the source file under pkgroot.

    The source file will not be copied if it is already under the the pkgroot
    directory.

    All attributes of the source file will be copied to the new file.
    """
    # make sure the packageroot is a Dir object.
    if SCons.Util.is_String(pkgroot): pkgroot=env.Dir(pkgroot)
    if not SCons.Util.is_List(source): source=[source]
    new_source = []
    for file in source:
        if SCons.Util.is_String(file): file = env.File(file)
        if file.is_under(pkgroot):
            # already inside the package root: keep as-is
            new_source.append(file)
        else:
            # choose the relative name under pkgroot, honoring the recorded
            # install location when requested
            if hasattr(file, 'PACKAGING_INSTALL_LOCATION') and\
               honor_install_location:
                new_name=make_path_relative(file.PACKAGING_INSTALL_LOCATION)
            else:
                new_name=make_path_relative(file.get_path())
            new_file=pkgroot.File(new_name)
            new_file=env.CopyAs(new_file, file)[0]
            copy_attr(file, new_file)
            new_source.append(new_file)
    return (target, new_source)
def stripinstallbuilder(target, source, env):
    """ strips the install builder action from the source list and stores
    the final installation location as the "PACKAGING_INSTALL_LOCATION" of
    the source of the source file. This effectively removes the final installed
    files from the source list while remembering the installation location.

    It also warns about files which have no install builder attached.
    """
    def has_no_install_location(file):
        # True when the file was not produced by an Install/InstallAs builder.
        return not (file.has_builder() and\
            hasattr(file.builder, 'name') and\
            (file.builder.name=="InstallBuilder" or\
             file.builder.name=="InstallAsBuilder"))
    if len(filter(has_no_install_location, source)):
        warn(Warning, "there are files to package which have no\
        InstallBuilder attached, this might lead to irreproducible packages")
    n_source=[]
    for s in source:
        if has_no_install_location(s):
            n_source.append(s)
        else:
            # replace the installed file by its build source(s), recording
            # where it would have been installed
            for ss in s.sources:
                n_source.append(ss)
                copy_attr(s, ss)
                setattr(ss, 'PACKAGING_INSTALL_LOCATION', s.get_path())
    return (target, n_source)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
rat-pacREPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@Tool@packaging@__init__.py@.PATH_END.py
|
{
"filename": "flat_fielding_test.py",
"repo_name": "Keck-DataReductionPipelines/KPF-Pipeline",
"repo_path": "KPF-Pipeline_extracted/KPF-Pipeline-master/tests/regression/flat_fielding_test.py",
"type": "Python"
}
|
import numpy as np
from astropy.io import fits
import matplotlib.pyplot as plt
import os
from dotenv import load_dotenv
load_dotenv()
#importing test files
test_flat = os.getenv('KPFPIPE_TEST_DATA')+ '/NEIDdata/FLAT/neidTemp_2D20191214T001924.fits'
test_raw= os.getenv('KPFPIPE_TEST_DATA') + '/NEIDdata/TAUCETI_20191217/L0/neidTemp_2D20191217T023129.fits'
#defining flat division function
def flat_div(rawimg, flatimg):
    """Divide a raw FITS image by a flat-field FITS image.

    Parameters
    ----------
    rawimg : str
        Path to the raw FITS file (primary HDU is read).
    flatimg : str
        Path to the flat-field FITS file (primary HDU is read).

    Returns
    -------
    numpy.ndarray
        The element-wise ratio raw / flat.
    """
    flatdata = fits.getdata(flatimg, ext=0)
    rawdata = fits.getdata(rawimg, ext=0)
    # CLEANUP: the original tested the same condition twice with two
    # independent `if`s; a single if/else is equivalent and clearer.
    if flatdata.shape == rawdata.shape:
        print(".Fits Dimensions Equal, Check Passed")
    else:
        # NOTE(review): only a warning is printed; the division below will
        # still raise (or broadcast) on mismatched shapes -- confirm intent.
        print(".Fits Dimensions NOT Equal! Check Failed")
    raw_div_flat = rawdata / flatdata
    return raw_div_flat
#function plot
# Run the function under test and display its output image.
field_result=flat_div(test_raw,test_flat)
plt.figure()
plt.title("Flat Division Function Result")
plt.imshow(field_result)
plt.colorbar()
#non-function
# Recompute the same division inline as a reference implementation.
flatdata = fits.getdata(test_flat, ext=0)
rawdata = fits.getdata(test_raw, ext=0)
res2=rawdata/flatdata
plt.figure()
plt.title("Flat Division Non-Fxn Result")
plt.imshow(res2)
plt.colorbar()
#original files
# Show the raw and flat inputs for visual comparison.
plt.figure()
plt.title("Raw Data")
plt.imshow(rawdata)
plt.colorbar()
plt.figure()
plt.title("Flat Data")
plt.imshow(flatdata)
plt.colorbar()
#difference
# The difference image should be identically zero if both paths agree.
plt.figure()
plt.title("Difference Between Fxn and Non-Fxn Result")
plt.imshow(field_result-res2)
plt.colorbar()
# NOTE(review): the result of np.where is discarded -- presumably meant to be
# printed or asserted to list any differing pixels; confirm intent.
np.where((field_result-res2)!=0.0)
Keck-DataReductionPipelinesREPO_NAMEKPF-PipelinePATH_START.@KPF-Pipeline_extracted@KPF-Pipeline-master@tests@regression@flat_fielding_test.py@.PATH_END.py
|
{
"filename": "_textsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/volume/_textsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Auto-generated validator for the `textsrc` property of `volume` traces.

    NOTE(review): per the SrcValidator base-class naming convention this
    validates a source reference for the trace's `text` values -- verify
    against the plotly schema before relying on this description.
    """

    def __init__(self, plotly_name="textsrc", parent_name="volume", **kwargs):
        # edit_type defaults to "none" unless overridden via kwargs.
        super(TextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@volume@_textsrc.py@.PATH_END.py
|
{
"filename": "test_fitting.py",
"repo_name": "andycasey/smhr",
"repo_path": "smhr_extracted/smhr-master/smh/tests/test_fitting.py",
"type": "Python"
}
|
"""
Based on Andy's pull request example
Currently just fits a bunch of individual lines with both synthesis and EW
TODO expand to do synthesis with multiple lines
TODO calculate abundances
TODO export tables
"""
import os
import smh.linelists
import smh.photospheres
import smh.radiative_transfer as rt
import smh.spectral_models as sm
import matplotlib.pyplot as plt
def show_result(region):
    """Plot a fitted spectral model over its session's normalized spectrum.

    Draws the fitted model (red) with its uncertainty band, then overlays the
    full normalized spectrum (black) while preserving the model's axis limits.
    Returns the created figure.
    """
    figure, axis = plt.subplots()
    _, _, meta = region._result
    axis.plot(meta["model_x"], meta["model_y"], c='r')
    # Freeze the limits set by the model plot before adding the spectrum.
    x_range = axis.get_xlim()
    y_range = axis.get_ylim()
    spectrum = region.session.normalized_spectrum
    axis.plot(spectrum.dispersion, spectrum.flux, c='k', zorder=-1)
    axis.set_xlim(x_range)
    axis.set_ylim(y_range)
    lower, upper = meta["model_yerr"]
    axis.fill_between(meta["model_x"], meta["model_y"] + lower,
                      meta["model_y"] + upper,
                      facecolor="r", alpha=0.25, zorder=-2)
    return figure
if __name__=="__main__":
    datadir = os.path.dirname(os.path.abspath(__file__))+'/test_data'
    ## Test with "known" stellar parameters
    session = smh.Session([
        datadir+"/spectra/hd122563.fits"
    ])
    # Literature stellar parameters for HD 122563 (a metal-poor giant).
    session.metadata['stellar_parameters']['effective_temperature'] = 4380
    session.metadata['stellar_parameters']['surface_gravity'] = 0.1
    session.metadata['stellar_parameters']['metallicity'] = -2.68
    session.metadata['stellar_parameters']['microturbulence'] = 2.65
    # Isotope ratios used by the synthesis models.
    isotopes = {"H-C":{112:0.8,113:0.2},
                "C-N":{1214:0.8,1314:0.2},
                'Ba':{134:0.0,135:0.370,136:0.0,137:0.351,138:0.279}
                }
    session.metadata['isotopes'] = isotopes
    ## Start with a normalized, RV corrected spectrum
    spectrum = smh.specutils.Spectrum1D.read(datadir+"/spectra/hd122563.fits")
    session.normalized_spectrum = spectrum
    ## Primary line list (EW) measurements
    transitions = smh.linelists.LineList.read(datadir+"/linelists/complete.list")
    # Build one profile (EW) model and one synthesis model per transition.
    ew_measurements = []
    synth_measurements = []
    for transition in transitions:
        elem = transition['elem1']
        ew = sm.ProfileFittingModel(transition, session)
        synth = sm.SpectralSynthesisModel(transition, session, elem)
        ew_measurements.append(ew)
        synth_measurements.append(synth)
    ## Summarize
    ## EW Table
    ## Abundance Table
    ## Default plots
|
andycaseyREPO_NAMEsmhrPATH_START.@smhr_extracted@smhr-master@smh@tests@test_fitting.py@.PATH_END.py
|
{
"filename": "test_visual.py",
"repo_name": "glue-viz/glue",
"repo_path": "glue_extracted/glue-main/glue/core/tests/test_visual.py",
"type": "Python"
}
|
from glue.core.visual import VisualAttributes
from glue.utils.matplotlib import MATPLOTLIB_GE_36
if MATPLOTLIB_GE_36:
from matplotlib import colormaps
else:
from matplotlib.cm import get_cmap
import pytest
def test_VA_preferred_cmap():
    """Validate every accepted form of ``VisualAttributes(preferred_cmap=...)``."""
    expected = colormaps["viridis"] if MATPLOTLIB_GE_36 else get_cmap("viridis")
    # Not a real CMAP array - errors
    with pytest.raises(TypeError, match="`preferred_cmap` must be a string or an instance of a matplotlib.colors.Colormap"):
        VisualAttributes(preferred_cmap=1)
    # Not a valid string / known key [mpl 3.6+] for a CMAP - errors
    with pytest.raises(ValueError, match="not_a_cmap is not a valid colormap name."):
        VisualAttributes(preferred_cmap="not_a_cmap")
    # Lowercase name, formal (capitalized) name, and an actual Colormap
    # instance all normalize to the same Colormap object.
    for spec in ("viridis", "Viridis", expected):
        assert VisualAttributes(preferred_cmap=spec).preferred_cmap == expected
    # None is allowed - it is the default
    assert VisualAttributes(preferred_cmap=None).preferred_cmap is None
|
glue-vizREPO_NAMEgluePATH_START.@glue_extracted@glue-main@glue@core@tests@test_visual.py@.PATH_END.py
|
{
"filename": "bz2.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/bz2.py",
"type": "Python"
}
|
"""Interface to the libbzip2 compression library.
This module provides a file interface, classes for incremental
(de)compression, and functions for one-shot (de)compression.
"""
__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor",
"open", "compress", "decompress"]
__author__ = "Nadeem Vawda <nadeem.vawda@gmail.com>"
from builtins import open as _builtin_open
import io
import os
import _compression
from _bz2 import BZ2Compressor, BZ2Decompressor
_MODE_CLOSED = 0
_MODE_READ = 1
# Value 2 no longer used
_MODE_WRITE = 3
class BZ2File(_compression.BaseStream):
    """A file object providing transparent bzip2 (de)compression.
    A BZ2File can act as a wrapper for an existing file object, or refer
    directly to a named file on disk.
    Note that BZ2File provides a *binary* file interface - data read is
    returned as bytes, and data to be written should be given as bytes.
    """
    def __init__(self, filename, mode="r", *, compresslevel=9):
        """Open a bzip2-compressed file.
        If filename is a str, bytes, or PathLike object, it gives the
        name of the file to be opened. Otherwise, it should be a file
        object, which will be used to read or write the compressed data.
        mode can be 'r' for reading (default), 'w' for (over)writing,
        'x' for creating exclusively, or 'a' for appending. These can
        equivalently be given as 'rb', 'wb', 'xb', and 'ab'.
        If mode is 'w', 'x' or 'a', compresslevel can be a number between 1
        and 9 specifying the level of compression: 1 produces the least
        compression, and 9 (default) produces the most compression.
        If mode is 'r', the input file may be the concatenation of
        multiple compressed streams.
        """
        # Fail-safe defaults first, so close() is safe to call even when
        # the validation below raises.
        self._fp = None
        self._closefp = False
        self._mode = _MODE_CLOSED
        if not (1 <= compresslevel <= 9):
            raise ValueError("compresslevel must be between 1 and 9")
        if mode in ("", "r", "rb"):
            mode = "rb"
            mode_code = _MODE_READ
        elif mode in ("w", "wb"):
            mode = "wb"
            mode_code = _MODE_WRITE
            self._compressor = BZ2Compressor(compresslevel)
        elif mode in ("x", "xb"):
            mode = "xb"
            mode_code = _MODE_WRITE
            self._compressor = BZ2Compressor(compresslevel)
        elif mode in ("a", "ab"):
            mode = "ab"
            mode_code = _MODE_WRITE
            self._compressor = BZ2Compressor(compresslevel)
        else:
            raise ValueError("Invalid mode: %r" % (mode,))
        if isinstance(filename, (str, bytes, os.PathLike)):
            # We opened the underlying file, so we own it: close() closes it.
            self._fp = _builtin_open(filename, mode)
            self._closefp = True
            self._mode = mode_code
        elif hasattr(filename, "read") or hasattr(filename, "write"):
            # Caller-supplied file object; caller keeps ownership.
            self._fp = filename
            self._mode = mode_code
        else:
            raise TypeError("filename must be a str, bytes, file or PathLike object")
        if self._mode == _MODE_READ:
            # DecompressReader restarts decompression on concatenated
            # streams; BufferedReader layers peek()/readline() on top.
            raw = _compression.DecompressReader(self._fp,
                BZ2Decompressor, trailing_error=OSError)
            self._buffer = io.BufferedReader(raw)
        else:
            # Uncompressed byte position, reported by tell() in write mode.
            self._pos = 0
    def close(self):
        """Flush and close the file.
        May be called more than once without error. Once the file is
        closed, any other operation on it will raise a ValueError.
        """
        if self._mode == _MODE_CLOSED:
            return
        try:
            if self._mode == _MODE_READ:
                self._buffer.close()
            elif self._mode == _MODE_WRITE:
                # Emit the final compressed block before closing.
                self._fp.write(self._compressor.flush())
                self._compressor = None
        finally:
            try:
                if self._closefp:
                    self._fp.close()
            finally:
                # Always mark closed, even if closing the fp raised.
                self._fp = None
                self._closefp = False
                self._mode = _MODE_CLOSED
                self._buffer = None
    @property
    def closed(self):
        """True if this file is closed."""
        return self._mode == _MODE_CLOSED
    def fileno(self):
        """Return the file descriptor for the underlying file."""
        self._check_not_closed()
        return self._fp.fileno()
    def seekable(self):
        """Return whether the file supports seeking."""
        return self.readable() and self._buffer.seekable()
    def readable(self):
        """Return whether the file was opened for reading."""
        self._check_not_closed()
        return self._mode == _MODE_READ
    def writable(self):
        """Return whether the file was opened for writing."""
        self._check_not_closed()
        return self._mode == _MODE_WRITE
    def peek(self, n=0):
        """Return buffered data without advancing the file position.
        Always returns at least one byte of data, unless at EOF.
        The exact number of bytes returned is unspecified.
        """
        self._check_can_read()
        # Relies on the undocumented fact that BufferedReader.peek()
        # always returns at least one byte (except at EOF), independent
        # of the value of n
        return self._buffer.peek(n)
    def read(self, size=-1):
        """Read up to size uncompressed bytes from the file.
        If size is negative or omitted, read until EOF is reached.
        Returns b'' if the file is already at EOF.
        """
        self._check_can_read()
        return self._buffer.read(size)
    def read1(self, size=-1):
        """Read up to size uncompressed bytes, while trying to avoid
        making multiple reads from the underlying stream. Reads up to a
        buffer's worth of data if size is negative.
        Returns b'' if the file is at EOF.
        """
        self._check_can_read()
        if size < 0:
            size = io.DEFAULT_BUFFER_SIZE
        return self._buffer.read1(size)
    def readinto(self, b):
        """Read bytes into b.
        Returns the number of bytes read (0 for EOF).
        """
        self._check_can_read()
        return self._buffer.readinto(b)
    def readline(self, size=-1):
        """Read a line of uncompressed bytes from the file.
        The terminating newline (if present) is retained. If size is
        non-negative, no more than size bytes will be read (in which
        case the line may be incomplete). Returns b'' if already at EOF.
        """
        if not isinstance(size, int):
            # Accept any object providing __index__ (operator.index semantics).
            if not hasattr(size, "__index__"):
                raise TypeError("Integer argument expected")
            size = size.__index__()
        self._check_can_read()
        return self._buffer.readline(size)
    def readlines(self, size=-1):
        """Read a list of lines of uncompressed bytes from the file.
        size can be specified to control the number of lines read: no
        further lines will be read once the total size of the lines read
        so far equals or exceeds size.
        """
        if not isinstance(size, int):
            if not hasattr(size, "__index__"):
                raise TypeError("Integer argument expected")
            size = size.__index__()
        self._check_can_read()
        return self._buffer.readlines(size)
    def write(self, data):
        """Write a byte string to the file.
        Returns the number of uncompressed bytes written, which is
        always the length of data in bytes. Note that due to buffering,
        the file on disk may not reflect the data written until close()
        is called.
        """
        self._check_can_write()
        if isinstance(data, (bytes, bytearray)):
            length = len(data)
        else:
            # accept any data that supports the buffer protocol
            data = memoryview(data)
            length = data.nbytes
        compressed = self._compressor.compress(data)
        self._fp.write(compressed)
        # Track the *uncompressed* stream position for tell().
        self._pos += length
        return length
    def writelines(self, seq):
        """Write a sequence of byte strings to the file.
        Returns the number of uncompressed bytes written.
        seq can be any iterable yielding byte strings.
        Line separators are not added between the written byte strings.
        """
        return _compression.BaseStream.writelines(self, seq)
    def seek(self, offset, whence=io.SEEK_SET):
        """Change the file position.
        The new position is specified by offset, relative to the
        position indicated by whence. Values for whence are:
        0: start of stream (default); offset must not be negative
        1: current stream position
        2: end of stream; offset must not be positive
        Returns the new file position.
        Note that seeking is emulated, so depending on the parameters,
        this operation may be extremely slow.
        """
        self._check_can_seek()
        return self._buffer.seek(offset, whence)
    def tell(self):
        """Return the current file position."""
        self._check_not_closed()
        if self._mode == _MODE_READ:
            return self._buffer.tell()
        return self._pos
def open(filename, mode="rb", compresslevel=9,
         encoding=None, errors=None, newline=None):
    """Open a bzip2-compressed file in binary or text mode.
    The filename argument can be an actual filename (a str, bytes, or
    PathLike object), or an existing file object to read from or write
    to.
    The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or
    "ab" for binary mode, or "rt", "wt", "xt" or "at" for text mode.
    The default mode is "rb", and the default compresslevel is 9.
    For binary mode, this function is equivalent to the BZ2File
    constructor: BZ2File(filename, mode, compresslevel). In this case,
    the encoding, errors and newline arguments must not be provided.
    For text mode, a BZ2File object is created, and wrapped in an
    io.TextIOWrapper instance with the specified encoding, error
    handling behavior, and line ending(s).
    """
    text_mode = "t" in mode
    # Validate the mode / text-argument combination up front.
    if text_mode:
        if "b" in mode:
            raise ValueError("Invalid mode: %r" % (mode,))
    else:
        if encoding is not None:
            raise ValueError("Argument 'encoding' not supported in binary mode")
        if errors is not None:
            raise ValueError("Argument 'errors' not supported in binary mode")
        if newline is not None:
            raise ValueError("Argument 'newline' not supported in binary mode")
    # BZ2File only understands binary modes; strip the "t" marker.
    binary_file = BZ2File(filename, mode.replace("t", ""),
                          compresslevel=compresslevel)
    if not text_mode:
        return binary_file
    # Text mode: layer a TextIOWrapper over the binary stream.
    return io.TextIOWrapper(binary_file, io.text_encoding(encoding),
                            errors, newline)
def compress(data, compresslevel=9):
    """Compress *data* in one shot and return the bz2 stream as bytes.
    compresslevel, if given, must be a number between 1 and 9.
    For incremental compression, use a BZ2Compressor object instead.
    """
    compressor = BZ2Compressor(compresslevel)
    # flush() finalizes the stream; both pieces together form the output.
    chunks = [compressor.compress(data), compressor.flush()]
    return b"".join(chunks)
def decompress(data):
    """Decompress a block of data, accepting concatenated bz2 streams.
    For incremental decompression, use a BZ2Decompressor object instead.
    """
    chunks = []
    while data:
        # One decompressor per stream: eof means a stream finished and
        # unused_data is whatever follows it.
        decompressor = BZ2Decompressor()
        try:
            chunks.append(decompressor.decompress(data))
        except OSError:
            if not chunks:
                raise  # Error on the first iteration; bail out.
            break  # Leftover data is not a valid bzip2 stream; ignore it.
        if not decompressor.eof:
            raise ValueError("Compressed data ended before the "
                             "end-of-stream marker was reached")
        data = decompressor.unused_data
    return b"".join(chunks)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@bz2.py@.PATH_END.py
|
{
"filename": "sdss-catalog.py",
"repo_name": "dstndstn/tractor",
"repo_path": "tractor_extracted/tractor-main/projects/desi/sdss-catalog.py",
"type": "Python"
}
|
from __future__ import print_function
import fitsio
from astrometry.util.util import *
from astrometry.sdss.fields import *
from astrometry.sdss import *
from tractor import *
from tractor.sdss import *
from desi_common import *
if __name__ == '__main__':
    # Build a Tractor source catalog from SDSS photoObjs for one WISE sub-tile.
    import optparse
    import sys
    import desi_common
    parser = optparse.OptionParser('%prog [options] <WISE tile name> <catalog output name>')
    parser.add_option('-n', type=int, default=desi_common.N_subtiles,
                      help='Number of sub-tiles; default %default')
    parser.add_option('-x', type=int, help='Sub-tile x', default=0)
    parser.add_option('-y', type=int, help='Sub-tile y', default=0)
    parser.add_option('--atlas', default='allsky-atlas.fits', help='WISE tile list')
    parser.add_option('--bands', default=[], action='append', help='Bands to include in output catalog, default g,r,z')
    opt,args = parser.parse_args()
    if len(args) != 2:
        parser.print_help()
        sys.exit(-1)
    tile,outfn = args
    # Propagate a non-default sub-tile count into the shared module state,
    # since get_subtile_wcs() reads desi_common.N_subtiles.
    if opt.n != desi_common.N_subtiles:
        desi_common.N_subtiles = opt.n
    if len(opt.bands) == 0:
        opt.bands = ['g','r','z']
    wcs = get_subtile_wcs(tile, opt.x, opt.y)
    print('WCS:', wcs)
    # FIXME
    margin = 0.
    photoobjdir = 'photoObjs-new'
    sdss = DR9(basedir=photoobjdir)
    sdss.useLocalTree()
    # photoObj columns needed to reconstruct the Tractor source models
    # (positions, profile parameters, flags, and per-band fluxes).
    cols = ['objid', 'ra', 'dec', 'fracdev', 'objc_type',
            'theta_dev', 'theta_deverr', 'ab_dev', 'ab_deverr', 'phi_dev_deg',
            'theta_exp', 'theta_experr', 'ab_exp', 'ab_experr', 'phi_exp_deg',
            'resolve_status', 'nchild', 'flags', 'objc_flags',
            'run','camcol','field','id',
            'psfflux', 'psfflux_ivar',
            'cmodelflux', 'cmodelflux_ivar',
            'modelflux', 'modelflux_ivar',
            'devflux', 'expflux']
    objs = read_photoobjs_in_wcs(wcs, margin, sdss=sdss, cols=cols)
    print('Got', len(objs), 'photoObjs')
    srcs = get_tractor_sources_dr9(
        None, None, None, objs=objs, sdss=sdss,
        bands=opt.bands,
        nanomaggies=True, fixedComposites=True,
        useObjcType=True,
        ellipse=EllipseESoft.fromRAbPhi)
    print('Got', len(srcs), 'Tractor sources')
    cat = Catalog(*srcs)
    N = cat.numberOfParams()
    # No variance estimates computed here; write zeros as placeholders.
    var = np.zeros(N)
    T,hdr = prepare_fits_catalog(cat, var, None, None, opt.bands, None)
    T.writeto(outfn, header=hdr)
    print('Wrote to', outfn)
|
dstndstnREPO_NAMEtractorPATH_START.@tractor_extracted@tractor-main@projects@desi@sdss-catalog.py@.PATH_END.py
|
{
"filename": "time_distributed.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/rnn/time_distributed.py",
"type": "Python"
}
|
"""Wrapper layer to apply every temporal slice of an input."""
from keras.src import backend
from keras.src import ops
from keras.src.api_export import keras_export
from keras.src.layers.core.wrapper import Wrapper
from keras.src.layers.layer import Layer
@keras_export("keras.layers.TimeDistributed")
class TimeDistributed(Wrapper):
    """This wrapper allows to apply a layer to every temporal slice of an input.
    Every input should be at least 3D, and the dimension of index one of the
    first input will be considered to be the temporal dimension.
    Consider a batch of 32 video samples, where each sample is a 128x128 RGB
    image with `channels_last` data format, across 10 timesteps.
    The batch input shape is `(32, 10, 128, 128, 3)`.
    You can then use `TimeDistributed` to apply the same `Conv2D` layer to each
    of the 10 timesteps, independently:
    >>> inputs = layers.Input(shape=(10, 128, 128, 3), batch_size=32)
    >>> conv_2d_layer = layers.Conv2D(64, (3, 3))
    >>> outputs = layers.TimeDistributed(conv_2d_layer)(inputs)
    >>> outputs.shape
    (32, 10, 126, 126, 64)
    Because `TimeDistributed` applies the same instance of `Conv2D` to each of
    the timestamps, the same set of weights are used at each timestamp.
    Args:
        layer: a `keras.layers.Layer` instance.
    Call arguments:
        inputs: Input tensor of shape (batch, time, ...) or nested tensors,
            and each of which has shape (batch, time, ...).
        training: Python boolean indicating whether the layer should behave in
            training mode or in inference mode. This argument is passed to the
            wrapped layer (only if the layer supports this argument).
        mask: Binary tensor of shape `(samples, timesteps)` indicating whether
            a given timestep should be masked. This argument is passed to the
            wrapped layer (only if the layer supports this argument).
    """
    def __init__(self, layer, **kwargs):
        # Validate eagerly so a bad `layer` fails at construction time.
        if not isinstance(layer, Layer):
            raise ValueError(
                "Please initialize `TimeDistributed` layer with a "
                f"`keras.layers.Layer` instance. Received: {layer}"
            )
        super().__init__(layer, **kwargs)
        self.supports_masking = True
    def _get_child_input_shape(self, input_shape):
        # Drop the time axis (index 1): the wrapped layer sees (batch, ...).
        if not isinstance(input_shape, (tuple, list)) or len(input_shape) < 3:
            raise ValueError(
                "`TimeDistributed` Layer should be passed an `input_shape` "
                f"with at least 3 dimensions, received: {input_shape}"
            )
        return (input_shape[0], *input_shape[2:])
    def compute_output_shape(self, input_shape):
        # Output is the child's output shape with the time axis re-inserted.
        child_input_shape = self._get_child_input_shape(input_shape)
        child_output_shape = self.layer.compute_output_shape(child_input_shape)
        return (child_output_shape[0], input_shape[1], *child_output_shape[1:])
    def build(self, input_shape):
        child_input_shape = self._get_child_input_shape(input_shape)
        super().build(child_input_shape)
        self.built = True
    def call(self, inputs, training=None, mask=None):
        input_shape = ops.shape(inputs)
        mask_shape = None if mask is None else ops.shape(mask)
        batch_size = input_shape[0]
        timesteps = input_shape[1]
        if mask_shape is not None and mask_shape[:2] != (batch_size, timesteps):
            raise ValueError(
                "`TimeDistributed` Layer should be passed a `mask` of shape "
                f"({batch_size}, {timesteps}, ...), "
                f"received: mask.shape={mask_shape}"
            )
        def time_distributed_transpose(data):
            """Swaps the timestep and batch dimensions of a tensor."""
            axes = [1, 0, *range(2, len(data.shape))]
            return ops.transpose(data, axes=axes)
        # Move time to axis 0 so indexing inputs[i] yields one timestep batch.
        inputs = time_distributed_transpose(inputs)
        if mask is not None:
            mask = time_distributed_transpose(mask)
        def step_function(i):
            # Forward `mask`/`training` only if the wrapped layer accepts them.
            kwargs = {}
            if self.layer._call_has_mask_arg and mask is not None:
                kwargs["mask"] = mask[i]
            if self.layer._call_has_training_arg:
                kwargs["training"] = training
            return self.layer.call(inputs[i], **kwargs)
        # Implementation #1: if the time axis is static, use a Python for loop.
        if inputs.shape[0] is not None:
            outputs = ops.stack(
                [step_function(i) for i in range(inputs.shape[0])]
            )
            return time_distributed_transpose(outputs)
        # Implementation #2: use backend.vectorized_map.
        outputs = backend.vectorized_map(step_function, ops.arange(timesteps))
        return time_distributed_transpose(outputs)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@rnn@time_distributed.py@.PATH_END.py
|
{
"filename": "toolmanager_sgskip.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/galleries/examples/user_interfaces/toolmanager_sgskip.py",
"type": "Python"
}
|
"""
============
Tool Manager
============
This example demonstrates how to
* modify the Toolbar
* create tools
* add tools
* remove tools
using `matplotlib.backend_managers.ToolManager`.
"""
import matplotlib.pyplot as plt
from matplotlib.backend_tools import ToolBase, ToolToggleBase
plt.rcParams['toolbar'] = 'toolmanager'
class ListTools(ToolBase):
    """List all the tools controlled by the `ToolManager`."""
    default_keymap = 'm'  # keyboard shortcut
    description = 'List Tools'
    def trigger(self, *args, **kwargs):
        """Print a table of registered tools, then the active toggle groups."""
        fmt_tool = "{:12} {:45} {}".format
        print('_' * 80)
        print(fmt_tool('Name (id)', 'Tool description', 'Keymap'))
        print('-' * 80)
        tools = self.toolmanager.tools
        # Tools without a description are internal; skip them.
        for name, tool in sorted(tools.items()):
            if not tool.description:
                continue
            keymap = ', '.join(sorted(self.toolmanager.get_tool_keymap(name)))
            print(fmt_tool(name, tool.description, keymap))
        print('_' * 80)
        fmt_active_toggle = "{!s:12} {!s:45}".format
        print("Active Toggle tools")
        print(fmt_active_toggle("Group", "Active"))
        print('-' * 80)
        for group, active in self.toolmanager.active_toggle.items():
            print(fmt_active_toggle(group, active))
class GroupHideTool(ToolToggleBase):
    """Show lines with a given gid."""
    default_keymap = 'S'
    description = 'Show by gid'
    default_toggled = True
    def __init__(self, *args, gid, **kwargs):
        # Remember which gid this toggle controls before the base init runs.
        self.gid = gid
        super().__init__(*args, **kwargs)
    def enable(self, *args):
        self.set_lines_visibility(True)
    def disable(self, *args):
        self.set_lines_visibility(False)
    def set_lines_visibility(self, state):
        """Set visibility of every line whose gid matches ours, then redraw."""
        matching_lines = (line
                          for ax in self.figure.get_axes()
                          for line in ax.get_lines()
                          if line.get_gid() == self.gid)
        for line in matching_lines:
            line.set_visible(state)
        self.figure.canvas.draw()
fig = plt.figure()
# Three demo lines; two share the gid the GroupHideTool toggles.
plt.plot([1, 2, 3], gid='mygroup')
plt.plot([2, 3, 4], gid='unknown')
plt.plot([3, 2, 1], gid='mygroup')
# Add the custom tools that we created
fig.canvas.manager.toolmanager.add_tool('List', ListTools)
fig.canvas.manager.toolmanager.add_tool('Show', GroupHideTool, gid='mygroup')
# Add an existing tool to new group `foo`.
# It can be added as many times as we want
fig.canvas.manager.toolbar.add_tool('zoom', 'foo')
# Remove the forward button
fig.canvas.manager.toolmanager.remove_tool('forward')
# To add a custom tool to the toolbar at specific location inside
# the navigation group
fig.canvas.manager.toolbar.add_tool('Show', 'navigation', 1)
plt.show()
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@galleries@examples@user_interfaces@toolmanager_sgskip.py@.PATH_END.py
|
{
"filename": "eclipse.py",
"repo_name": "hippke/pandora",
"repo_path": "pandora_extracted/pandora-main/pandoramoon/eclipse.py",
"type": "Python"
}
|
import numpy as np
from numpy import sqrt, pi, arccos, abs, ceil, fliplr, flipud
from numba import jit
@jit(cache=True, nopython=True, fastmath=True, parallel=False)
def cci(r1, r2, d):
    """Calculates area of asymmetric "lens" in which two circles intersect
    Source: http://mathworld.wolfram.com/Circle-CircleIntersection.html"""
    if r1 < d - r2:
        # Circles are disjoint (d > r1 + r2): no overlap.
        return 0
    if r1 >= d + r2:
        # Circle 2 lies entirely inside circle 1: full circle-2 area.
        return pi * r2 ** 2
    if d - r2 <= -r1:
        # Circle 1 lies entirely inside circle 2: full circle-1 area.
        return pi * r1 ** 2
    # Partial overlap: two circular-segment terms minus the triangle term.
    segment_2 = r2 ** 2 * arccos((d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2))
    segment_1 = r1 ** 2 * arccos((d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1))
    triangle = 0.5 * sqrt((-d + r2 + r1) * (d + r2 - r1) * (d - r2 + r1) * (d + r2 + r1))
    return segment_2 + segment_1 - triangle
@jit(cache=True, nopython=True, fastmath=True, parallel=False)
def eclipse_ratio(distance_planet_moon, r_planet, r_moon):
    """Returns eclipsed ratio [0..1] using circle_circle_intersect"""
    separation = abs(distance_planet_moon)
    # No contact between the two disks: nothing is eclipsed.
    if separation >= (r_planet + r_moon):
        return 0
    # Moon disk fully inside the planet disk: total eclipse.
    if (r_planet - r_moon) > separation:
        return 1
    # Partial eclipse: get the overlap area via circle-circle intersection...
    d = distance_planet_moon
    if d == 0:
        d = 1e-10  # avoid division by zero inside cci()
    overlap_area = cci(r_planet, r_moon, d)
    # ...and normalize by the moon disk area to obtain the eclipsed fraction.
    return overlap_area / (pi * r_moon ** 2)
@jit(cache=True, nopython=True, fastmath=True, parallel=False)
def pixelart(xp, yp, xm, ym, r_planet, r_moon, numerical_grid):
    # Numerical fallback for the limb case: rasterize star, planet and moon
    # onto an integer grid and count pixels to estimate what fraction of the
    # moon's in-star area is also covered by the planet.
    if numerical_grid % 2 == 0: # assure pixel number is odd for perfect circle
        numerical_grid += 1
    # Star radius expressed in moon-grid pixel units.
    r_star = (1 / r_moon) * numerical_grid
    image = np.zeros((numerical_grid + 1, numerical_grid + 1), dtype="int8")
    color_star = 5 # arbitrary values, but useful for visualization
    color_moon = 3 # the sum of the values must be unique to identify the
    color_planet = 2 # total overlap: area of moon on star occulted by planet
    all_colors = color_star + color_moon + color_planet
    # Paint moon circle by painting one quarter, flipping it over, and rolling it down
    # Faster than painting naively the full circle, because it saves 3/4 of dist calcs
    # Caching this part would be a good idea, but numba memory mgmt caused crashes
    # The gain is very small anyways and the complexity not worth it
    # The version here, which replaces the usual sqrt with **2, is ~5% faster
    # Paint upper left corner
    anti_aliasing = np.sqrt(((numerical_grid + 1) ** 2 - (numerical_grid ** 2))) / 2
    mid = int(ceil(numerical_grid / 2))
    for x in range(mid):
        for y in range(mid):
            # Squared distance from the grid center, avoiding a sqrt per pixel.
            d_moon = (numerical_grid - 2 * x) ** 2 + (numerical_grid - 2 * y) ** 2
            if d_moon < (numerical_grid**2 + anti_aliasing):
                image[x, y] = color_moon
    # NOTE(review): axis 0 of `image` is the x index; the "upper/left"
    # wording below follows the original author's visual convention.
    image[mid:,:mid] = flipud(image[:mid:,:mid]) # Copy upper left to upper right
    image[:,mid:] = fliplr(image[:,:mid]) # Copy upper half to lower half
    # Now add planet and star
    anti_aliasing = -0.5 / numerical_grid # Working with sqrt again
    for x in range(numerical_grid + 1):
        for y in range(numerical_grid + 1):
            d_star = sqrt(
                (xm * r_star + 2 * x - numerical_grid) ** 2
                + (ym * r_star + 2 * y - numerical_grid) ** 2
            )
            if d_star < r_star - anti_aliasing:
                image[x, y] += color_star
            # Planet position is taken relative to the moon center.
            d_planet = sqrt(
                ((-(xp - xm) * r_star) + 2 * x - numerical_grid) ** 2
                + ((-(yp - ym) * r_star) + 2 * y - numerical_grid) ** 2
            )
            if d_planet < (r_planet / r_moon) * numerical_grid - anti_aliasing:
                image[x, y] += color_planet
    # Analytical pixel count of the full moon disk, used as the denominator.
    moon_sum_analytical = pi * ((numerical_grid) / 2) ** 2
    # Pixels carrying all three colors = moon area on the star hidden by the planet.
    moon_occult_frac = np.sum(image == all_colors) / moon_sum_analytical
    cci = eclipse_ratio(sqrt(xm ** 2 + ym ** 2), 1, r_moon)
    if cci > 0:
        return min(1, (1 - ((cci - moon_occult_frac) / cci)))
    else:
        return 1
@jit(cache=True, nopython=True, fastmath=True, parallel=False)
def eclipse(xp, yp, xm, ym, r_planet, r_moon, flux_moon, numerical_grid):
    """Checks if planet-moon occultation present. If yes, returns adjusted moon flux.
    Parameters
    ----------
    xp, yp, x_m, ym : float
        Planet and moon coordinates. Normalized so that R_star = 1 at (0,0)
    r_planet, r_moon : float
        Planet and moon radii. Normalized so that R_star = 1
    flux_moon : float
        Un-occulted moon flux (from Mandel-Agol model) in [0,1]
    Returns
    -------
    occulted_flux_moon : float
        Occulted moon flux <= flux_moon. Assumes planet occults moon.
    """
    # Planet-Moon occultation
    # Case 1: No occultation
    # Case 2: Occultation, both bodies on star or off star --> 2-circle intersect
    # Case 3: Occultation, any body on limb --> Numerical solution
    # NOTE: flux_moon is modified in place and also returned.
    for idx in range(len(xp)):
        planet_moon_occultation = False
        on_limb = False
        # Check if moon or planet are on stellar limb
        # (center-to-limb distance smaller than the body's own radius)
        if abs(1 - (sqrt(xm[idx] ** 2 + ym[idx] ** 2))) < (r_moon):
            on_limb = True
        if abs(1 - (sqrt(xp[idx] ** 2 + yp[idx] ** 2))) < (r_planet):
            on_limb = True
        # Check if planet-moon occultation
        distance_p_m = sqrt((xm[idx] - xp[idx]) ** 2 + (ym[idx] - yp[idx]) ** 2)
        if abs(distance_p_m) < (r_planet + r_moon):
            planet_moon_occultation = True
        # Case 1: No occultation
        else:
            continue
        # Case 2: Occultation, both bodies on star or off star --> 2 circle intersect
        if planet_moon_occultation and not on_limb:
            er = eclipse_ratio(distance_p_m, r_planet, r_moon)
        # Case 3: Occultation, any body on limb --> numerical estimate with pixel-art
        if planet_moon_occultation and on_limb:
            er = pixelart(
                xp[idx],
                yp[idx],
                xm[idx],
                ym[idx],
                r_planet,
                r_moon,
                numerical_grid
            )
        # For Cases 2+3: Calculate reduced moon flux
        if er > 0:
            # Convert to transit depth in ppm, scale by the un-eclipsed
            # fraction, then convert back to relative flux.
            flux_moon[idx] = -(1 - flux_moon[idx]) * 10 ** 6
            flux_moon[idx] = flux_moon[idx] * (1 - er)
            flux_moon[idx] = 1 - (-flux_moon[idx] * 10 ** -6)
    return flux_moon
|
hippkeREPO_NAMEpandoraPATH_START.@pandora_extracted@pandora-main@pandoramoon@eclipse.py@.PATH_END.py
|
{
"filename": "gaia_predict_decam_mag.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/py/LSS/imaging/veto_masks/reference/gaia_predict_decam_mag.py",
"type": "Python"
}
|
# Add predicted DECam mags
from __future__ import division, print_function
import sys, os, glob, time, warnings, gc
import numpy as np
# import matplotlib
# matplotlib.use("Agg")
# import matplotlib.pyplot as plt
from astropy.table import Table, vstack, hstack
import fitsio
# from astropy.io import fits
# Coefficients for EDR3
# Polynomial coefficients in (BP - RP) color that transform Gaia EDR3
# G-band magnitudes into predicted DECam g/r/z magnitudes:
#   decam_mag = G + sum_i coeffs[band][i] * clip(BP - RP, bprp_min, bprp_max)**i
coeffs = dict(
    g = [-0.1125681175, 0.3506376997, 0.9082025788, -1.0078309266,
         -1.4212131445, 4.5685722177, -4.5719415419, 2.3816887292,
         -0.7162270722, 0.1247021438, -0.0114938710, 0.0003949585,
         0.0000051647],
    r = [0.1431278873, -0.2999797766, -0.0553379742, 0.1544273115,
         0.3068634689, -0.9499143903, 0.9769739362, -0.4926704528,
         0.1272539574, -0.0133178183, -0.0008153813, 0.0003094116,
         -0.0000198891],
    z = [0.5173814296, -1.0450176704, 0.1529797809, 0.1856005222,
         -0.2366580132, 0.1018331214, -0.0189673240, 0.0012988354])
# Color range over which the polynomial fits are valid; colors outside this
# range are clipped to the boundaries before evaluating the polynomial.
bprp_min, bprp_max = -0.5, 4.7
gaia = Table(fitsio.read('/global/cfs/cdirs/desi/users/rongpu/desi_mask/gaia_edr3_g_18_dr9.fits'))
# The clipped BP-RP color is independent of both the band and the polynomial
# order, so compute it once instead of once per (band, order) iteration.
bprp = np.clip(gaia['PHOT_BP_MEAN_MAG'] - gaia['PHOT_RP_MEAN_MAG'],
               bprp_min, bprp_max)
for band in ['g', 'r', 'z']:
    # Start from the Gaia G magnitude and add the color polynomial.
    mag = np.copy(gaia['PHOT_G_MEAN_MAG'])
    for order, c in enumerate(coeffs[band]):
        mag += c * bprp**order
    gaia['decam_mag_'+band] = mag
# A BP or RP magnitude of exactly 0 marks missing photometry; the color, and
# therefore the predicted magnitude, is meaningless for those sources.
mask = (gaia['PHOT_BP_MEAN_MAG']==0) | (gaia['PHOT_RP_MEAN_MAG']==0)
for band in ['g', 'r', 'z']:
    gaia['decam_mag_'+band][mask] = np.nan
# Keep only the predicted magnitudes in the output file.
gaia = gaia[['decam_mag_g', 'decam_mag_r', 'decam_mag_z']]
gaia.write('/global/cfs/cdirs/desi/users/rongpu/desi_mask/gaia_edr3_g_18_predict_decam_dr9.fits')
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@py@LSS@imaging@veto_masks@reference@gaia_predict_decam_mag.py@.PATH_END.py
|
{
"filename": "test_models_detection_anchor_utils.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/test/test_models_detection_anchor_utils.py",
"type": "Python"
}
|
import pytest
import torch
from common_utils import assert_equal
from torchvision.models.detection.anchor_utils import AnchorGenerator, DefaultBoxGenerator
from torchvision.models.detection.image_list import ImageList
class Tester:
    """Tests for torchvision's AnchorGenerator and DefaultBoxGenerator."""
    def test_incorrect_anchors(self):
        # Mismatched sizes/aspect-ratios configuration must fail at call time.
        incorrect_sizes = (
            (2, 4, 8),
            (32, 8),
        )
        incorrect_aspects = (0.5, 1.0)
        anc = AnchorGenerator(incorrect_sizes, incorrect_aspects)
        image1 = torch.randn(3, 800, 800)
        image_list = ImageList(image1, [(800, 800)])
        feature_maps = [torch.randn(1, 50)]
        pytest.raises(AssertionError, anc, image_list, feature_maps)
    def _init_test_anchor_generator(self):
        # One size and one aspect ratio -> a single anchor per location.
        anchor_sizes = ((10,),)
        aspect_ratios = ((1,),)
        anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
        return anchor_generator
    def _init_test_defaultbox_generator(self):
        # Single SSD-style aspect ratio of 2 for one feature map.
        aspect_ratios = [[2]]
        dbox_generator = DefaultBoxGenerator(aspect_ratios)
        return dbox_generator
    def get_features(self, images):
        # One feature map, spatially downscaled 5x relative to the input.
        s0, s1 = images.shape[-2:]
        features = [torch.rand(2, 8, s0 // 5, s1 // 5)]
        return features
    def test_anchor_generator(self):
        images = torch.randn(2, 3, 15, 15)
        features = self.get_features(images)
        image_shapes = [i.shape[-2:] for i in images]
        images = ImageList(images, image_shapes)
        model = self._init_test_anchor_generator()
        model.eval()
        anchors = model(images, features)
        # Estimate the number of target anchors
        grid_sizes = [f.shape[-2:] for f in features]
        num_anchors_estimated = 0
        for sizes, num_anchors_per_loc in zip(grid_sizes, model.num_anchors_per_location()):
            num_anchors_estimated += sizes[0] * sizes[1] * num_anchors_per_loc
        # Expected boxes: a 10x10 anchor centered on each 3x3 grid cell.
        anchors_output = torch.tensor(
            [
                [-5.0, -5.0, 5.0, 5.0],
                [0.0, -5.0, 10.0, 5.0],
                [5.0, -5.0, 15.0, 5.0],
                [-5.0, 0.0, 5.0, 10.0],
                [0.0, 0.0, 10.0, 10.0],
                [5.0, 0.0, 15.0, 10.0],
                [-5.0, 5.0, 5.0, 15.0],
                [0.0, 5.0, 10.0, 15.0],
                [5.0, 5.0, 15.0, 15.0],
            ]
        )
        assert num_anchors_estimated == 9
        # One identical anchor set per image in the batch.
        assert len(anchors) == 2
        assert tuple(anchors[0].shape) == (9, 4)
        assert tuple(anchors[1].shape) == (9, 4)
        assert_equal(anchors[0], anchors_output)
        assert_equal(anchors[1], anchors_output)
    def test_defaultbox_generator(self):
        images = torch.zeros(2, 3, 15, 15)
        features = [torch.zeros(2, 8, 1, 1)]
        image_shapes = [i.shape[-2:] for i in images]
        images = ImageList(images, image_shapes)
        model = self._init_test_defaultbox_generator()
        model.eval()
        dboxes = model(images, features)
        # Expected default boxes for a 1x1 feature map (values precomputed).
        dboxes_output = torch.tensor(
            [
                [6.3750, 6.3750, 8.6250, 8.6250],
                [4.7443, 4.7443, 10.2557, 10.2557],
                [5.9090, 6.7045, 9.0910, 8.2955],
                [6.7045, 5.9090, 8.2955, 9.0910],
            ]
        )
        assert len(dboxes) == 2
        assert tuple(dboxes[0].shape) == (4, 4)
        assert tuple(dboxes[1].shape) == (4, 4)
        # Approximate comparison: the expected values are rounded to 4 decimals.
        torch.testing.assert_close(dboxes[0], dboxes_output, rtol=1e-5, atol=1e-8)
        torch.testing.assert_close(dboxes[1], dboxes_output, rtol=1e-5, atol=1e-8)
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@test@test_models_detection_anchor_utils.py@.PATH_END.py
|
{
"filename": "_tickwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/choropleth/colorbar/_tickwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``choropleth.colorbar.tickwidth`` (min 0)."""
    def __init__(
        self, plotly_name="tickwidth", parent_name="choropleth.colorbar", **kwargs
    ):
        # Fill in defaults only when the caller did not override them;
        # the parent validator receives the same final keyword set.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(TickwidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@choropleth@colorbar@_tickwidth.py@.PATH_END.py
|
{
"filename": "test_initial_conditions.py",
"repo_name": "21cmfast/21cmFAST",
"repo_path": "21cmFAST_extracted/21cmFAST-master/tests/test_initial_conditions.py",
"type": "Python"
}
|
"""
Various tests of the initial_conditions() function and InitialConditions class.
"""
import pytest
import numpy as np
from multiprocessing import cpu_count
from py21cmfast import wrapper
def test_box_shape(ic):
    """Check field shapes and the default cosmology of an InitialConditions box."""
    lowres = (35, 35, 35)
    hires = tuple(2 * side for side in lowres)

    lowres_fields = [
        "lowres_density",
        "lowres_vx",
        "lowres_vy",
        "lowres_vz",
        "lowres_vx_2LPT",
        "lowres_vy_2LPT",
        "lowres_vz_2LPT",
    ]
    for field in lowres_fields:
        assert getattr(ic, field).shape == lowres

    hires_fields = [
        "hires_density",
        "hires_vx",
        "hires_vy",
        "hires_vz",
        "hires_vx_2LPT",
        "hires_vy_2LPT",
        "hires_vz_2LPT",
    ]
    for field in hires_fields:
        assert getattr(ic, field).shape == hires

    # Relative velocities were not requested, so the box must not carry them.
    assert not hasattr(ic, "lowres_vcb")
    assert ic.cosmo_params == wrapper.CosmoParams()
def test_modified_cosmo(ic):
    """Verify that a custom cosmology is propagated into the output struct."""
    custom = wrapper.CosmoParams(SIGMA_8=0.9)
    ic2 = wrapper.initial_conditions(
        cosmo_params=custom,
        user_params=ic.user_params,
    )
    # The new box must carry the custom cosmology, not the fixture's.
    assert ic2.cosmo_params != ic.cosmo_params
    assert ic2.cosmo_params == custom
    assert ic2.cosmo_params.SIGMA_8 == custom.SIGMA_8
def test_transfer_function(ic, default_user_params):
    """Changing POWER_SPECTRUM should change the realised density field."""
    modified = default_user_params.clone(POWER_SPECTRUM=5)
    ic2 = wrapper.initial_conditions(
        random_seed=ic.random_seed,
        user_params=modified,
    )
    rms_new = np.sqrt(np.mean(ic2.hires_density**2))
    rms_diff = np.sqrt(np.mean((ic2.hires_density - ic.hires_density) ** 2))
    # The fields must differ, but not by more than the field itself.
    assert rms_diff < rms_new
    assert rms_new > 0.0
    assert not np.allclose(ic2.hires_density, ic.hires_density)
def test_relvels():
    """Sanity-check the relative-velocity (vcb) initial conditions."""
    ic = wrapper.initial_conditions(
        random_seed=1,
        user_params=wrapper.UserParams(
            HII_DIM=100,
            DIM=300,
            BOX_LEN=300,
            POWER_SPECTRUM=5,
            USE_RELATIVE_VELOCITIES=True,
            N_THREADS=cpu_count(),  # To make this one a bit faster.
        ),
    )
    rms = np.sqrt(np.mean(ic.lowres_vcb**2))
    avg = np.mean(ic.lowres_vcb)
    # We test the lowres box only. The rms should be about 30 km/s for LCDM,
    # so check it is finite and not far off; the mean should be ~0.92*rms,
    # since the field follows a Maxwell-Boltzmann distribution.
    assert 20.0 < rms < 40.0
    assert 0.88 * rms < avg < 0.97 * rms
|
21cmfastREPO_NAME21cmFASTPATH_START.@21cmFAST_extracted@21cmFAST-master@tests@test_initial_conditions.py@.PATH_END.py
|
{
"filename": "_textcase.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolar/marker/colorbar/title/font/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``scatterpolar.marker.colorbar.title.font.textcase``."""

    def __init__(
        self,
        plotly_name="textcase",
        parent_name="scatterpolar.marker.colorbar.title.font",
        **kwargs,
    ):
        # Allowed enumeration values, overridable via kwargs.
        allowed = kwargs.pop("values", ["normal", "word caps", "upper", "lower"])
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(TextcaseValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolar@marker@colorbar@title@font@_textcase.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/mistralai/tests/unit_tests/__init__.py",
"type": "Python"
}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@mistralai@tests@unit_tests@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergeo/selected/__init__.py",
"type": "Python"
}
|
import sys

# On Python >= 3.7 the validators are loaded lazily via module-level
# __getattr__/__dir__ (PEP 562); older interpreters fall back to eager
# imports of the same two names.
if sys.version_info < (3, 7):
    from ._textfont import TextfontValidator
    from ._marker import MarkerValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._textfont.TextfontValidator", "._marker.MarkerValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergeo@selected@__init__.py@.PATH_END.py
|
{
"filename": "periodic.ipynb",
"repo_name": "lgrcia/nuance",
"repo_path": "nuance_extracted/nuance-main/docs/notebooks/periodic.ipynb",
"type": "Jupyter Notebook"
}
|
# Periodic transit search
In this notebook, we use `nuance` to search for a periodic transit.
```python
# in order to run on all CPUs
import os
import jax
jax.config.update("jax_enable_x64", True)
os.environ["XLA_FLAGS"] = f"--xla_force_host_platform_device_count={os.cpu_count()}"
```
## Generating the data
Let's generate some data first
```python
import numpy as np
from nuance import core
import matplotlib.pyplot as plt
from tinygp import kernels, GaussianProcess
depth = 2.3e-3
error = 1e-3
time = np.linspace(0, 3.0, 4000)
transit_prams = {"epoch": 0.2, "duration": 0.05, "period": 0.7}
transit_model = depth * core.transit(time, **transit_prams)
kernel = kernels.quasisep.SHO(10.0, 10.0, 0.002)
gp = GaussianProcess(kernel, time, diag=error**2)
flux = transit_model + gp.sample(jax.random.PRNGKey(0)) + 1.0
ax = plt.subplot(111, xlabel="time", ylabel="flux")
plt.plot(time, flux, ".", c="0.8")
plt.tight_layout()
```

## Linear search
We now run the [`linear_search`](nuance.linear_search)
```python
from nuance.linear_search import linear_search
epochs = time.copy()
durations = np.linspace(0.01, 0.2, 15)
ls = linear_search(time, flux, gp=gp)(epochs, durations)
```
100%|██████████| 4008/4008 [00:01<00:00, 2438.61it/s]
```{note}
Notice how we reused the same Gaussian Process used to generate the data. In practice, a kernel need to be chosen and its hyper-parameters optimized. See the [GP optimization tutorial](gp.ipynb)
```
## The *periodic search*
We can now perform the [`periodic_search`](nuance.periodic_search)
```python
from nuance.periodic_search import periodic_search
periods = np.linspace(0.1, 2.0, 4000)
snr_function = jax.jit(core.snr(time, flux, gp=gp))
ps_function = periodic_search(epochs, durations, ls, snr_function)
snr, params = ps_function(periods)
t0, D, P = params[np.argmax(snr)]
print(
"\n".join(
[f"{n}: {v:.3f}" for n, v in zip(["epoch", "duration", "period"], [t0, D, P])]
)
)
```
/Users/lgarcia/mambaforge/envs/nuance/lib/python3.10/site-packages/multiprocess/popen_fork.py:66: RuntimeWarning: os.fork() was called. os.fork() is incompatible with multithreaded code, and JAX is multithreaded, so this will likely lead to a deadlock.
self.pid = os.fork()
100%|██████████| 4000/4000 [00:01<00:00, 3660.82it/s]/Users/lgarcia/mambaforge/envs/nuance/lib/python3.10/site-packages/multiprocess/popen_fork.py:66: RuntimeWarning: os.fork() was called. os.fork() is incompatible with multithreaded code, and JAX is multithreaded, so this will likely lead to a deadlock.
self.pid = os.fork()
epoch: 0.200
duration: 0.051
period: 0.700
and plot the SNR periodogram
```python
from nuance import utils
fig = plt.figure(figsize=(8.5, 4))
linear, found, noise = core.separate_models(time, flux, gp=gp)(t0, D, P)
ax = plt.subplot(121, xlabel="periods", ylabel="SNR")
ax.axvline(transit_prams["period"], c="0.8", ls="-", label="true")
ax.plot(periods, snr)
ax.legend()
ax = plt.subplot(222, xlabel="time", ylabel="flux")
ax.plot(time, flux, ".", c="0.8")
ax.plot(time, found + 1, c="k", label="found")
ax.plot(time, transit_model + 1, c="C0", label="true")
ax.legend()
ax = plt.subplot(224, xlabel="time", ylabel="flux", xlim=(-0.2, 0.2))
phi = utils.phase(time, t0, P)
detrended = flux - noise - linear
plt.plot(phi, detrended, ".", c=".8")
bx, by, be = utils.binn_time(phi, detrended, bins=7 / 60 / 24)
plt.errorbar(bx, by, yerr=be, fmt=".", c="k")
plt.tight_layout()
```

```{note}
If we want to search for another planetary candidate, we can mask the current candidate and reuse the *linear search*; only the *periodic search* needs to be rerun. This is demonstrated in the [Multi-planetary search notebook](multi1.ipynb)
```
|
lgrciaREPO_NAMEnuancePATH_START.@nuance_extracted@nuance-main@docs@notebooks@periodic.ipynb@.PATH_END.py
|
{
"filename": "binomialObsFPRate-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaseline_gaiaRadCut/.ipynb_checkpoints/binomialObsFPRate-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
Reliability is given by
$$ R = \frac{N_{\mathrm{truePC}}}{N_{\mathrm{obsPC}}} = 1 - \frac{N_{\mathrm{obsFP}}}{N_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) = 1 - \frac{F_{\mathrm{obsFP}}}{F_{\mathrm{obsPC}}} \left( \frac{1 - E}{E} \right) $$
where $E = N_{\mathrm{obsFP}}/N_{\mathrm{trueFP}}$ is the false positive effectiveness, $F_{\mathrm{obsFP}} = N_{\mathrm{obsFP}}/N_{\mathrm{obsTCEs}}$ is the fraction of observed TCEs that are dispositioned as FP and $F_{\mathrm{obsPC}} = N_{\mathrm{obsPC}}/N_{\mathrm{obsTCEs}}$ is the fraction of TCEs dispositioned as PC.
We will separately measure $E$ and $F_{\mathrm{obsFP}}$ as binomial point processes with probabilities that depend on period and MES. Once we have $F_{\mathrm{obsFP}}$ then $F_{\mathrm{obsPC}} = 1 - F_{\mathrm{obsFP}}$, assuming that $N_{\mathrm{obsTCEs}} = N_{\mathrm{obsPC}} + N_{\mathrm{obsFP}}$.
We think of TCEs as consisting of two sets: those that are dispositioned as FP and those that are dispositioned as PC. We do this for both the observed TCEs, and for inverted/scrambled TCEs, where all TCEs are true false positives. Then we can think of the vetting process as drawing from the set of TCEs, with a probability $r$ of selecting either PCs or FPs. Then the probability distribution of selecting $c$ FPs from $n$ TCEs is given by the binomial distribution
$$P\{c\} = \left( \begin{array}{c} n \\ c \end{array} \right) r^c (1-r)^{n-c}.$$
To measure $E$ we use the inverted and scrambled data sets, where all detected TCEs are by definition FPs. We define $E$ as the probability of drawing FPs from inverted/scrambled TCEs, found via the Bayesian inference $p(E|n, c) \propto p(c|E, n) p(E)$, where
$$p(c|E, n) = \left( \begin{array}{c} n \\ c \end{array} \right) E^c (1-E)^{n-c}$$ and
$p(E)$ is a prior distribution of the probability $E$. By putting the data on a grid indexed by $i,j$, we can fit effectiveness as a function parameterized by a vector $\theta$, $E(\theta,\mathrm{period},\mathrm{MES})$, as $p(\theta)|n_{i,j}, c_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) \propto p(c_{i,j}|\theta, n_{i,j}, \mathrm{period}_{i,j},\mathrm{MES}_{i,j}) p(\theta)$, where $p(\theta)$ is some prior distribution of the parameters.
To measure $F_{\mathrm{obsFP}}$ we perform a similar inference using the set of observed TCEs, inferring the probability of drawing c FPs from n observed TCEs. The inference in this case becomes $p(F_{\mathrm{obsFP}}|n, c) \propto p(c|F_{\mathrm{obsFP}}, n) p(F_{\mathrm{obsFP}})$, which we can parameterize in terms of a function similar to effectiveness.
```python
import numpy as np
import matplotlib.pyplot as plt
import scipy.special as spec
import pandas as pd
from astropy.io import ascii
from astropy.table import Table, vstack
import sys
sys.path.insert(0, '..')
import dr25Models as funcModels
```
First we set the parameters space for our analysis.
```python
stellarType = "GK"
scoreCut = 0.0;
if False:
periodMin = 200;
periodMax = 400;
# rpMin = 0.75;
# rpMax = 2.0;
rpMin = 0;
rpMax = 100;
mesMin = 7;
mesMax = 15;
else:
periodMin = 50;
periodMax = 600;
rpMin = 0.5;
rpMax = 15;
mesMin = 7;
mesMax = 30;
```
```python
def drawHeatMap(dataArray, imageSize, x, y, nData=[], colorBarLabel="", textOn=True, forceInt=True):
    """Draw dataArray as an annotated heat map over the (x, y) cell grid.

    dataArray:     2-D array of per-cell values, indexed (x-cell, y-cell).
    imageSize:     figure size tuple passed to plt.figure.
    x, y:          2-D arrays holding the lower-left coordinate of each cell.
    nData:         optional per-cell counts; cells with nData == 0 are drawn
                   as "-". NOTE(review): the mutable default [] is only
                   rebound below, never mutated, so it is harmless here.
    colorBarLabel: y-axis label for the colour bar.
    textOn:        if True, print each cell's value inside the cell.
    forceInt:      if True, cast the printed values to int.
    """
    # Cell widths, assuming a uniformly spaced grid.
    dx = x[(1,0)] - x[(0,0)];
    dy = y[(0,1)] - y[(0,0)];
    extent = [x[(0,0)], x[(-1,0)]+dx,y[(0,0)],y[(0,-1)]+dy];
    plt.figure(figsize=imageSize);
    # fig, ax = plt.subplots(figsize=imageSize);
    ax = plt.gca()
    # imshow draws row-major, so transpose to put the x axis horizontally.
    da = np.transpose(dataArray);
    # im = ax.imshow(da, extent = extent, origin='lower');
    im = ax.imshow(da, extent = extent, origin='lower', cmap="Greys");
    ax.set_aspect(10);
    if len(nData) == 0:
        nData = np.ones(dataArray.shape)
    # ax.imshow(da, origin='lower');
    arrayShape = da.shape;
    minda = np.min(da)
    maxda = np.max(da)
    daRange = maxda - minda;
    for i in range(arrayShape[0]):
        for j in range(arrayShape[1]):
            # White text on dark cells, black on light ones.
            if da[i, j] > minda + daRange*0.5:
                # cstr = "k"
                cstr = "w"
            else:
                # cstr = "w"
                cstr = "k"
            # NOTE(review): both branches set the same font size; the split
            # looks like a leftover from tuning.
            if np.abs(da[i,j]) < 100:
                fsz = 9
            else:
                fsz = 9
            if textOn:
                # da is transposed relative to x/y/nData, hence the swapped
                # (j, i) indexing for the coordinate arrays below.
                if nData[(j,i)] > 0:
                    if forceInt:
                        ax.text(x[(j,i)]+dx/2, y[(j,i)]+dy/2, da[i, j].astype("int"),
                                ha="center", va="center", color=cstr, fontsize=fsz)
                    else:
                        ax.text(x[(j,i)]+dx/2, y[(j,i)]+dy/2, da[i, j],
                                ha="center", va="center", color=cstr, fontsize=fsz)
                else:
                    # No data in this cell: mark it with a dash.
                    ax.text(x[(j,i)]+dx/2, y[(j,i)]+dy/2, "-",
                            ha="center", va="center", color=cstr, fontsize=fsz)
    ax.tick_params(axis = "both", labelsize = 12)
    im_ratio = float(da.shape[0])/da.shape[1]
    cbh = plt.colorbar(im,fraction=0.024*im_ratio, pad=0.02)
    # NOTE(review): "fontSize" is not the documented matplotlib kwarg
    # ("fontsize" is); this only works if the installed matplotlib accepts
    # the alias — confirm before upgrading.
    cbh.ax.set_ylabel(colorBarLabel, fontSize = 16);
    # ax.invert_yaxis();
```
Then we load our data, which consists of the stellar catalog, observed TCEs, inverted TCEs and scrambled TCEs. We convert the tables to Pandas for manipulation.
```python
# not currently used
# load skygroup maps for the FOV position dependence study
# obsSkygroupMap = ascii.read("../data/obsTceToSkygroup.txt");
```
```python
dataLoc = "../data/"
starlist = "../stellarCatalogs/dr25_stellar_berger2019_clean_GaiaRadCut_GK.txt"
kic = pd.read_csv(starlist)
obsTceList = dataLoc + "kplr_dr25_obs_tces.txt"
obsTcesFull = ascii.read(obsTceList);
obsTcesFullPd = obsTcesFull.to_pandas();
```
And remove the banned TCEs from the observed TCEs
```python
bannedTCEList = ascii.read(dataLoc + "kplr_droplist_inv.txt");
bannedTCEList = bannedTCEList.to_pandas();
obsTcesPd = obsTcesFullPd[~obsTcesFullPd['TCE_ID'].isin(bannedTCEList['TCE_ID'])];
print("length of obsTcesFull = " + str(len(obsTcesFullPd))
+ ", length of obsTces = " + str(len(obsTcesPd)))
```
length of obsTcesFull = 32530, length of obsTces = 29130
Now convert back to astropy tables, and combine the scrambled TCEs into one list
```python
obsTces = Table.from_pandas(obsTcesPd)
```
Restrict to the chosen stellar population
```python
obsTces = obsTces[np.in1d(obsTces['KIC'],kic.kepid)]
print("length of obsTces = " + str(len(obsTces)))
```
length of obsTces = 4138
Restrict to the desired radius and period range
```python
```
```python
spIndex = np.where(np.all([
obsTces['Rp']>rpMin,obsTces['Rp']<rpMax,\
obsTces['period']>periodMin,obsTces['period']<periodMax], axis=0))
spObsTces = obsTces[spIndex]
print("length of spObsTces = " + str(len(spObsTces)))
```
length of spObsTces = 2009
Separate out the dispositioned PC and FPs
```python
spObsNtlPcs = spObsTces[(spObsTces['NTL']==0) & (spObsTces['Score']>=scoreCut)]
spObsNtlFps = spObsTces[(spObsTces['NTL']==1) | (spObsTces['Score']<scoreCut)]
spObsPcs = spObsTces[(spObsTces['Disp']=='PC') & (spObsTces['Score']>=scoreCut)]
spObsFps = spObsTces[(spObsTces['Disp']=='FP') | (spObsTces['Score']<scoreCut)]
print("length of spObsNtlPcs = " + str(len(spObsNtlPcs)))
print("length of spObsNtlFps = " + str(len(spObsNtlFps)))
print("length of obsPcs = " + str(len(spObsPcs)))
print("length of obsFps = " + str(len(spObsFps)))
```
length of spObsNtlPcs = 271
length of spObsNtlFps = 1738
length of obsPcs = 246
length of obsFps = 1763
```python
spObsFpsNotNtl = spObsFps[(spObsFps['NTL']==0)]
spObsFpsNtl = spObsFps[(spObsFps['NTL']==1)]
print("length of spObsFpsNotNtl = " + str(len(spObsFpsNotNtl)))
print("length of spObsFpsNtl = " + str(len(spObsFpsNtl)))
```
length of spObsFpsNotNtl = 25
length of spObsFpsNtl = 1738
```python
spObsFpsNotNtlId = spObsFpsNotNtl["TCE_ID"]
spObsFpsNotNtlId
```
<MaskedColumn name='TCE_ID' dtype='str12' length=25>
<table>
<tr><td>001718189-03</td></tr>
<tr><td>003531231-01</td></tr>
<tr><td>003854101-01</td></tr>
<tr><td>004043443-01</td></tr>
<tr><td>004055092-01</td></tr>
<tr><td>004150539-03</td></tr>
<tr><td>004371172-01</td></tr>
<tr><td>005015459-01</td></tr>
<tr><td>005350447-01</td></tr>
<tr><td>005551228-01</td></tr>
<tr><td>005806800-01</td></tr>
<tr><td>005809954-01</td></tr>
<tr><td>006289344-01</td></tr>
<tr><td>006862332-01</td></tr>
<tr><td>006948480-01</td></tr>
<tr><td>007813039-01</td></tr>
<tr><td>008223655-01</td></tr>
<tr><td>008345172-01</td></tr>
<tr><td>008956706-01</td></tr>
<tr><td>009394762-01</td></tr>
<tr><td>009932970-01</td></tr>
<tr><td>010812504-01</td></tr>
<tr><td>011774991-02</td></tr>
<tr><td>012416987-01</td></tr>
<tr><td>012459725-01</td></tr>
</table>
```python
# list of obs FPs with NTL=0 that may still be artifacts from visual inspection
# the record of the visual inspection is in obsFpsNotNtlId.txt
obsNoNtlFAs = ["004371172-01", "009394762-01"]
# remove these from the obs FPs with NTL = 0 to create list of astrophysical FPs
spObsFpsNotNtlPd = spObsFpsNotNtl.to_pandas()
spObsFpsNotNtlAfpPd = spObsFpsNotNtlPd[~spObsFpsNotNtlPd['TCE_ID'].isin(obsNoNtlFAs)]
# remove the obs FPs with NTL=0 that are judged to be artifacts from visual inspection
spObsFpsPd = spObsFps.to_pandas();
spObsFpsPd = spObsFpsPd[~spObsFpsPd['TCE_ID'].isin(spObsFpsNotNtlAfpPd['TCE_ID'])]
spObsFps = Table.from_pandas(spObsFpsPd)
print("length of obsFps = " + str(len(spObsFps)))
```
length of obsFps = 1740
```python
```
Let's see what this population looks like.
```python
plt.figure(figsize=(10,5));
# plt.subplot(2,2,1);
plt.plot(spObsPcs['period'], spObsPcs['MES'], "o",
spObsFps['period'], spObsFps['MES'], ".");
plt.ylim(mesMin,mesMax);
plt.xlim(periodMin,periodMax);
plt.legend(("obs PCs", "obs FPs"));
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
```

Bin the data so we have n TCEs and c FPs in each bin.
```python
dPeriod = 20;
dMes = 1;
p0 = periodMin;
pEnd = periodMax;
m0 = mesMin;
mEnd = mesMax;
# make the period-mes grid
NPeriod = int((pEnd - p0)/dPeriod);
NMes = int((mEnd - m0)/dMes);
cellPeriod = np.zeros((NPeriod,NMes));
cellMes = np.zeros((NPeriod,NMes));
obsTceGrid = np.zeros((NPeriod,NMes));
obsFpGrid = np.zeros((NPeriod,NMes));
obsPcGrid = np.zeros((NPeriod,NMes));
# count how many points are in each cell
for p in range(NPeriod):
for m in range(NMes):
cellPeriod[(p,m)] = p0 + p*dPeriod;
cellMes[(p,m)] = m0 + m*dMes;
pointsInCell = np.where(
(spObsTces['period'] > cellPeriod[(p,m)])
& (spObsTces['period'] <= cellPeriod[(p,m)]+dPeriod)
& (spObsTces['MES'] > cellMes[(p,m)])
& (spObsTces['MES'] <= cellMes[(p,m)]+dMes));
obsTceGrid[(p,m)] = len(pointsInCell[0]);
pointsInCell = np.where(
(spObsPcs['period'] > cellPeriod[(p,m)])
& (spObsPcs['period'] <= cellPeriod[(p,m)]+dPeriod)
& (spObsPcs['MES'] > cellMes[(p,m)])
& (spObsPcs['MES'] <= cellMes[(p,m)]+dMes));
obsPcGrid[(p,m)] = len(pointsInCell[0]);
pointsInCell = np.where(
(spObsFps['period'] > cellPeriod[(p,m)])
& (spObsFps['period'] <= cellPeriod[(p,m)]+dPeriod)
& (spObsFps['MES'] > cellMes[(p,m)])
& (spObsFps['MES'] <= cellMes[(p,m)]+dMes));
obsFpGrid[(p,m)] = len(pointsInCell[0]);
```
```python
drawHeatMap(obsTceGrid, (15,15), cellPeriod, cellMes, colorBarLabel="# of TCEs");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
plt.savefig("obsFPNTces.pdf",bbox_inches='tight')
plt.title("All observed TCEs");
drawHeatMap(obsPcGrid, (15,15), cellPeriod, cellMes);
plt.title("Observed PCs");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
drawHeatMap(obsFpGrid, (15,15), cellPeriod, cellMes);
plt.title("Observed FPs");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
```



Compute the PC and FC fractions in each cell to get a sense of what the fractions look like. These are not used in the inference.
```python
minTcePerCell = 0;
obsPcFrac = np.zeros(np.shape(obsTceGrid))
obsPcFrac[obsTceGrid>minTcePerCell] = obsPcGrid[obsTceGrid>minTcePerCell]/obsTceGrid[obsTceGrid>minTcePerCell];
drawHeatMap(np.round(100*obsPcFrac), (15,15), cellPeriod, cellMes);
plt.title("Observed PC Fraction (%)");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
```

```python
obsFpFrac = np.zeros(np.shape(obsTceGrid))
obsFpFrac[obsTceGrid>minTcePerCell] = obsFpGrid[obsTceGrid>minTcePerCell]/obsTceGrid[obsTceGrid>minTcePerCell];
drawHeatMap(np.round(100*obsFpFrac), (15,15), cellPeriod, cellMes, colorBarLabel="FP Fraction (%)", nData = obsTceGrid);
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
plt.savefig("obsFPFrac.pdf",bbox_inches='tight')
plt.title("Observed FP Fraction (%)");
```

Prepare the data for the call to emcee to do the Bayesian inference.
```python
import dr25Models as funcModels
```
```python
cObsFp = obsFpGrid.flatten();
nObsTce = obsTceGrid.flatten();
# convert to homogeneous coordinates on unit square [0,1]
cellX, cellY = funcModels.normalizeRange(cellPeriod, cellMes, [periodMin, periodMax], [mesMin, mesMax]);
gridShape = np.shape(cellX);
dx = 1./gridShape[0];
dy = 1./gridShape[1];
print("gridShape = " + str(gridShape) + ", dx = " + str(dx) + ", dy = " + str(dy))
cellXFlat = cellX.flatten();
cellYFlat = cellY.flatten();
tceData = [cellXFlat, cellYFlat, nObsTce, cObsFp];
```
gridShape = (27, 23), dx = 0.037037037037, dy = 0.0434782608696
We're ready to compute a Bayesian inference of the success probability $r$:
$$p(r|c, n) \propto p(c|r, n) p(r).$$
But we're computing $r$ as a function of period $p$, MES $m$, and parameters $\theta$, $r(\theta, p, m)$. So our inference becomes
$$p(\theta|c, n, p, m) \propto p(c|\theta, n, p, m) p(\theta).$$
Because each cell is independent, we linearize the array to a list of cells indexed by $k$. Then the likelihood for each cell is
$$p(c_k|\theta, n_k, p_k, m_k) = \left( \begin{array}{c} n_k \\ c_k \end{array} \right) r(\theta, p_k , m_k )^{c_k} (1-r(\theta, p_k , m_k ))^{n_k-c_k}$$
Because the $N$ cells are independent, the likelihood for the collection of cells is
$$p(c|\theta, n, p, m) \equiv p(c_1, \ldots, c_N|\theta, n_1, \ldots, n_N, p_1, \ldots, p_N, m_1, \ldots, m_N) = \prod_k \left( \begin{array}{c} n_k \\ c_k \end{array} \right) r(\theta, p_k , m_k )^{c_k} (1-r(\theta, p_k , m_k ))^{n_k-c_k}.$$
The log-likelihood is then
$$\log p(c|\theta, n, p, m) = \sum_k \log \left(\left( \begin{array}{c} n_k \\ c_k \end{array} \right) r(\theta, p_k , m_k )^{c_k} (1-r(\theta, p_k , m_k ))^{n_k-c_k} \right)$$
$$= \sum_k \left[ \log \left(\begin{array}{c} n_k \\ c_k \end{array} \right) + c_k \log \left(r(\theta, p_k , m_k ) \right) + \left( n_k-c_k \right) \log(1-r(\theta, p_k , m_k )) \right] $$
Define the likelihood and prior functions.
```python
def lnBinlike(theta, data, model):
    """Binomial log-likelihood of observing c FPs out of n TCEs per cell.

    data is (x, y, n, c) flattened over the grid; the per-cell success rate
    is r(theta, x, y) as evaluated by funcModels.rateModel for `model`.
    """
    x, y, n, c = data
    r = funcModels.rateModel(x,y,theta,model);
    # c*log(r) is nan/-inf where r == 0; zero it for the c == 0 cells so
    # that 0*log(0) contributes 0. NOTE(review): cells with c > 0 and
    # r == 0 still yield nan — this is the source of the divide-by-zero /
    # invalid-value runtime warnings seen in the transcript.
    clnr = c*np.log(r);
    clnr[c==0] = 0;
    lpl = np.sum(np.log(spec.comb(n,c)) + clnr + (n-c)*np.log(1-r));
    return lpl
def lnBinprior(theta, data, model):
    """Log-prior for the rate-model parameters theta.

    Returns 1.0 (an unnormalised flat prior) when theta lies inside the
    model-specific box constraints AND the implied rate stays within
    [0, 1] over the whole grid; returns -inf otherwise.
    NOTE(review): returning 1.0 rather than 0.0 only adds a constant
    offset to the log-posterior, which does not affect MCMC sampling.
    """
    x, y, n, c = data
    # print(theta)
    if model == "constant":
        if 0.0 <= theta[0] <= 1:
            return 1.0
    elif model == "linearX":
        if -5000.0 < theta[0] < 5000.0 and 0.0 < theta[1] < 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "linearXY":
        if -5000.0 < theta[0] < 5000.0 and -5000.0 < theta[1] < 5000.0 \
            and 0.0 < theta[2] < 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "gaussian":
        if 0.45 < theta[0] < 1 and 0 <= theta[1] < 0.5 \
            and 1e-2 < theta[2] < 10 and 1e-2 < theta[3] < 10 and -1 <= theta[4] <= 1 and 0 <= theta[5] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticX":
        if -1 < theta[0] < 2.0 \
            and 1e-4 < theta[1] < 1e4 and 0 < theta[2] < 1e6 and 0 < theta[3] < 1e6 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticY":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 100 and 0 < theta[2] <= 0.1 and 0.9 < theta[3] <= 1 \
            and 0 < theta[2] + theta[3] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticY":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 100 and -180 < theta[2] <= 180 \
            and 0 < theta[3] <= 0.2 and 0.8 < theta[4] <= 1 \
            and 0 < theta[3] + theta[4] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticYXFixedLogisticY":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 100 and -180 < theta[2] <= 180 \
            and 0 < theta[3] <= 0.2 and 0.8 < theta[4] <= 1 \
            and 0 < theta[3] + theta[4] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticYXLogisticY":
        if -1 <= theta[0] <= 2 and 1e-4 < theta[1] < 100 \
            and -1 <= theta[2] <= 2 and 1e-4 < theta[3] < 100 \
            and -180 < theta[4] <= 180 \
            and 0 < theta[5] <= 0.2 and 0.8 < theta[6] <= 1 \
            and 0 < theta[5] + theta[6] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticY2":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 1e4 and 0.1 < theta[2] <= 1000 and 0 < theta[3] <= 0.1 and 0.9 < theta[4] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticX0":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 1e4 and 0 < theta[2] < 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticY0":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 1e4 and 0 < theta[2] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticY02":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 1e4 and 0.1 < theta[2] <= 1000 and 0 < theta[3] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "logisticX0xlogisticY0":
        if 0 <= theta[0] <= 1e3 and 0 <= theta[1] <= 1e3 \
            and 1e-4 < theta[2] < 1e4 and 1e-4 < theta[3] < 1e4 \
            and 0 < theta[4] < 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX0xlogisticY0":
        if -1 <= theta[0] <= 2 and -1 <= theta[1] <= 2 \
            and 1e-4 < theta[2] < 1e4 and 1e-4 < theta[3] < 1e4 \
            and -180 < theta[4] < 180 and -180 < theta[5] < 180 \
            and 0 < theta[6] < 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX0":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 100 and 0 < theta[2] <= 1 and -180 < theta[3] < 180 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX02":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 1000 and 0.1 < theta[2] <= 1000 and 0 < theta[3] <= 1 and -180 < theta[4] < 180 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX0xlogisticY02":
        if 0 <= theta[0] <= 2 and 0 <= theta[1] <= 2 \
            and 1e-4 < theta[2] < 1e4 and 1e-4 < theta[3] < 1e4 \
            and 0.01 < theta[4] < 1000 and 0.01 < theta[5] < 1000 \
            and -180 < theta[6] < 180 and -180 < theta[7] < 180 \
            and 0 < theta[8] < 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "rotatedLogisticX0+gaussian":
        if -1 <= theta[0] <= 2 \
            and 1e-4 < theta[1] < 100 and 0 < theta[2] <= 1 and -180 < theta[3] < 180 \
            and 0.45 < theta[4] < 1 and 0 <= theta[5] < 0.5 \
            and 1e-2 < theta[6] < 2 and 1e-2 < theta[7] < 2 and -1 <= theta[8] <= 0 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    elif model == "dualBrokenPowerLaw":
        # print(funcModels.rateModel(x, y, theta, model))
        if 0 <= theta[0] <= 1 and 0 <= theta[1] <= 1 \
            and -2 < theta[2] < 2 and -2 < theta[3] < 2 and -2 < theta[4] < 2 and -2 < theta[5] < 2 \
            and 0 < theta[6] <= 1 \
            and np.min(funcModels.rateModel(x, y, theta, model)) >= 0 \
            and np.max(funcModels.rateModel(x, y, theta, model)) <= 1:
            return 1.0
    else:
        raise ValueError('Bad model name');
    # print(theta)
    # Fell through every constraint check: theta is outside the prior.
    return -np.inf
def lnBinprob(theta, data, model):
    """Log-posterior: prior plus binomial likelihood (-inf outside the prior)."""
    x, y, n, c = data
    prior = lnBinprior(theta, data, model)
    if np.isfinite(prior):
        # Only evaluate the likelihood when theta is inside the prior support.
        return prior + lnBinlike(theta, data, model)
    return -np.inf
```
```python
obsModel = "rotatedLogisticX0"
if obsModel == "constant":
initialPos = funcModels.initRateModel(obsModel);
elif obsModel == "logisticY0":
initialPos = funcModels.initRateModel(obsModel);
elif obsModel == "logisticY":
initialPos = [-2.63785585e-02, 20, 0.02, 0.98];
elif obsModel == "rotatedLogisticY":
initialPos = [ 0.04944521, 23.31556305, -25.49590252, 0.04268818, 0.95325393];
elif obsModel == "rotatedLogisticYXLogisticY":
initialPos = [ 0.04944521, 23.31556305, -2.63785585e-02, 20, -25.49590252, 0.04268818, 0.95325393];
elif obsModel == "logisticY02":
initialPos = [-1.73396324e-01, 10, 2, 9.90011008e-01];
elif obsModel == "gaussian":
initialPos = [ 0.62, 0.04, 1, 1, -0.01, 9.91039169e-01]
elif obsModel == "dualBrokenPowerLaw":
initialPos = [ 0.56591357, 0.16586711, 1.9865538, -0.00202982, 0.0262327, -1.79204403, 0.98610129]
elif obsModel == "rotatedLogisticX0":
initialPos = [ 1.2, 9.7715652, 0.99773284, -161.15051182];
elif obsModel == "rotatedLogisticX02":
initialPos = [ 0.45632181, 76.52125481, 24.82236241, 0.98233125, -161.15051182];
elif obsModel == "rotatedLogisticX0xlogisticY02":
initialPos = [ 1.6, 0.54208962, 45.93529004, 76.21697177, 527.33411115, 24.66179214, -80, -108.48517773, 0.98293829];
elif obsModel == "rotatedLogisticX0+gaussian":
initialPos = [ 8.09244005e-01, 7.72091689e+00, 9.96798125e-01, -1.70021035e+02,
7.02672475e-01, 3.57238891e-01, 3.62755815e-01, 7.28225533e-02,
-1.60673700e-01];
elif obsModel == "rotatedLogisticX0xlogisticY0":
initialPos = [ 2e+00, 1.99824393e-01, 10, 7.70912734e+00, 9.24636388e-02, -1.00316033e+02, 9.95651001e-01]
else:
initialPos = funcModels.initRateModel(obsModel);
print(initialPos)
```
[1.2, 9.7715652, 0.99773284, -161.15051182]
```python
import scipy.optimize as op
nll = lambda *args: -lnBinlike(*args)
result = op.minimize(nll, initialPos, args=(tceData, obsModel))
maxLikelihoodResult = result["x"];
modelLabels = funcModels.getModelLabels(obsModel)
for i in range(0,len(maxLikelihoodResult)):
print("maximum Likelihood " + modelLabels[i] + ":={:.3f}".format(maxLikelihoodResult[i]))
if lnBinprior(maxLikelihoodResult, tceData, obsModel) == -np.inf:
maxLikelihoodResult = initialPos;
print("violates prior, replacing maxLikelihoodResult result with initialPos")
x, y, n, c = tceData
r = funcModels.rateModel(x,y,maxLikelihoodResult,obsModel);
print("maximum Likelihood rate min = {:.3f}".format(np.min(np.min(r))) + ", max = {:.3f}".format(np.max(np.max(r))))
```
maximum Likelihood $x_0$:=0.689
maximum Likelihood $k_x$:=16.908
maximum Likelihood $A$:=0.910
maximum Likelihood $\phi$:=-157.793
maximum Likelihood rate min = 0.000, max = 0.910
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:4: RuntimeWarning: divide by zero encountered in log
after removing the cwd from sys.path.
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:4: RuntimeWarning: invalid value encountered in multiply
after removing the cwd from sys.path.
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/scipy/optimize/optimize.py:663: RuntimeWarning: invalid value encountered in double_scalars
grad[k] = (f(*((xk + d,) + args)) - f0) / d[k]
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:6: RuntimeWarning: invalid value encountered in log
Fit $F_{\mathrm{obsFP}}$.
```python
import emcee
ndim, nwalkers = len(maxLikelihoodResult), 100
pos = [maxLikelihoodResult + 1e-3*np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnBinprob, args=(tceData, obsModel))
sampler.run_mcmc(pos, 10000);
obsSamples = sampler.chain[:, 5000:, :].reshape((-1, ndim))
dataResult = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(obsSamples, [16, 50, 84],
axis=0)))
dataResult = list(dataResult)
np.save("binObsPosteriors_" + str(obsModel) + ".npy", obsSamples)
modelLabels = funcModels.getModelLabels(obsModel)
for i in range(0,ndim):
v = dataResult[i];
print("MCMC " + modelLabels[i] + ":={:.3f}".format(v[0]) + "+{:.3f}".format(v[1]) + "-{:.3f}".format(v[2]))
# print("true " + modelLabels[i] + ":={:.3f}".format(trueTheta[i]))
resultSize = np.shape(dataResult);
fpFitTheta = np.zeros(resultSize[0]);
for i in range(resultSize[0]):
fpFitTheta[i] = dataResult[i][0]
print("pcFitTheta = " + str(fpFitTheta))
plt.figure(figsize=(10,5))
for i in range(0,ndim):
plt.subplot(ndim,1,i+1)
plt.plot(np.transpose(sampler.chain[:, :, i]), color="k", alpha=0.1);
plt.ylabel(modelLabels[i]);
```
MCMC $x_0$:=0.692+0.026-0.027
MCMC $k_x$:=14.007+1.354-1.236
MCMC $A$:=0.983+0.004-0.004
MCMC $\phi$:=-158.923+3.456-3.311
pcFitTheta = [ 0.69191509 14.00737204 0.98315092 -158.92339638]

```python
import corner
# trueTheta = funcModels.initRateModel(model)
# fig = corner.corner(samples, labels=["$m$", "$b$", "$\ln\,f$"],
# truths=[m_true, b_true, np.log(f_true)])
fig = corner.corner(obsSamples, labels = modelLabels, label_kwargs = {"fontsize": 32}, truths = fpFitTheta)
plt.savefig("obsFPPost.pdf",bbox_inches='tight')
```

```python
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
fig = plt.figure(figsize=plt.figaspect(0.3));
ax = fig.add_subplot(1, 3, 1, projection='3d')
Z = funcModels.rateModel(cellX, cellY, fpFitTheta, obsModel);
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
scat = ax.scatter(cellPeriod[obsTceGrid>0], cellMes[obsTceGrid>0], obsFpFrac[obsTceGrid>0], c='r', marker = '.');
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,0)
ax = fig.add_subplot(1, 3, 2, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
scat = ax.scatter(cellPeriod[obsTceGrid>0], cellMes[obsTceGrid>0], obsFpFrac[obsTceGrid>0], c='r', marker = '.');
plt.xlabel("period");
plt.ylabel("MES");
ax.view_init(0,-90)
plt.title("Observed FP Rate");
ax = fig.add_subplot(1, 3, 3, projection='3d')
surf = ax.plot_surface(cellPeriod, cellMes, Z, alpha = 0.5);
scat = ax.scatter(cellPeriod[obsTceGrid>0], cellMes[obsTceGrid>0], obsFpFrac[obsTceGrid>0], c='r', marker = '.');
plt.xlabel("period");
plt.ylabel("MES");
fig, ax = plt.subplots(figsize=(5,5));
CS = ax.contour(cellPeriod, cellMes, Z);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("period");
plt.ylabel("MES");
```


```python
fig, ax = plt.subplots(figsize=(15,10));
Z = funcModels.rateModel(cellX, cellY, fpFitTheta, obsModel);
CS = ax.contour(cellPeriod, cellMes, Z, colors='k');
ax.clabel(CS, inline=1, fontsize=18);
scf = ax.scatter(cellPeriod[obsTceGrid>0], cellMes[obsTceGrid>0], cmap="cividis", c=obsFpFrac[obsTceGrid>0], s=5*obsTceGrid[obsTceGrid>0], alpha = 0.5);
plt.xlabel("period", fontSize = 24);
plt.ylabel("MES", fontSize = 24);
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Measured Rate", fontSize = 24);
plt.tick_params(labelsize = 16)
plt.savefig("obsFPContours.pdf",bbox_inches='tight')
plt.title("Observed FP Rate. Size of marker = # of TCEs in cell");
```

Reconstruct observed FP rates from the fit to compare with data.
```python
fitGrid = np.zeros(np.shape(obsTceGrid));
for p in range(NPeriod):
for m in range(NMes):
fitGrid[(p,m)] = np.random.binomial(obsTceGrid[(p,m)],
funcModels.rateModel(cellX[(p,m)]+dx/2, cellY[(p,m)]+dx/2, fpFitTheta, obsModel), 1);
drawHeatMap(fitGrid, (10,10), cellPeriod, cellMes);
plt.title('reconstructed Observed FPs from the fit');
plt.ylabel('MES');
plt.xlabel('Period');
fitFrac = np.zeros(np.shape(obsTceGrid))
fitFrac[obsTceGrid>minTcePerCell] = fitGrid[obsTceGrid>minTcePerCell]/obsTceGrid[obsTceGrid>minTcePerCell];
drawHeatMap(np.round(100*fitFrac), (10,10), cellPeriod, cellMes);
plt.title('reconstructed Observed FP rate from the fit');
plt.ylabel('MES');
plt.xlabel('Period');
```


```python
from ipywidgets import FloatProgress
from IPython.display import display
nFits = 1000;
fitGrid = np.zeros([np.shape(obsTceGrid)[0],np.shape(obsTceGrid)[1],nFits]);
sidx = [0]*nFits
progress = FloatProgress(min=0, max=nFits)
display(progress)
for f in range(nFits):
sidx[f] = int(np.random.uniform(high=obsSamples.shape[0]-1));
tTheta = obsSamples[sidx[f],:]
for p in range(NPeriod):
for m in range(NMes):
rm = funcModels.rateModel(cellX[(p,m)]+dx/2, cellY[(p,m)]+dy/2, tTheta, obsModel)
if rm > 1:
rm = 1;
fitGrid[(p,m,f)] = np.random.binomial(obsTceGrid[(p,m)], rm, 1);
progress.value += 1
meanFit = np.mean(fitGrid, 2)
stdFit = np.std(fitGrid, 2)
```
FloatProgress(value=0.0, max=1000.0)
```python
drawHeatMap(meanFit, (15,15), cellPeriod, cellMes);
plt.title("Mean reconstructed Observed FPs from the fit");
plt.ylabel('MES');
plt.xlabel('Period');
fitFracMean = np.zeros(np.shape(obsTceGrid))
fitFracMean[obsTceGrid>minTcePerCell] = meanFit[obsTceGrid>minTcePerCell]/obsTceGrid[obsTceGrid>minTcePerCell];
drawHeatMap(np.round(fitFracMean, 2), (15,15), cellPeriod, cellMes,
nData = obsTceGrid, colorBarLabel="Mean FP %", forceInt=False);
plt.ylabel('MES');
plt.xlabel('Period');
plt.savefig("obsFPMean.pdf",bbox_inches='tight')
plt.title("Mean Observed FP rate reconstructed from the fit");
stdFrac = np.zeros(np.shape(obsTceGrid))
stdFrac[obsTceGrid>minTcePerCell] = stdFit[obsTceGrid>minTcePerCell]/obsTceGrid[obsTceGrid>minTcePerCell];
drawHeatMap(np.round(stdFrac, 2), (15,15), cellPeriod, cellMes,
nData = obsTceGrid, colorBarLabel="Standard Deviation", forceInt=False);
plt.ylabel('MES');
plt.xlabel('Period');
plt.savefig("obsFPStd.pdf",bbox_inches='tight')
plt.title("Fractional standard deviation of the Observed FP rate reconstructed from the fit");
```



```python
fitDiff = fitFracMean - obsFpFrac
fitDiffNorm =np.zeros(fitDiff.shape)
fitDiffNorm[stdFit>0] = fitDiff[stdFit>0]/stdFit[stdFit>0];
drawHeatMap(np.round(fitDiff, 2), (15,15), cellPeriod, cellMes, nData = obsTceGrid, forceInt = False);
plt.title("Residual from mean");
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
drawHeatMap(np.round(fitDiffNorm, 2), (15,15), cellPeriod, cellMes, nData = obsTceGrid,
colorBarLabel="Mean Residual (in standard deviations)", forceInt = False);
plt.ylabel('MES', fontsize = 16);
plt.xlabel('Period', fontsize = 16);
plt.savefig("obsFPMeanResid.pdf",bbox_inches='tight')
plt.title("Residual from mean (in standard deviations)");
plt.figure(figsize=(15,5));
plt.hist(fitDiffNorm.flatten()[nObsTce > 0], 100);
np.median(fitDiffNorm.flatten()[nObsTce > 0])
```
-0.039587393869624024



```python
aic = 2*len(fpFitTheta) - 2*lnBinlike(fpFitTheta, tceData, obsModel)
aic
```
338.2504522678564
```python
maxLikelihoodAic = 2*len(maxLikelihoodResult) - 2*lnBinlike(maxLikelihoodResult, tceData, obsModel)
maxLikelihoodAic
```
482.28358775490346
```python
fpFitTheta
```
array([ 0.69191509, 14.00737204, 0.98315092, -158.92339638])
```python
l = np.zeros(np.shape(obsSamples)[0])
aicDist = np.zeros(np.shape(obsSamples)[0])
progress = FloatProgress(min=0, max=obsSamples.shape[0])
display(progress)
for i in range(np.shape(obsSamples)[0]):
l[i] = lnBinprob(obsSamples[i,:], tceData, obsModel)
aicDist[i] = 2*len(obsSamples[i,:]) - 2*lnBinlike(obsSamples[i,:], tceData, obsModel)
progress.value += 1
plt.hist(aicDist, 100);
```
FloatProgress(value=0.0, max=500000.0)

```python
minAic = min(aicDist)
```
```python
plt.hist(np.exp(l - np.median(l)), 100);
```

```python
np.median(l)
```
-165.78742130652034
```python
from skmonaco import mcquad
lbnds = np.empty([np.shape(obsSamples)[1]])
ubnds = np.empty([np.shape(obsSamples)[1]])
for i in range(len(lbnds)):
lbnds[i] = np.min(obsSamples[:,i])
ubnds[i] = np.max(obsSamples[:,i])
regularizationOffset = np.median(l)
def linBinProbInt(theta):
return np.exp(lnBinprob(theta, tceData, obsModel) - regularizationOffset)
BF, BFerror = mcquad(linBinProbInt, xl=lbnds,xu=ubnds, npoints=1e7,nprocs=8 )
print("BF = {:.3e} +/- {:.3e}").format(BF,BFerror)
```
BF = 3.541e-02 +/- 1.222e-04
```python
import os.path
import pickle
fname = "obsFpTable.pkl"
if os.path.isfile(fname):
modelComparisonTable = pd.read_pickle(fname)
else:
modelComparisonTable = pd.DataFrame({"Model": ["rotatedLogisticX0", "rotatedLogisticX0xlogisticY0",
"dualBrokenPowerLaw", "rotatedLogisticX02",
"rotatedLogisticX0xlogisticY02", "rotatedLogisticX0+gaussian"],
"medianMCMCAIC": [0., 0., 0., 0., 0., 0.],
"minMCMCAIC": [0., 0., 0., 0., 0., 0.],
"maxLikelihoodAIC": [0., 0., 0., 0., 0., 0.],
"MedianLogPost": [0., 0., 0., 0., 0., 0.],
"IntegralPost": [0., 0., 0., 0., 0., 0.],
"IntegralPostErr": [0., 0., 0., 0., 0., 0.],
"medianMCMCTheta": [[0],[0],[0],[0],[0],[0]],
"maxLikelihoodTheta": [[0],[0],[0],[0],[0],[0]],
"periodRange": [[0],[0],[0],[0],[0],[0]],
"mesRange": [[0],[0],[0],[0],[0],[0]]},
columns=["Model","BayesFactor","BayesFactorError","AICRelativeProb",
"medianMCMCAIC","minMCMCAIC","maxLikelihoodAIC","IntegralPost",
"IntegralPostErr","MedianLogPost","medianMCMCTheta","maxLikelihoodTheta","periodRange","mesRange"])
modelComparisonTable['IntegralPost'] = modelComparisonTable['IntegralPost'].map('{:,.3e}'.format)
modelComparisonTable['IntegralPostErr'] = modelComparisonTable['IntegralPostErr'].map('{:,.3e}'.format)
```
```python
mctIndex = np.where(modelComparisonTable["Model"].isin([obsModel]))[0][0]
print(mctIndex)
modelComparisonTable["medianMCMCAIC"][mctIndex] = aic;
modelComparisonTable["minMCMCAIC"][mctIndex] = minAic;
modelComparisonTable["maxLikelihoodAIC"][mctIndex] = maxLikelihoodAic;
modelComparisonTable["MedianLogPost"][mctIndex] = regularizationOffset;
modelComparisonTable["IntegralPost"][mctIndex] = BF;
modelComparisonTable["IntegralPostErr"][mctIndex] = BFerror;
modelComparisonTable["medianMCMCTheta"][mctIndex] = fpFitTheta;
modelComparisonTable["maxLikelihoodTheta"][mctIndex] = maxLikelihoodResult;
modelComparisonTable["periodRange"][mctIndex] = [periodMin, periodMax];
modelComparisonTable["mesRange"][mctIndex] = [mesMin, mesMax];
```
0
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:3: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
This is separate from the ipykernel package so we can avoid doing imports until
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:4: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
after removing the cwd from sys.path.
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:5: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
"""
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:6: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
import sys
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:8: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:9: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
if __name__ == '__main__':
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:10: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
# Remove the CWD from sys.path while we load stuff.
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:11: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
# This is added back by InteractiveShellApp.init_path()
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:12: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
if sys.path[0] == '':
```python
modelToCompareIndex = 0
modelComparisonTable["AICRelativeProb"] = 0.
minAic = np.min(modelComparisonTable["medianMCMCAIC"][modelToCompareIndex])
for i in range(len(modelComparisonTable)):
modelComparisonTable["AICRelativeProb"].iloc[i] = np.exp((minAic - modelComparisonTable["medianMCMCAIC"].iloc[i])/2.)
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/pandas/core/indexing.py:189: SettingWithCopyWarning:
A value is trying to be set on a copy of a slice from a DataFrame
See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
self._setitem_with_indexer(indexer, value)
```python
modelComparisonTable["BayesFactor"] = 0.
c1 = modelComparisonTable["MedianLogPost"].iloc[modelToCompareIndex]
i1 = np.double(modelComparisonTable["IntegralPost"].iloc[modelToCompareIndex])
for i in range(len(modelComparisonTable)):
c2 = modelComparisonTable["MedianLogPost"].iloc[i]
modelComparisonTable["BayesFactor"].iloc[i] = np.exp(c2 - c1)*np.double(modelComparisonTable["IntegralPost"].iloc[i])/i1
```
```python
modelComparisonTable["BayesFactorError"] = 0.
B = np.double(modelComparisonTable["IntegralPost"].iloc[modelToCompareIndex])
sB = np.double(modelComparisonTable["IntegralPostErr"].iloc[modelToCompareIndex])
c1 = modelComparisonTable["MedianLogPost"].iloc[modelToCompareIndex]
for i in range(len(modelComparisonTable)):
c2 = modelComparisonTable["MedianLogPost"].iloc[i]
f = np.double(modelComparisonTable["BayesFactor"].iloc[i])
A = np.exp(c2 - c1)*np.double(modelComparisonTable["IntegralPost"].iloc[i])
sA = np.exp(c2 - c1)*np.double(modelComparisonTable["IntegralPostErr"].iloc[i])
modelComparisonTable["BayesFactorError"].iloc[i] = f*np.sqrt((sA/A)**2 + (sB/B)**2)
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:10: RuntimeWarning: invalid value encountered in double_scalars
# Remove the CWD from sys.path while we load stuff.
```python
modelComparisonTable
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Model</th>
<th>BayesFactor</th>
<th>BayesFactorError</th>
<th>AICRelativeProb</th>
<th>medianMCMCAIC</th>
<th>minMCMCAIC</th>
<th>maxLikelihoodAIC</th>
<th>IntegralPost</th>
<th>IntegralPostErr</th>
<th>MedianLogPost</th>
<th>medianMCMCTheta</th>
<th>maxLikelihoodTheta</th>
<th>periodRange</th>
<th>mesRange</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>rotatedLogisticX0</td>
<td>1.0</td>
<td>0.004883</td>
<td>1.000000e+00</td>
<td>338.250452</td>
<td>338.245618</td>
<td>482.283588</td>
<td>0.0354081</td>
<td>0.00012225</td>
<td>-165.787421</td>
<td>[0.6919150890122021, 14.007372040871111, 0.983...</td>
<td>[0.6885198655461882, 16.9083334336558, 0.90988...</td>
<td>[50, 600]</td>
<td>[7, 30]</td>
</tr>
<tr>
<th>1</th>
<td>rotatedLogisticX0xlogisticY0</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>2</th>
<td>dualBrokenPowerLaw</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>3</th>
<td>rotatedLogisticX02</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>4</th>
<td>rotatedLogisticX0xlogisticY02</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>5</th>
<td>rotatedLogisticX0+gaussian</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
</tbody>
</table>
</div>
```python
modelComparisonTable.to_pickle(fname)
```
```python
tt = pd.read_pickle(fname)
```
```python
tt
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>Model</th>
<th>BayesFactor</th>
<th>BayesFactorError</th>
<th>AICRelativeProb</th>
<th>medianMCMCAIC</th>
<th>minMCMCAIC</th>
<th>maxLikelihoodAIC</th>
<th>IntegralPost</th>
<th>IntegralPostErr</th>
<th>MedianLogPost</th>
<th>medianMCMCTheta</th>
<th>maxLikelihoodTheta</th>
<th>periodRange</th>
<th>mesRange</th>
</tr>
</thead>
<tbody>
<tr>
<th>0</th>
<td>rotatedLogisticX0</td>
<td>1.0</td>
<td>0.004883</td>
<td>1.000000e+00</td>
<td>338.250452</td>
<td>338.245618</td>
<td>482.283588</td>
<td>0.0354081</td>
<td>0.00012225</td>
<td>-165.787421</td>
<td>[0.6919150890122021, 14.007372040871111, 0.983...</td>
<td>[0.6885198655461882, 16.9083334336558, 0.90988...</td>
<td>[50, 600]</td>
<td>[7, 30]</td>
</tr>
<tr>
<th>1</th>
<td>rotatedLogisticX0xlogisticY0</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>2</th>
<td>dualBrokenPowerLaw</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>3</th>
<td>rotatedLogisticX02</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>4</th>
<td>rotatedLogisticX0xlogisticY02</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
<tr>
<th>5</th>
<td>rotatedLogisticX0+gaussian</td>
<td>0.0</td>
<td>NaN</td>
<td>2.819373e+73</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000000</td>
<td>0.000e+00</td>
<td>0.000e+00</td>
<td>0.000000</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
<td>[0]</td>
</tr>
</tbody>
</table>
</div>
```javascript
%%javascript
IPython.notebook.save_notebook()
```
<IPython.core.display.Javascript object>
```bash
%%bash -s "$obsModel"
jupyter nbconvert --to html binomialObsFPRate.ipynb
mv binomialObsFPRate.html htmlArchive/binomialObsFPRate_$1.html
```
[NbConvertApp] Converting notebook binomialObsFPRate.ipynb to html
[NbConvertApp] Writing 1995516 bytes to binomialObsFPRate.html
```python
```
```python
```
```python
```
```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaseline_gaiaRadCut@.ipynb_checkpoints@binomialObsFPRate-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "conftest.py",
"repo_name": "sdss/marvin",
"repo_path": "marvin_extracted/marvin-main/tests/conftest.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import os
import pathlib
import json
import yaml
import pytest
import copy
import itertools
from flask_jwt_extended import tokens
from brain import bconfig
from marvin import config, marvindb
from marvin.utils.datamodel.dap import datamodel
from marvin.api.api import Interaction
from marvin.tools import Maps, Cube, ModelCube
from marvin.tools.query import Query
from sdss_access.path import Path
# Global Parameters for FIXTURES
# ------------------------------
# Releases under test, and per-release whitelists restricting which DAP
# bintypes/templates the parametrized fixtures will exercise.
releases = ['DR17']
bintypes_accepted = {'DR17': ['HYB10', 'SPX', 'VOR10']}
templates_accepted = {'DR17': ['MILESHC-MASTARSSP']}
def populate_bintypes_templates(releases, onlybin=None):
    """Build per-release lists of unique bintypes and templates.

    Walks every bintype-template combination known to the DAP datamodel for
    each release, applying the module-level ``bintypes_accepted`` /
    ``templates_accepted`` whitelists and an optional single-bintype filter.

    Parameters
    ----------
    releases : iterable of str
        Releases to collect combinations for.
    onlybin : str, optional
        When given, keep only combinations whose bintype equals this value.

    Returns
    -------
    tuple of dict
        ``(bintypes, templates)`` mapping release -> ordered list of unique names.
    """
    bintypes = {rel: [] for rel in releases}
    templates = {rel: [] for rel in releases}
    for rel in releases:
        for bintemp in datamodel[rel].get_bintemps():
            # bintemp is 'BINTYPE-TEMPLATE...'; the template may itself contain dashes.
            bintype, _, template = bintemp.partition('-')
            if onlybin and bintype != onlybin:
                continue
            allowed_bins = bintypes_accepted.get(rel)
            if allowed_bins is not None and bintype not in allowed_bins:
                continue
            allowed_temps = templates_accepted.get(rel)
            if allowed_temps is not None and template not in allowed_temps:
                continue
            if bintype not in bintypes[rel]:
                bintypes[rel].append(bintype)
            if template not in templates[rel]:
                templates[rel].append(template)
    return bintypes, templates
# Precompute the parametrization tables once at import time, restricted to HYB10.
bintypes, templates = populate_bintypes_templates(releases, onlybin='HYB10')
@pytest.fixture(scope='session', params=releases)
def release(request):
    """Yield a release string, parametrized over the module-level ``releases`` list."""
    return request.param
def _get_release_generator_chain():
    """Return an iterator over all valid (release, bintype, template) combinations."""
    per_release = (itertools.product([rel], bintypes[rel], templates[rel])
                   for rel in releases)
    return itertools.chain.from_iterable(per_release)
def _params_ids(fixture_value):
"""Return a test id for the release chain."""
return '-'.join(fixture_value)
@pytest.fixture(scope='session', params=sorted(_get_release_generator_chain()), ids=_params_ids)
def get_params(request):
    """Yield a tuple of (release, bintype, template)."""
    # Params are sorted for a stable test order; ids come from _params_ids.
    return request.param
# Canned fixture data loaded once at import time from the tests/data directory.
with open(os.path.join(os.path.dirname(__file__), 'data/query_test_data.dat')) as f:
    query_data = yaml.load(f, Loader=yaml.FullLoader)
with open(os.path.join(os.path.dirname(__file__), 'data/galaxy_test_data.dat')) as f:
    galaxy_data = yaml.load(f, Loader=yaml.FullLoader)
modes = ['local', 'remote']  # to loop over modes (see mode fixture)
dbs = ['db', 'nodb']  # to loop over dbs (see db fixture)
origins = ['file', 'db', 'api']  # to loop over data origins (see data_origin fixture)
def pytest_addoption(parser):
    """Add new options"""
    # control releases run
    # --local-only restricts parametrized fixtures to file-based, db-off variants.
    parser.addoption('--local-only', action='store_true', default=False, help='Run a local file tests only')
@pytest.fixture(scope='session')
def check_marks(pytestconfig):
    """Return the (-m marker expression, --local-only flag) pair from the CLI."""
    markers_arg = pytestconfig.getoption('-m')
    local = pytestconfig.getoption('--local-only')
    return markers_arg, local
@pytest.fixture(scope='session', params=sorted(galaxy_data.keys()))
def plateifu(request):
    """Yield a plate-ifu string, one per galaxy in the canned galaxy_data."""
    return request.param
@pytest.fixture(scope='session', params=origins)
def data_origin(request, check_marks):
    """Yield a data-access origin ('file', 'db' or 'api'), honoring marker selection."""
    marker, local = check_marks
    origin = request.param
    if local and origin != 'file':
        pytest.skip('Skipping non-local modes')
    wants_db = origin == 'db'
    if (marker == 'not uses_db' and wants_db) or (marker == 'uses_db' and not wants_db):
        pytest.skip('Skipping database modes')
    wants_api = origin == 'api'
    if (marker == 'not uses_web' and wants_api) or (marker == 'uses_web' and not wants_api):
        pytest.skip('Skipping web/api modes')
    return origin
@pytest.fixture(scope='session', params=modes)
def mode(request, check_marks):
    """Yield a data mode ('local' or 'remote'), honoring marker selection."""
    marker, local = check_marks
    current = request.param
    if local and current != 'local':
        pytest.skip('Skipping non-local modes')
    is_remote = current == 'remote'
    if (marker == 'not uses_web' and is_remote) or (marker == 'uses_web' and not is_remote):
        pytest.skip('Skipping web/api modes')
    return current
# Config-based FIXTURES
# ----------------------
def read_urlmap():
    """Load the canned urlmap JSON fixture shipped with the test data."""
    urlmap_file = pathlib.Path(__file__).parent / 'data' / 'urlmap.json'
    with urlmap_file.open('r') as fh:
        return json.load(fh)
# Module-level cache of the canned urlmap, loaded once at collection time.
URLMAP = read_urlmap()
@pytest.fixture(autouse=True)
def setup_config():
    """Force public-access mode for every test (autouse)."""
    config.access = 'public'
@pytest.fixture(scope='session', autouse=True)
def set_config():
    """Set config."""
    # Disable external reporting/noise for the whole test session.
    config.use_sentry = False
    config.add_github_message = False
    config._traceback = None
@pytest.fixture()
def check_config():
    """Check the config to see if a db is on; True means no db is configured."""
    return config.db is None
def set_the_config(release):
    """Set config release without parametrizing.
    Using ``set_release`` combined with ``galaxy`` double parametrizes!"""
    # Public access avoids requiring collaboration credentials in tests.
    config.access = 'public'
    config.setRelease(release)
    set_sasurl(loc='local')
    #config.login()
    config._traceback = None
def custom_login():
    """Monkeypatch replacement for ``config.login``: forge a test JWT access token."""
    # Signs a token for user 'test' using the MARVIN_SECRET env var; positional
    # args follow flask_jwt_extended.tokens.encode_access_token's signature.
    config.token = tokens.encode_access_token('test', os.environ.get('MARVIN_SECRET'), 'HS256', False, True, 'user_claims', True, 'identity', 'user_claims')
def custom_auth(self, authtype=None):
    """Monkeypatch replacement for ``Interaction.setAuth`` that forces token auth.

    ``authtype`` is accepted for signature compatibility but always overridden.
    """
    authtype = 'token'
    super(Interaction, self).setAuth(authtype=authtype)
def set_sasurl(loc='local', port=None):
    """Point the Marvin sasurl at ``loc`` ('local' or the 'utah' test server).

    When ``port`` is falsy, fall back to the LOCAL_MARVIN_PORT env var (default 5000).
    """
    effective_port = port if port else int(os.environ.get('LOCAL_MARVIN_PORT', 5000))
    config.switchSasUrl(loc, test=(loc == 'utah'), port=effective_port)
@pytest.fixture(autouse=True)
def mock_urlmap(monkeypatch, mocker):
    """ Mock the urlmap """
    # Replace the live urlmap with the canned URLMAP for every test.
    monkeypatch.setattr(config, 'urlmap', URLMAP)
    mocker.patch('marvin.config', new=config)
@pytest.fixture(scope='session', autouse=True)
def saslocal():
    """Set sasurl to local for the whole session."""
    set_sasurl(loc='local')
@pytest.fixture(scope='session')
def set_release(release):
    """Set the release in the config."""
    config.setRelease(release)
@pytest.fixture(scope='session')
def versions(release):
    """Yield the (drpver, dapver) version pair for a release."""
    drpver, dapver = config.lookUpVersions(release)
    return drpver, dapver
@pytest.fixture(scope='session')
def drpver(versions):
    """Return the DRP version component of the (drpver, dapver) pair."""
    drp_version, _unused = versions
    return drp_version
@pytest.fixture(scope='session')
def dapver(versions):
    """Return the DAP version component of the (drpver, dapver) pair."""
    _unused, dap_version = versions
    return dap_version
# DB-based FIXTURES
# -----------------
class DB(object):
    """Object representing aspects of the marvin db.
    Useful for tests needing direct DB access.
    """
    def __init__(self):
        """Initialize with DBs."""
        self._marvindb = marvindb   # full marvindb handle, kept private
        self.session = marvindb.session   # SQLAlchemy session shortcut
        self.datadb = marvindb.datadb     # DRP schema shortcut
        self.sampledb = marvindb.sampledb # sample schema shortcut
        self.dapdb = marvindb.dapdb       # DAP schema shortcut
@pytest.fixture(scope='session')
def maindb():
    """Yield an instance of the DB object."""
    # Skip db-dependent tests entirely when no database connection is available.
    if not marvindb or not marvindb.isdbconnected:
        pytest.skip('Skipping when no database is connected')
    yield DB()
@pytest.fixture(scope='function')
def db_off():
    """Turn the DB off for a test, and reset it after."""
    config.forceDbOff()
    yield
    config.forceDbOn()
@pytest.fixture(autouse=True)
def db_on():
    """Automatically turn on the DB at collection time."""
    config.forceDbOn()
@pytest.fixture()
def usedb(request):
    """Toggle the local db per the boolean param; return whether a db is active."""
    toggle = config.forceDbOn if request.param else config.forceDbOff
    toggle()
    return config.db is not None
@pytest.fixture()
def checkdb():
    """ Fixture to check if db available and turn off in marvin config """
    # Start from a known db-on state before probing the connection.
    config.forceDbOn()
    nodb = not marvindb or not marvindb.isdbconnected
    if nodb:
        config.forceDbOff()
    yield
    # Always restore db-on after the test.
    config.forceDbOn()
@pytest.fixture(params=dbs)
def db(request, check_marks):
    """Turn local db on or off.

    Use this to parametrize over all db options ('db'/'nodb').  Skips the
    'db' variant when no database is connected, and honors ``--local-only``
    and the ``uses_db`` marker selection.  Yields whether a db is active,
    restoring db-on afterwards.
    """
    if request.param == 'db' and (not marvindb or not marvindb.isdbconnected):
        pytest.skip('Skipping when no database is connected')
    marker, local = check_marks
    if local and request.param != 'nodb':
        pytest.skip('Skipping non-local db modes')
    # Fix: the original compared against 'notdb', which is never a valid
    # param ('db'/'nodb'), so tests marked 'uses_db' skipped even the 'db'
    # variant.  Mirror the comparison used by the data_origin fixture.
    if ((marker == 'not uses_db' and request.param == 'db') or
            (marker == 'uses_db' and request.param != 'db')):
        pytest.skip('Skipping database modes')
    if request.param == 'db':
        config.forceDbOn()
    else:
        config.forceDbOff()
    yield config.db is not None
    config.forceDbOn()
@pytest.fixture()
def exporigin(mode, db):
    """Return the expected data origin for a given (mode, db) combination."""
    expected = {
        ('local', False): 'file',
        ('local', True): 'db',
        ('remote', False): 'api',
        ('remote', True): 'api',
        ('auto', True): 'db',
        ('auto', False): 'file',
    }
    return expected.get((mode, bool(db)))
@pytest.fixture()
def expmode(mode, db):
    """Return the expected access mode for a given (mode, db) combination."""
    expected = {
        ('local', False): None,
        ('local', True): 'local',
        ('remote', False): 'remote',
        ('remote', True): 'remote',
        ('auto', True): 'local',
        ('auto', False): 'remote',
    }
    return expected.get((mode, bool(db)))
@pytest.fixture()
def user(maindb):
    """Yield a temporary 'test' db user, creating it if missing and deleting it after."""
    username = 'test'
    password = 'test'
    model = maindb.datadb.User
    user = maindb.session.query(model).filter(model.username == username).one_or_none()
    if not user:
        user = model(username=username, login_count=1)
        user.set_password(password)
        maindb.session.add(user)
    yield user
    # Teardown: remove the user so repeated runs start clean.
    maindb.session.delete(user)
# Monkeypatch-based FIXTURES
# --------------------------
@pytest.fixture()
def monkeyconfig(request, monkeypatch):
    """Monkeypatch a variable on the Marvin config.
    Example at line 160 in utils/test_general.
    """
    # request.param is an (attribute_name, value) pair via indirect parametrization.
    name, value = request.param
    monkeypatch.setattr(config, name, value=value)
@pytest.fixture()
def monkeyauth(monkeypatch):
    """Patch login/auth to use forged test tokens and the local sasurl endpoints."""
    monkeypatch.setattr(config, 'login', custom_login)
    monkeypatch.setattr(Interaction, 'setAuth', custom_auth)
    monkeypatch.setattr(bconfig, '_public_api_url', config.sasurl)
    monkeypatch.setattr(bconfig, '_collab_api_url', config.sasurl)
# Temp Dir/File-based FIXTURES
# ----------------------------
@pytest.fixture(scope='session')
def temp_scratch(tmp_path_factory):
    """Create a temporary scratch space for reading/writing.
    Use for creating temp dirs and files.
    Example at line 208 in tools/test_query, line 254 in tools/test_results, and
    misc/test_marvin_pickle.
    """
    fn = tmp_path_factory.mktemp('scratch')
    yield fn
    # Drop the reference after the session; pytest handles actual cleanup.
    fn = None
# Object-based FIXTURES
# ---------------------
class Galaxy(object):
    """An example galaxy for Marvin-tools testing."""
    # Base SAS directories resolved from the environment at import time.
    sasbasedir = os.getenv('SAS_BASE_DIR')
    mangaredux = os.getenv('MANGA_SPECTRO_REDUX')
    mangaanalysis = os.getenv('MANGA_SPECTRO_ANALYSIS')
    # Cube stacking directory name used when building file paths.
    dir3d = 'stack'
    def __init__(self, plateifu):
        """Initialize plate and ifu from a 'plate-ifu' string."""
        self.plateifu = plateifu
        self.plate, self.ifu = self.plateifu.split('-')
        self.plate = int(self.plate)
def set_galaxy_data(self, data_origin=None):
"""Set galaxy properties from the configuration file."""
if self.plateifu not in galaxy_data:
return
data = copy.deepcopy(galaxy_data[self.plateifu])
for key in data.keys():
setattr(self, key, data[key])
# sets specfic data per release
releasedata = self.releasedata[self.release]
for key in releasedata.keys():
setattr(self, key, releasedata[key])
# remap NSA drpall names for MPL-4 vs 5+
drpcopy = self.nsa_data['drpall'].copy()
for key, val in self.nsa_data['drpall'].items():
if isinstance(val, list):
newval, newkey = drpcopy.pop(key)
if self.release == 'MPL-4':
drpcopy[newkey] = newval
else:
drpcopy[key] = newval
self.nsa_data['drpall'] = drpcopy
def set_params(self, bintype=None, template=None, release=None):
"""Set bintype, template, etc."""
self.release = release
self.drpver, self.dapver = config.lookUpVersions(self.release)
self.drpall = 'drpall-{0}.fits'.format(self.drpver)
self.bintype = datamodel[self.dapver].get_bintype(bintype)
self.template = datamodel[self.dapver].get_template(template)
self.bintemp = '{0}-{1}'.format(self.bintype.name, self.template.name)
if release == 'MPL-4':
self.niter = int('{0}{1}'.format(self.template.n, self.bintype.n))
else:
self.niter = '*'
self.access_kwargs = {'plate': self.plate, 'ifu': self.ifu, 'drpver': self.drpver,
'dapver': self.dapver, 'dir3d': self.dir3d, 'mpl': self.release,
'bintype': self.bintype.name, 'n': self.niter, 'mode': '*',
'daptype': self.bintemp, 'wave': 'LOG'}
def set_filepaths(self, pathtype='full'):
"""Set the paths for cube, maps, etc."""
self.path = Path(release=self.release, public=True)
self.imgpath = self.path.__getattribute__(pathtype)('mangaimage', **self.access_kwargs)
self.cubepath = self.path.__getattribute__(pathtype)('mangacube', **self.access_kwargs)
self.rsspath = self.path.__getattribute__(pathtype)('mangarss', **self.access_kwargs)
if self.release == 'MPL-4':
self.mapspath = self.path.__getattribute__(pathtype)('mangamap', **self.access_kwargs)
self.modelpath = None
else:
self.access_kwargs.pop('mode')
self.mapspath = self.path.__getattribute__(pathtype)('mangadap', mode='MAPS',
**self.access_kwargs)
self.modelpath = self.path.__getattribute__(pathtype)('mangadap', mode='LOGCUBE',
**self.access_kwargs)
def get_location(self, path):
"""Extract the location from the input path."""
return self.path.location("", full=path)
def partition_path(self, path):
"""Partition the path into non-redux/analysis parts."""
endredux = path.partition(self.mangaredux)[-1]
endanalysis = path.partition(self.mangaanalysis)[-1]
end = (endredux or endanalysis)
return end
def new_path(self, name, newvar):
''' Sets a new path with the subsituted name '''
access_copy = self.access_kwargs.copy()
access_copy['mode'] = '*'
access_copy.update(**newvar)
if name == 'maps':
access_copy['mode'] = 'MAPS'
name = 'mangamap' if self.release == 'MPL-4' else 'mangadap'
elif name == 'modelcube':
access_copy['mode'] = 'LOGCUBE'
name = None if self.release == 'MPL-4' else 'mangadap'
path = self.path.full(name, **access_copy) if name else None
return path
@pytest.fixture(scope='function')
def galaxy(monkeyauth, get_params, plateifu):
    """Yield a fully configured Galaxy instance for use in tests."""
    release, bintype, template = get_params
    set_the_config(release)
    # build and populate the example galaxy for this parameter combo
    instance = Galaxy(plateifu=plateifu)
    instance.set_params(bintype=bintype, template=template, release=release)
    instance.set_filepaths()
    instance.set_galaxy_data()
    yield instance
    instance = None
@pytest.fixture(scope='function')
def cube(galaxy, exporigin, mode):
    """Yield a Marvin Cube for the expected origin combo of (mode + db).

    Covers the six cube origins built from (mode + db) combinations
    [file, db and api].
    """
    kwargs = {'release': galaxy.release, 'mode': mode}
    if exporigin == 'file':
        kwargs['filename'] = galaxy.cubepath
    else:
        kwargs['plateifu'] = galaxy.plateifu
    the_cube = Cube(**kwargs)
    # record how this cube was built so tests can assert on it
    the_cube.exporigin = exporigin
    the_cube.initial_mode = mode
    yield the_cube
    the_cube = None
@pytest.fixture(scope='function')
def modelcube(galaxy, exporigin, mode):
    """Yield a Marvin ModelCube for the expected origin combo of (mode + db).

    Covers the six modelcube origins built from (mode + db) combinations
    [file, db and api].
    """
    kwargs = {'release': galaxy.release, 'mode': mode, 'bintype': galaxy.bintype}
    if exporigin == 'file':
        kwargs['filename'] = galaxy.modelpath
    else:
        kwargs['plateifu'] = galaxy.plateifu
    model_cube = ModelCube(**kwargs)
    # record how this modelcube was built so tests can assert on it
    model_cube.exporigin = exporigin
    model_cube.initial_mode = mode
    yield model_cube
    model_cube = None
@pytest.fixture(scope='function')
def maps(galaxy, exporigin, mode):
    """Yield a Marvin Maps for the expected origin combo of (mode + db).

    Covers the six origins built from (mode + db) combinations
    [file, db and api].
    """
    kwargs = {'release': galaxy.release, 'mode': mode, 'bintype': galaxy.bintype}
    if exporigin == 'file':
        kwargs['filename'] = galaxy.mapspath
    else:
        kwargs['plateifu'] = galaxy.plateifu
    the_maps = Maps(**kwargs)
    # record how this maps object was built so tests can assert on it
    the_maps.exporigin = exporigin
    yield the_maps
    the_maps = None
@pytest.fixture(scope='function')
def maps_release_only(galaxy, release):
    """Return a file-backed Maps with only the release set (other params default)."""
    return Maps(filename=galaxy.mapspath, release=release)
@pytest.fixture(scope='function')
@pytest.mark.uses_db
def query(request, allow_dap, release, mode, db):
    ''' Yields a Query that loops over all modes and db options '''
    # NOTE(review): pytest marks applied to fixtures have no effect in modern
    # pytest — confirm whether @pytest.mark.uses_db still does anything here.
    data = query_data[release]
    set_the_config(release)
    if mode == 'local' and not db:
        pytest.skip('cannot use queries in local mode without a db')
    # the search filter arrives via indirect parametrization, when present
    searchfilter = request.param if hasattr(request, 'param') else None
    q = Query(search_filter=searchfilter, mode=mode, release=release)
    # attach expected results for this release so tests can compare against them
    q.expdata = data
    if q.mode == 'remote':
        pytest.xfail('cannot control for DAP spaxel queries on server side; failing all remotes until then')
    yield q
    # teardown: restore the db connection in case a test disabled it
    config.forceDbOn()
    q = None
|
sdssREPO_NAMEmarvinPATH_START.@marvin_extracted@marvin-main@tests@conftest.py@.PATH_END.py
|
{
"filename": "S_get_number_DL.py",
"repo_name": "SJ001/AI-Feynman",
"repo_path": "AI-Feynman_extracted/AI-Feynman-master/aifeynman/S_get_number_DL.py",
"type": "Python"
}
|
# Calculates the complexity of a number to be used for the Pareto frontier
import numpy as np
def get_number_DL(n):
    """Return the description length (in bits) of the number *n*.

    Used to score constants on the Pareto frontier: integers cost
    ``log2(1 + |n|)``, pi is charged like the small integer 3, and generic
    reals are charged relative to a fixed precision floor.  Non-finite
    values (NaN and +/-inf) receive a large penalty so they are never
    preferred.

    :param n: scalar number (int or float, possibly a numpy scalar)
    :return: non-negative description length in bits
    """
    epsilon = 1e-10
    # NaN and infinities cannot be encoded; penalize heavily.  The original
    # code only checked isnan, so int(inf) raised OverflowError.
    if not np.isfinite(n):
        return 1000000
    # check if (within epsilon of) an integer
    elif np.abs(n - int(n)) < epsilon:
        return np.log2(1 + abs(n))
    # pi is treated as a known constant with the cost of a small integer
    elif np.abs(n - np.pi) < epsilon:
        return np.log2(1 + 3)
    # generic real: bits needed at the given precision floor
    else:
        PrecisionFloorLoss = 1e-14
        return np.log2(1 + (float(n) / PrecisionFloorLoss) ** 2) / 2
|
SJ001REPO_NAMEAI-FeynmanPATH_START.@AI-Feynman_extracted@AI-Feynman-master@aifeynman@S_get_number_DL.py@.PATH_END.py
|
{
"filename": "finder.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/jedi/py3/jedi/evaluate/finder.py",
"type": "Python"
}
|
"""
Searching for names with given scope and name. This is very central in Jedi and
Python. The name resolution is quite complicated with descriptors,
``__getattribute__``, ``__getattr__``, ``global``, etc.
If you want to understand name resolution, please read the first few chapters
in http://blog.ionelmc.ro/2015/02/09/understanding-python-metaclasses/.
Flow checks
+++++++++++
Flow checks are not really mature. There's only a check for ``isinstance``. It
would check whether a flow has the form of ``if isinstance(a, type_or_tuple)``.
Unfortunately every other thing is being ignored (e.g. a == '' would be easy to
check for -> a is a string). There's big potential in these checks.
"""
from parso.python import tree
from parso.tree import search_ancestor
from jedi import debug
from jedi import settings
from jedi.evaluate.context import AbstractInstanceContext
from jedi.evaluate import compiled
from jedi.evaluate import analysis
from jedi.evaluate import flow_analysis
from jedi.evaluate.arguments import TreeArguments
from jedi.evaluate import helpers
from jedi.evaluate.context import iterable
from jedi.evaluate.filters import get_global_filters, TreeNameDefinition
from jedi.evaluate.base_context import ContextSet
from jedi.parser_utils import is_scope, get_parent_scope
class NameFinder(object):
    """Resolve a name (or string) to its definitions within a context.

    Central piece of Jedi's name resolution: walks the applicable filters,
    handles predefined (flow-narrowed) types, and falls back to
    ``__getattr__``/``__getattribute__`` and isinstance-based flow checks.
    """

    def __init__(self, evaluator, context, name_context, name_or_str,
                 position=None, analysis_errors=True):
        self._evaluator = evaluator
        # Make sure that it's not just a syntax tree node.
        self._context = context
        self._name_context = name_context
        self._name = name_or_str
        if isinstance(name_or_str, tree.Name):
            self._string_name = name_or_str.value
        else:
            self._string_name = name_or_str
        self._position = position
        # set by filter_name when a flow predefined a type for this name
        self._found_predefined_types = None
        self._analysis_errors = analysis_errors

    @debug.increase_indent
    def find(self, filters, attribute_lookup):
        """
        :params bool attribute_lookup: Tell to logic if we're accessing the
            attribute or the contents of e.g. a function.
        """
        names = self.filter_name(filters)
        if self._found_predefined_types is not None and names:
            # a flow (if/for/comp_for) predefined types for this name; only
            # use them if the node is actually reachable
            check = flow_analysis.reachability_check(
                context=self._context,
                context_scope=self._context.tree_node,
                node=self._name,
            )
            if check is flow_analysis.UNREACHABLE:
                return ContextSet()
            return self._found_predefined_types
        types = self._names_to_types(names, attribute_lookup)
        # report name-error/attribute-error only when nothing resolved and
        # the name is not a parameter (params are defined by the caller)
        if not names and self._analysis_errors and not types \
                and not (isinstance(self._name, tree.Name) and
                         isinstance(self._name.parent.parent, tree.Param)):
            if isinstance(self._name, tree.Name):
                if attribute_lookup:
                    analysis.add_attribute_error(
                        self._name_context, self._context, self._name)
                else:
                    message = ("NameError: name '%s' is not defined."
                               % self._string_name)
                    analysis.add(self._name_context, 'name-error', self._name, message)
        return types

    def _get_origin_scope(self):
        # Return the scope node the name lives in, or None for plain strings.
        if isinstance(self._name, tree.Name):
            scope = self._name
            while scope.parent is not None:
                # TODO why if classes?
                if not isinstance(scope, tree.Scope):
                    break
                scope = scope.parent
            return scope
        else:
            return None

    def get_filters(self, search_global=False):
        origin_scope = self._get_origin_scope()
        if search_global:
            position = self._position
            # For functions and classes the defaults don't belong to the
            # function and get evaluated in the context before the function. So
            # make sure to exclude the function/class name.
            if origin_scope is not None:
                ancestor = search_ancestor(origin_scope, 'funcdef', 'classdef', 'lambdef')
                lambdef = None
                # NOTE(review): comparing the ancestor node to the string
                # 'lambdef' looks suspicious (node vs str) — confirm intent.
                if ancestor == 'lambdef':
                    # For lambdas it's even more complicated since parts will
                    # be evaluated later.
                    lambdef = ancestor
                    ancestor = search_ancestor(origin_scope, 'funcdef', 'classdef')
                if ancestor is not None:
                    colon = ancestor.children[-2]
                    if position < colon.start_pos:
                        if lambdef is None or position < lambdef.children[-2].start_pos:
                            position = ancestor.start_pos
            return get_global_filters(self._evaluator, self._context, position, origin_scope)
        else:
            return self._context.get_filters(search_global, self._position, origin_scope=origin_scope)

    def filter_name(self, filters):
        """
        Searches names that are defined in a scope (the different
        ``filters``), until a name fits.
        """
        names = []
        # first honor flow-predefined names (e.g. comp_for loop variables)
        if self._context.predefined_names and isinstance(self._name, tree.Name):
            node = self._name
            while node is not None and not is_scope(node):
                node = node.parent
                if node.type in ("if_stmt", "for_stmt", "comp_for"):
                    try:
                        name_dict = self._context.predefined_names[node]
                        types = name_dict[self._string_name]
                    except KeyError:
                        continue
                    else:
                        self._found_predefined_types = types
                        break
        for filter in filters:
            names = filter.get(self._string_name)
            if names:
                if len(names) == 1:
                    n, = names
                    if isinstance(n, TreeNameDefinition):
                        # Something somewhere went terribly wrong. This
                        # typically happens when using goto on an import in an
                        # __init__ file. I think we need a better solution, but
                        # it's kind of hard, because for Jedi it's not clear
                        # that that name has not been defined, yet.
                        if n.tree_name == self._name:
                            if self._name.get_definition().type == 'import_from':
                                continue
                break
        debug.dbg('finder.filter_name %s in (%s): %s@%s',
                  self._string_name, self._context, names, self._position)
        return list(names)

    def _check_getattr(self, inst):
        """Checks for both __getattr__ and __getattribute__ methods"""
        # str is important, because it shouldn't be `Name`!
        name = compiled.create_simple_object(self._evaluator, self._string_name)

        # This is a little bit special. `__getattribute__` is in Python
        # executed before `__getattr__`. But: I know no use case, where
        # this could be practical and where Jedi would return wrong types.
        # If you ever find something, let me know!
        # We are inversing this, because a hand-crafted `__getattribute__`
        # could still call another hand-crafted `__getattr__`, but not the
        # other way around.
        names = (inst.get_function_slot_names(u'__getattr__') or
                 inst.get_function_slot_names(u'__getattribute__'))
        return inst.execute_function_slots(names, name)

    def _names_to_types(self, names, attribute_lookup):
        contexts = ContextSet.from_sets(name.infer() for name in names)

        debug.dbg('finder._names_to_types: %s -> %s', names, contexts)
        if not names and isinstance(self._context, AbstractInstanceContext):
            # handling __getattr__ / __getattribute__
            return self._check_getattr(self._context)

        # Add isinstance and other if/assert knowledge.
        if not contexts and isinstance(self._name, tree.Name) and \
                not isinstance(self._name_context, AbstractInstanceContext):
            flow_scope = self._name
            base_node = self._name_context.tree_node
            if base_node.type == 'comp_for':
                return contexts
            # walk outward through enclosing flows looking for isinstance hints
            while True:
                flow_scope = get_parent_scope(flow_scope, include_flows=True)
                n = _check_flow_information(self._name_context, flow_scope,
                                            self._name, self._position)
                if n is not None:
                    return n
                if flow_scope == base_node:
                    break
        return contexts
def _check_flow_information(context, flow, search_name, pos):
    """ Try to find out the type of a variable just with the information that
    is given by the flows: e.g. It is also responsible for assert checks.::

        if isinstance(k, str):
            k.  # <- completion here

    ensures that `k` is a string.

    Returns a ContextSet of narrowed types, or None when no flow
    information applies (or the feature is disabled in settings).
    """
    if not settings.dynamic_flow_information:
        return None
    result = None
    if is_scope(flow):
        # Check for asserts.
        module_node = flow.get_root_node()
        try:
            names = module_node.get_used_names()[search_name.value]
        except KeyError:
            return None
        # only consider usages inside this scope and before the position,
        # most recent first
        names = reversed([
            n for n in names
            if flow.start_pos <= n.start_pos < (pos or flow.end_pos)
        ])
        for name in names:
            ass = search_ancestor(name, 'assert_stmt')
            if ass is not None:
                result = _check_isinstance_type(context, ass.assertion, search_name)
                if result is not None:
                    return result
    if flow.type in ('if_stmt', 'while_stmt'):
        # children[1::4] picks the test expressions of if/elif branches in
        # the parso tree; ':' tokens are filtered out
        potential_ifs = [c for c in flow.children[1::4] if c != ':']
        for if_test in reversed(potential_ifs):
            if search_name.start_pos > if_test.end_pos:
                return _check_isinstance_type(context, if_test, search_name)
    return result
def _check_isinstance_type(context, element, search_name):
    """Infer types from an ``isinstance(obj, cls_or_tuple)`` test expression.

    Returns a ContextSet of instantiated classes when *element* is an
    isinstance call whose first argument is syntactically the same
    expression as the one *search_name* belongs to; otherwise None.
    """
    try:
        # the asserts below reject anything that is not a plain
        # isinstance(<expr>, <classes>) call; AssertionError -> None
        assert element.type in ('power', 'atom_expr')
        # this might be removed if we analyze and, etc
        assert len(element.children) == 2
        first, trailer = element.children
        assert first.type == 'name' and first.value == 'isinstance'
        assert trailer.type == 'trailer' and trailer.children[0] == '('
        assert len(trailer.children) == 3

        # arglist stuff
        arglist = trailer.children[1]
        args = TreeArguments(context.evaluator, context, arglist, trailer)
        param_list = list(args.unpack())
        # Disallow keyword arguments
        assert len(param_list) == 2
        (key1, lazy_context_object), (key2, lazy_context_cls) = param_list
        assert key1 is None and key2 is None

        call = helpers.call_of_leaf(search_name)
        is_instance_call = helpers.call_of_leaf(lazy_context_object.data)
        # Do a simple get_code comparison. They should just have the same code,
        # and everything will be all right.
        normalize = context.evaluator.grammar._normalize
        assert normalize(is_instance_call) == normalize(call)
    except AssertionError:
        return None

    context_set = ContextSet()
    for cls_or_tup in lazy_context_cls.infer():
        if isinstance(cls_or_tup, iterable.Sequence) and cls_or_tup.array_type == 'tuple':
            # isinstance(x, (A, B)): instantiate every member of the tuple
            for lazy_context in cls_or_tup.py__iter__():
                for context in lazy_context.infer():
                    context_set |= context.execute_evaluated()
        else:
            context_set |= cls_or_tup.execute_evaluated()
    return context_set
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@jedi@py3@jedi@evaluate@finder.py@.PATH_END.py
|
{
"filename": "python-reference_catboostipythonwidget_update_widget.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/python-reference_catboostipythonwidget_update_widget.md",
"type": "Markdown"
}
|
# start
{% include [sections-with-methods-desc-catboostipythonwidget__purpose__full__div](../_includes/work_src/reusage/catboostipythonwidget__purpose__full__div.md) %}
## {{ dl--invoke-format }} {#call-formmat}
```python
start()
```
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@python-reference_catboostipythonwidget_update_widget.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "Goobley/crtaf-py",
"repo_path": "crtaf-py_extracted/crtaf-py-main/README.md",
"type": "Markdown"
}
|
# crtaf: python utilities for the [Common Radiative Transfer Atomic Format](https://github.com/Goobley/CommonRTAtomicFormat).
| | | | |
|---|---|---|---|
| __Maintainer__ | Chris Osborne | __Institution__ | University of Glasgow |
| __License__ |  | __CI__ | 
Description
-----------
`crtaf` is a simple package for managing and manipulating atomic data for radiative transfer (RT) codes, in the CRTAF YAML format.
This format is documented [here](https://github.com/Goobley/CommonRTAtomicFormat).
It is a two-tier format with both a high-level and a simplified representation (to reduce the level of atomic physics necessary to be incorporated in new RT codes), and can be easily added to most codes with the addition of a standard YAML parser.
`crtaf` allows for the simplification of high-level (intended to be written by humans) to low-level formats.
This format supports a common set of functionality present in non-LTE radiative transfer codes such as [Lightweaver](https://github.com/Goobley/Lightweaver), RH, SNAPI.
This package is primarily of use to those working with atomic data to feed into radiative transfer models.
📖 Documentation
----------------
The package is documented via docstrings, and the [format specification](https://github.com/Goobley/CommonRTAtomicFormat).
⬇ Installation
--------------
The package should install through a normal clone and `pip install .` procedure.
It will be added to PyPI shortly.
🤝 Contributing
---------------
We would love for you to get involved.
Please report any bugs encountered on the GitHub issue tracker.
Adding features:
- For new features that don't affect the specification, please submit a pull
request directly or discuss in the issues.
- For features that affect the specification, please open an issue/pull request there first, indicating the necessary format changes/extensions. The implementation changes can then be submitted here.
We require all contributors to abide by the [code of conduct](CODE_OF_CONDUCT.md).
Acknowledgments
---------------
This format is based on the work of Tiago Pereira for [Muspel.jl](https://github.com/tiagopereira/Muspel.jl), along with the atoms as code approach of [Lightweaver](https://github.com/Goobley/Lightweaver)
|
GoobleyREPO_NAMEcrtaf-pyPATH_START.@crtaf-py_extracted@crtaf-py-main@README.md@.PATH_END.py
|
{
"filename": "urls.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/webserver/lasair/apps/mma_watchmap/urls.py",
"type": "Python"
}
|
from . import views
from django.urls import path
# URL routes for the MMA watchmap app.
urlpatterns = [
    # index: list all watchmaps
    path('mma_watchmaps/', views.mma_watchmap_index, name='mma_watchmap_index'),
    # detail view for a single watchmap by primary key
    path('mma_watchmaps/<int:mw_id>/', views.mma_watchmap_detail, name='mma_watchmap_detail'),
    # path('mma_watchmaps/<int:mw_id>/file/', views.mma_watchmap_download, name='mma_watchmap_download'),
]
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@webserver@lasair@apps@mma_watchmap@urls.py@.PATH_END.py
|
{
"filename": "ds_test.py",
"repo_name": "mhammond/pywin32",
"repo_path": "pywin32_extracted/pywin32-main/com/win32comext/directsound/test/ds_test.py",
"type": "Python"
}
|
import os
import struct
import sys
import unittest
import pythoncom
import pywintypes
import win32api
import win32com.directsound.directsound as ds
import win32event
from pywin32_testutil import TestSkipped, find_test_fixture
# next two lines are for for debugging:
# import win32com
# import directsound as ds
WAV_FORMAT_PCM = 1
WAV_HEADER_SIZE = struct.calcsize("<4sl4s4slhhllhh4sl")
def wav_header_unpack(data):
    """Parse a canonical 44-byte WAV header into (WAVEFORMATEX, data length)."""
    (riff, _riffsize, _wave, fmt_tag, fmt_size, fmt_code, n_channels,
     sample_rate, byte_rate, block_align, bits_per_sample,
     data_tag, data_length) = struct.unpack("<4sl4s4slhhllhh4sl", data)
    assert riff == b"RIFF", "invalid wav header"
    # Only the very common layout is accepted here: a 16-byte 'fmt ' chunk
    # directly followed by the 'data' chunk.  Nothing in the format requires
    # this ordering; this helper simply does not handle anything else.
    assert (
        fmt_size == 16 and fmt_tag == b"fmt " and data_tag == b"data"
    ), "cannot understand wav header"
    wfx = pywintypes.WAVEFORMATEX()
    wfx.wFormatTag = fmt_code
    wfx.nChannels = n_channels
    wfx.nSamplesPerSec = sample_rate
    wfx.nAvgBytesPerSec = byte_rate
    wfx.nBlockAlign = block_align
    wfx.wBitsPerSample = bits_per_sample
    return wfx, data_length
def wav_header_pack(wfx, datasize):
    """Build a 44-byte canonical WAV header for *datasize* bytes of PCM data."""
    header_fields = (
        b"RIFF",
        36 + datasize,  # RIFF chunk size: remaining header bytes + data
        b"WAVE",
        b"fmt ",
        16,             # fmt chunk size for plain PCM
        wfx.wFormatTag,
        wfx.nChannels,
        wfx.nSamplesPerSec,
        wfx.nAvgBytesPerSec,
        wfx.nBlockAlign,
        wfx.wBitsPerSample,
        b"data",
        datasize,
    )
    return struct.pack("<4sl4s4slhhllhh4sl", *header_fields)
class WAVEFORMATTest(unittest.TestCase):
    """Tests for the pywintypes.WAVEFORMATEX wrapper type."""

    def test_1_Type(self):
        "WAVEFORMATEX type"
        w = pywintypes.WAVEFORMATEX()
        self.assertTrue(isinstance(w, pywintypes.WAVEFORMATEXType))

    def test_2_Attr(self):
        "WAVEFORMATEX attribute access"
        # A wav header for a soundfile from a CD should look like this...
        w = pywintypes.WAVEFORMATEX()
        w.wFormatTag = pywintypes.WAVE_FORMAT_PCM
        w.nChannels = 2
        w.nSamplesPerSec = 44100
        w.nAvgBytesPerSec = 176400
        w.nBlockAlign = 4
        w.wBitsPerSample = 16
        # each field must round-trip through the wrapper unchanged
        self.assertTrue(w.wFormatTag == 1)
        self.assertTrue(w.nChannels == 2)
        self.assertTrue(w.nSamplesPerSec == 44100)
        self.assertTrue(w.nAvgBytesPerSec == 176400)
        self.assertTrue(w.nBlockAlign == 4)
        self.assertTrue(w.wBitsPerSample == 16)
class DSCAPSTest(unittest.TestCase):
    """Tests for the ds.DSCAPS capability-struct wrapper."""

    def test_1_Type(self):
        "DSCAPS type"
        c = ds.DSCAPS()
        self.assertTrue(isinstance(c, ds.DSCAPSType))

    def test_2_Attr(self):
        "DSCAPS attribute access"
        # round-trip every field through the wrapper with a distinct value
        c = ds.DSCAPS()
        c.dwFlags = 1
        c.dwMinSecondarySampleRate = 2
        c.dwMaxSecondarySampleRate = 3
        c.dwPrimaryBuffers = 4
        c.dwMaxHwMixingAllBuffers = 5
        c.dwMaxHwMixingStaticBuffers = 6
        c.dwMaxHwMixingStreamingBuffers = 7
        c.dwFreeHwMixingAllBuffers = 8
        c.dwFreeHwMixingStaticBuffers = 9
        c.dwFreeHwMixingStreamingBuffers = 10
        c.dwMaxHw3DAllBuffers = 11
        c.dwMaxHw3DStaticBuffers = 12
        c.dwMaxHw3DStreamingBuffers = 13
        c.dwFreeHw3DAllBuffers = 14
        c.dwFreeHw3DStaticBuffers = 15
        c.dwFreeHw3DStreamingBuffers = 16
        c.dwTotalHwMemBytes = 17
        c.dwFreeHwMemBytes = 18
        c.dwMaxContigFreeHwMemBytes = 19
        c.dwUnlockTransferRateHwBuffers = 20
        c.dwPlayCpuOverheadSwBuffers = 21
        self.assertTrue(c.dwFlags == 1)
        self.assertTrue(c.dwMinSecondarySampleRate == 2)
        self.assertTrue(c.dwMaxSecondarySampleRate == 3)
        self.assertTrue(c.dwPrimaryBuffers == 4)
        self.assertTrue(c.dwMaxHwMixingAllBuffers == 5)
        self.assertTrue(c.dwMaxHwMixingStaticBuffers == 6)
        self.assertTrue(c.dwMaxHwMixingStreamingBuffers == 7)
        self.assertTrue(c.dwFreeHwMixingAllBuffers == 8)
        self.assertTrue(c.dwFreeHwMixingStaticBuffers == 9)
        self.assertTrue(c.dwFreeHwMixingStreamingBuffers == 10)
        self.assertTrue(c.dwMaxHw3DAllBuffers == 11)
        self.assertTrue(c.dwMaxHw3DStaticBuffers == 12)
        self.assertTrue(c.dwMaxHw3DStreamingBuffers == 13)
        self.assertTrue(c.dwFreeHw3DAllBuffers == 14)
        self.assertTrue(c.dwFreeHw3DStaticBuffers == 15)
        self.assertTrue(c.dwFreeHw3DStreamingBuffers == 16)
        self.assertTrue(c.dwTotalHwMemBytes == 17)
        self.assertTrue(c.dwFreeHwMemBytes == 18)
        self.assertTrue(c.dwMaxContigFreeHwMemBytes == 19)
        self.assertTrue(c.dwUnlockTransferRateHwBuffers == 20)
        self.assertTrue(c.dwPlayCpuOverheadSwBuffers == 21)
class DSBCAPSTest(unittest.TestCase):
    """Tests for the ds.DSBCAPS (buffer capabilities) wrapper."""

    def test_1_Type(self):
        "DSBCAPS type"
        c = ds.DSBCAPS()
        self.assertTrue(isinstance(c, ds.DSBCAPSType))

    def test_2_Attr(self):
        "DSBCAPS attribute access"
        # round-trip each field through the wrapper with a distinct value
        c = ds.DSBCAPS()
        c.dwFlags = 1
        c.dwBufferBytes = 2
        c.dwUnlockTransferRate = 3
        c.dwPlayCpuOverhead = 4
        self.assertTrue(c.dwFlags == 1)
        self.assertTrue(c.dwBufferBytes == 2)
        self.assertTrue(c.dwUnlockTransferRate == 3)
        self.assertTrue(c.dwPlayCpuOverhead == 4)
class DSCCAPSTest(unittest.TestCase):
    """Tests for the ds.DSCCAPS (capture capabilities) wrapper."""

    def test_1_Type(self):
        "DSCCAPS type"
        c = ds.DSCCAPS()
        self.assertTrue(isinstance(c, ds.DSCCAPSType))

    def test_2_Attr(self):
        "DSCCAPS attribute access"
        # round-trip each field through the wrapper with a distinct value
        c = ds.DSCCAPS()
        c.dwFlags = 1
        c.dwFormats = 2
        c.dwChannels = 4
        self.assertTrue(c.dwFlags == 1)
        self.assertTrue(c.dwFormats == 2)
        self.assertTrue(c.dwChannels == 4)
class DSCBCAPSTest(unittest.TestCase):
    """Tests for the ds.DSCBCAPS (capture buffer capabilities) wrapper."""

    def test_1_Type(self):
        "DSCBCAPS type"
        c = ds.DSCBCAPS()
        self.assertTrue(isinstance(c, ds.DSCBCAPSType))

    def test_2_Attr(self):
        "DSCBCAPS attribute access"
        # round-trip each field through the wrapper with a distinct value
        c = ds.DSCBCAPS()
        c.dwFlags = 1
        c.dwBufferBytes = 2
        self.assertTrue(c.dwFlags == 1)
        self.assertTrue(c.dwBufferBytes == 2)
class DSBUFFERDESCTest(unittest.TestCase):
    """Tests for the ds.DSBUFFERDESC (sound buffer description) wrapper."""

    def test_1_Type(self):
        "DSBUFFERDESC type"
        c = ds.DSBUFFERDESC()
        self.assertTrue(isinstance(c, ds.DSBUFFERDESCType))

    def test_2_Attr(self):
        "DSBUFFERDESC attribute access"
        # set all fields, including a nested WAVEFORMATEX, and read them back
        c = ds.DSBUFFERDESC()
        c.dwFlags = 1
        c.dwBufferBytes = 2
        c.lpwfxFormat = pywintypes.WAVEFORMATEX()
        c.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
        c.lpwfxFormat.nChannels = 2
        c.lpwfxFormat.nSamplesPerSec = 44100
        c.lpwfxFormat.nAvgBytesPerSec = 176400
        c.lpwfxFormat.nBlockAlign = 4
        c.lpwfxFormat.wBitsPerSample = 16
        self.assertTrue(c.dwFlags == 1)
        self.assertTrue(c.dwBufferBytes == 2)
        self.assertTrue(c.lpwfxFormat.wFormatTag == 1)
        self.assertTrue(c.lpwfxFormat.nChannels == 2)
        self.assertTrue(c.lpwfxFormat.nSamplesPerSec == 44100)
        self.assertTrue(c.lpwfxFormat.nAvgBytesPerSec == 176400)
        self.assertTrue(c.lpwfxFormat.nBlockAlign == 4)
        self.assertTrue(c.lpwfxFormat.wBitsPerSample == 16)

    def invalid_format(self, c):
        # helper: assigning a non-WAVEFORMATEX must raise ValueError
        c.lpwfxFormat = 17

    def test_3_invalid_format(self):
        "DSBUFFERDESC invalid lpwfxFormat assignment"
        c = ds.DSBUFFERDESC()
        self.assertRaises(ValueError, self.invalid_format, c)
class DSCBUFFERDESCTest(unittest.TestCase):
    """Tests for the ds.DSCBUFFERDESC (capture buffer description) wrapper."""

    def test_1_Type(self):
        "DSCBUFFERDESC type"
        c = ds.DSCBUFFERDESC()
        self.assertTrue(isinstance(c, ds.DSCBUFFERDESCType))

    def test_2_Attr(self):
        "DSCBUFFERDESC attribute access"
        # set all fields, including a nested WAVEFORMATEX, and read them back
        c = ds.DSCBUFFERDESC()
        c.dwFlags = 1
        c.dwBufferBytes = 2
        c.lpwfxFormat = pywintypes.WAVEFORMATEX()
        c.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
        c.lpwfxFormat.nChannels = 2
        c.lpwfxFormat.nSamplesPerSec = 44100
        c.lpwfxFormat.nAvgBytesPerSec = 176400
        c.lpwfxFormat.nBlockAlign = 4
        c.lpwfxFormat.wBitsPerSample = 16
        self.assertTrue(c.dwFlags == 1)
        self.assertTrue(c.dwBufferBytes == 2)
        self.assertTrue(c.lpwfxFormat.wFormatTag == 1)
        self.assertTrue(c.lpwfxFormat.nChannels == 2)
        self.assertTrue(c.lpwfxFormat.nSamplesPerSec == 44100)
        self.assertTrue(c.lpwfxFormat.nAvgBytesPerSec == 176400)
        self.assertTrue(c.lpwfxFormat.nBlockAlign == 4)
        self.assertTrue(c.lpwfxFormat.wBitsPerSample == 16)

    def invalid_format(self, c):
        # helper: assigning a non-WAVEFORMATEX must raise ValueError
        c.lpwfxFormat = 17

    def test_3_invalid_format(self):
        "DSCBUFFERDESC invalid lpwfxFormat assignment"
        c = ds.DSCBUFFERDESC()
        self.assertRaises(ValueError, self.invalid_format, c)
class DirectSoundTest(unittest.TestCase):
    # basic tests - mostly just exercise the functions

    def testEnumerate(self):
        """DirectSoundEnumerate() sanity tests"""
        devices = ds.DirectSoundEnumerate()
        # this might fail on machines without a sound card
        self.assertTrue(len(devices))
        # if we have an entry, it must be a tuple of size 3
        self.assertTrue(len(devices[0]) == 3)

    def testCreate(self):
        """DirectSoundCreate()"""
        try:
            d = ds.DirectSoundCreate(None, None)
        except pythoncom.com_error as exc:
            # no sound driver present: skip rather than fail
            if exc.hresult != ds.DSERR_NODRIVER:
                raise
            raise TestSkipped(exc)

    def testPlay(self):
        """Mesdames et Messieurs, la cour de Devin Dazzle"""
        # relative to 'testall.py' in the win32com test suite.
        extra = os.path.join(
            os.path.dirname(sys.argv[0]), "../../win32comext/directsound/test"
        )
        fname = find_test_fixture("01-Intro.wav", extra)
        with open(fname, "rb") as f:
            # parse the 44-byte header to get the format and payload size
            hdr = f.read(WAV_HEADER_SIZE)
            wfx, size = wav_header_unpack(hdr)
            try:
                d = ds.DirectSoundCreate(None, None)
            except pythoncom.com_error as exc:
                if exc.hresult != ds.DSERR_NODRIVER:
                    raise
                raise TestSkipped(exc)
            d.SetCooperativeLevel(None, ds.DSSCL_PRIORITY)
            sdesc = ds.DSBUFFERDESC()
            sdesc.dwFlags = ds.DSBCAPS_STICKYFOCUS | ds.DSBCAPS_CTRLPOSITIONNOTIFY
            sdesc.dwBufferBytes = size
            sdesc.lpwfxFormat = wfx
            buffer = d.CreateSoundBuffer(sdesc, None)
            # ask for an event to be signalled when playback stops
            event = win32event.CreateEvent(None, 0, 0, None)
            notify = buffer.QueryInterface(ds.IID_IDirectSoundNotify)
            notify.SetNotificationPositions((ds.DSBPN_OFFSETSTOP, event))
            buffer.Update(0, f.read(size))
            buffer.Play(0)
            # block until the stop notification fires
            win32event.WaitForSingleObject(event, -1)
class DirectSoundCaptureTest(unittest.TestCase):
    # basic tests - mostly just exercise the functions

    def testEnumerate(self):
        """DirectSoundCaptureEnumerate() sanity tests"""
        devices = ds.DirectSoundCaptureEnumerate()
        # this might fail on machines without a sound card
        self.assertTrue(len(devices))
        # if we have an entry, it must be a tuple of size 3
        self.assertTrue(len(devices[0]) == 3)

    def testCreate(self):
        """DirectSoundCreate()"""
        try:
            d = ds.DirectSoundCaptureCreate(None, None)
        except pythoncom.com_error as exc:
            # no sound driver present: skip rather than fail
            if exc.hresult != ds.DSERR_NODRIVER:
                raise
            raise TestSkipped(exc)

    def testRecord(self):
        """Capture two seconds of CD-quality audio and write it as a wav file."""
        try:
            d = ds.DirectSoundCaptureCreate(None, None)
        except pythoncom.com_error as exc:
            if exc.hresult != ds.DSERR_NODRIVER:
                raise
            raise TestSkipped(exc)
        sdesc = ds.DSCBUFFERDESC()
        sdesc.dwBufferBytes = 352800  # 2 seconds
        sdesc.lpwfxFormat = pywintypes.WAVEFORMATEX()
        sdesc.lpwfxFormat.wFormatTag = pywintypes.WAVE_FORMAT_PCM
        sdesc.lpwfxFormat.nChannels = 2
        sdesc.lpwfxFormat.nSamplesPerSec = 44100
        sdesc.lpwfxFormat.nAvgBytesPerSec = 176400
        sdesc.lpwfxFormat.nBlockAlign = 4
        sdesc.lpwfxFormat.wBitsPerSample = 16
        buffer = d.CreateCaptureBuffer(sdesc)
        # signal an event when capture reaches the end of the buffer
        event = win32event.CreateEvent(None, 0, 0, None)
        notify = buffer.QueryInterface(ds.IID_IDirectSoundNotify)
        notify.SetNotificationPositions((ds.DSBPN_OFFSETSTOP, event))
        buffer.Start(0)
        win32event.WaitForSingleObject(event, -1)
        event.Close()
        # read the captured samples and dump them with a canonical wav header
        data = buffer.Update(0, 352800)
        fname = os.path.join(win32api.GetTempPath(), "test_directsound_record.wav")
        f = open(fname, "wb")
        f.write(wav_header_pack(sdesc.lpwfxFormat, 352800))
        f.write(data)
        f.close()
# Run the DirectSound test suite when executed as a script.
if __name__ == "__main__":
    unittest.main()
|
mhammondREPO_NAMEpywin32PATH_START.@pywin32_extracted@pywin32-main@com@win32comext@directsound@test@ds_test.py@.PATH_END.py
|
{
"filename": "_fill.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolar/_fill.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated ``scatterpolar.fill`` property."""

    def __init__(self, plotly_name="fill", parent_name="scatterpolar", **kwargs):
        super(FillValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # edits to this property trigger a full recalculation
            edit_type=kwargs.pop("edit_type", "calc"),
            # the only fill modes scatterpolar supports
            values=kwargs.pop("values", ["none", "toself", "tonext"]),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolar@_fill.py@.PATH_END.py
|
{
"filename": "terminal.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pytest/py3/_pytest/terminal.py",
"type": "Python"
}
|
"""Terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
import argparse
import dataclasses
import datetime
import inspect
import platform
import sys
import textwrap
import warnings
from collections import Counter
from functools import partial
from pathlib import Path
from typing import Any
from typing import Callable
from typing import cast
from typing import ClassVar
from typing import Dict
from typing import Generator
from typing import List
from typing import Mapping
from typing import NamedTuple
from typing import Optional
from typing import Sequence
from typing import Set
from typing import TextIO
from typing import Tuple
from typing import TYPE_CHECKING
from typing import Union
import pluggy
import _pytest._version
from _pytest import nodes
from _pytest import timing
from _pytest._code import ExceptionInfo
from _pytest._code.code import ExceptionRepr
from _pytest._io import TerminalWriter
from _pytest._io.wcwidth import wcswidth
from _pytest.assertion.util import running_on_ci
from _pytest.compat import final
from _pytest.config import _PluggyPlugin
from _pytest.config import Config
from _pytest.config import ExitCode
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item
from _pytest.nodes import Node
from _pytest.pathlib import absolutepath
from _pytest.pathlib import bestrelpath
from _pytest.reports import BaseReport
from _pytest.reports import CollectReport
from _pytest.reports import TestReport
if TYPE_CHECKING:
from typing_extensions import Literal
from _pytest.main import Session
# Minimum interval (seconds) between rewrites of the "collecting ..." line.
REPORT_COLLECTING_RESOLUTION = 0.5

# Result categories the terminal reporter knows how to summarize/color.
KNOWN_TYPES = (
    "failed",
    "passed",
    "skipped",
    "deselected",
    "xfailed",
    "xpassed",
    "warnings",
    "error",
)

# Default value of the -r option: show (f)ailed and (E)rror summaries.
_REPORTCHARS_DEFAULT = "fE"
class MoreQuietAction(argparse.Action):
    """A modified copy of the argparse count action which counts down and updates
    the legacy quiet attribute at the same time.

    Used to unify verbosity handling.
    """

    def __init__(
        self,
        option_strings: Sequence[str],
        dest: str,
        default: object = None,
        required: bool = False,
        help: Optional[str] = None,
    ) -> None:
        # The flag consumes no command-line value, hence nargs=0.
        super().__init__(
            option_strings=option_strings,
            dest=dest,
            nargs=0,
            default=default,
            required=required,
            help=help,
        )

    def __call__(
        self,
        parser: argparse.ArgumentParser,
        namespace: argparse.Namespace,
        values: Union[str, Sequence[object], None],
        option_string: Optional[str] = None,
    ) -> None:
        # Each occurrence lowers verbosity by one ...
        setattr(namespace, self.dest, getattr(namespace, self.dest, 0) - 1)
        # ... and bumps the legacy "quiet" counter by one.
        # todo Deprecate config.quiet
        namespace.quiet = getattr(namespace, "quiet", 0) + 1
class TestShortLogReport(NamedTuple):
    """Used to store the test status result category, shortletter and verbose word.

    For example ``"rerun", "R", ("RERUN", {"yellow": True})``.

    :ivar category:
        The class of result, for example ``"passed"``, ``"skipped"``, ``"error"``, or the empty string.

    :ivar letter:
        The short letter shown as testing progresses, for example ``"."``, ``"s"``, ``"E"``, or the empty string.

    :ivar word:
        Verbose word is shown as testing progresses in verbose mode, for example ``"PASSED"``, ``"SKIPPED"``,
        ``"ERROR"``, or the empty string.
    """

    category: str
    letter: str
    # Either a plain word or (word, markup-dict) controlling terminal color.
    word: Union[str, Tuple[str, Mapping[str, bool]]]
def pytest_addoption(parser: Parser) -> None:
    """Register all terminal-reporting command line options and ini settings."""
    group = parser.getgroup("terminal reporting", "Reporting", after="general")
    # Verbosity is a single integer counter: -v increments, -q (via
    # MoreQuietAction) decrements, --verbosity sets it directly.
    group._addoption(
        "-v",
        "--verbose",
        action="count",
        default=0,
        dest="verbose",
        help="Increase verbosity",
    )
    group._addoption(
        "--no-header",
        action="store_true",
        default=False,
        dest="no_header",
        help="Disable header",
    )
    group._addoption(
        "--no-summary",
        action="store_true",
        default=False,
        dest="no_summary",
        help="Disable summary",
    )
    group._addoption(
        "-q",
        "--quiet",
        action=MoreQuietAction,
        default=0,
        dest="verbose",
        help="Decrease verbosity",
    )
    group._addoption(
        "--verbosity",
        dest="verbose",
        type=int,
        default=0,
        help="Set verbosity. Default: 0.",
    )
    group._addoption(
        "-r",
        action="store",
        dest="reportchars",
        default=_REPORTCHARS_DEFAULT,
        metavar="chars",
        help="Show extra test summary info as specified by chars: (f)ailed, "
        "(E)rror, (s)kipped, (x)failed, (X)passed, "
        "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. "
        "(w)arnings are enabled by default (see --disable-warnings), "
        "'N' can be used to reset the list. (default: 'fE').",
    )
    group._addoption(
        "--disable-warnings",
        "--disable-pytest-warnings",
        default=False,
        dest="disable_warnings",
        action="store_true",
        help="Disable warnings summary",
    )
    group._addoption(
        "-l",
        "--showlocals",
        action="store_true",
        dest="showlocals",
        default=False,
        help="Show locals in tracebacks (disabled by default)",
    )
    # Both --showlocals and --no-showlocals write to the same dest so the
    # last one on the command line wins.
    group._addoption(
        "--no-showlocals",
        action="store_false",
        dest="showlocals",
        help="Hide locals in tracebacks (negate --showlocals passed through addopts)",
    )
    group._addoption(
        "--tb",
        metavar="style",
        action="store",
        dest="tbstyle",
        default="auto",
        choices=["auto", "long", "short", "no", "line", "native"],
        help="Traceback print mode (auto/long/short/line/native/no)",
    )
    group._addoption(
        "--show-capture",
        action="store",
        dest="showcapture",
        choices=["no", "stdout", "stderr", "log", "all"],
        default="all",
        help="Controls how captured stdout/stderr/log is shown on failed tests. "
        "Default: all.",
    )
    group._addoption(
        "--fulltrace",
        "--full-trace",
        action="store_true",
        default=False,
        help="Don't cut any tracebacks (default is to cut)",
    )
    group._addoption(
        "--color",
        metavar="color",
        action="store",
        dest="color",
        default="auto",
        choices=["yes", "no", "auto"],
        help="Color terminal output (yes/no/auto)",
    )
    group._addoption(
        "--code-highlight",
        default="yes",
        choices=["yes", "no"],
        help="Whether code should be highlighted (only if --color is also enabled). "
        "Default: yes.",
    )
    parser.addini(
        "console_output_style",
        help='Console output: "classic", or with additional progress information '
        '("progress" (percentage) | "count" | "progress-even-when-capture-no" (forces '
        "progress even when capture=no)",
        default="progress",
    )
def pytest_configure(config: Config) -> None:
    """Create the TerminalReporter plugin instance and register it."""
    reporter = TerminalReporter(config, sys.stdout)
    config.pluginmanager.register(reporter, "terminalreporter")
    if config.option.debug or config.option.traceconfig:

        def mywriter(tags, args):
            # Echo pytest's internal config trace through the reporter.
            reporter.write_line("[traceconfig] " + " ".join(str(arg) for arg in args))

        config.trace.root.setprocessor("pytest:config", mywriter)
def getreportopt(config: Config) -> str:
    """Expand the -r option characters into the effective report-option string.

    Handles the legacy "F"/"S" aliases, the "a"/"A" shorthands, the "N"
    reset, and the implicit warnings summary (unless --disable-warnings).
    """
    chars: str = config.option.reportchars
    opts = ""
    for ch in chars:
        # "F" and "S" are deprecated aliases for "f" and "s".
        if ch in ("F", "S"):
            ch = ch.lower()
        if ch == "a":
            opts = "sxXEf"
        elif ch == "A":
            opts = "PpsxXEf"
        elif ch == "N":
            # Reset everything accumulated so far.
            opts = ""
        elif ch not in opts:
            opts += ch
    # Warnings are summarized by default; --disable-warnings removes them.
    if config.option.disable_warnings:
        opts = opts.replace("w", "")
    elif "w" not in opts:
        opts = "w" + opts
    return opts
@hookimpl(trylast=True)  # after _pytest.runner
def pytest_report_teststatus(report: BaseReport) -> Tuple[str, str, str]:
    """Return the default (category, short letter, verbose word) for *report*."""
    outcome: str = report.outcome
    if report.passed:
        letter = "."
    elif report.skipped:
        letter = "s"
    else:
        letter = "F"
    # A failure outside the test call itself is reported as an error.
    if outcome == "failed" and report.when in ("collect", "setup", "teardown"):
        outcome = "error"
        letter = "E"
    return outcome, letter, outcome.upper()
@dataclasses.dataclass
class WarningReport:
    """Simple structure to hold warnings information captured by ``pytest_warning_recorded``.

    :ivar str message:
        User friendly message about the warning.
    :ivar str|None nodeid:
        nodeid that generated the warning (see ``get_location``).
    :ivar tuple fslocation:
        File system location of the source of the warning (see ``get_location``).
    """

    message: str
    nodeid: Optional[str] = None
    fslocation: Optional[Tuple[str, int]] = None

    count_towards_summary: ClassVar = True

    def get_location(self, config: Config) -> Optional[str]:
        """Return the more user-friendly information about the location of a warning, or None."""
        # A nodeid is the most precise location we can give.
        if self.nodeid:
            return self.nodeid
        if not self.fslocation:
            return None
        filename, linenum = self.fslocation
        relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename))
        return f"{relpath}:{linenum}"
@final
class TerminalReporter:
def __init__(self, config: Config, file: Optional[TextIO] = None) -> None:
    """Set up the reporter around *config*, writing to *file* (default stdout)."""
    import _pytest.config

    self.config = config
    self._numcollected = 0
    self._session: Optional[Session] = None
    self._showfspath: Optional[bool] = None

    # Maps result category -> list of reports/items.
    self.stats: Dict[str, List[Any]] = {}
    self._main_color: Optional[str] = None
    self._known_types: Optional[List[str]] = None
    self.startpath = config.invocation_params.dir
    if file is None:
        file = sys.stdout
    self._tw = _pytest.config.create_terminal_writer(config, file)
    self._screen_width = self._tw.fullwidth
    # Last path (or sentinel) written; used to group output per file.
    self.currentfspath: Union[None, Path, str, int] = None
    self.reportchars = getreportopt(config)
    self.hasmarkup = self._tw.hasmarkup
    self.isatty = file.isatty()
    self._progress_nodeids_reported: Set[str] = set()
    self._show_progress_info = self._determine_show_progress_info()
    self._collect_report_last_write: Optional[float] = None
    self._already_displayed_warnings: Optional[int] = None
    self._keyboardinterrupt_memo: Optional[ExceptionRepr] = None
def _determine_show_progress_info(self) -> "Literal['progress', 'count', False]":
    """Return whether we should display progress information based on the current config."""
    # do not show progress if we are not capturing output (#3038) unless explicitly
    # overridden by progress-even-when-capture-no
    if (
        self.config.getoption("capture", "no") == "no"
        and self.config.getini("console_output_style")
        != "progress-even-when-capture-no"
    ):
        return False
    # do not show progress if we are showing fixture setup/teardown
    if self.config.getoption("setupshow", False):
        return False
    cfg: str = self.config.getini("console_output_style")
    if cfg == "progress" or cfg == "progress-even-when-capture-no":
        return "progress"
    elif cfg == "count":
        return "count"
    else:
        return False
@property
def verbosity(self) -> int:
    # Net verbosity as accumulated from -v / -q / --verbosity.
    verbosity: int = self.config.option.verbose
    return verbosity
@property
def showheader(self) -> bool:
    # The session header is suppressed in quiet mode (-q and below).
    return self.verbosity >= 0
@property
def no_header(self) -> bool:
    # True when --no-header was given.
    return bool(self.config.option.no_header)
@property
def no_summary(self) -> bool:
    # True when --no-summary was given.
    return bool(self.config.option.no_summary)
@property
def showfspath(self) -> bool:
    # Defaults to the verbosity-based behavior until explicitly overridden
    # via the setter.
    if self._showfspath is None:
        return self.verbosity >= 0
    return self._showfspath
@showfspath.setter
def showfspath(self, value: Optional[bool]) -> None:
    # None restores the verbosity-derived default.
    self._showfspath = value
@property
def showlongtestinfo(self) -> bool:
    # One full line per test is only written in verbose mode.
    return self.verbosity > 0
def hasopt(self, char: str) -> bool:
    """Return True if the -r report character *char* is active."""
    # Long stat names map to their single-character -r aliases.
    aliases = {"xfailed": "x", "skipped": "s"}
    return aliases.get(char, char) in self.reportchars
def write_fspath_result(self, nodeid: str, res, **markup: bool) -> None:
    """Write *res* (e.g. a progress letter), prefixing it with the test's
    file path on a new line whenever the path changes."""
    fspath = self.config.rootpath / nodeid.split("::")[0]
    if self.currentfspath is None or fspath != self.currentfspath:
        # Finish the previous file's line with its progress info first.
        if self.currentfspath is not None and self._show_progress_info:
            self._write_progress_information_filling_space()
        self.currentfspath = fspath
        relfspath = bestrelpath(self.startpath, fspath)
        self._tw.line()
        self._tw.write(relfspath + " ")
    self._tw.write(res, flush=True, **markup)
def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None:
    """Write *prefix* on its own line (once) and optionally append *extra*."""
    if self.currentfspath != prefix:
        self._tw.line()
        self.currentfspath = prefix
        self._tw.write(prefix)
    if extra:
        self._tw.write(extra, **kwargs)
        # -2 is a sentinel meaning "line already has extra content".
        self.currentfspath = -2
def ensure_newline(self) -> None:
    """Terminate the current output line, if any, and reset path tracking."""
    if not self.currentfspath:
        return
    self._tw.line()
    self.currentfspath = None
def wrap_write(
    self,
    content: str,
    *,
    flush: bool = False,
    margin: int = 8,
    line_sep: str = "\n",
    **markup: bool,
) -> None:
    """Wrap message with margin for progress info."""
    width_of_current_line = self._tw.width_of_current_line
    # Pad with the current line's width so textwrap breaks at the right
    # column, then strip that padding from the first wrapped line.
    wrapped = line_sep.join(
        textwrap.wrap(
            " " * width_of_current_line + content,
            width=self._screen_width - margin,
            drop_whitespace=True,
            replace_whitespace=False,
        ),
    )
    wrapped = wrapped[width_of_current_line:]
    self._tw.write(wrapped, flush=flush, **markup)
def write(self, content: str, *, flush: bool = False, **markup: bool) -> None:
    # Thin passthrough to the terminal writer.
    self._tw.write(content, flush=flush, **markup)
def flush(self) -> None:
    # Flush any buffered terminal output.
    self._tw.flush()
def write_line(self, line: Union[str, bytes], **markup: bool) -> None:
    """Write *line* starting on a fresh terminal line, decoding bytes if needed."""
    if isinstance(line, str):
        text = line
    else:
        text = str(line, errors="replace")
    self.ensure_newline()
    self._tw.line(text, **markup)
def rewrite(self, line: str, **markup: bool) -> None:
    """Rewinds the terminal cursor to the beginning and writes the given line.

    :param erase:
        If True, will also add spaces until the full terminal width to ensure
        previous lines are properly erased.

    The rest of the keyword arguments are markup instructions.
    """
    erase = markup.pop("erase", False)
    if erase:
        # -1 leaves room for the cursor so the terminal does not wrap.
        fill_count = self._tw.fullwidth - len(line) - 1
        fill = " " * fill_count
    else:
        fill = ""
    line = str(line)
    self._tw.write("\r" + line + fill, **markup)
def write_sep(
    self,
    sep: str,
    title: Optional[str] = None,
    fullwidth: Optional[int] = None,
    **markup: bool,
) -> None:
    """Write a separator line of *sep* characters, optionally titled."""
    self.ensure_newline()
    self._tw.sep(sep, title, fullwidth, **markup)
def section(self, title: str, sep: str = "=", **kw: bool) -> None:
    # Titled separator without forcing a newline first.
    self._tw.sep(sep, title, **kw)
def line(self, msg: str, **kw: bool) -> None:
    # Thin passthrough to the terminal writer.
    self._tw.line(msg, **kw)
def _add_stats(self, category: str, items: Sequence[Any]) -> None:
    """Append *items* under *category*, refreshing the main color when a
    category is seen for the first time."""
    is_new_category = category not in self.stats
    self.stats.setdefault(category, []).extend(items)
    if is_new_category:
        self._set_main_color()
def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool:
    """Print an internal error representation, one prefixed line at a time."""
    prefix = "INTERNALERROR> "
    for excline in str(excrepr).split("\n"):
        self.write_line(prefix + excline)
    return True
def pytest_warning_recorded(
    self,
    warning_message: warnings.WarningMessage,
    nodeid: str,
) -> None:
    """Record a captured warning so it can be shown in the warnings summary."""
    from _pytest.warnings import warning_record_to_str

    fslocation = warning_message.filename, warning_message.lineno
    message = warning_record_to_str(warning_message)

    warning_report = WarningReport(
        fslocation=fslocation, message=message, nodeid=nodeid
    )
    self._add_stats("warnings", [warning_report])
def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
    """With --traceconfig, announce each plugin registration."""
    if self.config.option.traceconfig:
        msg = f"PLUGIN registered: {plugin}"
        # XXX This event may happen during setup/teardown time
        # which unfortunately captures our output here
        # which garbles our output if we use self.write_line.
        self.write_line(msg)
def pytest_deselected(self, items: Sequence[Item]) -> None:
    # Track deselected items for the collection/summary counts.
    self._add_stats("deselected", items)
def pytest_runtest_logstart(
    self, nodeid: str, location: Tuple[str, Optional[int], str]
) -> None:
    """Write the test location (verbose) or file path before a test runs."""
    # Ensure that the path is printed before the
    # 1st test of a module starts running.
    if self.showlongtestinfo:
        line = self._locationline(nodeid, *location)
        self.write_ensure_prefix(line, "")
        self.flush()
    elif self.showfspath:
        self.write_fspath_result(nodeid, "")
        self.flush()
def pytest_runtest_logreport(self, report: TestReport) -> None:
    """Record the report in the stats and write per-test progress output.

    In quiet/normal mode only the short letter is written; in verbose mode
    a full line with location, verbose word and (for skips/xfails) the
    reason is written. xdist workers get their gateway id prefixed.
    """
    self._tests_ran = True
    rep = report

    res = TestShortLogReport(
        *self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
    )
    category, letter, word = res.category, res.letter, res.word
    # The hook may return (word, markup) to control coloring explicitly.
    if not isinstance(word, tuple):
        markup = None
    else:
        word, markup = word
    self._add_stats(category, [rep])
    if not letter and not word:
        # Probably passed setup/teardown.
        return
    running_xdist = hasattr(rep, "node")
    if markup is None:
        was_xfail = hasattr(report, "wasxfail")
        if rep.passed and not was_xfail:
            markup = {"green": True}
        elif rep.passed and was_xfail:
            markup = {"yellow": True}
        elif rep.failed:
            markup = {"red": True}
        elif rep.skipped:
            markup = {"yellow": True}
        else:
            markup = {}
    if self.verbosity <= 0:
        self._tw.write(letter, **markup)
    else:
        self._progress_nodeids_reported.add(rep.nodeid)
        line = self._locationline(rep.nodeid, *rep.location)
        if not running_xdist:
            self.write_ensure_prefix(line, word, **markup)
            if rep.skipped or hasattr(report, "wasxfail"):
                reason = _get_raw_skip_reason(rep)
                if self.config.option.verbose < 2:
                    # Trim the reason so the progress column still fits.
                    available_width = (
                        (self._tw.fullwidth - self._tw.width_of_current_line)
                        - len(" [100%]")
                        - 1
                    )
                    formatted_reason = _format_trimmed(
                        " ({})", reason, available_width
                    )
                else:
                    formatted_reason = f" ({reason})"

                if reason and formatted_reason is not None:
                    self.wrap_write(formatted_reason)
            if self._show_progress_info:
                self._write_progress_information_filling_space()
        else:
            self.ensure_newline()
            self._tw.write("[%s]" % rep.node.gateway.id)
            if self._show_progress_info:
                self._tw.write(
                    self._get_progress_information_message() + " ", cyan=True
                )
            else:
                self._tw.write(" ")
            self._tw.write(word, **markup)
            self._tw.write(" " + line)
            self.currentfspath = -2
    self.flush()
@property
def _is_last_item(self) -> bool:
    # True once every collected test has had its progress reported.
    assert self._session is not None
    reported = len(self._progress_nodeids_reported)
    return reported == self._session.testscollected
def pytest_runtest_logfinish(self, nodeid: str) -> None:
    """After a test finishes, write progress info when nearing the screen edge."""
    assert self._session
    if self.verbosity <= 0 and self._show_progress_info:
        # Width the progress marker will need at the end of the line.
        if self._show_progress_info == "count":
            num_tests = self._session.testscollected
            progress_length = len(f" [{num_tests}/{num_tests}]")
        else:
            progress_length = len(" [100%]")

        self._progress_nodeids_reported.add(nodeid)

        if self._is_last_item:
            self._write_progress_information_filling_space()
        else:
            main_color, _ = self._get_main_color()
            w = self._width_of_current_line
            past_edge = w + progress_length + 1 >= self._screen_width
            if past_edge:
                msg = self._get_progress_information_message()
                self._tw.write(msg + "\n", **{main_color: True})
def _get_progress_information_message(self) -> str:
    """Build the " [ 55%]" or " [12/34]" progress marker string."""
    assert self._session
    collected = self._session.testscollected
    if self._show_progress_info == "count":
        if collected:
            progress = self._progress_nodeids_reported
            # Pad the counter to the width of the total for stable alignment.
            counter_format = f"{{:{len(str(collected))}d}}"
            format_string = f" [{counter_format}/{{}}]"
            return format_string.format(len(progress), collected)
        return f" [ {collected} / {collected} ]"
    else:
        if collected:
            return " [{:3d}%]".format(
                len(self._progress_nodeids_reported) * 100 // collected
            )
        return " [100%]"
def _write_progress_information_filling_space(self) -> None:
    """Right-align the progress marker at the end of the current line."""
    color, _ = self._get_main_color()
    msg = self._get_progress_information_message()
    w = self._width_of_current_line
    fill = self._tw.fullwidth - w - 1
    self.write(msg.rjust(fill), flush=True, **{color: True})
@property
def _width_of_current_line(self) -> int:
    """Return the width of the current line."""
    return self._tw.width_of_current_line
def pytest_collection(self) -> None:
    """Announce the start of collection (interactively rewritten on a tty)."""
    if self.isatty:
        if self.config.option.verbose >= 0:
            self.write("collecting ... ", flush=True, bold=True)
            self._collect_report_last_write = timing.time()
    elif self.config.option.verbose >= 1:
        self.write("collecting ... ", flush=True, bold=True)
def pytest_collectreport(self, report: CollectReport) -> None:
    """Track collection errors/skips and count the collected items."""
    if report.failed:
        self._add_stats("error", [report])
    elif report.skipped:
        self._add_stats("skipped", [report])
    # Only actual test items count toward the collected total.
    self._numcollected += sum(
        1 for entry in report.result if isinstance(entry, Item)
    )
    if self.isatty:
        self.report_collect()
def report_collect(self, final: bool = False) -> None:
    """Write the "collecting/collected N items ..." status line.

    :param final: True for the last report after collection finished.
    """
    if self.config.option.verbose < 0:
        return

    if not final:
        # Only write "collecting" report every 0.5s.
        t = timing.time()
        if (
            self._collect_report_last_write is not None
            and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION
        ):
            return
        self._collect_report_last_write = t

    errors = len(self.stats.get("error", []))
    skipped = len(self.stats.get("skipped", []))
    deselected = len(self.stats.get("deselected", []))
    selected = self._numcollected - deselected
    line = "collected " if final else "collecting "
    line += (
        str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
    )
    if errors:
        line += " / %d error%s" % (errors, "s" if errors != 1 else "")
    if deselected:
        line += " / %d deselected" % deselected
    if skipped:
        line += " / %d skipped" % skipped
    if self._numcollected > selected:
        line += " / %d selected" % selected
    if self.isatty:
        # On a tty, rewrite the same line in place.
        self.rewrite(line, bold=True, erase=True)
        if final:
            self.write("\n")
    else:
        self.write_line(line)
@hookimpl(trylast=True)
def pytest_sessionstart(self, session: "Session") -> None:
    """Write the session header (platform, versions, plugin header lines)."""
    self._session = session
    self._sessionstarttime = timing.time()
    if not self.showheader:
        return
    self.write_sep("=", "test session starts", bold=True)
    verinfo = platform.python_version()
    if not self.no_header:
        msg = f"platform {sys.platform} -- Python {verinfo}"
        pypy_version_info = getattr(sys, "pypy_version_info", None)
        if pypy_version_info:
            verinfo = ".".join(map(str, pypy_version_info[:3]))
            msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]"
        msg += ", pytest-{}, pluggy-{}".format(
            _pytest._version.version, pluggy.__version__
        )
        if (
            self.verbosity > 0
            or self.config.option.debug
            or getattr(self.config.option, "pastebin", None)
        ):
            msg += " -- " + str(sys.executable)
        self.write_line(msg)
        # Additional header lines contributed by plugins.
        lines = self.config.hook.pytest_report_header(
            config=self.config, start_path=self.startpath
        )
        self._write_report_lines_from_hooks(lines)
def _write_report_lines_from_hooks(
    self, lines: Sequence[Union[str, Sequence[str]]]
) -> None:
    """Write hook-contributed header lines; each entry may be a string or a
    sequence of strings. Entries are written in reverse order."""
    for entry in reversed(lines):
        if isinstance(entry, str):
            self.write_line(entry)
        else:
            for single_line in entry:
                self.write_line(single_line)
def pytest_report_header(self, config: Config) -> List[str]:
    """Contribute the rootdir/configfile/testpaths/plugins header lines."""
    result = [f"rootdir: {config.rootpath}"]
    if config.inipath:
        result.append("configfile: " + bestrelpath(config.rootpath, config.inipath))
    # Only mention testpaths when they were actually the source of the args.
    if config.args_source == Config.ArgsSource.TESTPATHS:
        testpaths: List[str] = config.getini("testpaths")
        result.append("testpaths: {}".format(", ".join(testpaths)))
    plugininfo = config.pluginmanager.list_plugin_distinfo()
    if plugininfo:
        result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
    return result
def pytest_collection_finish(self, session: "Session") -> None:
    """Finalize the collection report and handle --collect-only output."""
    self.report_collect(True)

    lines = self.config.hook.pytest_report_collectionfinish(
        config=self.config,
        start_path=self.startpath,
        items=session.items,
    )
    self._write_report_lines_from_hooks(lines)

    if self.config.getoption("collectonly"):
        if session.items:
            if self.config.option.verbose > -1:
                self._tw.line("")
            self._printcollecteditems(session.items)

        failed = self.stats.get("failed")
        if failed:
            self._tw.sep("!", "collection failures")
            for rep in failed:
                rep.toterminal(self._tw)
def _printcollecteditems(self, items: Sequence[Item]) -> None:
    """Print the collected items for --collect-only.

    In quiet mode prints flat nodeids (or per-file counts when even
    quieter); otherwise prints an indented collector tree.
    """
    if self.config.option.verbose < 0:
        if self.config.option.verbose < -1:
            counts = Counter(item.nodeid.split("::", 1)[0] for item in items)
            for name, count in sorted(counts.items()):
                self._tw.line("%s: %d" % (name, count))
        else:
            for item in items:
                self._tw.line(item.nodeid)
        return
    stack: List[Node] = []
    indent = ""
    for item in items:
        needed_collectors = item.listchain()[1:]  # strip root node
        # Pop collectors until the stack is a prefix of this item's chain.
        while stack:
            if stack == needed_collectors[: len(stack)]:
                break
            stack.pop()
        for col in needed_collectors[len(stack) :]:
            stack.append(col)
            indent = (len(stack) - 1) * " "
            self._tw.line(f"{indent}{col}")
            if self.config.option.verbose >= 1:
                obj = getattr(col, "obj", None)
                doc = inspect.getdoc(obj) if obj else None
                if doc:
                    for line in doc.splitlines():
                        self._tw.line("{}{}".format(indent + " ", line))
@hookimpl(hookwrapper=True)
def pytest_sessionfinish(
    self, session: "Session", exitstatus: Union[int, ExitCode]
):
    """After all other sessionfinish hooks, write summaries and final stats."""
    outcome = yield
    outcome.get_result()
    self._tw.line("")
    summary_exit_codes = (
        ExitCode.OK,
        ExitCode.TESTS_FAILED,
        ExitCode.INTERRUPTED,
        ExitCode.USAGE_ERROR,
        ExitCode.NO_TESTS_COLLECTED,
    )
    if exitstatus in summary_exit_codes and not self.no_summary:
        self.config.hook.pytest_terminal_summary(
            terminalreporter=self, exitstatus=exitstatus, config=self.config
        )
    if session.shouldfail:
        self.write_sep("!", str(session.shouldfail), red=True)
    if exitstatus == ExitCode.INTERRUPTED:
        self._report_keyboardinterrupt()
        # Clear the memo so pytest_unconfigure does not report it again.
        self._keyboardinterrupt_memo = None
    elif session.shouldstop:
        self.write_sep("!", str(session.shouldstop), red=True)
    self.summary_stats()
@hookimpl(hookwrapper=True)
def pytest_terminal_summary(self) -> Generator[None, None, None]:
    """Write the built-in summaries around other plugins' summary output."""
    self.summary_errors()
    self.summary_failures()
    self.summary_warnings()
    self.summary_passes()
    yield
    self.short_test_summary()
    # Display any extra warnings from teardown here (if any).
    self.summary_warnings()
def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None:
    # Remember the interrupt representation; it is reported later.
    self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
def pytest_unconfigure(self) -> None:
    """Report a pending keyboard interrupt that sessionfinish did not handle."""
    if self._keyboardinterrupt_memo is None:
        return
    self._report_keyboardinterrupt()
def _report_keyboardinterrupt(self) -> None:
    """Write the stored KeyboardInterrupt crash (full trace with --full-trace)."""
    excrepr = self._keyboardinterrupt_memo
    assert excrepr is not None
    assert excrepr.reprcrash is not None
    msg = excrepr.reprcrash.message
    self.write_sep("!", msg)
    if "KeyboardInterrupt" in msg:
        if self.config.option.fulltrace:
            excrepr.toterminal(self._tw)
        else:
            excrepr.reprcrash.toterminal(self._tw)
            self._tw.line(
                "(to show a full traceback on KeyboardInterrupt use --full-trace)",
                yellow=True,
            )
def _locationline(
    self, nodeid: str, fspath: str, lineno: Optional[int], domain: str
) -> str:
    """Build the per-test location line shown in verbose output."""

    def mkrel(nodeid: str) -> str:
        line = self.config.cwd_relative_nodeid(nodeid)
        if domain and line.endswith(domain):
            line = line[: -len(domain)]
            values = domain.split("[")
            values[0] = values[0].replace(".", "::")  # don't replace '.' in params
            line += "[".join(values)
        return line

    # collect_fspath comes from testid which has a "/"-normalized path.
    if fspath:
        res = mkrel(nodeid)
        # Show the originating file too when it differs from the nodeid path.
        if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace(
            "\\", nodes.SEP
        ):
            res += " <- " + bestrelpath(self.startpath, Path(fspath))
    else:
        res = "[location]"
    return res + " "
def _getfailureheadline(self, rep):
    """Return the report's head line, or a generic title when absent."""
    return rep.head_line or "test session"  # XXX?
def _getcrashline(self, rep):
    """Return a one-line crash description for *rep*, or "" if unavailable."""
    try:
        return str(rep.longrepr.reprcrash)
    except AttributeError:
        try:
            # Fall back to a truncated full representation.
            return str(rep.longrepr)[:50]
        except AttributeError:
            return ""
#
# Summaries for sessionfinish.
#
def getreports(self, name: str):
    """Return reports in category *name*, excluding those already shown in pdb."""
    return [
        report
        for report in self.stats.get(name, ())
        if not hasattr(report, "_pdbshown")
    ]
def summary_warnings(self) -> None:
    """Write the warnings summary, grouping identical messages.

    Called twice (around and after teardown); the second, "final" pass
    only shows warnings recorded since the first pass.
    """
    if self.hasopt("w"):
        all_warnings: Optional[List[WarningReport]] = self.stats.get("warnings")
        if not all_warnings:
            return

        final = self._already_displayed_warnings is not None
        if final:
            warning_reports = all_warnings[self._already_displayed_warnings :]
        else:
            warning_reports = all_warnings
        self._already_displayed_warnings = len(warning_reports)
        if not warning_reports:
            return

        reports_grouped_by_message: Dict[str, List[WarningReport]] = {}
        for wr in warning_reports:
            reports_grouped_by_message.setdefault(wr.message, []).append(wr)

        def collapsed_location_report(reports: List[WarningReport]) -> str:
            # Collapse many locations into per-file counts to keep it short.
            locations = []
            for w in reports:
                location = w.get_location(self.config)
                if location:
                    locations.append(location)

            if len(locations) < 10:
                return "\n".join(map(str, locations))

            counts_by_filename = Counter(
                str(loc).split("::", 1)[0] for loc in locations
            )
            return "\n".join(
                "{}: {} warning{}".format(k, v, "s" if v > 1 else "")
                for k, v in counts_by_filename.items()
            )

        title = "warnings summary (final)" if final else "warnings summary"
        self.write_sep("=", title, yellow=True, bold=False)
        for message, message_reports in reports_grouped_by_message.items():
            maybe_location = collapsed_location_report(message_reports)
            if maybe_location:
                self._tw.line(maybe_location)
                lines = message.splitlines()
                indented = "\n".join(" " + x for x in lines)
                message = indented.rstrip()
            else:
                message = message.rstrip()
            self._tw.line(message)
            self._tw.line()
        self._tw.line(
            "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html"
        )
def summary_passes(self) -> None:
    """With -rP, write captured output sections of passed tests."""
    if self.config.option.tbstyle != "no":
        if self.hasopt("P"):
            reports: List[TestReport] = self.getreports("passed")
            if not reports:
                return
            self.write_sep("=", "PASSES")
            for rep in reports:
                if rep.sections:
                    msg = self._getfailureheadline(rep)
                    self.write_sep("_", msg, green=True, bold=True)
                    self._outrep_summary(rep)
                self._handle_teardown_sections(rep.nodeid)
def _get_teardown_reports(self, nodeid: str) -> List[TestReport]:
    """Return teardown-phase reports (stored under the empty category) for *nodeid*."""
    all_reports = self.getreports("")
    return [
        rep
        for rep in all_reports
        if rep.when == "teardown" and rep.nodeid == nodeid
    ]
def _handle_teardown_sections(self, nodeid: str) -> None:
    """Print the captured teardown sections belonging to *nodeid*."""
    for teardown_report in self._get_teardown_reports(nodeid):
        self.print_teardown_sections(teardown_report)
def print_teardown_sections(self, rep: TestReport) -> None:
    """Print the teardown capture sections of *rep*, honoring --show-capture."""
    showcapture = self.config.option.showcapture
    if showcapture == "no":
        return
    for secname, content in rep.sections:
        if showcapture != "all" and showcapture not in secname:
            continue
        if "teardown" in secname:
            self._tw.sep("-", secname)
            # Drop a single trailing newline to avoid a blank line.
            if content[-1:] == "\n":
                content = content[:-1]
            self._tw.line(content)
def summary_failures(self) -> None:
    """Write the FAILURES section with tracebacks (or crash lines for --tb=line)."""
    if self.config.option.tbstyle != "no":
        reports: List[BaseReport] = self.getreports("failed")
        if not reports:
            return
        self.write_sep("=", "FAILURES")
        if self.config.option.tbstyle == "line":
            for rep in reports:
                line = self._getcrashline(rep)
                self.write_line(line)
        else:
            for rep in reports:
                msg = self._getfailureheadline(rep)
                self.write_sep("_", msg, red=True, bold=True)
                self._outrep_summary(rep)
                self._handle_teardown_sections(rep.nodeid)
def summary_errors(self) -> None:
    """Write the ERRORS section for collection/setup/teardown errors."""
    if self.config.option.tbstyle != "no":
        reports: List[BaseReport] = self.getreports("error")
        if not reports:
            return
        self.write_sep("=", "ERRORS")
        for rep in self.stats["error"]:
            msg = self._getfailureheadline(rep)
            if rep.when == "collect":
                msg = "ERROR collecting " + msg
            else:
                msg = f"ERROR at {rep.when} of {msg}"
            self.write_sep("_", msg, red=True, bold=True)
            self._outrep_summary(rep)
def _outrep_summary(self, rep: BaseReport) -> None:
    """Write the report's long representation plus its capture sections."""
    rep.toterminal(self._tw)
    showcapture = self.config.option.showcapture
    if showcapture == "no":
        return
    for secname, content in rep.sections:
        if showcapture != "all" and showcapture not in secname:
            continue
        self._tw.sep("-", secname)
        # Drop a single trailing newline to avoid a blank line.
        if content[-1:] == "\n":
            content = content[:-1]
        self._tw.line(content)
def summary_stats(self) -> None:
    """Write the final colored "=== N passed, M failed in Xs ===" line."""
    if self.verbosity < -1:
        return

    session_duration = timing.time() - self._sessionstarttime
    (parts, main_color) = self.build_summary_stats_line()
    line_parts = []

    display_sep = self.verbosity >= 0
    if display_sep:
        fullwidth = self._tw.fullwidth
    for text, markup in parts:
        with_markup = self._tw.markup(text, **markup)
        if display_sep:
            # Markup escape codes inflate len(); widen the separator to match.
            fullwidth += len(with_markup) - len(text)
        line_parts.append(with_markup)
    msg = ", ".join(line_parts)

    main_markup = {main_color: True}
    duration = f" in {format_session_duration(session_duration)}"
    duration_with_markup = self._tw.markup(duration, **main_markup)
    if display_sep:
        fullwidth += len(duration_with_markup) - len(duration)
    msg += duration_with_markup

    if display_sep:
        markup_for_end_sep = self._tw.markup("", **main_markup)
        # Strip the trailing reset code so the separator stays colored.
        if markup_for_end_sep.endswith("\x1b[0m"):
            markup_for_end_sep = markup_for_end_sep[:-4]
        fullwidth += len(markup_for_end_sep)
        msg += markup_for_end_sep

    if display_sep:
        self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
    else:
        self.write_line(msg, **main_markup)
def short_test_summary(self) -> None:
    """Write the "short test summary info" section driven by the -r chars."""
    if not self.reportchars:
        return

    def show_simple(lines: List[str], *, stat: str) -> None:
        # One crash-message line per report in the given category.
        failed = self.stats.get(stat, [])
        if not failed:
            return
        config = self.config
        for rep in failed:
            color = _color_for_type.get(stat, _color_for_type_default)
            line = _get_line_with_reprcrash_message(
                config, rep, self._tw, {color: True}
            )
            lines.append(line)

    def show_xfailed(lines: List[str]) -> None:
        xfailed = self.stats.get("xfailed", [])
        for rep in xfailed:
            verbose_word = rep._get_verbose_word(self.config)
            markup_word = self._tw.markup(
                verbose_word, **{_color_for_type["warnings"]: True}
            )
            nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
            line = f"{markup_word} {nodeid}"
            reason = rep.wasxfail
            if reason:
                line += " - " + str(reason)

            lines.append(line)

    def show_xpassed(lines: List[str]) -> None:
        xpassed = self.stats.get("xpassed", [])
        for rep in xpassed:
            verbose_word = rep._get_verbose_word(self.config)
            markup_word = self._tw.markup(
                verbose_word, **{_color_for_type["warnings"]: True}
            )
            nodeid = _get_node_id_with_markup(self._tw, self.config, rep)
            reason = rep.wasxfail
            lines.append(f"{markup_word} {nodeid} {reason}")

    def show_skipped(lines: List[str]) -> None:
        skipped: List[CollectReport] = self.stats.get("skipped", [])
        # Fold identical skip reasons together with a count.
        fskips = _folded_skips(self.startpath, skipped) if skipped else []
        if not fskips:
            return
        verbose_word = skipped[0]._get_verbose_word(self.config)
        markup_word = self._tw.markup(
            verbose_word, **{_color_for_type["warnings"]: True}
        )
        prefix = "Skipped: "
        for num, fspath, lineno, reason in fskips:
            if reason.startswith(prefix):
                reason = reason[len(prefix) :]
            if lineno is not None:
                lines.append(
                    "%s [%d] %s:%d: %s" % (markup_word, num, fspath, lineno, reason)
                )
            else:
                lines.append("%s [%d] %s: %s" % (markup_word, num, fspath, reason))

    REPORTCHAR_ACTIONS: Mapping[str, Callable[[List[str]], None]] = {
        "x": show_xfailed,
        "X": show_xpassed,
        "f": partial(show_simple, stat="failed"),
        "s": show_skipped,
        "p": partial(show_simple, stat="passed"),
        "E": partial(show_simple, stat="error"),
    }

    lines: List[str] = []
    for char in self.reportchars:
        action = REPORTCHAR_ACTIONS.get(char)
        if action:  # skipping e.g. "P" (passed with output) here.
            action(lines)

    if lines:
        self.write_sep("=", "short test summary info", cyan=True, bold=True)
        for line in lines:
            self.write_line(line)
def _get_main_color(self) -> Tuple[str, List[str]]:
if self._main_color is None or self._known_types is None or self._is_last_item:
self._set_main_color()
assert self._main_color
assert self._known_types
return self._main_color, self._known_types
def _determine_main_color(self, unknown_type_seen: bool) -> str:
stats = self.stats
if "failed" in stats or "error" in stats:
main_color = "red"
elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
main_color = "yellow"
elif "passed" in stats or not self._is_last_item:
main_color = "green"
else:
main_color = "yellow"
return main_color
def _set_main_color(self) -> None:
unknown_types: List[str] = []
for found_type in self.stats.keys():
if found_type: # setup/teardown reports have an empty key, ignore them
if found_type not in KNOWN_TYPES and found_type not in unknown_types:
unknown_types.append(found_type)
self._known_types = list(KNOWN_TYPES) + unknown_types
self._main_color = self._determine_main_color(bool(unknown_types))
def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
"""
Build the parts used in the last summary stats line.
The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs===".
This function builds a list of the "parts" that make up for the text in that line, in
the example above it would be:
[
("12 passed", {"green": True}),
("2 errors", {"red": True}
]
That last dict for each line is a "markup dictionary", used by TerminalWriter to
color output.
The final color of the line is also determined by this function, and is the second
element of the returned tuple.
"""
if self.config.getoption("collectonly"):
return self._build_collect_only_summary_stats_line()
else:
return self._build_normal_summary_stats_line()
def _get_reports_to_display(self, key: str) -> List[Any]:
"""Get test/collection reports for the given status key, such as `passed` or `error`."""
reports = self.stats.get(key, [])
return [x for x in reports if getattr(x, "count_towards_summary", True)]
def _build_normal_summary_stats_line(
self,
) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
main_color, known_types = self._get_main_color()
parts = []
for key in known_types:
reports = self._get_reports_to_display(key)
if reports:
count = len(reports)
color = _color_for_type.get(key, _color_for_type_default)
markup = {color: True, "bold": color == main_color}
parts.append(("%d %s" % pluralize(count, key), markup))
if not parts:
parts = [("no tests ran", {_color_for_type_default: True})]
return parts, main_color
def _build_collect_only_summary_stats_line(
self,
) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
deselected = len(self._get_reports_to_display("deselected"))
errors = len(self._get_reports_to_display("error"))
if self._numcollected == 0:
parts = [("no tests collected", {"yellow": True})]
main_color = "yellow"
elif deselected == 0:
main_color = "green"
collected_output = "%d %s collected" % pluralize(self._numcollected, "test")
parts = [(collected_output, {main_color: True})]
else:
all_tests_were_deselected = self._numcollected == deselected
if all_tests_were_deselected:
main_color = "yellow"
collected_output = f"no tests collected ({deselected} deselected)"
else:
main_color = "green"
selected = self._numcollected - deselected
collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
parts = [(collected_output, {main_color: True})]
if errors:
main_color = _color_for_type["error"]
parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})]
return parts, main_color
def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport):
    """Return the report's node id with the non-path portion rendered bold."""
    nodeid = config.cwd_relative_nodeid(rep.nodeid)
    # Split off the file path; everything after the first "::" is bolded.
    path, sep, rest = nodeid.partition("::")
    if sep:
        return path + "::" + tw.markup(rest, bold=True)
    return path
def _format_trimmed(format: str, msg: str, available_width: int) -> Optional[str]:
    """Format msg into format, ellipsizing it if doesn't fit in available_width.

    Returns None if even the ellipsis can't fit.
    """
    # Only use the first line.
    i = msg.find("\n")
    if i != -1:
        msg = msg[:i]
    ellipsis = "..."
    # Display width of the surrounding format with an empty message.
    format_width = wcswidth(format.format(""))
    if format_width + len(ellipsis) > available_width:
        return None
    if format_width + wcswidth(msg) > available_width:
        # Reserve room for the ellipsis, then shrink the message until its
        # *display* width fits (wcswidth: wide characters count as two cells,
        # so a char-count slice alone is not enough).
        available_width -= len(ellipsis)
        msg = msg[:available_width]
        while format_width + wcswidth(msg) > available_width:
            msg = msg[:-1]
        msg += ellipsis
    return format.format(msg)
def _get_line_with_reprcrash_message(
    config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: Dict[str, bool]
) -> str:
    """Get summary line for a report, trying to add reprcrash message.

    The line is "<VERBOSE_WORD> <nodeid>", optionally followed by
    " - <crash message>" when the report carries one.
    """
    verbose_word = rep._get_verbose_word(config)
    word = tw.markup(verbose_word, **word_markup)
    node = _get_node_id_with_markup(tw, config, rep)
    line = f"{word} {node}"
    # Measure display width (wcswidth), not len(): wide chars count double.
    line_width = wcswidth(line)
    try:
        # Type ignored intentionally -- possible AttributeError expected.
        msg = rep.longrepr.reprcrash.message  # type: ignore[union-attr]
    except AttributeError:
        pass
    else:
        if not running_on_ci():
            # Interactive terminal: trim the message to the remaining width.
            # _format_trimmed may return None if nothing useful fits.
            available_width = tw.fullwidth - line_width
            msg = _format_trimmed(" - {}", msg, available_width)
        else:
            # CI logs have no meaningful terminal width; keep the full text.
            msg = f" - {msg}"
        if msg is not None:
            line += msg
    return line
def _folded_skips(
    startpath: Path,
    skipped: Sequence[CollectReport],
) -> List[Tuple[int, str, Optional[int], str]]:
    """Group skip reports and return ``(count, fspath, lineno, reason)`` tuples."""
    grouped: Dict[Tuple[str, Optional[int], str], List[CollectReport]] = {}
    for event in skipped:
        assert event.longrepr is not None
        assert isinstance(event.longrepr, tuple), (event, event.longrepr)
        assert len(event.longrepr) == 3, (event, event.longrepr)
        fspath, lineno, reason = event.longrepr
        # For consistency, report all fspaths in relative form.
        fspath = bestrelpath(startpath, Path(fspath))
        keywords = getattr(event, "keywords", {})
        # Folding reports with global pytestmark variable: skips raised during
        # setup by a module-level mark are folded without a line number, since
        # the scope of a skip marker cannot be identified yet.
        # TODO: Revisit after marks scope would be fixed.
        if (
            event.when == "setup"
            and "skip" in keywords
            and "pytestmark" not in keywords
        ):
            key: Tuple[str, Optional[int], str] = (fspath, None, reason)
        else:
            key = (fspath, lineno, reason)
        grouped.setdefault(key, []).append(event)
    return [(len(events), *key) for key, events in grouped.items()]
# Markup color used for each outcome category in summaries; categories not
# listed here fall back to `_color_for_type_default`.
_color_for_type = {
    "failed": "red",
    "error": "red",
    "warnings": "yellow",
    "passed": "green",
}
# Fallback color for outcome categories absent from `_color_for_type`.
_color_for_type_default = "yellow"
def pluralize(count: int, noun: str) -> Tuple[int, str]:
    """Return ``(count, noun)`` with *noun* pluralized when *count* != 1.

    Only "error", "warnings" and "test" are ever pluralized; words such as
    `failed` or `passed` read the same regardless of count.
    """
    if noun not in ("error", "warnings", "test"):
        return count, noun
    # The `warnings` key is plural. To avoid API breakage we keep the key that
    # way, but normalize to singular here so plurality is decided uniformly.
    singular = noun.replace("warnings", "warning")
    return count, (singular if count == 1 else singular + "s")
def _plugin_nameversions(plugininfo) -> List[str]:
values: List[str] = []
for plugin, dist in plugininfo:
# Gets us name and version!
name = "{dist.project_name}-{dist.version}".format(dist=dist)
# Questionable convenience, but it keeps things short.
if name.startswith("pytest-"):
name = name[7:]
# We decided to print python package names they can have more than one plugin.
if name not in values:
values.append(name)
return values
def format_session_duration(seconds: float) -> str:
    """Format the given seconds in a human readable manner to show in the final summary.

    Runs under a minute show raw seconds only; longer runs additionally get
    an ``H:MM:SS`` breakdown, e.g. ``"125.50s (0:02:05)"``.
    """
    if seconds < 60:
        return f"{seconds:.2f}s"
    delta = datetime.timedelta(seconds=int(seconds))
    return f"{seconds:.2f}s ({delta})"
def _get_raw_skip_reason(report: TestReport) -> str:
    """Get the reason string of a skip/xfail/xpass test report.

    The string is just the part given by the user, with the framework's
    "reason: " / "Skipped: " prefixes stripped off.
    """
    if hasattr(report, "wasxfail"):
        # xfail/xpass reports carry the reason in `wasxfail`.
        reason = cast(str, report.wasxfail)
        prefix = "reason: "
        return reason[len(prefix):] if reason.startswith(prefix) else reason
    assert report.skipped
    assert isinstance(report.longrepr, tuple)
    _, _, reason = report.longrepr
    prefix = "Skipped: "
    if reason.startswith(prefix):
        reason = reason[len(prefix):]
    elif reason == "Skipped":
        reason = ""
    return reason
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pytest@py3@_pytest@terminal.py@.PATH_END.py
|
{
"filename": "_sizeref.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/marker/_sizeref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SizerefValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``scattergl.marker.sizeref`` property."""

    def __init__(self, plotly_name="sizeref", parent_name="scattergl.marker", **kwargs):
        super(SizerefValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "calc" edit type: changing this property triggers a full recalc.
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@marker@_sizeref.py@.PATH_END.py
|
{
"filename": "mosaic.py",
"repo_name": "mhardcastle/ddf-pipeline",
"repo_path": "ddf-pipeline_extracted/ddf-pipeline-master/scripts/mosaic.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Mosaic final images
# arguments are directories with final images
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import range
from pipeline_version import version
from reproject import reproject_interp,reproject_exact
from reproj_test import reproject_interp_chunk_2d
from auxcodes import die, get_rms, flatten, convert_regionfile_to_poly, get_rms_map3
import sys
from astropy.io import fits
from astropy.table import Table
from astropy.wcs import WCS
import numpy as np
import argparse
import pickle
import os.path
import glob
import pyregion
import scipy.ndimage as nd
from copy import deepcopy
import multiprocessing as mp
from queue import Empty
from convolve import do_convolve
from time import sleep
def reproj_inner(q,reproj,hdu,header,shift,direction,ds9region,guard=20):
    # Reproject the single facet `direction` of `hdu` onto the target `header`,
    # applying the per-facet astrometric `shift` (a row with RA_offset and
    # DEC_offset in arcsec), and put the masked result on queue `q`.
    # Runs as a multiprocessing.Process target -- see do_reproj_mp.
    # `guard` is the padding (pixels) added around the facet's bounding box.
    print('Direction',direction,'starting')
    r = pyregion.parse(ds9region)
    manualmask = r.get_mask(hdu=hdu)
    # Find bounding box
    yv = np.any(manualmask, axis=1)
    xv = np.any(manualmask, axis=0)
    xmin, xmax = np.where(xv)[0][[0, -1]]
    ymin, ymax = np.where(yv)[0][[0, -1]]
    # Add guard, clipped to the image edges
    xmin-=guard
    if xmin<0: xmin=0
    ymin-=guard
    if ymin<0: ymin=0
    xmax+=guard
    if xmax>hdu.data.shape[1]:
        xmax=hdu.data.shape[1]
    ymax+=guard
    if ymax>hdu.data.shape[0]:
        ymax=hdu.data.shape[0]
    print('Bounding box is',xmin,xmax,ymin,ymax)
    newdata=hdu.data[ymin:ymax,xmin:xmax]
    newheader=deepcopy(hdu.header)
    # adjust the header both to shift and to take account of the subregion
    cellsize=3600*newheader['CDELT2'] # arcsec per pixel
    #newheader['CRPIX1']-=xmin
    #newheader['CRPIX2']-=ymin
    #newheader['CRVAL1']-=shift['RA_offset']/3600.0
    #newheader['CRVAL2']-=shift['DEC_offset']/3600.0
    # Shift applied in pixel space via CRPIX; opposite signs for RA and Dec.
    # NOTE(review): this presumably relies on the usual FITS convention of a
    # negative CDELT1 (RA decreasing with x) -- confirm against the images.
    newheader['CRPIX1']-=xmin+shift['RA_offset']/cellsize
    newheader['CRPIX2']-=ymin-shift['DEC_offset']/cellsize
    shhdu=fits.PrimaryHDU(data=newdata,header=newheader)
    rpm,_=reproj(shhdu,header,hdu_in=0,parallel=False)
    rphdu=fits.PrimaryHDU(header=header,data=rpm)
    # Zero everything outside the facet polygon in the *output* frame.
    newmask = r.get_mask(hdu=rphdu)
    rpm[~newmask]=0
    print('Direction',direction,'returning result to queue')
    q.put(rpm)
def do_reproj_mp(reproj,hdu,header,shift=None,polylist=None,badfacet=None):
    # Wrapper around reproj which handles per-facet reprojection if required.
    # With shift=None, reprojects the whole image in one call; otherwise one
    # reproj_inner process is spawned per facet in `polylist` (skipping any
    # listed in `badfacet`) and their outputs are summed as they arrive.
    if shift is None:
        return reproj(hdu,header,hdu_in=0,parallel=False)
    else:
        rpm=None
        q=mp.Queue()
        for direction,ds9region in enumerate(polylist):
            if badfacet and direction in badfacet: continue
            p=mp.Process(target=reproj_inner,args=(q,reproj,hdu,header,shift[direction],direction,ds9region))
            p.start()
        # Poll until all children have exited, draining results as they come.
        # NOTE(review): results still queued when the last child exits would be
        # lost after the break below; in practice a child's queue feeder keeps
        # it alive until its data is flushed -- confirm before relying on this.
        while True:
            n=len(mp.active_children())
            print('In main loop,',n,'active children')
            if n==0: break
            try:
                result=q.get(block=False)
            except Empty:
                print('Tick...')
                sleep(1)
            else:
                if rpm is None:
                    rpm=result
                else:
                    rpm+=result
        return rpm,None # footprint is not used
def do_reproj(reproj,hdu,header,shift=None,polylist=None,debug=True):
    # Wrapper around reproj which handles per-facet reprojection if required.
    # Serial counterpart of do_reproj_mp: with shift=None reprojects the whole
    # image; otherwise loops over facets in `polylist`, shifting each by the
    # per-direction RA/DEC offsets (arcsec) before reprojection, and sums the
    # masked per-facet results.
    if shift is None:
        return reproj(hdu,header,hdu_in=0,parallel=False)
    else:
        rpm=None
        for direction,ds9region in enumerate(polylist):
            print(direction,ds9region)
            r = pyregion.parse(ds9region)
            manualmask = r.get_mask(hdu=hdu)
            # Smooth the mask so the bounding box extends past the facet edge.
            print('Convolving to get the new size...')
            manualmask = nd.gaussian_filter(manualmask.astype(float),sigma=3)
            #manualmask=np.binary_dilation(manualmask,structure=np.ones(shape=(5,5)))
            print('Done')
            manualmask = manualmask>0.0 # now a bool array
            yv = np.any(manualmask, axis=1)
            xv = np.any(manualmask, axis=0)
            xmin, xmax = np.where(xv)[0][[0, -1]]
            ymin, ymax = np.where(yv)[0][[0, -1]]
            print('Bounding box is',xmin,xmax,ymin,ymax)
            newdata=hdu.data[ymin:ymax,xmin:xmax]
            newheader=deepcopy(hdu.header)
            # adjust the header both to shift and to take account of the subregion
            newheader['CRPIX1']-=xmin
            newheader['CRPIX2']-=ymin
            #newheader['CRVAL1']+=shift['RA_offset'][direction]/3600.0
            #newheader['CRVAL2']-=shift['DEC_offset'][direction]/3600.0
            # Shift applied in world coordinates here (cf. reproj_inner, which
            # shifts CRPIX instead).
            newheader['CRVAL1']-=shift['RA_offset'][direction]/3600.0
            newheader['CRVAL2']+=shift['DEC_offset'][direction]/3600.0
            shhdu=fits.PrimaryHDU(data=newdata,header=newheader)
            if debug:
                shhdu.writeto('facet-%i.fits' % direction, overwrite=True)
            if rpm is None:
                # First facet: create the accumulator and the output-frame HDU.
                rpm,_=reproj(shhdu,header,hdu_in=0,parallel=False)
                rphdu=fits.PrimaryHDU(header=header,data=rpm)
                newmask = r.get_mask(hdu=rphdu)
                rpm[~newmask]=0
            else:
                # NOTE(review): `rphdu` is deliberately reused from the first
                # iteration -- its data are stale but get_mask presumably only
                # needs its header/shape for the output frame; confirm.
                newmask = r.get_mask(hdu=rphdu)
                rpm+=np.where(newmask,reproj(shhdu,header,hdu_in=0,parallel=False)[0],0)
            if debug:
                rphdu=fits.PrimaryHDU(header=header,data=rpm)
                rphdu.writeto('direction-%i.fits' % direction, overwrite=True)
    return rpm,None # footprint is not used so safe to return none
def make_mosaic(args):
    """Mosaic the ddf-pipeline images found in ``args.directories``.

    ``args`` is an ``argparse.Namespace`` (see the ``__main__`` block for the
    option set); a calling program may additionally attach an ``args.header``
    to fix the output projection. Images are combined with inverse-variance
    weights derived from the apparent/true beam ratio and per-field noise.

    Returns the mosaic filename, or ``None`` when ``--no_write`` is set.
    Raises RuntimeError on inconsistent options or missing input files.
    """
    if args.find_noise and args.read_noise:
        raise RuntimeError('Cannot both find noise and read it')
    if args.scale is not None:
        if len(args.scale) != len(args.directories):
            die('Scales provided must match directories',database=False)
    if args.noise is not None:
        if len(args.noise) != len(args.directories):
            die('Noises provided must match directories',database=False)
    if args.band is not None:
        band=int(args.band)
        print('Doing band %i'%band)
    else:
        band=None
    if args.rootname:
        rootname=args.rootname+'-'
    else:
        rootname=''
    if band is not None:
        rootname=('band%i-' % band) + rootname
    if args.exact:
        reproj=reproject_exact # may not work
    else:
        reproj=reproject_interp_chunk_2d
    # Select the int/app image filenames for the requested mosaic type.
    if args.do_vlow:
        intname='image_full_vlow_nocut_m.int.restored.fits'
        appname='image_full_vlow_nocut_m.app.restored.fits'
    elif args.do_wsclean:
        intname='WSCLEAN_low-MFS-image-int.fits'
        appname='WSCLEAN_low-MFS-image.fits'
    elif args.do_lowres:
        intname='image_full_low_m.int.restored.fits'
        appname='image_full_low_m.app.restored.fits'
    elif args.do_stokesV:
        intname='image_full_high_stokesV.dirty.corr.fits'
        appname='image_full_high_stokesV.dirty.fits'
    elif band is not None and args.use_shifted:
        intname='image_full_ampphase_di_m.NS_Band%i_shift.int.facetRestored.fits' % band
        appname='image_full_ampphase_di_m.NS_Band%i_shift.app.facetRestored.fits' % band
    elif band is not None:
        intname='image_full_ampphase_di_m.NS_Band%i.int.facetRestored.fits' % band
        appname='image_full_ampphase_di_m.NS_Band%i.app.facetRestored.fits' % band
    elif args.use_shifted:
        intname='image_full_ampphase_di_m.NS_shift.int.facetRestored.fits'
        appname='image_full_ampphase_di_m.NS_shift.app.facetRestored.fits'
    else:
        intname='image_full_ampphase_di_m.NS.int.restored.fits'
        appname='image_full_ampphase_di_m.NS.app.restored.fits'
    if args.convolve:
        orig_intname=intname
        orig_appname=appname
        intname=intname.replace('.fits','_convolved.fits')
        appname=appname.replace('.fits','_convolved.fits')
    # astromap blanking if required
    bth=None
    try:
        bth=float(args.astromap_blank)
    except (TypeError, ValueError):
        # None or a non-numeric value: astrometry blanking disabled.
        pass
    threshold=float(args.beamcut)
    hdus=[]
    app=[]
    astromaps=[]
    wcs=[]
    print('Reading files...')
    noise=[]
    noisefiles=[]
    name=[]
    shifts=[]
    polylists=[]
    badfacets=[]
    if args.directories is None:
        raise RuntimeError("At least one directory name must be supplied")
    for d in args.directories:
        name.append(d.split('/')[-1])
        infile=d+'/'+intname
        if not os.path.isfile(infile):
            if args.convolve:
                print('Convolved file',infile,'does not exist, making it')
                do_convolve(d+'/'+orig_intname,float(args.convolve),d+'/'+intname)
                do_convolve(d+'/'+orig_appname,float(args.convolve),d+'/'+appname)
            else:
                raise RuntimeError('Expected file',infile,'does not exist')
        hdu=fits.open(infile)
        if args.convolve:
            if hdu[0].header['BMAJ']*3600.0!=float(args.convolve):
                raise RuntimeError('Resolution of convolved file on disk '+infile+' does not match required')
        if args.do_stokesV:
            hdu[0].data[0][0] = hdu[0].data[0][1]
        if args.find_noise:
            print('Estimating noise for', d+'/' + intname)
            if args.do_vlow or args.do_wsclean:
                noise.append(get_rms(hdu,boxsize=500,niter=50))
            elif args.do_lowres:
                noise.append(get_rms(hdu,boxsize=1500))
            else:
                noise.append(get_rms(hdu))
        hdus.append(flatten(hdu))
        imagefilename=d+'/'+appname
        print('Reading image file',imagefilename)
        if args.do_stokesV:
            tmp = fits.open(imagefilename)
            tmp[0].data[0][0] = tmp[0].data[0][1]
            app.append(flatten(tmp))
        else:
            app.append(flatten(fits.open(imagefilename)))
        if bth:
            astromaps.append(flatten(fits.open(d+'/astromap.fits')))
        if args.read_noise:
            noisename=d+'/'+appname.replace('.fits','_facetnoise.fits')
            if not os.path.isfile(noisename):
                g=glob.glob(d+'/*.tessel.reg')
                get_rms_map3(d+'/'+appname,g[0],noisename,database=False)
            noisefiles.append(flatten(fits.open(noisename)))
        if args.apply_shift or args.facet_only:
            print('Reading the tessel file')
            g=glob.glob(d+'/*.tessel.reg')
            if len(g)==0:
                raise RuntimeError('apply_shift specified but no tessel file present in '+d)
            else:
                polylists.append(convert_regionfile_to_poly(g[0]))
            if args.apply_shift:
                print('Reading the shift file and tessel file')
                t=Table.read(d+'/pslocal-facet_offsets.fits')
                bad=(t['RA_peak']/t['RA_peak_error']<2) | (t['DEC_peak']/t['DEC_peak_error']<2)
                print('Found',np.sum(bad),'bad fits')
                # NB: index the column first -- t[bad]['RA_offset']=... writes
                # to a *copy* of the table and silently changes nothing.
                if np.all(bad):
                    print('All bad, zeroing out offsets')
                    t['RA_offset'][bad]=0
                    t['DEC_offset'][bad]=0
                else:
                    print('Replacing bad fits with median shift')
                    ra_median=np.median(t[~bad]['RA_offset'])
                    dec_median=np.median(t[~bad]['DEC_offset'])
                    t['RA_offset'][bad]=ra_median
                    t['DEC_offset'][bad]=dec_median
                shifts.append(t)
            else:
                print('Generating a dummy shift file')
                t=Table([np.zeros(len(polylists[-1])),np.zeros(len(polylists[-1]))],names=('RA_offset','DEC_offset'))
                shifts.append(t)
        else:
            shifts.append(None)
            polylists.append(None)
        if args.use_badfacet:
            badfacetfile=d+'/Badfacets.txt'
            if os.path.isfile(badfacetfile):
                print('Reading the bad facet file')
                lines=open(badfacetfile).readlines()
                bflist=eval(','.join(lines[1].rstrip().split(',')[1:]))
                badfacets.append(bflist)
                print('Adding',len(bflist),'bad facets')
            else:
                badfacets.append([])
        else:
            badfacets.append(None)
    if args.find_noise:
        args.noise=noise
        print('Noise values are:')
        for t,n in zip(name,noise):
            print(t,n)
    print('Computing noise/beam factors...')
    for i in range(len(app)):
        np.seterr(divide='ignore')
        app[i].data=np.divide(app[i].data,hdus[i].data)
        app[i].data[app[i].data<threshold]=0
        # at this point this is the beam factor: we want 1/sigma**2.0, so divide by noise and square
        if args.noise is not None:
            app[i].data/=args.noise[i]
        elif noisefiles:
            app[i].data/=noisefiles[i].data
        app[i].data=app[i].data**2.0
    for i in range(len(app)):
        wcs.append(WCS(hdus[i].header))
    # astromap blanking
    if bth:
        print('Blanking using astrometry quality maps with threshold',bth,'arcsec')
        for i in range(len(app)):
            outname=rootname+'astroblank-'+name[i]+'.fits'
            if args.load and os.path.isfile(outname):
                print('Loading previously blanked image')
                hdu=fits.open(outname)
                hdus[i].data=hdu[0].data
            else:
                print('Blanking image',i)
                dmaxy,dmaxx=hdus[i].data.shape
                count=0
                am=astromaps[i]
                awcs=WCS(am.header)
                maxy,maxx=am.data.shape
                for y in range(maxy):
                    for x in range(maxx):
                        value=am.data[y,x]
                        if np.isnan(value):
                            # Fall back to the pixel above when unmeasured.
                            if y<maxy-1:
                                value=am.data[y+1,x]
                        if value>bth:
                            ra,dec=[float(f) for f in awcs.wcs_pix2world(x,y,0)]
                            rx,ry=[int(p) for p in wcs[i].wcs_world2pix(ra,dec,0)]
                            rxp=rx+21 # astromap pix size, with margin
                            ryp=ry+21
                            if rx<0: rx=0
                            if ry<0: ry=0
                            if rxp>dmaxx: rxp=dmaxx
                            if ryp>dmaxy: ryp=dmaxy
                            hdus[i].data[ry:ryp,rx:rxp]=np.nan
                            count+=1
                print('... blanked',count*900.0/3600,'square arcmin')
                if args.save: hdus[i].writeto(outname,overwrite=True)
            app[i].data[np.isnan(hdus[i].data)]=np.nan
    # If the header is directly passed in, use it
    try:
        header=args.header
        xsize=header['NAXIS1']
        ysize=header['NAXIS2']
        print('Mosaic using header passed from calling program')
    except (AttributeError, KeyError, TypeError):
        # No usable header attribute on args: build or load one below.
        header=None
    if header is None:
        if args.load_layout:
            # pickle files are binary: the matching dump below uses 'wb', so
            # load must use 'rb' (text mode makes pickle.load fail).
            with open(rootname+'mosaic-header.pickle','rb') as f:
                header=pickle.load(f)
            xsize=header['NAXIS1']
            ysize=header['NAXIS2']
            print('Mosaic using loaded header')
        else:
            print('Creating the mosaic header')
            ras=np.array([w.wcs.crval[0] for w in wcs])
            decs=np.array([w.wcs.crval[1] for w in wcs])
            mra=np.mean(ras)
            mdec=np.mean(decs)
            print('Will make mosaic at',mra,mdec)
            # we make a reference WCS and use it to find the extent in pixels
            # needed for the combined image
            rwcs=WCS(naxis=2)
            rwcs.wcs.ctype=wcs[0].wcs.ctype
            rwcs.wcs.cdelt=wcs[0].wcs.cdelt
            rwcs.wcs.crval=[mra,mdec]
            rwcs.wcs.crpix=[1,1]
            xmin=0
            xmax=0
            ymin=0
            ymax=0
            for a,w in zip(app,wcs):
                ys,xs=np.where(a.data)
                axmin=xs.min()
                aymin=ys.min()
                axmax=xs.max()
                aymax=ys.max()
                del(xs)
                del(ys)
                print('non-zero',axmin,aymin,axmax,aymax)
                # Project the four corners of each image into the reference
                # frame and grow the global bounding box.
                for x,y in ((axmin,aymin),(axmax,aymin),(axmin,aymax),(axmax,aymax)):
                    ra,dec=[float(f) for f in w.wcs_pix2world(x,y,0)]
                    nx,ny=[float (f) for f in rwcs.wcs_world2pix(ra,dec,0)]
                    print(nx,ny)
                    if nx<xmin: xmin=nx
                    if nx>xmax: xmax=nx
                    if ny<ymin: ymin=ny
                    if ny>ymax: ymax=ny
            print('co-ord range:', xmin, xmax, ymin, ymax)
            xsize=int(xmax-xmin)
            ysize=int(ymax-ymin)
            rwcs.wcs.crpix=[-int(xmin)+1,-int(ymin)+1]
            print('checking:', rwcs.wcs_world2pix(mra,mdec,0))
            print(rwcs)
            header=rwcs.to_header()
            header['NAXIS']=2
            header['NAXIS1']=xsize
            header['NAXIS2']=ysize
            with open(rootname+'mosaic-header.pickle','wb') as f:
                pickle.dump(header,f)
    # ----------------------------------------
    # mosaic main loop
    # ----------------------------------------
    isum=np.zeros([ysize,xsize],dtype=np.float32)
    wsum=np.zeros_like(isum)
    # Plain `bool`: np.bool was deprecated and removed in NumPy 1.24.
    mask=np.zeros_like(isum,dtype=bool)
    print('now making the mosaic')
    for i in range(len(hdus)):
        print('image',i,'(',name[i],')')
        outname=rootname+'reproject-'+name[i]+'.fits'
        if args.load and os.path.exists(outname):
            print('loading...')
            hdu=fits.open(outname)
            r=hdu[0].data
        else:
            print('reprojecting...')
            r, footprint = do_reproj_mp(reproj, hdus[i], header, shift=shifts[i],polylist=polylists[i],badfacet=badfacets[i])
            r[np.isnan(r)]=0
            hdu = fits.PrimaryHDU(header=header,data=r)
            if args.save: hdu.writeto(outname,overwrite=True)
        print('weights',i,'(',name[i],')')
        outname=rootname+'weight-'+name[i]+'.fits'
        if args.load and os.path.exists(outname):
            print('loading...')
            hdu=fits.open(outname)
            w=hdu[0].data
            mask|=(w>0)
        else:
            print('reprojecting...')
            w, footprint = do_reproj_mp(reproj, app[i], header, shift=shifts[i],polylist=polylists[i],badfacet=badfacets[i])
            mask|=~np.isnan(w)
            w[np.isnan(w)]=0
            hdu = fits.PrimaryHDU(header=header,data=w)
            if args.save: hdu.writeto(outname,overwrite=True)
        print('add to mosaic...')
        if args.scale is not None:
            print('Applying scale %s to %s'%(args.scale[i],name[i]))
            r = r*args.scale[i]
            w /= args.scale[i]**2.0
        isum+=r*w
        wsum+=w
    if not(args.no_write):
        isum/=wsum
        # mask now contains True where a non-nan region was present in either map
        isum[~mask]=np.nan
        for ch in ('BMAJ', 'BMIN', 'BPA'):
            try:
                header[ch]=hdus[0].header[ch]
            # Exception for Stokes V images which don't have a BMAJ
            except KeyError:
                print('No entry in header for %s and not creating one'%ch)
        header['ORIGIN']='ddf-pipeline '+version()
        hdu = fits.PrimaryHDU(header=header,data=isum)
        mosname='mosaic.fits'
        hdu.writeto(rootname+mosname,overwrite=True)
        hdu = fits.PrimaryHDU(header=header,data=wsum)
        hdu.writeto(rootname+mosname.replace('.fits','-weights.fits'),overwrite=True)
        return rootname+mosname
    else:
        # Nothing was written. Previously this path fell through to
        # `return rootname+mosname` with mosname=None, raising TypeError.
        return None
# Command-line entry point: parse the mosaicing options and run make_mosaic.
if __name__=='__main__':
    parser = argparse.ArgumentParser(description='Mosaic ddf-pipeline directories')
    parser.add_argument('--directories', metavar='D', nargs='+',
                        help='directory name')
    parser.add_argument('--rootname', dest='rootname', default='', help='Root name for output files, default uses no prefix')
    parser.add_argument('--beamcut', dest='beamcut', default=0.3, help='Beam level to cut at')
    parser.add_argument('--convolve', default=None, help='Resolution in arcsec to convolve to')
    parser.add_argument('--band', dest='band', default=None, help='Band number to mosaic, leave unset for full-bw image')
    parser.add_argument('--exact', dest='exact', action='store_true', help='Do exact reprojection (slow)')
    parser.add_argument('--save', dest='save', action='store_true', help='Save intermediate images')
    parser.add_argument('--load', dest='load', action='store_true', help='Load existing intermediate images')
    parser.add_argument('--noise', dest='noise', type=float, nargs='+', help='UNSCALED Central noise level for weighting: must match numbers of maps')
    parser.add_argument('--scale', dest='scale', type=float, nargs='+', help='Scale factors by which maps should be multiplied: must match numbers of maps')
    parser.add_argument('--use_shifted', dest='use_shifted', action='store_true', help='Use the shifted images from the pipeline')
    #parser.add_argument('--shift', dest='shift', action='store_true', help='Shift images before mosaicing')
    parser.add_argument('--apply_shift', action='store_true', help='Apply per-facet shift from an offset file')
    parser.add_argument('--facet_only', action='store_true', help='Do not do per-facet shift, but mosaic per facet')
    parser.add_argument('--no_write', dest='no_write', action='store_true', help='Do not write final mosaic')
    parser.add_argument('--find_noise', dest='find_noise', action='store_true', help='Find noise from image')
    parser.add_argument('--read_noise', action='store_true', help='Read noise from a pre-existing per-facet noise file')
    parser.add_argument('--use_badfacet', action='store_true', help='Read a bad facet file')
    parser.add_argument('--do_lowres',dest='do_lowres', action='store_true', help='Mosaic low-res images instead of high-res')
    parser.add_argument('--do_vlow',dest='do_vlow', action='store_true', help='Mosaic vlow images instead of high-res')
    parser.add_argument('--do_wsclean',dest='do_wsclean', action='store_true', help='Mosaic subtracted vlow images instead of high-res')
    parser.add_argument('--do_stokesV',dest='do_stokesV', action='store_true', help='Mosaic stokes V images instead of high-res')
    parser.add_argument('--astromap_blank',dest='astromap_blank', help='')
    parser.add_argument('--load_layout', dest='load_layout', action='store_true', help='Load a previously defined mosaic layout rather than determining from the images.')
    args = parser.parse_args()
    make_mosaic(args)
|
mhardcastleREPO_NAMEddf-pipelinePATH_START.@ddf-pipeline_extracted@ddf-pipeline-master@scripts@mosaic.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/waterfall/legendgrouptitle/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self,
plotly_name="shadow",
parent_name="waterfall.legendgrouptitle.font",
**kwargs,
):
super(ShadowValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@waterfall@legendgrouptitle@font@_shadow.py@.PATH_END.py
|
{
"filename": "determine_version.py",
"repo_name": "scottransom/presto",
"repo_path": "presto_extracted/presto-master/determine_version.py",
"type": "Python"
}
|
import subprocess
import sys

# Determine the version info from git.
# `git describe --tags --long` prints "<tag>-<ncommits>-g<hash>".
result = subprocess.run(
    ["git", "describe", "--tags", "--long"], capture_output=True, text=True
).stdout
# rsplit from the right: the tag itself may contain dashes (e.g. "v1.0-rc1"),
# which would break a plain split("-"); strip() removes the trailing newline
# that would otherwise be carried on the commit hash.
tag, nplus, commit = result.strip().rsplit("-", 2)
if len(sys.argv) > 1 and sys.argv[1] in ["-v", "--show"]:
    print(f"Last tag: {tag}")
    print(f"Number of commits since tag: {nplus}")
    print(f"Commit object name: {commit[1:] if commit.startswith('g') else commit}")
# Strip a leading v if it is there
tag = f"{tag[1:] if tag.startswith('v') else tag}"
# This should be a legal Python version format (a PEP 440 dev release when
# there are commits after the tag, otherwise the tag itself)
version = f"{tag}.dev{nplus}" if int(nplus) else tag
print(version)
if len(sys.argv) > 1 and sys.argv[1] in ["-w", "--write"]:
    # Update the version info in meson.build and the python pyproject.toml files
    with open("meson.build", "r+") as f:
        ll = f.readlines()
        f.seek(0)
        for line in ll:
            ls = line.split()
            if len(ls) >= 2:
                # Replace the `version:` line in the project() block.
                if ls[0]=="version:":
                    f.write(f" version: '{version}',\n")
                    continue
            f.write(line)
    # Update the version info in meson.build and the python pyproject.toml files
    with open("python/pyproject.toml", "r+") as f:
        ll = f.readlines()
        f.seek(0)
        for line in ll:
            ls = line.split()
            if len(ls) >= 2:
                # Replace the `version = ...` assignment.
                if ls[0]=="version":
                    f.write(f"version = '{version}'\n")
                    continue
            f.write(line)
|
scottransomREPO_NAMEprestoPATH_START.@presto_extracted@presto-master@determine_version.py@.PATH_END.py
|
{
"filename": "test_interpolate_flat.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/flatfield/tests/test_interpolate_flat.py",
"type": "Python"
}
|
"""
Test for flat_field.interpolate_flat
"""
import numpy as np
from jwst.flatfield.flat_field import interpolate_flat
# Synthetic cube/image dimensions shared by all the tests below.
nz = 6
ny = 5
nx = 7
# Wavelength assigned to each plane of a 3-D flat (one value per plane).
image_wl = np.arange(nz, dtype=np.float32) + 4.5
# Per-pixel wavelength map for the science image, shape (ny, nx).
wl = np.arange(nx * ny, dtype=np.float32).reshape(ny, nx) - 5.0
def test_interpolate_flat_1():
    """A genuinely 2-D flat is returned unchanged along with its DQ and error."""
    flat_2d = np.arange(nx * ny, dtype=np.float32).reshape(ny, nx) - 5.0
    err_2d = np.arange(nx * ny, dtype=np.float32).reshape(ny, nx) - 7.0
    dq_2d = np.arange(nx * ny, dtype=np.int32).reshape(ny, nx)
    # Since image_flat is 2-D, the inputs will be returned unchanged.
    result = interpolate_flat(flat_2d, dq_2d, err_2d, image_wl, wl)
    assert np.allclose(result[0], flat_2d, atol=1.0e-6)
    assert np.allclose(result[1], dq_2d, atol=0)
    assert np.allclose(result[2], err_2d, atol=1.0e-6)
def test_interpolate_flat_2():
    """Flat/err with a single plane (shape (1, ny, nx)) collapse to 2-D output."""
    flat_cube = np.arange(nx * ny, dtype=np.float32).reshape(1, ny, nx) - 5.0
    err_cube = np.arange(nx * ny, dtype=np.float32).reshape(1, ny, nx) - 7.0
    dq_2d = np.arange(nx * ny, dtype=np.int32).reshape(ny, nx)  # stays 2-D

    flat, dq, err = interpolate_flat(flat_cube, dq_2d, err_cube, image_wl, wl)

    # The single plane is squeezed out; values are otherwise untouched.
    assert np.allclose(flat, flat_cube[0, :, :], atol=1.0e-6)
    assert np.allclose(dq, dq_2d, atol=0)
    assert np.allclose(err, err_cube[0, :, :], atol=1.0e-6)
def test_interpolate_flat_3():
    """As test 2, but the DQ array is also a single-plane cube."""
    flat_cube = np.arange(nx * ny, dtype=np.float32).reshape(1, ny, nx) - 5.0
    err_cube = np.arange(nx * ny, dtype=np.float32).reshape(1, ny, nx) - 7.0
    dq_cube = np.arange(nx * ny, dtype=np.int32).reshape(1, ny, nx)

    flat, dq, err = interpolate_flat(flat_cube, dq_cube, err_cube, image_wl, wl)

    # All three single-plane cubes are squeezed to 2-D, values unchanged.
    assert np.allclose(flat, flat_cube[0, :, :], atol=1.0e-6)
    assert np.allclose(dq, dq_cube[0, :, :], atol=0)
    assert np.allclose(err, err_cube[0, :, :], atol=1.0e-6)
def test_interpolate_flat_4():
    """Genuine 3-D cube: verify interpolated flat, propagated DQ and error."""
    flat_cube = np.arange(nx * ny * nz, dtype=np.float32).reshape(nz, ny, nx) - 5.0
    err_cube = np.arange(nx * ny * nz, dtype=np.float32).reshape(nz, ny, nx) - 7.0
    dq_cube = np.zeros((nz, ny, nx), dtype=np.uint32)
    dq_cube[:, 1, 3] = 2  # flag one pixel column through all planes

    flat, dq, err = interpolate_flat(flat_cube, dq_cube, err_cube, image_wl, wl)

    want_flat = np.array(
        [
            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [1.0, 1.0, 1.0, 22.5, 58.5, 94.5, 130.5],
            [166.5, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
            [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
        ],
        dtype=np.float32,
    )
    want_dq = np.array(
        [
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 2, 0, 0, 0],
            [0, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
            [1, 1, 1, 1, 1, 1, 1],
        ],
        dtype=np.uint32,
    )
    want_err = np.array(
        [
            [-339.5, -303.5, -267.5, -231.5, -195.5, -159.5, -123.5],
            [-87.5, -51.5, -15.5, 20.5, 56.5, 92.5, 128.5],
            [164.5, 200.5, 236.5, 272.5, 308.5, 344.5, 380.5],
            [416.5, 452.5, 488.5, 524.5, 560.5, 596.5, 632.5],
            [668.5, 704.5, 740.5, 776.5, 812.5, 848.5, 884.5],
        ],
        dtype=np.float32,
    )

    assert np.allclose(flat, want_flat, atol=1.0e-6)
    assert np.allclose(dq, want_dq, atol=0)
    assert np.allclose(err, want_err, atol=1.0e-6)
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@flatfield@tests@test_interpolate_flat.py@.PATH_END.py
|
{
"filename": "ascii2mpw.py",
"repo_name": "Caltech-IPAC/Montage",
"repo_path": "Montage_extracted/Montage-main/lib/src/freetype-2.9.1/builds/mac/ascii2mpw.py",
"type": "Python"
}
|
#!/usr/bin/env python
import sys
import string
if len( sys.argv ) == 1 :
    # Forward mode: expand escaped ASCII into MPW's MacRoman bytes and
    # classic-Mac newline convention.
    # BUG FIX: string.replace() was removed in Python 3; the equivalent
    # str method works identically on Python 2 and 3.
    for asc_line in sys.stdin.readlines():
        mpw_line = asc_line.replace("\\xA5", "\245")
        mpw_line = mpw_line.replace("\\xB6", "\266")
        mpw_line = mpw_line.replace("\\xC4", "\304")
        mpw_line = mpw_line.replace("\\xC5", "\305")
        mpw_line = mpw_line.replace("\\xFF", "\377")
        # Unix LF -> Mac CR first, then restore literal "\n" escapes to LF.
        mpw_line = mpw_line.replace("\n", "\r")
        mpw_line = mpw_line.replace("\\n", "\n")
        sys.stdout.write(mpw_line)
elif sys.argv[1] == "-r" :
    # Reverse mode: escape MacRoman bytes back into portable ASCII.
    for mpw_line in sys.stdin.readlines():
        asc_line = mpw_line.replace("\n", "\\n")
        asc_line = asc_line.replace("\r", "\n")
        asc_line = asc_line.replace("\245", "\\xA5")
        asc_line = asc_line.replace("\266", "\\xB6")
        asc_line = asc_line.replace("\304", "\\xC4")
        asc_line = asc_line.replace("\305", "\\xC5")
        asc_line = asc_line.replace("\377", "\\xFF")
        sys.stdout.write(asc_line)
|
Caltech-IPACREPO_NAMEMontagePATH_START.@Montage_extracted@Montage-main@lib@src@freetype-2.9.1@builds@mac@ascii2mpw.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2dcontour/line/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``color`` property of ``histogram2dcontour.line``."""

    def __init__(
        self, plotly_name="color", parent_name="histogram2dcontour.line", **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # pop so an explicit caller-supplied edit_type is not duplicated
            # in **kwargs; "style+colorbars" is the default edit behavior.
            edit_type=kwargs.pop("edit_type", "style+colorbars"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2dcontour@line@_color.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/camera/eye/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# On Python < 3.7 there is no module-level __getattr__ (PEP 562), so the
# validators must be imported eagerly; type checkers also need the real
# imports.  Otherwise install lazy imports to keep package import time low.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._z import ZValidator
    from ._y import YValidator
    from ._x import XValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import wires up __getattr__/__dir__ so each validator module
    # is only imported on first attribute access.
    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._z.ZValidator", "._y.YValidator", "._x.XValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@camera@eye@__init__.py@.PATH_END.py
|
{
"filename": "sstphot_info.py",
"repo_name": "kevin218/POET",
"repo_path": "POET_extracted/POET-master/code/lib/sstphot_info.py",
"type": "Python"
}
|
import numpy as np
# properties for GJ436b;
def gj436b(info):
    """Fill *info* with measured stellar and planetary parameters for GJ 436b.

    Assumes *info* already carries the unit constants set by univ()
    (lsun, rsun, rjup, au).  Returns the same object for chaining.
    """
    # measured stellar parameters
    #info.ra = sexi2dec('11:42:11.0941') / 12e0 * pi # SIMBAD
    #info.dec = sexi2dec('+26:42:23.652') / 180e0 * pi # SIMBAD
    info.parallax = 97.73e-3 # mas, SIMBAD
    info.parallaxerr = 2.27e-3 # ", SIMBAD
    info.lstar = 0.019 * info.lsun # Wikipedia
    info.lstarerr = 0.019 * info.lsun # FINDME:UNKNOWN ERROR
    info.rstar = 0.464e0 * info.rsun # m, Rsun, Torres 2007
    info.rstarerr = 0.010e0 * info.rsun # m, Rsun, Torres 2007
    info.metalstar = -0.32e0 # [Fe/ H], C. Bean et al. 2006
    info.metalstarerr = 0.12e0 # [Fe/ H], C. Bean et al. 2006
    info.tstar = 3684e0 # K, Torres 2007
    info.tstarerr = 71e0 # K, Torres 2007
    info.logg = 4.80e0 # alog10(cm s-2), C. Bean et al. 2006
    info.loggerr = 0.10e0 # alog10(cm s-2), C. Bean et al. 2006
    # measured planet parameters
    info.rplan = 0.438e0 * info.rjup # m, Bean et al. 2008
    info.rplanerr = 0.040e0 * info.rjup # m, Bean et al. 2008
    info.semimaj = 0.02872e0 * info.au # Bean et al. 2008
    info.semimajerr = 0.00006e0 * info.au # Bean et al. 2008
    info.incl = 85.8e0 * np.pi / 180. # Bean et al. 2008
    info.inclerr = 0.25e0 * np.pi / 180. # Bean et al. 2008
    info.ephtime = 2454222.616e0 # days, Gillon et al. 2007
    info.ephtimeerr = 0.001e0 # days, Gillon et al. 2007
    info.period = 2.643904e0 # Bean et al. 2008
    info.perioderr = 0.000005e0 # Bean et al. 2008
    info.transdur = 0.96e0 / 24.e0 # FINDME: Reference this value
    info.transdurerr = 0.96e0 / 24.e0 # FINDME: Need to find FINDME:OLDVALUE
    info.arat = (info.rplan / info.rstar) ** 2 # Area Ratio FINDME: tentative
    # standard error propagation for the squared radius ratio
    info.araterr = (info.arat)*2*((info.rplanerr/info.rplan)**2 + (info.rstarerr/info.rstar)**2)**0.5
    return info
# orbit data for HAT-P-1b
def hatp1b(info):
    """Fill *info* with measured stellar and planetary parameters for HAT-P-1b.

    Assumes *info* already carries the unit constants set by univ()
    (lsun, rsun, rjup, au).  Returns the same object for chaining.
    """
    # measured stellar parameters
    info.ra = 0 #sexi2dec('22:57:46.825') / 12e0 * np.pi #
    info.dec = 0 #sexi2dec('38:40:29.83') / 180e0 * np.pi #
    # RA & Dec from SIMBAD which references Roeser & Bastian 1988
    info.parallax = 7.194e-3 # mas, FINDME:CITESOURCE
    info.parallaxerr = 1.139e-3 # ", FINDME:CITESOURCE
    info.lstar = 1.51e0 * info.lsun # W, FINDME:CITESOURCE
    #info.lstarerr = x.xxd * info.lsun # W, FINDME:CITESOURCE
    info.rstar = 1.115e0 * info.rsun # m, Rsun, Johnson et al. 2008
    info.rstarerr = 0.050e0 * info.rsun # m, Rsun, Johnson et al. 2008
    info.metalstar = 0.13e0 # [Fe/ H], Bakos et al. 2007
    info.metalstarerr = 0.02e0 # [Fe/ H], Bakos et al. 2007
    info.tstar = 5975e0 # K, Bakos et al. 2007
    info.tstarerr = 45e0 # K, Bakos et al. 2007
    info.logg = 4.45e0 # alog10(cm s-2), Bakos et al. 2007
    info.loggerr = 0.06e0 # alog10(cm s-2), Bakos et al. 2007
    # measured planet parameters
    info.rplan = 1.225e0 * info.rjup # m, Johnson et al. 2008
    info.rplanerr = 0.056e0 * info.rjup # m, Johnson et al. 2008
    info.semimaj = 0.0553e0 * info.au # Johnson et al. 2008
    info.semimajerr = 0.0014e0 * info.au # Johnson et al. 2008
    info.incl = 86.28e0 * np.pi / 180e0 # Johnson et al. 2008
    info.inclerr = 0.20e0 * np.pi / 180e0 # Johnson et al. 2008
    info.ephtime = 2454363.94656e0 # days, Johnson et al. 2008
    info.ephtimeerr = 0.00072e0 # days, Johnson et al. 2008
    info.period = 4.4652934e0 # days, Johnson et al. 2008
    info.perioderr = 0.000093e0 # days, Johnson et al. 2008
    info.transdur = 2.798e0 # hours, Johnson et al. 2008
    info.transdurerr = 0.019e0 # hours, Johnson et al. 2008
    info.arat = 0.11295 ** 2 # Johnson et al. 2008
    info.araterr = 0.00073 * 1.414 # Johnson et al. 2008
    return info
# set universal constants in info structure
def univ(info):
    """Attach universal physical constants and unit conversions to *info*.

    Parameters
    ----------
    info : object
        Any object that accepts attribute assignment.

    Returns
    -------
    The same *info* object, with the constants set as attributes.
    """
    # NOTE(review): removed a redundant function-local "import numpy as np";
    # numpy is already imported at module level.
    # conversions
    # steradians per sq. arcsecond, from MDH 2.1, p. 25., sect. 3.7.1: 2.35045e-11
    info.srperas = 4.e0 * np.pi / ((360.e0 * 60.e0 * 60.e0) ** 2 / np.pi)
    # 1d-6 converts uJy to Jy, 1d-26 converts Jy to W m^-2 Hz^-1
    info.ujy2mks = 1e-6 * 1e-26
    # time
    info.mjdoff = 2400000.5e0
    info.j2kjd = 2451545.e0 # Julian date of J2000.0=1.5 Jan 2000 (see ESAA ch 27)
    # km per parsec
    info.mppc = 3.0856776e16 # m, AllenII, p.12
    # units
    info.lsun = 3.827e26 # W, Wikipedia
    info.rsun = 695508.e3 # m, +- 26 km, AllenII
    info.rjup = 71492.e3 # m, AllenII
    info.au = 1.4959787066e11 # m, AllenII
    info.stefboltz = 5.67040e-8 # J m^-2 s^-1 K^-4, Wikipedia
    # see also AllenII, p.11 (less accurate)
    info.c = 2.99792458e8 # m/s, speed of light
    info.h = 6.6260693e-34 # J s, Planck's constant, Wikipedia
    info.k = 1.3806503e-23 # J/K, Boltzmann's constant, Google
    return info
|
kevin218REPO_NAMEPOETPATH_START.@POET_extracted@POET-master@code@lib@sstphot_info.py@.PATH_END.py
|
{
"filename": "nsc_coadd.py",
"repo_name": "astro-datalab/nsc",
"repo_path": "nsc_extracted/nsc-master/python/nsc/nsc_coadd.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# NSC_COADD.PY This is used to create coadds for the NSC.
#
__authors__ = 'David Nidever <dnidever@montana.edu>'
__version__ = '20170911' # yyyymmdd
"""
Software to create an NSC coadd.
"""
import os
import sys
import numpy as np
#import scipy
import socket
import warnings
from astropy.io import fits
from astropy.utils.exceptions import AstropyWarning
from astropy.wcs import WCS
#from skimage import measure, morphology
#from scipy.signal import argrelmin
#import scipy.ndimage.filters as filters
import time
import glob
import logging
import sep
import healpy as hp
from reproject import reproject_interp
import tempfile
from dlnpyutils import utils as dln, coords #db
#from . import coadd
import coadd
import db
def rootdirs(host=None):
    """Return the (dldir, mssdir, localdir) root directories for a machine.

    Parameters
    ----------
    host : str, optional
        Host name to use; defaults to this machine's host name.

    Returns
    -------
    tuple of str
        (dldir, mssdir, localdir)

    Raises
    ------
    ValueError
        If the host is not a known NSC machine.  The original version fell
        off the end and implicitly returned None, which made callers fail
        later with an opaque "cannot unpack NoneType" error.
    """
    if host is None:
        host = socket.gethostname()
    shost = host.split('.')[0]
    if shost in ('thing', 'hulk'):
        return ('/net/dl2', '/mss1', '/data0')
    if shost in ('gp06', 'gp07', 'gp08', 'gp09'):
        return ('/net/dl2', '/net/mss1', '/data0')
    raise ValueError('Unrecognized host: ' + host)
# OLD NOTES
#-give list of FITS files (possible with extensions) and mask/noise file info, WCS/output header and coadd method
#-loop through each file and resample onto final projection (maybe write to temporary file)
# do same for error and mask images
#-perform the coadd method, simplest is just sum/average or median
#-write output file
# weighted sum/average/median
# Need to deal with background level and scale
# calculate background value myself and maybe use photometric zero-point for the scale.
# does that take the exposure time into account as well? I think so.
# Use the astropy reproject package
# NEW NOTES
# When creating the NSC stacks, also create a deep/multi-band stack.
# coadd steps:
# -loop over each exposure that overlaps the image
# -homogenize masks
# -fix "bad" pixels in wt and flux arrays
# -fit and subtract 2D background
# -resample each overlapping chip onto the final brick WCS (flux preserving)
# be careful when interpolating near bad pixels
# -save flux/wt/mask/sky to a temporary directory to use later
# -once all of the resampling is done, figure out the relative weights for all the exposures
# and the final scaling
# -use NSC zeropoints to figure out relative weighting between the exposures
# -need to be sure that the images are SCALED properly as well (takes into exptime/zeropoint)
# -depending on the number of overlapping exposures, break up the break into subregions which
# will be combined separately. need to make sure to use consistent weighting/scaling/zero
# otherwise there'll be jumps at these boundaries
# -weighted/scaled combine in each subregion taking the mask and wt image into account properly.
# -how about outlier rejection?
# -fpack compress the final stacked images (flux, mask, weight) similar to how the CP files are done
# -PSF-matching???
# -with the exposure zpterm and exptime it should be possible to calculate the SCALING of the exposure
# -should I use that also for the weight? should take FWHM/SEEING into account.
# in allframe_calcweights/getweights I use weight~S/N
# S/N goes as sqrt(exptime) and in background-dominated regime S/N ~ 1/FWHM
# so maybe something like weight ~ sqrt(scaling)/FWHM
# how about teff? see eqn. 4, pg.10 of Morganson+2018
# teff = (FWHM_fid/FwHM)^2 * (B_fid/B) * F_trans
# B = background
# F_trans = atmospheric transmission relative to a nearly clear night, basically zeropoint
# F_trans = 10^(-0.8*(delta_mag-0.2))
#
# teff is the ratio between the actual exposure time and the exposure time necessary to achieve
# the same signal-to-noise for point sources observed in nominal conditions. An exposure taken
# under “fiducial” conditions has teff = 1.
#
# see section 6.3 for how DES performs the coadds/stacks, they create "chi-mean" coadds of r/i/z
# also see Drlica-Wagner+2018, DES Y1 Cosmology results
#
# -ccdproc.combine
# https://ccdproc.readthedocs.io/en/latest/image_combination.html#id1
# -reproject.reproject_interp
# https://reproject.readthedocs.io/en/stable/
# need to check the reprojection algorithms, doesn't support lanczos
# reproject also has a coadd/mosaicking software and can take weights, and can do background matching
# from reproject.mosaicking import reproject_and_coadd
# could also use swarp
# https://www.astromatic.net/pubsvn/software/swarp/trunk/doc/swarp.pdf
def getbrickinfo(brick, version='v3'):
    """Look up the bricks-table entry for the named brick and return it."""
    dldir, mssdir, localdir = rootdirs()
    dbfile = dldir + '/dnidever/nsc/instcal/' + version + '/lists/nsc_bricks.db'
    return db.query(dbfile, table='bricks', cols='*',
                    where='brickname="' + brick + '"')
def getbrickexposures(brick,band=None,version='v3'):
    """Get exposures information that overlap a brick.

    Parameters
    ----------
    brick : str
        Brick name.
    band : str, optional
        If given, keep only exposures taken in this filter.
    version : str, optional
        NSC reduction version (default 'v3').

    Returns
    -------
    Exposure table rows overlapping the brick, or None when nothing overlaps.
    """
    # Directories
    dldir, mssdir, localdir = rootdirs()
    # Get brick information
    brickdata = getbrickinfo(brick,version=version)
    # Healpix information: the nside=128 pixel containing the brick center.
    pix128 = hp.ang2pix(128,brickdata['ra'],brickdata['dec'],lonlat=True)
    # neighbors -- include adjacent pixels so edge-overlapping chips are found
    neipix = hp.get_all_neighbours(128,pix128)
    # Get all of the exposures overlapping this region
    meta_dbfile = dldir+'/dnidever/nsc/instcal/'+version+'/lists/nsc_meta.db'
    allpix = np.hstack((neipix.flatten(),pix128))
    whr = ' or '.join(['ring128=='+h for h in allpix.astype(str)])
    chipdata = db.query(meta_dbfile, table='chip', cols='*', where=whr)
    # Do more overlap checking: build the brick's corner polygon in a
    # gnomonic projection centered on the brick, then test each chip polygon.
    brickvra = np.hstack((brickdata['ra1'],brickdata['ra2'],brickdata['ra2'],brickdata['ra1']))
    brickvdec = np.hstack((brickdata['dec1'],brickdata['dec1'],brickdata['dec2'],brickdata['dec2']))
    brickvlon,brickvlat = coords.rotsphcen(brickvra,brickvdec,brickdata['ra'],brickdata['dec'],gnomic=True)
    olap = np.zeros(len(chipdata),bool)
    for i in range(len(chipdata)):
        vra = np.hstack((chipdata['vra1'][i],chipdata['vra2'][i],chipdata['vra2'][i],chipdata['vra1'][i]))
        vdec = np.hstack((chipdata['vdec1'][i],chipdata['vdec1'][i],chipdata['vdec2'][i],chipdata['vdec2'][i]))
        vlon,vlat = coords.rotsphcen(vra,vdec,brickdata['ra'],brickdata['dec'],gnomic=True)
        olap[i] = coords.doPolygonsOverlap(vlon,vlat,brickvlon,brickvlat)
    ngdch = np.sum(olap)
    if ngdch==0:
        print('No exposures overlap brick '+brick)
        return None
    chipdata = chipdata[olap]
    exposure = np.unique(chipdata['exposure'])
    # Get the exposure data for the unique exposures found above
    whr = ' or '.join(['exposure=="'+e+'"' for e in exposure.astype(str)])
    expdata = db.query(meta_dbfile, table='exposure', cols='*', where=whr)
    # Check band
    if band is not None:
        gband, = np.where(expdata['filter']==band)
        if len(gband)==0:
            print('No '+band+' exposures overlap brick '+brick)
            return None
        expdata = expdata[gband]
    nexp = len(expdata)
    print(str(nexp)+' exposures overlap brick '+brick)
    return expdata
def nsc_coadd(brick, band=None, version='v3'):
    """Create a coadd for one NSC brick.

    Parameters
    ----------
    brick : str
        Brick name.
    band : str, optional
        Restrict the stack to exposures in this band/filter.
    version : str, optional
        NSC reduction version (default 'v3').
    """
    # Make sure to fix the WCS using the coefficients fit with Gaia DR2
    # that are in the meta files.
    # Get brick information
    brickdata = getbrickinfo(brick, version=version)
    # Get information on the overlapping exposures
    expdata = getbrickexposures(brick, band=band, version=version)
    # Create the WCS for this brick
    brickwcs, brickhead = coadd.brickwcs(brickdata['ra'][0], brickdata['dec'][0])
    # Create the coadd
    # NOTE(review): removed a leftover "import pdb; pdb.set_trace()"
    # debugging hook that halted every run here.
    coadd.coadd(expdata['fluxfile'], expdata['wtfile'], expdata, brickhead)
if __name__ == "__main__":
    # BUG FIX: ArgumentParser was used but never imported anywhere in the file.
    from argparse import ArgumentParser

    parser = ArgumentParser(description='Create NSC coadd.')
    parser.add_argument('brick', type=str, nargs=1, help='Brick name')
    parser.add_argument('version', type=str, nargs=1, help='Version number')
    parser.add_argument('-b', '--band', type=str, default='', help='Band/filter')
    parser.add_argument('-r', '--redo', action='store_true', help='Redo this brick')
    parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output')
    args = parser.parse_args()

    t0 = time.time()
    hostname = socket.gethostname()
    host = hostname.split('.')[0]
    radeg = np.float64(180.00) / np.pi

    # Inputs.  nargs=1 yields one-element lists, so unwrap both positionals.
    # BUG FIX: the original stored args.brick in an unused "pix" variable and
    # then called nsc_coadd with the undefined name "brick" (NameError).
    brick = args.brick[0]
    version = args.version[0]
    band = args.band
    if band == '':
        band = None
    verbose = args.verbose
    redo = args.redo

    # Create the coadd.
    # BUG FIX: nsc_coadd() takes no "redo" keyword, so passing redo=redo
    # raised TypeError; redo is parsed but currently unused.
    nsc_coadd(brick, band=band, version=version)
|
astro-datalabREPO_NAMEnscPATH_START.@nsc_extracted@nsc-master@python@nsc@nsc_coadd.py@.PATH_END.py
|
{
"filename": "eclipses.py",
"repo_name": "hpparvi/PyTransit",
"repo_path": "PyTransit_extracted/PyTransit-master/pytransit/utils/eclipses.py",
"type": "Python"
}
|
# PyTransit: fast and easy exoplanet transit modelling in Python.
# Copyright (C) 2010-2019 Hannu Parviainen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
"""Methods for modelling of secondary eclipses
.. deprecated:: 2.0.0
``pytransit.utils.eclipses`` will be removed in PyTransit 2.1, it is replaced by ``pytransit.utils.phasecurves``.
"""
import warnings
from numpy import sqrt, exp, NaN
from scipy.constants import k,h,c
from scipy.optimize import brentq
warnings.warn("the pytransits.utils.eclipses module is deprecated and will be removed in PyTransit 2.1",
FutureWarning, stacklevel=2)
def Teq(Ts, a, f, A):
    """Equilibrium temperature of a planet.

    Parameters
    ----------
    Ts : Effective stellar temperature [K]
    a  : Scaled semi-major axis [Rs]
    f  : Redistribution factor [-]
    A  : Bond albedo [-]

    Returns
    -------
    Teq : Equilibrium temperature [K]
    """
    dilution = sqrt(1 / a)           # geometric dilution of stellar flux
    absorbed = (f * (1 - A)) ** 0.25  # redistribution and albedo correction
    return Ts * dilution * absorbed
def Planck(T, l):
    """Radiance of a black body as a function of wavelength.

    Parameters
    ----------
    T : Black body temperature [K]
    l : Wavelength [m]

    Returns
    -------
    L : Black body radiance [W m^-2 sr^-1]
    """
    prefactor = 2 * h * c ** 2 / l ** 5
    boltzmann_term = exp(h * c / (l * k * T)) - 1
    return prefactor / boltzmann_term
def reflected_fr(a, A, r=1.5):
    """Reflected flux ratio per projected area element.

    Parameters
    ----------
    a : Scaled semi-major axis [Rs]
    A : Bond albedo [-]
    r : Inverse of the phase integral [-]

    Returns
    -------
    fr : Reflected flux ratio [-]
    """
    geometric_albedo = r * A
    return geometric_albedo / a ** 2
def thermal_fr(Ts, a, f, A, l, Ti=0):
    """Thermal flux ratio per projected area element.

    Parameters
    ----------
    Ts : Effective stellar temperature [K]
    a  : Scaled semi-major axis [Rs]
    f  : Redistribution factor [-]
    A  : Bond albedo [-]
    l  : Wavelength [m]
    Ti : Internal temperature offset [K]

    Returns
    -------
    fr : Thermal flux ratio [-]
    """
    planet_temperature = Teq(Ts, a, f, A) + Ti
    return Planck(planet_temperature, l) / Planck(Ts, l)
def flux_ratio(Ts, a, f, A, l, r=1.5, Ti=0):
    """Total flux ratio per projected area element.

    Sum of the reflected and thermal components.

    Parameters
    ----------
    Ts : Effective stellar temperature [K]
    a  : Scaled semi-major axis [Rs]
    f  : Redistribution factor [-]
    A  : Bond albedo [-]
    l  : Wavelength [m]
    r  : Inverse of the phase integral [-]
    Ti : Internal temperature offset [K]

    Returns
    -------
    fr : Total flux ratio [-]
    """
    reflected = reflected_fr(a, A, r)
    thermal = thermal_fr(Ts, a, f, A, l, Ti)
    return reflected + thermal
def solve_Teq(fr, Ts, a, A, l, r=1.5, Ti=0):
    """Solve the equilibrium temperature.

    Finds the Teq for which the modeled total flux ratio matches *fr*.

    Parameters
    ----------
    fr : Flux ratio [-]
    Ts : Effective stellar temperature [K]
    a  : Scaled semi-major axis [Rs]
    A  : Bond albedo [-]
    l  : Wavelength [m]
    r  : Inverse of the phase integral [-]
    Ti : Internal temperature offset [K]

    Returns
    -------
    Teq : Equilibrium temperature, or NaN if no root lies in [5, Ts]
    """
    stellar_radiance = Planck(Ts, l)
    reflected = reflected_fr(a, A, r)

    def residual(teq):
        return reflected + Planck(teq + Ti, l) / stellar_radiance - fr

    try:
        return brentq(residual, 5, Ts)
    except ValueError:
        return NaN
def solve_A(fr, Ts, a, f, l, r=1.5, Ti=0):
    """Solve the Bond albedo.

    Parameters
    ----------
    fr : Flux ratio [-]
    Ts : Effective stellar temperature [K]
    a  : Scaled semi-major axis [Rs]
    f  : Redistribution factor [-]
    l  : Wavelength [m]
    r  : Inverse of the phase integral [-]
    Ti : temperature [K]

    Returns
    -------
    A : Bond albedo, or NaN if no root is bracketed in [0, 0.3]
    """
    # NOTE(review): the original docstring listed "A : Bond albedo" as a
    # parameter; the actual parameter is the redistribution factor f and
    # the albedo A is the quantity being solved for.
    try:
        return brentq(lambda A: reflected_fr(a, A, r) + thermal_fr(Ts, a, f, A, l, Ti) - fr, 0, 0.3)
    except ValueError:
        return NaN
def solve_redistribution(fr, Ts, a, A, l):
    """Solve the redistribution factor.

    Parameters
    ----------
    fr : Flux ratio [-]
    Ts : Effective stellar temperature [K]
    a  : Scaled semi-major axis [Rs]
    A  : Bond albedo [-]
    l  : Wavelength [m]

    Returns
    -------
    f : Redistribution factor
    """
    # BUG FIX: solve_Teq requires (fr, Ts, a, A, l); the original call
    # solve_Teq(fr, Ts, l) omitted a and A and raised TypeError on every
    # invocation.
    Teqs = solve_Teq(fr, Ts, a, A, l)
    return brentq(lambda f: Teq(Ts, a, f, A) - Teqs, 0.25, 15)
|
hpparviREPO_NAMEPyTransitPATH_START.@PyTransit_extracted@PyTransit-master@pytransit@utils@eclipses.py@.PATH_END.py
|
{
"filename": "titan_takeoff.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/llms/titan_takeoff.ipynb",
"type": "Jupyter Notebook"
}
|
# Titan Takeoff
`TitanML` helps businesses build and deploy better, smaller, cheaper, and faster NLP models through our training, compression, and inference optimization platform.
Our inference server, [Titan Takeoff](https://docs.titanml.co/docs/intro) enables deployment of LLMs locally on your hardware in a single command. Most generative model architectures are supported, such as Falcon, Llama 2, GPT2, T5 and many more. If you experience trouble with a specific model, please let us know at hello@titanml.co.
## Example usage
Here are some helpful examples to get started using Titan Takeoff Server. You need to make sure Takeoff Server has been started in the background before running these commands. For more information see [docs page for launching Takeoff](https://docs.titanml.co/docs/Docs/launching/).
```python
import time
# Note importing TitanTakeoffPro instead of TitanTakeoff will work as well both use same object under the hood
from langchain_community.llms import TitanTakeoff
from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
from langchain_core.prompts import PromptTemplate
```
### Example 1
Basic use assuming Takeoff is running on your machine using its default ports (ie localhost:3000).
```python
llm = TitanTakeoff()
output = llm.invoke("What is the weather in London in August?")
print(output)
```
### Example 2
Specifying a port and other generation parameters
```python
llm = TitanTakeoff(port=3000)
# A comprehensive list of parameters can be found at https://docs.titanml.co/docs/next/apis/Takeoff%20inference_REST_API/generate#request
output = llm.invoke(
"What is the largest rainforest in the world?",
consumer_group="primary",
min_new_tokens=128,
max_new_tokens=512,
no_repeat_ngram_size=2,
sampling_topk=1,
sampling_topp=1.0,
sampling_temperature=1.0,
repetition_penalty=1.0,
regex_string="",
json_schema=None,
)
print(output)
```
### Example 3
Using generate for multiple inputs
```python
llm = TitanTakeoff()
rich_output = llm.generate(["What is Deep Learning?", "What is Machine Learning?"])
print(rich_output.generations)
```
### Example 4
Streaming output
```python
llm = TitanTakeoff(
streaming=True, callback_manager=CallbackManager([StreamingStdOutCallbackHandler()])
)
prompt = "What is the capital of France?"
output = llm.invoke(prompt)
print(output)
```
### Example 5
Using LCEL
```python
llm = TitanTakeoff()
prompt = PromptTemplate.from_template("Tell me about {topic}")
chain = prompt | llm
output = chain.invoke({"topic": "the universe"})
print(output)
```
### Example 6
Starting readers using TitanTakeoff Python Wrapper. If you haven't created any readers with first launching Takeoff, or you want to add another you can do so when you initialize the TitanTakeoff object. Just pass a list of model configs you want to start as the `models` parameter.
```python
# Model config for the llama model, where you can specify the following parameters:
# model_name (str): The name of the model to use
# device: (str): The device to use for inference, cuda or cpu
# consumer_group (str): The consumer group to place the reader into
# tensor_parallel (Optional[int]): The number of gpus you would like your model to be split across
# max_seq_length (int): The maximum sequence length to use for inference, defaults to 512
# max_batch_size (int): The max batch size for continuous batching of requests
llama_model = {
"model_name": "TheBloke/Llama-2-7b-Chat-AWQ",
"device": "cuda",
"consumer_group": "llama",
}
llm = TitanTakeoff(models=[llama_model])
# The model needs time to spin up, length of time need will depend on the size of model and your network connection speed
time.sleep(60)
prompt = "What is the capital of France?"
output = llm.invoke(prompt, consumer_group="llama")
print(output)
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@llms@titan_takeoff.ipynb@.PATH_END.py
|
{
"filename": "_xpad.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/isosurface/colorbar/_xpad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``xpad`` property of ``isosurface.colorbar``."""

    def __init__(self, plotly_name="xpad", parent_name="isosurface.colorbar", **kwargs):
        super(XpadValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # pop so an explicit caller-supplied value is not duplicated in
            # **kwargs; padding must be non-negative, hence min=0.
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@isosurface@colorbar@_xpad.py@.PATH_END.py
|
{
"filename": "transforms.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/tests/functional/Gravity/transforms.py",
"type": "Python"
}
|
import scipy.base as sb
import scipy
pi = scipy.pi
# fast discrete cosine transforms of real sequences (using the fft)
# These implement the DCT-II and inverse DCT-II (DCT-III)
# described at http://en.wikipedia.org/wiki/Discrete_cosine_transform
def dct(x,axis=-1):
    """Discrete cosine transform (DCT-II) based on the FFT.

    For even-length signals it uses an N-point FFT
    For odd-length signals it uses a 2N-point FFT.

    NOTE(review): written against the legacy scipy.base/Numeric API
    (sb.Float, integer "/" division).  Under Python 3 the N/2 slice
    bounds would need N//2 -- confirm the target interpreter.
    """
    n = len(x.shape)
    N = x.shape[axis]
    even = (N%2 == 0)
    # Build one list-of-slices template per reordering role; each starts as
    # the identity slice in every dimension.
    slices = [None]*4
    for k in range(4):
        slices[k] = []
        for j in range(n):
            slices[k].append(slice(None))
    if even:
        # Even N: pack even-indexed samples followed by reversed
        # odd-indexed samples into an N-point buffer.
        xtilde = 0.0*x
        slices[0][axis] = slice(None,N/2)
        slices[1][axis] = slice(None,None,2)
        slices[2][axis] = slice(N/2,None)
        slices[3][axis] = slice(N,None,-2)
    else:
        # Odd N: mirror-extend x into a 2N-point buffer (slices[1] stays
        # the identity so all of x is copied first).
        newshape = list(x.shape)
        newshape[axis] = 2*N
        xtilde = sb.empty(newshape,sb.Float)
        slices[0][axis] = slice(None,N)
        slices[2][axis] = slice(N,None)
        slices[3][axis] = slice(None,None,-1)
    for k in range(4):
        slices[k] = tuple(slices[k])
    xtilde[slices[0]] = x[slices[1]]
    xtilde[slices[2]] = x[slices[3]]
    Xt = scipy.fft(xtilde,axis=axis)
    # Post-twiddle: phase factors exp(-i*pi*k/(2N)) recover the DCT-II
    # from the FFT of the reordered sequence.
    pk = sb.exp(-1j*pi*sb.arange(N)/(2*N))
    newshape = sb.ones(n)
    newshape[axis] = N
    pk.shape = newshape
    if not even:
        # Odd case used a 2N-point FFT: halve the twiddle and keep the
        # first N output bins.
        pk /= 2;
        Xt = Xt[slices[0]]
    return sb.real(Xt*pk)
def idct(v,axis=-1):
    """Inverse DCT-II (i.e. DCT-III) along *axis*, via the inverse FFT.

    NOTE(review): several names here (arange, ones, zeros, exp, real,
    conj, sb.Complex) are not imported in this file; the code appears to
    assume a star-import of the legacy Numeric/scipy.base namespace --
    confirm before running.
    """
    n = len(v.shape)
    N = v.shape[axis]
    even = (N%2 == 0)
    slices = [None]*4
    for k in range(4):
        slices[k] = []
        for j in range(n):
            slices[k].append(slice(None))
    k = arange(N)
    if even:
        # Pre-twiddle, inverse FFT, then undo the even/odd reordering that
        # dct() applied for even N.
        ak = sb.r_[1.0,[2]*(N-1)]*exp(1j*pi*k/(2*N))
        newshape = ones(n)
        newshape[axis] = N
        ak.shape = newshape
        xhat = real(scipy.ifft(v*ak,axis=axis))
        x = 0.0*v
        slices[0][axis] = slice(None,None,2)
        slices[1][axis] = slice(None,N/2)
        slices[2][axis] = slice(N,None,-2)
        slices[3][axis] = slice(N/2,None)
        for k in range(4):
            slices[k] = tuple(slices[k])
        x[slices[0]] = xhat[slices[1]]
        x[slices[2]] = xhat[slices[3]]
        return x
    else:
        # Odd N: build a 2N-point Hermitian-symmetric spectrum so the
        # inverse FFT is real, then keep the first N samples.
        ak = 2*sb.exp(1j*pi*k/(2*N))
        newshape = ones(n)
        newshape[axis] = N
        ak.shape = newshape
        newshape = list(v.shape)
        newshape[axis] = 2*N
        Y = zeros(newshape,sb.Complex)
        #Y[:N] = ak*v
        #Y[(N+1):] = conj(Y[N:0:-1])
        slices[0][axis] = slice(None,N)
        slices[1][axis] = slice(None,None)
        slices[2][axis] = slice(N+1,None)
        slices[3][axis] = slice((N-1),0,-1)
        Y[slices[0]] = ak*v
        Y[slices[2]] = conj(Y[slices[3]])
        x = real(scipy.ifft(Y,axis=axis))[slices[0]]
        return x
def dct2(x, axes=(-1, -2)):
    """2-D DCT-II: transform along axes[0] first, then along axes[1]."""
    partial = dct(x, axis=axes[0])
    return dct(partial, axis=axes[1])
def idct2(v, axes=(-1, -2)):
    """2-D inverse DCT-II: invert along axes[0] first, then along axes[1]."""
    partial = idct(v, axis=axes[0])
    return idct(partial, axis=axes[1])
def dctn(x, axes=None):
    """Apply the DCT-II along each axis in *axes* (all axes by default)."""
    if axes is None:
        # BUG FIX: the original used the undefined name `arange` (never
        # imported in this module), raising NameError whenever axes=None.
        axes = range(len(x.shape))
    res = x
    for k in axes:
        res = dct(res, axis=k)
    return res
def idctn(v, axes=None):
    """Apply the inverse DCT-II along each axis in *axes* (all by default)."""
    if axes is None:
        # BUG FIX: `arange` is undefined in this module; use range instead.
        axes = range(len(v.shape))
    res = v
    for k in axes:
        res = idct(res, axis=k)
    return res
def makeC(N):
    """Build the N x N DCT-II basis matrix C[n, l] = cos(pi*(2n+1)*l/(2N)).

    NOTE(review): `ogrid` and `cos` are not imported in this file;
    presumably a Numeric/scipy.base star-import is assumed -- confirm.
    """
    n,l = ogrid[:N,:N]
    C = cos(pi*(2*n+1)*l/(2*N))
    return C
def dct2raw(x):
    """2-D DCT by direct matrix multiplication: C_M^T @ x @ C_N.

    Reference implementation for checking the FFT-based dct2().
    NOTE(review): `dot` and `transpose` are not imported here -- assumes
    a Numeric/scipy.base star-import.
    """
    M,N = x.shape
    CM = makeC(M)
    CN = makeC(N)
    return dot(transpose(CM),dot(x,CN))
def idct2raw(v):
    """2-D inverse DCT by explicit matrix inversion of the DCT bases.

    Reference implementation for checking the FFT-based idct2().
    NOTE(review): `dot`, `transpose` and `linalg` are not imported here --
    assumes a Numeric/scipy.base star-import.
    """
    M,N = v.shape
    iCM = linalg.inv(makeC(M))
    iCN = linalg.inv(makeC(N))
    return dot(transpose(iCM),dot(v,iCN))
def makeS(N):
    """Build the N x N DST-I basis matrix S[n, k] = sin(pi*(k+1)*(n+1)/(N+1)).

    NOTE(review): `ogrid` and `sin` are not imported in this file --
    assumes a Numeric/scipy.base star-import.
    """
    n,k = ogrid[:N,:N]
    C = sin(pi*(k+1)*(n+1)/(N+1))
    return C
# DST-I
def dst(x,axis=-1):
    """Discrete Sine Transform (DST-I)

    Implemented using 2(N+1)-point FFT
    xsym = r_[0,x,0,-x[::-1]]
    DST = (-imag(fft(xsym))/2)[1:(N+1)]
    adjusted to work over an arbitrary axis for entire n-dim array
    """
    n = len(x.shape)
    N = x.shape[axis]
    # Slice templates: [0] target for x, [1] target for the mirrored copy,
    # [2] reversal of x.
    slices = [None]*3
    for k in range(3):
        slices[k] = []
        for j in range(n):
            slices[k].append(slice(None))
    # Build the odd-symmetric extension [0, x, 0, -x reversed] of length 2(N+1).
    newshape = list(x.shape)
    newshape[axis] = 2*(N+1)
    xtilde = sb.zeros(newshape,sb.Float)
    slices[0][axis] = slice(1,N+1)
    slices[1][axis] = slice(N+2,None)
    slices[2][axis] = slice(None,None,-1)
    for k in range(3):
        slices[k] = tuple(slices[k])
    xtilde[slices[0]] = x
    xtilde[slices[1]] = -x[slices[2]]
    Xt = scipy.fft(xtilde,axis=axis)
    # The FFT of an odd-symmetric real sequence is purely imaginary;
    # -imag/2 of bins 1..N is the DST-I.
    return (-sb.imag(Xt)/2)[slices[0]]
def idst(v,axis=-1):
    """Inverse DST-I along *axis* (the DST-I is its own inverse up to scale).

    NOTE(review): `real` is not imported in this file -- assumes a
    Numeric/scipy.base star-import.
    """
    n = len(v.shape)
    N = v.shape[axis]
    slices = [None]*3
    for k in range(3):
        slices[k] = []
        for j in range(n):
            slices[k].append(slice(None))
    # Reconstruct the odd-symmetric imaginary spectrum of length 2(N+1)
    # that dst() would have produced, then inverse-FFT it.
    newshape = list(v.shape)
    newshape[axis] = 2*(N+1)
    Xt = sb.zeros(newshape,sb.Complex)
    slices[0][axis] = slice(1,N+1)
    slices[1][axis] = slice(N+2,None)
    slices[2][axis] = slice(None,None,-1)
    val = 2j*v
    for k in range(3):
        slices[k] = tuple(slices[k])
    Xt[slices[0]] = -val
    Xt[slices[1]] = val[slices[2]]
    xhat = real(scipy.ifft(Xt,axis=axis))
    # Bins 1..N of the inverse transform carry the reconstructed signal.
    return xhat[slices[0]]
def dst2(x, axes=(-1, -2)):
    """2-D DST-I: transform along axes[0] first, then along axes[1]."""
    partial = dst(x, axis=axes[0])
    return dst(partial, axis=axes[1])
def idst2(v, axes=(-1, -2)):
    """2-D inverse DST-I: invert along axes[0] first, then along axes[1]."""
    partial = idst(v, axis=axes[0])
    return idst(partial, axis=axes[1])
def dstn(x, axes=None):
    """N-dimensional DST-I: apply the 1-D `dst` along every axis in *axes*
    (all axes when *axes* is None)."""
    if axes is None:
        axes = arange(len(x.shape))
    result = x
    for ax in axes:
        result = dst(result, axis=ax)
    return result
def idstn(v, axes=None):
    """N-dimensional inverse DST-I: apply the 1-D `idst` along every axis in
    *axes* (all axes when *axes* is None)."""
    if axes is None:
        axes = arange(len(v.shape))
    result = v
    for ax in axes:
        result = idst(result, axis=ax)
    return result
def digitrevorder(x, base):
    """Return *x* permuted into digit-reversed order for the given *base*.

    len(x) must be an exact power of *base*, otherwise ValueError is raised.
    Each element value is reinterpreted as an L-digit base-`base` number
    (len(x) == base**L) whose digit string is reversed.
    """
    x = asarray(x)
    rem = N = len(x)
    L = 0
    # Determine L such that N == base**L, validating the length on the way.
    while 1:
        if rem < base:
            break
        intd = rem // base
        if base*intd != rem:
            raise ValueError("Length of data must be power of base.")
        rem = intd
        L += 1
    # A single element (N == base**0) is already in reversed order; the loop
    # below would otherwise zero it out because vec is empty.
    if L == 0:
        return x
    vec = r_[[base**n for n in range(L)]]
    # Allocate an (L, N) work array; `None` is numpy's newaxis (the old
    # Numeric-era alias `NewAxis` no longer exists in modern numpy).
    newx = x[None, :]*vec[:, None]
    # compute digits (row k holds the digit worth base**k)
    for k in range(L-1, -1, -1):
        newx[k] = x // vec[k]
        x = x - newx[k]*vec[k]
    # reverse digits
    newx = newx[::-1, :]
    x = 0*x
    # construct new result from reversed digits
    for k in range(L):
        x += newx[k]*vec[k]
    return x
def bitrevorder(x):
    """Return *x* permuted into bit-reversed (base-2 digit-reversed) order."""
    return digitrevorder(x, base=2)
def wht(data):
    """Walsh-Hadamard Transform (sequency ordered).

    Adapted from a MATLAB algorithm published on the web by
    Gylson Thomas (gylson_thomas@yahoo.com), Asst. Professor,
    Electrical and Electronics Engineering Dept.,
    MES College of Engineering Kuttippuram, Kerala, India, February 2005.
    Reference: N. Ahmed, K.R. Rao, "Orthogonal Transformations for
    Digital Signal Processing", Springer Verlag, New York 1975, page 111.

    *data* must have power-of-two length; raises ValueError otherwise.
    The result is scaled by 1/N (drop that scaling for the inverse WHT).
    """
    N = len(data)
    L = log2(N)
    if (L - floor(L)) > 0.0:
        raise ValueError("Length must be power of 2")
    # range() needs integers; log2 returns a float.
    L = int(L)
    x = bitrevorder(data)
    # Integer arithmetic for the butterfly indices (the original used `/`,
    # which produces floats and breaks range() under Python 3).
    k1 = N; k2 = 1; k3 = N // 2
    for i1 in range(1, L + 1):              # iteration stage
        L1 = 1
        for i2 in range(1, k2 + 1):
            for i3 in range(1, k3 + 1):
                i = i3 + L1 - 1
                j = i + k3
                temp1 = x[i - 1]
                temp2 = x[j - 1]
                # The sum/difference pair must be an either/or choice; the
                # original fell through and unconditionally overwrote the
                # even-i2 result with the odd-i2 assignments.
                if (i2 % 2) == 0:
                    x[i - 1] = temp1 - temp2
                    x[j - 1] = temp1 + temp2
                else:
                    x[i - 1] = temp1 + temp2
                    x[j - 1] = temp1 - temp2
            L1 = L1 + k1
        k1 = k1 // 2; k2 = k2 * 2; k3 = k3 // 2
    x = x * 1.0 / N  # delete this line for the inverse wht
    return x
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@tests@functional@Gravity@transforms.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "brinckmann/montepython_public",
"repo_path": "montepython_public_extracted/montepython_public-master/montepython/likelihoods/fake_planck_realistic_lowl/__init__.py",
"type": "Python"
}
|
# there is no specific likelihood code for this experiment, because it
# falls in the category of CMB experiments described in the "mock CMB"
# format. The class below inherits the properties of a general class
# "Likelihood_mock_cmb", which knows how to deal with all experiments in
# "mock CMB" format.
from montepython.likelihood_class import Likelihood_mock_cmb
class fake_planck_realistic_lowl(Likelihood_mock_cmb):
    """Low-l fake/realistic Planck experiment in the "mock CMB" format.

    All behaviour is inherited from the generic Likelihood_mock_cmb base
    class, which knows how to deal with every experiment in this format.
    """
    pass
|
brinckmannREPO_NAMEmontepython_publicPATH_START.@montepython_public_extracted@montepython_public-master@montepython@likelihoods@fake_planck_realistic_lowl@__init__.py@.PATH_END.py
|
{
"filename": "plotTEA_analytical.py",
"repo_name": "exoclime/HELIOS",
"repo_path": "HELIOS_extracted/HELIOS-master/supplementary/reproducing_Fig4_of_Malik2017/TEA_compendium/plotTEA_analytical.py",
"type": "Python"
}
|
#! /usr/bin/env python
from readconf import *
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
#from PIL import Image
import Image
# Correct directory names: ensure the configured output path ends with '/'.
if location_out[-1] != '/':
    location_out += '/'
# C/O ratio grid (0.1 to 10, log-spaced) used as the plot's x-axis range.
CtoO_list = np.logspace(-1,1,100)
def plotTEA():
    '''
    Plot C/O ratio vs. H2-normalized abundances to validate the analytical
    chemistry model, as shown in Figure 4 of Malik et al. 2016.

    Command line (3 positional arguments after the script name):
        atmfile1  atmfile2  vulcan_npz
    where atmfile1/atmfile2 are TEA result files and vulcan_npz is a NumPy
    .npz archive holding VULCAN mixing ratios under key 'n_mix'.

    Returns the path of the PNG written into <location_out>/plots/.
    '''
    # Get plots directory, create if non-existent
    plots_dir = location_out + "/plots/"
    if not os.path.exists(plots_dir): os.makedirs(plots_dir)
    # Counts number of arguments given
    noArguments = len(sys.argv)
    # Three positional arguments are consumed below (sys.argv[1..3]), so argv
    # must have length 4.  The original compared against 3 and then fell
    # through, crashing later on sys.argv[3]; print usage and exit instead.
    if noArguments != 4:
        print("\nUsage : ../TEA/tea/plotTEA.py atmfile species(divided by comma, no breaks)")
        print("Example: ../TEA/tea/plotTEA.py ../TEA/doc/examples/multiTP/results/multiTP_Example.tea CO,CH4,H2O,NH3\n")
        sys.exit(1)
    # First and second TEA atmospheric files
    filename = sys.argv[1]
    filename2 = sys.argv[2]
    # VULCAN .npz archive with the numerical solutions
    vulcanname = sys.argv[3]
    # Sets the species for plotting ('H2' is always required becasue the mixing ratio in the analytical formula
    # is defined as the ratio between the species and H2)
    species = ['CO', 'CO2', 'CH4', 'H2O','C2H2','H2']
    # Open the two TEA files and read
    f = open(filename, 'r')
    lines = np.asarray(f.readlines())
    f.close()
    f2 = open(filename2, 'r')
    lines2 = np.asarray(f2.readlines())
    f2.close()
    # Get molecules names
    imol = np.where(lines == "#SPECIES\n")[0][0] + 1
    molecules = lines[imol].split()
    nmol = len(molecules)
    for m in np.arange(nmol):
        molecules[m] = molecules[m].partition('_')[0]
    # Get molecules names for the second TEA file.  BUGFIX: the original read
    # the species row from `lines` (file 1) instead of `lines2`.
    imol2 = np.where(lines2 == "#SPECIES\n")[0][0] + 1
    molecules2 = lines2[imol2].split()
    nmol2 = len(molecules2)
    for m in np.arange(nmol2):
        molecules2[m] = molecules2[m].partition('_')[0]
    # convert the list to tuple
    species = tuple(species)
    nspec = len(species)
    # Populate column numbers for requested species and
    # update list of species if order is not appropriate
    columns = []
    spec = []
    spec2 = []
    columns2 = []
    for i in np.arange(nmol):
        for j in np.arange(nspec):
            if molecules[i] == species[j]:
                columns.append(i+2)
                spec.append(species[j])
    for i in np.arange(nmol2):
        for j in np.arange(nspec):
            if molecules2[i] == species[j]:
                columns2.append(i+2)
                spec2.append(species[j])
    # Convert spec to tuple
    spec = tuple(spec)
    spec2 = tuple(spec2)
    # Column selections: pressure (column 0) plus each requested species
    usecols = tuple(np.concatenate(([0], columns)))
    usecols2 = tuple(np.concatenate(([0], columns2)))
    # Load all data for all interested species.  BUGFIX: the second file is
    # now read with its own column selection (`usecols2`), not file 1's.
    data = np.loadtxt(filename, dtype=float, comments='#', delimiter=None, \
                converters=None, skiprows=8, usecols=usecols, unpack=True)
    data2 = np.loadtxt(filename2, dtype=float, comments='#', delimiter=None, \
                converters=None, skiprows=8, usecols=usecols2, unpack=True)
    # Open a figure
    plt.figure(1)
    plt.clf()
    # Set different colours of lines
    colors = ['b','g','r','c','m','y','k', 'orange','pink', 'grey']
    color_index = 0
    # Read mixing ratios of H2 in each file.  BUGFIX: iterate each species
    # list with its own length instead of a shared `nspec - nnot` bound,
    # which assumed both files resolved the same species.
    for i in np.arange(len(spec)):
        if spec[i] == 'H2': H2_1 = data[i+1]
    for i in np.arange(len(spec2)):
        if spec2[i] == 'H2': H2_2 = data2[i+1]
    tex_labels = {'H':'$H$','H2':'$H_2$','O':'$O$','OH':'$OH$','H2O':'$H_2O$','CH':'$CH$','C':'$C$','CH2':'$CH_2$','CH3':'$CH_3$','CH4':'$CH_4$',\
    'C2':'$C_2$','C2H2':'$C_2H_2$','C2H3':'$C_2H_3$','C2H':'$C_2H$','CO':'$CO$','CO2':'$CO_2$','He':'$He$','O2':'$O_2$'}
    # plot C/O vs mixing ratios (normalized by H2); thin lines = file 1,
    # thick lines = file 2.  NOTE(review): this assumes both TEA files
    # contain the same species in the same order -- confirm for new inputs.
    for i in np.arange(len(spec)):
        if spec[i]!='H2':
            plt.loglog(np.logspace(-1,1,len(data[i+1])), data[i+1]/H2_1, '-', color=colors[color_index],label=tex_labels[spec[i]],lw=1)
            plt.loglog(np.logspace(-1,1,len(data2[i+1])), data2[i+1]/H2_2, '-', color=colors[color_index],lw=3)
            color_index += 1
    ################################# VULCAN ######################################
    # Overplot the VULCAN numerical solutions as scatter points.
    Heng = np.load(vulcanname)['n_mix'][0]
    # load solutions for 800K and 3000K
    Heng1 = Heng[800]
    Heng2 = Heng[3000]
    color_index=0
    for sp in spec :
        if sp != 'H2':
            plt.scatter(np.logspace(-1,1,len(Heng1[sp]))[::5],Heng1[sp][::5],color=colors[color_index], marker='o',facecolor='None')
            plt.scatter(np.logspace(-1,1,len(Heng2[sp]))[::5] ,Heng2[sp][::5],color=colors[color_index], marker='o')
            color_index+=1
    ################################# VULCAN ######################################
    # Label the plot
    plt.xlabel('C/O ratio', fontsize=14)
    plt.ylabel('volume mixing ratio' , fontsize=14)
    handles, labels = plt.gca().get_legend_handles_labels()
    # Custom artists for the line-width legend entries (thin = 800 K, thick = 3000 K)
    Artist1 = plt.Line2D(range(10),range(10), color='black', lw=1)
    Artist2 = plt.Line2D((0,1),(0,0), color='black', lw=3)
    plt.legend(list(handles) + [Artist1, Artist2],
               list(labels) + ['800 K', '3000 K'],
               frameon=1, prop={'size':10}, loc=4)
    plt.xlim((0.1,10.))
    # Place plot into plots directory with appropriate name
    plot_out = plots_dir + filename.split("/")[-1][:-4] + '.png'
    plot_eps = plots_dir + filename.split("/")[-1][:-4] + '.eps'
    plt.savefig(plot_out)
    plt.savefig(plot_eps)
    plt.close()
    # Return name of plot created
    return plot_out
# Script entry point: produce the validation figure and display it.
if __name__ == '__main__':
    # Make plot and retrieve plot's name
    plot_out = plotTEA()
    # Open the saved PNG with the (legacy, pre-Pillow) `Image` module's viewer
    plot = Image.open(plot_out)
    plot.show()
|
exoclimeREPO_NAMEHELIOSPATH_START.@HELIOS_extracted@HELIOS-master@supplementary@reproducing_Fig4_of_Malik2017@TEA_compendium@plotTEA_analytical.py@.PATH_END.py
|
{
"filename": "encoder_ellastic.py",
"repo_name": "alercebroker/ATAT",
"repo_path": "ATAT_extracted/ATAT-main/layers/encoder_ellastic.py",
"type": "Python"
}
|
'''
Encoder model
'''
import functools
import math
import torch
import torch.nn as nn
from torch.nn import init
import torch.optim as optim
import torch.nn.functional as F
from torch.nn import Parameter as P
from . import time_modulator as tmod
from . import init_model as itmodel
from . import mha
from . import optimizers
activation_dict = {'inplace_relu': nn.ReLU(inplace=True),
'relu': nn.ReLU(inplace=False),
'ir': nn.ReLU(inplace=True),}
class PreNorm(nn.Module):
    """Wrap a callable so its input is layer-normalized before the call."""
    def __init__(self, dim, fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn
    def forward(self, x, **kwargs):
        normalized = self.norm(x)
        return self.fn(normalized, **kwargs)
class TokenClassifier(nn.Module):
    """LayerNorm followed by a linear projection to class logits."""
    def __init__(self, input_dim, n_classes):
        super().__init__()
        self.norm = nn.LayerNorm(input_dim)
        self.output_layer = nn.Linear(input_dim, n_classes)
    def forward(self, x):
        normed = self.norm(x)
        return self.output_layer(normed)
class FeedForward(nn.Module):
    """Two-layer GELU MLP with dropout, mapping dim -> embed_dim -> dim."""
    def __init__(self, dim, embed_dim, dropout = 0.):
        super().__init__()
        # Same layer order as a hand-written Sequential, so the state_dict
        # keys (net.0, net.1, ...) are unchanged.
        layers = [
            nn.Linear(dim, embed_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(embed_dim, dim),
            nn.Dropout(dropout),
        ]
        self.net = nn.Sequential(*layers)
    def forward(self, x):
        return self.net(x)
class ClassifierFeedForward(nn.Module):
    """LayerNorm, then a two-hidden-layer GELU MLP producing class logits."""
    def __init__(self, input_dim, embed_dim, n_classes, dropout = 0.):
        super().__init__()
        self.norm = nn.LayerNorm(input_dim)
        # Same layer order as a hand-written Sequential, so the state_dict
        # keys (net.0, net.1, ...) are unchanged.
        stack = [
            nn.Linear(input_dim, embed_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(embed_dim, embed_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(embed_dim, n_classes),
        ]
        self.net = nn.Sequential(*stack)
    def forward(self, x):
        return self.net(self.norm(x))
class Transformer(nn.Module):
    """Stack of pre-norm multi-head-attention + feed-forward blocks with
    residual connections; configured entirely through **kwargs."""
    def __init__(self, **kwargs):
        super().__init__()
        self.cfg_general(**kwargs)
        self.create_layers(**kwargs)
    def cfg_general(self, head_dim, num_heads, attn_layers = 1, dropout = 0.0, **kwargs):
        # Model width is number of heads times per-head dimension.
        self.input_dim = head_dim * num_heads
        self.attn_layers = attn_layers
        self.dropout = dropout
    def create_layers(self,**kwargs):
        # One (attention, feed-forward) pair per layer, each wrapped in PreNorm.
        self.layers = nn.ModuleList([])
        for _ in range(self.attn_layers):
            self.layers.append(nn.ModuleList([
                PreNorm(self.input_dim, mha.MultiheadAttentionHandler(**{**kwargs, 'input_dim': self.input_dim})),
                PreNorm(self.input_dim, FeedForward(self.input_dim, 2* self.input_dim, dropout = self.dropout))
            ]))
    def get_input_dim(self):
        # Width as reported by the first layer's attention handler.
        return self.layers[0][0].fn.get_input_dim()
    def forward(self, x, mask, causal_mask = False):
        # Residual pre-norm transformer: x <- x + attn(x); x <- x + ff(x).
        for idx, (attn, ff) in enumerate(self.layers):
            x = attn(**{'x': x, 'mask': mask, 'causal_mask': causal_mask}) + x
            x = ff(x) + x
        return x
class Encoder(nn.Module):
    """Transformer encoder for light curves with an optional tabular branch.

    The light-curve branch time-modulates the input, runs it through a
    Transformer and classifies either a [CLS]-style token or the masked
    average of the sequence.  When metadata/features are used (and not
    folded into the LC sequence), a second tabular Transformer produces an
    embedding that is classified on its own and also concatenated with the
    light-curve embedding for a mixed classifier.  All configuration is
    passed through **kwargs to the cfg_* / create_layers methods.
    """
    def __init__(self, **kwargs):
        super(Encoder, self).__init__()
        self.cfg_general(**kwargs)
        self.cfg_layers(**kwargs)
        self.cfg_bn(**kwargs)
        self.cfg_optimizers(**kwargs)
        self.create_layers(**kwargs)
        self.cfg_init(**kwargs)
    def cfg_general(self, dim_z = 2, dataset_channel=3,
                    which_encoder = 'vanilla', cat_noise_to_E = False,
                    which_train_fn = 'VAE', n_classes = 10, emb_norm_cte = 0.0,
                    dropout_first_mha = 0.0, dropout_second_mha = 0.0,
                    drop_mask_second_mha = False, **kwargs):
        # Store the general hyper-parameters on the instance.
        # Data/Latent dimension
        self.dataset_channel = dataset_channel
        self.dim_z = dim_z
        self.which_encoder = which_encoder
        self.cat_noise_to_E = cat_noise_to_E
        self.which_train_fn = which_train_fn
        # Per-step input is a single channel, or two when noise is concatenated.
        self.input_dim = 1 if not self.cat_noise_to_E else 2
        self.n_classes = n_classes
        self.emb_norm_cte = emb_norm_cte
        self.dropout_first_mha = dropout_first_mha
        self.dropout_second_mha = dropout_second_mha
        self.drop_mask_second_mha = drop_mask_second_mha
    def reset_some_params(self, reset_tab_transformer = False, reset_lc_transformer = False, F_max =[]):
        # Re-create the time modulator and, when applicable, the tabular
        # per-feature affine parameters for a new feature count len(F_max).
        # NOTE(review): `nkwargs` is not defined in this scope (it is a local
        # of create_layers), so calling this method raises NameError as
        # written.  Also note the mutable default `F_max=[]`.
        self.time_modulator = tmod.EncTimeModulatorHandler(**{**nkwargs, 'embed_dim': self.input_dim_mha})
        self.dim_tab = len(F_max)
        if (self.using_metadata or self.using_features) and not self.combine_lc_tab:
            if not self.not_tabular_transformer:
                self.tab_W_feat = nn.Parameter(torch.randn(1, self.dim_tab, self.tab_input_dim_mha))
                self.tab_b_feat = nn.Parameter(torch.randn(1, self.dim_tab, self.tab_input_dim_mha))
    def cfg_init(self, E_init='ortho', skip_init=False, **kwargs):
        # Weight-initialization scheme, applied via init_model unless skipped.
        self.init = E_init
        # Initialize weights
        if not skip_init:
            itmodel.init_weights(self)
    def cfg_layers(self, E_nl = 'relu', num_linear = 0, **kwargs):
        # Non-linearity name (resolved through activation_dict) and linear count.
        self.nl = E_nl
        self.num_linear = num_linear
    def cfg_bn(self, BN_eps=1e-5, norm_style='in', **kwargs):
        # Batch-norm epsilon and normalization style.
        self.BN_eps, self.norm_style = BN_eps, norm_style
    def cfg_optimizers(self, optimizer_type='adam', E_lr=5e-5, E_B1=0.0, E_B2=0.999,
                       adam_eps=1e-8, weight_decay=5e-4, **kwargs):
        # Optimizer hyper-parameters; no optimizer object is built here.
        self.lr, self.B1, self.B2, self.adam_eps, self.weight_decay = E_lr, E_B1, E_B2, adam_eps, weight_decay
        self.optimizer_type = optimizer_type
    def create_layers(self, using_metadata = False, using_features = False, emb_to_classifier = 'token', F_max = [],
                      tab_detach = False, tab_num_heads = 4, tab_head_dim = 32,
                      tab_output_dim = 0, combine_lc_tab = False,
                      use_detection_token = False, not_tabular_transformer = False, **kwargs):
        # Build all submodules: LC transformer + time modulator, the optional
        # tabular transformer (or raw features), classifiers, learned tokens.
        # Which convs, batchnorms, and linear layers to use
        # No option to turn off SN in D right nows
        self.activation = activation_dict[self.nl]
        self.which_linear = nn.Linear
        self.which_embedding = nn.Embedding
        self.which_bn = nn.BatchNorm2d
        self.using_metadata = using_metadata
        self.using_features = using_features
        self.emb_to_classifier = emb_to_classifier
        self.not_tabular_transformer = not_tabular_transformer
        nkwargs = kwargs.copy()
        nkwargs.update({'input_dim': self.input_dim, 'which_linear': self.which_linear,
                        'which_bn': self.which_bn, 'which_embedding': self.which_embedding})
        # Using MultiheadAttention
        self.transformer = Transformer(**{**nkwargs, 'dropout': self.dropout_first_mha})
        self.input_dim_mha = self.transformer.get_input_dim()
        self.time_modulator = tmod.EncTimeModulatorHandler(**{**nkwargs, 'embed_dim': self.input_dim_mha})
        self.tab_detach = tab_detach
        self.tab_num_heads = tab_num_heads
        self.tab_head_dim = tab_head_dim
        self.tab_output_dim = tab_output_dim
        self.use_detection_token = use_detection_token
        self.combine_lc_tab = combine_lc_tab
        self.F_len = len(F_max)
        self.dim_tab = len(F_max)
        if self.combine_lc_tab:
            # Tabular features share the LC transformer: per-feature affine
            # projection, then prepended to the light-curve sequence.
            self.tab_input_dim_mha = self.input_dim_mha
            self.tab_W_feat = nn.Parameter(torch.randn(1, self.dim_tab, self.tab_input_dim_mha))
            self.tab_b_feat = nn.Parameter(torch.randn(1, self.dim_tab, self.tab_input_dim_mha))
        if (self.using_metadata or self.using_features) and not self.combine_lc_tab:
            self.tab_transformer = Transformer(**{**nkwargs, 'head_dim' : self.tab_head_dim,
                                                 'num_heads': self.tab_num_heads,
                                                 'dropout' : self.dropout_first_mha})
            self.tab_input_dim_mha = self.tab_transformer.get_input_dim()
            if not self.not_tabular_transformer:
                self.input_dim_mix = self.tab_input_dim_mha
                self.tab_token = nn.Parameter(torch.randn(1, 1, self.tab_input_dim_mha))
                self.tab_W_feat = nn.Parameter(torch.randn(1, self.dim_tab, self.tab_input_dim_mha))
                self.tab_b_feat = nn.Parameter(torch.randn(1, self.dim_tab, self.tab_input_dim_mha))
            else:
                # Raw tabular features are used directly (no transformer).
                self.input_dim_mix = len(F_max)
            self.tab_classifier = TokenClassifier(self.input_dim_mix, self.n_classes)
            self.mix_classifier = ClassifierFeedForward(self.input_dim_mix + self.input_dim_mha,
                                                        self.input_dim_mix + self.input_dim_mha,
                                                        self.n_classes, self.dropout_second_mha)
        self.token = nn.Parameter(torch.randn(1, 1, self.input_dim_mha))
        self.lc_classifier = TokenClassifier(self.input_dim_mha, self.n_classes)
        # NOTE(review): LogSoftmax() without dim= relies on a deprecated default.
        self.log_softmax = torch.nn.LogSoftmax()
        if self.use_detection_token:
            self.detection_token = nn.Parameter(torch.randn(1, 1, self.input_dim_mha))
            self.non_detection_token = nn.Parameter(torch.randn(1, 1, self.input_dim_mha))
    def obtain_emb_to_classify(self, emb_x, mask, time = None, **kwargs):
        # Either the masked mean over the sequence, or the first ([CLS]) token.
        if self.emb_to_classifier == 'avg':
            return (emb_x * mask).sum(1)/mask.sum(1)
        elif self.emb_to_classifier == 'token':
            return emb_x[:, 0, :]
    def obtain_argsort(self, time, mask):
        # Sort order by time, pushing masked-out steps to the end.
        return (time * mask + (1 - mask) * 9999999).argsort(1)
    def obtain_last_emb(self, emb_x, mask, time):
        # Embedding of the chronologically last valid (unmasked) observation.
        bs = emb_x.shape[0]
        time_r = time.permute(0,2,1).reshape(bs, -1)
        mask_r = mask.permute(0,2,1).reshape(bs, -1)
        a_time = self.obtain_argsort(time_r, mask_r)
        time_sorted = time_r.gather(1, a_time)
        mask_sorted = mask_r.gather(1, a_time)
        idx = (time_sorted * mask_sorted).argmax(1)
        return emb_x[torch.arange(bs), idx, :]
    def obtain_all_lc_emb(self, data, data_var = None, time = None, mask = None,
                          tabular_feat = None, mask_detection = False, **kwargs):
        # Full light-curve sequence embedding, with the classification token
        # and/or tabular rows prepended before the transformer.
        emb_x, mask = self.time_modulator(data, time, mask, var = data_var)
        if self.emb_to_classifier == 'token':
            token_repeated = self.token.repeat(emb_x.shape[0], 1, 1)
            mask_token = torch.ones(emb_x.shape[0], 1, 1).float().to(emb_x.device)
            mask = torch.cat([mask_token, mask], axis = 1)
            emb_x = torch.cat([token_repeated, emb_x], axis = 1)
        if self.combine_lc_tab:
            tab_emb = self.tab_W_feat * tabular_feat + self.tab_b_feat
            tab_mask = torch.ones(tabular_feat.shape).float().to(emb_x.device)
            emb_x = torch.cat([tab_emb, emb_x], axis = 1)
            mask = torch.cat([tab_mask, mask], axis = 1)
        emb_x = self.transformer(emb_x, mask)
        return emb_x
    def obtain_lc_emb(self, obtain_all_seq_not_token = False, **kwargs):
        # Light-curve embedding used for classification (optionally also the
        # full sequence embedding).
        emb_x = self.obtain_all_lc_emb(**kwargs)
        emb_to_classify = self.obtain_emb_to_classify(emb_x, **kwargs)
        if not obtain_all_seq_not_token:
            return emb_to_classify
        else:
            return emb_x, emb_to_classify
    def obtain_all_tab_emb(self, tabular_feat = None, **kwargs):
        # Tabular sequence embedding with a dedicated [CLS]-style token.
        tab_emb = self.tab_W_feat * tabular_feat + self.tab_b_feat
        tab_token_repeated = self.tab_token.repeat(tab_emb.shape[0], 1, 1)
        tab_emb = torch.cat([tab_token_repeated, tab_emb], axis = 1)
        tab_emb = self.tab_transformer(tab_emb, None)
        return tab_emb
    def obtain_tab_emb(self, obtain_all_seq_not_token = False, **kwargs):
        # Tabular token embedding (optionally also the full sequence).
        output = self.obtain_all_tab_emb(**kwargs)
        if not obtain_all_seq_not_token:
            return output[:, 0, :]
        else:
            return output, output[:, 0, :]
    def obtain_raw_feat(self, tabular_feat, **kwargs):
        # Raw tabular features, squeezed, when no tabular transformer is used.
        return tabular_feat.squeeze()
    def predict_lc(self, **kwargs):
        # Log-probabilities from the light-curve classifier only.
        z_rep = self.obtain_lc_emb(**kwargs)
        return {'MLP': self.log_softmax(self.lc_classifier(z_rep))}
    def predict_tab(self, **kwargs):
        # Log-probabilities from the tabular classifier only.
        emb_x = self.obtain_tab_emb(**kwargs)
        return {'MLPTab': self.log_softmax(self.tab_classifier(emb_x))}
    def predict_mix(self, **kwargs):
        # Log-probabilities from the mixed (LC + tabular) classifier only.
        emb_y = self(**kwargs)
        return {'MLPMix': self.log_softmax(emb_y['MLPMix'])}
    def predict_all(self, **kwargs):
        # Log-probabilities from every head produced by forward().
        emb_y = self(**kwargs)
        return {key: self.log_softmax(emb_y[key]) for key in emb_y.keys()}
    def combine_lc_tab_emb(self, emb_lc, emb_tab, **kwargs):
        # Concatenate tabular and light-curve embeddings (tabular first).
        return torch.cat([emb_tab, emb_lc], axis = 1)
    def forward(self, global_step = 0, **kwargs):
        """Return raw (pre-softmax) logits per head: 'MLP' always, plus
        'MLPTab' and 'MLPMix' when tabular data is used separately;
        'MLPMix' falls back to 'MLP' otherwise."""
        output = {}
        # Obtain lc embedding
        emb_lc = self.obtain_lc_emb(**kwargs)
        output.update({'MLP': self.lc_classifier(emb_lc)})
        if (self.using_metadata or self.using_features) and not self.combine_lc_tab:
            # Obtain tabular embedding
            emb_tab = self.obtain_tab_emb(**kwargs) if not self.not_tabular_transformer else self.obtain_raw_feat(**kwargs)
            output.update({'MLPTab': self.tab_classifier(emb_tab)})
            # Combine both embedding and we classified them with a MLP
            emb_mix = self.combine_lc_tab_emb(emb_lc, emb_tab, **kwargs)
            mix_output = self.mix_classifier(emb_mix)
            #output.update({'MLPMix': mix_output if global_step > 20000 else mix_output.detach()})
            output.update({'MLPMix': mix_output})
        if not ('MLPMix' in output.keys()):
            output['MLPMix'] = output['MLP']
        return output
# Command-line options for this encoder.
def add_sample_parser(parser):
    """Register the encoder's command-line options on *parser* and return it."""
    parser.add_argument(
        '--attn_layers',
        type=int,
        default=1,
        help='Number of attentions layers'
             '(default: %(default)s)')
    parser.add_argument(
        '--emb_to_classifier',
        type=str,
        default='avg',
        help='what embedding to use'
             '(default: %(default)s)')
    parser.add_argument(
        '--using_tabular_feat',
        action='store_true',
        default=False,
        help='using tabular features?'
             '(default: %(default)s)')
    return parser
def add_name_config(config):
    """Return run-name fragments describing the MHA configuration, when either
    the encoder or the decoder uses multi-head attention; otherwise []."""
    uses_mha = config['which_encoder'] == 'mha' or config['which_decoder'] == 'mha'
    if not uses_mha:
        return []
    return ['MHA',
            'HD%d' % config['head_dim'],
            'NHead%d' % config['num_heads']]
|
alercebrokerREPO_NAMEATATPATH_START.@ATAT_extracted@ATAT-main@layers@encoder_ellastic.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.