metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "__init__.py",
"repo_name": "aewallin/allantools",
"repo_path": "allantools_extracted/allantools-master/tests/gps/__init__.py",
"type": "Python"
}
|
# for python import
|
aewallinREPO_NAMEallantoolsPATH_START.@allantools_extracted@allantools-master@tests@gps@__init__.py@.PATH_END.py
|
{
"filename": "rclone.py",
"repo_name": "mhardcastle/ddf-pipeline",
"repo_path": "ddf-pipeline_extracted/ddf-pipeline-master/utils/rclone.py",
"type": "Python"
}
|
# Basic rclone functionality thin wrapper which allows you to create an
# object and send multiple commands, capturing output where necessary.
from __future__ import print_function
import subprocess
import os
import tempfile
import glob
import json
def splitlines(s):
    """Decode a bytes object and return its newline-separated lines.

    str.split('\n') yields a trailing empty string when the input ends
    in a newline; that artefact is dropped so b'a\\nb\\n' and b'a\\nb'
    both give ['a', 'b'].
    """
    lines = s.decode().split('\n')
    return lines[:-1] if lines[-1] == '' else lines
class RClone(object):
    """Basic rclone functionality thin wrapper.

    Create one object per config file and send multiple commands
    through it, capturing output where necessary. The rclone and ada
    executables can be overridden with the RCLONE_COMMAND and
    ADA_COMMAND environment variables; the config directory with
    RCLONE_CONFIG_DIR or MACAROON_DIR.
    """

    def __init__(self, cfg, debug=False):
        '''
        cfg: name of (or, if no config dir is set, full path to) the
             rclone config file. May contain wild cards, in which case
             the first match found is used.
        debug: if True, print every command before it is run.
        Raises RuntimeError if no config file can be found.
        '''
        # Executable names from the environment, with sensible defaults
        self.command = os.environ.get('RCLONE_COMMAND', 'rclone')
        self.ada_command = os.environ.get('ADA_COMMAND', 'ada')
        # First config-dir variable found wins; if neither is set the
        # full path must be supplied in cfg.
        for config in ['RCLONE_CONFIG_DIR', 'MACAROON_DIR']:
            if config in os.environ:
                self.config_dir = os.environ[config]
                break
        else:
            self.config_dir = None
        self.debug = debug
        if self.config_dir is not None:
            self.config_file = os.path.join(self.config_dir, cfg)
        else:
            self.config_file = cfg
        # Resolve wild cards: first match found is used
        if '*' in self.config_file:
            g = glob.glob(self.config_file)
            if len(g) == 0:
                raise RuntimeError('Config file '+self.config_file+' has wild cards but no match found')
            else:
                self.config_file = g[0]
        if not os.path.isfile(self.config_file):
            raise RuntimeError('Config file not found at '+self.config_file)
        self.remote = None  # cached by get_remote()

    def _fullcommand(self, command):
        # Normalize a string command to a list and prepend the rclone
        # binary and the standard options shared by execute/execute_live.
        if isinstance(command, str):
            command = command.split()
        fullcommand = [self.command, '--multi-thread-streams', '1',
                       '--config='+self.config_file]+command
        if self.debug:
            print('Running command', ' '.join(fullcommand))
        return fullcommand

    def execute(self, command):
        '''
        generic execution with standard out and error caught so that
        they can be parsed. Command is a string or a list that can be
        passed to Popen. stdout and stderr are caught and returned as
        elements of a dictionary along with any return code.
        '''
        fullcommand = self._fullcommand(command)
        proc = subprocess.Popen(fullcommand, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        (out, err) = proc.communicate()
        if err:
            print('Rclone command returned error!\n', err)
        return {
            "code": proc.returncode,
            "out": splitlines(out),
            "err": splitlines(err)
        }

    def execute_live(self, command):
        '''
        version of the execute command that does *not* catch stdout so
        you can see what's happening. Returns a dictionary of the same
        format as execute for consistency but as stdout and stderr are
        not caught they are always None.
        '''
        fullcommand = self._fullcommand(command)
        proc = subprocess.Popen(fullcommand)
        proc.wait()
        return {"code": proc.returncode, "err": None, "out": None}

    def copy(self, source, dest):
        '''
        simplifying wrapper function -- one of source and dest needs
        to contain a 'remote' specification, probably self.remote,
        for this to do anything useful. As with rclone copy this will
        work on a single file or a whole directory.
        '''
        return self.execute_live(['-P', 'copy', source, dest])

    def multicopy(self, sourcedir, files, dest):
        '''
        another wrapper function, this time copy named files from
        the source directory to the destination. Better to use this
        than looping over copy if e.g. you want to exploit
        multi-threading or stage more than one file at a time but do
        not want to copy a whole directory.
        '''
        # Write the file list to a temporary include file for rclone
        with tempfile.NamedTemporaryFile(suffix='.txt', delete=False, mode='w') as outfile:
            filename = outfile.name
            outfile.writelines([f+'\n' for f in files])
        try:
            result = self.execute_live(['-P', '--include-from', filename, 'copy', sourcedir, dest])
        finally:
            # Remove the include file even if the copy raises
            os.unlink(filename)
        return result

    def get_remote(self):
        '''
        If there is only one remote covered by the config file, find
        out what it is and store in self.remote, else raise exception.
        '''
        d = self.execute('listremotes')
        if d['code'] != 0 or d['err'] or len(d['out']) > 1:
            raise RuntimeError('Unable to find unique remote: result was '+repr(d))
        else:
            self.remote = d['out'][0]

    def _resolve_remote(self, remote):
        # Use the supplied remote, else the cached one, else look it up
        # via get_remote() (which raises if there is no unique remote).
        if remote is None:
            if self.remote is None:
                self.get_remote()
            remote = self.remote
        return remote

    def get_dirs(self, base='', remote=None):
        '''
        wrapper round rclone lsd that returns a list of directories
        either in the root of the remote or in a specified base
        directory. If no remote specified use the result of
        get_remote().
        '''
        remote = self._resolve_remote(remote)
        d = self.execute(['lsd', remote+base])
        # Directory name is the fifth whitespace-separated field of lsd output
        return [l.split()[4] for l in d['out']]

    def get_files(self, base='', remote=None, exclude_dirs=True):
        '''
        wrapper round rclone lsf that returns a list of files either
        in the root of the remote or in a specified base directory.
        If no remote specified use the result of get_remote().
        Entries ending in '/' (directories) are dropped unless
        exclude_dirs is False.
        '''
        remote = self._resolve_remote(remote)
        d = self.execute(['lsf', remote+base])
        return [l for l in d['out'] if not exclude_dirs or not l.endswith('/')]

    def get_fileinfo(self, base='', remote=None):
        '''
        wrapper round rclone lsjson that returns a list of file
        attributes either in the root of the remote or in a specified
        base directory. If no remote specified use the result of
        get_remote(). Returns None if the command failed.
        '''
        remote = self._resolve_remote(remote)
        d = self.execute(['lsjson', remote+base])
        if d['code'] > 0:
            return None
        return json.loads(''.join(d['out']))

    def get_checksum(self, filename):
        '''
        Use ada to get the checksum. Filename is the remote filename.
        ada does not use the remote. ada config file should contain
        the API information.
        '''
        command = self.ada_command+' --tokenfile '+self.config_file+' --checksum %s' % filename
        if self.debug:
            print('Running '+command)
        t = os.popen(command).read()
        if self.debug:
            print('Output was:\n'+t)
        # ada output: second field is e.g. ADLER32=xxxxxxxx
        return t.split()[1].replace('ADLER32=', '')
|
mhardcastleREPO_NAMEddf-pipelinePATH_START.@ddf-pipeline_extracted@ddf-pipeline-master@utils@rclone.py@.PATH_END.py
|
{
"filename": "_weightsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/hoverlabel/font/_weightsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `weightsrc` property of `scatter3d.hoverlabel.font`."""

    def __init__(
        self, plotly_name="weightsrc", parent_name="scatter3d.hoverlabel.font", **kwargs
    ):
        # edit_type defaults to "none" but may be overridden via kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@hoverlabel@font@_weightsrc.py@.PATH_END.py
|
{
"filename": "chains.ipynb",
"repo_name": "lucaborsato/trades",
"repo_path": "trades_extracted/trades-master/trades_example/python_examples/chains.ipynb",
"type": "Jupyter Notebook"
}
|
# TRADES CHAINS PLOTS
```python
import numpy as np
import os
import sys
import h5py  # needed below to read summary_parameters.hdf5
```
```python
# change accordingly to you pytrades path
trades_path = os.path.abspath("/path/to/trades/")
pytrades_path = os.path.join(trades_path, "pytrades")
sys.path.append(pytrades_path)
import ancillary as anc
import constants as cst
import trades_emcee_analysis as pyta
from convergence import full_statistics, log_probability_trace
import pytrades # this import a python module, not the F90 module!
```
```python
import matplotlib.pyplot as plt
%matplotlib inline
anc.set_rcParams()
```
Paths
```python
exo_path = os.path.abspath(
"/path/to/exoplanet/system/folder"
)
exo_sim = os.path.join(
exo_path,
"simulation_folder",
)
```
Read the configuration file, which must contain an `analysis` section
```python
yml_file = os.path.join(
exo_sim,
"configuration.yml"
)
cli = anc.ConfigurationAnalysis(yml_file)
```
Change parameters of the Configuration object, such as the `thinning`:
```python
cli.use_thin = 100
```
Set the analysis from the Configuration Object
```python
analysis = pyta.AnalysisTRADES(cli)
conf_run = anc.ConfigurationRun(cli.yaml_file)
```
```python
logs_folder = os.path.join(cli.full_path, "logs")
os.makedirs(logs_folder, exist_ok=True)
plots_folder = os.path.join(cli.full_path, "plots_{}thin".format(cli.use_thin))
os.makedirs(plots_folder, exist_ok=True)
anc.print_both("\nPlotting chains/convergence ... ")
# ===== LOG-PROB TRACE plot ===== #
log_probability_trace(
analysis.lnprobability_full_thinned,
analysis.lnprob_posterior,
plots_folder,
n_burn=cli.nburnin,
n_thin=conf_run.thin_by,
show_plot=False,
figsize=(6, 6)
)
# ===== CONVERGENCE ===== #
par_file = os.path.join(cli.full_path, "summary_parameters.hdf5")
stats_file = os.path.join(logs_folder, "convergence_stats.logs")
overplot = anc.set_overplot(cli.overplot)
with open(stats_file, 'w') as olog:
with h5py.File(par_file, "r") as s_h5f:
l = "fitted"
if overplot is not None:
sim_id_str = "{}".format(overplot)
overp_par = s_h5f["parameters/{:s}/{:s}/parameters".format(sim_id_str, l)][
...
]
else:
overp_par = analysis.fitting_posterior[np.argmax(analysis.lnprob_posterior), :]
exp_acf_fit, exp_steps_fit = full_statistics(
analysis.chains_full_thinned,
analysis.fitting_posterior,
analysis.fitting_names,
overp_par,
analysis.lnprob_posterior,
plots_folder,
olog=olog,
ilast=0,
n_burn=cli.nburnin,
n_thin=conf_run.thin_by,
show_plot=False,
figsize=(6, 6),
)
l = "physical"
if overplot is not None:
sim_id_str = "{}".format(overplot)
overp_par = s_h5f["parameters/{:s}/{:s}/parameters".format(sim_id_str, l)][
...
]
else:
overp_par = analysis.physical_posterior[np.argmax(analysis.lnprob_posterior), :]
exp_acf_phy, exp_steps_phy = full_statistics(
analysis.physical_chains,
analysis.physical_posterior,
analysis.physical_names,
overp_par,
analysis.lnprob_posterior,
plots_folder,
olog=olog,
ilast=analysis.sim.nfit,
n_burn=cli.nburnin,
n_thin=conf_run.thin_by,
show_plot=False,
figsize=(6, 6),
)
anc.print_both("", output=olog)
anc.print_both(
"All expected steps for each parameter needed to reach full convergence:\n{}".format(
exp_steps_fit
),
output=olog,
)
anc.print_both(
"All expected ACF len for each parameter needed to reach full convergence:\n{}".format(
exp_acf_fit
),
output=olog,
)
imax_acf = np.argmax(exp_acf_fit)
anc.print_both(
"MAX ACF = {} ==> needed chains of {} steps\n".format(
exp_acf_fit[imax_acf], exp_steps_fit[imax_acf]
),
output=olog,
)
```
```python
```
|
lucaborsatoREPO_NAMEtradesPATH_START.@trades_extracted@trades-master@trades_example@python_examples@chains.ipynb@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/_plotly_utils/README.md",
"type": "Markdown"
}
|
This package is for utilities that are used during code generation
and at runtime. The reason for not placing these under the main plotly/
package is that this avoids the complications of importing the module
we're generating code into during code generation.
This module must be independent of (it must not import from) both
plotly/ and codegen/
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@_plotly_utils@README.md@.PATH_END.py
|
{
"filename": "test_noise.py",
"repo_name": "aewallin/allantools",
"repo_path": "allantools_extracted/allantools-master/tests/functional_tests/test_noise.py",
"type": "Python"
}
|
#!/usr/bin/python
import sys
sys.path.append("..")
from allantools import noise
import numpy
import pytest
def test_noise():
    """Functional test of the allantools noise generators.

    Checks that white, brown, violet and pink noise generators each
    return a numpy array of the requested length.
    """
    N = 500
    #rate = 1.0
    w = noise.white(N)
    b = noise.brown(N)
    v = noise.violet(N)
    p = noise.pink(N)
    # check output length
    assert len(w) == N
    assert len(b) == N
    assert len(v) == N
    assert len(p) == N
    # check output type: isinstance is the idiomatic check and also
    # accepts ndarray subclasses
    for x in [w, b, v, p]:
        assert isinstance(x, numpy.ndarray), "%s is not numpy.ndarray" % (type(x))


if __name__ == "__main__":
    test_noise()
|
aewallinREPO_NAMEallantoolsPATH_START.@allantools_extracted@allantools-master@tests@functional_tests@test_noise.py@.PATH_END.py
|
{
"filename": "reordered_wcs.py",
"repo_name": "sunpy/ndcube",
"repo_path": "ndcube_extracted/ndcube-main/ndcube/wcs/wrappers/reordered_wcs.py",
"type": "Python"
}
|
import numpy as np
from astropy.wcs.wcsapi.wrappers.base import BaseWCSWrapper
__all__ = ['ReorderedLowLevelWCS']
class ReorderedLowLevelWCS(BaseWCSWrapper):
    """
    A wrapper for a low-level WCS object that has re-ordered
    pixel and/or world axes.

    Parameters
    ----------
    wcs : `~astropy.wcs.wcsapi.BaseLowLevelWCS`
        The original WCS for which to reorder axes
    pixel_order : iterable
        The indices of the original axes in the order of the
        new WCS.
    world_order : iterable
        The indices of the original axes in the order of the
        new WCS.
    """
    def __init__(self, wcs, pixel_order, world_order):
        # Each ordering must be a permutation of 0..n-1 for its axis count.
        if sorted(pixel_order) != list(range(wcs.pixel_n_dim)):
            raise ValueError(f'pixel_order should be a permutation of {list(range(wcs.pixel_n_dim))}')
        if sorted(world_order) != list(range(wcs.world_n_dim)):
            raise ValueError(f'world_order should be a permutation of {list(range(wcs.world_n_dim))}')
        self._wcs = wcs
        self._pixel_order = pixel_order
        self._world_order = world_order
        # argsort of a permutation gives its inverse: forward orders map
        # original->new positions, the inverses map new->original.
        self._pixel_order_inv = np.argsort(pixel_order)
        self._world_order_inv = np.argsort(world_order)

    @property
    def world_axis_physical_types(self):
        # Per-axis metadata is re-indexed with the forward permutation.
        return [self._wcs.world_axis_physical_types[idx] for idx in self._world_order]

    @property
    def world_axis_units(self):
        return [self._wcs.world_axis_units[idx] for idx in self._world_order]

    @property
    def pixel_axis_names(self):
        return [self._wcs.pixel_axis_names[idx] for idx in self._pixel_order]

    @property
    def world_axis_names(self):
        return [self._wcs.world_axis_names[idx] for idx in self._world_order]

    def pixel_to_world_values(self, *pixel_arrays):
        # Inputs arrive in the *new* axis order, so the inverse
        # permutation restores the original order the wrapped WCS
        # expects; outputs are then reordered with the forward one.
        pixel_arrays = [pixel_arrays[idx] for idx in self._pixel_order_inv]
        world_arrays = self._wcs.pixel_to_world_values(*pixel_arrays)
        return [world_arrays[idx] for idx in self._world_order]

    def world_to_pixel_values(self, *world_arrays):
        # Mirror of pixel_to_world_values: inverse on the way in,
        # forward on the way out.
        world_arrays = [world_arrays[idx] for idx in self._world_order_inv]
        pixel_arrays = self._wcs.world_to_pixel_values(*world_arrays)
        return [pixel_arrays[idx] for idx in self._pixel_order]

    @property
    def world_axis_object_components(self):
        return [self._wcs.world_axis_object_components[idx] for idx in self._world_order]

    @property
    def pixel_shape(self):
        # Shape may be None on the wrapped WCS; only reorder when present.
        if self._wcs.pixel_shape:
            return tuple([self._wcs.pixel_shape[idx] for idx in self._pixel_order])
        return None

    @property
    def pixel_bounds(self):
        if self._wcs.pixel_bounds:
            return tuple([self._wcs.pixel_bounds[idx] for idx in self._pixel_order])
        return None

    @property
    def axis_correlation_matrix(self):
        # Matrix is (world, pixel): permute rows by world order and
        # columns by pixel order.
        return self._wcs.axis_correlation_matrix[self._world_order][:, self._pixel_order]
|
sunpyREPO_NAMEndcubePATH_START.@ndcube_extracted@ndcube-main@ndcube@wcs@wrappers@reordered_wcs.py@.PATH_END.py
|
{
"filename": "counter_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/data/kernel_tests/counter_test.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.counter`."""
from absl.testing import parameterized
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
class CounterTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Checks element values and metadata of `tf.data.Dataset.counter`."""

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          # Two cases: counting up from 3 by 4, and down from 0 by 1.
          combinations.combine(start=3, step=4, expected_output=[[3, 7, 11]]) +
          combinations.combine(start=0, step=-1, expected_output=[[0, -1, -2]]))
  )
  def testCounter(self, start, step, expected_output):
    dataset = dataset_ops.Dataset.counter(start, step)
    # Counter yields scalars: legacy output shape is [] and dtype int64.
    self.assertEqual(
        [], dataset_ops.get_legacy_output_shapes(dataset).as_list())
    self.assertEqual(dtypes.int64, dataset_ops.get_legacy_output_types(dataset))
    get_next = self.getNext(dataset)
    for expected in expected_output:
      self.assertEqual(expected, self.evaluate(get_next()))
class CounterCheckpointTest(checkpoint_test_base.CheckpointTestBase,
                            parameterized.TestCase):
  """Verifies `tf.data.Dataset.counter` checkpoints and restores correctly."""

  def _build_counter_dataset(self, start, step, num_outputs, options=None):
    # Zip with a finite range so the otherwise-endless counter terminates
    # after num_outputs elements.
    counter_dataset = dataset_ops.Dataset.counter(start, step)
    range_dataset = dataset_ops.Dataset.range(num_outputs)
    dataset = dataset_ops.Dataset.zip((counter_dataset, range_dataset))
    if options:
      dataset = dataset.with_options(options)
    return dataset

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          checkpoint_test_base.default_test_combinations(),
          # Exercise both the explicit and symbolic checkpoint paths.
          combinations.combine(symbolic_checkpoint=[False, True])))
  def test(self, verify_fn, symbolic_checkpoint):
    num_outputs = 10
    options = options_lib.Options()
    options.experimental_symbolic_checkpoint = symbolic_checkpoint
    verify_fn(
        self, lambda: self._build_counter_dataset(
            start=2, step=10, num_outputs=num_outputs, options=options),
        num_outputs)


if __name__ == "__main__":
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@data@kernel_tests@counter_test.py@.PATH_END.py
|
{
"filename": "plotting_script.py",
"repo_name": "LucIJspeert/star_shadow",
"repo_path": "star_shadow_extracted/star_shadow-master/star_shadow/data/plotting_script.py",
"type": "Python"
}
|
"""Script for making some of the plots in IJspeert et al. 2024"""
import os
import fnmatch
import numpy as np
import scipy as sp
import scipy.stats
import pandas as pd
import matplotlib.pyplot as plt
import star_shadow as sts
## SYNTHETIC TESTS
# Collect all simulated light-curve files named sim_NNN_lc.dat.
# NOTE(review): '~' is not expanded by os.walk -- this only works if a
# literal './~/data' directory exists; os.path.expanduser is presumably
# intended here. TODO confirm.
syn_dir = '~/data'
all_files = []
for root, dirs, files in os.walk(syn_dir):
    for file_i in files:
        if fnmatch.fnmatch(file_i, 'sim_[0-9][0-9][0-9]_lc.dat'):
            all_files.append(os.path.join(root, file_i))
all_files = np.sort(all_files)
# plotting all per case diagnostic plots in series
for file in all_files:
    target_id = os.path.splitext(os.path.basename(file))[0]
    save_dir = os.path.dirname(file)
    # load the light curve (this needs to be different for TESS files)
    times, signal, signal_err = np.loadtxt(file, usecols=(0, 1, 2), unpack=True)
    # single chunk covering the whole time series
    i_half_s = np.array([[0, len(times)]])
    # NOTE(review): save_dir is passed both as the sixth positional
    # argument and as the save_dir keyword -- if the sixth positional
    # parameter of sequential_plotting is save_dir this raises
    # TypeError; verify against the sts.ut.sequential_plotting signature.
    sts.ut.sequential_plotting(times, signal, signal_err, i_half_s, target_id, save_dir, save_dir=save_dir, show=False)
# collect results in a summary file (run analysis to get the individual case results)
# Use case 000's summary to discover the column names (first CSV column).
summary_file = os.path.join(os.path.dirname(all_files[0]), 'sim_000_lc_analysis', 'sim_000_lc_analysis_summary.csv')
hdr = np.loadtxt(summary_file, usecols=(0), delimiter=',', unpack=True, dtype=str)
# Structured dtype: id and stage as strings, every other column a float.
obs_par_dtype = np.dtype([('id', '<U20'), ('stage', '<U20')] + [(name, float) for name in hdr[2:]])
obs_par = np.ones(len(all_files), dtype=obs_par_dtype)
for k, file in enumerate(all_files):
    target_id = os.path.splitext(os.path.basename(file))[0]
    data_dir = os.path.join(os.path.dirname(file), f'{target_id}_analysis')
    summary_file = os.path.join(data_dir, f'{target_id}_analysis_summary.csv')
    if os.path.isfile(summary_file):
        # Second CSV column holds the values for this case.
        obs_par[k] = tuple(np.loadtxt(summary_file, usecols=(1), delimiter=',', unpack=True, dtype=str))
    else:
        # Report cases whose analysis has not produced a summary yet.
        print(summary_file)
# load parameter files
# NOTE(review): this read_csv rebinds obs_par, discarding the structured
# array assembled in the loop above -- presumably the loop is only a
# consistency check / one-off collection step; confirm intent.
obs_par = pd.read_csv(syn_dir + '/test_results.csv')
true_par = pd.read_csv(syn_dir + '/sample_parameters_v02plus2.csv', index_col=0)
# transform result data
undetectable = [1, 2, 4, 5, 17, 21, 30, 32, 36, 37, 39, 46, 54, 59, 61, 65, 69, 70, 73, 80, 81, 83, 85, 90, 93, 95]
undetectable_sec = [3, 11, 13, 15, 22, 31, 33, 35, 38, 44, 45, 50, 74, 76, 79, 91]
# number of cycles
cycles = obs_par['t_tot'] / true_par['period']
sorter_c = np.argsort(cycles)[::-1]
# periods and bad datapoints
p_measure_1 = (obs_par['period'] - true_par['period']) / true_par['period']
p_measure_2 = (obs_par['period'] - true_par['period']) / obs_par['p_err']
p_error_1 = obs_par['p_err'] / true_par['period']
p_hdi_1 = obs_par['p_err_l'] / true_par['period']
p_hdi_2 = obs_par['p_err_u'] / true_par['period']
bad_p = (obs_par['period'][sorter_c] == -1) | (obs_par['p_err'][sorter_c] == -1)
finished = ((obs_par['stage'].astype(int) == 10) | (obs_par['stage'].astype(int) == 9)
| (obs_par['stage'].astype(int) == 8))
p_good = np.array([case not in undetectable for case in range(100)])
ps_good = np.array([case not in undetectable + undetectable_sec for case in range(100)])
fin_good = ps_good & finished & (np.abs(p_measure_1) < 0.01)
# sort by primary depth (plot depths and levels)
max_depth = np.max((obs_par['depth_1'], obs_par['depth_2']), axis=0)
max_depth = np.max((true_par['primary_depth'], true_par['secondary_depth']), axis=0)
min_depth = np.min((obs_par['depth_1'], obs_par['depth_2']), axis=0)
deeper_sec = (obs_par['depth_2'] > obs_par['depth_1'])
sorter_dmax = np.argsort(max_depth)[::-1]
sorter_dmin = np.argsort(min_depth)[::-1]
max_ratio = np.max((obs_par['ratio_3_1'], obs_par['ratio_3_2']), axis=0)
min_ratio = np.min((obs_par['ratio_3_1'], obs_par['ratio_3_2']), axis=0)
sorter_rmax = np.argsort(max_ratio)[::-1]
sorter_rmin = np.argsort(min_ratio)[::-1]
harm_resid = obs_par['std_4'] / obs_par['std_1']
# period and period uncertainty, ordered by primary depth
bad_p_2 = (obs_par['period'][sorter_dmax] == -1) | (obs_par['p_err'][sorter_dmax] == -1)
# absolute differences and errors
# ecosw
sign_flip = [7, 23, 28, 56, 60, 89, 92, 98] # these have prim and sec reversed (not by their mistake)
ecosw_form = obs_par['ecosw_form']
ecosw_phys = obs_par['ecosw_phys']
ecosw_form.loc[sign_flip] = -1 * obs_par['ecosw_form'] # flip sign
ecosw_phys.loc[sign_flip] = -1 * obs_par['ecosw_phys'] # flip sign
ecosw_measure_1 = (ecosw_form - (true_par['ecc'] * np.cos(true_par['omega'])))
ecosw_measure_2 = (ecosw_phys - (true_par['ecc'] * np.cos(true_par['omega'])))
ecosw_err_1 = np.vstack((obs_par['ecosw_low'], obs_par['ecosw_upp']))
ecosw_err_2 = np.vstack((obs_par['ecosw_err_l'], obs_par['ecosw_err_u']))
err_side_1 = np.clip(np.sign(-ecosw_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-ecosw_measure_2), 0, 1).astype(int)
ecosw_measure_4 = ecosw_measure_1 / ecosw_err_1[err_side_1, np.arange(100)]
ecosw_measure_5 = ecosw_measure_2 / ecosw_err_1[err_side_2, np.arange(100)]
# esinw
esinw_form = obs_par['esinw_form']
esinw_phys = obs_par['esinw_phys']
esinw_form.loc[sign_flip] = -1 * obs_par['esinw_form'] # flip sign
esinw_phys.loc[sign_flip] = -1 * obs_par['esinw_phys'] # flip sign
esinw_measure_1 = (esinw_form - (true_par['ecc'] * np.sin(true_par['omega'])))
esinw_measure_2 = (esinw_phys - (true_par['ecc'] * np.sin(true_par['omega'])))
esinw_err_1 = np.vstack((obs_par['esinw_low'], obs_par['esinw_upp']))
esinw_err_2 = np.vstack((obs_par['esinw_err_l'], obs_par['esinw_err_u']))
err_side_1 = np.clip(np.sign(-esinw_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-esinw_measure_2), 0, 1).astype(int)
esinw_measure_4 = esinw_measure_1 / esinw_err_1[err_side_1, np.arange(100)]
esinw_measure_5 = esinw_measure_2 / esinw_err_1[err_side_2, np.arange(100)]
# cosi
cosi_measure_1 = (obs_par['cosi_form'] - np.cos(true_par['incl']/180*np.pi))
cosi_measure_2 = (obs_par['cosi_phys'] - np.cos(true_par['incl']/180*np.pi))
cosi_err_1 = np.vstack((obs_par['cosi_low'], obs_par['cosi_upp']))
cosi_err_2 = np.vstack((obs_par['cosi_err_l'], obs_par['cosi_err_u']))
err_side_1 = np.clip(np.sign(-cosi_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-cosi_measure_2), 0, 1).astype(int)
cosi_measure_4 = cosi_measure_1 / cosi_err_1[err_side_1, np.arange(100)]
cosi_measure_5 = cosi_measure_2 / cosi_err_1[err_side_2, np.arange(100)]
# phi_0
phi_0_true = sts.af.phi_0_from_r_sum_sma(true_par['ecc'].to_numpy(), true_par['incl'].to_numpy()/180*np.pi, true_par['r_sum'].to_numpy())
phi_0_true[np.isnan(phi_0_true)] = 0
phi_0_measure_1 = (obs_par['phi_0_form'] - phi_0_true)
phi_0_measure_2 = (obs_par['phi_0_phys'] - phi_0_true)
phi_0_err_1 = np.vstack((obs_par['phi_0_low'], obs_par['phi_0_upp']))
phi_0_err_2 = np.vstack((obs_par['phi_0_err_l'], obs_par['phi_0_err_u']))
err_side_1 = np.clip(np.sign(-phi_0_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-phi_0_measure_2), 0, 1).astype(int)
phi_0_measure_4 = phi_0_measure_1 / phi_0_err_1[err_side_1, np.arange(100)]
phi_0_measure_5 = phi_0_measure_2 / phi_0_err_1[err_side_2, np.arange(100)]
# log r_rat
log_rr_true = np.log10(true_par['r_rat'])
log_rr_form = obs_par['log_rr_form']
log_rr_phys = obs_par['log_rr_phys']
log_rr_form.loc[sign_flip] = -1 * obs_par['log_rr_form'] # flip sign
log_rr_phys.loc[sign_flip] = -1 * obs_par['log_rr_phys'] # flip sign
log_rr_measure_1 = (log_rr_form - log_rr_true)
log_rr_measure_2 = (log_rr_phys - log_rr_true)
log_rr_err_1 = np.vstack((obs_par['log_rr_low'], obs_par['log_rr_upp']))
log_rr_err_2 = np.vstack((obs_par['log_rr_err_l'], obs_par['log_rr_err_u']))
err_side_1 = np.clip(np.sign(-log_rr_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-log_rr_measure_2), 0, 1).astype(int)
log_rr_measure_4 = log_rr_measure_1 / log_rr_err_1[err_side_1, np.arange(100)]
log_rr_measure_5 = log_rr_measure_2 / log_rr_err_1[err_side_2, np.arange(100)]
# log sb_rat
log_sb_true = np.log10(true_par['Sb'])
log_sb_form = obs_par['log_sb_form']
log_sb_phys = obs_par['log_sb_phys']
log_sb_form.loc[sign_flip] = -1 * obs_par['log_sb_form'] # flip sign
log_sb_phys.loc[sign_flip] = -1 * obs_par['log_sb_phys'] # flip sign
log_sb_measure_1 = (log_sb_form - log_sb_true)
log_sb_measure_2 = (log_sb_phys - log_sb_true)
log_sb_err_1 = np.vstack((obs_par['log_sb_low'], obs_par['log_sb_upp']))
log_sb_err_2 = np.vstack((obs_par['log_sb_err_l'], obs_par['log_sb_err_u']))
err_side_1 = np.clip(np.sign(-log_sb_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-log_sb_measure_2), 0, 1).astype(int)
log_sb_measure_4 = log_sb_measure_1 / log_sb_err_1[err_side_1, np.arange(100)]
log_sb_measure_5 = log_sb_measure_2 / log_sb_err_1[err_side_2, np.arange(100)]
# eccentricity
e_measure_1 = (obs_par['e_form'] - true_par['ecc'])
e_measure_2 = (obs_par['e_phys'] - true_par['ecc'])
e_err_1 = np.vstack((obs_par['e_low'], obs_par['e_upp']))
e_err_2 = np.vstack((obs_par['e_err_l'], obs_par['e_err_u']))
err_side_1 = np.clip(np.sign(-e_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-e_measure_2), 0, 1).astype(int)
e_measure_4 = e_measure_1 / e_err_1[err_side_1, np.arange(100)]
e_measure_5 = e_measure_2 / e_err_1[err_side_2, np.arange(100)]
# omega
w_form = obs_par['w_form']
w_phys = obs_par['w_phys']
w_form.loc[sign_flip] = (obs_par['w_form'].loc[sign_flip] - np.pi) % (2 * np.pi)
w_phys.loc[sign_flip] = (obs_par['w_phys'].loc[sign_flip] - np.pi) % (2 * np.pi)
w_measure_1 = (w_form - (true_par['omega'] % (2 * np.pi))) % (2 * np.pi)
w_measure_1[w_measure_1 > np.pi] = w_measure_1 - 2 * np.pi
w_measure_2 = (w_phys - (true_par['omega'] % (2 * np.pi))) % (2 * np.pi)
w_measure_2[w_measure_2 > np.pi] = w_measure_2 - 2 * np.pi
w_err_1 = np.vstack((obs_par['w_low'], obs_par['w_upp']))
w_err_2 = np.vstack((np.max([w_err_1[0], obs_par['w_sig']], axis=0), np.max([w_err_1[1], obs_par['w_sig']], axis=0)))
err_side_1 = np.clip(np.sign(-w_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-w_measure_2), 0, 1).astype(int)
w_measure_4 = w_measure_1 / w_err_2[err_side_1, np.arange(100)]
w_measure_5 = w_measure_2 / w_err_2[err_side_2, np.arange(100)]
# inclination
i_measure_1 = (obs_par['i_form'] - (true_par['incl']/180*np.pi))
i_measure_2 = (obs_par['i_phys'] - (true_par['incl']/180*np.pi))
i_err_1 = np.vstack((obs_par['i_low'], obs_par['i_upp']))
# i_err_2 = np.vstack((obs_par['i_err_l'], obs_par['i_err_r']))
err_side_1 = np.clip(np.sign(-i_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-i_measure_2), 0, 1).astype(int)
i_measure_4 = i_measure_1 / i_err_1[err_side_1, np.arange(100)]
i_measure_5 = i_measure_2 / i_err_1[err_side_2, np.arange(100)]
# r_sum
obs_par['r_sum_form'][np.isnan(obs_par['r_sum_form'])] = -1
obs_par['r_sum_phys'][np.isnan(obs_par['r_sum_phys'])] = -1
r_sum_measure_1 = (obs_par['r_sum_form'] - true_par['r_sum'])
r_sum_measure_2 = (obs_par['r_sum_phys'] - true_par['r_sum'])
r_sum_err = np.vstack((obs_par['r_sum_low'], obs_par['r_sum_upp']))
err_side_1 = np.clip(np.sign(-r_sum_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-r_sum_measure_2), 0, 1).astype(int)
r_sum_measure_4 = r_sum_measure_1 / r_sum_err[err_side_1, np.arange(100)]
r_sum_measure_5 = r_sum_measure_2 / r_sum_err[err_side_2, np.arange(100)]
# r_rat
r_rat_form = obs_par['r_rat_form']
r_rat_phys = obs_par['r_rat_phys']
r_rat_form.loc[sign_flip] = 1 / obs_par['r_rat_form'].loc[sign_flip] # flip fraction
r_rat_phys.loc[sign_flip] = 1 / obs_par['r_rat_phys'].loc[sign_flip] # flip fraction
r_rat_measure_1 = (r_rat_form - true_par['r_rat'])
r_rat_measure_2 = (r_rat_phys - true_par['r_rat'])
r_rat_err = np.vstack((obs_par['r_rat_low'], obs_par['r_rat_upp'])) # error bars are changed as well:
err_side_1 = np.clip(np.sign(-r_rat_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-r_rat_measure_2), 0, 1).astype(int)
r_rat_measure_4 = r_rat_measure_1 / r_rat_err[err_side_1, np.arange(100)]
r_rat_measure_5 = r_rat_measure_2 / r_rat_err[err_side_2, np.arange(100)]
# sb_rat
sb_rat_form = obs_par['sb_rat_form']
sb_rat_phys = obs_par['sb_rat_phys']
sb_rat_form.loc[sign_flip] = 1 / obs_par['sb_rat_form'].loc[sign_flip] # flip fraction
sb_rat_phys.loc[sign_flip] = 1 / obs_par['sb_rat_phys'].loc[sign_flip] # flip fraction
sb_rat_measure_1 = (sb_rat_form - true_par['Sb'])
sb_rat_measure_2 = (sb_rat_phys - true_par['Sb'])
sb_rat_err = np.vstack((obs_par['sb_rat_low'], obs_par['sb_rat_upp'])) # error bars are changed as well:
err_side_1 = np.clip(np.sign(-sb_rat_measure_1), 0, 1).astype(int)
err_side_2 = np.clip(np.sign(-sb_rat_measure_2), 0, 1).astype(int)
sb_rat_measure_4 = sb_rat_measure_1 / sb_rat_err[err_side_1, np.arange(100)]
sb_rat_measure_5 = sb_rat_measure_2 / sb_rat_err[err_side_2, np.arange(100)]
# period and period uncertainty, ordered by number of cycles
# Figure: relative period error per test case, sorted by the number of
# observed orbital cycles (twin axis shows the cycle count). Cases in bad_p
# are re-drawn in red; undetectable cases get hatched/plain grey bands and
# cases with fewer than 2 cycles get an orange band.
fix_range = 0.01
data_range = np.array([np.min(p_measure_1), np.max(p_measure_1)])
cycles_b2 = true_par.index[cycles[sorter_c] < 2][0] - 0.5  # sorted index for cycles below 2
fig, ax = plt.subplots(figsize=(12, 5))
ax.plot(true_par.index, np.zeros(100), c='tab:grey', alpha=0.8)
ax.errorbar(true_par.index, p_measure_1[sorter_c], yerr=p_error_1[sorter_c],
            capsize=2, marker='.', c='tab:blue', linestyle='none')
ax.errorbar(true_par.index[bad_p], p_measure_1[sorter_c][bad_p], yerr=p_error_1[sorter_c][bad_p],
            capsize=2, marker='.', c='tab:red', linestyle='none')
for i in true_par.index:
    # position of case i in the cycle-sorted ordering
    p = true_par.index[i == sorter_c][0]
    if i in undetectable:
        # hatched grey band: no eclipses visible for this case
        ax.fill_between([p - 0.5, p + 0.5], data_range[[0, 0]], color='tab:grey', alpha=0.25, linewidth=0.0, hatch='/')
        ax.fill_between([p - 0.5, p + 0.5], data_range[[1, 1]], color='tab:grey', alpha=0.25, linewidth=0.0, hatch='/')
    if i in undetectable_sec:
        # plain grey band: secondary eclipse not visible
        ax.fill_between([p - 0.5, p + 0.5], data_range[[0, 0]], color='tab:grey', alpha=0.25, linewidth=0.0, hatch='')
        ax.fill_between([p - 0.5, p + 0.5], data_range[[1, 1]], color='tab:grey', alpha=0.25, linewidth=0.0, hatch='')
    n = obs_par['stage'][i]  # NOTE(review): only used by the commented-out annotation below
    # ax.annotate(f'{i}, {n}', (p, p_measure_1[i]), alpha=0.6)
# orange band over all cases right of cycles_b2 (fewer than 2 cycles)
ax.fill_between([cycles_b2, 99.5], data_range[[0, 0]], color='tab:orange', alpha=0.25, linewidth=0.0)
ax.fill_between([cycles_b2, 99.5], data_range[[1, 1]], color='tab:orange', alpha=0.25, linewidth=0.0)
# empty fills exist purely to create legend entries
ax.fill_between([], [], color='tab:grey', alpha=0.25, linewidth=0.0, label='no eclipses visible', hatch='/')
ax.fill_between([], [], color='tab:grey', alpha=0.25, linewidth=0.0, label='no secondary visible')
ax.fill_between([], [], color='tab:orange', alpha=0.25, linewidth=0.0, label='n<2 cycles')
ax.set_ylim(-fix_range, fix_range)
ax.set_xlabel('test case (sorted by number of cycles)', fontsize=14)
ax.set_ylabel(r'$\frac{P_{measured} - P_{input}}{P_{input}}$', fontsize=20)
ax2 = ax.twinx()
ax2.plot(true_par.index, cycles[sorter_c], marker='.', c='tab:grey', alpha=0.6)
ax2.set_ylim(-310, 310)
ax2.set_ylabel('Number of cycles', fontsize=14)
ax.legend()
plt.tight_layout()
plt.show()
# period and period uncertainty, ordered by primary depth
# Same figure as above but sorted by primary eclipse depth (twin axis shows
# the depth); bad_p_2 cases are re-drawn in red.
fix_range = 0.01
fig, ax = plt.subplots(figsize=(12, 5))
ax.plot(true_par.index, np.zeros(100), c='tab:grey', alpha=0.8)
ax.errorbar(true_par.index, p_measure_1[sorter_dmax], yerr=p_error_1[sorter_dmax],
            capsize=2, marker='.', c='tab:blue', linestyle='none')
ax.errorbar(true_par.index[bad_p_2], p_measure_1[sorter_dmax][bad_p_2], yerr=p_error_1[sorter_dmax][bad_p_2],
            capsize=2, marker='.', c='tab:red', linestyle='none')
for i in true_par.index:
    # position of case i in the depth-sorted ordering
    p = true_par.index[i == sorter_dmax][0]
    if i in undetectable:
        # hatched grey band: no eclipses visible
        ax.fill_between([p - 0.5, p + 0.5], data_range[[0, 0]], color='tab:grey', alpha=0.25, linewidth=0.0, hatch='/')
        ax.fill_between([p - 0.5, p + 0.5], data_range[[1, 1]], color='tab:grey', alpha=0.25, linewidth=0.0, hatch='/')
    if i in undetectable_sec:
        # plain grey band: no secondary eclipse visible
        ax.fill_between([p - 0.5, p + 0.5], data_range[[0, 0]], color='tab:grey', alpha=0.25, linewidth=0.0, hatch='')
        ax.fill_between([p - 0.5, p + 0.5], data_range[[1, 1]], color='tab:grey', alpha=0.25, linewidth=0.0, hatch='')
    if (cycles[sorter_dmax][i] < 2):
        # orange band: fewer than 2 observed cycles (marked per case here,
        # since the depth ordering does not group them contiguously)
        ax.fill_between([p - 0.5, p + 0.5], data_range[[0, 0]], color='tab:orange', alpha=0.25, linewidth=0.0)
        ax.fill_between([p - 0.5, p + 0.5], data_range[[1, 1]], color='tab:orange', alpha=0.25, linewidth=0.0)
    # ax.annotate(f'{i}', (p, p_measure_1[i]), alpha=0.6)
# empty fills exist purely to create legend entries
ax.fill_between([], [], color='tab:grey', alpha=0.25, linewidth=0.0, label='no eclipses visible', hatch='/')
ax.fill_between([], [], color='tab:grey', alpha=0.25, linewidth=0.0, label='no secondary visible')
ax.fill_between([], [], color='tab:orange', alpha=0.25, linewidth=0.0, label='n<2 cycles')
ax.set_ylim(-fix_range, fix_range)
ax.set_xlabel('test case (sorted by eclipse depth)', fontsize=14)
ax.set_ylabel(r'$\frac{P_{measured} - P_{input}}{P_{input}}$', fontsize=20)
ax2 = ax.twinx()
ax2.plot(true_par.index, max_depth[sorter_dmax], marker='.', c='tab:grey', alpha=0.6)
ax2.set_ylim(-0.5, 0.5)
ax2.set_ylabel('Primary eclipse depth', fontsize=14)
ax.legend()
plt.tight_layout()
plt.show()
# e and e_err vs secondary depth
# Top panel: input vs measured eccentricity against secondary eclipse depth,
# with dotted connectors showing each case's offset. Bottom panel: the
# deviation with asymmetric error bars.
fix_range = 0.23
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax[0].scatter(min_depth[fin_good], true_par['ecc'][fin_good], marker='.', c='tab:grey', alpha=0.8, label='input')
ax[0].scatter(min_depth[fin_good], obs_par['e_phys'][fin_good], marker='.', c='tab:blue', label='eclipse model')
for x_par, y_par, true_y in zip(min_depth[fin_good], obs_par['e_phys'][fin_good], true_par['ecc'][fin_good]):
    # dotted vertical segment connecting measured to input value
    ax[0].plot([x_par, x_par], [y_par, true_y], ':', c='tab:gray')
ax[0].set_ylim(-0.1, 1.1)
ax[0].set_ylabel('eccentricity', fontsize=14)
ax[0].legend()
ax[1].plot([0, np.max(min_depth[fin_good])], [0, 0], c='tab:grey', alpha=0.8)
ax[1].errorbar(min_depth[fin_good], e_measure_2[fin_good],
               yerr=np.vstack((e_err_1[0][fin_good], e_err_1[1][fin_good])),
               capsize=2, marker='.', c='tab:blue', linestyle='none', label='eclipse model')
# for i in true_par.index:
#     if fin_good[i]:
#         ax[1].annotate(f'{i}', (min_depth[i], e_measure_2[i]), alpha=0.6)
ax[1].set_ylim(-fix_range, fix_range)
ax[1].set_xlabel('secondary eclipse depth', fontsize=14)
ax[1].set_ylabel('$e_{measured} - e_{input}$', fontsize=14)
plt.tight_layout()
plt.subplots_adjust(hspace=0)
plt.show()
# ecosw
# Same two-panel layout as the eccentricity figure, for e*cos(omega).
fix_range = 0.09
true_ecosw = true_par['ecc'] * np.cos(true_par['omega'])
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(16, 9))
ax[0].scatter(min_depth[fin_good], true_ecosw[fin_good], marker='.', c='tab:grey', alpha=0.8, label='input')
ax[0].scatter(min_depth[fin_good], ecosw_phys[fin_good], marker='.', c='tab:blue', label='eclipse model')
for x_par, y_par, true_y in zip(min_depth[fin_good], ecosw_phys[fin_good], true_ecosw[fin_good]):
    # dotted vertical segment connecting measured to input value
    ax[0].plot([x_par, x_par], [y_par, true_y], ':', c='tab:gray')
ax[0].set_ylim(-1.1, 1.1)
ax[0].set_ylabel('ecosw', fontsize=14)
ax[0].legend()
ax[1].plot([0, np.max(min_depth[fin_good])], [0, 0], c='tab:grey', alpha=0.8)
# NOTE(review): yerr re-uses the eccentricity errors e_err_1 rather than a
# dedicated ecosw error — presumably a stand-in; verify if ecosw errors exist.
ax[1].errorbar(min_depth[fin_good], ecosw_measure_2[fin_good],
               yerr=np.vstack((e_err_1[0][fin_good], e_err_1[1][fin_good])),
               capsize=2, marker='.', c='tab:blue', linestyle='none', label='eclipse model')
# for i in true_par.index:
#     if fin_good[i]:
#         ax[1].annotate(f'{i}', (min_depth[i], ecosw_measure_2[i]), alpha=0.6)
ax[1].set_ylim(-fix_range, fix_range)
ax[1].set_xlabel('secondary eclipse depth', fontsize=14)
ax[1].set_ylabel('$ecosw_{measured} - ecosw_{input}$', fontsize=14)
plt.tight_layout()
plt.subplots_adjust(hspace=0)
plt.show()
# plot absolute i difference versus third light
# Top panel: input vs measured inclination against third light (with a dashed
# 90-degree reference); bottom panel: the deviation with asymmetric errors.
# The 90-degree label is a raw string so '\c' is not parsed as a (bad) escape.
fig, ax = plt.subplots(nrows=2, sharex=True, figsize=(6, 6))
ax[0].plot([0, np.max(true_par['l3'])], [90, 90], '--', c='tab:grey', alpha=0.8, label=r'$90^\circ$')
ax[0].scatter(true_par['l3'][fin_good], true_par['incl'][fin_good], marker='.', c='tab:grey', alpha=0.8, label='input')
ax[0].scatter(true_par['l3'][fin_good], obs_par['i_phys'][fin_good]/np.pi*180, marker='.', c='tab:blue',
              label='eclipse model')
for x_par, y_par, true_y in zip(true_par['l3'][fin_good], obs_par['i_phys'][fin_good]/np.pi*180, true_par['incl'][fin_good]):
    # dotted vertical segment connecting measured to input value
    ax[0].plot([x_par, x_par], [y_par, true_y], ':', c='tab:gray')
ax[0].set_ylim(45, 95)
ax[0].set_ylabel('inclination (degrees)', fontsize=14)
ax[0].legend()
ax[1].plot([0, np.max(true_par['l3'])], [0, 0], c='tab:grey', alpha=0.8)
ax[1].errorbar(true_par['l3'][fin_good], i_measure_2[fin_good] / np.pi * 180,
               yerr=np.vstack((i_err_1[0][fin_good] / np.pi * 180, i_err_1[1][fin_good] / np.pi * 180)),
               capsize=2, marker='.', c='tab:blue', linestyle='none', label='eclipse model')
# for i in true_par.index:
#     if fin_good[i]:
#         ax[1].annotate(f'{i}', (true_par['l3'][fin_good][i], i_measure_2[i]/np.pi*180), alpha=0.6)
ax[1].set_xlabel('third light', fontsize=14)
ax[1].set_ylabel('$i_{measured} - i_{input}$ (degrees)', fontsize=14)
plt.tight_layout()
plt.subplots_adjust(hspace=0)
plt.show()
# KDE and hist - error in p
# Histogram + Gaussian KDE of the sigma-scaled period deviations for the
# well-measured cases. Label is a raw string so '\c' is not a bad escape.
norm_kde_2 = sp.stats.gaussian_kde(p_measure_2[fin_good], bw_method=1/(p_measure_2[fin_good]).std())
points = np.arange(-7, 7, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(p_measure_2[fin_good], bins=np.arange(-6.875, 7, 0.5), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_2(points) * len(p_measure_2[fin_good]), color='tab:blue', linewidth=4)
ax.set_xlabel(r'$\chi_p$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in e
# Absolute eccentricity deviations for formulae vs eclipse model.
norm_kde_1 = sp.stats.gaussian_kde(e_measure_1[fin_good], bw_method=0.02/(e_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(e_measure_2[fin_good], bw_method=0.02/(e_measure_2[fin_good]).std())
points = np.arange(-0.3, 0.3, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(e_measure_1[fin_good], bins=np.arange(-0.3, 0.32, 0.02), linewidth=0, alpha=0.3)
ax.hist(e_measure_2[fin_good], bins=np.arange(-0.3, 0.32, 0.02), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel('$e_{measured} - e_{input}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - scaled error in e
# Sigma-scaled eccentricity deviations; label raw-stringed ('\c' escape).
norm_kde_4 = sp.stats.gaussian_kde(e_measure_4[fin_good], bw_method=1/(e_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(e_measure_5[fin_good], bw_method=1/(e_measure_5[fin_good]).std())
points = np.arange(-14, 14, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(e_measure_4[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.hist(e_measure_5[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(e_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(e_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_e$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in ecosw
# Absolute e*cos(w) deviations for formulae vs eclipse model.
norm_kde_1 = sp.stats.gaussian_kde(ecosw_measure_1[fin_good], bw_method=0.005/(ecosw_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(ecosw_measure_2[fin_good], bw_method=0.005/(ecosw_measure_2[fin_good]).std())
points = np.arange(-0.06, 0.06, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(ecosw_measure_1[fin_good], bins=np.arange(-0.06, 0.07, 0.005), linewidth=0, alpha=0.3)
ax.hist(ecosw_measure_2[fin_good], bins=np.arange(-0.06, 0.07, 0.005), linewidth=0, alpha=0.3)
# NOTE(review): KDE here is scaled by 1/2 (other panels use len() or nothing)
# — presumably tuned to the 0.005 bin width; verify against the histogram.
ax.plot(points, norm_kde_1(points) / 2, color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points) / 2, color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel('$ecos(w)_{measured} - ecos(w)_{input}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - scaled error in ecosw
# Sigma-scaled e*cos(w) deviations; label raw-stringed ('\c' escape).
norm_kde_4 = sp.stats.gaussian_kde(ecosw_measure_4[fin_good], bw_method=1/(ecosw_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(ecosw_measure_5[fin_good], bw_method=1/(ecosw_measure_5[fin_good]).std())
points = np.arange(-14, 14, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(ecosw_measure_4[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.hist(ecosw_measure_5[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(ecosw_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(ecosw_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{ecos(w)}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in esinw
# Absolute e*sin(w) deviations for formulae vs eclipse model.
norm_kde_1 = sp.stats.gaussian_kde(esinw_measure_1[fin_good], bw_method=0.02/(esinw_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(esinw_measure_2[fin_good], bw_method=0.02/(esinw_measure_2[fin_good]).std())
points = np.arange(-0.35, 0.35, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(esinw_measure_1[fin_good], bins=np.arange(-0.35, 0.36, 0.02), linewidth=0, alpha=0.3)
ax.hist(esinw_measure_2[fin_good], bins=np.arange(-0.35, 0.36, 0.02), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel('$esin(w)_{measured} - esin(w)_{input}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - scaled error in esinw
# Sigma-scaled e*sin(w) deviations; label raw-stringed ('\c' escape).
norm_kde_4 = sp.stats.gaussian_kde(esinw_measure_4[fin_good], bw_method=1/(esinw_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(esinw_measure_5[fin_good], bw_method=1/(esinw_measure_5[fin_good]).std())
points = np.arange(-14, 14, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(esinw_measure_4[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.hist(esinw_measure_5[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(esinw_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(esinw_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{esin(w)}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in w
# Absolute argument-of-periastron deviations for formulae vs eclipse model.
norm_kde_1 = sp.stats.gaussian_kde(w_measure_1[fin_good], bw_method=0.05/(w_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(w_measure_2[fin_good], bw_method=0.05/(w_measure_2[fin_good]).std())
points = np.arange(-1, 1, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(w_measure_1[fin_good], bins=np.arange(-1, 1, 0.05), linewidth=0, alpha=0.3)
ax.hist(w_measure_2[fin_good], bins=np.arange(-1, 1, 0.05), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points) * 2, color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points) * 2, color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel('$w_{measured} - w_{input} (radians)$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in w
# Sigma-scaled omega deviations; label raw-stringed ('\c' escape).
norm_kde_4 = sp.stats.gaussian_kde(w_measure_4[fin_good], bw_method=1/(w_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(w_measure_5[fin_good], bw_method=1/(w_measure_5[fin_good]).std())
points = np.arange(-14, 14, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(w_measure_4[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.hist(w_measure_5[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(w_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(w_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_w$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in i
# Absolute inclination deviations (radians) for formulae vs eclipse model.
norm_kde_1 = sp.stats.gaussian_kde(i_measure_1[fin_good], bw_method=0.02/(i_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(i_measure_2[fin_good], bw_method=0.02/(i_measure_2[fin_good]).std())
points = np.arange(-0.3, 0.3, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(i_measure_1[fin_good], bins=np.arange(-0.3, 0.32, 0.02), linewidth=0, alpha=0.3)
ax.hist(i_measure_2[fin_good], bins=np.arange(-0.3, 0.32, 0.02), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel('$i_{measured} - i_{input} (radians)$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in i
# Sigma-scaled inclination deviations; label raw-stringed ('\c' escape).
norm_kde_4 = sp.stats.gaussian_kde(i_measure_4[fin_good], bw_method=1/(i_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(i_measure_5[fin_good], bw_method=1/(i_measure_5[fin_good]).std())
points = np.arange(-9, 9, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(i_measure_4[fin_good], bins=np.arange(-9, 10), linewidth=0, alpha=0.3)
ax.hist(i_measure_5[fin_good], bins=np.arange(-9, 10), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(i_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(i_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_i$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in cosi
# Absolute cos(i) deviations for formulae vs eclipse model.
norm_kde_1 = sp.stats.gaussian_kde(cosi_measure_1[fin_good], bw_method=0.02/(cosi_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(cosi_measure_2[fin_good], bw_method=0.02/(cosi_measure_2[fin_good]).std())
points = np.arange(-0.3, 0.3, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(cosi_measure_1[fin_good], bins=np.arange(-0.3, 0.32, 0.02), linewidth=0, alpha=0.3)
ax.hist(cosi_measure_2[fin_good], bins=np.arange(-0.3, 0.32, 0.02), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel('$cos(i)_{measured} - cos(i)_{input} (radians)$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in cosi
# FIX(review): this panel previously re-used i_measure_4/5 (a verbatim copy of
# the chi_i panel) while being labelled chi_cos(i); switched to
# cosi_measure_4/5 to match the label — confirm these are defined alongside
# cosi_measure_1/2 like the other quantities.
norm_kde_4 = sp.stats.gaussian_kde(cosi_measure_4[fin_good], bw_method=1/(cosi_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(cosi_measure_5[fin_good], bw_method=1/(cosi_measure_5[fin_good]).std())
points = np.arange(-9, 9, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(cosi_measure_4[fin_good], bins=np.arange(-9, 10), linewidth=0, alpha=0.3)
ax.hist(cosi_measure_5[fin_good], bins=np.arange(-9, 10), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(cosi_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(cosi_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{cos(i)}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in r_sum
# Absolute sum-of-radii deviations; labels raw-stringed ('\_' escapes).
norm_kde_1 = sp.stats.gaussian_kde(r_sum_measure_1[fin_good], bw_method=0.02/(r_sum_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(r_sum_measure_2[fin_good], bw_method=0.02/(r_sum_measure_2[fin_good]).std())
points = np.arange(-0.22, 0.22, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(r_sum_measure_1[fin_good], bins=np.arange(-0.22, 0.23, 0.02), linewidth=0, alpha=0.3)
ax.hist(r_sum_measure_2[fin_good], bins=np.arange(-0.22, 0.23, 0.02), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$r\_sum_{measured} - r\_sum_{input}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in r_sum
# Sigma-scaled sum-of-radii deviations; label raw-stringed ('\c' escape).
norm_kde_4 = sp.stats.gaussian_kde(r_sum_measure_4[fin_good], bw_method=1/(r_sum_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(r_sum_measure_5[fin_good], bw_method=1/(r_sum_measure_5[fin_good]).std())
points = np.arange(-9, 9, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(r_sum_measure_4[fin_good], bins=np.arange(-9, 10), linewidth=0, alpha=0.3)
ax.hist(r_sum_measure_5[fin_good], bins=np.arange(-9, 10), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(r_sum_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(r_sum_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{r sum}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in phi_0
# Absolute phi_0 deviations; labels raw-stringed ('\p' escapes).
norm_kde_1 = sp.stats.gaussian_kde(phi_0_measure_1[fin_good], bw_method=0.02/(phi_0_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(phi_0_measure_2[fin_good], bw_method=0.02/(phi_0_measure_2[fin_good]).std())
points = np.arange(-0.15, 0.15, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(phi_0_measure_1[fin_good], bins=np.arange(-0.15, 0.16, 0.02), linewidth=0, alpha=0.3)
ax.hist(phi_0_measure_2[fin_good], bins=np.arange(-0.15, 0.16, 0.02), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\phi_{0, measured} - \phi_{0, true}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in phi_0
# Sigma-scaled phi_0 deviations; label raw-stringed ('\c', '\p' escapes).
norm_kde_4 = sp.stats.gaussian_kde(phi_0_measure_4[fin_good], bw_method=1/(phi_0_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(phi_0_measure_5[fin_good], bw_method=1/(phi_0_measure_5[fin_good]).std())
points = np.arange(-15, 15, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(phi_0_measure_4[fin_good], bins=np.arange(-15, 16), linewidth=0, alpha=0.3)
ax.hist(phi_0_measure_5[fin_good], bins=np.arange(-15, 16), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(phi_0_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(phi_0_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{\phi_0}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in r_rat
# Absolute radius-ratio deviations; labels raw-stringed ('\_' escapes).
norm_kde_1 = sp.stats.gaussian_kde(r_rat_measure_1[fin_good], bw_method=0.02/(r_rat_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(r_rat_measure_2[fin_good], bw_method=0.02/(r_rat_measure_2[fin_good]).std())
points = np.arange(-0.5, 0.5, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(r_rat_measure_1[fin_good], bins=np.arange(-0.5, 0.55, 0.05), linewidth=0, alpha=0.3)
ax.hist(r_rat_measure_2[fin_good], bins=np.arange(-0.5, 0.55, 0.05), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$r\_rat_{measured} - r\_rat_{input}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in r_rat
# Sigma-scaled radius-ratio deviations; label raw-stringed ('\c' escape).
norm_kde_4 = sp.stats.gaussian_kde(r_rat_measure_4[fin_good], bw_method=1/(r_rat_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(r_rat_measure_5[fin_good], bw_method=1/(r_rat_measure_5[fin_good]).std())
points = np.arange(-14, 14, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(r_rat_measure_4[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.hist(r_rat_measure_5[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(r_rat_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(r_rat_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{r ratio}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in log_rr
# Absolute log10 radius-ratio deviations; labels raw-stringed ('\_' escapes).
norm_kde_1 = sp.stats.gaussian_kde(log_rr_measure_1[fin_good], bw_method=0.02/(log_rr_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(log_rr_measure_2[fin_good], bw_method=0.02/(log_rr_measure_2[fin_good]).std())
points = np.arange(-0.5, 0.5, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(log_rr_measure_1[fin_good], bins=np.arange(-0.5, 0.55, 0.05), linewidth=0, alpha=0.3)
ax.hist(log_rr_measure_2[fin_good], bins=np.arange(-0.5, 0.55, 0.05), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$log_{10}(r\_rat_{measured}) - log_{10}(r\_rat_{input})$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in log_rr
# Sigma-scaled log10 radius-ratio deviations; label raw-stringed.
norm_kde_4 = sp.stats.gaussian_kde(log_rr_measure_4[fin_good], bw_method=1/(log_rr_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(log_rr_measure_5[fin_good], bw_method=1/(log_rr_measure_5[fin_good]).std())
points = np.arange(-14, 14, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(log_rr_measure_4[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.hist(log_rr_measure_5[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(log_rr_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(log_rr_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{log_{10}(r\_rat)}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in sb_rat
# Absolute surface-brightness-ratio deviations; labels raw-stringed.
norm_kde_1 = sp.stats.gaussian_kde(sb_rat_measure_1[fin_good], bw_method=0.02/(sb_rat_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(sb_rat_measure_2[fin_good], bw_method=0.02/(sb_rat_measure_2[fin_good]).std())
points = np.arange(-0.5, 0.5, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(sb_rat_measure_1[fin_good], bins=np.arange(-0.5, 0.55, 0.05), linewidth=0, alpha=0.3)
ax.hist(sb_rat_measure_2[fin_good], bins=np.arange(-0.5, 0.55, 0.05), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$sb\_rat_{measured} - sb\_rat_{input}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in sb_rat
# Sigma-scaled surface-brightness-ratio deviations; label raw-stringed.
norm_kde_4 = sp.stats.gaussian_kde(sb_rat_measure_4[fin_good], bw_method=1/(sb_rat_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(sb_rat_measure_5[fin_good], bw_method=1/(sb_rat_measure_5[fin_good]).std())
points = np.arange(-14, 14, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(sb_rat_measure_4[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.hist(sb_rat_measure_5[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(sb_rat_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(sb_rat_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{sb ratio}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - absolute error in log_sb
# Absolute log10 surface-brightness-ratio deviations; labels raw-stringed.
norm_kde_1 = sp.stats.gaussian_kde(log_sb_measure_1[fin_good], bw_method=0.02/(log_sb_measure_1[fin_good]).std())
norm_kde_2 = sp.stats.gaussian_kde(log_sb_measure_2[fin_good], bw_method=0.02/(log_sb_measure_2[fin_good]).std())
points = np.arange(-0.5, 0.5, 0.001)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(log_sb_measure_1[fin_good], bins=np.arange(-0.5, 0.55, 0.05), linewidth=0, alpha=0.3)
ax.hist(log_sb_measure_2[fin_good], bins=np.arange(-0.5, 0.55, 0.05), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_1(points), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_2(points), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$log_{10}(sb\_rat_{measured}) - log_{10}(sb\_rat_{input})$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# KDE and hist - error in log_sb
# Sigma-scaled log10 surface-brightness-ratio deviations; label raw-stringed.
norm_kde_4 = sp.stats.gaussian_kde(log_sb_measure_4[fin_good], bw_method=1/(log_sb_measure_4[fin_good]).std())
norm_kde_5 = sp.stats.gaussian_kde(log_sb_measure_5[fin_good], bw_method=1/(log_sb_measure_5[fin_good]).std())
points = np.arange(-14, 14, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(log_sb_measure_4[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.hist(log_sb_measure_5[fin_good], bins=np.arange(-14, 15), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde_4(points) * len(log_sb_measure_4[fin_good]), color='tab:blue', linewidth=4, label='formulae')
ax.plot(points, norm_kde_5(points) * len(log_sb_measure_5[fin_good]), color='tab:orange', linewidth=4, label='eclipse model')
ax.set_xlabel(r'$\chi_{log_{10}(sb\_rat)}$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.legend()
plt.tight_layout()
plt.show()
# frequency analysis
# number of pulsations found vs. input
# Compare the number of recovered (non-harmonic) sinusoids to the number of
# input pulsations, split by observing baseline (month vs year).
n_f_true = true_par['npulsations'][fin_good].to_numpy()
n_f_tot = obs_par['total_freqs'][fin_good].to_numpy()
n_f_pass = obs_par['passed_both'][fin_good].to_numpy()
n_f_hpass = obs_par['passed_harmonics'][fin_good].to_numpy()
sorter_n_f = np.argsort(n_f_true)
data_range = np.array([0, np.max(n_f_tot)])
# NOTE(review): cases with t_tot exactly 50 fall in neither mask — confirm
# whether that boundary value can occur.
short_t = (obs_par['t_tot'][fin_good] < 50)
long_t = (obs_par['t_tot'][fin_good] > 50)
fig, ax = plt.subplots(figsize=(6, 6))
# ax.scatter(n_f_true, n_f_pass, marker='d', c='tab:blue', label='passing criteria')
# ax.scatter(n_f_true, n_f_hpass, marker='^', c='tab:green', label='harmonics passing criteria')
# ax.scatter(n_f_true, n_f_pass - n_f_hpass, marker='o', c='tab:orange', label='passing - passing harmonics')
# ax.errorbar(n_f_true, n_f_pass - n_f_hpass, yerr=np.sqrt(n_f_pass - n_f_hpass),
#             marker='o', c='tab:blue', linestyle='none', label='passing - passing harmonics')
# Poisson-style (sqrt) error bars on the recovered non-harmonic count
ax.errorbar(n_f_true[short_t], n_f_pass[short_t] - n_f_hpass[short_t], yerr=np.sqrt(n_f_pass[short_t] - n_f_hpass[short_t]),
            marker='o', c='tab:blue', linestyle='none', label='month')
ax.errorbar(n_f_true[long_t], n_f_pass[long_t] - n_f_hpass[long_t], yerr=np.sqrt(n_f_pass[long_t] - n_f_hpass[long_t]),
            marker='o', c='tab:orange', linestyle='none', label='year')
# identity line: perfect recovery
ax.plot([0, 100], [0, 100], c='tab:grey', alpha=0.2)
# for p, i in enumerate(true_par.index[fin_good]):
#     n = obs_par['stage'][i]
#     ax.annotate(f'{i}', (n_f_true[p], n_f_pass[p] - n_f_hpass[p]), alpha=0.6)
ax.set_xlabel('number of input sinusoids', fontsize=14)
ax.set_ylabel('number of output sinusoids', fontsize=16)
ax.legend(loc='upper left')
plt.tight_layout()
plt.show()
# KDE and hist - n freq
# Deviation of the recovered sinusoid count from the input count, scaled by
# sqrt of the recovered count (clipped to >= 1 to avoid division by zero).
n_sin_dif = ((n_f_pass - n_f_hpass) - n_f_true)
n_sin_measure = n_sin_dif / np.clip(np.sqrt(np.abs(n_f_pass - n_f_hpass)), 1, None)
# note: the KDE is built from ALL cases, while the histograms below are split
# by baseline (month vs year)
norm_kde = sp.stats.gaussian_kde(n_sin_measure, bw_method=1/n_sin_measure.std())
points = np.arange(-9, 9, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
# ax.hist(n_sin_measure, bins=np.arange(-9, 10), linewidth=0, alpha=0.3)
ax.hist(n_sin_measure[short_t], bins=np.arange(-9, 10), linewidth=0, alpha=0.3, label='month')
ax.hist(n_sin_measure[long_t], bins=np.arange(-9, 10), linewidth=0, alpha=0.3, label='year')
ax.plot(points, norm_kde(points) * len(n_sin_measure), color='tab:blue', linewidth=4)
ax.set_xlabel(r'$\frac{(n - n_h) - n_{input}}{\sqrt{n - n_h}}$', fontsize=20)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
plt.legend()
plt.tight_layout()
plt.show()
# individual frequencies (for case 26)
# Load the input pulsation list and the analysis results for one test case,
# then match each input frequency to the nearest accepted extracted frequency.
case = '026'
sin_true = np.loadtxt(syn_dir + f'/pulse_data/sim_{case}_lc_pulse_info.dat', delimiter=',')
# all_files is assumed to be indexable by the integer case number — TODO confirm
times, signal, signal_err = np.loadtxt(all_files[int(case)], usecols=(0, 1, 2), unpack=True)
freqs, ampls = sts.tsf.scargle(times, signal)  # Lomb-Scargle periodogram (project helper)
results = sts.ut.read_parameters_hdf5(syn_dir + f'/sim_{case}_lc_analysis/sim_{case}_lc_analysis_8.hdf5', verbose=False)
const, slope, f_n, a_n, ph_n = results['sin_mean']
c_err, sl_err, f_n_err, a_n_err, ph_n_err = results['sin_err']
passed_sigma, passed_snr, passed_both, passed_h = results['sin_select']
# matcher_f[i] = index (within the passed_both subset) of the extracted
# frequency closest to input frequency i
matcher_f = np.zeros(len(sin_true), dtype=int)
for i in range(len(sin_true)):
    matcher_f[i] = np.arange(len(f_n[passed_both]))[np.argmin(np.abs(f_n[passed_both] - sin_true[i, 0]))]
# hist/KDE of sinusoid parameters - f, a, ph
# Deviation of each matched frequency from its injected value, in units of the
# formal frequency error (a chi-like statistic); >30-sigma outliers are dropped
# before estimating the distribution.
f_measure = (f_n[passed_both][matcher_f] - sin_true[:, 0]) / f_n_err[passed_both][matcher_f]
f_measure = f_measure[np.abs(f_measure) < 30]
norm_kde = sp.stats.gaussian_kde(f_measure, bw_method=1 / f_measure.std())
points = np.arange(-5, 5, 0.01)
fig, ax = plt.subplots(figsize=(5, 5))
ax.hist(f_measure, bins=np.arange(-5, 5.5, 0.25), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde(points) * len(f_measure) / 3, color='tab:blue', linewidth=4)
# raw string: '\c' in '$\chi_f$' is an invalid escape sequence in a plain
# string literal (SyntaxWarning in recent Python versions)
ax.set_xlabel(r'$\chi_f$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
plt.tight_layout()
plt.show()
# Same chi-like statistic for the matched amplitudes.
a_measure = (a_n[passed_both][matcher_f] - sin_true[:, 1]) / a_n_err[passed_both][matcher_f]
a_measure = a_measure[np.abs(a_measure) < 30]
norm_kde = sp.stats.gaussian_kde(a_measure, bw_method=1 / a_measure.std())
fig, ax = plt.subplots(figsize=(5, 5))
ax.hist(a_measure, bins=np.arange(-5, 5.5, 0.25), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde(points) * len(a_measure) / 3, color='tab:blue', linewidth=4)
# raw string avoids the invalid '\c' escape sequence in the TeX label
ax.set_xlabel(r'$\chi_a$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
plt.tight_layout()
plt.show()
# Phases: shift recovered phases by 2*pi*f*<t> (the mean observation time)
# before comparing with the injected phases.
phase_shift = (2*np.pi * f_n[passed_both][matcher_f] * np.mean(times))
sin_true_ph_rad = sin_true[:, 2] * (2 * np.pi) % (2 * np.pi)  # phases were mistakenly multiplied by 2pi
ph_measure = ((ph_n[passed_both][matcher_f] - phase_shift) % (2 * np.pi) - sin_true_ph_rad) / ph_n_err[passed_both][matcher_f]
ph_measure = ph_measure[np.abs(ph_measure) < 30]
norm_kde = sp.stats.gaussian_kde(ph_measure, bw_method=1 / ph_measure.std())
fig, ax = plt.subplots(figsize=(5, 5))
ax.hist(ph_measure, bins=np.arange(-5, 5.5, 0.25), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde(points) * len(ph_measure) / 3, color='tab:blue', linewidth=4)
# raw string avoids the invalid '\c' and '\p' escape sequences in the TeX label
ax.set_xlabel(r'$\chi_\phi$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
plt.tight_layout()
plt.show()
# KEPLER PERIOD TESTS
# kepler eb catalogue test results
# expanduser: a literal '~' is NOT expanded by open()/np.loadtxt, so resolve
# it here to make the paths below usable.
kep_dir = os.path.expanduser('~/Kepler_EB_catalogue')
# header of one known summary file defines the column layout for all targets
summary_file = os.path.join(kep_dir, '01433410.00.lc_analysis', '01433410.00.lc_analysis_summary.csv')
hdr = np.loadtxt(summary_file, usecols=(0), delimiter=',', unpack=True, dtype=str)
obs_par_dtype = np.dtype([('id', '<U20'), ('stage', '<U20')] + [(name, float) for name in hdr[2:]])
kep_ebs = np.loadtxt(os.path.join(kep_dir, 'kepler_eb_files.dat'), dtype=str)[1:]
# np.ones fills the string fields with '1', which doubles as the
# "no result available" sentinel checked further down in this script
obs_par = np.ones(len(kep_ebs), dtype=obs_par_dtype)
not_done = []
for k, file in enumerate(kep_ebs):
    target_id = os.path.splitext(os.path.basename(file))[0]
    data_dir = os.path.join(kep_dir, f'{target_id}_analysis')
    summary_file = os.path.join(data_dir, f'{target_id}_analysis_summary.csv')
    if os.path.isfile(summary_file):
        obs_par[k] = tuple(np.loadtxt(summary_file, usecols=(1), delimiter=',', unpack=True, dtype=str))
    else:
        not_done.append(summary_file)
obs_par = pd.DataFrame(obs_par, columns=hdr)
obs_par.to_csv(os.path.join(kep_dir + '_summary.csv'), index=False)
# load kepler eb catalogue and result summary
kepobs_par = pd.read_csv(os.path.join(kep_dir + '_summary.csv'))
kepcat_par = pd.read_csv(os.path.join(kep_dir, 'kepler_eb_catalog.csv'), skiprows=7)
# periods
# keep only the first file per target ('.00.') and targets whose summary was
# actually written (entries left at the np.ones fill value '1' had none)
kep_zero = (np.char.find(kep_ebs, '.00.') != -1)  # files with index .00.
kep_p_avail = (kepobs_par['id'][kep_zero].to_numpy() != '1')  # period saved
min_max = [np.min(kepcat_par['period']), np.max(kepcat_par['period'])]
obs_p = kepobs_par['period'][kep_zero][kep_p_avail].to_numpy()
obs_p_err = kepobs_par['p_err'][kep_zero][kep_p_avail].to_numpy()
cat_p = kepcat_par['period'][kep_p_avail].to_numpy()
cat_p_err = kepcat_par['period_err'][kep_p_avail].to_numpy()
cat_morph = kepcat_par['morph'][kep_p_avail].to_numpy()
# period deviation: absolute, relative to catalogue, and in units of the
# measured period error
p_diff = obs_p - cat_p
p_diff_2 = p_diff / cat_p
obs_p_err2 = obs_p_err / cat_p
p_diff_3 = (p_diff) / obs_p_err
# 'good' periods: recovered within 1 percent of the catalogue value
select_good_p_3 = (np.abs(p_diff_2) < 0.01) & (obs_p != -1)
# same deviations against rational multiples of the catalogue period, to find
# recoveries at a fraction/multiple of the true period
p_diff_mult = []
p_diff_m = []
select_good_p_m = []
for m in [1/5, 1/4, 1/3, 1/2, 2, 3, 4, 5]:
    p_diff_mult.append(obs_p - cat_p / m)
    p_diff_m.append(p_diff_mult[-1] / obs_p_err)
    select_good_p_m.append((np.abs(p_diff_mult[-1] / cat_p) < 0.01) & (obs_p != -1))
select_good_p_all_m = np.sum(select_good_p_m, axis=0, dtype=bool)
# intrinsic variability (at non-harmonic frequencies)
obs_std_1 = kepobs_par['std_1'][kep_zero][kep_p_avail].to_numpy()
obs_std_2 = kepobs_par['std_2'][kep_zero][kep_p_avail].to_numpy()
obs_std_4 = kepobs_par['std_4'][kep_zero][kep_p_avail].to_numpy()
# NOTE(review): assumes std_2 >= std_4 elementwise; otherwise sqrt yields NaN
obs_std_5 = np.sqrt(obs_std_2**2 - obs_std_4**2)
obs_std_5_rat = obs_std_5 / obs_std_1
var_mask = (obs_std_5_rat > 6) & (cat_morph < 0.5)
# eccentricities
obs_e_form = kepobs_par['e_form'][kep_zero][kep_p_avail].to_numpy()
obs_e_l = kepobs_par['e_low'][kep_zero][kep_p_avail].to_numpy()
obs_e_u = kepobs_par['e_upp'][kep_zero][kep_p_avail].to_numpy()
# stacked (2, N): lower and upper errors, the layout errorbar's yerr expects
obs_e_err = np.vstack((obs_e_l, obs_e_u))
obs_e_phys = kepobs_par['e_phys'][kep_zero][kep_p_avail].to_numpy()
obs_ecosw_phys = kepobs_par['ecosw_phys'][kep_zero][kep_p_avail].to_numpy()
obs_ecosw_l = kepobs_par['ecosw_low'][kep_zero][kep_p_avail].to_numpy()
obs_ecosw_u = kepobs_par['ecosw_upp'][kep_zero][kep_p_avail].to_numpy()
obs_ecosw_err = np.vstack((obs_ecosw_l, obs_ecosw_u))
# KDE and hist - percentage error in p
# Relative period deviation for targets whose recovered period matches the
# catalogue value to within 1 percent.
norm_kde = sp.stats.gaussian_kde(p_diff_2[select_good_p_3], bw_method=0.000004/p_diff_2[select_good_p_3].std())
points = np.arange(-0.00009, 0.00009, 0.0000005)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(p_diff_2[select_good_p_3], bins=np.arange(-0.00009, 0.000091, 0.000004), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde(points) * len(p_diff_2[select_good_p_3])*0.000004, color='tab:blue', linewidth=4)
ax.set_xlabel(r'$\frac{P_{measured} - P_{catalogue}}{P_{catalogue}}$', fontsize=18)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
ax.get_xaxis().get_offset_text().set_fontsize(14)
plt.tight_layout()
plt.show()
# KDE and hist - error in p
# Period deviation in units of the measured period error.
norm_kde = sp.stats.gaussian_kde(p_diff_3[select_good_p_3], bw_method=1/p_diff_3[select_good_p_3].std())
points = np.arange(-8, 8, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(p_diff_3[select_good_p_3], bins=np.arange(-8, 8.1, 0.2), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde(points) * len(p_diff_3[select_good_p_3])*0.2, color='tab:blue', linewidth=4)
# raw string: '\c' in '$\chi_p$' is an invalid escape sequence in a plain string
ax.set_xlabel(r'$\chi_p$', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
plt.tight_layout()
plt.show()
# at half period and multiples
# index 4 of p_diff_m corresponds to m=2 in the multiples list, i.e. a
# recovered period equal to half the catalogue period
p_diff_all_m = np.copy(p_diff_3)
for p_d, select in zip(p_diff_m, select_good_p_m):
    p_diff_all_m[select] = p_d[select]
# norm_kde = sp.stats.gaussian_kde(p_diff_all_m[select_good_p_all_m], bw_method=1/p_diff_all_m[select_good_p_all_m].std())
norm_kde = sp.stats.gaussian_kde(p_diff_m[4][select_good_p_m[4]], bw_method=1/p_diff_m[4][select_good_p_m[4]].std())
points = np.arange(-8, 8, 0.01)
fig, ax = plt.subplots(figsize=(6, 6))
ax.hist(p_diff_m[4][select_good_p_m[4]], bins=np.arange(-8, 8.1, 0.4), linewidth=0, alpha=0.3)
# ax.hist(p_diff_all_m[select_good_p_all_m], bins=np.arange(-8, 8.1, 0.4), linewidth=0, alpha=0.3)
ax.plot(points, norm_kde(points) * len(p_diff_all_m[select_good_p_all_m])*0.4, color='tab:blue', linewidth=4)
ax.set_xlabel(r'$\chi_p$ at half period', fontsize=14)
# ax.set_xlabel(r'$\chi_p$ at other multiples', fontsize=14)
ax.set_ylabel('number of cases', fontsize=14)
ax.tick_params(axis='both', which='major', labelsize=12)
plt.tight_layout()
plt.show()
# eccentricities [select correct p, e between 0 and 1, error < 0.1, morph < 0.5]
# Two selections: formal eccentricities (good_e_3) and 'physical'
# eccentricities (good_e_p_3); both require small errors and morph < 0.5.
good_e_3 = (obs_e_form[select_good_p_3] > 0)
good_e_3 &= (obs_e_err[0][select_good_p_3] < 0.1) & (obs_e_err[1][select_good_p_3] < 0.1)
good_e_3 &= (cat_morph[select_good_p_3] < 0.5)
good_e_p_3 = (obs_e_phys[select_good_p_3] > 0)
good_e_p_3 &= (obs_e_err[0][select_good_p_3] < 0.1) & (obs_e_err[1][select_good_p_3] < 0.1)
good_e_p_3 &= (cat_morph[select_good_p_3] < 0.5)
# theoretical circularisation-style envelope e(P); NaN below the cutoff
# period is clipped to 0
periods_line = np.logspace(0, 2.4, 1000)
line_theo = np.sqrt(1 - (5 / periods_line)**(2/3))
line_theo[~np.isfinite(line_theo)] = 0
plotting_kic = ['04544587', '7943535', '8196180', '11867071']
fig, ax = plt.subplots(figsize=(12, 12))
ax.errorbar(obs_p[select_good_p_3][good_e_p_3], obs_e_phys[select_good_p_3][good_e_p_3],
            xerr=obs_p_err[select_good_p_3][good_e_p_3], yerr=obs_e_err[:, select_good_p_3][:, good_e_p_3],
            marker='.', color='tab:blue', linestyle='none', capsize=2, zorder=1)
for kic in plotting_kic:  # janky way to plot a few points in different colour
    mask = [kic in item for item in kepobs_par['id'][kep_zero][kep_p_avail].to_numpy().astype(str)]
    mask = np.array(mask)
    i = np.arange(len(kepobs_par[kep_zero][kep_p_avail]))[mask][0]
    p = obs_p[i]
    e = obs_e_phys[i]
    p_e = obs_p_err[i]
    e_e = obs_e_err[:, i]
    ax.errorbar(p, e, xerr=p_e, yerr=e_e.reshape((2, 1)), marker='.', color='tab:orange', linestyle='none', capsize=2, zorder=1)
ax.plot(periods_line, line_theo, c='k', linestyle='--', zorder=2)
ax.set_xlabel('period (d)', fontsize=14)
ax.set_ylabel('eccentricity', fontsize=14)
plt.tight_layout()
plt.xscale('log')
plt.show()
# variability
# Same eccentricity-period plot, restricted to intrinsically variable targets
# (var_mask); good_e_p_3 is recomputed for the masked arrays.
good_e_p_3 = (obs_e_phys[var_mask][select_good_p_3[var_mask]] > 0)
good_e_p_3 &= (obs_e_err[0][var_mask][select_good_p_3[var_mask]] < 0.1) & (obs_e_err[1][var_mask][select_good_p_3[var_mask]] < 0.1)
good_e_p_3 &= (cat_morph[var_mask][select_good_p_3[var_mask]] < 0.5)
periods_line = np.logspace(0, 2.4, 1000)
line_theo = np.sqrt(1 - (5 / periods_line)**(2 / 3))
line_theo_2 = np.sqrt(1 - (6.5 / periods_line)**(2 / 3))
line_theo[~np.isfinite(line_theo)] = 0
line_theo_2[~np.isfinite(line_theo_2)] = 0
plotting_kic = ['08719324', '09899216', '05034333', '11706658', '07833144']
fig, ax = plt.subplots(figsize=(12, 12))
ax.errorbar(obs_p[var_mask][select_good_p_3[var_mask]][good_e_p_3], obs_e_phys[var_mask][select_good_p_3[var_mask]][good_e_p_3],
            xerr=obs_p_err[var_mask][select_good_p_3[var_mask]][good_e_p_3], yerr=obs_e_err[:, var_mask][:, select_good_p_3[var_mask]][:, good_e_p_3],
            marker='.', color='tab:blue', linestyle='none', capsize=2, zorder=1)
for kic in plotting_kic:  # janky way to plot a few points in different colour
    mask = [kic in item for item in kepobs_par['id'][kep_zero][kep_p_avail].to_numpy().astype(str)]
    mask = np.array(mask)
    i = np.arange(len(kepobs_par[kep_zero][kep_p_avail]))[mask][0]
    p = obs_p[i]
    e = obs_e_phys[i]
    p_e = obs_p_err[i]
    e_e = obs_e_err[:, i]
    ax.errorbar(p, e, xerr=p_e, yerr=e_e.reshape((2, 1)), marker='.', color='tab:red', linestyle='none', capsize=2, zorder=1)
ax.plot(periods_line, line_theo, c='k', linestyle='--', zorder=2)
ax.plot(periods_line, line_theo_2, c='grey', linestyle='--', zorder=2)
ax.set_xlabel('period (d)', fontsize=14)
ax.set_ylabel('eccentricity', fontsize=14)
plt.tight_layout()
plt.xscale('log')
plt.show()
# ecosw
# NOTE(review): this reuses good_e_p_3, which the preceding section
# recomputed for the var_mask-filtered arrays, while the selections here are
# unmasked — the lengths likely disagree; confirm which mask is intended.
fig, ax = plt.subplots(figsize=(12, 12))
ax.errorbar(obs_p[select_good_p_3][good_e_p_3], obs_ecosw_phys[select_good_p_3][good_e_p_3],
            xerr=obs_p_err[select_good_p_3][good_e_p_3], yerr=obs_ecosw_err[:, select_good_p_3][:, good_e_p_3],
            marker='.', color='tab:blue', linestyle='none', capsize=2)
ax.set_xlabel('period (d)', fontsize=14)
ax.set_ylabel('e cos(w)', fontsize=14)
plt.xscale('log')
plt.tight_layout()
plt.show()
|
LucIJspeertREPO_NAMEstar_shadowPATH_START.@star_shadow_extracted@star_shadow-master@star_shadow@data@plotting_script.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "tardis-sn/tardis",
"repo_path": "tardis_extracted/tardis-main/tardis/tests/__init__.py",
"type": "Python"
}
|
tardis-snREPO_NAMEtardisPATH_START.@tardis_extracted@tardis-main@tardis@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "utils.py",
"repo_name": "cosmodesi/pypower",
"repo_path": "pypower_extracted/pypower-main/pypower/utils.py",
"type": "Python"
}
|
"""A few utilities."""
import os
import sys
import time
import logging
import traceback
from functools import lru_cache
import numpy as np
logger = logging.getLogger('Utils')
def is_sequence(item):
    """Return ``True`` if ``item`` is a list or a tuple, ``False`` otherwise."""
    sequence_types = (list, tuple)
    return isinstance(item, sequence_types)
def exception_handler(exc_type, exc_value, exc_traceback):
    """Log an uncaught exception; intended to be installed as ``sys.excepthook``.

    Parameters
    ----------
    exc_type, exc_value, exc_traceback :
        Standard ``sys.excepthook`` arguments: the exception class, the
        exception instance and its traceback object.
    """
    # Use a dedicated logger so uncaught exceptions stand out in the log
    _logger_name = 'Exception'
    log = logging.getLogger(_logger_name)
    line = '=' * 100
    log.critical('\n' + line + '\n' + ''.join(traceback.format_exception(exc_type, exc_value, exc_traceback)) + line)
    if exc_type is KeyboardInterrupt:
        log.critical('Interrupted by the user.')
    else:
        # fix: 'occured' -> 'occurred' in the logged message
        log.critical('An error occurred.')
def mkdir(dirname):
    """Create directory ``dirname`` (with parents), silently ignoring :class:`OSError`.

    Failures — typically the directory already existing, possibly created
    concurrently by another MPI process — are swallowed on purpose.
    """
    try:
        os.makedirs(dirname)
    except OSError:
        pass
def savefig(filename, fig=None, bbox_inches='tight', pad_inches=0.1, dpi=200, **kwargs):
    """
    Save figure to ``filename``, creating the parent directory if needed.

    Warning
    -------
    Take care to close figure at the end, ``plt.close(fig)``.

    Parameters
    ----------
    filename : string
        Path where to save figure.
    fig : matplotlib.figure.Figure, default=None
        Figure to save. Defaults to current figure.
    bbox_inches, pad_inches, dpi :
        Defaults passed through to :meth:`matplotlib.figure.Figure.savefig`.
    kwargs : dict
        Optional arguments for :meth:`matplotlib.figure.Figure.savefig`.

    Returns
    -------
    fig : matplotlib.figure.Figure
        The figure that was saved.
    """
    # matplotlib imported lazily so the module does not require it at import time
    from matplotlib import pyplot as plt
    # ensure the output directory exists (mkdir swallows OSError)
    mkdir(os.path.dirname(filename))
    logger.info('Saving figure to {}.'.format(filename))
    if fig is None:
        fig = plt.gcf()
    fig.savefig(filename, bbox_inches=bbox_inches, pad_inches=pad_inches, dpi=dpi, **kwargs)
    return fig
def setup_logging(level=logging.INFO, stream=sys.stdout, filename=None, filemode='w', **kwargs):
    """
    Set up logging.

    Parameters
    ----------
    level : string, int, default=logging.INFO
        Logging level; strings 'info', 'debug', 'warning' are accepted.

    stream : _io.TextIOWrapper, default=sys.stdout
        Where to stream.

    filename : string, default=None
        If not ``None`` stream to file name (takes precedence over ``stream``).

    filemode : string, default='w'
        Mode to open file, only used if filename is not ``None``.

    kwargs : dict
        Other arguments for :func:`logging.basicConfig`.
    """
    # Cannot provide stream and filename kwargs at the same time to logging.basicConfig, so handle different cases
    # Thanks to https://stackoverflow.com/questions/30861524/logging-basicconfig-not-creating-log-file-when-i-run-in-pycharm
    if isinstance(level, str):
        level = {'info': logging.INFO, 'debug': logging.DEBUG, 'warning': logging.WARNING}[level.lower()]
    # remove any pre-existing root handlers so basicConfig installs ours
    for handler in logging.root.handlers:
        logging.root.removeHandler(handler)
    # reference time used to prefix records with the elapsed time since setup
    t0 = time.time()
    class MyFormatter(logging.Formatter):
        def format(self, record):
            # rewrite the format string on every record to refresh the elapsed time
            self._style._fmt = '[%09.2f] ' % (time.time() - t0) + ' %(asctime)s %(name)-28s %(levelname)-8s %(message)s'
            return super(MyFormatter, self).format(record)
    fmt = MyFormatter(datefmt='%m-%d %H:%M ')
    if filename is not None:
        mkdir(os.path.dirname(filename))
        handler = logging.FileHandler(filename, mode=filemode)
    else:
        handler = logging.StreamHandler(stream=stream)
    handler.setFormatter(fmt)
    logging.basicConfig(level=level, handlers=[handler], **kwargs)
    # route uncaught exceptions through the logger
    sys.excepthook = exception_handler
class BaseMetaClass(type):
    """Metaclass to add logging attributes to :class:`BaseClass` derived classes."""
    def __new__(meta, name, bases, class_dict):
        # attach the logging helpers to every class created with this metaclass
        cls = super().__new__(meta, name, bases, class_dict)
        cls.set_logger()
        return cls
    def set_logger(cls):
        """
        Add attributes for logging:

        - logger : a :class:`logging.Logger` named after the class
        - methods log_debug, log_info, log_warning, log_error, log_critical
        """
        cls.logger = logging.getLogger(cls.__name__)
        def make_logger(level):
            # factory so each log_* classmethod binds its own ``level``
            # (the inner name ``logger`` shadows nothing harmful: it is
            # returned, then stored under 'log_<level>')
            @classmethod
            def logger(cls, *args, **kwargs):
                return getattr(cls.logger, level)(*args, **kwargs)
            return logger
        for level in ['debug', 'info', 'warning', 'error', 'critical']:
            setattr(cls, 'log_{}'.format(level), make_logger(level))
class BaseClass(object, metaclass=BaseMetaClass):
    """
    Base class that implements :meth:`copy` and (de)serialisation helpers.
    To be used throughout this package.
    """
    def __copy__(self):
        # shallow copy: new instance sharing the same attribute values
        new = self.__class__.__new__(self.__class__)
        new.__dict__.update(self.__dict__)
        return new
    def copy(self, **kwargs):
        # shallow copy with optional attribute overrides
        new = self.__copy__()
        new.__dict__.update(kwargs)
        return new
    def __setstate__(self, state):
        self.__dict__.update(state)
    @classmethod
    def from_state(cls, state):
        # rebuild an instance from a state dict without calling __init__
        new = cls.__new__(cls)
        new.__setstate__(state)
        return new
    @property
    def with_mpi(self):
        """Whether to use MPI."""
        # True only if an mpicomm attribute exists and spans more than one rank
        return getattr(self, 'mpicomm', None) is not None and self.mpicomm.size > 1
    def save(self, filename):
        """Save to ``filename``."""
        # only the root rank writes, to avoid concurrent writes to the same file
        if not self.with_mpi or self.mpicomm.rank == 0:
            self.log_info('Saving {}.'.format(filename))
            mkdir(os.path.dirname(filename))
            # NOTE(review): relies on __getstate__ being defined by subclasses
            np.save(filename, self.__getstate__(), allow_pickle=True)
        # if self.with_mpi:
        #     self.mpicomm.Barrier()
    @classmethod
    def load(cls, filename):
        cls.log_info('Loading {}.'.format(filename))
        # [()] extracts the object stored in the 0-d array written by np.save
        state = np.load(filename, allow_pickle=True)[()]
        new = cls.from_state(state)
        return new
def distance(positions):
    """Return the cartesian (Euclidean) norm, coordinates taken along the first axis of ``positions``."""
    total = 0.
    for coord in positions:
        total = total + coord ** 2
    return np.sqrt(total)
def _make_array(value, shape, dtype='f8'):
    """Return a new array of given ``shape`` and ``dtype`` with every element set to ``value``."""
    return np.full(shape, value, dtype=dtype)
def _get_box(*positions):
    """
    Return the corners ``(pos_min, pos_max)`` of the minimal box containing the input positions.

    Each input ``position`` is an array of shape ``(N, 3)``; empty arrays are skipped.
    The box starts inverted (+inf / -inf) so that any point shrinks it into place.
    """
    pos_min = _make_array(np.inf, 3, dtype='f8')
    pos_max = _make_array(-np.inf, 3, dtype='f8')
    for position in positions:
        if position.shape[0] > 0:
            pos_min = np.min([pos_min, position.min(axis=0)], axis=0)
            pos_max = np.max([pos_max, position.max(axis=0)], axis=0)
    return pos_min, pos_max
def cartesian_to_sky(positions, wrap=True, degree=True, dtype=None):
    r"""
    Transform cartesian coordinates into distance, RA, Dec.

    Parameters
    ----------
    positions : array of shape (3, N), list of 3 arrays
        Positions in cartesian coordinates.

    wrap : bool, default=True
        Whether to wrap RA in :math:`[0, 2 \pi]`.

    degree : bool, default=True
        Whether RA, Dec are in degrees (``True``) or radians (``False``).

    dtype : dtype, default=None
        If not ``None``, enforce this dtype on the returned arrays.

    Returns
    -------
    rdd : list of 3 arrays
        Right ascension, declination and distance.
    """
    dist = distance(positions)
    ra = np.arctan2(positions[1], positions[0])
    if wrap:
        ra = ra % (2. * np.pi)
    dec = np.arcsin(positions[2] / dist)
    if degree:
        conversion = np.pi / 180.
    else:
        conversion = 1.
    sky = (ra / conversion, dec / conversion, dist)
    return [np.asarray(coord, dtype=dtype) for coord in sky]
def sky_to_cartesian(rdd, degree=True, dtype=None):
    """
    Transform RA, Dec, distance into cartesian coordinates.

    Parameters
    ----------
    rdd : array of shape (3, N), list of 3 arrays
        Right ascension, declination and distance.

    degree : default=True
        Whether RA, Dec are in degrees (``True``) or radians (``False``).

    dtype : dtype, default=None
        If not ``None``, enforce this dtype on the returned arrays.

    Returns
    -------
    positions : list of 3 arrays
        Positions x, y, z in cartesian coordinates.
    """
    conversion = np.pi / 180. if degree else 1.
    ra, dec, dist = rdd
    ra_rad = ra * conversion
    dec_rad = dec * conversion
    cos_dec = np.cos(dec_rad)
    positions = [dist * cos_dec * np.cos(ra_rad),
                 dist * cos_dec * np.sin(ra_rad),
                 dist * np.sin(dec_rad)]
    return [np.asarray(pos, dtype=dtype) for pos in positions]
def rebin(array, new_shape, statistic=np.sum):
    """
    Rebin ``array`` to ``new_shape`` by applying ``statistic`` (e.g. sum, mean)
    over the collapsed cells. The number of output dimensions must match the
    number of input dimensions, and each new axis must divide the old one.

    Taken from https://stackoverflow.com/questions/8090229/resize-with-averaging-or-rebin-a-numpy-2d-array
    and https://nbodykit.readthedocs.io/en/latest/_modules/nbodykit/binned_statistic.html#BinnedStatistic.reindex.

    Example
    -------
    >>> m = np.arange(0, 100, 1).reshape((10, 10))
    >>> n = rebin(m, new_shape=(5, 5), statistic=np.sum)
    >>> print(n)
    [[ 22  30  38  46  54]
     [102 110 118 126 134]
     [182 190 198 206 214]
     [262 270 278 286 294]
     [342 350 358 366 374]]
    """
    # allow a bare integer for 1D input
    if array.ndim == 1 and np.ndim(new_shape) == 0:
        new_shape = [new_shape]
    if array.ndim != len(new_shape):
        raise ValueError('Input array dim is {}, but requested output one is {}'.format(array.ndim, len(new_shape)))
    factors = []
    for want, have in zip(new_shape, array.shape):
        if have % want != 0:
            raise ValueError('New shape should divide current shape, but {:d} % {:d} = {:d}'.format(have, want, have % want))
        factors.append(have // want)
    # reshape to (n0, f0, n1, f1, ...) then collapse the factor axes from the back
    interleaved = []
    for want, factor in zip(new_shape, factors):
        interleaved.extend([want, factor])
    reshaped = array.reshape(interleaved)
    for axis in range(len(new_shape)):
        reshaped = statistic(reshaped, axis=-1 * (axis + 1))
    return reshaped
# Lookup table: number of set bits for every possible byte value (0-255)
_popcount_lookuptable = np.array([format(byte, 'b').count('1') for byte in range(256)], dtype=np.int32)
def popcount(*arrays):
    """
    Return the number of 1 bits in each element of the input integer array,
    summed elementwise over all input arrays.

    Inspired from https://github.com/numpy/numpy/issues/16325.
    """
    first = arrays[0]
    # view each element as its constituent bytes, look up per-byte bit counts
    bytes_view = first.view((np.uint8, (first.dtype.itemsize,)))
    toret = _popcount_lookuptable[bytes_view].sum(axis=-1)
    for other in arrays[1:]:
        toret += popcount(other)
    return toret
def pack_bitarrays(*arrays, dtype=np.uint64):
    """
    Pack bit arrays into a list of integer arrays.

    Inverse operation is :func:`unpack_bitarrays`, i.e.
    ``unpack_bitarrays(pack_bitarrays(*arrays, dtype=dtype))`` is ``arrays``,
    whatever integer ``dtype`` is.

    Parameters
    ----------
    arrays : bool arrays
        Arrays of integers or booleans whose elements should be packed to bits.

    dtype : string, dtype
        Type of output integer arrays.

    Returns
    -------
    arrays : list
        List of integer arrays of type ``dtype``, representing input boolean arrays.
    """
    if not arrays:
        return []
    # pack bits little-endian into bytes, then regroup bytes into dtype words
    packed_bytes = np.packbits(arrays, axis=0, bitorder='little')
    return reformat_bitarrays(*packed_bytes, dtype=dtype)
def unpack_bitarrays(*arrays):
    """
    Unpack integer arrays into a bit array.

    Inverse operation is :func:`pack_bitarrays`, i.e.
    ``pack_bitarrays(unpack_bitarrays(*arrays), dtype=arrays.dtype)`` is ``arrays``.

    Parameters
    ----------
    arrays : integer arrays
        Arrays of integers whose elements should be unpacked to bits.

    Returns
    -------
    arrays : list
        List of boolean arrays of type ``np.uint8``, representing input integer arrays.
    """
    # break the input words into bytes, then expand each byte into its bits
    as_bytes = reformat_bitarrays(*arrays, dtype=np.uint8)
    return np.unpackbits(as_bytes, axis=0, count=None, bitorder='little')
def reformat_bitarrays(*arrays, dtype=np.uint64, copy=True):
    """
    Reformat input integer arrays into a list of arrays of type ``dtype``.

    If, e.g. 6 arrays of type ``np.uint8`` are input, and ``dtype`` is ``np.uint32``,
    a list of 2 arrays is returned.

    Parameters
    ----------
    arrays : integer arrays
        Arrays of integers to reformat.

    dtype : string, dtype
        Type of output integer arrays.

    copy : bool, default=True
        If ``False``, avoids copy of input arrays if ``dtype`` is uint8.

    Returns
    -------
    arrays : list
        List of integer arrays of type ``dtype``, representing input integer arrays.
    """
    dtype = np.dtype(dtype)
    groups = []
    slots_left = 0
    # Split every input array into its constituent bytes (first bits live in
    # the first byte array) and gather them in groups of dtype.itemsize bytes.
    for array in arrays:
        byte_view = array.view((np.uint8, (array.dtype.itemsize,)))
        byte_view = np.moveaxis(byte_view, -1, 0)
        for single_byte in byte_view:
            if slots_left == 0:
                groups.append([])
                slots_left = dtype.itemsize
            groups[-1].append(single_byte[..., None])
            slots_left -= 1
    # Pad the last group with zero bytes, then view each group as one dtype array.
    for igroup, group in enumerate(groups):
        missing = dtype.itemsize - len(group)
        if missing:
            group += [np.zeros_like(group[0])] * missing
        if len(group) > 1 or copy:
            groups[igroup] = np.squeeze(np.concatenate(group, axis=-1).view(dtype), axis=-1)
        else:
            groups[igroup] = group[0][..., 0]
    return groups
def pascal_triangle(n_rows):
    r"""
    Compute Pascal's triangle.

    Taken from https://stackoverflow.com/questions/24093387/pascals-triangle-for-python.

    Parameters
    ----------
    n_rows : int
        Number of rows in the Pascal triangle, i.e. maximum number of elements :math:`n`.

    Returns
    -------
    triangle : list
        List of lists of binomial coefficients.
        The binomial coefficient :math:`(k, n)` is ``triangle[n][k]``.
    """
    triangle = [[1]]
    for _ in range(n_rows):
        previous = triangle[-1]
        # interior entries are pairwise sums of adjacent entries of the
        # previous row (a sliding window of width 2)
        middle = [left + right for left, right in zip(previous, previous[1:])]
        triangle.append([1] + middle + [1])
    return triangle
@lru_cache(maxsize=10, typed=False)
def joint_occurences(nrealizations=128, max_occurences=None, noffset=1, default_value=0):
    """
    Return expected value of inverse counts, i.e. eq. 21 of arXiv:1912.08803.

    Parameters
    ----------
    nrealizations : int
        Number of realizations (including current realization).

    max_occurences : int, default=None
        Maximum number of occurences (including ``noffset``).
        If ``None``, defaults to ``nrealizations``.

    noffset : int, default=1
        The offset added to the bitwise count, typically 0 or 1.
        See "zero truncated estimator" and "efficient estimator" of arXiv:1912.08803.

    default_value : float, default=0.
        The default value of pairwise weights if the denominator is zero (defaulting to 0).

    Returns
    -------
    occurences : list
        Expected value of inverse counts; ``occurences[c1 - noffset][c2 - noffset]``
        with ``c2 <= c1`` (triangular list of rows of growing length).
    """
    # gk(c1, c2)
    if max_occurences is None: max_occurences = nrealizations
    # binomial coefficients up to n = nrealizations, indexed as [n][k]
    binomial_coeffs = pascal_triangle(nrealizations)
    def prob(c12, c1, c2):
        # hypergeometric-like probability of c12 joint occurences given
        # individual counts c1 and c2
        return binomial_coeffs[c1 - noffset][c12 - noffset] * binomial_coeffs[nrealizations - c1][c2 - c12] / binomial_coeffs[nrealizations - noffset][c2 - noffset]
    def fk(c12):
        # inverse-count weight; default_value guards against division by zero
        if c12 == 0:
            return default_value
        return nrealizations / c12
    toret = []
    for c1 in range(noffset, max_occurences + 1):
        row = []
        for c2 in range(noffset, c1 + 1):
            # we have c12 <= c1, c2 and nrealizations >= c1 + c2 + c12
            row.append(sum(fk(c12) * prob(c12, c1, c2) for c12 in range(max(noffset, c1 + c2 - nrealizations), min(c1, c2) + 1)))
        toret.append(row)
    return toret
|
cosmodesiREPO_NAMEpypowerPATH_START.@pypower_extracted@pypower-main@pypower@utils.py@.PATH_END.py
|
{
"filename": "IntegratingArbitraryODEs.ipynb",
"repo_name": "tigerchenlu98/rebound",
"repo_path": "rebound_extracted/rebound-main/ipython_examples/IntegratingArbitraryODEs.ipynb",
"type": "Jupyter Notebook"
}
|
# Integrating arbitrary ODEs
Although REBOUND is primarily an N-body integrator, it can also integrate arbitrary ordinary differential equations (ODEs). Even better: it can integrate arbitrary ODEs in parallel with an N-body simulation. This allows you to couple various physical effects such as spin and tides to orbital dynamics.
In this example, we are integrating a two planet system and a decoupled harmonic oscillator which is governed by the following ODE:
$$ y_0(t)'' = -\frac km y_0(t)$$
or equivalently as a set of 2 first order differential equations
$$ \begin{pmatrix} y_0(t)\\y_1(t)\end{pmatrix}' = \begin{pmatrix} y_1(t)\\- \frac k m y_0(t)\end{pmatrix}
$$
```python
import rebound
import numpy as np
import matplotlib.pyplot as plt
```
We first set up our N-body simulation. Note that we are using the Gragg-Bulirsch-Stoer integrator (BS).
```python
sim = rebound.Simulation()
sim.add(m=1)
sim.add(a=1.2,m=1e-3,e=0.1)
sim.add(a=2.3,m=1e-3,e=0.1)
sim.integrator = "BS"
```
We now create an ODE structure. Note that the ODE is linked to the simulation. If you run multiple simulations in parallel, you need to create an ode structure for each of them.
```python
ode_ho = sim.create_ode(length=2, needs_nbody=False)
```
Next, we setup the ODE structure with the initial conditions and the right hand side (RHS) of the harmonic oscillator:
```python
# Mass and spring constants
m = 1.
k = 10.
# Initial conditions
ode_ho.y[0] = 1.
ode_ho.y[1] = 0. # zero velocity
# RHS
def derivatives_ho(ode, yDot, y, t):
yDot[0] = y[1]
yDot[1] = -k/m*y[0]
ode_ho.derivatives = derivatives_ho
```
To keep track of how accurate the integration of the harmonic oscillator is, we can calculate the energy which is conserved in the physical system.
```python
def energy_ho(ode):
return 0.5*k*ode.y[0]**2 + 0.5*m*ode.y[1]**2
```
Now we can run the simulation, keeping track of a few quantities along the way.
```python
times = np.linspace(0.,60.,1000)
energies_nbody = np.zeros(len(times))
energies_ho = np.zeros(len(times))
r_nbody = np.zeros(len(times))
x_ho = np.zeros(len(times))
for i, t in enumerate(times):
sim.integrate(t)
r_nbody[i] = sim.particles[1].d
x_ho[i] = ode_ho.y[0]
energies_nbody[i] = sim.energy()
energies_ho[i] = energy_ho(ode_ho)
```
Let's plot the relative energy error over time for both the N-body and the harmonic oscillator integration.
```python
fig, ax = plt.subplots(1,1)
ax.set_xlabel("time")
ax.set_ylabel("relative energy error")
ax.set_yscale("log")
ax.plot(times,np.abs((energies_nbody-energies_nbody[0])/energies_nbody[0]), label="N-body")
ax.plot(times,np.abs((energies_ho-energies_ho[0])/energies_ho[0]), label="harmonic oscillator")
ax.legend()
```
<matplotlib.legend.Legend at 0x107fc2c10>

Let us also plot the radius of the inner planet and the position coordinate of the harmonic oscillator.
```python
fig, ax = plt.subplots(1,1)
ax.set_xlabel("time")
ax.plot(times,r_nbody, label="planet")
ax.plot(times,x_ho, label="harmonic oscillator")
ax.legend()
```
<matplotlib.legend.Legend at 0x1122a4df0>

The above example is using the BS integrator for both the N-body and the harmonic oscillator integration. The BS integrator has default tolerance parameters set to $10^{-5}$. You can change the relative or absolute tolerances to get more accurate results:
```python
sim.ri_bs.eps_rel = 1e-8
sim.ri_bs.eps_abs = 1e-8
```
Note that in this example, the harmonic oscillator has a period that is shorter than any orbital timescale. Therefore the timestep is limited by the harmonic oscillator, not the N-body integration. As a result, the N-body integration has an error much smaller than the tolerance parameters.
Let us change the simple harmonic oscillator to a forced harmonic oscillator where the forcing depends on phase of a planet.
```python
def derivatives_ho_forced(ode, yDot, y, t):
# Now we can access particles and their orbital parameters during sub-steps
forcing = np.sin(sim.particles[1].f)
# Note that we are using the global sim variable.
# Alternatively, one can also access the simulation via
# sim = ode.contents.r.contents
yDot[0] = y[1]
yDot[1] = -k/m*y[0] + forcing
ode_ho.derivatives = derivatives_ho_forced
```
We explicitly set `needs_nbody = False` during initialization. We therefore need to tell REBOUND that our ODE now needs access to the particle state during the integrations:
```python
ode_ho.needs_nbody = True
```
Running the integration a bit further, now with the forced harmonic oscillator:
```python
times = np.linspace(65.,120.,1000)
for i, t in enumerate(times):
sim.integrate(t)
r_nbody[i] = sim.particles[1].d
x_ho[i] = ode_ho.y[0]
energies_nbody[i] = sim.energy()
energies_ho[i] = energy_ho(ode_ho)
```
The harmonic oscillator is now getting forced by the planet.
```python
fig, ax = plt.subplots(1,1)
ax.set_xlabel("time")
ax.plot(times,r_nbody, label="planet")
ax.plot(times,x_ho, label="harmonic oscillator")
ax.legend()
```
<matplotlib.legend.Legend at 0x1122f46a0>

In addition to using BS, it is also possible to integrate arbitrary ODEs in conjunction with other REBOUND integrators such as IAS15 and WHFast. In that case, only the user-defined ODEs are integrated with BS **after** a successful N-body integration step. Switching back and forth between the different ODEs in this way introduces an error. However, if the timescales involved in the user-defined ODEs are much longer than the timestep of the N-body integration, this error will be small.
```python
```
|
tigerchenlu98REPO_NAMEreboundPATH_START.@rebound_extracted@rebound-main@ipython_examples@IntegratingArbitraryODEs.ipynb@.PATH_END.py
|
{
"filename": "apero_mk_model_nirps_ha.py",
"repo_name": "njcuk9999/apero-drs",
"repo_path": "apero-drs_extracted/apero-drs-main/apero/recipes/nirps_ha/apero_mk_model_nirps_ha.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
apero_mk_model_nirps_ha.py [obs dir] [files]
Takes the transmissions made in apero_mk_tellu and saves them as three
arrays that have the same shape as an e2ds (zero_residual, a dc offset of
residuals, a linear dependency with water absorption and a linear dependency
with dry absorption
Created on 2019-09-03 at 14:58
@author: cook
"""
from typing import Any, Dict, Tuple, Union
from apero import lang
from apero.base import base
from apero.core import constants
from apero.core.core import drs_database
from apero.core.core import drs_file
from apero.core.core import drs_log
from apero.core.utils import drs_recipe
from apero.core.utils import drs_startup
from apero.science import telluric
# =============================================================================
# Define variables
# =============================================================================
# Recipe name and instrument this recipe is registered for
__NAME__ = 'apero_mk_model_nirps_ha.py'
__INSTRUMENT__ = 'NIRPS_HA'
# Package/version metadata inherited from the apero base package
__PACKAGE__ = base.__PACKAGE__
__version__ = base.__version__
__author__ = base.__author__
__date__ = base.__date__
__release__ = base.__release__
# Get Logging function
WLOG = drs_log.wlog
# Get Recipe class
DrsRecipe = drs_recipe.DrsRecipe
# Get parameter class
ParamDict = constants.ParamDict
# Get the text types
textentry = lang.textentry
# =============================================================================
# Define functions
# =============================================================================
# All recipe code goes in _main
# Only change the following from here:
# 1) function calls (i.e. main(arg1, arg2, **kwargs)
# 2) fkwargs (i.e. fkwargs=dict(arg1=arg1, arg2=arg2, **kwargs)
# 3) config_main outputs value (i.e. None, pp, reduced)
# Everything else is controlled from recipe_definition
def main(**kwargs) -> Union[Dict[str, Any], Tuple[DrsRecipe, ParamDict]]:
    """
    Entry point for apero_mk_model: wraps __main__ with the standard APERO
    start-up, error catching and shut-down sequence.

    :param kwargs: any additional keywords

    :keyword debug: int, debug level (0 for None)

    :returns: dictionary of the local space
    """
    # collect function-call arguments (positional arguments would be
    # added here if this recipe had any)
    fkwargs = dict(kwargs)
    # ----------------------------------------------------------------------
    # parse command line / function-call inputs into a recipe + parameters
    recipe, params = drs_startup.setup(__NAME__, __INSTRUMENT__, fkwargs)
    # special debug mode: return straight away without running the recipe
    if kwargs.get('DEBUG0000', False):
        return recipe, params
    # ----------------------------------------------------------------------
    # run the main bulk of the recipe, catching all errors
    llmain, success = drs_startup.run(__main__, recipe, params)
    # ----------------------------------------------------------------------
    # print the end message and return the local space
    # ----------------------------------------------------------------------
    return drs_startup.end_main(params, llmain, recipe, success)
def __main__(recipe: DrsRecipe, params: ParamDict) -> Dict[str, Any]:
    """
    Main code: should only call recipe and params (defined from main)

    Builds the telluric model: loads all transmission files from the
    telluric database, stacks them into a cube, fits the model, runs
    quality control, plots, writes the model file and (if QC passed)
    adds it to the telluric database.

    :param recipe: DrsRecipe, the recipe class using this function
    :param params: ParamDict, the parameter dictionary of constants

    :return: dictionary containing the local variables
    """
    # ----------------------------------------------------------------------
    # Main Code
    # ----------------------------------------------------------------------
    mainname = __NAME__ + '._main()'
    # get pseudo constants (kept in the returned locals)
    pconst = constants.pload()
    # get fiber from parameters (user input overrides the telluric default)
    if 'FIBER' in params['INPUTS']:
        fiber = params['INPUTS']['FIBER']
    else:
        fiber = params['TELLURIC_FIBER_TYPE']
    # load the telluric database
    telludbm = drs_database.TelluricDatabase(params)
    telludbm.load_db()
    # set up plotting (no plotting before this) -- must be after setting
    # night name
    recipe.plot.set_location(0)
    # set observation directory (we have no info about filename)
    obs_dir = 'other'
    params.set(key='OBS_DIR', value='other', source=mainname)
    # ------------------------------------------------------------------
    # Load transmission files and header vectors
    # ------------------------------------------------------------------
    # load trans filenames
    transfiles = telluric.get_trans_files(params, None, fiber,
                                          database=telludbm)
    # get trans file type
    infiletype = drs_file.get_file_definition(params, 'TELLU_TRANS',
                                              block_kind='red')
    # get new copy of file definition
    infile = infiletype.newcopy(params=params, fiber=fiber)
    # set reference filename (last transmission file found)
    infile.set_filename(transfiles[-1])
    # read data
    infile.read_file()
    # get cube and header vectors
    transcube, transtable = telluric.make_trans_cube(params, transfiles)
    # ------------------------------------------------------------------
    # Calculate the model
    # ------------------------------------------------------------------
    # create trans model parameter dictionary (with e2ds shaped out vectors)
    tprops = telluric.make_trans_model(params, transcube, transtable)
    # ----------------------------------------------------------------------
    # print/log quality control (all assigned previously)
    # ----------------------------------------------------------------------
    qc_params, passed = telluric.mk_model_qc(params)
    # update recipe log
    recipe.log.add_qc(qc_params, passed)
    # ------------------------------------------------------------------
    # Plot and save
    # ------------------------------------------------------------------
    # plot model (debug)
    recipe.plot('MKTELLU_MODEL', tprops=tprops)
    # plot model (summary)
    recipe.plot('SUM_MKTELLU_MODEL', tprops=tprops)
    # save model
    model_file = telluric.mk_write_model(params, recipe, infile, tprops,
                                         transtable, fiber, qc_params)
    # add to telluric database (only when quality control passed)
    if passed:
        # copy the big cube median to the calibDB
        telludbm.add_tellu_file(model_file)
    # ----------------------------------------------------------------------
    # Construct summary document
    # ----------------------------------------------------------------------
    telluric.mk_model_summary(recipe, params, qc_params, tprops)
    # ----------------------------------------------------------------------
    # End of main code
    # ----------------------------------------------------------------------
    return locals()
# =============================================================================
# Start of code
# =============================================================================
if __name__ == "__main__":
    # run main with no arguments (arguments are read from the command
    # line - sys.argv - inside drs_startup.setup)
    ll = main()

# =============================================================================
# End of code
# =============================================================================
|
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@apero@recipes@nirps_ha@apero_mk_model_nirps_ha.py@.PATH_END.py
|
{
"filename": "io.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/gammapy/irf/io.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
from astropy.io import fits
from gammapy.data.hdu_index_table import HDUIndexTable
from gammapy.utils.fits import HDULocation
from gammapy.utils.scripts import make_path
__all__ = ["load_irf_dict_from_file"]
log = logging.getLogger(__name__)
# Mapping from GADF DL3 axis column names to the gammapy axis name and the
# interpolation scheme used along that axis.
IRF_DL3_AXES_SPECIFICATION = {
    "THETA": {"name": "offset", "interp": "lin"},
    "ENERG": {"name": "energy_true", "interp": "log"},
    "ETRUE": {"name": "energy_true", "interp": "log"},
    "RAD": {"name": "rad", "interp": "lin"},
    "DETX": {"name": "fov_lon", "interp": "lin"},
    "DETY": {"name": "fov_lat", "interp": "lin"},
    "MIGRA": {"name": "migra", "interp": "lin"},
}
# Header keywords common to all GADF HDUs.
COMMON_HEADERS = {
    "HDUCLASS": "GADF",
    "HDUDOC": "https://github.com/open-gamma-ray-astro/gamma-astro-data-formats",
    "HDUVERS": "0.2",
}
# Header keywords common to all GADF IRF (response) HDUs.
COMMON_IRF_HEADERS = {
    **COMMON_HEADERS,
    "HDUCLAS1": "RESPONSE",
}
# The key is the class tag.
# TODO: extend the info here with the minimal header info
# Per-IRF-class FITS serialisation spec: extension name, data column
# name(s) and the mandatory GADF header keywords.
IRF_DL3_HDU_SPECIFICATION = {
    "bkg_3d": {
        "extname": "BACKGROUND",
        "column_name": "BKG",
        "mandatory_keywords": {
            **COMMON_IRF_HEADERS,
            "HDUCLAS2": "BKG",
            "HDUCLAS3": "FULL-ENCLOSURE",  # added here to have HDUCLASN in order
            "HDUCLAS4": "BKG_3D",
            "FOVALIGN": "RADEC",
        },
    },
    "bkg_2d": {
        "extname": "BACKGROUND",
        "column_name": "BKG",
        "mandatory_keywords": {
            **COMMON_IRF_HEADERS,
            "HDUCLAS2": "BKG",
            "HDUCLAS3": "FULL-ENCLOSURE",  # added here to have HDUCLASN in order
            "HDUCLAS4": "BKG_2D",
        },
    },
    "edisp_2d": {
        "extname": "ENERGY DISPERSION",
        "column_name": "MATRIX",
        "mandatory_keywords": {
            **COMMON_IRF_HEADERS,
            "HDUCLAS2": "EDISP",
            "HDUCLAS3": "FULL-ENCLOSURE",  # added here to have HDUCLASN in order
            "HDUCLAS4": "EDISP_2D",
        },
    },
    "psf_table": {
        "extname": "PSF_2D_TABLE",
        "column_name": "RPSF",
        "mandatory_keywords": {
            **COMMON_IRF_HEADERS,
            "HDUCLAS2": "RPSF",
            "HDUCLAS3": "FULL-ENCLOSURE",  # added here to have HDUCLASN in order
            "HDUCLAS4": "PSF_TABLE",
        },
    },
    "psf_3gauss": {
        "extname": "PSF_2D_GAUSS",
        "column_name": {
            "sigma_1": "SIGMA_1",
            "sigma_2": "SIGMA_2",
            "sigma_3": "SIGMA_3",
            "scale": "SCALE",
            "ampl_2": "AMPL_2",
            "ampl_3": "AMPL_3",
        },
        "mandatory_keywords": {
            **COMMON_IRF_HEADERS,
            "HDUCLAS2": "RPSF",
            "HDUCLAS3": "FULL-ENCLOSURE",  # added here to have HDUCLASN in order
            "HDUCLAS4": "PSF_3GAUSS",
        },
    },
    "psf_king": {
        "extname": "PSF_2D_KING",
        "column_name": {
            "sigma": "SIGMA",
            "gamma": "GAMMA",
        },
        "mandatory_keywords": {
            **COMMON_IRF_HEADERS,
            "HDUCLAS2": "RPSF",
            "HDUCLAS3": "FULL-ENCLOSURE",  # added here to have HDUCLASN in order
            "HDUCLAS4": "PSF_KING",
        },
    },
    "aeff_2d": {
        "extname": "EFFECTIVE AREA",
        "column_name": "EFFAREA",
        "mandatory_keywords": {
            **COMMON_IRF_HEADERS,
            "HDUCLAS2": "EFF_AREA",
            "HDUCLAS3": "FULL-ENCLOSURE",  # added here to have HDUCLASN in order
            "HDUCLAS4": "AEFF_2D",
        },
    },
    "rad_max_2d": {
        "extname": "RAD_MAX",
        "column_name": "RAD_MAX",
        "mandatory_keywords": {
            **COMMON_IRF_HEADERS,
            "HDUCLAS2": "RAD_MAX",
            "HDUCLAS3": "POINT-LIKE",
            "HDUCLAS4": "RAD_MAX_2D",
        },
    },
}
# Mapping from IRF-map class tags to the HDU name prefix used for maps.
IRF_MAP_HDU_SPECIFICATION = {
    "edisp_kernel_map": "edisp",
    "edisp_map": "edisp",
    "psf_map": "psf",
    "psf_map_reco": "psf",
}
def gadf_is_pointlike(header):
    """Return True if the header declares a point-like GADF IRF.

    The check follows the GADF convention of the ``HDUCLAS3`` keyword.
    """
    hdu_clas3 = header.get("HDUCLAS3")
    return hdu_clas3 == "POINT-LIKE"
class UnknownHDUClass(IOError):
    """Raised when a file contains an unknown HDUCLASS (unrecognised
    HDUCLAS2/HDUCLAS4 combination)."""
def _get_hdu_type_and_class(header):
    """Derive the gammapy ``hdu_type`` and ``hdu_class`` from a FITS header.

    Contains a workaround to support CTA 1DC irf file.

    Raises
    ------
    UnknownHDUClass
        If the header keywords do not map to a known type/class.
    """
    hdu_clas2 = header.get("HDUCLAS2", "")
    hdu_clas4 = header.get("HDUCLAS4", "")

    # a couple of HDUCLAS2 values use different names in gammapy
    aliases = {"rpsf": "psf", "eff_area": "aeff"}
    lowered = hdu_clas2.lower()
    hdu_type = aliases.get(lowered, lowered)
    if hdu_type not in HDUIndexTable.VALID_HDU_TYPE:
        raise UnknownHDUClass(f"HDUCLAS2={hdu_clas2}, HDUCLAS4={hdu_clas4}")

    hdu_class = hdu_clas4.lower()
    if hdu_class not in HDUIndexTable.VALID_HDU_CLASS:
        # workaround for CTA 1DC files with non-compliant HDUCLAS4 names:
        # retry with the type prefix prepended
        hdu_class = f"{hdu_type}_{hdu_class}"
        if hdu_class not in HDUIndexTable.VALID_HDU_CLASS:
            raise UnknownHDUClass(f"HDUCLAS2={hdu_clas2}, HDUCLAS4={hdu_clas4}")

    return hdu_type, hdu_class
def load_irf_dict_from_file(filename):
    """Load all available IRF components from given file into a dictionary.

    If multiple IRFs of the same type are present, the first encountered is returned.

    Parameters
    ----------
    filename : str or `~pathlib.Path`
        Path to the file containing the IRF components, if EVENTS and GTI HDUs
        are included in the file, they are ignored.

    Returns
    -------
    irf_dict : dict of `~gammapy.irf.IRF`
        Dictionary with instances of the Gammapy objects corresponding
        to the IRF components.
    """
    from .rad_max import RadMax2D

    filename = make_path(filename)
    irf_dict = {}
    is_pointlike = False
    with fits.open(filename) as hdulist:
        for hdu in hdulist:
            hdu_clas1 = hdu.header.get("HDUCLAS1", "").lower()
            # not an IRF component
            if hdu_clas1 != "response":
                continue
            # use the module helper so the point-like check stays
            # consistent across the module (was a duplicated inline check)
            is_pointlike |= gadf_is_pointlike(hdu.header)
            try:
                hdu_type, hdu_class = _get_hdu_type_and_class(hdu.header)
            except UnknownHDUClass as e:
                log.warning("File has unknown class %s", e)
                continue
            # location descriptor used to lazily load the HDU contents
            loc = HDULocation(
                hdu_class=hdu_class,
                hdu_name=hdu.name,
                file_dir=filename.parent,
                file_name=filename.name,
            )
            # keep only the first IRF of each type
            if hdu_type in irf_dict.keys():
                log.warning(f"more than one HDU of {hdu_type} type found")
                log.warning(
                    f"loaded the {irf_dict[hdu_type].meta['EXTNAME']} HDU in the dictionary"
                )
                continue
            data = loc.load()
            irf_dict[hdu_type] = data
    # point-like IRFs need a RAD_MAX component; if the file did not provide
    # one, derive it from the effective area IRF
    if is_pointlike and "rad_max" not in irf_dict:
        irf_dict["rad_max"] = RadMax2D.from_irf(irf_dict["aeff"])
    return irf_dict
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@gammapy@irf@io.py@.PATH_END.py
|
{
"filename": "builder.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/fonttools/fontTools/otlLib/builder.py",
"type": "Python"
}
|
from collections import namedtuple, OrderedDict
import os
from fontTools.misc.fixedTools import fixedToFloat
from fontTools.misc.roundTools import otRound
from fontTools import ttLib
from fontTools.ttLib.tables import otTables as ot
from fontTools.ttLib.tables.otBase import (
ValueRecord,
valueRecordFormatDict,
OTLOffsetOverflowError,
OTTableWriter,
CountReference,
)
from fontTools.ttLib.tables import otBase
from fontTools.feaLib.ast import STATNameStatement
from fontTools.otlLib.optimize.gpos import (
_compression_level_from_env,
compact_lookup,
)
from fontTools.otlLib.error import OpenTypeLibError
from functools import reduce
import logging
import copy
log = logging.getLogger(__name__)
def buildCoverage(glyphs, glyphMap):
    """Build a Coverage table from a sequence of glyph names.

    Coverage tables (as defined in the `OpenType spec <https://docs.microsoft.com/en-gb/typography/opentype/spec/chapter2#coverage-table>`__)
    list the glyphs a layout subtable applies to; shaping engines compare
    the glyph stream against them to decide quickly whether a subtable is
    relevant. Glyphs are deduplicated and sorted by glyph ID.

    Example::

        glyphMap = font.getReverseGlyphMap()
        coverage = buildCoverage(["A", "B", "C"], glyphMap)

    Args:
        glyphs: a sequence of glyph names.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.Coverage`` object, or ``None`` if ``glyphs`` is empty.

    Raises:
        ValueError: if a glyph name is not present in ``glyphMap``.
    """
    if not glyphs:
        return None
    coverage = ot.Coverage()
    unique_glyphs = set(glyphs)
    try:
        coverage.glyphs = sorted(unique_glyphs, key=glyphMap.__getitem__)
    except KeyError as missing:
        raise ValueError(f"Could not find glyph {missing} in font") from missing
    return coverage
# LookupFlag bit values, as defined by the OpenType Lookup table.
LOOKUP_FLAG_RIGHT_TO_LEFT = 0x0001
LOOKUP_FLAG_IGNORE_BASE_GLYPHS = 0x0002
LOOKUP_FLAG_IGNORE_LIGATURES = 0x0004
LOOKUP_FLAG_IGNORE_MARKS = 0x0008
LOOKUP_FLAG_USE_MARK_FILTERING_SET = 0x0010
def buildLookup(subtables, flags=0, markFilterSet=None):
    """Wrap a collection of subtables into a Lookup.

    A Lookup (as defined in the `OpenType Spec <https://docs.microsoft.com/en-gb/typography/opentype/spec/chapter2#lookupTbl>`__)
    groups rule subtables of one layout operation under a single lookup
    type, together with the lookup flags and an optional mark filtering
    set. These constants may be used for the flags:

    - ``LOOKUP_FLAG_RIGHT_TO_LEFT``
    - ``LOOKUP_FLAG_IGNORE_BASE_GLYPHS``
    - ``LOOKUP_FLAG_IGNORE_LIGATURES``
    - ``LOOKUP_FLAG_IGNORE_MARKS``
    - ``LOOKUP_FLAG_USE_MARK_FILTERING_SET``

    Args:
        subtables: A list of layout subtable objects (e.g.
            ``MultipleSubst``, ``PairPos``, etc.) or ``None``.
        flags (int): This lookup's flags.
        markFilterSet: ``None``, or an integer mark filtering set index;
            when given, ``LOOKUP_FLAG_USE_MARK_FILTERING_SET`` is added to
            the lookup's flags.

    Returns:
        An ``otTables.Lookup`` object, or ``None`` if no (non-``None``)
        subtables were supplied.
    """
    if subtables is None:
        return None
    subtables = [st for st in subtables if st is not None]
    if not subtables:
        return None
    first_type = subtables[0].LookupType
    assert all(t.LookupType == first_type for t in subtables), (
        "all subtables must have the same LookupType; got %s"
        % repr([t.LookupType for t in subtables])
    )
    lookup = ot.Lookup()
    lookup.LookupType = first_type
    lookup.LookupFlag = flags
    lookup.SubTable = subtables
    lookup.SubTableCount = len(subtables)
    if markFilterSet is None:
        assert (lookup.LookupFlag & LOOKUP_FLAG_USE_MARK_FILTERING_SET) == 0, (
            "if markFilterSet is None, flags must not set "
            "LOOKUP_FLAG_USE_MARK_FILTERING_SET; flags=0x%04x" % flags
        )
    else:
        lookup.LookupFlag |= LOOKUP_FLAG_USE_MARK_FILTERING_SET
        assert isinstance(markFilterSet, int), markFilterSet
        lookup.MarkFilteringSet = markFilterSet
    return lookup
class LookupBuilder(object):
    """Base class for all GSUB/GPOS lookup builders.

    Holds the font, source location, lookup flags and mark filtering set,
    and provides shared helpers for building coverage tables, mark classes
    and substitution subtables.
    """

    # Sentinel stored in rule/mapping keys to mark an explicit subtable break
    SUBTABLE_BREAK_ = "SUBTABLE_BREAK"

    def __init__(self, font, location, table, lookup_type):
        self.font = font
        self.glyphMap = font.getReverseGlyphMap()
        self.location = location
        self.table, self.lookup_type = table, lookup_type
        self.lookupflag = 0
        self.markFilterSet = None
        self.lookup_index = None  # assigned when making final tables
        assert table in ("GPOS", "GSUB")

    def equals(self, other):
        """Return True if *other* builds the same kind of lookup (same
        table, flags and mark filtering set)."""
        return (
            isinstance(other, self.__class__)
            and self.table == other.table
            and self.lookupflag == other.lookupflag
            and self.markFilterSet == other.markFilterSet
        )

    def inferGlyphClasses(self):
        """Infers glyph classes for the GDEF table, such as {"cedilla":3}."""
        return {}

    def getAlternateGlyphs(self):
        """Helper for building 'aalt' features."""
        return {}

    def buildLookup_(self, subtables):
        # Wrap the subtables using this builder's flags / mark filtering set
        return buildLookup(subtables, self.lookupflag, self.markFilterSet)

    def buildMarkClasses_(self, marks):
        """{"cedilla": ("BOTTOM", ast.Anchor), ...} --> {"BOTTOM":0, "TOP":1}

        Helper for MarkBasePostBuilder, MarkLigPosBuilder, and
        MarkMarkPosBuilder. Seems to return the same numeric IDs
        for mark classes as the AFDKO makeotf tool.
        """
        ids = {}
        for mark in sorted(marks.keys(), key=self.font.getGlyphID):
            markClassName, _markAnchor = marks[mark]
            if markClassName not in ids:
                ids[markClassName] = len(ids)
        return ids

    def setBacktrackCoverage_(self, prefix, subtable):
        """Set the backtrack coverage tables (stored in reverse order)."""
        subtable.BacktrackGlyphCount = len(prefix)
        subtable.BacktrackCoverage = []
        for p in reversed(prefix):
            coverage = buildCoverage(p, self.glyphMap)
            subtable.BacktrackCoverage.append(coverage)

    def setLookAheadCoverage_(self, suffix, subtable):
        """Set the lookahead coverage tables."""
        subtable.LookAheadGlyphCount = len(suffix)
        subtable.LookAheadCoverage = []
        for s in suffix:
            coverage = buildCoverage(s, self.glyphMap)
            subtable.LookAheadCoverage.append(coverage)

    def setInputCoverage_(self, glyphs, subtable):
        """Set the input-sequence coverage tables."""
        subtable.InputGlyphCount = len(glyphs)
        subtable.InputCoverage = []
        for g in glyphs:
            coverage = buildCoverage(g, self.glyphMap)
            subtable.InputCoverage.append(coverage)

    def setCoverage_(self, glyphs, subtable):
        """Set plain (non-chained) coverage tables."""
        subtable.GlyphCount = len(glyphs)
        subtable.Coverage = []
        for g in glyphs:
            coverage = buildCoverage(g, self.glyphMap)
            subtable.Coverage.append(coverage)

    def build_subst_subtables(self, mapping, klass):
        """Split *mapping* at subtable-break sentinels and build one
        subtable (via *klass*) per segment."""
        substitutions = [{}]
        for key in mapping:
            if key[0] == self.SUBTABLE_BREAK_:
                substitutions.append({})
            else:
                substitutions[-1][key] = mapping[key]
        subtables = [klass(s) for s in substitutions]
        return subtables

    def add_subtable_break(self, location):
        """Add an explicit subtable break.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this break, or ``None`` if
                no location is provided.
        """
        # Default implementation: subtable breaks are unsupported; warn only
        log.warning(
            OpenTypeLibError(
                'unsupported "subtable" statement for lookup type', location
            )
        )
class AlternateSubstBuilder(LookupBuilder):
    """Builds an Alternate Substitution (GSUB3) lookup.

    Users are expected to manually add alternate glyph substitutions to
    the ``alternates`` attribute after the object has been initialized,
    e.g.::

        builder.alternates["A"] = ["A.alt1", "A.alt2"]

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        alternates: An ordered dictionary of alternates, mapping glyph names
            to a list of names of alternates.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 3)
        self.alternates = OrderedDict()

    def equals(self, other):
        """Return True if *other* builds an identical lookup."""
        return LookupBuilder.equals(self, other) and self.alternates == other.alternates

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the alternate
            substitution lookup.
        """
        subtables = self.build_subst_subtables(
            self.alternates, buildAlternateSubstSubtable
        )
        return self.buildLookup_(subtables)

    def getAlternateGlyphs(self):
        """Helper for building 'aalt' features."""
        return self.alternates

    def add_subtable_break(self, location):
        """Record an explicit subtable break at *location*."""
        self.alternates[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class ChainContextualRule(
    namedtuple("ChainContextualRule", ["prefix", "glyphs", "suffix", "lookups"])
):
    """One contextual rule: backtrack (``prefix``), input (``glyphs``),
    lookahead (``suffix``) and the lookups applied at each input position."""

    @property
    def is_subtable_break(self):
        # Sentinel rules created by add_subtable_break store the break
        # marker in every field; checking the prefix is sufficient.
        return self.prefix == LookupBuilder.SUBTABLE_BREAK_
class ChainContextualRuleset:
    """A group of contextual rules destined for a single subtable."""

    def __init__(self):
        self.rules = []

    def addRule(self, rule):
        """Append a ChainContextualRule to this ruleset."""
        self.rules.append(rule)

    @property
    def hasPrefixOrSuffix(self):
        # Do we have any prefixes/suffixes? If this is False for all
        # rulesets, we can express the whole lookup as GSUB5/GPOS7.
        for rule in self.rules:
            if len(rule.prefix) > 0 or len(rule.suffix) > 0:
                return True
        return False

    @property
    def hasAnyGlyphClasses(self):
        # Do we use glyph classes anywhere in the rules? If this is False
        # we can express this subtable as a Format 1.
        for rule in self.rules:
            for coverage in (rule.prefix, rule.glyphs, rule.suffix):
                if any(len(x) > 1 for x in coverage):
                    return True
        return False

    def format2ClassDefs(self):
        """Return ClassDefBuilders for (prefix, input, suffix), or ``None``
        if the rules cannot all be expressed through glyph classes."""
        PREFIX, GLYPHS, SUFFIX = 0, 1, 2
        classDefBuilders = []
        for ix in [PREFIX, GLYPHS, SUFFIX]:
            context = []
            for r in self.rules:
                context.append(r[ix])
            classes = self._classBuilderForContext(context)
            if not classes:
                return None
            classDefBuilders.append(classes)
        return classDefBuilders

    def _classBuilderForContext(self, context):
        # Bail out (return None) as soon as a glyph set cannot be added to
        # the class definition without conflicting with existing classes.
        classdefbuilder = ClassDefBuilder(useClass0=False)
        for position in context:
            for glyphset in position:
                glyphs = set(glyphset)
                if not classdefbuilder.canAdd(glyphs):
                    return None
                classdefbuilder.add(glyphs)
        return classdefbuilder
class ChainContextualBuilder(LookupBuilder):
def equals(self, other):
return LookupBuilder.equals(self, other) and self.rules == other.rules
def rulesets(self):
# Return a list of ChainContextRuleset objects, taking explicit
# subtable breaks into account
ruleset = [ChainContextualRuleset()]
for rule in self.rules:
if rule.is_subtable_break:
ruleset.append(ChainContextualRuleset())
continue
ruleset[-1].addRule(rule)
# Squish any empty subtables
return [x for x in ruleset if len(x.rules) > 0]
def getCompiledSize_(self, subtables):
if not subtables:
return 0
# We need to make a copy here because compiling
# modifies the subtable (finalizing formats etc.)
table = self.buildLookup_(copy.deepcopy(subtables))
w = OTTableWriter()
table.compile(w, self.font)
size = len(w.getAllData())
return size
def build(self):
"""Build the lookup.
Returns:
An ``otTables.Lookup`` object representing the chained
contextual positioning lookup.
"""
subtables = []
rulesets = self.rulesets()
chaining = any(ruleset.hasPrefixOrSuffix for ruleset in rulesets)
# https://github.com/fonttools/fonttools/issues/2539
#
# Unfortunately, as of 2022-03-07, Apple's CoreText renderer does not
# correctly process GPOS7 lookups, so for now we force contextual
# positioning lookups to be chaining (GPOS8).
#
# This seems to be fixed as of macOS 13.2, but we keep disabling this
# for now until we are no longer concerned about old macOS versions.
# But we allow people to opt-out of this with the config key below.
write_gpos7 = self.font.cfg.get("fontTools.otlLib.builder:WRITE_GPOS7")
# horrible separation of concerns breach
if not write_gpos7 and self.subtable_type == "Pos":
chaining = True
for ruleset in rulesets:
# Determine format strategy. We try to build formats 1, 2 and 3
# subtables and then work out which is best. candidates list holds
# the subtables in each format for this ruleset (including a dummy
# "format 0" to make the addressing match the format numbers).
# We can always build a format 3 lookup by accumulating each of
# the rules into a list, so start with that.
candidates = [None, None, None, []]
for rule in ruleset.rules:
candidates[3].append(self.buildFormat3Subtable(rule, chaining))
# Can we express the whole ruleset as a format 2 subtable?
classdefs = ruleset.format2ClassDefs()
if classdefs:
candidates[2] = [
self.buildFormat2Subtable(ruleset, classdefs, chaining)
]
if not ruleset.hasAnyGlyphClasses:
candidates[1] = [self.buildFormat1Subtable(ruleset, chaining)]
candidates_by_size = []
for i in [1, 2, 3]:
if candidates[i]:
try:
size = self.getCompiledSize_(candidates[i])
except OTLOffsetOverflowError as e:
log.warning(
"Contextual format %i at %s overflowed (%s)"
% (i, str(self.location), e)
)
else:
candidates_by_size.append((size, candidates[i]))
if not candidates_by_size:
raise OpenTypeLibError("All candidates overflowed", self.location)
_min_size, winner = min(candidates_by_size, key=lambda x: x[0])
subtables.extend(winner)
# If we are not chaining, lookup type will be automatically fixed by
# buildLookup_
return self.buildLookup_(subtables)
def buildFormat1Subtable(self, ruleset, chaining=True):
st = self.newSubtable_(chaining=chaining)
st.Format = 1
st.populateDefaults()
coverage = set()
rulesetsByFirstGlyph = {}
ruleAttr = self.ruleAttr_(format=1, chaining=chaining)
for rule in ruleset.rules:
ruleAsSubtable = self.newRule_(format=1, chaining=chaining)
if chaining:
ruleAsSubtable.BacktrackGlyphCount = len(rule.prefix)
ruleAsSubtable.LookAheadGlyphCount = len(rule.suffix)
ruleAsSubtable.Backtrack = [list(x)[0] for x in reversed(rule.prefix)]
ruleAsSubtable.LookAhead = [list(x)[0] for x in rule.suffix]
ruleAsSubtable.InputGlyphCount = len(rule.glyphs)
else:
ruleAsSubtable.GlyphCount = len(rule.glyphs)
ruleAsSubtable.Input = [list(x)[0] for x in rule.glyphs[1:]]
self.buildLookupList(rule, ruleAsSubtable)
firstGlyph = list(rule.glyphs[0])[0]
if firstGlyph not in rulesetsByFirstGlyph:
coverage.add(firstGlyph)
rulesetsByFirstGlyph[firstGlyph] = []
rulesetsByFirstGlyph[firstGlyph].append(ruleAsSubtable)
st.Coverage = buildCoverage(coverage, self.glyphMap)
ruleSets = []
for g in st.Coverage.glyphs:
ruleSet = self.newRuleSet_(format=1, chaining=chaining)
setattr(ruleSet, ruleAttr, rulesetsByFirstGlyph[g])
setattr(ruleSet, f"{ruleAttr}Count", len(rulesetsByFirstGlyph[g]))
ruleSets.append(ruleSet)
setattr(st, self.ruleSetAttr_(format=1, chaining=chaining), ruleSets)
setattr(
st, self.ruleSetAttr_(format=1, chaining=chaining) + "Count", len(ruleSets)
)
return st
def buildFormat2Subtable(self, ruleset, classdefs, chaining=True):
st = self.newSubtable_(chaining=chaining)
st.Format = 2
st.populateDefaults()
if chaining:
(
st.BacktrackClassDef,
st.InputClassDef,
st.LookAheadClassDef,
) = [c.build() for c in classdefs]
else:
st.ClassDef = classdefs[1].build()
inClasses = classdefs[1].classes()
classSets = []
for _ in inClasses:
classSet = self.newRuleSet_(format=2, chaining=chaining)
classSets.append(classSet)
coverage = set()
classRuleAttr = self.ruleAttr_(format=2, chaining=chaining)
for rule in ruleset.rules:
ruleAsSubtable = self.newRule_(format=2, chaining=chaining)
if chaining:
ruleAsSubtable.BacktrackGlyphCount = len(rule.prefix)
ruleAsSubtable.LookAheadGlyphCount = len(rule.suffix)
# The glyphs in the rule may be list, tuple, odict_keys...
# Order is not important anyway because they are guaranteed
# to be members of the same class.
ruleAsSubtable.Backtrack = [
st.BacktrackClassDef.classDefs[list(x)[0]]
for x in reversed(rule.prefix)
]
ruleAsSubtable.LookAhead = [
st.LookAheadClassDef.classDefs[list(x)[0]] for x in rule.suffix
]
ruleAsSubtable.InputGlyphCount = len(rule.glyphs)
ruleAsSubtable.Input = [
st.InputClassDef.classDefs[list(x)[0]] for x in rule.glyphs[1:]
]
setForThisRule = classSets[
st.InputClassDef.classDefs[list(rule.glyphs[0])[0]]
]
else:
ruleAsSubtable.GlyphCount = len(rule.glyphs)
ruleAsSubtable.Class = [ # The spec calls this InputSequence
st.ClassDef.classDefs[list(x)[0]] for x in rule.glyphs[1:]
]
setForThisRule = classSets[
st.ClassDef.classDefs[list(rule.glyphs[0])[0]]
]
self.buildLookupList(rule, ruleAsSubtable)
coverage |= set(rule.glyphs[0])
getattr(setForThisRule, classRuleAttr).append(ruleAsSubtable)
setattr(
setForThisRule,
f"{classRuleAttr}Count",
getattr(setForThisRule, f"{classRuleAttr}Count") + 1,
)
for i, classSet in enumerate(classSets):
if not getattr(classSet, classRuleAttr):
# class sets can be null so replace nop sets with None
classSets[i] = None
setattr(st, self.ruleSetAttr_(format=2, chaining=chaining), classSets)
setattr(
st, self.ruleSetAttr_(format=2, chaining=chaining) + "Count", len(classSets)
)
st.Coverage = buildCoverage(coverage, self.glyphMap)
return st
def buildFormat3Subtable(self, rule, chaining=True):
st = self.newSubtable_(chaining=chaining)
st.Format = 3
if chaining:
self.setBacktrackCoverage_(rule.prefix, st)
self.setLookAheadCoverage_(rule.suffix, st)
self.setInputCoverage_(rule.glyphs, st)
else:
self.setCoverage_(rule.glyphs, st)
self.buildLookupList(rule, st)
return st
def buildLookupList(self, rule, st):
for sequenceIndex, lookupList in enumerate(rule.lookups):
if lookupList is not None:
if not isinstance(lookupList, list):
# Can happen with synthesised lookups
lookupList = [lookupList]
for l in lookupList:
if l.lookup_index is None:
if isinstance(self, ChainContextPosBuilder):
other = "substitution"
else:
other = "positioning"
raise OpenTypeLibError(
"Missing index of the specified "
f"lookup, might be a {other} lookup",
self.location,
)
rec = self.newLookupRecord_(st)
rec.SequenceIndex = sequenceIndex
rec.LookupListIndex = l.lookup_index
def add_subtable_break(self, location):
self.rules.append(
ChainContextualRule(
self.SUBTABLE_BREAK_,
self.SUBTABLE_BREAK_,
self.SUBTABLE_BREAK_,
[self.SUBTABLE_BREAK_],
)
)
def newSubtable_(self, chaining=True):
subtablename = f"Context{self.subtable_type}"
if chaining:
subtablename = "Chain" + subtablename
st = getattr(ot, subtablename)() # ot.ChainContextPos()/ot.ChainSubst()/etc.
setattr(st, f"{self.subtable_type}Count", 0)
setattr(st, f"{self.subtable_type}LookupRecord", [])
return st
# Format 1 and format 2 GSUB5/GSUB6/GPOS7/GPOS8 rulesets and rules form a family:
#
# format 1 ruleset format 1 rule format 2 ruleset format 2 rule
# GSUB5 SubRuleSet SubRule SubClassSet SubClassRule
# GSUB6 ChainSubRuleSet ChainSubRule ChainSubClassSet ChainSubClassRule
# GPOS7 PosRuleSet PosRule PosClassSet PosClassRule
# GPOS8 ChainPosRuleSet ChainPosRule ChainPosClassSet ChainPosClassRule
#
# The following functions generate the attribute names and subtables according
# to this naming convention.
def ruleSetAttr_(self, format=1, chaining=True):
if format == 1:
formatType = "Rule"
elif format == 2:
formatType = "Class"
else:
raise AssertionError(formatType)
subtablename = f"{self.subtable_type[0:3]}{formatType}Set" # Sub, not Subst.
if chaining:
subtablename = "Chain" + subtablename
return subtablename
def ruleAttr_(self, format=1, chaining=True):
if format == 1:
formatType = ""
elif format == 2:
formatType = "Class"
else:
raise AssertionError(formatType)
subtablename = f"{self.subtable_type[0:3]}{formatType}Rule" # Sub, not Subst.
if chaining:
subtablename = "Chain" + subtablename
return subtablename
def newRuleSet_(self, format=1, chaining=True):
st = getattr(
ot, self.ruleSetAttr_(format, chaining)
)() # ot.ChainPosRuleSet()/ot.SubRuleSet()/etc.
st.populateDefaults()
return st
def newRule_(self, format=1, chaining=True):
st = getattr(
ot, self.ruleAttr_(format, chaining)
)() # ot.ChainPosClassRule()/ot.SubClassRule()/etc.
st.populateDefaults()
return st
def attachSubtableWithCount_(
self, st, subtable_name, count_name, existing=None, index=None, chaining=False
):
if chaining:
subtable_name = "Chain" + subtable_name
count_name = "Chain" + count_name
if not hasattr(st, count_name):
setattr(st, count_name, 0)
setattr(st, subtable_name, [])
if existing:
new_subtable = existing
else:
# Create a new, empty subtable from otTables
new_subtable = getattr(ot, subtable_name)()
setattr(st, count_name, getattr(st, count_name) + 1)
if index:
getattr(st, subtable_name).insert(index, new_subtable)
else:
getattr(st, subtable_name).append(new_subtable)
return new_subtable
def newLookupRecord_(self, st):
    """Attach a fresh (Pos|Subst)LookupRecord to ``st`` and return it.

    Even chaining lookups use the unprefixed record name — the OpenType
    tables have no "ChainSubstLookupRecord" — hence chaining=False here.
    """
    record_name = self.subtable_type + "LookupRecord"
    count_name = self.subtable_type + "Count"
    return self.attachSubtableWithCount_(st, record_name, count_name, chaining=False)
class ChainContextPosBuilder(ChainContextualBuilder):
    """Builds a Chained Contextual Positioning (GPOS8) lookup.

    Users are expected to manually add rules to the ``rules`` attribute after
    the object has been initialized, e.g.::

        # pos [A B] [C D] x' lookup lu1 y' z' lookup lu2 E;

        prefix  = [ ["A", "B"], ["C", "D"] ]
        suffix  = [ ["E"] ]
        glyphs  = [ ["x"], ["y"], ["z"] ]
        lookups = [ [lu1], None, [lu2] ]
        builder.rules.append( (prefix, glyphs, suffix, lookups) )

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        rules: A list of tuples representing the rules in this lookup.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 8)
        self.rules = []
        self.subtable_type = "Pos"

    def find_chainable_single_pos(self, lookups, glyphs, value):
        """Helper for add_single_pos_chained_().

        Scans ``lookups`` backwards, stopping at the first subtable break,
        and returns a SinglePosBuilder (the earliest one in list order among
        those scanned) able to position every glyph in ``glyphs`` with
        ``value``; None if there is no such builder.
        """
        found = None
        for candidate in reversed(lookups):
            if candidate == self.SUBTABLE_BREAK_:
                break
            if isinstance(candidate, SinglePosBuilder):
                if all(candidate.can_add(g, value) for g in glyphs):
                    found = candidate
        return found
class ChainContextSubstBuilder(ChainContextualBuilder):
    """Builds a Chained Contextual Substitution (GSUB6) lookup.

    Users are expected to manually add rules to the ``rules`` attribute after
    the object has been initialized, e.g.::

        # sub [A B] [C D] x' lookup lu1 y' z' lookup lu2 E;

        prefix  = [ ["A", "B"], ["C", "D"] ]
        suffix  = [ ["E"] ]
        glyphs  = [ ["x"], ["y"], ["z"] ]
        lookups = [ [lu1], None, [lu2] ]
        builder.rules.append( (prefix, glyphs, suffix, lookups) )

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        rules: A list of tuples representing the rules in this lookup.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 6)
        self.rules = []  # (prefix, input, suffix, lookups)
        self.subtable_type = "Subst"

    def getAlternateGlyphs(self):
        """Collect the alternate glyphs of every lookup referenced by the
        rules, merged into one {glyph: [alternates]} dict, preserving
        first-seen order and skipping duplicates."""
        alternates = {}
        for rule in self.rules:
            if rule.is_subtable_break:
                continue
            for lookup_group in rule.lookups:
                if not isinstance(lookup_group, list):
                    lookup_group = [lookup_group]
                for lookup in lookup_group:
                    if lookup is None:
                        continue
                    for glyph, replacements in lookup.getAlternateGlyphs().items():
                        bucket = alternates.setdefault(glyph, [])
                        for g in replacements:
                            if g not in bucket:
                                bucket.append(g)
        return alternates

    def find_chainable_subst(self, mapping, builder_class):
        """Helper for add_{single,multi}_subst_chained_()"""
        found = None
        for rule in reversed(self.rules):
            if rule.is_subtable_break:
                break
            for sub in rule.lookups:
                compatible = isinstance(sub, builder_class) and not any(
                    g in mapping and mapping[g] != sub.mapping[g] for g in sub.mapping
                )
                if compatible:
                    found = sub
        return found
class LigatureSubstBuilder(LookupBuilder):
    """Builds a Ligature Substitution (GSUB4) lookup.

    Users are expected to manually add ligatures to the ``ligatures``
    attribute after the object has been initialized, e.g.::

        # sub f i by f_i;
        builder.ligatures[("f","f","i")] = "f_f_i"

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        ligatures: An ordered dictionary mapping a tuple of glyph names to the
            ligature glyphname.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 4)
        self.ligatures = OrderedDict()  # {('f','f','i'): 'f_f_i'}

    def equals(self, other):
        return LookupBuilder.equals(self, other) and self.ligatures == other.ligatures

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the ligature
            substitution lookup.
        """
        subtables = self.build_subst_subtables(
            self.ligatures, buildLigatureSubstSubtable
        )
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        # A sentinel key/value pair marks where a new subtable should begin.
        self.ligatures[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class MultipleSubstBuilder(LookupBuilder):
    """Builds a Multiple Substitution (GSUB2) lookup.

    Users are expected to manually add substitutions to the ``mapping``
    attribute after the object has been initialized, e.g.::

        # sub uni06C0 by uni06D5.fina hamza.above;
        builder.mapping["uni06C0"] = [ "uni06D5.fina", "hamza.above"]

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        mapping: An ordered dictionary mapping a glyph name to a list of
            substituted glyph names.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 2)
        self.mapping = OrderedDict()

    def equals(self, other):
        return LookupBuilder.equals(self, other) and self.mapping == other.mapping

    def build(self):
        """Build and return the ``otTables.Lookup`` for this builder."""
        subtables = self.build_subst_subtables(self.mapping, buildMultipleSubstSubtable)
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        # A sentinel key/value pair marks where a new subtable should begin.
        self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class CursivePosBuilder(LookupBuilder):
    """Builds a Cursive Positioning (GPOS3) lookup.

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        attachments: An ordered dictionary mapping a glyph name to a
            two-element tuple of ``otTables.Anchor`` objects.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 3)
        self.attachments = {}

    def equals(self, other):
        return (
            LookupBuilder.equals(self, other) and self.attachments == other.attachments
        )

    def add_attachment(self, location, glyphs, entryAnchor, exitAnchor):
        """Adds attachment information to the cursive positioning lookup.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this lookup. (Unused.)
            glyphs: A list of glyph names sharing these entry and exit
                anchor locations.
            entryAnchor: A ``otTables.Anchor`` object representing the
                entry anchor, or ``None`` if no entry anchor is present.
            exitAnchor: A ``otTables.Anchor`` object representing the
                exit anchor, or ``None`` if no exit anchor is present.
        """
        # All listed glyphs share the same (entry, exit) anchor pair.
        anchors = (entryAnchor, exitAnchor)
        for glyph in glyphs:
            self.attachments[glyph] = anchors

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the cursive
            positioning lookup.
        """
        subtable = buildCursivePosSubtable(self.attachments, self.glyphMap)
        return self.buildLookup_([subtable])
class MarkBasePosBuilder(LookupBuilder):
    """Builds a Mark-To-Base Positioning (GPOS4) lookup.

    Users are expected to manually add marks and bases to the ``marks``
    and ``bases`` attributes after the object has been initialized, e.g.::

        builder.marks["acute"]   = (0, a1)
        builder.marks["grave"]   = (0, a1)
        builder.marks["cedilla"] = (1, a2)
        builder.bases["a"] = {0: a3, 1: a5}
        builder.bases["b"] = {0: a4, 1: a5}

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        marks: A dictionary mapping a glyph name to a two-element
            tuple containing a mark class ID and ``otTables.Anchor`` object.
        bases: A dictionary mapping a glyph name to a dictionary of
            mark class IDs and ``otTables.Anchor`` object.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 4)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.bases = {}  # glyphName -> {markClassName: anchor}

    def equals(self, other):
        # Builders are equal when base properties and both anchor dicts agree.
        return (
            LookupBuilder.equals(self, other)
            and self.marks == other.marks
            and self.bases == other.bases
        )

    def inferGlyphClasses(self):
        # GDEF glyph classes: 1 = base glyph, 3 = mark glyph.  A glyph listed
        # in both dicts ends up classified as a mark (update() wins).
        result = {glyph: 1 for glyph in self.bases}
        result.update({glyph: 3 for glyph in self.marks})
        return result

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the mark-to-base
            positioning lookup.

        Raises:
            ValueError: If a mark or base references a mark class name that
                was not assigned an ID by ``buildMarkClasses_``.
        """
        # Map the builder's mark class names to contiguous integer IDs.
        markClasses = self.buildMarkClasses_(self.marks)
        marks = {}
        for mark, (mc, anchor) in self.marks.items():
            if mc not in markClasses:
                raise ValueError(
                    "Mark class %s not found for mark glyph %s" % (mc, mark)
                )
            marks[mark] = (markClasses[mc], anchor)
        # Re-key every base's anchor dict from class names to class IDs.
        bases = {}
        for glyph, anchors in self.bases.items():
            bases[glyph] = {}
            for mc, anchor in anchors.items():
                if mc not in markClasses:
                    raise ValueError(
                        "Mark class %s not found for base glyph %s" % (mc, glyph)
                    )
                bases[glyph][markClasses[mc]] = anchor
        subtables = buildMarkBasePos(marks, bases, self.glyphMap)
        return self.buildLookup_(subtables)
class MarkLigPosBuilder(LookupBuilder):
    """Builds a Mark-To-Ligature Positioning (GPOS5) lookup.

    Users are expected to manually add marks and bases to the ``marks``
    and ``ligatures`` attributes after the object has been initialized, e.g.::

        builder.marks["acute"]   = (0, a1)
        builder.marks["grave"]   = (0, a1)
        builder.marks["cedilla"] = (1, a2)
        builder.ligatures["f_i"] = [
            { 0: a3, 1: a5 }, # f
            { 0: a4, 1: a5 }  # i
        ]

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        marks: A dictionary mapping a glyph name to a two-element
            tuple containing a mark class ID and ``otTables.Anchor`` object.
        ligatures: A dictionary mapping a glyph name to an array with one
            element for each ligature component. Each array element should be
            a dictionary mapping mark class IDs to ``otTables.Anchor`` objects.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 5)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.ligatures = {}  # glyphName -> [{markClassName: anchor}, ...]

    def equals(self, other):
        return (
            LookupBuilder.equals(self, other)
            and self.marks == other.marks
            and self.ligatures == other.ligatures
        )

    def inferGlyphClasses(self):
        # GDEF glyph classes: 2 = ligature glyph, 3 = mark glyph; marks win
        # when a glyph appears in both dictionaries.
        result = {glyph: 2 for glyph in self.ligatures}
        for glyph in self.marks:
            result[glyph] = 3
        return result

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the mark-to-ligature
            positioning lookup.
        """
        # Translate mark class names to contiguous integer IDs throughout.
        markClasses = self.buildMarkClasses_(self.marks)
        marks = {}
        for mark, (mc, anchor) in self.marks.items():
            marks[mark] = (markClasses[mc], anchor)
        ligs = {
            lig: [{markClasses[mc]: a for mc, a in comp.items()} for comp in components]
            for lig, components in self.ligatures.items()
        }
        subtables = buildMarkLigPos(marks, ligs, self.glyphMap)
        return self.buildLookup_(subtables)
class MarkMarkPosBuilder(LookupBuilder):
    """Builds a Mark-To-Mark Positioning (GPOS6) lookup.

    Users are expected to manually add marks and bases to the ``marks``
    and ``baseMarks`` attributes after the object has been initialized, e.g.::

        builder.marks["acute"]     = (0, a1)
        builder.marks["grave"]     = (0, a1)
        builder.marks["cedilla"]   = (1, a2)
        builder.baseMarks["acute"] = {0: a3}

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        marks: A dictionary mapping a glyph name to a two-element
            tuple containing a mark class ID and ``otTables.Anchor`` object.
        baseMarks: A dictionary mapping a glyph name to a dictionary
            containing one item: a mark class ID and a ``otTables.Anchor`` object.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 6)
        self.marks = {}  # glyphName -> (markClassName, anchor)
        self.baseMarks = {}  # glyphName -> {markClassName: anchor}

    def equals(self, other):
        return (
            LookupBuilder.equals(self, other)
            and self.marks == other.marks
            and self.baseMarks == other.baseMarks
        )

    def inferGlyphClasses(self):
        # GDEF glyph class 3 = mark: both attaching and base marks are marks.
        result = {glyph: 3 for glyph in self.baseMarks}
        result.update({glyph: 3 for glyph in self.marks})
        return result

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the mark-to-mark
            positioning lookup.
        """
        # Map mark class names to integer IDs; markClassList fixes the
        # column order of the Mark2 anchor matrix below.
        markClasses = self.buildMarkClasses_(self.marks)
        markClassList = sorted(markClasses.keys(), key=markClasses.get)
        marks = {
            mark: (markClasses[mc], anchor) for mark, (mc, anchor) in self.marks.items()
        }

        # Assemble the MarkMarkPos subtable field by field.
        st = ot.MarkMarkPos()
        st.Format = 1
        st.ClassCount = len(markClasses)
        st.Mark1Coverage = buildCoverage(marks, self.glyphMap)
        st.Mark2Coverage = buildCoverage(self.baseMarks, self.glyphMap)
        st.Mark1Array = buildMarkArray(marks, self.glyphMap)
        st.Mark2Array = ot.Mark2Array()
        st.Mark2Array.Mark2Count = len(st.Mark2Coverage.glyphs)
        st.Mark2Array.Mark2Record = []
        # One Mark2Record per covered base mark, with one anchor slot per
        # mark class (None where the base mark defines no anchor for it).
        for base in st.Mark2Coverage.glyphs:
            anchors = [self.baseMarks[base].get(mc) for mc in markClassList]
            st.Mark2Array.Mark2Record.append(buildMark2Record(anchors))
        return self.buildLookup_([st])
class ReverseChainSingleSubstBuilder(LookupBuilder):
    """Builds a Reverse Chaining Contextual Single Substitution (GSUB8) lookup.

    Users are expected to manually add rules to the ``rules``
    attribute after the object has been initialized, e.g.::

        # reversesub [a e n] d' by d.alt;

        prefix = [ ["a", "e", "n"] ]
        suffix = []
        mapping = { "d": "d.alt" }
        builder.rules.append( (prefix, suffix, mapping) )

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        rules: A list of three-element tuples, each consisting of a prefix
            sequence, a suffix sequence, and a dictionary of single
            substitutions.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 8)
        self.rules = []  # (prefix, suffix, mapping)

    def equals(self, other):
        return LookupBuilder.equals(self, other) and self.rules == other.rules

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the reverse chaining
            contextual single substitution lookup.
        """
        subtables = []
        # One ReverseChainSingleSubst subtable per rule.
        for prefix, suffix, mapping in self.rules:
            st = ot.ReverseChainSingleSubst()
            st.Format = 1
            self.setBacktrackCoverage_(prefix, st)
            self.setLookAheadCoverage_(suffix, st)
            st.Coverage = buildCoverage(mapping.keys(), self.glyphMap)
            st.GlyphCount = len(mapping)
            # Substitutes must be listed in coverage (glyph ID) order.
            st.Substitute = [mapping[g] for g in st.Coverage.glyphs]
            subtables.append(st)
        return self.buildLookup_(subtables)

    def add_subtable_break(self, location):
        # Nothing to do here, each substitution is in its own subtable.
        pass
class SingleSubstBuilder(LookupBuilder):
    """Builds a Single Substitution (GSUB1) lookup.

    Users are expected to manually add substitutions to the ``mapping``
    attribute after the object has been initialized, e.g.::

        # sub x by y;
        builder.mapping["x"] = "y"

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        mapping: A dictionary mapping a single glyph name to another glyph name.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GSUB", 1)
        self.mapping = OrderedDict()

    def equals(self, other):
        return LookupBuilder.equals(self, other) and self.mapping == other.mapping

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the single
            substitution lookup.
        """
        subtables = self.build_subst_subtables(self.mapping, buildSingleSubstSubtable)
        return self.buildLookup_(subtables)

    def getAlternateGlyphs(self):
        # Each single substitution contributes a one-element alternates list.
        return {glyph: [repl] for glyph, repl in self.mapping.items()}

    def add_subtable_break(self, location):
        # A sentinel key/value pair marks where a new subtable should begin.
        self.mapping[(self.SUBTABLE_BREAK_, location)] = self.SUBTABLE_BREAK_
class ClassPairPosSubtableBuilder(object):
    """Builds class-based Pair Positioning (GPOS2 format 2) subtables.

    Note that this does *not* build a GPOS2 ``otTables.Lookup`` directly,
    but builds a list of ``otTables.PairPos`` subtables. It is used by the
    :class:`PairPosBuilder` below.

    Attributes:
        builder (PairPosBuilder): A pair positioning lookup builder.
    """

    def __init__(self, builder):
        self.builder_ = builder
        # In-progress class definitions for the current subtable; None until
        # the first pair arrives or after a flush.
        self.classDef1_, self.classDef2_ = None, None
        self.values_ = {}  # (glyphclass1, glyphclass2) --> (value1, value2)
        self.forceSubtableBreak_ = False
        self.subtables_ = []

    def addPair(self, gc1, value1, gc2, value2):
        """Add a pair positioning rule.

        Args:
            gc1: A set of glyph names for the "left" glyph
            value1: An ``otTables.ValueRecord`` object for the left glyph's
                positioning.
            gc2: A set of glyph names for the "right" glyph
            value2: An ``otTables.ValueRecord`` object for the right glyph's
                positioning.
        """
        # The pair joins the current subtable only if no explicit break was
        # requested and both classes fit the in-progress class definitions.
        mergeable = (
            not self.forceSubtableBreak_
            and self.classDef1_ is not None
            and self.classDef1_.canAdd(gc1)
            and self.classDef2_ is not None
            and self.classDef2_.canAdd(gc2)
        )
        if not mergeable:
            # Emit the current subtable (if any) and start a fresh one.
            # Class 0 is reserved for "all other glyphs" on the second axis.
            self.flush_()
            self.classDef1_ = ClassDefBuilder(useClass0=True)
            self.classDef2_ = ClassDefBuilder(useClass0=False)
            self.values_ = {}
        self.classDef1_.add(gc1)
        self.classDef2_.add(gc2)
        self.values_[(gc1, gc2)] = (value1, value2)

    def addSubtableBreak(self):
        """Add an explicit subtable break at this point."""
        self.forceSubtableBreak_ = True

    def subtables(self):
        """Return the list of ``otTables.PairPos`` subtables constructed."""
        self.flush_()
        return self.subtables_

    def flush_(self):
        # Finish the in-progress subtable.  No-op when no pair has been added
        # since the last flush (class defs still None).
        if self.classDef1_ is None or self.classDef2_ is None:
            return
        st = buildPairPosClassesSubtable(self.values_, self.builder_.glyphMap)
        if st.Coverage is None:
            # Empty subtable — drop it rather than emit a degenerate PairPos.
            return
        self.subtables_.append(st)
        self.forceSubtableBreak_ = False
class PairPosBuilder(LookupBuilder):
    """Builds a Pair Positioning (GPOS2) lookup.

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        pairs: An array of class-based pair positioning tuples. Usually
            manipulated with the :meth:`addClassPair` method below.
        glyphPairs: A dictionary mapping a tuple of glyph names to a tuple
            of ``otTables.ValueRecord`` objects. Usually manipulated with the
            :meth:`addGlyphPair` method below.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 2)
        self.pairs = []  # [(gc1, value1, gc2, value2)*]
        self.glyphPairs = {}  # (glyph1, glyph2) --> (value1, value2)
        self.locations = {}  # (gc1, gc2) --> (filepath, line, column)

    def addClassPair(self, location, glyphclass1, value1, glyphclass2, value2):
        """Add a class pair positioning rule to the current lookup.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this rule. Unused.
            glyphclass1: A set of glyph names for the "left" glyph in the pair.
            value1: A ``otTables.ValueRecord`` for positioning the left glyph.
            glyphclass2: A set of glyph names for the "right" glyph in the pair.
            value2: A ``otTables.ValueRecord`` for positioning the right glyph.
        """
        self.pairs.append((glyphclass1, value1, glyphclass2, value2))

    def addGlyphPair(self, location, glyph1, value1, glyph2, value2):
        """Add a glyph pair positioning rule to the current lookup.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this rule.
            glyph1: A glyph name for the "left" glyph in the pair.
            value1: A ``otTables.ValueRecord`` for positioning the left glyph.
            glyph2: A glyph name for the "right" glyph in the pair.
            value2: A ``otTables.ValueRecord`` for positioning the right glyph.
        """
        key = (glyph1, glyph2)
        oldValue = self.glyphPairs.get(key, None)
        if oldValue is not None:
            # the Feature File spec explicitly allows specific pairs generated
            # by an 'enum' rule to be overridden by preceding single pairs
            otherLoc = self.locations[key]
            log.debug(
                "Already defined position for pair %s %s at %s; "
                "choosing the first value",
                glyph1,
                glyph2,
                otherLoc,
            )
        else:
            self.glyphPairs[key] = (value1, value2)
            self.locations[key] = location

    def add_subtable_break(self, location):
        # A row of sentinels in the pairs list marks an explicit break.
        self.pairs.append(
            (
                self.SUBTABLE_BREAK_,
                self.SUBTABLE_BREAK_,
                self.SUBTABLE_BREAK_,
                self.SUBTABLE_BREAK_,
            )
        )

    def equals(self, other):
        return (
            LookupBuilder.equals(self, other)
            and self.glyphPairs == other.glyphPairs
            and self.pairs == other.pairs
        )

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the pair positioning
            lookup.
        """
        # Fix: removed an unused local (`builders = {}`) that was never read.
        builder = ClassPairPosSubtableBuilder(self)
        for glyphclass1, value1, glyphclass2, value2 in self.pairs:
            if glyphclass1 is self.SUBTABLE_BREAK_:
                builder.addSubtableBreak()
                continue
            builder.addPair(glyphclass1, value1, glyphclass2, value2)
        subtables = []
        # Glyph-pair (format 1) subtables go first, then class-based ones.
        if self.glyphPairs:
            subtables.extend(buildPairPosGlyphs(self.glyphPairs, self.glyphMap))
        subtables.extend(builder.subtables())
        lookup = self.buildLookup_(subtables)

        # Compact the lookup
        # This is a good moment to do it because the compaction should create
        # smaller subtables, which may prevent overflows from happening.
        # Keep reading the value from the ENV until ufo2ft switches to the config system
        level = self.font.cfg.get(
            "fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
            default=_compression_level_from_env(),
        )
        if level != 0:
            log.info("Compacting GPOS...")
            compact_lookup(self.font, level, lookup)

        return lookup
class SinglePosBuilder(LookupBuilder):
    """Builds a Single Positioning (GPOS1) lookup.

    Attributes:
        font (``fontTools.TTLib.TTFont``): A font object.
        location: A string or tuple representing the location in the original
            source which produced this lookup.
        mapping: A dictionary mapping a glyph name to a ``otTables.ValueRecord``
            objects. Usually manipulated with the :meth:`add_pos` method below.
        lookupflag (int): The lookup's flag
        markFilterSet: Either ``None`` if no mark filtering set is used, or
            an integer representing the filtering set to be used for this
            lookup. If a mark filtering set is provided,
            `LOOKUP_FLAG_USE_MARK_FILTERING_SET` will be set on the lookup's
            flags.
    """

    def __init__(self, font, location):
        LookupBuilder.__init__(self, font, location, "GPOS", 1)
        self.locations = {}  # glyph -> (filename, line, column)
        self.mapping = {}  # glyph -> ot.ValueRecord

    def add_pos(self, location, glyph, otValueRecord):
        """Add a single positioning rule.

        Args:
            location: A string or tuple representing the location in the
                original source which produced this lookup.
            glyph: A glyph name.
            otValueRecord: A ``otTables.ValueRecord`` used to position the
                glyph.

        Raises:
            OpenTypeLibError: If a different position was already defined
                for the glyph.
        """
        if not self.can_add(glyph, otValueRecord):
            otherLoc = self.locations[glyph]
            raise OpenTypeLibError(
                'Already defined different position for glyph "%s" at %s'
                % (glyph, otherLoc),
                location,
            )
        # Falsy (empty) value records are not stored, but the location is
        # still remembered for duplicate-definition diagnostics.
        if otValueRecord:
            self.mapping[glyph] = otValueRecord
        self.locations[glyph] = location

    def can_add(self, glyph, value):
        # A glyph can be (re-)added if it is new or carries the same value.
        assert isinstance(value, ValueRecord)
        curValue = self.mapping.get(glyph)
        return curValue is None or curValue == value

    def equals(self, other):
        return LookupBuilder.equals(self, other) and self.mapping == other.mapping

    def build(self):
        """Build the lookup.

        Returns:
            An ``otTables.Lookup`` object representing the single positioning
            lookup.
        """
        subtables = buildSinglePos(self.mapping, self.glyphMap)
        return self.buildLookup_(subtables)
# GSUB
def buildSingleSubstSubtable(mapping):
    """Builds a single substitution (GSUB1) subtable.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.SingleSubstBuilder` instead.

    Args:
        mapping: A dictionary mapping input glyph names to output glyph names.

    Returns:
        An ``otTables.SingleSubst`` object, or ``None`` if the mapping
        dictionary is empty.
    """
    if not mapping:
        return None
    subtable = ot.SingleSubst()
    subtable.mapping = dict(mapping)
    return subtable
def buildMultipleSubstSubtable(mapping):
    """Builds a multiple substitution (GSUB2) subtable.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.MultipleSubstBuilder` instead.

    Example::

        # sub uni06C0 by uni06D5.fina hamza.above
        # sub uni06C2 by uni06C1.fina hamza.above;

        subtable = buildMultipleSubstSubtable({
            "uni06C0": [ "uni06D5.fina", "hamza.above"],
            "uni06C2": [ "uni06C1.fina", "hamza.above"]
        })

    Args:
        mapping: A dictionary mapping input glyph names to a list of output
            glyph names.

    Returns:
        An ``otTables.MultipleSubst`` object or ``None`` if the mapping dictionary
        is empty.
    """
    if not mapping:
        return None
    self = ot.MultipleSubst()
    self.mapping = dict(mapping)
    return self
def buildAlternateSubstSubtable(mapping):
    """Builds an alternate substitution (GSUB3) subtable.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.AlternateSubstBuilder` instead.

    Args:
        mapping: A dictionary mapping input glyph names to a list of output
            glyph names.

    Returns:
        An ``otTables.AlternateSubst`` object, or ``None`` if the mapping
        dictionary is empty.
    """
    if not mapping:
        return None
    subtable = ot.AlternateSubst()
    subtable.alternates = dict(mapping)
    return subtable
def buildLigatureSubstSubtable(mapping):
    """Builds a ligature substitution (GSUB4) subtable.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.LigatureSubstBuilder` instead.

    Example::

        # sub f f i by f_f_i;
        # sub f i by f_i;

        subtable = buildLigatureSubstSubtable({
            ("f", "f", "i"): "f_f_i",
            ("f", "i"): "f_i",
        })

    Args:
        mapping: A dictionary mapping tuples of glyph names to output
            glyph names.

    Returns:
        An ``otTables.LigatureSubst`` object, or ``None`` if the mapping
        dictionary is empty.
    """
    if not mapping:
        return None
    subtable = ot.LigatureSubst()
    # With fontTools >= 3.1 the remainder of this function could simply be:
    #   subtable.ligatures = dict(mapping)
    subtable.ligatures = {}
    for components in sorted(mapping.keys(), key=subtable._getLigatureSortKey):
        lig = ot.Ligature()
        # The first component is the key; the rest go into the record.
        lig.Component = components[1:]
        lig.CompCount = len(lig.Component) + 1
        lig.LigGlyph = mapping[components]
        subtable.ligatures.setdefault(components[0], []).append(lig)
    return subtable
# GPOS
def buildAnchor(x, y, point=None, deviceX=None, deviceY=None):
    """Builds an Anchor table.

    This determines the appropriate anchor format based on the passed
    parameters: format 1 for plain coordinates, format 2 when a contour
    point is given, format 3 when device tables are given.

    Args:
        x (int): X coordinate.
        y (int): Y coordinate.
        point (int): Index of glyph contour point, if provided.
        deviceX (``otTables.Device``): X coordinate device table, if provided.
        deviceY (``otTables.Device``): Y coordinate device table, if provided.

    Returns:
        An ``otTables.Anchor`` object.
    """
    anchor = ot.Anchor()
    anchor.XCoordinate = x
    anchor.YCoordinate = y
    anchor.Format = 1
    if point is not None:
        anchor.AnchorPoint = point
        anchor.Format = 2
    if deviceX is not None or deviceY is not None:
        # A contour point and device tables are mutually exclusive.
        assert (
            anchor.Format == 1
        ), "Either point, or both of deviceX/deviceY, must be None."
        anchor.XDeviceTable = deviceX
        anchor.YDeviceTable = deviceY
        anchor.Format = 3
    return anchor
def buildBaseArray(bases, numMarkClasses, glyphMap):
    """Builds a base array record.

    As part of building mark-to-base positioning rules, you will need to define
    a ``BaseArray`` record, which "defines for each base glyph an array of
    anchors, one for each mark class." This function builds the base array
    subtable.

    Example::

        bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}}
        basearray = buildBaseArray(bases, 2, font.getReverseGlyphMap())

    Args:
        bases (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being dictionaries mapping mark class ID
            to the appropriate ``otTables.Anchor`` object used for attaching marks
            of that class.
        numMarkClasses (int): The total number of mark classes for which anchors
            are defined.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.BaseArray`` object.
    """
    base_array = ot.BaseArray()
    # Records must be in glyph ID order; missing anchors become None slots.
    base_array.BaseRecord = [
        buildBaseRecord(
            [bases[glyph].get(markClass) for markClass in range(numMarkClasses)]
        )
        for glyph in sorted(bases, key=glyphMap.__getitem__)
    ]
    base_array.BaseCount = len(base_array.BaseRecord)
    return base_array
def buildBaseRecord(anchors):
    """Build an ``otTables.BaseRecord`` from a list of anchors.

    Args:
        anchors: A list of ``otTables.Anchor`` objects (or ``None`` entries),
            one per mark class, in mark class order.

    Returns:
        An ``otTables.BaseRecord`` object.
    """
    # [otTables.Anchor, otTables.Anchor, ...] --> otTables.BaseRecord
    self = ot.BaseRecord()
    self.BaseAnchor = anchors
    return self
def buildComponentRecord(anchors):
    """Builds a component record.

    As part of building mark-to-ligature positioning rules, you will need to
    define ``ComponentRecord`` objects, which contain "an array of offsets...
    to the Anchor tables that define all the attachment points used to attach
    marks to the component." This function builds the component record.

    Args:
        anchors: A list of ``otTables.Anchor`` objects or ``None``.

    Returns:
        A ``otTables.ComponentRecord`` object, or ``None`` if no anchors are
        supplied.
    """
    if not anchors:
        return None
    record = ot.ComponentRecord()
    record.LigatureAnchor = anchors
    return record
def buildCursivePosSubtable(attach, glyphMap):
    """Builds a cursive positioning (GPOS3) subtable.

    Cursive positioning lookups consist of a coverage table plus one
    ``EntryExitRecord`` (entry and exit anchors) per covered glyph.

    Example::

        subtable = buildCursivePosSubtable({
            "AlifIni": (None, buildAnchor(0, 50)),
            "BehMed": (buildAnchor(500,250), buildAnchor(0,50)),
            # ...
        }, font.getReverseGlyphMap())

    Args:
        attach (dict): Maps glyph names to ``(entryAnchor, exitAnchor)``
            tuples of ``otTables.Anchor`` objects; either anchor may be
            ``None``.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.CursivePos`` object, or ``None`` if ``attach`` is empty.
    """
    if not attach:
        return None
    subtable = ot.CursivePos()
    subtable.Format = 1
    subtable.Coverage = buildCoverage(attach.keys(), glyphMap)
    records = []
    # Records are emitted in coverage (glyph ID) order so indices line up.
    for glyphName in subtable.Coverage.glyphs:
        record = ot.EntryExitRecord()
        record.EntryAnchor, record.ExitAnchor = attach[glyphName]
        records.append(record)
    subtable.EntryExitRecord = records
    subtable.EntryExitCount = len(records)
    return subtable
def buildDevice(deltas):
    """Builds a Device record as part of a ValueRecord or Anchor.

    Device tables specify size-specific adjustments to value records and
    anchors, e.g. "increase this anchor's Y position by 1 pixel when the
    font is displayed at 8 pixels per em".

    Args:
        deltas: A dictionary mapping pixels-per-em sizes to the delta
            adjustment in pixels when the font is displayed at that size.

    Returns:
        An ``otTables.Device`` object if any deltas were supplied, or
        ``None`` otherwise.
    """
    if not deltas:
        return None
    device = ot.Device()
    sizes = deltas.keys()
    startSize = device.StartSize = min(sizes)
    endSize = device.EndSize = max(sizes)
    assert 0 <= startSize <= endSize
    # One delta per ppem size across the whole range; sizes without an
    # explicit delta default to 0.
    values = [deltas.get(ppem, 0) for ppem in range(startSize, endSize + 1)]
    device.DeltaValue = values
    smallest, largest = min(values), max(values)
    assert smallest > -129 and largest < 128
    # Choose the narrowest encoding that fits every delta:
    # format 1 packs 2-bit values, format 2 packs 4-bit, format 3 packs 8-bit.
    if smallest > -3 and largest < 2:
        device.DeltaFormat = 1
    elif smallest > -9 and largest < 8:
        device.DeltaFormat = 2
    else:
        device.DeltaFormat = 3
    return device
def buildLigatureArray(ligs, numMarkClasses, glyphMap):
    """Builds a LigatureArray subtable.

    As part of building a mark-to-ligature lookup, you will need to define
    the set of anchors (for each mark class) on each component of the ligature
    where marks can be attached. For example, for an Arabic divine name ligature
    (lam lam heh), you may want to specify mark attachment positioning for
    superior marks (fatha, etc.) and inferior marks (kasra, etc.) on each glyph
    of the ligature. This routine builds the ligature array record.

    Example::

        buildLigatureArray({
            "lam-lam-heh": [
                { 0: superiorAnchor1, 1: inferiorAnchor1 }, # attach points for lam1
                { 0: superiorAnchor2, 1: inferiorAnchor2 }, # attach points for lam2
                { 0: superiorAnchor3, 1: inferiorAnchor3 }, # attach points for heh
            ]
        }, 2, font.getReverseGlyphMap())

    Args:
        ligs (dict): A mapping of ligature names to an array of dictionaries:
            for each component glyph in the ligature, an dictionary mapping
            mark class IDs to anchors.
        numMarkClasses (int): The number of mark classes.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.LigatureArray`` object.
    """
    self = ot.LigatureArray()
    self.LigatureAttach = []
    # Attach records must be in glyph ID order, matching the coverage table
    # built by the caller.
    for lig in sorted(ligs, key=glyphMap.__getitem__):
        anchors = []
        # One anchor list per ligature component; mark classes without an
        # anchor on this component become None.
        for component in ligs[lig]:
            anchors.append([component.get(mc) for mc in range(numMarkClasses)])
        self.LigatureAttach.append(buildLigatureAttach(anchors))
    self.LigatureCount = len(self.LigatureAttach)
    return self
def buildLigatureAttach(components):
    """Wrap per-component anchor lists into an ``otTables.LigatureAttach``.

    ``components`` is a list with one entry per ligature component; each
    entry is a list of ``otTables.Anchor`` objects (or ``None``), one per
    mark class, e.g. ``[[Anchor, Anchor], [Anchor, Anchor, Anchor]]``.
    """
    attach = ot.LigatureAttach()
    records = []
    for componentAnchors in components:
        records.append(buildComponentRecord(componentAnchors))
    attach.ComponentRecord = records
    attach.ComponentCount = len(records)
    return attach
def buildMarkArray(marks, glyphMap):
    """Builds a mark array subtable.

    As part of building mark-to-* positioning rules, you will need to define
    a MarkArray subtable, which "defines the class and the anchor point
    for a mark glyph." This function builds the mark array subtable.

    Example::

        marks = {
            "acute": (0, buildAnchor(300,712)),
            # ...
        }
        markarray = buildMarkArray(marks, font.getReverseGlyphMap())

    Args:
        marks (dict): Maps glyph names to ``(markClass, anchor)`` tuples,
            where ``markClass`` is an int and ``anchor`` is an
            ``otTables.Anchor`` giving the mark's attachment point.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.MarkArray`` object.
    """
    array = ot.MarkArray()
    # Records are emitted in glyph ID order, matching the mark coverage table.
    array.MarkRecord = [
        buildMarkRecord(*marks[glyph])
        for glyph in sorted(marks.keys(), key=glyphMap.__getitem__)
    ]
    array.MarkCount = len(array.MarkRecord)
    return array
def buildMarkBasePos(marks, bases, glyphMap):
    """Build a list of MarkBasePos (GPOS4) subtables.

    This routine turns a set of marks and bases into a list of mark-to-base
    positioning subtables. Currently the list will contain a single subtable
    containing all marks and bases, although at a later date it may return the
    optimal list of subtables subsetting the marks and bases into groups which
    save space. See :func:`buildMarkBasePosSubtable` below.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.MarkBasePosBuilder` instead.

    Example::

        # a1, a2, a3, a4, a5 = buildAnchor(500, 100), ...
        marks = {"acute": (0, a1), "grave": (0, a1), "cedilla": (1, a2)}
        bases = {"a": {0: a3, 1: a5}, "b": {0: a4, 1: a5}}
        markbaseposes = buildMarkBasePos(marks, bases, font.getReverseGlyphMap())

    Args:
        marks (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being a tuple of mark class number and
            an ``otTables.Anchor`` object representing the mark's attachment
            point. (See :func:`buildMarkArray`.)
        bases (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being dictionaries mapping mark class ID
            to the appropriate ``otTables.Anchor`` object used for attaching marks
            of that class. (See :func:`buildBaseArray`.)
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A list of ``otTables.MarkBasePos`` objects.
    """
    # TODO: Consider emitting multiple subtables to save space.
    # Partition the marks and bases into disjoint subsets, so that
    # MarkBasePos rules would only access glyphs from a single
    # subset. This would likely lead to smaller mark/base
    # matrices, so we might be able to omit many of the empty
    # anchor tables that we currently produce. Of course, this
    # would only work if the MarkBasePos rules of real-world fonts
    # allow partitioning into multiple subsets. We should find out
    # whether this is the case; if so, implement the optimization.
    # On the other hand, a very large number of subtables could
    # slow down layout engines; so this would need profiling.
    return [buildMarkBasePosSubtable(marks, bases, glyphMap)]
def buildMarkBasePosSubtable(marks, bases, glyphMap):
    """Build a single MarkBasePos (GPOS4) subtable.

    This builds a mark-to-base lookup subtable containing all of the referenced
    marks and bases. See :func:`buildMarkBasePos`.

    Args:
        marks (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being a tuple of mark class number and
            an ``otTables.Anchor`` object representing the mark's attachment
            point. (See :func:`buildMarkArray`.)
        bases (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being dictionaries mapping mark class ID
            to the appropriate ``otTables.Anchor`` object used for attaching marks
            of that class. (See :func:`buildBaseArray`.)
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.MarkBasePos`` object.
    """
    self = ot.MarkBasePos()
    self.Format = 1
    self.MarkCoverage = buildCoverage(marks, glyphMap)
    self.MarkArray = buildMarkArray(marks, glyphMap)
    # ClassCount is the highest mark class ID plus one; assumes marks is
    # non-empty (max() of an empty sequence raises ValueError).
    self.ClassCount = max([mc for mc, _ in marks.values()]) + 1
    self.BaseCoverage = buildCoverage(bases, glyphMap)
    self.BaseArray = buildBaseArray(bases, self.ClassCount, glyphMap)
    return self
def buildMarkLigPos(marks, ligs, glyphMap):
    """Build a list of MarkLigPos (GPOS5) subtables.

    This routine turns a set of marks and ligatures into a list of mark-to-ligature
    positioning subtables. Currently the list will contain a single subtable
    containing all marks and ligatures, although at a later date it may return
    the optimal list of subtables subsetting the marks and ligatures into groups
    which save space. See :func:`buildMarkLigPosSubtable` below.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.MarkLigPosBuilder` instead.

    Example::

        # a1, a2, a3, a4, a5 = buildAnchor(500, 100), ...
        marks = {
            "acute": (0, a1),
            "grave": (0, a1),
            "cedilla": (1, a2)
        }
        ligs = {
            "f_i": [
                { 0: a3, 1: a5 }, # f
                { 0: a4, 1: a5 }  # i
            ],
            # "c_t": [{...}, {...}]
        }
        markligposes = buildMarkLigPos(marks, ligs,
            font.getReverseGlyphMap())

    Args:
        marks (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being a tuple of mark class number and
            an ``otTables.Anchor`` object representing the mark's attachment
            point. (See :func:`buildMarkArray`.)
        ligs (dict): A mapping of ligature names to an array of dictionaries:
            for each component glyph in the ligature, an dictionary mapping
            mark class IDs to anchors. (See :func:`buildLigatureArray`.)
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A list of ``otTables.MarkLigPos`` objects.
    """
    # TODO: Consider splitting into multiple subtables to save space,
    # as with MarkBasePos, this would be a trade-off that would need
    # profiling. And, depending on how typical fonts are structured,
    # it might not be worth doing at all.
    return [buildMarkLigPosSubtable(marks, ligs, glyphMap)]
def buildMarkLigPosSubtable(marks, ligs, glyphMap):
    """Build a single MarkLigPos (GPOS5) subtable.

    This builds a mark-to-ligature lookup subtable containing all of the
    referenced marks and ligatures. See :func:`buildMarkLigPos`.

    Args:
        marks (dict): A dictionary mapping anchors to glyphs; the keys being
            glyph names, and the values being a tuple of mark class number and
            an ``otTables.Anchor`` object representing the mark's attachment
            point. (See :func:`buildMarkArray`.)
        ligs (dict): A mapping of ligature names to an array of dictionaries:
            for each component glyph in the ligature, an dictionary mapping
            mark class IDs to anchors. (See :func:`buildLigatureArray`.)
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.MarkLigPos`` object.
    """
    self = ot.MarkLigPos()
    self.Format = 1
    self.MarkCoverage = buildCoverage(marks, glyphMap)
    self.MarkArray = buildMarkArray(marks, glyphMap)
    # ClassCount is the highest mark class ID plus one; assumes marks is
    # non-empty (max() of an empty sequence raises ValueError).
    self.ClassCount = max([mc for mc, _ in marks.values()]) + 1
    self.LigatureCoverage = buildCoverage(ligs, glyphMap)
    self.LigatureArray = buildLigatureArray(ligs, self.ClassCount, glyphMap)
    return self
def buildMarkRecord(classID, anchor):
    """Pack a mark class ID and its ``otTables.Anchor`` attachment point
    into an ``otTables.MarkRecord``."""
    assert isinstance(classID, int)
    assert isinstance(anchor, ot.Anchor)
    record = ot.MarkRecord()
    record.Class = classID
    record.MarkAnchor = anchor
    return record
def buildMark2Record(anchors):
    """Wrap a list of per-class anchors (``otTables.Anchor`` or ``None``)
    into an ``otTables.Mark2Record``."""
    record = ot.Mark2Record()
    record.Mark2Anchor = anchors
    return record
def _getValueFormat(f, values, i):
# Helper for buildPairPos{Glyphs|Classes}Subtable.
if f is not None:
return f
mask = 0
for value in values:
if value is not None and value[i] is not None:
mask |= value[i].getFormat()
return mask
def buildPairPosClassesSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=None):
    """Builds a class pair adjustment (GPOS2 format 2) subtable.

    Kerning tables are generally expressed as pair positioning tables using
    class-based pair adjustments. This routine builds format 2 PairPos
    subtables.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.ClassPairPosSubtableBuilder`
    instead, as this takes care of ensuring that the supplied pairs can be
    formed into non-overlapping classes and emitting individual subtables
    whenever the non-overlapping requirement means that a new subtable is
    required.

    Example::

        pairs = {}
        pairs[(
            [ "K", "X" ],
            [ "W", "V" ]
        )] = ( buildValue(xAdvance=+5), buildValue() )
        # pairs[(... , ...)] = (..., ...)
        pairpos = buildPairPosClassesSubtable(pairs, font.getReverseGlyphMap())

    Args:
        pairs (dict): Pair positioning data; the keys being a two-element
            tuple of lists of glyphnames, and the values being a two-element
            tuple of ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.
        valueFormat1: Force the "left" value records to the given format.
        valueFormat2: Force the "right" value records to the given format.

    Returns:
        A ``otTables.PairPos`` object.
    """
    coverage = set()
    # Class 0 may hold real glyphs on the left side (class 0 there means
    # "first class"), but not on the right, where 0 means "all other glyphs".
    classDef1 = ClassDefBuilder(useClass0=True)
    classDef2 = ClassDefBuilder(useClass0=False)
    # Sorted iteration keeps class ID assignment deterministic.
    for gc1, gc2 in sorted(pairs):
        coverage.update(gc1)
        classDef1.add(gc1)
        classDef2.add(gc2)
    self = ot.PairPos()
    self.Format = 2
    valueFormat1 = self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0)
    valueFormat2 = self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1)
    self.Coverage = buildCoverage(coverage, glyphMap)
    self.ClassDef1 = classDef1.build()
    self.ClassDef2 = classDef2.build()
    classes1 = classDef1.classes()
    classes2 = classDef2.classes()
    # Build the full class1 x class2 matrix; cells with no explicit pair
    # get empty value records (or None when the format is 0).
    self.Class1Record = []
    for c1 in classes1:
        rec1 = ot.Class1Record()
        rec1.Class2Record = []
        self.Class1Record.append(rec1)
        for c2 in classes2:
            rec2 = ot.Class2Record()
            val1, val2 = pairs.get((c1, c2), (None, None))
            rec2.Value1 = (
                ValueRecord(src=val1, valueFormat=valueFormat1)
                if valueFormat1
                else None
            )
            rec2.Value2 = (
                ValueRecord(src=val2, valueFormat=valueFormat2)
                if valueFormat2
                else None
            )
            rec1.Class2Record.append(rec2)
    self.Class1Count = len(self.Class1Record)
    self.Class2Count = len(classes2)
    return self
def buildPairPosGlyphs(pairs, glyphMap):
    """Builds a list of glyph-based pair adjustment (GPOS2 format 1) subtables.

    This organises a list of pair positioning adjustments into subtables based
    on common value record formats.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.PairPosBuilder` instead.

    Example::

        pairs = {
            ("K", "W"): ( buildValue(xAdvance=+5), buildValue() ),
            ("K", "V"): ( buildValue(xAdvance=+5), buildValue() ),
            # ...
        }
        subtables = buildPairPosGlyphs(pairs, font.getReverseGlyphMap())

    Args:
        pairs (dict): Pair positioning data; the keys being a two-element
            tuple of glyphnames, and the values being a two-element
            tuple of ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A list of ``otTables.PairPos`` objects.
    """
    # Group pairs by the (format1, format2) of their value records so that
    # each subtable carries a single, uniform pair of value formats.
    grouped = {}  # (formatA, formatB) --> {(glyphA, glyphB): (valA, valB)}
    for glyphPair, (valA, valB) in pairs.items():
        key = (
            valA.getFormat() if valA is not None else 0,
            valB.getFormat() if valB is not None else 0,
        )
        grouped.setdefault(key, {})[glyphPair] = (valA, valB)
    # Sorted for deterministic subtable order.
    return [
        buildPairPosGlyphsSubtable(subPairs, glyphMap, formatA, formatB)
        for (formatA, formatB), subPairs in sorted(grouped.items())
    ]
def buildPairPosGlyphsSubtable(pairs, glyphMap, valueFormat1=None, valueFormat2=None):
    """Builds a single glyph-based pair adjustment (GPOS2 format 1) subtable.

    This builds a PairPos subtable from a dictionary of glyph pairs and
    their positioning adjustments. See also :func:`buildPairPosGlyphs`.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.PairPosBuilder` instead.

    Example::

        pairs = {
            ("K", "W"): ( buildValue(xAdvance=+5), buildValue() ),
            ("K", "V"): ( buildValue(xAdvance=+5), buildValue() ),
            # ...
        }
        pairpos = buildPairPosGlyphsSubtable(pairs, font.getReverseGlyphMap())

    Args:
        pairs (dict): Pair positioning data; the keys being a two-element
            tuple of glyphnames, and the values being a two-element
            tuple of ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.
        valueFormat1: Force the "left" value records to the given format.
        valueFormat2: Force the "right" value records to the given format.

    Returns:
        A ``otTables.PairPos`` object.
    """
    self = ot.PairPos()
    self.Format = 1
    valueFormat1 = self.ValueFormat1 = _getValueFormat(valueFormat1, pairs.values(), 0)
    valueFormat2 = self.ValueFormat2 = _getValueFormat(valueFormat2, pairs.values(), 1)
    # Group the adjustments by their first glyph; each first glyph gets one
    # PairSet listing all of its second glyphs.
    p = {}
    for (glyphA, glyphB), (valA, valB) in pairs.items():
        p.setdefault(glyphA, []).append((glyphB, valA, valB))
    self.Coverage = buildCoverage({g for g, _ in pairs.keys()}, glyphMap)
    self.PairSet = []
    # PairSets follow coverage (glyph ID) order; within each set, records
    # are sorted by the second glyph's ID.
    for glyph in self.Coverage.glyphs:
        ps = ot.PairSet()
        ps.PairValueRecord = []
        self.PairSet.append(ps)
        for glyph2, val1, val2 in sorted(p[glyph], key=lambda x: glyphMap[x[0]]):
            pvr = ot.PairValueRecord()
            pvr.SecondGlyph = glyph2
            pvr.Value1 = (
                ValueRecord(src=val1, valueFormat=valueFormat1)
                if valueFormat1
                else None
            )
            pvr.Value2 = (
                ValueRecord(src=val2, valueFormat=valueFormat2)
                if valueFormat2
                else None
            )
            ps.PairValueRecord.append(pvr)
        ps.PairValueCount = len(ps.PairValueRecord)
    self.PairSetCount = len(self.PairSet)
    return self
def buildSinglePos(mapping, glyphMap):
    """Builds a list of single adjustment (GPOS1) subtables.

    This builds a list of SinglePos subtables from a dictionary of glyph
    names and their positioning adjustments. The format of the subtables are
    determined to optimize the size of the resulting subtables.
    See also :func:`buildSinglePosSubtable`.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.SinglePosBuilder` instead.

    Example::

        mapping = {
            "V": buildValue({ "xAdvance" : +5 }),
            # ...
        }
        subtables = buildSinglePos(mapping, font.getReverseGlyphMap())

    Args:
        mapping (dict): A mapping between glyphnames and
            ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A list of ``otTables.SinglePos`` objects.
    """
    result, handled = [], set()
    # In SinglePos format 1, the covered glyphs all share the same ValueRecord.
    # In format 2, each glyph has its own ValueRecord, but these records
    # all have the same properties (eg., all have an X but no Y placement).
    coverages, masks, values = {}, {}, {}
    for glyph, value in mapping.items():
        # key is (valueFormat, (attrName, attrValue), ...); key[0] is the
        # format mask, so masks groups keys by shared value format.
        key = _getSinglePosValueKey(value)
        coverages.setdefault(key, []).append(glyph)
        masks.setdefault(key[0], []).append(key)
        values[key] = value
    # If a ValueRecord is shared between multiple glyphs, we generate
    # a SinglePos format 1 subtable; that is the most compact form.
    for key, glyphs in coverages.items():
        # 5 ushorts is the length of introducing another sublookup
        if len(glyphs) * _getSinglePosValueSize(key) > 5:
            format1Mapping = {g: values[key] for g in glyphs}
            result.append(buildSinglePosSubtable(format1Mapping, glyphMap))
            handled.add(key)
    # In the remaining ValueRecords, look for those whose valueFormat
    # (the set of used properties) is shared between multiple records.
    # These will get encoded in format 2.
    for valueFormat, keys in masks.items():
        f2 = [k for k in keys if k not in handled]
        if len(f2) > 1:
            format2Mapping = {}
            for k in f2:
                format2Mapping.update((g, values[k]) for g in coverages[k])
            result.append(buildSinglePosSubtable(format2Mapping, glyphMap))
            handled.update(f2)
    # The remaining ValueRecords are only used by a few glyphs, normally
    # one. We encode these in format 1 again.
    for key, glyphs in coverages.items():
        if key not in handled:
            for g in glyphs:
                st = buildSinglePosSubtable({g: values[key]}, glyphMap)
                result.append(st)
    # When the OpenType layout engine traverses the subtables, it will
    # stop after the first matching subtable.  Therefore, we sort the
    # resulting subtables by decreasing coverage size; this increases
    # the chance that the layout engine can do an early exit. (Of course,
    # this would only be true if all glyphs were equally frequent, which
    # is not really the case; but we do not know their distribution).
    # If two subtables cover the same number of glyphs, we sort them
    # by glyph ID so that our output is deterministic.
    result.sort(key=lambda t: _getSinglePosTableKey(t, glyphMap))
    return result
def buildSinglePosSubtable(values, glyphMap):
    """Builds a single adjustment (GPOS1) subtable.

    This builds a single SinglePos subtable from a dictionary of glyph
    names and their positioning adjustments. The format of the subtable is
    determined to optimize the size of the output.
    See also :func:`buildSinglePos`.

    Note that if you are implementing a layout compiler, you may find it more
    flexible to use
    :py:class:`fontTools.otlLib.lookupBuilders.SinglePosBuilder` instead.

    Example::

        values = {
            "V": buildValue({ "xAdvance" : +5 }),
            # ...
        }
        subtable = buildSinglePosSubtable(values, font.getReverseGlyphMap())

    Args:
        values (dict): A mapping between glyphnames and
            ``otTables.ValueRecord`` objects.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.SinglePos`` object.
    """
    self = ot.SinglePos()
    self.Coverage = buildCoverage(values.keys(), glyphMap)
    # Combined format mask of all supplied value records.
    valueFormat = self.ValueFormat = reduce(
        int.__or__, [v.getFormat() for v in values.values()], 0
    )
    valueRecords = [
        ValueRecord(src=values[g], valueFormat=valueFormat)
        for g in self.Coverage.glyphs
    ]
    # If every record is identical, format 1 stores it once for the whole
    # coverage; otherwise format 2 stores one record per glyph.
    if all(v == valueRecords[0] for v in valueRecords):
        self.Format = 1
        if self.ValueFormat != 0:
            self.Value = valueRecords[0]
        else:
            self.Value = None
    else:
        self.Format = 2
        self.Value = valueRecords
        self.ValueCount = len(self.Value)
    return self
def _getSinglePosTableKey(subtable, glyphMap):
    """Sort key for SinglePos subtables: larger coverage first; ties broken
    by the glyph ID of the first covered glyph for deterministic output."""
    assert isinstance(subtable, ot.SinglePos), subtable
    covered = subtable.Coverage.glyphs
    return (-len(covered), glyphMap[covered[0]])
def _getSinglePosValueKey(valueRecord):
    # otBase.ValueRecord --> (valueFormat, ("YPlacement", 12), ...)
    # Produces a hashable key describing a ValueRecord so equal records can
    # be grouped; Device tables are converted to tuples to make them hashable.
    assert isinstance(valueRecord, ValueRecord), valueRecord
    valueFormat, result = 0, []
    for name, value in valueRecord.__dict__.items():
        if isinstance(value, ot.Device):
            result.append((name, _makeDeviceTuple(value)))
        else:
            result.append((name, value))
        # Accumulate the format bit for each attribute actually present.
        valueFormat |= valueRecordFormatDict[name][0]
    result.sort()
    # The combined format mask goes first so keys group by format (key[0]).
    result.insert(0, valueFormat)
    return tuple(result)
_DeviceTuple = namedtuple("_DeviceTuple", "DeltaFormat StartSize EndSize DeltaValue")
def _makeDeviceTuple(device):
# otTables.Device --> tuple, for making device tables unique
return _DeviceTuple(
device.DeltaFormat,
device.StartSize,
device.EndSize,
() if device.DeltaFormat & 0x8000 else tuple(device.DeltaValue),
)
def _getSinglePosValueSize(valueKey):
# Returns how many ushorts this valueKey (short form of ValueRecord) takes up
count = 0
for _, v in valueKey[1:]:
if isinstance(v, _DeviceTuple):
count += len(v.DeltaValue) + 3
else:
count += 1
return count
def buildValue(value):
    """Builds a positioning value record.

    Value records are used to specify coordinates and adjustments for
    positioning and attaching glyphs. Many of the positioning functions
    in this library take ``otTables.ValueRecord`` objects as arguments.
    This function builds value records from dictionaries.

    Args:
        value (dict): A dictionary with zero or more of the following keys:

            - ``xPlacement``
            - ``yPlacement``
            - ``xAdvance``
            - ``yAdvance``
            - ``xPlaDevice``
            - ``yPlaDevice``
            - ``xAdvDevice``
            - ``yAdvDevice``

    Returns:
        An ``otTables.ValueRecord`` object.
    """
    record = ValueRecord()
    # Copy each supplied entry onto the record as an attribute.
    for attrName, attrValue in value.items():
        setattr(record, attrName, attrValue)
    return record
# GDEF
def buildAttachList(attachPoints, glyphMap):
    """Builds an AttachList subtable.

    A GDEF table may contain an Attachment Point List table (AttachList)
    which stores the contour indices of attachment points for glyphs with
    attachment points. This routine builds AttachList subtables.

    Args:
        attachPoints (dict): A mapping between glyph names and a list of
            contour indices.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.AttachList`` object if attachment points are supplied,
        or ``None`` otherwise.
    """
    if not attachPoints:
        return None
    attachList = ot.AttachList()
    attachList.Coverage = buildCoverage(attachPoints.keys(), glyphMap)
    # One AttachPoint per covered glyph, in coverage (glyph ID) order.
    attachList.AttachPoint = [
        buildAttachPoint(attachPoints[glyphName])
        for glyphName in attachList.Coverage.glyphs
    ]
    attachList.GlyphCount = len(attachList.AttachPoint)
    return attachList
def buildAttachPoint(points):
    """Turn a list of contour indices (e.g. ``[4, 23, 41]``) into an
    ``otTables.AttachPoint``; returns ``None`` for an empty list.
    Only used by :func:`buildAttachList`."""
    if not points:
        return None
    attachPoint = ot.AttachPoint()
    # Duplicates are dropped and indices stored in ascending order.
    attachPoint.PointIndex = sorted(set(points))
    attachPoint.PointCount = len(attachPoint.PointIndex)
    return attachPoint
def buildCaretValueForCoord(coord):
    """Build an ``otTables.CaretValue`` from a caret coordinate.

    A bare number (e.g. ``500``) yields a format 1 record; a
    ``(coordinate, deviceTable)`` tuple yields a format 3 record.
    """
    caret = ot.CaretValue()
    if isinstance(coord, tuple):
        caret.Format = 3
        caret.Coordinate, caret.DeviceTable = coord
    else:
        caret.Format = 1
        caret.Coordinate = coord
    return caret
def buildCaretValueForPoint(point):
    """Build a format 2 ``otTables.CaretValue`` from a contour point index
    (e.g. ``4``)."""
    caret = ot.CaretValue()
    caret.Format = 2
    caret.CaretValuePoint = point
    return caret
def buildLigCaretList(coords, points, glyphMap):
    """Builds a ligature caret list table.

    Ligatures appear as a single glyph representing multiple characters; however
    when, for example, editing text containing a ``f_i`` ligature, the user may
    want to place the cursor between the ``f`` and the ``i``. The ligature caret
    list in the GDEF table specifies the position to display the "caret" (the
    character insertion indicator, typically a flashing vertical bar) "inside"
    the ligature to represent an insertion point. The insertion positions may
    be specified either by coordinate or by contour point.

    Example::

        coords = {
            "f_f_i": [300, 600] # f|fi cursor at 300 units, ff|i cursor at 600.
        }
        points = {
            "c_t": [28] # c|t cursor appears at coordinate of contour point 28.
        }
        ligcaretlist = buildLigCaretList(coords, points, font.getReverseGlyphMap())

    Args:
        coords: A mapping between glyph names and a list of coordinates for
            the insertion point of each ligature component after the first one.
        points: A mapping between glyph names and a list of contour points for
            the insertion point of each ligature component after the first one.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        A ``otTables.LigCaretList`` object if any carets are present, or
        ``None`` otherwise."""
    # A glyph may appear in either or both mappings; merge the key sets.
    glyphs = set(coords.keys()) if coords else set()
    if points:
        glyphs.update(points.keys())
    carets = {g: buildLigGlyph(coords.get(g), points.get(g)) for g in glyphs}
    # buildLigGlyph returns None when a glyph ends up with no carets; drop those.
    carets = {g: c for g, c in carets.items() if c is not None}
    if not carets:
        return None
    self = ot.LigCaretList()
    self.Coverage = buildCoverage(carets.keys(), glyphMap)
    # LigGlyph records follow coverage (glyph ID) order.
    self.LigGlyph = [carets[g] for g in self.Coverage.glyphs]
    self.LigGlyphCount = len(self.LigGlyph)
    return self
def buildLigGlyph(coords, points):
    """Build an ``otTables.LigGlyph`` from caret coordinates and/or contour
    points (e.g. ``([500], [4])``); returns ``None`` when both are empty."""
    caretValues = []
    if coords:
        # A coordinate may be a bare number or a (coordinate, device) tuple;
        # sort by the coordinate itself in either case.
        orderedCoords = sorted(
            coords, key=lambda c: c[0] if isinstance(c, tuple) else c
        )
        caretValues += [buildCaretValueForCoord(c) for c in orderedCoords]
    if points:
        caretValues += [buildCaretValueForPoint(p) for p in sorted(points)]
    if not caretValues:
        return None
    ligGlyph = ot.LigGlyph()
    ligGlyph.CaretValue = caretValues
    ligGlyph.CaretCount = len(caretValues)
    return ligGlyph
def buildMarkGlyphSetsDef(markSets, glyphMap):
    """Builds a mark glyph sets definition table.

    OpenType Layout lookups may choose to use mark filtering sets to consider
    or ignore particular combinations of marks. These sets are specified by
    setting a flag on the lookup, but the mark filtering sets are defined in
    the ``GDEF`` table. This routine builds the subtable containing the mark
    glyph set definitions.

    Example::

        set0 = {"acute", "grave"}
        set1 = {"caron", "grave"}

        markglyphsets = buildMarkGlyphSetsDef([set0, set1], font.getReverseGlyphMap())

    Args:
        markSets: A list of sets of glyphnames.
        glyphMap: a glyph name to ID map, typically returned from
            ``font.getReverseGlyphMap()``.

    Returns:
        An ``otTables.MarkGlyphSetsDef`` object.
    """
    if not markSets:
        return None
    self = ot.MarkGlyphSetsDef()
    self.MarkSetTableFormat = 1
    # One coverage table per filtering set; a lookup references its set by
    # index into this list.
    self.Coverage = [buildCoverage(m, glyphMap) for m in markSets]
    self.MarkSetCount = len(self.Coverage)
    return self
class ClassDefBuilder(object):
    """Helper for building ClassDef tables."""

    def __init__(self, useClass0):
        # Set of tuples, each tuple being one glyph class.
        self.classes_ = set()
        # Maps each glyph name to the class (tuple) that contains it.
        self.glyphs_ = {}
        # Whether class ID 0 may be used for a real glyph class.
        self.useClass0_ = useClass0

    @staticmethod
    def _normalize(glyphs):
        # Sets are unordered; sort them so equal classes compare equal.
        if isinstance(glyphs, (set, frozenset)):
            glyphs = sorted(glyphs)
        return tuple(glyphs)

    def canAdd(self, glyphs):
        """Return True if ``glyphs`` is already a class, or overlaps no
        existing class."""
        key = self._normalize(glyphs)
        if key in self.classes_:
            return True
        return all(g not in self.glyphs_ for g in key)

    def add(self, glyphs):
        """Register ``glyphs`` as a class; raises OpenTypeLibError if any
        glyph already belongs to a different class."""
        key = self._normalize(glyphs)
        if key in self.classes_:
            return
        self.classes_.add(key)
        for g in key:
            if g in self.glyphs_:
                raise OpenTypeLibError(
                    f"Glyph {g} is already present in class.", None
                )
            self.glyphs_[g] = key

    def classes(self):
        # In ClassDef1 tables, class id #0 does not need to be encoded
        # because zero is the default. Therefore, we use id #0 for the
        # glyph class that has the largest number of members. However,
        # in other tables than ClassDef1, 0 means "every other glyph"
        # so we should not use that ID for any real glyph classes;
        # we implement this by inserting an empty set at position 0.
        #
        # TODO: Instead of counting the number of glyphs in each class,
        # we should determine the encoded size. If the glyphs in a large
        # class form a contiguous range, the encoding is actually quite
        # compact, whereas a non-contiguous set might need a lot of bytes
        # in the output file. We don't get this right with the key below.
        ordered = sorted(self.classes_, key=lambda c: (-len(c), c))
        return ordered if self.useClass0_ else [frozenset()] + ordered

    def build(self):
        """Compile the registered classes into an ``otTables.ClassDef``.
        Class 0 is implicit and therefore omitted from classDefs."""
        classDef = ot.ClassDef()
        classDef.classDefs = {
            glyph: classID
            for classID, glyphs in enumerate(self.classes())
            for glyph in glyphs
            if classID != 0
        }
        return classDef
# Extremes of the 16.16 fixed-point range, used as the default range
# endpoints for format-2 AxisValue records when rangeMinValue /
# rangeMaxValue are omitted (see buildStatTable below).
AXIS_VALUE_NEGATIVE_INFINITY = fixedToFloat(-0x80000000, 16)
AXIS_VALUE_POSITIVE_INFINITY = fixedToFloat(0x7FFFFFFF, 16)
def buildStatTable(
    ttFont, axes, locations=None, elidedFallbackName=2, windowsNames=True, macNames=True
):
    """Add a 'STAT' table to 'ttFont'.

    'axes' is a list of dictionaries describing axes and their
    values.

    Example::

        axes = [
            dict(
                tag="wght",
                name="Weight",
                ordering=0,  # optional
                values=[
                    dict(value=100, name='Thin'),
                    dict(value=300, name='Light'),
                    dict(value=400, name='Regular', flags=0x2),
                    dict(value=900, name='Black'),
                ],
            )
        ]

    Each axis dict must have 'tag' and 'name' items. 'tag' maps
    to the 'AxisTag' field. 'name' can be a name ID (int), a string,
    or a dictionary containing multilingual names (see the
    addMultilingualName() name table method), and will translate to
    the AxisNameID field.

    An axis dict may contain an 'ordering' item that maps to the
    AxisOrdering field. If omitted, the order of the axes list is
    used to calculate AxisOrdering fields.

    The axis dict may contain a 'values' item, which is a list of
    dictionaries describing AxisValue records belonging to this axis.

    Each value dict must have a 'name' item, which can be a name ID
    (int), a string, or a dictionary containing multilingual names,
    like the axis name. It translates to the ValueNameID field.

    Optionally the value dict can contain a 'flags' item. It maps to
    the AxisValue Flags field, and will be 0 when omitted.

    The format of the AxisValue is determined by the remaining contents
    of the value dictionary:

    If the value dict contains a 'value' item, an AxisValue record
    Format 1 is created. If in addition to the 'value' item it contains
    a 'linkedValue' item, an AxisValue record Format 3 is built.

    If the value dict contains a 'nominalValue' item, an AxisValue
    record Format 2 is built. Optionally it may contain 'rangeMinValue'
    and 'rangeMaxValue' items. These map to -Infinity and +Infinity
    respectively if omitted.

    You cannot specify Format 4 AxisValue tables this way, as they are
    not tied to a single axis, and specify a name for a location that
    is defined by multiple axes values. Instead, you need to supply the
    'locations' argument.

    The optional 'locations' argument specifies AxisValue Format 4
    tables. It should be a list of dicts, where each dict has a 'name'
    item, which works just like the value dicts above, an optional
    'flags' item (defaulting to 0x0), and a 'location' dict. A
    location dict key is an axis tag, and the associated value is the
    location on the specified axis. They map to the AxisIndex and Value
    fields of the AxisValueRecord.

    Example::

        locations = [
            dict(name='Regular ABCD', location=dict(wght=300, ABCD=100)),
            dict(name='Bold ABCD XYZ', location=dict(wght=600, ABCD=200)),
        ]

    The optional 'elidedFallbackName' argument can be a name ID (int),
    a string, a dictionary containing multilingual names, or a list of
    STATNameStatements. It translates to the ElidedFallbackNameID field.

    The 'ttFont' argument must be a TTFont instance that already has a
    'name' table. If a 'STAT' table already exists, it will be
    overwritten by the newly created one.
    """
    ttFont["STAT"] = ttLib.newTable("STAT")
    statTable = ttFont["STAT"].table = ot.STAT()
    # The elided-fallback name is required by every STAT version.
    statTable.ElidedFallbackNameID = _addName(
        ttFont, elidedFallbackName, windows=windowsNames, mac=macNames
    )

    # 'locations' contains data for AxisValue Format 4
    axisRecords, axisValues = _buildAxisRecords(
        axes, ttFont, windowsNames=windowsNames, macNames=macNames
    )
    if not locations:
        statTable.Version = 0x00010001
    else:
        # We'll be adding Format 4 AxisValue records, which
        # requires a higher table version
        statTable.Version = 0x00010002
        multiAxisValues = _buildAxisValuesFormat4(
            locations, axes, ttFont, windowsNames=windowsNames, macNames=macNames
        )
        # Format 4 records go first, before the per-axis records.
        axisValues = multiAxisValues + axisValues
    # Keep the name table in canonical order after the additions above.
    ttFont["name"].names.sort()

    # Store AxisRecords
    axisRecordArray = ot.AxisRecordArray()
    axisRecordArray.Axis = axisRecords
    # XXX these should not be hard-coded but computed automatically
    statTable.DesignAxisRecordSize = 8
    statTable.DesignAxisRecord = axisRecordArray
    statTable.DesignAxisCount = len(axisRecords)

    statTable.AxisValueCount = 0
    statTable.AxisValueArray = None
    if axisValues:
        # Store AxisValueRecords
        axisValueArray = ot.AxisValueArray()
        axisValueArray.AxisValue = axisValues
        statTable.AxisValueArray = axisValueArray
        statTable.AxisValueCount = len(axisValues)
def _buildAxisRecords(axes, ttFont, windowsNames=True, macNames=True):
    """Build AxisRecords plus Format 1/2/3 AxisValue tables from 'axes' dicts.

    Returns an (axisRecords, axisValues) tuple; see buildStatTable for the
    accepted dict layout. Name strings are added to ttFont's name table.
    """
    axisRecords = []
    axisValues = []
    for axisRecordIndex, axisDict in enumerate(axes):
        axis = ot.AxisRecord()
        axis.AxisTag = axisDict["tag"]
        # minNameID=256 keeps axis names in the font-specific name ID range.
        axis.AxisNameID = _addName(
            ttFont, axisDict["name"], 256, windows=windowsNames, mac=macNames
        )
        # Fall back to list position when no explicit ordering was given.
        axis.AxisOrdering = axisDict.get("ordering", axisRecordIndex)
        axisRecords.append(axis)

        for axisVal in axisDict.get("values", ()):
            axisValRec = ot.AxisValue()
            axisValRec.AxisIndex = axisRecordIndex
            axisValRec.Flags = axisVal.get("flags", 0)
            axisValRec.ValueNameID = _addName(
                ttFont, axisVal["name"], windows=windowsNames, mac=macNames
            )

            if "value" in axisVal:
                axisValRec.Value = axisVal["value"]
                if "linkedValue" in axisVal:
                    # Format 3: a value with a style-linked counterpart.
                    axisValRec.Format = 3
                    axisValRec.LinkedValue = axisVal["linkedValue"]
                else:
                    axisValRec.Format = 1
            elif "nominalValue" in axisVal:
                # Format 2: nominal value plus an optional range; omitted
                # bounds default to the Fixed-format infinities.
                axisValRec.Format = 2
                axisValRec.NominalValue = axisVal["nominalValue"]
                axisValRec.RangeMinValue = axisVal.get(
                    "rangeMinValue", AXIS_VALUE_NEGATIVE_INFINITY
                )
                axisValRec.RangeMaxValue = axisVal.get(
                    "rangeMaxValue", AXIS_VALUE_POSITIVE_INFINITY
                )
            else:
                raise ValueError("Can't determine format for AxisValue")

            axisValues.append(axisValRec)
    return axisRecords, axisValues
def _buildAxisValuesFormat4(locations, axes, ttFont, windowsNames=True, macNames=True):
    """Build Format 4 AxisValue tables from 'locations' dicts.

    Each location dict names a spot defined by several axes at once; see
    buildStatTable for the accepted layout.
    """
    # Map each axis tag to its index in the design-axis record order.
    axisTagToIndex = {axisDict["tag"]: idx for idx, axisDict in enumerate(axes)}

    axisValues = []
    for axisLocationDict in locations:
        axisValRec = ot.AxisValue()
        axisValRec.Format = 4
        axisValRec.ValueNameID = _addName(
            ttFont, axisLocationDict["name"], windows=windowsNames, mac=macNames
        )
        axisValRec.Flags = axisLocationDict.get("flags", 0)

        records = []
        for tag, value in axisLocationDict["location"].items():
            avr = ot.AxisValueRecord()
            avr.AxisIndex = axisTagToIndex[tag]
            avr.Value = value
            records.append(avr)
        # Records must appear in ascending axis-index order.
        records.sort(key=lambda rec: rec.AxisIndex)

        axisValRec.AxisCount = len(records)
        axisValRec.AxisValueRecord = records
        axisValues.append(axisValRec)
    return axisValues
def _addName(ttFont, value, minNameID=0, windows=True, mac=True):
nameTable = ttFont["name"]
if isinstance(value, int):
# Already a nameID
return value
if isinstance(value, str):
names = dict(en=value)
elif isinstance(value, dict):
names = value
elif isinstance(value, list):
nameID = nameTable._findUnusedNameID()
for nameRecord in value:
if isinstance(nameRecord, STATNameStatement):
nameTable.setName(
nameRecord.string,
nameID,
nameRecord.platformID,
nameRecord.platEncID,
nameRecord.langID,
)
else:
raise TypeError("value must be a list of STATNameStatements")
return nameID
else:
raise TypeError("value must be int, str, dict or list")
return nameTable.addMultilingualName(
names, ttFont=ttFont, windows=windows, mac=mac, minNameID=minNameID
)
def buildMathTable(
    ttFont,
    constants=None,
    italicsCorrections=None,
    topAccentAttachments=None,
    extendedShapes=None,
    mathKerns=None,
    minConnectorOverlap=0,
    vertGlyphVariants=None,
    horizGlyphVariants=None,
    vertGlyphAssembly=None,
    horizGlyphAssembly=None,
):
    """
    Add a 'MATH' table to 'ttFont'.

    'constants' is a dictionary of math constants. The keys are the constant
    names from the MATH table specification (with capital first letter), and the
    values are the constant values as numbers.

    'italicsCorrections' is a dictionary of italic corrections. The keys are the
    glyph names, and the values are the italic corrections as numbers.

    'topAccentAttachments' is a dictionary of top accent attachments. The keys
    are the glyph names, and the values are the top accent horizontal positions
    as numbers.

    'extendedShapes' is a set of extended shape glyphs.

    'mathKerns' is a dictionary of math kerns. The keys are the glyph names, and
    the values are dictionaries. The keys of these dictionaries are the side
    names ('TopRight', 'TopLeft', 'BottomRight', 'BottomLeft'), and the values
    are tuples of two lists. The first list contains the correction heights as
    numbers, and the second list contains the kern values as numbers.

    'minConnectorOverlap' is the minimum connector overlap as a number.

    'vertGlyphVariants' is a dictionary of vertical glyph variants. The keys are
    the glyph names, and the values are tuples of glyph name and full advance height.

    'horizGlyphVariants' is a dictionary of horizontal glyph variants. The keys
    are the glyph names, and the values are tuples of glyph name and full
    advance width.

    'vertGlyphAssembly' is a dictionary of vertical glyph assemblies. The keys
    are the glyph names, and the values are tuples of assembly parts and italics
    correction. The assembly parts are tuples of glyph name, flags, start
    connector length, end connector length, and full advance height.

    'horizGlyphAssembly' is a dictionary of horizontal glyph assemblies. The
    keys are the glyph names, and the values are tuples of assembly parts
    and italics correction. The assembly parts are tuples of glyph name, flags,
    start connector length, end connector length, and full advance width.

    Where a number is expected, an integer or a float can be used. The floats
    will be rounded.

    Example::

        constants = {
            "ScriptPercentScaleDown": 70,
            "ScriptScriptPercentScaleDown": 50,
            "DelimitedSubFormulaMinHeight": 24,
            "DisplayOperatorMinHeight": 60,
            ...
        }
        italicsCorrections = {
            "fitalic-math": 100,
            "fbolditalic-math": 120,
            ...
        }
        topAccentAttachments = {
            "circumflexcomb": 500,
            "acutecomb": 400,
            "A": 300,
            "B": 340,
            ...
        }
        extendedShapes = {"parenleft", "parenright", ...}
        mathKerns = {
            "A": {
                "TopRight": ([-50, -100], [10, 20, 30]),
                "TopLeft": ([50, 100], [10, 20, 30]),
                ...
            },
            ...
        }
        vertGlyphVariants = {
            "parenleft": [("parenleft", 700), ("parenleft.size1", 1000), ...],
            "parenright": [("parenright", 700), ("parenright.size1", 1000), ...],
            ...
        }
        vertGlyphAssembly = {
            "braceleft": [
                (
                    ("braceleft.bottom", 0, 0, 200, 500),
                    ("braceleft.extender", 1, 200, 200, 200),
                    ("braceleft.middle", 0, 100, 100, 700),
                    ("braceleft.extender", 1, 200, 200, 200),
                    ("braceleft.top", 0, 200, 0, 500),
                ),
                100,
            ],
            ...
        }
    """
    glyphMap = ttFont.getReverseGlyphMap()

    ttFont["MATH"] = math = ttLib.newTable("MATH")
    math.table = table = ot.MATH()
    table.Version = 0x00010000
    table.populateDefaults()
    # Each sub-builder returns None when it was given no data, which
    # leaves the corresponding subtable absent.
    table.MathConstants = _buildMathConstants(constants)
    table.MathGlyphInfo = _buildMathGlyphInfo(
        glyphMap,
        italicsCorrections,
        topAccentAttachments,
        extendedShapes,
        mathKerns,
    )
    table.MathVariants = _buildMathVariants(
        glyphMap,
        minConnectorOverlap,
        vertGlyphVariants,
        horizGlyphVariants,
        vertGlyphAssembly,
        horizGlyphAssembly,
    )
def _buildMathConstants(constants):
if not constants:
return None
mathConstants = ot.MathConstants()
for conv in mathConstants.getConverters():
value = otRound(constants.get(conv.name, 0))
if conv.tableClass:
assert issubclass(conv.tableClass, ot.MathValueRecord)
value = _mathValueRecord(value)
setattr(mathConstants, conv.name, value)
return mathConstants
def _buildMathGlyphInfo(
    glyphMap,
    italicsCorrections,
    topAccentAttachments,
    extendedShapes,
    mathKerns,
):
    """Build the MathGlyphInfo subtable from the per-glyph MATH data.

    Returns None when none of the four inputs contains any data.
    """
    if not any([extendedShapes, italicsCorrections, topAccentAttachments, mathKerns]):
        return None
    info = ot.MathGlyphInfo()
    info.populateDefaults()

    if italicsCorrections:
        # The record list is parallel to the coverage glyph order.
        coverage = buildCoverage(italicsCorrections.keys(), glyphMap)
        info.MathItalicsCorrectionInfo = ot.MathItalicsCorrectionInfo()
        info.MathItalicsCorrectionInfo.Coverage = coverage
        info.MathItalicsCorrectionInfo.ItalicsCorrectionCount = len(coverage.glyphs)
        info.MathItalicsCorrectionInfo.ItalicsCorrection = [
            _mathValueRecord(italicsCorrections[n]) for n in coverage.glyphs
        ]

    if topAccentAttachments:
        # Same parallel-array layout as the italics-correction subtable.
        coverage = buildCoverage(topAccentAttachments.keys(), glyphMap)
        info.MathTopAccentAttachment = ot.MathTopAccentAttachment()
        info.MathTopAccentAttachment.TopAccentCoverage = coverage
        info.MathTopAccentAttachment.TopAccentAttachmentCount = len(coverage.glyphs)
        info.MathTopAccentAttachment.TopAccentAttachment = [
            _mathValueRecord(topAccentAttachments[n]) for n in coverage.glyphs
        ]

    if extendedShapes:
        info.ExtendedShapeCoverage = buildCoverage(extendedShapes, glyphMap)

    if mathKerns:
        coverage = buildCoverage(mathKerns.keys(), glyphMap)
        info.MathKernInfo = ot.MathKernInfo()
        info.MathKernInfo.MathKernCoverage = coverage
        info.MathKernInfo.MathKernCount = len(coverage.glyphs)
        info.MathKernInfo.MathKernInfoRecords = []
        for glyph in coverage.glyphs:
            record = ot.MathKernInfoRecord()
            # Iteration order over this set literal is arbitrary, but each
            # side assigns a distinct attribute, so the result is unaffected.
            for side in {"TopRight", "TopLeft", "BottomRight", "BottomLeft"}:
                if side in mathKerns[glyph]:
                    correctionHeights, kernValues = mathKerns[glyph][side]
                    # The MATH format has one more kern value than
                    # correction heights.
                    assert len(correctionHeights) == len(kernValues) - 1
                    kern = ot.MathKern()
                    kern.HeightCount = len(correctionHeights)
                    kern.CorrectionHeight = [
                        _mathValueRecord(h) for h in correctionHeights
                    ]
                    kern.KernValue = [_mathValueRecord(v) for v in kernValues]
                    setattr(record, f"{side}MathKern", kern)
            info.MathKernInfo.MathKernInfoRecords.append(record)

    return info
def _buildMathVariants(
glyphMap,
minConnectorOverlap,
vertGlyphVariants,
horizGlyphVariants,
vertGlyphAssembly,
horizGlyphAssembly,
):
if not any(
[vertGlyphVariants, horizGlyphVariants, vertGlyphAssembly, horizGlyphAssembly]
):
return None
variants = ot.MathVariants()
variants.populateDefaults()
variants.MinConnectorOverlap = minConnectorOverlap
if vertGlyphVariants or vertGlyphAssembly:
variants.VertGlyphCoverage, variants.VertGlyphConstruction = (
_buildMathGlyphConstruction(
glyphMap,
vertGlyphVariants,
vertGlyphAssembly,
)
)
if horizGlyphVariants or horizGlyphAssembly:
variants.HorizGlyphCoverage, variants.HorizGlyphConstruction = (
_buildMathGlyphConstruction(
glyphMap,
horizGlyphVariants,
horizGlyphAssembly,
)
)
return variants
def _buildMathGlyphConstruction(glyphMap, variants, assemblies):
    """Build (coverage, [MathGlyphConstruction]) for one stretch direction.

    *variants* maps glyph -> [(variantGlyph, advance), ...]; *assemblies*
    maps glyph -> (parts, italicsCorrection). Either may be None or empty;
    the coverage is the union of both key sets.
    """
    glyphs = set()
    if variants:
        glyphs.update(variants.keys())
    if assemblies:
        glyphs.update(assemblies.keys())
    coverage = buildCoverage(glyphs, glyphMap)
    constructions = []
    # One construction per covered glyph, in coverage order.
    for glyphName in coverage.glyphs:
        construction = ot.MathGlyphConstruction()
        construction.populateDefaults()
        if variants and glyphName in variants:
            construction.VariantCount = len(variants[glyphName])
            construction.MathGlyphVariantRecord = []
            for variantName, advance in variants[glyphName]:
                record = ot.MathGlyphVariantRecord()
                record.VariantGlyph = variantName
                record.AdvanceMeasurement = otRound(advance)
                construction.MathGlyphVariantRecord.append(record)
        if assemblies and glyphName in assemblies:
            parts, ic = assemblies[glyphName]
            construction.GlyphAssembly = ot.GlyphAssembly()
            construction.GlyphAssembly.ItalicsCorrection = _mathValueRecord(ic)
            construction.GlyphAssembly.PartCount = len(parts)
            construction.GlyphAssembly.PartRecords = []
            for part in parts:
                # Each part: (glyph, flags, startConnector, endConnector, advance).
                part_name, flags, start, end, advance = part
                record = ot.GlyphPartRecord()
                record.glyph = part_name
                record.PartFlags = int(flags)
                record.StartConnectorLength = otRound(start)
                record.EndConnectorLength = otRound(end)
                record.FullAdvance = otRound(advance)
                construction.GlyphAssembly.PartRecords.append(record)
        constructions.append(construction)
    return coverage, constructions
def _mathValueRecord(value):
    """Wrap *value*, rounded to an integer, in a MathValueRecord."""
    record = ot.MathValueRecord()
    record.Value = otRound(value)
    return record
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@fonttools@fontTools@otlLib@builder.py@.PATH_END.py
|
{
"filename": "_textcase.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcoords/legendgrouptitle/font/_textcase.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextcaseValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``textcase`` property of
    ``parcoords.legendgrouptitle.font``.

    Accepts one of: "normal", "word caps", "upper", "lower".
    """

    def __init__(
        self,
        plotly_name="textcase",
        parent_name="parcoords.legendgrouptitle.font",
        **kwargs,
    ):
        # Delegate to EnumeratedValidator; kwargs may override the edit
        # type or the allowed value set.
        super(TextcaseValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "style"),
            values=kwargs.pop("values", ["normal", "word caps", "upper", "lower"]),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcoords@legendgrouptitle@font@_textcase.py@.PATH_END.py
|
{
"filename": "higher_order.py",
"repo_name": "ricardoclandim/NIRVANA",
"repo_path": "NIRVANA_extracted/NIRVANA-master/nirvana/models/higher_order.py",
"type": "Python"
}
|
import numpy as np
from .beam import smear, ConvolveFFTW
from ..data.util import unpack
from .geometry import projected_polar
def bisym_model(args, paramdict, plot=False, relative_pab=False):
    '''
    Evaluate a bisymmetric velocity field model for given parameters.

    The model for this is a second order nonaxisymmetric model taken from
    Leung (2018) who in turn took it from Spekkens & Sellwood (2007). It
    evaluates the specified models at the desired coordinates.

    Args:
        args (:class:`~nirvana.data.fitargs.FitArgs`):
            Object containing all of the data and settings needed for the
            galaxy.
        paramdict (:obj:`dict`):
            Dictionary of galaxy parameters that are being fit. Assumes the
            format produced :func:`nirvana.fitting.unpack`.
        plot (:obj:`bool`, optional):
            Flag to return resulting models as 2D arrays instead of 1D for
            plotting purposes.
        relative_pab (:obj:`bool`, optional):
            Whether to define the second order position angle relative to the
            first order position angle (better for fitting) or absolutely
            (better for output).

    Returns:
        :obj:`tuple`: Tuple of two objects that are the model velocity field and
        the model velocity dispersion (if `args.disp = True`, otherwise second
        object is `None`). Arrays are 1D unless specified otherwise and should
        be rebinned to match the data.
    '''
    # Convert angles to polar and normalize radial coordinate.
    # NOTE(review): 'relative_pab' is not referenced in this body — confirm
    # whether pab should be offset by pa when it is False.
    inc, pa, pab = np.radians([paramdict['inc'], paramdict['pa'], paramdict['pab']])
    r, th = projected_polar(args.kin.grid_x-paramdict['xc'], args.kin.grid_y-paramdict['yc'], pa, inc)

    # Interpolate the binned velocity profiles over the full coordinate grid.
    if len(args.edges) != len(paramdict['vt']):
        raise ValueError(f"Bin edge and velocity arrays are not the same shape: {len(args.edges)} and {len(paramdict['vt'])}")
    vtvals = np.interp(r, args.edges, paramdict['vt'])
    v2tvals = np.interp(r, args.edges, paramdict['v2t'])
    v2rvals = np.interp(r, args.edges, paramdict['v2r'])

    # Spekkens and Sellwood 2nd order velocity-field model (from Andrew's thesis).
    velmodel = paramdict['vsys'] + np.sin(inc) * (vtvals * np.cos(th) \
            - v2tvals * np.cos(2 * th - pab) * np.cos(th) \
            - v2rvals * np.sin(2 * th - pab) * np.sin(th))

    # Define dispersion if desired.
    if args.disp:
        sigmodel = np.interp(r, args.edges, paramdict['sig'])
    else:
        sigmodel = None

    # Apply beam smearing if a beam FFT is given (unless smearing is disabled).
    if args.kin.beam_fft is not None:
        conv = args.conv if hasattr(args, 'conv') else None
        if hasattr(args, 'smearing') and not args.smearing: pass
        else:
            sbmodel, velmodel, sigmodel = smear(velmodel, args.kin.beam_fft, sb=args.kin.remap('sb').filled(0.),
                    sig=sigmodel, beam_fft=True, cnvfftw=conv, verbose=False)

    # Remasking after convolution.
    if args.kin.vel_mask is not None: velmodel = np.ma.array(velmodel, mask=args.kin.remap('vel_mask'))
    if args.kin.sig_mask is not None: sigmodel = np.ma.array(sigmodel, mask=args.kin.remap('sig_mask'))

    # Rebin the models to match the data binning.
    binvel = np.ma.MaskedArray(args.kin.bin(velmodel), mask=args.kin.vel_mask)
    if sigmodel is not None: binsig = np.ma.MaskedArray(args.kin.bin(sigmodel), mask=args.kin.sig_mask)
    else: binsig = None

    # Return 2D remapped arrays for plotting reasons.
    if plot:
        velremap = args.kin.remap(binvel, args.kin.vel_mask)
        if sigmodel is not None:
            # NOTE(review): the dispersion remap uses vel_mask here —
            # confirm sig_mask wasn't intended.
            sigremap = args.kin.remap(binsig, args.kin.vel_mask)
            return velremap, sigremap
        return velremap
    return binvel, binsig
|
ricardoclandimREPO_NAMENIRVANAPATH_START.@NIRVANA_extracted@NIRVANA-master@nirvana@models@higher_order.py@.PATH_END.py
|
{
"filename": "check_PPF_approx.py",
"repo_name": "yacobozdalkiran/CLASS_mod",
"repo_path": "CLASS_mod_extracted/CLASS_mod-main/class_public-master/scripts/check_PPF_approx.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
#get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from classy import Class
# In[ ]:

# Output wavenumbers and the four model variants to compare:
# PPF1/PPF2 use CLASS's PPF approximation for dark energy crossing w=-1;
# FLD1 integrates the fluid equations directly; FLD1S is FLD1 run in
# synchronous gauge.
k_out = [5e-5, 5e-4, 5e-3]
models = ['PPF1','PPF2','FLD1','FLD1S']
w0 = {'PPF1':-0.7,'PPF2':-1.15,'FLD1':-0.7,'FLD1S':-0.7}
wa = {'PPF1':0.,'PPF2':0.5,'FLD1':0.,'FLD1S':0.}
omega_cdm = {'PPF1':0.104976,'PPF2':0.120376,'FLD1':0.104976,'FLD1S':0.104976}
omega_b = 0.022
##Omega_cdm = {'PPF1':0.26,'PPF2':0.21,'FLD1':0.26,'FLD1S':0.26}
##Omega_b = 0.05
h = {'PPF1':0.64,'PPF2':0.74,'FLD1':0.64,'FLD1S':0.64}

cosmo = {}
for M in models:
    # FLD models switch the PPF approximation off; the 'S' variant also
    # switches to synchronous gauge.
    use_ppf = 'yes'
    gauge = 'Newtonian'
    if 'FLD' in M:
        use_ppf = 'no'
        if 'S' in M:
            gauge = 'Synchronous'

    cosmo[M] = Class()
    cosmo[M].set({'output':'tCl mPk dTk vTk','k_output_values':str(k_out).strip('[]'),
                  'h':h[M],
                  'omega_b':omega_b,'omega_cdm':omega_cdm[M],
                  ##'Omega_b':Omega_b,'omega_cdm':Omega_cdm[M],
                  'cs2_fld':1.,
                  'w0_fld':w0[M],'wa_fld':wa[M],'Omega_Lambda':0.,'gauge':gauge,
                  'use_ppf':use_ppf})
    cosmo[M].compute()
# In[ ]:

# Plot the raw (unlensed) TT spectra of all four models on one figure.
colours = ['r','k','g','m']
for i,M in enumerate(models):
    cl = cosmo[M].raw_cl()
    l = cl['ell']
    plt.loglog(l,cl['tt']*l*(l+1)/(2.*np.pi),label=M,color=colours[i])
plt.legend(loc='upper left')
plt.xlim([2,300])
plt.ylim([6e-11,1e-9])
plt.xlabel(r'$\ell$')
plt.ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{TT}$')
plt.savefig('check_PPF_clTT.pdf')
# In[ ]:

# Compare the metric combination (Phi+Psi)/2 between PPF1 and FLD1 at each
# output wavenumber; FLD curves are drawn as thick dotted lines so that
# agreement with PPF shows up as overlap.
for M in ['PPF1','FLD1']:
    csm = cosmo[M]
    pt = csm.get_perturbations()
    pts = pt['scalar']
    for i,k in enumerate(k_out):
        ptk = pts[i]
        a = ptk['a']
        phi = ptk['phi']
        psi = ptk['psi']
        if 'FLD' in M:
            ls = ':'
            lw=5
        else:
            ls = '-'
            lw=1
        plt.semilogx(a,0.5*(phi+psi),label=M+' '+'$k='+str(k)+'Mpc^{-1}$',ls=ls,lw=lw)
plt.legend(loc='lower left')
plt.xlim([1e-2,1])
plt.ylim([0.3,0.63])
plt.xlabel(r'$a/a_0$')
plt.ylabel(r'$\frac{1}{2} ~(\Phi+\Psi)$')
plt.savefig('check_PPF_metric.pdf')
# In[ ]:

#kminclosed = sqrt(-8*Omega_k)*(70/3e5) Mpc^(-1)
k_out = [1e-3] #[1e-4, 1e-3, 1e-2]
#models = ['PPF1','PPF2','FLD1']
models = ['PPF1','FLD1']
w0 = {'PPF1':-0.7,'PPF2':-1.15,'FLD1':-0.7,'FLD1S':-0.7}
wa = {'PPF1':0.,'PPF2':0.5,'FLD1':0.,'FLD1S':0.}
omega_cdm = {'PPF1':0.104976,'PPF2':0.120376,'FLD1':0.104976,'FLD1S':0.104976}
omega_b = 0.022
##Omega_cdm = {'PPF1':0.26,'PPF2':0.21,'FLD1':0.26,'FLD1S':0.26}
##Omega_b = 0.05
h = {'PPF1':0.64,'PPF2':0.74,'FLD1':0.64}

fig, axes = plt.subplots(1,2,figsize=(16,5))
# Scan curvature and gauge; for each combination compare PPF vs FLD via the
# TT spectrum ratio (left panel) and the metric perturbation (right panel).
for Omega_K in [-0.1, 0.0, 0.15]:
    for gauge in ['Synchronous','Newtonian']:
        cosmo = {}
        for M in models:
            use_ppf = 'yes'
            if 'FLD' in M:
                use_ppf = 'no'

            cosmo[M] = Class()
            cosmo[M].set({'output':'tCl mPk dTk vTk','k_output_values':str(k_out).strip('[]'),
                          'h':h[M],
                          'omega_b':omega_b,'omega_cdm':omega_cdm[M],'Omega_k':Omega_K,
                          ##'Omega_b':Omega_b,'omega_cdm':Omega_cdm[M],
                          'cs2_fld':1.,
                          'w0_fld':w0[M],'wa_fld':wa[M],'Omega_Lambda':0.,'gauge':gauge,
                          'use_ppf':use_ppf,'hyper_sampling_curved_low_nu':10.0})
            cosmo[M].compute()

        label = r'$\Omega_k='+str(Omega_K)+'$, '+gauge[0]
        clfld = cosmo['FLD1'].raw_cl()
        clppf = cosmo['PPF1'].raw_cl()
        axes[0].semilogx(clfld['ell'][2:],clppf['tt'][2:]/clfld['tt'][2:],label=label)

        ptfld = cosmo['FLD1'].get_perturbations()['scalar']
        ptppf = cosmo['PPF1'].get_perturbations()['scalar']
        for i,k in enumerate(k_out):
            ptkfld = ptfld[i]
            a = ptkfld['a']
            phi_plus_phi_fld = ptkfld['phi']+ptkfld['psi']
            ptkppf = ptppf[i]
            phi_plus_phi_ppf = ptkppf['phi']+ptkppf['psi']
            axes[1].semilogx(ptkppf['a'],phi_plus_phi_ppf,label=label+'_ppf')
            axes[1].semilogx(ptkfld['a'],phi_plus_phi_fld,label=label+'_fld')
            # The two runs may keep different numbers of time samples.
            print (len(ptkppf['a']),len(ptkfld['a']))

axes[0].legend(loc='lower left',ncol=2)
axes[0].set_xlim([2,300])
axes[0].set_ylim([0.98,1.02])
axes[0].set_xlabel(r'$\ell$')
axes[0].set_ylabel(r'$C_\ell^\mathrm{FLD1}/C_\ell^\mathrm{PPF1}$')
axes[1].legend(loc='lower left',ncol=2)
axes[1].set_xlim([1e-2,1])
axes[1].set_xlabel(r'$a/a_0$')
axes[1].set_ylabel(r'$(\Phi+\Psi)$')
fig.savefig('check_PPF_Omegak.pdf')
# In[ ]:

# Same curvature/gauge scan at a smaller scale (k = 0.1/Mpc), this time
# overplotting the raw TT spectra (left) and |Phi+Psi|/2 (right) for each
# model rather than their ratio.
colours = ['r','k','g','m']
k_out = [1e-1] #[1e-4, 1e-3, 1e-2]
#models = ['PPF1','PPF2','FLD1']
models = ['PPF1','FLD1']
w0 = {'PPF1':-0.7,'PPF2':-1.15,'FLD1':-0.7,'FLD1S':-0.7}
wa = {'PPF1':0.,'PPF2':0.5,'FLD1':0.,'FLD1S':0.}
omega_cdm = {'PPF1':0.104976,'PPF2':0.120376,'FLD1':0.104976,'FLD1S':0.104976}
omega_b = 0.022
##Omega_cdm = {'PPF1':0.26,'PPF2':0.21,'FLD1':0.26,'FLD1S':0.26}
##Omega_b = 0.05
h = {'PPF1':0.64,'PPF2':0.74,'FLD1':0.64}

fig, axes = plt.subplots(1,2,figsize=(18,8))
for Omega_K in [-0.1, 0.0, 0.15]:
    for ppfgauge in ['Synchronous','Newtonian']:
        cosmo = {}
        for M in models:
            use_ppf = 'yes'
            gauge = ppfgauge
            if 'FLD' in M:
                use_ppf = 'no'

            cosmo[M] = Class()
            cosmo[M].set({'output':'tCl mPk dTk vTk','k_output_values':str(k_out).strip('[]'),
                          'h':h[M],
                          'omega_b':omega_b,'omega_cdm':omega_cdm[M],'Omega_k':Omega_K,
                          ##'Omega_b':Omega_b,'omega_cdm':Omega_cdm[M],
                          'cs2_fld':1.,
                          'w0_fld':w0[M],'wa_fld':wa[M],'Omega_Lambda':0.,'gauge':gauge,
                          'use_ppf':use_ppf,'hyper_sampling_curved_low_nu':6.1})
            cosmo[M].compute()

        #fig, axes = plt.subplots(1,2,figsize=(16,5))
        for j,M in enumerate(models):
            cl = cosmo[M].raw_cl()
            l = cl['ell']
            label = M+r'$\Omega_k='+str(Omega_K)+'$, '+gauge[0]
            axes[0].loglog(l,cl['tt']*l*(l+1)/(2.*np.pi),label=label,color=colours[j])

            csm = cosmo[M]
            pt = csm.get_perturbations()
            pts = pt['scalar']
            for i,k in enumerate(k_out):
                ptk = pts[i]
                a = ptk['a']
                phi = ptk['phi']
                psi = ptk['psi']
                if 'FLD' in M:
                    ls = ':'
                    lw=5
                else:
                    ls = '-'
                    lw=1
                axes[1].semilogx(a,0.5*abs(phi+psi),label=label+' '+'$k='+str(k)+'Mpc^{-1}$',ls=ls,lw=lw)

axes[0].legend(loc='upper left')
axes[0].set_xlim([2,300])
axes[0].set_ylim([6e-11,1e-9])
axes[0].set_xlabel(r'$\ell$')
axes[0].set_ylabel(r'$[\ell(\ell+1)/2\pi] C_\ell^\mathrm{TT}$')
axes[1].legend(loc='upper right')
#axes[1].set_xlim([1e-2,1])
#axes[1].set_ylim([0.3,0.63])
axes[1].set_xlabel(r'$a/a_0$')
axes[1].set_ylabel(r'$\frac{1}{2}~(\Phi+\Psi)$')
fig.savefig('check_PPF_Omegak2.pdf')
# In[ ]:

# Sanity check: omega_cdm = Omega_m*h^2 - omega_b for the two background
# cosmologies used above (h=0.64 and h=0.74 runs).
print (0.31*0.64**2-0.022)
print (0.26*0.74**2-0.022)
yacobozdalkiranREPO_NAMECLASS_modPATH_START.@CLASS_mod_extracted@CLASS_mod-main@class_public-master@scripts@check_PPF_approx.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/kernels/shim/README.md",
"type": "Markdown"
}
|
This folder contains a convenience library called *tf-shim* over TF and TFLite
op kernel APIs.
## Summary
This library creates a shim over the custom op APIs of TF and TFLite so the
developer can write the custom op once with minimal binary or runtime overhead.
An example usage is an input preprocessing op kernel that can be used in
both TF and TFLite.
## Background
When there is a need to implement a logic that is not supported by the TF
builtin ops the alternative is to build a custom op. If that op needs to
run on-device then it needs to be written in C++ against the client API for
custom ops.
For example, feature processing especially for textual input in an ML model
can involve operations that don't lend themselves well to vectorization and the
code, if written as a C++ function, would be much shorter and more readable.
However, Tensorflow and TFLite APIs for creating op kernels are, at the moment,
not identical. This library offers a convenient way to write the kernel once and
adapt it to both TF and TFLite with minimal binary and runtime overhead.
## Implementation
This folder contains two pieces:
1. `TensorView` as a shim over `::tensorflow::Tensor` and `TfLiteTensor`
2. `OpKernelShim` class which abstracts the TF and TFLite op kernel APIs.
### TensorView
This class is a *view* over an already allocated tensor in TF or TFLite without
taking any ownership. In that sense it is similar to `absl::string_view` but with
the difference that the underlying buffer can be mutable.
Example Usage:
```
::tensorflow::Tensor tf_tensor;
auto t = TensorView::New(&tf_tensor);
auto t_str_mat = t.As<::tensorflow::tstring, /*RANK=*/ 2>();
t(0, 0) = "ab";
t(0, 1) = "cde";
auto t_buffer = t.Data<::tensorflow::tstring>();
t[0] = "ab";
t[1] = "cde";
```
```
TfLiteTensor tflite_tensor;
auto t = TensorView::New(&tflite_tensor);
auto t_int_vec = t.As<int32, /*RANK=*/ 1>();
t(0) = 123;
t(1) = 456;
auto t_buffer = t.Data<int32>();
t[0] = 123;
t[1] = 456;
```
The `New` is the factory function which based on the type of the input returns
either a `TfTensorView` or a `TfLiteTensorView`.
See the unit tests `tf_tensor_view_test.cc` and `tflite_tensor_view_test.cc` for
more usage.
The string tensor in `TfLiteTensorView` is a bit of special case. Since string
tensors in TfLite are serialized in a specific format, while writing to those
tensors an intermediate buffer is needed to hold intermediate values before all
the strings get serialized. The intermediate string buffers are serialized back
to the TfLite string format once the last remaining `TfLiteTensorView` goes out
of scope. Only then the user can see the string values in the underlying
`TfLiteTensor`. That said, when implementing an op kernel, there is rarely a
need to read back the contents of a mutable output `TfLiteTensor` within the
same code block.
### OpKernelShim
*WARNING: Experimental interface, subject to change*
This class defines the interface which when implemented allows for convenient
adaptation to TF and TFLite op kernels.
Here is an example op kernel implementing this interface:
```
template<TfRuntime R>
class MyOp : public OpKernelShim<MyOp, R> {
// Attributes declaration (syntax: https://www.tensorflow.org/guide/create_op)
static std::vector<std::string> Attrs();
// Input tensors declaration (syntax: https://www.tensorflow.org/guide/create_op)
static std::vector<std::string> Inputs();
// Output tensors declaration (syntax: https://www.tensorflow.org/guide/create_op)
static std::vector<std::string> Outputs();
// Initializes the op
absl::Status Init(InitContext* ctx);
// Runs the operation
absl::Status Invoke(InvokeContext* ctx);
// Shape inference
static absl::Status ShapeInference(ShapeInferenceContext* ctx);
};
```
The class `MyOp` is passing itself to `OpKernelShim` as a template parameter.
This is because `OpKernelShim` is a static interface using the CRTP pattern.
Similarly, the context classes: `InitContext`, `InvokeContext` and
`ShapeInferenceContext` are all static interfaces in the same way.
The class `MyOp` can also be templatized. See `test_op/tmpl_op.h` for an
example.
### Context Interfaces
An op kernel written using this library has access to a number of *context*
objects at various stages of its lifecycle. These context objects are
effectively shims over the existing context objects in TF and TFLite.
#### InitContext
An instance of this class is passed to the op kernel during its initialization.
```
template <typename SubType>
class InitContext {
public:
// Read the given attribute and populate the given value.
template <typename AttrType>
absl::Status GetAttr(const std::string& attr_name, AttrType* value) const;
};
```
#### InvokeContext
An instance of this class is passed to the op kernel during its invocation.
```
template <typename SubType>
class InvokeContext {
public:
// Read an input tensor
ConstTensorViewOr GetInput(const int idx) const;
// Get a mutable output tensor
TensorViewOr GetOutput(const int idx, const Shape& shape) const;
};
```
#### ShapeInferenceContext
An instance of this class is passed to the op kernel during its shape inference.
```
template <typename SubType>
class ShapeInferenceContext {
public:
// Read an input tensor shape
ShapeOr GetInputShape(const int idx) const;
// Set an output tensor shape
absl::Status SetOutputShape(const int idx, const Shape& shape);
// Read an input tensor during shape inference
ConstTensorViewOr GetInputTensor(const int idx) const;
};
```
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@kernels@shim@README.md@.PATH_END.py
|
{
"filename": "test_openai_tools.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/tests/unit_tests/output_parsers/test_openai_tools.py",
"type": "Python"
}
|
from collections.abc import AsyncIterator, Iterator
from typing import Any
import pytest
from pydantic import BaseModel, Field
from langchain_core.messages import (
AIMessage,
AIMessageChunk,
BaseMessage,
ToolCallChunk,
)
from langchain_core.output_parsers.openai_tools import (
JsonOutputKeyToolsParser,
JsonOutputToolsParser,
PydanticToolsParser,
)
from langchain_core.outputs import ChatGeneration
from langchain_core.utils.pydantic import PYDANTIC_MAJOR_VERSION
def _tool_call_chunk(arguments, name=None, call_id=None):
    """Build one streamed tool-call fragment as an AIMessageChunk.

    Only the very first fragment of a call carries the call id, the tool
    name and the "function" type marker; every later fragment carries just
    another slice of the JSON arguments string.
    """
    return AIMessageChunk(
        content="",
        additional_kwargs={
            "tool_calls": [
                {
                    "index": 0,
                    "id": call_id,
                    "function": {"arguments": arguments, "name": name},
                    "type": "function" if name is not None else None,
                }
            ]
        },
    )


# The JSON argument string, split exactly as the OpenAI API streamed it.
# Concatenated it reads:
# {"names": ["suzy", "jermaine", "alex"],
#  "person": {"age": 39, "hair_color": "brown", "job": "concierge"}}
_ARGUMENT_FRAGMENTS = [
    '{"na',
    'mes":',
    ' ["suz',
    'y", ',
    '"jerm',
    'aine",',
    ' "al',
    'ex"],',
    ' "pers',
    'on":',
    ' {"ag',
    'e": 39',
    ', "h',
    "air_c",
    'olor":',
    ' "br',
    'own",',
    ' "job"',
    ': "c',
    "oncie",
    'rge"}}',
]

# A canned stream: empty opener, the fragment that names the tool, the
# argument fragments, and an empty closing chunk.
STREAMED_MESSAGES: list = [
    AIMessageChunk(content=""),
    _tool_call_chunk(
        "", name="NameCollector", call_id="call_OwL7f5PEPJTYzw9sQlNJtCZl"
    ),
    *(_tool_call_chunk(fragment) for fragment in _ARGUMENT_FRAGMENTS),
    AIMessageChunk(content=""),
]
# Mirror of STREAMED_MESSAGES where chunks that carry tool-call kwargs are
# re-emitted with explicit ToolCallChunk objects attached.
STREAMED_MESSAGES_WITH_TOOL_CALLS = [
    message
    if not message.additional_kwargs
    else AIMessageChunk(
        content=message.content,
        additional_kwargs=message.additional_kwargs,
        tool_call_chunks=[
            ToolCallChunk(
                name=raw["function"].get("name"),
                args=raw["function"].get("arguments"),
                id=raw.get("id"),
                index=raw["index"],
            )
            for raw in message.additional_kwargs["tool_calls"]
        ],
    )
    for message in STREAMED_MESSAGES
]
# The sequence of partial JSON states the parser is expected to emit as the
# argument fragments above accumulate.  States that would be invalid JSON
# prefixes (e.g. an unterminated key) produce no new entry, which is why
# there are fewer states than fragments.
EXPECTED_STREAMED_JSON = [
    {},
    {"names": ["suz"]},
    {"names": ["suzy"]},
    {"names": ["suzy", "jerm"]},
    {"names": ["suzy", "jermaine"]},
    {"names": ["suzy", "jermaine", "al"]},
    {"names": ["suzy", "jermaine", "alex"]},
    {"names": ["suzy", "jermaine", "alex"], "person": {}},
    {"names": ["suzy", "jermaine", "alex"], "person": {"age": 39}},
    {"names": ["suzy", "jermaine", "alex"], "person": {"age": 39, "hair_color": "br"}},
    {
        "names": ["suzy", "jermaine", "alex"],
        "person": {"age": 39, "hair_color": "brown"},
    },
    {
        "names": ["suzy", "jermaine", "alex"],
        "person": {"age": 39, "hair_color": "brown", "job": "c"},
    },
    {
        "names": ["suzy", "jermaine", "alex"],
        "person": {"age": 39, "hair_color": "brown", "job": "concie"},
    },
    {
        "names": ["suzy", "jermaine", "alex"],
        "person": {"age": 39, "hair_color": "brown", "job": "concierge"},
    },
]
def _get_iter(use_tool_calls: bool = False) -> Any:
    """Return a single-argument callable yielding the canned message stream."""
    source = (
        STREAMED_MESSAGES_WITH_TOOL_CALLS if use_tool_calls else STREAMED_MESSAGES
    )

    def input_iter(_: Any) -> Iterator[BaseMessage]:
        yield from source

    return input_iter
def _get_aiter(use_tool_calls: bool = False) -> Any:
    """Async twin of ``_get_iter``: yields the canned stream asynchronously."""
    source = (
        STREAMED_MESSAGES_WITH_TOOL_CALLS if use_tool_calls else STREAMED_MESSAGES
    )

    async def input_iter(_: Any) -> AsyncIterator[BaseMessage]:
        for msg in source:
            yield msg

    return input_iter
def test_partial_json_output_parser() -> None:
    """Streaming the canned chunks yields progressively larger payloads."""
    expected: list = [[]] + [
        [{"type": "NameCollector", "args": chunk}]
        for chunk in EXPECTED_STREAMED_JSON
    ]
    for use_tool_calls in (False, True):
        chain = _get_iter(use_tool_calls) | JsonOutputToolsParser()
        assert list(chain.stream(None)) == expected
async def test_partial_json_output_parser_async() -> None:
    """Async counterpart of ``test_partial_json_output_parser``."""
    expected: list = [[]] + [
        [{"type": "NameCollector", "args": chunk}]
        for chunk in EXPECTED_STREAMED_JSON
    ]
    for use_tool_calls in (False, True):
        chain = _get_aiter(use_tool_calls) | JsonOutputToolsParser()
        assert [p async for p in chain.astream(None)] == expected
def test_partial_json_output_parser_return_id() -> None:
    """With return_id=True every parsed payload carries the call id."""
    expected: list = [[]] + [
        [
            {
                "type": "NameCollector",
                "args": chunk,
                "id": "call_OwL7f5PEPJTYzw9sQlNJtCZl",
            }
        ]
        for chunk in EXPECTED_STREAMED_JSON
    ]
    for use_tool_calls in (False, True):
        chain = _get_iter(use_tool_calls) | JsonOutputToolsParser(return_id=True)
        assert list(chain.stream(None)) == expected
def test_partial_json_output_key_parser() -> None:
    """The key parser unwraps the args dict for the requested tool name."""
    expected: list = [[]] + [[chunk] for chunk in EXPECTED_STREAMED_JSON]
    for use_tool_calls in (False, True):
        chain = _get_iter(use_tool_calls) | JsonOutputKeyToolsParser(
            key_name="NameCollector"
        )
        assert list(chain.stream(None)) == expected
async def test_partial_json_output_parser_key_async() -> None:
    """Async counterpart of ``test_partial_json_output_key_parser``."""
    expected: list = [[]] + [[chunk] for chunk in EXPECTED_STREAMED_JSON]
    for use_tool_calls in (False, True):
        chain = _get_aiter(use_tool_calls) | JsonOutputKeyToolsParser(
            key_name="NameCollector"
        )
        assert [p async for p in chain.astream(None)] == expected
def test_partial_json_output_key_parser_first_only() -> None:
    """first_tool_only=True streams bare payloads instead of lists."""
    for use_tool_calls in (False, True):
        chain = _get_iter(use_tool_calls) | JsonOutputKeyToolsParser(
            key_name="NameCollector", first_tool_only=True
        )
        assert list(chain.stream(None)) == EXPECTED_STREAMED_JSON
async def test_partial_json_output_parser_key_async_first_only() -> None:
    """Async counterpart of the first_tool_only streaming test."""
    for use_tool_calls in (False, True):
        chain = _get_aiter(use_tool_calls) | JsonOutputKeyToolsParser(
            key_name="NameCollector", first_tool_only=True
        )
        assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_JSON
class Person(BaseModel):
    """Schema for the ``person`` field produced by the NameCollector tool."""

    age: int
    hair_color: str
    job: str
# NOTE: the docstring below doubles as the tool description in the schema,
# so its wording is kept verbatim.
class NameCollector(BaseModel):
    """record names of all people mentioned"""

    names: list[str] = Field(..., description="all names mentioned")
    person: Person = Field(..., description="info about the main subject")
# Expected to change when we support more granular pydantic streaming.
# Only the last three JSON states above are complete enough to validate
# against the NameCollector schema, hence this shorter list.
EXPECTED_STREAMED_PYDANTIC = [
    NameCollector(
        names=["suzy", "jermaine", "alex"],
        person=Person(age=39, hair_color="brown", job="c"),
    ),
    NameCollector(
        names=["suzy", "jermaine", "alex"],
        person=Person(age=39, hair_color="brown", job="concie"),
    ),
    NameCollector(
        names=["suzy", "jermaine", "alex"],
        person=Person(age=39, hair_color="brown", job="concierge"),
    ),
]
def test_partial_pydantic_output_parser() -> None:
    """Only schema-complete states are emitted as pydantic objects."""
    for use_tool_calls in (False, True):
        chain = _get_iter(use_tool_calls) | PydanticToolsParser(
            tools=[NameCollector], first_tool_only=True
        )
        assert list(chain.stream(None)) == EXPECTED_STREAMED_PYDANTIC
async def test_partial_pydantic_output_parser_async() -> None:
    """Async counterpart of ``test_partial_pydantic_output_parser``."""
    for use_tool_calls in (False, True):
        chain = _get_aiter(use_tool_calls) | PydanticToolsParser(
            tools=[NameCollector], first_tool_only=True
        )
        assert [p async for p in chain.astream(None)] == EXPECTED_STREAMED_PYDANTIC
@pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="This test is for pydantic 2")
def test_parse_with_different_pydantic_2_v1() -> None:
    """Test with pydantic.v1.BaseModel from pydantic 2."""
    import pydantic

    class Forecast(pydantic.v1.BaseModel):
        temperature: int
        forecast: str

    # The dual v1/v2 typing in this codebase defeats the type checker here,
    # hence the ignore.
    parser = PydanticToolsParser(tools=[Forecast])  # type: ignore[list-item]
    tool_call = {
        "id": "call_OwL7f5PE",
        "name": "Forecast",
        "args": {"temperature": 20, "forecast": "Sunny"},
    }
    generation = ChatGeneration(
        message=AIMessage(content="", tool_calls=[tool_call]),
    )
    assert parser.parse_result([generation]) == [
        Forecast(temperature=20, forecast="Sunny")
    ]
@pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 2, reason="This test is for pydantic 2")
def test_parse_with_different_pydantic_2_proper() -> None:
    """Test with pydantic.BaseModel from pydantic 2."""
    import pydantic

    class Forecast(pydantic.BaseModel):
        temperature: int
        forecast: str

    # The dual v1/v2 typing in this codebase defeats the type checker here,
    # hence the ignore.
    parser = PydanticToolsParser(tools=[Forecast])  # type: ignore[list-item]
    tool_call = {
        "id": "call_OwL7f5PE",
        "name": "Forecast",
        "args": {"temperature": 20, "forecast": "Sunny"},
    }
    generation = ChatGeneration(
        message=AIMessage(content="", tool_calls=[tool_call]),
    )
    assert parser.parse_result([generation]) == [
        Forecast(temperature=20, forecast="Sunny")
    ]
@pytest.mark.skipif(PYDANTIC_MAJOR_VERSION != 1, reason="This test is for pydantic 1")
def test_parse_with_different_pydantic_1_proper() -> None:
    """Test with pydantic.BaseModel from pydantic 1."""
    import pydantic

    class Forecast(pydantic.BaseModel):
        temperature: int
        forecast: str

    # The dual v1/v2 typing in this codebase defeats the type checker here,
    # hence the ignore.
    parser = PydanticToolsParser(tools=[Forecast])  # type: ignore[list-item]
    tool_call = {
        "id": "call_OwL7f5PE",
        "name": "Forecast",
        "args": {"temperature": 20, "forecast": "Sunny"},
    }
    generation = ChatGeneration(
        message=AIMessage(content="", tool_calls=[tool_call]),
    )
    assert parser.parse_result([generation]) == [
        Forecast(temperature=20, forecast="Sunny")
    ]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@tests@unit_tests@output_parsers@test_openai_tools.py@.PATH_END.py
|
{
"filename": "_version.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py2/numpy/lib/_version.py",
"type": "Python"
}
|
"""Utility to compare (NumPy) version strings.
The NumpyVersion class allows properly comparing numpy version strings.
The LooseVersion and StrictVersion classes that distutils provides don't
work; they don't recognize anything like alpha/beta/rc/dev versions.
"""
from __future__ import division, absolute_import, print_function
import re
from numpy.compat import basestring
__all__ = ['NumpyVersion']
class NumpyVersion():
"""Parse and compare numpy version strings.
NumPy has the following versioning scheme (numbers given are examples; they
can be > 9) in principle):
- Released version: '1.8.0', '1.8.1', etc.
- Alpha: '1.8.0a1', '1.8.0a2', etc.
- Beta: '1.8.0b1', '1.8.0b2', etc.
- Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
- Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
- Development versions after a1: '1.8.0a1.dev-f1234afa',
'1.8.0b2.dev-f1234afa',
'1.8.1rc1.dev-f1234afa', etc.
- Development versions (no git hash available): '1.8.0.dev-Unknown'
Comparing needs to be done against a valid version string or other
`NumpyVersion` instance. Note that all development versions of the same
(pre-)release compare equal.
.. versionadded:: 1.9.0
Parameters
----------
vstring : str
NumPy version string (``np.__version__``).
Examples
--------
>>> from numpy.lib import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
"""
def __init__(self, vstring):
self.vstring = vstring
ver_main = re.match(r'\d[.]\d+[.]\d+', vstring)
if not ver_main:
raise ValueError("Not a valid numpy version string")
self.version = ver_main.group()
self.major, self.minor, self.bugfix = [int(x) for x in
self.version.split('.')]
if len(vstring) == ver_main.end():
self.pre_release = 'final'
else:
alpha = re.match(r'a\d', vstring[ver_main.end():])
beta = re.match(r'b\d', vstring[ver_main.end():])
rc = re.match(r'rc\d', vstring[ver_main.end():])
pre_rel = [m for m in [alpha, beta, rc] if m is not None]
if pre_rel:
self.pre_release = pre_rel[0].group()
else:
self.pre_release = ''
self.is_devversion = bool(re.search(r'.dev', vstring))
def _compare_version(self, other):
"""Compare major.minor.bugfix"""
if self.major == other.major:
if self.minor == other.minor:
if self.bugfix == other.bugfix:
vercmp = 0
elif self.bugfix > other.bugfix:
vercmp = 1
else:
vercmp = -1
elif self.minor > other.minor:
vercmp = 1
else:
vercmp = -1
elif self.major > other.major:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare_pre_release(self, other):
"""Compare alpha/beta/rc/final."""
if self.pre_release == other.pre_release:
vercmp = 0
elif self.pre_release == 'final':
vercmp = 1
elif other.pre_release == 'final':
vercmp = -1
elif self.pre_release > other.pre_release:
vercmp = 1
else:
vercmp = -1
return vercmp
def _compare(self, other):
if not isinstance(other, (basestring, NumpyVersion)):
raise ValueError("Invalid object to compare with NumpyVersion.")
if isinstance(other, basestring):
other = NumpyVersion(other)
vercmp = self._compare_version(other)
if vercmp == 0:
# Same x.y.z version, check for alpha/beta/rc
vercmp = self._compare_pre_release(other)
if vercmp == 0:
# Same version and same pre-release, check if dev version
if self.is_devversion is other.is_devversion:
vercmp = 0
elif self.is_devversion:
vercmp = -1
else:
vercmp = 1
return vercmp
def __lt__(self, other):
return self._compare(other) < 0
def __le__(self, other):
return self._compare(other) <= 0
def __eq__(self, other):
return self._compare(other) == 0
def __ne__(self, other):
return self._compare(other) != 0
def __gt__(self, other):
return self._compare(other) > 0
def __ge__(self, other):
return self._compare(other) >= 0
def __repr(self):
return "NumpyVersion(%s)" % self.vstring
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py2@numpy@lib@_version.py@.PATH_END.py
|
{
"filename": "runner.py",
"repo_name": "NannyML/nannyml",
"repo_path": "nannyml_extracted/nannyml-main/nannyml/runner.py",
"type": "Python"
}
|
# Author: Niels Nuyttens <niels@nannyml.com>
#
# License: Apache Software License 2.0
"""Used as an access point to start using NannyML in its most simple form."""
import logging
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
import pandas as pd
from rich.console import Console
from nannyml._typing import Calculator, Estimator, Result
from nannyml.config import Config, InputDataConfig, StoreConfig, WriterConfig
from nannyml.data_quality.missing import MissingValuesCalculator
from nannyml.data_quality.unseen import UnseenValuesCalculator
from nannyml.distribution.categorical import CategoricalDistributionCalculator
from nannyml.distribution.continuous import ContinuousDistributionCalculator
from nannyml.drift.multivariate.data_reconstruction import DataReconstructionDriftCalculator
from nannyml.drift.multivariate.domain_classifier import DomainClassifierCalculator
from nannyml.drift.univariate import UnivariateDriftCalculator
from nannyml.exceptions import InvalidArgumentsException
from nannyml.io import FileReader, FilesystemStore, Writer, WriterFactory
from nannyml.io.store import Store
from nannyml.performance_calculation import PerformanceCalculator
from nannyml.performance_estimation.confidence_based import CBPE
from nannyml.performance_estimation.direct_loss_estimation import DLE
from nannyml.stats.avg.calculator import SummaryStatsAvgCalculator
from nannyml.stats.count.calculator import SummaryStatsRowCountCalculator
from nannyml.stats.median.calculator import SummaryStatsMedianCalculator
from nannyml.stats.std.calculator import SummaryStatsStdCalculator
from nannyml.stats.sum.calculator import SummaryStatsSumCalculator
@dataclass
class RunContext:
    """Mutable progress/state snapshot handed to the runner's callbacks."""

    # Each calculator contributes this many steps: load/fit, calculate, write.
    STEPS_PER_CALCULATOR = 3

    current_step: int
    total_steps: int
    current_calculator: str
    # Raw dict form of the calculator config currently being processed.
    current_calculator_config: Optional[Dict[str, Any]] = None
    current_calculator_success: bool = True
    run_success: bool = True
    # Result of the most recently completed calculation, if any.
    result: Optional[Result] = None

    def increase_step(self):
        """Advance the progress counter by one step."""
        self.current_step += 1
@dataclass
class RunInput:
    """In-memory alternative to ``config.input`` for supplying datasets."""

    reference_data: pd.DataFrame
    analysis_data: pd.DataFrame
    # Optional targets, merged into analysis_data on target_join_column
    # (or joined on the index when no join column is given).
    target_data: Optional[pd.DataFrame] = None
    target_join_column: Optional[str] = None
@contextmanager
def run_context(config: Config):
    """Yield a fresh RunContext sized to the enabled calculators in ``config``."""
    enabled_calculators = [c for c in config.calculators if c.enabled]
    yield RunContext(
        current_step=0,
        total_steps=RunContext.STEPS_PER_CALCULATOR * len(enabled_calculators),
        current_calculator='',
    )
_logger = logging.getLogger(__name__)
class CalculatorFactory:
    """A factory class that produces Metric instances based on a given magic string or a metric specification."""

    # Maps the ``type`` strings accepted in configuration files to the
    # calculator/estimator classes the runner instantiates.
    registry: Dict[str, Type] = {
        'univariate_drift': UnivariateDriftCalculator,
        'reconstruction_error': DataReconstructionDriftCalculator,
        'domain_classifier': DomainClassifierCalculator,
        'performance': PerformanceCalculator,
        'cbpe': CBPE,
        'dle': DLE,
        'missing_values': MissingValuesCalculator,
        'unseen_values': UnseenValuesCalculator,
        'continuous_distribution': ContinuousDistributionCalculator,
        'categorical_distribution': CategoricalDistributionCalculator,
        'summary_stats_avg': SummaryStatsAvgCalculator,
        'summary_stats_row_count': SummaryStatsRowCountCalculator,
        'summary_stats_median': SummaryStatsMedianCalculator,
        'summary_stats_std': SummaryStatsStdCalculator,
        'summary_stats_sum': SummaryStatsSumCalculator,
    }

    @classmethod
    def register(cls, name: str, calculator_type: Union[Type[Calculator], Type[Estimator]]):
        # Registers (or overwrites) a calculator class under the given
        # configuration name.
        cls.registry[name] = calculator_type
class RunnerLogger:
    """Fans messages out to a stdlib logger and, optionally, a rich console."""

    def __init__(self, logger: logging.Logger, console: Optional[Console] = None):
        self.logger = logger
        self.console = console

    def log(self, message: object, log_level: int = logging.INFO):
        """Emit ``message``; the console mirrors INFO-level messages only."""
        if self.logger:
            self.logger.log(level=log_level, msg=message)
        if log_level == logging.INFO and self.console:
            self.console.log(message)
def run(  # noqa: C901
    config: Config,
    input: Optional[RunInput] = None,
    logger: logging.Logger = logging.getLogger(__name__),
    console: Optional[Console] = None,
    on_fit: Optional[Callable[[RunContext], Any]] = None,
    on_calculate: Optional[Callable[[RunContext], Any]] = None,
    on_write: Optional[Callable[[RunContext], Any]] = None,
    on_calculator_complete: Optional[Callable[[RunContext], Any]] = None,
    on_run_complete: Optional[Callable[[RunContext], Any]] = None,
    on_fail: Optional[Callable[[RunContext, Optional[Exception]], Any]] = None,
):
    """Run every enabled calculator from ``config`` over the provided data.

    Data comes either from ``input`` (in-memory) or from ``config.input``
    (read from disk) — providing both raises InvalidArgumentsException.
    Each calculator goes through three steps (load-or-fit, calculate, write);
    the ``on_*`` callbacks are invoked with the shared RunContext at each
    stage. A failure inside one calculator is reported via ``on_fail`` but
    does not stop the remaining calculators; a failure outside the
    per-calculator loop is re-raised after ``on_fail``.
    """
    run_logger = RunnerLogger(logger, console)
    try:
        with run_context(config) as context:
            # Resolve the data sources: explicit RunInput wins, but only if
            # the config does not also declare inputs.
            if input is not None:
                if config.input is not None:
                    raise InvalidArgumentsException("Both config.input and input provided. Please provide only one")
                reference_data = input.reference_data
                analysis_data = input.analysis_data
                if input.target_data is not None:
                    analysis_data = _add_targets_to_analysis_data(
                        analysis_data, input.target_data, input.target_join_column
                    )
            elif config.input is not None:
                run_logger.log("reading reference data", log_level=logging.DEBUG)
                reference_data = read_data(config.input.reference_data, run_logger)

                # read analysis data
                run_logger.log("reading analysis data", log_level=logging.DEBUG)
                analysis_data = read_data(config.input.analysis_data, run_logger)

                if config.input.target_data:
                    run_logger.log("reading target data", log_level=logging.DEBUG)
                    target_data = read_data(config.input.target_data, run_logger)
                    analysis_data = _add_targets_to_analysis_data(
                        analysis_data, target_data, config.input.target_data.join_column
                    )
            else:
                raise InvalidArgumentsException("no input data provided")

            for calculator_config in config.calculators:
                try:
                    context.current_calculator_config = calculator_config.dict()
                    context.current_calculator = calculator_config.name or calculator_config.type

                    # Disabled calculators are skipped entirely (note: they
                    # were already excluded from total_steps in run_context).
                    if not calculator_config.enabled:
                        continue

                    writers = get_output_writers(calculator_config.outputs, run_logger)

                    store = get_store(calculator_config.store, run_logger)

                    if calculator_config.type not in CalculatorFactory.registry:
                        raise InvalidArgumentsException(f"unknown calculator type '{calculator_config.type}'")

                    # first step: load or (create + fit) calculator
                    context.increase_step()
                    calc_cls = CalculatorFactory.registry[calculator_config.type]
                    if store and calculator_config.store:
                        run_logger.log(
                            f"[{context.current_step}/{context.total_steps}] '{context.current_calculator}': "
                            f"loading calculator from store"
                        )
                        # `invalidate` forces a refit even when a stored
                        # calculator exists.
                        if calculator_config.store.invalidate:
                            calc = None
                        else:
                            calc = store.load(filename=calculator_config.store.filename, as_type=calc_cls)
                        if calc is None:
                            reason = 'invalidated' if calculator_config.store.invalidate else 'not found in store'
                            run_logger.log(
                                f"calculator '{context.current_calculator}' {reason}. "
                                f"Creating, fitting and storing new instance",
                                log_level=logging.DEBUG,
                            )
                            calc = calc_cls(**calculator_config.params)
                            if on_fit:
                                on_fit(context)
                            calc.fit(reference_data)
                            store.store(obj=calc, filename=calculator_config.store.filename)
                    else:
                        run_logger.log(
                            f"[{context.current_step}/{context.total_steps}] '{context.current_calculator}': "
                            f"creating and fitting calculator"
                        )
                        calc = calc_cls(**calculator_config.params)
                        if on_fit:
                            on_fit(context)
                        calc.fit(reference_data)

                    # second step: perform calculations
                    context.increase_step()
                    run_logger.log(
                        f"[{context.current_step}/{context.total_steps}] '{context.current_calculator}': "
                        f"running calculator"
                    )
                    if on_calculate:
                        on_calculate(context)
                    # Calculators expose `calculate`, estimators `estimate`.
                    result = (
                        calc.calculate(analysis_data) if hasattr(calc, 'calculate') else calc.estimate(analysis_data)
                    )
                    context.result = result

                    # third step: write results
                    context.increase_step()
                    run_logger.log(
                        f"[{context.current_step}/{context.total_steps}] '{context.current_calculator}': "
                        f"writing out results"
                    )
                    if on_write:
                        on_write(context)
                    for writer, write_args in writers:
                        run_logger.log(f"writing results with {writer} and args {write_args}", log_level=logging.DEBUG)
                        writer.write(result, **write_args)

                    if on_calculator_complete:
                        on_calculator_complete(context)
                except Exception as exc:
                    # A failing calculator marks the run as unsuccessful but
                    # the loop continues with the next calculator.
                    context.current_calculator_success = False
                    context.run_success = False
                    if on_fail:
                        on_fail(context, exc)
                    run_logger.log(f"an unexpected exception occurred running '{calculator_config.type}': {exc}")
            if on_run_complete:
                on_run_complete(context)

    except Exception as exc:
        # Failure outside the per-calculator loop (e.g. reading input data):
        # notify and re-raise.
        context.current_calculator = None
        context.current_calculator_config = None
        context.run_success = False
        if on_fail:
            on_fail(context, exc)
        raise exc
def read_data(input_config: InputDataConfig, logger: Optional[RunnerLogger] = None) -> pd.DataFrame:
    """Read the dataset described by ``input_config`` into a DataFrame.

    Parameters
    ----------
    input_config : InputDataConfig
        Path, credentials and reader arguments for the dataset.
    logger : Optional[RunnerLogger]
        When given, an INFO message with the row count is emitted.
    """
    data = FileReader(
        filepath=input_config.path, credentials=input_config.credentials, read_args=input_config.read_args
    ).read()
    if logger:
        # BUGFIX: previously logged ``data.size`` (rows * columns) as the
        # row count; ``len(data)`` is the actual number of rows.
        logger.log(f"read {len(data)} rows from {input_config.path}")
    return data
def get_output_writers(
    outputs_config: Optional[List[WriterConfig]], logger: Optional[RunnerLogger] = None
) -> List[Tuple[Writer, Dict[str, Any]]]:
    """Instantiate a writer (plus its write kwargs) for each configured output."""
    if not outputs_config:
        return []
    return [
        (WriterFactory.create(cfg.type, cfg.params), cfg.write_args or {})
        for cfg in outputs_config
    ]
def get_store(store_config: Optional[StoreConfig], logger: Optional[RunnerLogger] = None) -> Optional[Store]:
    """Build a filesystem-backed store when configured, otherwise return None."""
    if not store_config:
        return None
    if logger:
        logger.log(f"using file system store with path '{store_config.path}'", log_level=logging.DEBUG)
    return FilesystemStore(
        root_path=store_config.path,
        credentials=store_config.credentials or {},
    )
def _get_ignore_errors(ignore_errors: Optional[bool], config: Config) -> bool:
    """Resolve the effective ignore_errors flag.

    An explicitly passed ``ignore_errors`` wins; otherwise fall back to
    ``config.ignore_errors``; when both are unset the default is False.
    (Annotation fixed: ``None`` was always an accepted, meaningful value,
    so the parameter is Optional[bool], not bool.)
    """
    if ignore_errors is not None:
        return ignore_errors
    if config.ignore_errors is None:
        return False
    return config.ignore_errors
def _add_targets_to_analysis_data(
analysis_data: pd.DataFrame, target_data: pd.DataFrame, join_column: Optional[str]
) -> pd.DataFrame:
if join_column is not None:
return analysis_data.merge(target_data, on=join_column)
else:
return analysis_data.join(target_data)
|
NannyMLREPO_NAMEnannymlPATH_START.@nannyml_extracted@nannyml-main@nannyml@runner.py@.PATH_END.py
|
{
"filename": "_matfuncs_inv_ssq.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/linalg/_matfuncs_inv_ssq.py",
"type": "Python"
}
|
"""
Matrix functions that use Pade approximation with inverse scaling and squaring.
"""
import warnings
import numpy as np
from scipy.linalg._matfuncs_sqrtm import SqrtmError, _sqrtm_triu
from scipy.linalg._decomp_schur import schur, rsf2csf
from scipy.linalg._matfuncs import funm
from scipy.linalg import svdvals, solve_triangular
from scipy.sparse.linalg._interface import LinearOperator
from scipy.sparse.linalg import onenormest
import scipy.special
class LogmRankWarning(UserWarning):
    """Base warning category for rank-related issues raised by logm."""
    pass
class LogmExactlySingularWarning(LogmRankWarning):
    """Warns that the input matrix is exactly singular."""
    pass
class LogmNearlySingularWarning(LogmRankWarning):
    """Warns that the input matrix is nearly singular."""
    pass
class LogmError(np.linalg.LinAlgError):
    """Raised when the matrix logarithm computation fails."""
    pass
class FractionalMatrixPowerError(np.linalg.LinAlgError):
    """Raised when the fractional matrix power computation fails."""
    pass
#TODO renovate or move this class when scipy operators are more mature
class _MatrixM1PowerOperator(LinearOperator):
"""
A representation of the linear operator (A - I)^p.
"""
def __init__(self, A, p):
if A.ndim != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected A to be like a square matrix')
if p < 0 or p != int(p):
raise ValueError('expected p to be a non-negative integer')
self._A = A
self._p = p
self.ndim = A.ndim
self.shape = A.shape
def _matvec(self, x):
for i in range(self._p):
x = self._A.dot(x) - x
return x
def _rmatvec(self, x):
for i in range(self._p):
x = x.dot(self._A) - x
return x
def _matmat(self, X):
for i in range(self._p):
X = self._A.dot(X) - X
return X
def _adjoint(self):
return _MatrixM1PowerOperator(self._A.T, self._p)
#TODO renovate or move this function when SciPy operators are more mature
def _onenormest_m1_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Efficiently estimate the 1-norm of (A - I)^p without forming the power.

    Parameters
    ----------
    A : ndarray
        Matrix whose shifted power's 1-norm is to be estimated.
    p : int
        Non-negative integer power.
    t : int, optional
        Accuracy/cost trade-off parameter: larger values cost more time and
        memory but produce a more accurate estimate.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1, an input to the
        linear operator producing an output of particularly large norm.
    w : ndarray, optional
        The vector Av with relatively large 1-norm compared to its input.
    """
    operator = _MatrixM1PowerOperator(A, p)
    return onenormest(operator, t=t, itmax=itmax,
                      compute_v=compute_v, compute_w=compute_w)
def _unwindk(z):
"""
Compute the scalar unwinding number.
Uses Eq. (5.3) in [1]_, and should be equal to (z - log(exp(z)) / (2 pi i).
Note that this definition differs in sign from the original definition
in equations (5, 6) in [2]_. The sign convention is justified in [3]_.
Parameters
----------
z : complex
A complex number.
Returns
-------
unwinding_number : integer
The scalar unwinding number of z.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
.. [2] Robert M. Corless and David J. Jeffrey,
"The unwinding number." Newsletter ACM SIGSAM Bulletin
Volume 30, Issue 2, June 1996, Pages 28-35.
.. [3] Russell Bradford and Robert M. Corless and James H. Davenport and
David J. Jeffrey and Stephen M. Watt,
"Reasoning about the elementary functions of complex analysis"
Annals of Mathematics and Artificial Intelligence,
36: 303-318, 2002.
"""
return int(np.ceil((z.imag - np.pi) / (2*np.pi)))
def _briggs_helper_function(a, k):
"""
Computes r = a^(1 / (2^k)) - 1.
This is algorithm (2) of [1]_.
The purpose is to avoid a danger of subtractive cancellation.
For more computational efficiency it should probably be cythonized.
Parameters
----------
a : complex
A complex number.
k : integer
A nonnegative integer.
Returns
-------
r : complex
The value r = a^(1 / (2^k)) - 1 computed with less cancellation.
Notes
-----
The algorithm as formulated in the reference does not handle k=0 or k=1
correctly, so these are special-cased in this implementation.
This function is intended to not allow `a` to belong to the closed
negative real axis, but this constraint is relaxed.
References
----------
.. [1] Awad H. Al-Mohy (2012)
"A more accurate Briggs method for the logarithm",
Numerical Algorithms, 59 : 393--402.
"""
if k < 0 or int(k) != k:
raise ValueError('expected a nonnegative integer k')
if k == 0:
return a - 1
elif k == 1:
return np.sqrt(a) - 1
else:
k_hat = k
if np.angle(a) >= np.pi / 2:
a = np.sqrt(a)
k_hat = k - 1
z0 = a - 1
a = np.sqrt(a)
r = 1 + a
for j in range(1, k_hat):
a = np.sqrt(a)
r = r * (1 + a)
r = z0 / r
return r
def _fractional_power_superdiag_entry(l1, l2, t12, p):
"""
Compute a superdiagonal entry of a fractional matrix power.
This is Eq. (5.6) in [1]_.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
p : float
A fractional power.
Returns
-------
f12 : complex
A superdiagonal entry of the fractional matrix power.
Notes
-----
Care has been taken to return a real number if possible when
all of the inputs are real numbers.
References
----------
.. [1] Nicholas J. Higham and Lijing lin (2011)
"A Schur-Pade Algorithm for Fractional Powers of a Matrix."
SIAM Journal on Matrix Analysis and Applications,
32 (3). pp. 1056-1078. ISSN 0895-4798
"""
if l1 == l2:
f12 = t12 * p * l1**(p-1)
elif abs(l2 - l1) > abs(l1 + l2) / 2:
f12 = t12 * ((l2**p) - (l1**p)) / (l2 - l1)
else:
# This is Eq. (5.5) in [1].
z = (l2 - l1) / (l2 + l1)
log_l1 = np.log(l1)
log_l2 = np.log(l2)
arctanh_z = np.arctanh(z)
tmp_a = t12 * np.exp((p/2)*(log_l2 + log_l1))
tmp_u = _unwindk(log_l2 - log_l1)
if tmp_u:
tmp_b = p * (arctanh_z + np.pi * 1j * tmp_u)
else:
tmp_b = p * arctanh_z
tmp_c = 2 * np.sinh(tmp_b) / (l2 - l1)
f12 = tmp_a * tmp_c
return f12
def _logm_superdiag_entry(l1, l2, t12):
"""
Compute a superdiagonal entry of a matrix logarithm.
This is like Eq. (11.28) in [1]_, except the determination of whether
l1 and l2 are sufficiently far apart has been modified.
Parameters
----------
l1 : complex
A diagonal entry of the matrix.
l2 : complex
A diagonal entry of the matrix.
t12 : complex
A superdiagonal entry of the matrix.
Returns
-------
f12 : complex
A superdiagonal entry of the matrix logarithm.
Notes
-----
Care has been taken to return a real number if possible when
all of the inputs are real numbers.
References
----------
.. [1] Nicholas J. Higham (2008)
"Functions of Matrices: Theory and Computation"
ISBN 978-0-898716-46-7
"""
if l1 == l2:
f12 = t12 / l1
elif abs(l2 - l1) > abs(l1 + l2) / 2:
f12 = t12 * (np.log(l2) - np.log(l1)) / (l2 - l1)
else:
z = (l2 - l1) / (l2 + l1)
u = _unwindk(np.log(l2) - np.log(l1))
if u:
f12 = t12 * 2 * (np.arctanh(z) + np.pi*1j*u) / (l2 - l1)
else:
f12 = t12 * 2 * np.arctanh(z) / (l2 - l1)
return f12
def _inverse_squaring_helper(T0, theta):
    """
    A helper function for inverse scaling and squaring for Pade approximation.

    Parameters
    ----------
    T0 : (N, N) array_like upper triangular
        Matrix involved in inverse scaling and squaring.
    theta : indexable
        The values theta[1] .. theta[7] must be available.
        They represent bounds related to Pade approximation, and they depend
        on the matrix function which is being computed.
        For example, different values of theta are required for
        matrix logarithm than for fractional matrix power.

    Returns
    -------
    R : (N, N) array_like upper triangular
        Composition of zero or more matrix square roots of T0, minus I.
    s : non-negative integer
        Number of square roots taken.
    m : positive integer
        The degree of the Pade approximation.

    Notes
    -----
    This subroutine appears as a chunk of lines within
    a couple of published algorithms; for example it appears
    as lines 4--35 in algorithm (3.1) of [1]_, and
    as lines 3--34 in algorithm (4.1) of [2]_.
    The instances of 'goto line 38' in algorithm (3.1) of [1]_
    probably mean 'goto line 36' and have been interpreted accordingly.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."

    .. [2] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    """
    if len(T0.shape) != 2 or T0.shape[0] != T0.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T0.shape
    T = T0

    # Find s0, the smallest s such that the spectral radius
    # of a certain diagonal matrix is at most theta[7].
    # Note that because theta[7] < 1,
    # this search will not terminate if any diagonal entry of T is zero.
    s0 = 0
    tmp_diag = np.diag(T)
    if np.count_nonzero(tmp_diag) != n:
        raise Exception('Diagonal entries of T must be nonzero')
    while np.max(np.absolute(tmp_diag - 1), initial=0.) > theta[7]:
        # Square-rooting the diagonal entries tracks the effect that taking
        # matrix square roots of T would have on its spectrum.
        tmp_diag = np.sqrt(tmp_diag)
        s0 += 1

    # Take matrix square roots of T.
    for i in range(s0):
        T = _sqrtm_triu(T)

    # Flow control in this section is a little odd.
    # This is because I am translating algorithm descriptions
    # which have GOTOs in the publication.
    s = s0
    # k counts speculative extra square roots taken below (capped at 2).
    k = 0
    # d_j estimates ||(T - I)^j||_1^(1/j); their maxima bound ||T - I||.
    d2 = _onenormest_m1_power(T, 2) ** (1/2)
    d3 = _onenormest_m1_power(T, 3) ** (1/3)
    a2 = max(d2, d3)
    m = None
    # Try the two cheapest Pade degrees first.
    for i in (1, 2):
        if a2 <= theta[i]:
            m = i
            break
    while m is None:
        if s > s0:
            # An extra square root has been taken since d3 was computed,
            # so refresh the estimate.
            d3 = _onenormest_m1_power(T, 3) ** (1/3)
        d4 = _onenormest_m1_power(T, 4) ** (1/4)
        a3 = max(d3, d4)
        if a3 <= theta[7]:
            # A Pade degree between 3 and 7 may be acceptable.
            j1 = min(i for i in (3, 4, 5, 6, 7) if a3 <= theta[i])
            if j1 <= 6:
                m = j1
                break
            elif a3 / 2 <= theta[5] and k < 2:
                # One more square root is expected to reach a degree <= 6;
                # only two such speculative square roots are allowed.
                k += 1
                T = _sqrtm_triu(T)
                s += 1
                continue
        d5 = _onenormest_m1_power(T, 5) ** (1/5)
        a4 = max(d4, d5)
        eta = min(a3, a4)
        # Accept the most expensive degrees (6 or 7) if the refined
        # estimate eta permits; otherwise take another square root.
        for i in (6, 7):
            if eta <= theta[i]:
                m = i
                break
        if m is not None:
            break
        T = _sqrtm_triu(T)
        s += 1

    # The subtraction of the identity is redundant here,
    # because the diagonal will be replaced for improved numerical accuracy,
    # but this formulation should help clarify the meaning of R.
    R = T - np.identity(n)

    # Replace the diagonal and first superdiagonal of T0^(1/(2^s)) - I
    # using formulas that have less subtractive cancellation.
    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
    if has_principal_branch:
        for j in range(n):
            a = T0[j, j]
            r = _briggs_helper_function(a, s)
            R[j, j] = r
        p = np.exp2(-s)
        for j in range(n-1):
            l1 = T0[j, j]
            l2 = T0[j+1, j+1]
            t12 = T0[j, j+1]
            f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
            R[j, j+1] = f12

    # Return the T-I matrix, the number of square roots, and the Pade degree.
    if not np.array_equal(R, np.triu(R)):
        raise Exception('R is not upper triangular')
    return R, s, m
def _fractional_power_pade_constant(i, t):
# A helper function for matrix fractional power.
if i < 1:
raise ValueError('expected a positive integer i')
if not (-1 < t < 1):
raise ValueError('expected -1 < t < 1')
if i == 1:
return -t
elif i % 2 == 0:
j = i // 2
return (-j + t) / (2 * (2*j - 1))
elif i % 2 == 1:
j = (i - 1) // 2
return (-j - t) / (2 * (2*j + 1))
else:
raise Exception(f'unnexpected value of i, i = {i}')
def _fractional_power_pade(R, t, m):
    """
    Evaluate the Pade approximation of a fractional matrix power.

    Evaluate the degree-m Pade approximation of R
    to the fractional matrix power t using the continued fraction
    in bottom-up fashion using algorithm (4.1) in [1]_.

    Parameters
    ----------
    R : (N, N) array_like
        Upper triangular matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.
    m : positive integer
        Degree of Pade approximation.

    Returns
    -------
    U : (N, N) array_like
        The degree-m Pade approximation of R to the fractional power t.
        This matrix will be upper triangular.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    if m < 1 or int(m) != m:
        raise ValueError('expected a positive integer m')
    if not (-1 < t < 1):
        raise ValueError('expected -1 < t < 1')
    R = np.asarray(R)
    if len(R.shape) != 2 or R.shape[0] != R.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = R.shape
    ident = np.identity(n)
    # Start from the innermost term of the continued fraction and
    # work outward, solving a triangular system at each level.
    acc = R * _fractional_power_pade_constant(2*m, t)
    for j in reversed(range(1, 2*m)):
        acc = solve_triangular(ident + acc,
                               R * _fractional_power_pade_constant(j, t))
    U = ident + acc
    if not np.array_equal(U, np.triu(U)):
        raise Exception('U is not upper triangular')
    return U
def _remainder_matrix_power_triu(T, t):
    """
    Compute a fractional power of an upper triangular matrix.

    The fractional power is restricted to fractions -1 < t < 1.
    This uses algorithm (3.1) of [1]_.
    The Pade approximation itself uses algorithm (4.1) of [2]_.

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."

    .. [2] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    # Bounds theta[m] used to select the Pade degree m (1..7).
    m_to_theta = {
            1: 1.51e-5,
            2: 2.24e-3,
            3: 1.88e-2,
            4: 6.04e-2,
            5: 1.24e-1,
            6: 2.00e-1,
            7: 2.79e-1,
            }
    n, n = T.shape
    T0 = T
    T0_diag = np.diag(T0)
    if np.array_equal(T0, np.diag(T0_diag)):
        # Diagonal matrix: take entrywise fractional powers directly.
        U = np.diag(T0_diag ** t)
    else:
        R, s, m = _inverse_squaring_helper(T0, m_to_theta)

        # Evaluate the Pade approximation.
        # Note that this function expects the negative of the matrix
        # returned by the inverse squaring helper.
        U = _fractional_power_pade(-R, t, m)

        # Undo the inverse scaling and squaring.
        # Be less clever about this
        # if the principal branch does not exist at T0;
        # this happens when a diagonal entry of T0
        # is negative with imaginary part 0.
        eivals = np.diag(T0)
        has_principal_branch = all(x.real > 0 or x.imag != 0 for x in eivals)
        # First iteration (i == s): refresh the diagonal and superdiagonal
        # with explicit low-cancellation formulas; the remaining iterations
        # (i < s) square the result s times to undo the s square roots.
        for i in range(s, -1, -1):
            if i < s:
                U = U.dot(U)
            else:
                if has_principal_branch:
                    p = t * np.exp2(-i)
                    U[np.diag_indices(n)] = T0_diag ** p
                    for j in range(n-1):
                        l1 = T0[j, j]
                        l2 = T0[j+1, j+1]
                        t12 = T0[j, j+1]
                        f12 = _fractional_power_superdiag_entry(l1, l2, t12, p)
                        U[j, j+1] = f12
    if not np.array_equal(U, np.triu(U)):
        raise Exception('U is not upper triangular')
    return U
def _remainder_matrix_power(A, t):
    """
    Compute the fractional power of a matrix, for fractions -1 < t < 1.

    This uses algorithm (3.1) of [1]_.
    The Pade approximation itself uses algorithm (4.1) of [2]_.

    Parameters
    ----------
    A : (N, N) array_like
        Matrix whose fractional power to evaluate.
    t : float
        Fractional power between -1 and 1 exclusive.

    Returns
    -------
    X : (N, N) array_like
        The fractional power of the matrix.

    References
    ----------
    .. [1] Nicholas J. Higham and Lijing Lin (2013)
           "An Improved Schur-Pade Algorithm for Fractional Powers
           of a Matrix and their Frechet Derivatives."

    .. [2] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('input must be a square array')
    n, n = A.shape

    # Reduce A to an upper triangular T (with T = Z^H A Z when a Schur
    # decomposition is needed), keeping a real dtype where possible.
    if np.array_equal(A, np.triu(A)):
        T, Z = A, None
    elif np.isrealobj(A):
        T, Z = schur(A)
        if not np.array_equal(T, np.triu(T)):
            T, Z = rsf2csf(T, Z)
    else:
        T, Z = schur(A, output='complex')

    # Zeros on the diagonal of the triangular matrix are forbidden,
    # because the inverse scaling and squaring cannot deal with it.
    diag_of_T = np.diag(T)
    if np.count_nonzero(diag_of_T) != n:
        raise FractionalMatrixPowerError(
                'cannot use inverse scaling and squaring to find '
                'the fractional matrix power of a singular matrix')

    # A negative real eigenvalue forces the computation into the
    # complex domain.
    if np.isrealobj(T) and np.min(diag_of_T) < 0:
        T = T.astype(complex)

    # Fractional power of the triangular factor, then undo the
    # similarity transform if one was applied.
    U = _remainder_matrix_power_triu(T, t)
    if Z is None:
        return U
    return Z.dot(U).dot(np.conjugate(Z).T)
def _fractional_matrix_power(A, p):
"""
Compute the fractional power of a matrix.
See the fractional_matrix_power docstring in matfuncs.py for more info.
"""
A = np.asarray(A)
if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
raise ValueError('expected a square matrix')
if p == int(p):
return np.linalg.matrix_power(A, int(p))
# Compute singular values.
s = svdvals(A)
# Inverse scaling and squaring cannot deal with a singular matrix,
# because the process of repeatedly taking square roots
# would not converge to the identity matrix.
if s[-1]:
# Compute the condition number relative to matrix inversion,
# and use this to decide between floor(p) and ceil(p).
k2 = s[0] / s[-1]
p1 = p - np.floor(p)
p2 = p - np.ceil(p)
if p1 * k2 ** (1 - p1) <= -p2 * k2:
a = int(np.floor(p))
b = p1
else:
a = int(np.ceil(p))
b = p2
try:
R = _remainder_matrix_power(A, b)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
except np.linalg.LinAlgError:
pass
# If p is negative then we are going to give up.
# If p is non-negative then we can fall back to generic funm.
if p < 0:
X = np.empty_like(A)
X.fill(np.nan)
return X
else:
p1 = p - np.floor(p)
a = int(np.floor(p))
b = p1
R, info = funm(A, lambda x: pow(x, b), disp=False)
Q = np.linalg.matrix_power(A, a)
return Q.dot(R)
def _logm_triu(T):
    """
    Compute matrix logarithm of an upper triangular matrix.

    The matrix logarithm is the inverse of
    expm: expm(logm(`T`)) == `T`

    Parameters
    ----------
    T : (N, N) array_like
        Upper triangular matrix whose logarithm to evaluate

    Returns
    -------
    logm : (N, N) ndarray
        Matrix logarithm of `T`

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012)
           "Improved Inverse Scaling and Squaring Algorithms
           for the Matrix Logarithm."
           SIAM Journal on Scientific Computing, 34 (4). C152-C169.
           ISSN 1095-7197

    .. [2] Nicholas J. Higham (2008)
           "Functions of Matrices: Theory and Computation"
           ISBN 978-0-898716-46-7

    .. [3] Nicholas J. Higham and Lijing lin (2011)
           "A Schur-Pade Algorithm for Fractional Powers of a Matrix."
           SIAM Journal on Matrix Analysis and Applications,
           32 (3). pp. 1056-1078. ISSN 0895-4798

    """
    T = np.asarray(T)
    if len(T.shape) != 2 or T.shape[0] != T.shape[1]:
        raise ValueError('expected an upper triangular square matrix')
    n, n = T.shape

    # Construct T0 with the appropriate type,
    # depending on the dtype and the spectrum of T.
    # A real matrix with a negative diagonal entry needs a complex log.
    T_diag = np.diag(T)
    keep_it_real = np.isrealobj(T) and np.min(T_diag, initial=0.) >= 0
    if keep_it_real:
        T0 = T
    else:
        T0 = T.astype(complex)

    # Define bounds given in Table (2.1).
    theta = (None,
             1.59e-5, 2.31e-3, 1.94e-2, 6.21e-2,
             1.28e-1, 2.06e-1, 2.88e-1, 3.67e-1,
             4.39e-1, 5.03e-1, 5.60e-1, 6.09e-1,
             6.52e-1, 6.89e-1, 7.21e-1, 7.49e-1)

    # s square roots were taken; m is the Pade degree; R = T0^(1/2^s) - I.
    R, s, m = _inverse_squaring_helper(T0, theta)

    # Evaluate U = 2**s r_m(T - I) using the partial fraction expansion (1.1).
    # This requires the nodes and weights
    # corresponding to degree-m Gauss-Legendre quadrature.
    # These quadrature arrays need to be transformed from the [-1, 1] interval
    # to the [0, 1] interval.
    nodes, weights = scipy.special.p_roots(m)
    nodes = nodes.real
    if nodes.shape != (m,) or weights.shape != (m,):
        raise Exception('internal error')
    nodes = 0.5 + 0.5 * nodes
    weights = 0.5 * weights
    ident = np.identity(n)
    U = np.zeros_like(R)
    # Each quadrature term contributes alpha * (I + beta*R)^(-1) R.
    for alpha, beta in zip(weights, nodes):
        U += solve_triangular(ident + beta*R, alpha*R)
    U *= np.exp2(s)

    # Skip this step if the principal branch
    # does not exist at T0; this happens when a diagonal entry of T0
    # is negative with imaginary part 0.
    has_principal_branch = all(x.real > 0 or x.imag != 0 for x in np.diag(T0))
    if has_principal_branch:

        # Recompute diagonal entries of U.
        U[np.diag_indices(n)] = np.log(np.diag(T0))

        # Recompute superdiagonal entries of U.
        # This indexing of this code should be renovated
        # when newer np.diagonal() becomes available.
        for i in range(n-1):
            l1 = T0[i, i]
            l2 = T0[i+1, i+1]
            t12 = T0[i, i+1]
            U[i, i+1] = _logm_superdiag_entry(l1, l2, t12)

    # Return the logm of the upper triangular matrix.
    if not np.array_equal(U, np.triu(U)):
        raise Exception('U is not upper triangular')
    return U
def _logm_force_nonsingular_triangular_matrix(T, inplace=False):
# The input matrix should be upper triangular.
# The eps is ad hoc and is not meant to be machine precision.
tri_eps = 1e-20
abs_diag = np.absolute(np.diag(T))
if np.any(abs_diag == 0):
exact_singularity_msg = 'The logm input matrix is exactly singular.'
warnings.warn(exact_singularity_msg, LogmExactlySingularWarning, stacklevel=3)
if not inplace:
T = T.copy()
n = T.shape[0]
for i in range(n):
if not T[i, i]:
T[i, i] = tri_eps
elif np.any(abs_diag < tri_eps):
near_singularity_msg = 'The logm input matrix may be nearly singular.'
warnings.warn(near_singularity_msg, LogmNearlySingularWarning, stacklevel=3)
return T
def _logm(A):
    """
    Compute the matrix logarithm.

    See the logm docstring in matfuncs.py for more info.

    Notes
    -----
    In this function we look at triangular matrices that are similar
    to the input matrix. If any diagonal entry of such a triangular matrix
    is exactly zero then the original matrix is singular.
    The matrix logarithm does not exist for such matrices,
    but in such cases we will pretend that the diagonal entries that are zero
    are actually slightly positive by an ad-hoc amount, in the interest
    of returning something more useful than NaN. This will cause a warning.

    """
    A = np.asarray(A)
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected a square matrix')

    # If the input matrix dtype is integer then copy to a float dtype matrix.
    if issubclass(A.dtype.type, np.integer):
        A = np.asarray(A, dtype=float)

    keep_it_real = np.isrealobj(A)
    try:
        if np.array_equal(A, np.triu(A)):
            # Already triangular: patch the diagonal and take the
            # triangular logarithm directly.
            A = _logm_force_nonsingular_triangular_matrix(A)
            # A negative real diagonal entry forces a complex logarithm.
            if np.min(np.diag(A), initial=0.) < 0:
                A = A.astype(complex)
            return _logm_triu(A)
        else:
            # Triangularize via a Schur decomposition, staying real
            # only if the real Schur form happens to be triangular.
            if keep_it_real:
                T, Z = schur(A)
                if not np.array_equal(T, np.triu(T)):
                    T, Z = rsf2csf(T, Z)
            else:
                T, Z = schur(A, output='complex')
            T = _logm_force_nonsingular_triangular_matrix(T, inplace=True)
            U = _logm_triu(T)
            # Undo the similarity transform: logm(A) = Z logm(T) Z^H.
            ZH = np.conjugate(Z).T
            return Z.dot(U).dot(ZH)
    except (SqrtmError, LogmError):
        # The triangular algorithm signalled failure; return a NaN
        # matrix instead of propagating the exception.
        X = np.empty_like(A)
        X.fill(np.nan)
        return X
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@linalg@_matfuncs_inv_ssq.py@.PATH_END.py
|
{
"filename": "SlideSurfaces.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/FSISPH/SlideSurfaces.py",
"type": "Python"
}
|
from SpheralCompiledPackages import *
from spheralDimensions import spheralDimensions
dims = spheralDimensions()
# clean up the smoothing with the different normal calculations
#-------------------------------------------------------------------------------
# Convenient constructor for slide surfaces.
#-------------------------------------------------------------------------------
# RE LINE 20-21
# if we have different smoothing lengths at a material interface w/ slip active,
# the E0 error of the SPH kernel will cause the surface normals to have really
# bad values two-or-so rows in from the interface. To get around this issue,
# whenever we base our surface normal on its same-material neighbors we need
# an extra smoothing step to reorient normals a few rows back. So effectively
# we have an optionally default:
# surfaceNormalMethod = DifferentMaterial
# no smoothing
# surfaceNormalMethod = SameMaterial | AllMaterial | MassWeighted
# smoothing
# this should be cleaned up once we have a better idea of what the best
# approach is.
#-------------------------------------------------------------------------------
# Template for the per-dimension factory functions; the exec loop below
# generates makeSlideSurfaces1d / 2d / 3d from it.  contactTypes is a
# flattened numNodeLists x numNodeLists table whose (i, j) entry is 1
# when that pair of node lists forms a slide surface.
SlideSurfaceFactoryString = """
def makeSlideSurfaces%(dim)s(dataBase,
                             slideSurfaces=None):
    contactTypes = [0]*(dataBase.numNodeLists**2)
    if slideSurfaces:
        # create the map nodelist --> index
        nodeLists = dataBase.nodeLists()
        nodeListMap = {}
        for i in range(dataBase.numNodeLists):
            nodeListMap[nodeLists[i]]=i
        # table (2d->1d) this should be fixed later
        for slide in slideSurfaces:
            nodeListi = nodeListMap[slide[0]]
            nodeListj = nodeListMap[slide[1]]
            contactTypes[dataBase.numNodeLists*nodeListi+nodeListj]=1
            contactTypes[nodeListi+dataBase.numNodeLists*nodeListj]=1
    result = SlideSurface%(dim)s(dataBase,
                                 vector_of_int(contactTypes))
    return result
"""

# Instantiate one factory per compiled spatial dimension;
# "%id" % dim produces the "1d"/"2d"/"3d" suffix for the template.
for dim in dims:
    exec(SlideSurfaceFactoryString % {"dim": "%id" % dim})
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@FSISPH@SlideSurfaces.py@.PATH_END.py
|
{
"filename": "Readme.md",
"repo_name": "lukaswenzl/Magnification_bias_estimation_in_galaxy_surveys",
"repo_path": "Magnification_bias_estimation_in_galaxy_surveys_extracted/Magnification_bias_estimation_in_galaxy_surveys-main/Readme.md",
"type": "Markdown"
}
|
## Magnification bias estimation
In this repository, you can find our code to estimate magnification bias for a galaxy sample with a complex photometric selection for the example of SDSS BOSS. The code provided is for the example of CMASS (see Magnification_bias_estimate_example_CMASS.ipynb) and also works for the LOWZ, z1 and z3 samples.
This is the underlying code of the publication Wenzl, Chen, Bean 2023 https://arxiv.org/abs/2308.05892
We also provide a template to apply our approach to other surveys. The information needed to apply the approach to other surveys consists of:
* The galaxy catalog including the magnitudes used for the photometric selection
* The exact conditions used for the photometric selection
* An understanding of how the magnitudes used behave under lensing. In our work for SDSS BOSS we characterized this for magnitudes that capture the full light of the galaxy, psf magnitudes and aperture magnitudes. If you need other magnitudes you need to characterize them yourself.
See magnification_bias_template_other_surveys.py to get started.
|
lukaswenzlREPO_NAMEMagnification_bias_estimation_in_galaxy_surveysPATH_START.@Magnification_bias_estimation_in_galaxy_surveys_extracted@Magnification_bias_estimation_in_galaxy_surveys-main@Readme.md@.PATH_END.py
|
{
"filename": "_namelength.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter/hoverlabel/_namelength.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NamelengthValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Validator for the ``scatter.hoverlabel.namelength`` property.

    An integer validator configured with defaults ``array_ok=True``,
    ``edit_type="none"`` and ``min=-1``; callers may override any of
    these through keyword arguments.
    """

    def __init__(
        self, plotly_name="namelength", parent_name="scatter.hoverlabel", **kwargs
    ):
        # kwargs.pop(...) lets callers override the defaults while the
        # remaining keyword arguments are forwarded unchanged.
        super(NamelengthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "none"),
            min=kwargs.pop("min", -1),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter@hoverlabel@_namelength.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/tests/alembic/migrations/__init__.py",
"type": "Python"
}
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@tests@alembic@migrations@__init__.py@.PATH_END.py
|
|
{
"filename": "test_waypoint.py",
"repo_name": "dsavransky/EXOSIMS",
"repo_path": "EXOSIMS_extracted/EXOSIMS-master/tests/util/test_waypoint.py",
"type": "Python"
}
|
import unittest
import numpy as np
import sys, os.path
import astropy.units as u
import inspect
import EXOSIMS.util.waypoint as wp
"""
waypoint.py module unit tests
Sonny Rappaport, June 2021 (in format of code for test_deltaMag)
General strategy: I put in arbitrary inputs and ensure that the dictionary
generated has the correct sums. I do not check the plot/that a file has been
generated.
"""
class Test_waypoint(unittest.TestCase):
    """Unit tests for EXOSIMS.util.waypoint.

    Each test feeds arbitrary completeness/integration-time inputs to
    wp.waypoint and checks that the returned summary dictionary has the
    expected sums; the generated plot/file output is not checked.
    """

    def test1(self):
        """Testing the waypoint function for various arbitrary inputs"""

        # Empty input: no stars, zero totals.
        comps = []
        intTimes = [] * u.d
        self.assertDictEqual(
            wp.waypoint(comps, intTimes, 365, None, None),
            {"numStars": 0, "Total Completeness": 0, "Total intTime": 0},
        )

        # All three observations fit within the 365-day budget.
        comps = [1, 2, 3]
        intTimes = [1, 2, 3] * u.d
        self.assertDictEqual(
            wp.waypoint(comps, intTimes, 365, None, None),
            {"numStars": 3, "Total Completeness": 6, "Total intTime": 6 * u.d},
        )

        # Only two of the three 180-day observations fit in 365 days.
        comps = [3, 3, 3]
        intTimes = [180, 180, 180] * u.d
        self.assertDictEqual(
            wp.waypoint(comps, intTimes, 365, None, None),
            {"numStars": 2, "Total Completeness": 6, "Total intTime": 360 * u.d},
        )


if __name__ == "__main__":
    unittest.main()
|
dsavranskyREPO_NAMEEXOSIMSPATH_START.@EXOSIMS_extracted@EXOSIMS-master@tests@util@test_waypoint.py@.PATH_END.py
|
{
"filename": "install_headers.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/setuptools/py3/setuptools/_distutils/command/install_headers.py",
"type": "Python"
}
|
"""distutils.command.install_headers
Implements the Distutils 'install_headers' command, to install C/C++ header
files to the Python include directory."""
from ..core import Command
# XXX force is never used
class install_headers(Command):
    """Command that copies the distribution's C/C++ header files into
    the configured Python include directory."""

    description = "install C/C++ header files"

    user_options = [
        ('install-dir=', 'd', "directory to install header files to"),
        ('force', 'f', "force installation (overwrite existing files)"),
    ]

    boolean_options = ['force']

    def initialize_options(self):
        # Defaults; the real values are inherited from the parent
        # 'install' command in finalize_options().
        self.install_dir = None
        self.force = False
        self.outfiles = []

    def finalize_options(self):
        self.set_undefined_options(
            'install', ('install_headers', 'install_dir'), ('force', 'force')
        )

    def run(self):
        headers = self.distribution.headers
        if not headers:
            return
        self.mkpath(self.install_dir)
        for hdr in headers:
            copied, _ = self.copy_file(hdr, self.install_dir)
            self.outfiles.append(copied)

    def get_inputs(self):
        return self.distribution.headers or []

    def get_outputs(self):
        return self.outfiles
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@setuptools@py3@setuptools@_distutils@command@install_headers.py@.PATH_END.py
|
{
"filename": "hitran_downloader.py",
"repo_name": "spexod/iSLAT",
"repo_path": "iSLAT_extracted/iSLAT-master/iSLAT/COMPONENTS/hitran_downloader.py",
"type": "Python"
}
|
import os
import datetime
from COMPONENTS.Hitran_data import get_Hitran_data
from COMPONENTS.partition_function_writer import write_partition_function
from COMPONENTS.line_data_writer import write_line_data
def download_hitran_data(mols, basem, isot):
    """Download HITRAN 2020 line data for each requested molecule.

    For every (mol, basem, isot) triple, fetches line and partition
    function data via get_Hitran_data and writes them to
    'HITRANdata/data_Hitran_2020_<mol>.par'.  Files that already exist
    are left untouched and the download is skipped.

    Parameters
    ----------
    mols : list of str
        Labels used to name the output file for each species.
    basem : list of str
        Base molecule name passed to get_Hitran_data for each entry.
    isot : list of int
        Isotopologue number passed to get_Hitran_data for each entry.
    """
    # Example inputs kept for reference:
    #mols = ["H2", "HD", "H2O", "H218O", "CO2", "13CO2", "CO", "13CO", "C18O", "CH4", "HCN", "H13CN", "NH3", "OH", "C2H2", "13CCH2", "C2H4", "C4H2", "C2H6", "HC3N"]
    #basem = ["H2", "H2", "H2O", "H2O", "CO2", "CO2", "CO", "CO", "CO", "CH4", "HCN", "HCN", "NH3", "OH", "C2H2", "C2H2", "C2H4", "C4H2", "C2H6", "HC3N"]
    #isot = [1, 2, 1, 2, 1, 2, 1, 2, 3, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 1]
    #mols = ["O2"]
    #basem = ["O2", "O2"]
    #isot = [1, 2]
    min_wave = 0.3  # micron
    max_wave = 1000  # micron
    # Convert wavelength (micron) to wavenumber (cm^-1).
    # NOTE(review): wavenumber decreases as wavelength grows, so min_vu
    # (from min_wave) is numerically the LARGER wavenumber; presumably
    # get_Hitran_data expects the pair in exactly this order -- confirm.
    min_vu = 1 / (min_wave / 1E6) / 100.
    max_vu = 1 / (max_wave / 1E6) / 100.

    print(' ')
    print ('Checking for HITRAN files: ...')

    for mol, bm, iso in zip(mols, basem, isot):
        save_folder = 'HITRANdata'
        file_path = os.path.join(save_folder, "data_Hitran_2020_{:}.par".format(mol))

        # Cached file: do not re-download.
        if os.path.exists(file_path):
            print("File already exists for mol: {:}. Skipping.".format(mol))
            continue

        print("Downloading data for mol: {:}".format(mol))
        Htbl, qdata, M, G = get_Hitran_data(bm, iso, min_vu, max_vu)

        os.makedirs(save_folder, exist_ok=True)  # Create the folder if it doesn't exist

        # Header lines record molecule id (M), isotopologue and gid (G),
        # followed by the partition function and the line data.
        with open(file_path, 'w') as fh:
            fh.write("# HITRAN 2020 {:}; id:{:}; iso:{:};gid:{:}\n".format(mol, M, iso, G))
            fh.write("# Downloaded from the Hitran website\n")
            fh.write("# {:s}\n".format(str(datetime.date.today())))
            fh = write_partition_function(fh, qdata)
            fh = write_line_data(fh, Htbl)

        print("Data for Mol: {:} downloaded and saved.".format(mol))
|
spexodREPO_NAMEiSLATPATH_START.@iSLAT_extracted@iSLAT-master@iSLAT@COMPONENTS@hitran_downloader.py@.PATH_END.py
|
{
"filename": "demo_calculate_formation_efficiency_per_solar_mass_evolved-checkpoint.ipynb",
"repo_name": "FloorBroekgaarden/Double-Compact-Object-Mergers",
"repo_path": "Double-Compact-Object-Mergers_extracted/Double-Compact-Object-Mergers-main/demo_read_hdf5_file/.ipynb_checkpoints/demo_calculate_formation_efficiency_per_solar_mass_evolved-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
# from __future__ import division # un comment if you use python 2 !
import numpy as np
import matplotlib.pyplot as plt
import h5py as h5
import time
import sys
import copy
#Quick fudge to make import from ../Scripts work
sys.path.append('../Scripts')
import gc
# import ClassCosmicIntegrator as CI #Given settings and redshifts returns rates (2D arrays) Loads the data
from PostProcessingScripts import *
import ClassCOMPAS as CC ###
# import ClassFormationChannels as FC
# from ClassFormationChannels_5mainchannels import *
import pandas as pd
from astropy import units as u
from astropy import constants as const
dictDCOtypeDCOlabel = {'BBH':'BHBH', 'BNS':'NSNS', 'BHNS':'BHNS'}
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import h5py as h5
import numpy as np
import os
import matplotlib.pyplot as plt
import pandas as pd
import string
```
<style>.container { width:100% !important; }</style>
```python
# Metallicity grid (metallicity Z) used throughout this notebook,
# spanning Z = 1e-4 to Z = 0.03 in approximately logarithmic steps.
metallicityGrid =[0.0001, 0.00011, 0.00012, 0.00014, 0.00016, 0.00017,\
                  0.00019, 0.00022, 0.00024, 0.00027, 0.0003, 0.00034, \
                  0.00037, 0.00042, 0.00047, 0.00052, 0.00058, 0.00065,\
                  0.00073, 0.00081, 0.0009, 0.00101, 0.00113, 0.00126,\
                  0.0014, 0.00157, 0.00175, 0.00195, 0.00218, 0.00243, \
                  0.00272, 0.00303, 0.00339, 0.00378, 0.00422, 0.00471, \
                  0.00526, 0.00587, 0.00655, 0.00732, 0.00817, 0.00912, \
                  0.01018, 0.01137, 0.01269, 0.01416, 0.01581, 0.01765, 0.01971, 0.022, 0.0244, 0.02705, 0.03]
```
```python
def calculateEfficiencies(DCOtype='BBH', pathCOMPASOutput='/Volumes/Andromeda/DATA/AllDCO_bugfix/', BPS_model='A'):
    """
    Calculate the formation efficiency (formation rate per unit of
    star-forming mass) of the requested DCO type on the metallicity grid.

    DCOtype='BBH'        : type of compact object merger; valid options are
                           the keys of dictDCOtypeDCOlabel: 'BBH', 'BNS', 'BHNS'
    pathCOMPASOutput=... : path to the directory with the COMPAS data
    BPS_model='A'        : alphabetical letter label of the binary population
                           synthesis model variation to use

    Returns an array of formation efficiencies; each element is the
    efficiency at one grid point of the metallicity grid.
    """
    DCOname = dictDCOtypeDCOlabel[DCOtype]  # e.g. 'BBH' -> 'BHBH' (kept for reference)
    print('now at DCO type ', DCOtype)

    # BUG FIX: this loop was previously hard-coded to ['A'], so the
    # BPS_model argument was silently ignored.
    for ind_m, bps_model in enumerate([BPS_model]):
        print()
        print('now at model ', alphabetDirDict[bps_model])

        # Optimistic CE is off unless we are doing an optimistic variation.
        OPTIMISTIC = False
        if (bps_model == 'F') or (bps_model == 'K'):
            OPTIMISTIC = True
            print('doing optimistic version of %s' % alphabetDirDict[bps_model])

        # path to datafile
        path = pathCOMPASOutput + alphabetDirDict[bps_model] + '/' + 'COMPASOutput.h5'

        # Only keep DCOs that merge within a Hubble time.
        Data = CC.COMPASData(path=path, lazyData=True, Mlower=5.,
                             Mupper=150, binaryFraction=1)
        Data.setCOMPASDCOmask(types=DCOtype, withinHubbleTime=True, optimistic=OPTIMISTIC)
        Data.setCOMPASData()

        metallicities = Data.metallicitySystems
        seeds = Data.seeds[Data.Hubble == True]  # currently unused, kept for reference
        weights = Data.weight

        # Equivalent star-forming mass represented by the COMPAS
        # simulation box (per metallicity).
        Data_totalMassEvolvedPerZ = Data.totalMassEvolvedPerZ
        Data_metallicityGrid = Data.metallicityGrid
        del Data

        listt = [0.0001, 0.00011, 0.00012, 0.00014, 0.00016, 0.00017,
                 0.00019, 0.00022, 0.00024, 0.00027, 0.0003, 0.00034,
                 0.00037, 0.00042, 0.00047, 0.00052, 0.00058, 0.00065,
                 0.00073, 0.00081, 0.0009, 0.00101, 0.00113, 0.00126,
                 0.0014, 0.00157, 0.00175, 0.00195, 0.00218, 0.00243,
                 0.00272, 0.00303, 0.00339, 0.00378, 0.00422, 0.00471,
                 0.00526, 0.00587, 0.00655, 0.00732, 0.00817, 0.00912,
                 0.01018, 0.01137, 0.01269, 0.01416, 0.01581, 0.01765,
                 0.01971, 0.022, 0.0244, 0.02705, 0.03]
        formationRateTotal = np.zeros(len(listt))
        for nrZ, Z in enumerate(listt):
            # Data_metallicityGrid is built from the DCOs present in the
            # file, so metallicity points that produced zero DCOs may be
            # missing; guard the lookup accordingly.
            if Z in Data_metallicityGrid:
                maskZ = (metallicities == Z)
                # Weighted count: binaries are not equally represented.
                formationRateTotal[nrZ] = np.sum(weights[maskZ])
        # Normalize the metallicities present in the data grid by the
        # star-forming mass evolved at each of them.
        maskZgridinZlist = np.in1d(listt, Data_metallicityGrid)
        formationRateTotal[maskZgridinZlist] = np.divide(
            formationRateTotal[maskZgridinZlist], Data_totalMassEvolvedPerZ)

    return formationRateTotal
```
```python
formationRateTotal_bps_model_A = calculateEfficiencies(DCOtype='BHNS', pathCOMPASOutput='/Volumes/Andromeda/DATA/AllDCO_bugfix/',BPS_model='A')
```
now at DCO type BHNS
now at model fiducial
weighted samples :-D
Remember to self.setCOMPASDCOmask() and self.setCOMPASData()
```python
print('The formation efficiencies in units of solar masses formed is:')
print(formationRateTotal_bps_model_A, '[Msun^-1]' )
print()
print(len(formationRateTotal_bps_model_A)==len(metallicityGrid))
print('this array is the length of the metallicity grid')
print()
print('the corresponding metallicities are:')
print(metallicityGrid)
```
The formation efficiencies in units of solar masses formed is:
[1.58560182e-06 2.02601743e-06 2.15477123e-06 2.16511354e-06
2.19468563e-06 2.24674886e-06 2.16498965e-06 2.39332787e-06
2.28243537e-06 2.72055955e-06 2.93979334e-06 3.31560631e-06
3.46749672e-06 4.05415032e-06 4.46359232e-06 4.77668565e-06
4.98050850e-06 5.12896260e-06 5.21805749e-06 5.47563555e-06
5.01577607e-06 5.47159403e-06 6.86509794e-06 8.34919475e-06
9.16911839e-06 9.96446701e-06 1.03680382e-05 1.10439890e-05
1.15122292e-05 1.18382793e-05 1.20692436e-05 1.20478815e-05
1.13977322e-05 1.07781090e-05 1.02268566e-05 9.73814687e-06
9.64172572e-06 9.41452071e-06 9.00329420e-06 9.01697914e-06
8.71761082e-06 7.76716714e-06 6.67396581e-06 4.90476977e-06
3.89202814e-06 2.79480403e-06 1.92073035e-06 1.22670624e-06
8.75799528e-07 4.73163072e-08 1.80446617e-08 3.93986879e-10
1.09016786e-08] [Msun^-1]
True
this array is the length of the metallicity grid
the corresponding metallicities are:
[0.0001, 0.00011, 0.00012, 0.00014, 0.00016, 0.00017, 0.00019, 0.00022, 0.00024, 0.00027, 0.0003, 0.00034, 0.00037, 0.00042, 0.00047, 0.00052, 0.00058, 0.00065, 0.00073, 0.00081, 0.0009, 0.00101, 0.00113, 0.00126, 0.0014, 0.00157, 0.00175, 0.00195, 0.00218, 0.00243, 0.00272, 0.00303, 0.00339, 0.00378, 0.00422, 0.00471, 0.00526, 0.00587, 0.00655, 0.00732, 0.00817, 0.00912, 0.01018, 0.01137, 0.01269, 0.01416, 0.01581, 0.01765, 0.01971, 0.022, 0.0244, 0.02705, 0.03]
```python
```
```python
```
|
LONG_NAME_28.py
|
{
"filename": "simulate_burst.py",
"repo_name": "CHIMEFRB/fitburst",
"repo_path": "fitburst_extracted/fitburst-main/fitburst/pipelines/simulate_burst.py",
"type": "Python"
}
|
#! /bin/env/python
"""Simulate a multi-component radio burst, plot it, and save it to disk.

Builds a three-component burst model on a (freq, time) grid, adds Gaussian
noise, displays the dynamic spectrum, and writes the data plus metadata and
injection parameters to ``simulated_data.npz`` in the fitburst-generic format.
"""
import matplotlib
matplotlib.rcParams["font.family"] = "times"
matplotlib.rcParams["font.size"] = 15
matplotlib.rcParams["xtick.labelsize"] = 12
matplotlib.rcParams["ytick.labelsize"] = 12
from copy import deepcopy
import matplotlib.pyplot as plt
import fitburst as fb
import numpy as np
import sys
# define dimensions of the data (frequencies in MHz, times in seconds).
is_dedispersed = True
num_freq = 2 ** 8
num_time = 2 ** 7
freq_lo = 1200.
freq_hi = 1600.
time_lo = 0.
time_hi = 0.08
freqs = np.linspace(freq_lo, freq_hi, num = num_freq)
times = np.linspace(time_lo, time_hi, num = num_time)
# define physical parameters for a dispersed burst to simulate;
# each key maps to one value per burst component (three components here).
params = {
    "amplitude"            : [0., 0., 0.],
    "arrival_time"         : [0.03, 0.04, 0.05],
    "burst_width"          : [0.001, 0.002, 0.0005],
    "dm"                   : [349.5, 349.5, 349.5],
    "dm_index"             : [-2., -2., -2.],
    "ref_freq"             : [1500., 1400., 1300.],
    "scattering_index"     : [-4., -4., -4.],
    "scattering_timescale" : [0., 0., 0.],
    "spectral_index"       : [0., 0., 0.],
    "spectral_running"     : [-300., -300., -300.],
}
num_components = len(params["dm"])
# if the data are treated as already dedispersed, the model must use DM = 0;
# the original "dm" entries above are still saved as the injection record.
new_params = deepcopy(params)
if is_dedispersed:
    new_params["dm"] = [0.] * num_components
# define model object for CHIME/FRB data and load in parameter values.
model_obj = fb.analysis.model.SpectrumModeler(
    freqs,
    times,
    is_dedispersed = is_dedispersed,
    num_components = num_components,
    verbose = True,
)
model_obj.update_parameters(new_params)
# now compute model and add noise (note: unseeded, so not reproducible).
model = model_obj.compute_model()
model += np.random.normal(0., 0.2, size = model.shape)
# plot the dynamic spectrum.
plt.pcolormesh(times, freqs, model)
plt.xlabel("Time (s)")
# BUG FIX: the original called plt.xlabel twice, overwriting the time label
# and leaving the y axis unlabeled; the frequency label belongs on the y axis.
plt.ylabel("Observing Frequency (MHz)")
plt.show()
# finally, save data into fitburst-generic format.
metadata = {
    "bad_chans" : [],
    "freqs_bin0" : freqs[0],
    "is_dedispersed" : is_dedispersed,
    "num_freq" : num_freq,
    "num_time" : num_time,
    "times_bin0" : 0.,
    "res_freq" : freqs[1] - freqs[0],
    "res_time" : times[1] - times[0]
}
np.savez(
    "simulated_data.npz",
    data_full = model,
    metadata = metadata,
    burst_parameters = params,
)
|
CHIMEFRBREPO_NAMEfitburstPATH_START.@fitburst_extracted@fitburst-main@fitburst@pipelines@simulate_burst.py@.PATH_END.py
|
{
"filename": "_tickformatstops.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2dcontour/colorbar/_tickformatstops.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Auto-generated validator for the `tickformatstops` compound-array
    property of `histogram2dcontour.colorbar`; each array element is
    validated as a `Tickformatstop` object."""

    def __init__(
        self,
        plotly_name="tickformatstops",
        parent_name="histogram2dcontour.colorbar",
        **kwargs,
    ):
        # Delegate to the generic compound-array validator. "data_class_str"
        # names the graph-object class used for each element, and "data_docs"
        # is the per-field help text surfaced to users; both are overridable
        # via kwargs by the code generator.
        super(TickformatstopsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            dtickrange
                range [*min*, *max*], where "min", "max" -
                dtick values which describe some zoom level, it
                is possible to omit "min" or "max" value by
                passing "null"
            enabled
                Determines whether or not this stop is used. If
                `false`, this stop is ignored even within its
                `dtickrange`.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            value
                string - dtickformat for described zoom level,
                the same as "tickformat"
            """,
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2dcontour@colorbar@_tickformatstops.py@.PATH_END.py
|
{
"filename": "convolutions.ipynb",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/docs/notebooks/convolutions.ipynb",
"type": "Jupyter Notebook"
}
|
# Generalized convolutions in JAX
<!--* freshness: { reviewed: '2024-04-08' } *-->
[](https://colab.research.google.com/github/jax-ml/jax/blob/main/docs/notebooks/convolutions.ipynb) [](https://kaggle.com/kernels/welcome?src=https://github.com/jax-ml/jax/blob/main/docs/notebooks/convolutions.ipynb)
JAX provides a number of interfaces to compute convolutions across data, including:
- {func}`jax.numpy.convolve` (also {func}`jax.numpy.correlate`)
- {func}`jax.scipy.signal.convolve` (also {func}`~jax.scipy.signal.correlate`)
- {func}`jax.scipy.signal.convolve2d` (also {func}`~jax.scipy.signal.correlate2d`)
- {func}`jax.lax.conv_general_dilated`
For basic convolution operations, the `jax.numpy` and `jax.scipy` operations are usually sufficient. If you want to do more general batched multi-dimensional convolution, the `jax.lax` function is where you should start.
## Basic one-dimensional convolution
Basic one-dimensional convolution is implemented by {func}`jax.numpy.convolve`, which provides a JAX interface for {func}`numpy.convolve`. Here is a simple example of 1D smoothing implemented via a convolution:
```python
import matplotlib.pyplot as plt
from jax import random
import jax.numpy as jnp
import numpy as np
key = random.key(1701)
x = jnp.linspace(0, 10, 500)
y = jnp.sin(x) + 0.2 * random.normal(key, shape=(500,))
window = jnp.ones(10) / 10
y_smooth = jnp.convolve(y, window, mode='same')
plt.plot(x, y, 'lightgray')
plt.plot(x, y_smooth, 'black');
```

The `mode` parameter controls how boundary conditions are treated; here we use `mode='same'` to ensure that the output is the same size as the input.
For more information, see the {func}`jax.numpy.convolve` documentation, or the documentation associated with the original {func}`numpy.convolve` function.
## Basic N-dimensional convolution
For *N*-dimensional convolution, {func}`jax.scipy.signal.convolve` provides a similar interface to that of {func}`jax.numpy.convolve`, generalized to *N* dimensions.
For example, here is a simple approach to de-noising an image based on convolution with a Gaussian filter:
```python
from scipy import misc
import jax.scipy as jsp
fig, ax = plt.subplots(1, 3, figsize=(12, 5))
# Load a sample image; compute mean() to convert from RGB to grayscale.
image = jnp.array(misc.face().mean(-1))
ax[0].imshow(image, cmap='binary_r')
ax[0].set_title('original')
# Create a noisy version by adding random Gaussian noise
key = random.key(1701)
noisy_image = image + 50 * random.normal(key, image.shape)
ax[1].imshow(noisy_image, cmap='binary_r')
ax[1].set_title('noisy')
# Smooth the noisy image with a 2D Gaussian smoothing kernel.
x = jnp.linspace(-3, 3, 7)
window = jsp.stats.norm.pdf(x) * jsp.stats.norm.pdf(x[:, None])
smooth_image = jsp.signal.convolve(noisy_image, window, mode='same')
ax[2].imshow(smooth_image, cmap='binary_r')
ax[2].set_title('smoothed');
```

Like in the one-dimensional case, we use `mode='same'` to specify how we would like edges to be handled. For more information on available options in *N*-dimensional convolutions, see the {func}`jax.scipy.signal.convolve` documentation.
## General convolutions
For the more general types of batched convolutions often useful in the context of building deep neural networks, JAX and XLA offer the very general N-dimensional __conv_general_dilated__ function, but it's not very obvious how to use it. We'll give some examples of the common use-cases.
A survey of the family of convolutional operators, [a guide to convolutional arithmetic](https://arxiv.org/abs/1603.07285), is highly recommended reading!
Let's define a simple diagonal edge kernel:
```python
# 2D kernel - HWIO layout
kernel = jnp.zeros((3, 3, 3, 3), dtype=jnp.float32)
kernel += jnp.array([[1, 1, 0],
[1, 0,-1],
[0,-1,-1]])[:, :, jnp.newaxis, jnp.newaxis]
print("Edge Conv kernel:")
plt.imshow(kernel[:, :, 0, 0]);
```
Edge Conv kernel:

And we'll make a simple synthetic image:
```python
# NHWC layout
img = jnp.zeros((1, 200, 198, 3), dtype=jnp.float32)
for k in range(3):
x = 30 + 60*k
y = 20 + 60*k
img = img.at[0, x:x+10, y:y+10, k].set(1.0)
print("Original Image:")
plt.imshow(img[0]);
```
Original Image:

### lax.conv and lax.conv_with_general_padding
These are the simple convenience functions for convolutions
️⚠️ The convenience `lax.conv` and `lax.conv_with_general_padding` helper functions assume __NCHW__ images and __OIHW__ kernels.
```python
from jax import lax
out = lax.conv(jnp.transpose(img,[0,3,1,2]), # lhs = NCHW image tensor
jnp.transpose(kernel,[3,2,0,1]), # rhs = OIHW conv kernel tensor
(1, 1), # window strides
'SAME') # padding mode
print("out shape: ", out.shape)
print("First output channel:")
plt.figure(figsize=(10,10))
plt.imshow(np.array(out)[0,0,:,:]);
```
out shape: (1, 3, 200, 198)
First output channel:

```python
out = lax.conv_with_general_padding(
jnp.transpose(img,[0,3,1,2]), # lhs = NCHW image tensor
jnp.transpose(kernel,[2,3,0,1]), # rhs = IOHW conv kernel tensor
(1, 1), # window strides
((2,2),(2,2)), # general padding 2x2
(1,1), # lhs/image dilation
(1,1)) # rhs/kernel dilation
print("out shape: ", out.shape)
print("First output channel:")
plt.figure(figsize=(10,10))
plt.imshow(np.array(out)[0,0,:,:]);
```
out shape: (1, 3, 202, 200)
First output channel:

### Dimension Numbers define dimensional layout for conv_general_dilated
The important argument is the 3-tuple of axis layout arguments:
(Input Layout, Kernel Layout, Output Layout)
- __N__ - batch dimension
- __H__ - spatial height
- __W__ - spatial width
- __C__ - channel dimension
- __I__ - kernel _input_ channel dimension
- __O__ - kernel _output_ channel dimension
⚠️ To demonstrate the flexibility of dimension numbers we choose a __NHWC__ image and __HWIO__ kernel convention for `lax.conv_general_dilated` below.
```python
dn = lax.conv_dimension_numbers(img.shape, # only ndim matters, not shape
kernel.shape, # only ndim matters, not shape
('NHWC', 'HWIO', 'NHWC')) # the important bit
print(dn)
```
ConvDimensionNumbers(lhs_spec=(0, 3, 1, 2), rhs_spec=(3, 2, 0, 1), out_spec=(0, 3, 1, 2))
#### SAME padding, no stride, no dilation
```python
out = lax.conv_general_dilated(img, # lhs = image tensor
kernel, # rhs = conv kernel tensor
(1,1), # window strides
'SAME', # padding mode
(1,1), # lhs/image dilation
(1,1), # rhs/kernel dilation
dn) # dimension_numbers = lhs, rhs, out dimension permutation
print("out shape: ", out.shape)
print("First output channel:")
plt.figure(figsize=(10,10))
plt.imshow(np.array(out)[0,:,:,0]);
```
out shape: (1, 200, 198, 3)
First output channel:

#### VALID padding, no stride, no dilation
```python
out = lax.conv_general_dilated(img, # lhs = image tensor
kernel, # rhs = conv kernel tensor
(1,1), # window strides
'VALID', # padding mode
(1,1), # lhs/image dilation
(1,1), # rhs/kernel dilation
dn) # dimension_numbers = lhs, rhs, out dimension permutation
print("out shape: ", out.shape, "DIFFERENT from above!")
print("First output channel:")
plt.figure(figsize=(10,10))
plt.imshow(np.array(out)[0,:,:,0]);
```
out shape: (1, 198, 196, 3) DIFFERENT from above!
First output channel:

#### SAME padding, 2,2 stride, no dilation
```python
out = lax.conv_general_dilated(img, # lhs = image tensor
kernel, # rhs = conv kernel tensor
(2,2), # window strides
'SAME', # padding mode
(1,1), # lhs/image dilation
(1,1), # rhs/kernel dilation
dn) # dimension_numbers = lhs, rhs, out dimension permutation
print("out shape: ", out.shape, " <-- half the size of above")
plt.figure(figsize=(10,10))
print("First output channel:")
plt.imshow(np.array(out)[0,:,:,0]);
```
out shape: (1, 100, 99, 3) <-- half the size of above
First output channel:

#### VALID padding, no stride, rhs kernel dilation ~ Atrous convolution (excessive to illustrate)
```python
out = lax.conv_general_dilated(img, # lhs = image tensor
kernel, # rhs = conv kernel tensor
(1,1), # window strides
'VALID', # padding mode
(1,1), # lhs/image dilation
(12,12), # rhs/kernel dilation
dn) # dimension_numbers = lhs, rhs, out dimension permutation
print("out shape: ", out.shape)
plt.figure(figsize=(10,10))
print("First output channel:")
plt.imshow(np.array(out)[0,:,:,0]);
```
out shape: (1, 176, 174, 3)
First output channel:

#### VALID padding, no stride, lhs=input dilation ~ Transposed Convolution
```python
out = lax.conv_general_dilated(img, # lhs = image tensor
kernel, # rhs = conv kernel tensor
(1,1), # window strides
((0, 0), (0, 0)), # padding mode
(2,2), # lhs/image dilation
(1,1), # rhs/kernel dilation
dn) # dimension_numbers = lhs, rhs, out dimension permutation
print("out shape: ", out.shape, "<-- larger than original!")
plt.figure(figsize=(10,10))
print("First output channel:")
plt.imshow(np.array(out)[0,:,:,0]);
```
out shape: (1, 397, 393, 3) <-- larger than original!
First output channel:

We can use the last to, for instance, implement _transposed convolutions_:
```python
# The following is equivalent to tensorflow:
# N,H,W,C = img.shape
# out = tf.nn.conv2d_transpose(img, kernel, (N,2*H,2*W,C), (1,2,2,1))
# transposed conv = 180deg kernel rotation plus LHS dilation
# rotate kernel 180deg:
kernel_rot = jnp.rot90(jnp.rot90(kernel, axes=(0,1)), axes=(0,1))
# need a custom output padding:
padding = ((2, 1), (2, 1))
out = lax.conv_general_dilated(img, # lhs = image tensor
kernel_rot, # rhs = conv kernel tensor
(1,1), # window strides
padding, # padding mode
(2,2), # lhs/image dilation
(1,1), # rhs/kernel dilation
dn) # dimension_numbers = lhs, rhs, out dimension permutation
print("out shape: ", out.shape, "<-- transposed_conv")
plt.figure(figsize=(10,10))
print("First output channel:")
plt.imshow(np.array(out)[0,:,:,0]);
```
out shape: (1, 400, 396, 3) <-- transposed_conv
First output channel:

### 1D Convolutions
You aren't limited to 2D convolutions; a simple 1D demo is below:
```python
# 1D kernel - WIO layout
kernel = jnp.array([[[1, 0, -1], [-1, 0, 1]],
[[1, 1, 1], [-1, -1, -1]]],
dtype=jnp.float32).transpose([2,1,0])
# 1D data - NWC layout
data = np.zeros((1, 200, 2), dtype=jnp.float32)
for i in range(2):
for k in range(2):
x = 35*i + 30 + 60*k
data[0, x:x+30, k] = 1.0
print("in shapes:", data.shape, kernel.shape)
plt.figure(figsize=(10,5))
plt.plot(data[0]);
dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
('NWC', 'WIO', 'NWC'))
print(dn)
out = lax.conv_general_dilated(data, # lhs = image tensor
kernel, # rhs = conv kernel tensor
(1,), # window strides
'SAME', # padding mode
(1,), # lhs/image dilation
(1,), # rhs/kernel dilation
dn) # dimension_numbers = lhs, rhs, out dimension permutation
print("out shape: ", out.shape)
plt.figure(figsize=(10,5))
plt.plot(out[0]);
```
in shapes: (1, 200, 2) (3, 2, 2)
ConvDimensionNumbers(lhs_spec=(0, 2, 1), rhs_spec=(2, 1, 0), out_spec=(0, 2, 1))
out shape: (1, 200, 2)


### 3D Convolutions
```python
import matplotlib as mpl
# Random 3D kernel - HWDIO layout
kernel = jnp.array([
[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
[[0, -1, 0], [-1, 0, -1], [0, -1, 0]],
[[0, 0, 0], [0, 1, 0], [0, 0, 0]]],
dtype=jnp.float32)[:, :, :, jnp.newaxis, jnp.newaxis]
# 3D data - NHWDC layout
data = jnp.zeros((1, 30, 30, 30, 1), dtype=jnp.float32)
x, y, z = np.mgrid[0:1:30j, 0:1:30j, 0:1:30j]
data += (jnp.sin(2*x*jnp.pi)*jnp.cos(2*y*jnp.pi)*jnp.cos(2*z*jnp.pi))[None,:,:,:,None]
print("in shapes:", data.shape, kernel.shape)
dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
('NHWDC', 'HWDIO', 'NHWDC'))
print(dn)
out = lax.conv_general_dilated(data, # lhs = image tensor
kernel, # rhs = conv kernel tensor
(1,1,1), # window strides
'SAME', # padding mode
(1,1,1), # lhs/image dilation
(1,1,1), # rhs/kernel dilation
dn) # dimension_numbers
print("out shape: ", out.shape)
# Make some simple 3d density plots:
def make_alpha(cmap):
my_cmap = cmap(jnp.arange(cmap.N))
my_cmap[:,-1] = jnp.linspace(0, 1, cmap.N)**3
return mpl.colors.ListedColormap(my_cmap)
my_cmap = make_alpha(plt.cm.viridis)
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(x.ravel(), y.ravel(), z.ravel(), c=data.ravel(), cmap=my_cmap)
ax.axis('off')
ax.set_title('input')
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(x.ravel(), y.ravel(), z.ravel(), c=out.ravel(), cmap=my_cmap)
ax.axis('off')
ax.set_title('3D conv output');
```
in shapes: (1, 30, 30, 30, 1) (3, 3, 3, 1, 1)
ConvDimensionNumbers(lhs_spec=(0, 4, 1, 2, 3), rhs_spec=(4, 3, 0, 1, 2), out_spec=(0, 4, 1, 2, 3))
out shape: (1, 30, 30, 30, 1)


|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@docs@notebooks@convolutions.ipynb@.PATH_END.py
|
{
"filename": "test_make_watchlist_files.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/tests/unit/services/test_make_watchlist_files.py",
"type": "Python"
}
|
import context
import make_watchlist_files
import unittest
class MakeWatchlistFilesTest(unittest.TestCase):
    """Placeholder suite for make_watchlist_files -- no tests implemented yet;
    exists so the CI test runner has a discoverable target."""
if __name__ == '__main__':
    # xmlrunner (third-party "unittest-xml-reporting") emits JUnit-style XML
    # into ./test-reports so CI systems can pick up the results.
    import xmlrunner
    runner = xmlrunner.XMLTestRunner(output='test-reports')
    unittest.main(testRunner=runner)
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@tests@unit@services@test_make_watchlist_files.py@.PATH_END.py
|
{
"filename": "plot_slow_sequence_residual.py",
"repo_name": "lgbouma/gyro-interp",
"repo_path": "gyro-interp_extracted/gyro-interp-main/drivers/plot_slow_sequence_residual.py",
"type": "Python"
}
|
import os
import gyrointerp.plotting as gp
from gyrointerp.paths import RESULTSDIR
# Output directory for the slow-sequence residual figures.
PLOTDIR = os.path.join(RESULTSDIR, 'slow_sequence_residual')
if not os.path.exists(PLOTDIR):
    os.mkdir(PLOTDIR)
outdir = PLOTDIR

# One entry per figure: the keyword arguments forwarded to the plotter.
_FIGURES = [
    # many clusters, overplotted
    dict(ages=[10, 50, 120, 200, 300, 400], bounds_error='limit'),
    dict(ages=[120, 300, 670, 1000]),
    dict(ages=[120, 200, 300, 400, 500, 670]),
]

for _kwargs in _FIGURES:
    gp.plot_slow_sequence_residual(outdir, **_kwargs)
|
lgboumaREPO_NAMEgyro-interpPATH_START.@gyro-interp_extracted@gyro-interp-main@drivers@plot_slow_sequence_residual.py@.PATH_END.py
|
{
"filename": "plot_pca_vs_fa_model_selection.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/examples/decomposition/plot_pca_vs_fa_model_selection.py",
"type": "Python"
}
|
"""
===============================================================
Model selection with Probabilistic PCA and Factor Analysis (FA)
===============================================================
Probabilistic PCA and Factor Analysis are probabilistic models.
The consequence is that the likelihood of new data can be used
for model selection and covariance estimation.
Here we compare PCA and FA with cross-validation on low rank data corrupted
with homoscedastic noise (noise variance
is the same for each feature) or heteroscedastic noise (noise variance
is different for each feature). In a second step we compare the model
likelihood to the likelihoods obtained from shrinkage covariance estimators.
One can observe that with homoscedastic noise both FA and PCA succeed
in recovering the size of the low rank subspace. The likelihood with PCA
is higher than FA in this case. However PCA fails and overestimates
the rank when heteroscedastic noise is present. Under appropriate
circumstances (choice of the number of components), the held-out
data is more likely for low rank models than for shrinkage models.
The automatic estimation from
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604
by Thomas P. Minka is also compared.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
# %%
# Create the data
# ---------------
import numpy as np
from scipy import linalg
# Low-rank latent data: n_features-dim observations living on a rank-5
# subspace spanned by the first `rank` columns of a random orthonormal basis.
# NOTE: the rng draw order below is part of the example's reproducibility --
# do not reorder these calls.
n_samples, n_features, rank = 500, 25, 5
sigma = 1.0
rng = np.random.RandomState(42)
U, _, _ = linalg.svd(rng.randn(n_features, n_features))
X = np.dot(rng.randn(n_samples, rank), U[:, :rank].T)

# Adding homoscedastic noise (same variance for every feature)
X_homo = X + sigma * rng.randn(n_samples, n_features)

# Adding heteroscedastic noise (a different variance per feature)
sigmas = sigma * rng.rand(n_features) + sigma / 2.0
X_hetero = X + rng.randn(n_samples, n_features) * sigmas
# %%
# Fit the models
# --------------
import matplotlib.pyplot as plt
from sklearn.covariance import LedoitWolf, ShrunkCovariance
from sklearn.decomposition import PCA, FactorAnalysis
from sklearn.model_selection import GridSearchCV, cross_val_score
n_components = np.arange(0, n_features, 5)  # options for n_components


def compute_scores(X):
    """Return (pca_scores, fa_scores): mean cross-validated log-likelihood
    of PCA and FactorAnalysis for each candidate in ``n_components``."""
    estimator_pca = PCA(svd_solver="full")
    estimator_fa = FactorAnalysis()

    scores_pca = []
    scores_fa = []
    for k in n_components:
        # reuse the same estimator objects, just varying the rank
        estimator_pca.n_components = k
        estimator_fa.n_components = k
        scores_pca.append(np.mean(cross_val_score(estimator_pca, X)))
        scores_fa.append(np.mean(cross_val_score(estimator_fa, X)))

    return scores_pca, scores_fa
def shrunk_cov_score(X):
    """Mean cross-validated log-likelihood of a ShrunkCovariance model,
    with the shrinkage amount chosen by grid search."""
    search = GridSearchCV(
        ShrunkCovariance(), {"shrinkage": np.logspace(-2, 0, 30)}
    )
    best_model = search.fit(X).best_estimator_
    return np.mean(cross_val_score(best_model, X))
def lw_score(X):
    """Mean cross-validated log-likelihood of a Ledoit-Wolf covariance fit."""
    scores = cross_val_score(LedoitWolf(), X)
    return np.mean(scores)
# Fit each noise regime, report the selected ranks, and plot CV scores
# against the candidate number of components.
for X, title in [(X_homo, "Homoscedastic Noise"), (X_hetero, "Heteroscedastic Noise")]:
    pca_scores, fa_scores = compute_scores(X)
    n_components_pca = n_components[np.argmax(pca_scores)]
    n_components_fa = n_components[np.argmax(fa_scores)]

    # Minka's MLE-based automatic dimensionality choice for PCA.
    pca = PCA(svd_solver="full", n_components="mle")
    pca.fit(X)
    n_components_pca_mle = pca.n_components_

    print("best n_components by PCA CV = %d" % n_components_pca)
    print("best n_components by FactorAnalysis CV = %d" % n_components_fa)
    print("best n_components by PCA MLE = %d" % n_components_pca_mle)

    plt.figure()
    plt.plot(n_components, pca_scores, "b", label="PCA scores")
    plt.plot(n_components, fa_scores, "r", label="FA scores")
    plt.axvline(rank, color="g", label="TRUTH: %d" % rank, linestyle="-")
    plt.axvline(
        n_components_pca,
        color="b",
        label="PCA CV: %d" % n_components_pca,
        linestyle="--",
    )
    plt.axvline(
        n_components_fa,
        color="r",
        label="FactorAnalysis CV: %d" % n_components_fa,
        linestyle="--",
    )
    plt.axvline(
        n_components_pca_mle,
        color="k",
        label="PCA MLE: %d" % n_components_pca_mle,
        linestyle="--",
    )

    # compare with other covariance estimators
    plt.axhline(
        shrunk_cov_score(X),
        color="violet",
        label="Shrunk Covariance MLE",
        linestyle="-.",
    )
    plt.axhline(
        lw_score(X),
        color="orange",
        # BUG FIX: the original wrote `label="LedoitWolf MLE" % n_components_pca_mle`,
        # applying "%" to a format string with no placeholder, which raises
        # "TypeError: not all arguments converted during string formatting".
        label="LedoitWolf MLE",
        linestyle="-.",
    )
    plt.xlabel("nb of components")
    plt.ylabel("CV scores")
    plt.legend(loc="lower right")
    plt.title(title)
    plt.show()
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@examples@decomposition@plot_pca_vs_fa_model_selection.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "ExObsSim/ExoRad2-public",
"repo_path": "ExoRad2-public_extracted/ExoRad2-public-master/tests/conf.py",
"type": "Python"
}
|
skip_plot = True
|
ExObsSimREPO_NAMEExoRad2-publicPATH_START.@ExoRad2-public_extracted@ExoRad2-public-master@tests@conf.py@.PATH_END.py
|
{
"filename": "PhysicalConstants.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/PYB11/Material/PhysicalConstants.py",
"type": "Python"
}
|
#-------------------------------------------------------------------------------
# PhysicalConstants
#-------------------------------------------------------------------------------
from PYB11Generator import *
class PhysicalConstants:
"""
Choose the physical units for a given Spheral run.
This is done by constructing with the user choice for unit length, mass, and time in
SI units (m, kg, sec). All other constants are then derived from those choices.
"""
#...........................................................................
# Constructor
def pyinit(self,
unitLm = "const double",
unitMkg = "const double",
unitTsec = "const double",
unitTeK = ("const double", 1.0),
unitCcou = ("const double", 1.0)):
"Construct based on a unit length, unit mass, and unit time in SI units"
return
#...........................................................................
# Properties
unitLengthMeters = PYB11property("double", "unitLengthMeters", doc="unit of length in SI")
unitMassKg = PYB11property("double", "unitMassKg", doc="unit of mass in SI")
unitTimeSec = PYB11property("double", "unitTimeSec", doc="unit of time in SI")
unitTemperatureKelvin = PYB11property("double", "unitTemperatureKelvin", doc="unit of temperature in SI")
unitChargeCoulomb = PYB11property("double", "unitChargeCoulomb", doc="unit of charge in SI")
protonMass = PYB11property("double", "protonMass", doc="proton mass")
electronMass = PYB11property("double", "electronMass", doc="electron mass")
electronCharge = PYB11property("double", "electronCharge", doc="electron charge")
G = PYB11property("double", "G", doc="gravitational constant")
c = PYB11property("double", "c", doc="speed of light")
kB = PYB11property("double", "kB", doc="Boltzmann constant")
Navogadro = PYB11property("double", "Navogadro", doc="Avogadro's constant")
molarGasConstant = PYB11property("double", "molarGasConstant",
doc="R: the molar gas constant")
kelvinsToEnergyPerMole = PYB11property("double", "kelvinsToEnergyPerMole",
doc="Conversion factor from Kelvins to energy")
unitMassDensity = PYB11property("double", "unitMassDensity",
doc="What the unit mass density in these units corresponds to in SI")
stefanBoltzmannConstant = PYB11property("double", "stefanBoltzmannConstant",
doc="sigma: the Steffan-Boltzmann constant")
blackBodyConstant = PYB11property("double", "blackBodyConstant",
doc="a: the black body constant")
planckConstant = PYB11property("double", "planckConstant",
doc="h: the Planck constant")
unitEnergyJ = PYB11property("double", "unitEnergyJ",
doc="unit of energy in SI")
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@PYB11@Material@PhysicalConstants.py@.PATH_END.py
|
{
"filename": "OleFileIO_PL.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/olefile/py2/OleFileIO_PL.py",
"type": "Python"
}
|
#!/usr/local/bin/python
# -*- coding: latin-1 -*-
"""
olefile (formerly OleFileIO_PL)
Module to read/write Microsoft OLE2 files (also called Structured Storage or
Microsoft Compound Document File Format), such as Microsoft Office 97-2003
documents, Image Composer and FlashPix files, Outlook messages, ...
This version is compatible with Python 2.6+ and 3.x
Project website: http://www.decalage.info/olefile
olefile is copyright (c) 2005-2015 Philippe Lagadec (http://www.decalage.info)
olefile is based on the OleFileIO module from the PIL library v1.1.6
See: http://www.pythonware.com/products/pil/index.htm
The Python Imaging Library (PIL) is
Copyright (c) 1997-2005 by Secret Labs AB
Copyright (c) 1995-2005 by Fredrik Lundh
See source code and LICENSE.txt for information on usage and redistribution.
"""
# The OleFileIO_PL module is for backward compatibility
try:
# first try to import olefile for Python 2.6+/3.x
from olefile.olefile import *
# import metadata not covered by *:
from olefile.olefile import __version__, __author__, __date__
except:
# if it fails, fallback to the old version olefile2 for Python 2.x:
from olefile.olefile2 import *
# import metadata not covered by *:
from olefile.olefile2 import __doc__, __version__, __author__, __date__
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@olefile@py2@OleFileIO_PL.py@.PATH_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/funnelarea/_legendgrouptitle.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Legendgrouptitle(_BaseTraceHierarchyType):
    """Title shown for the legend group of a funnelarea trace."""

    # class properties
    # --------------------
    _parent_path_str = "funnelarea"
    _path_str = "funnelarea.legendgrouptitle"
    _valid_props = {"font", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this legend group's title font.

        The 'font' property accepts an instance of
        :class:`plotly.graph_objs.funnelarea.legendgrouptitle.Font` or a
        dict of string/value properties compatible with its constructor
        (color, family, lineposition, shadow, size, style, textcase,
        variant, weight).

        Returns
        -------
        plotly.graph_objs.funnelarea.legendgrouptitle.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the legend group.

        The 'text' property is a string (a number is converted to a
        string).

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.
        """

    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Legendgrouptitle object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.funnelarea.Legendgrouptitle`
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.

        Returns
        -------
        Legendgrouptitle
        """
        super(Legendgrouptitle, self).__init__("legendgrouptitle")

        # Internal construction path: adopt the parent and skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise `arg` into a plain dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.funnelarea.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnelarea.Legendgrouptitle`"""
            )

        # Handle skip_invalid / _validate flags.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties; explicit keyword arguments take precedence
        # over the corresponding entries found in `arg`.
        for prop_name, explicit in (("font", font), ("text", text)):
            candidate = arg.pop(prop_name, None)
            if explicit is not None:
                candidate = explicit
            if candidate is not None:
                self[prop_name] = candidate

        # Remaining entries in `arg` plus unknown kwargs go through the
        # generic processor (which honours skip_invalid).
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        self._skip_invalid = False
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@funnelarea@_legendgrouptitle.py@.PATH_END.py
|
{
"filename": "_width.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/bar/_width.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for `bar.width`: a non-negative number, arrays allowed."""

    def __init__(self, plotly_name="width", parent_name="bar", **kwargs):
        # Defaults below may be overridden by the caller through **kwargs.
        overridable = {"array_ok": True, "edit_type": "calc", "min": 0}
        settings = {key: kwargs.pop(key, default)
                    for key, default in overridable.items()}
        super(WidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **settings,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@bar@_width.py@.PATH_END.py
|
{
"filename": "tree_filtering.py",
"repo_name": "CarolineHaigh/mtobjects",
"repo_path": "mtobjects_extracted/mtobjects-master/mtolib/tree_filtering.py",
"type": "Python"
}
|
"""Filter a maxtree."""
import ctypes as ct
import numpy as np
import mtolib.significance_tests as mt_sig
from mtolib import _ctype_classes as mt_class
from mtolib.utils import time_function
# Get access to the compiled C maxtree library
# Defaults to float version
mto_lib = ct.CDLL('mtolib/lib/mt_objects.so')
def init_double_filtering(params):
    """Set up the double-precision version of the maxtree library.

    Replaces the module-level `mto_lib` handle when the pipeline works
    on 64-bit (double) image data; otherwise the float build is kept.
    """
    global mto_lib
    if params.d_type != ct.c_double:
        return
    # 64-bit images need the double-precision build of the shared library.
    mto_lib = ct.CDLL('mtolib/lib/mt_objects_double.so')
def up_tree():
    """Return a ctypes callback that processes a tree from root to leaves."""
    c_func = mto_lib.mt_significant_nodes_up
    return mt_class.SIGNODES_TYPE(c_func)
def down_tree():
    """Return a ctypes callback that processes a tree from leaves to root."""
    c_func = mto_lib.mt_significant_nodes_down
    return mt_class.SIGNODES_TYPE(c_func)
def default_sig_test():
    """Return the default significance test bound to the loaded C library."""
    return mt_sig.default_sig_test(mto_lib)
def get_c_significant_nodes(lib_name):
    """Load a compiled mt_object shared library by path and wrap its
    `significant_nodes` entry point as a ctypes callback."""
    external_lib = ct.CDLL(lib_name)
    return mt_class.SIGNODES_TYPE(external_lib.significant_nodes)
def filter_tree(mt_in, image, params, sig_test=default_sig_test,
                sig_nodes_function=up_tree):
    """Find objects in a maxtree.

    Thin wrapper around filter_tree_timed that announces the step when
    verbose and reports the elapsed time via time_function.
    """
    if params.verbosity:
        print("\n---Finding Objects---")
    timed_args = (mt_in, image, params, sig_test, sig_nodes_function)
    return time_function(filter_tree_timed, timed_args,
                         params.verbosity, 'find objects')
def filter_tree_timed(mt_in, image, params, sig_test=default_sig_test,
                      sig_nodes_function=up_tree):
    """Filter a maxtree using a given significance test and processing method,
    and return an object id map.

    Parameters
    ----------
    mt_in : maxtree object exposing ctypes_maxtree()
    image : numpy array whose shape defines the output maps
    params : parameter object (bg_variance, gain, move_factor, alpha,
        verbosity, min_distance)
    sig_test : significance test factory (default: default_sig_test)
    sig_nodes_function : tree traversal direction (up_tree or down_tree)

    Returns
    -------
    (object_ids, sig_ancs) : pair of int32 numpy arrays, same shape as
        `image`, filled in place by the C library.
    """
    # Convert the maxtree object for ctypes compatibility
    mt = mt_in.ctypes_maxtree()
    # Declare an int type pointer type for the object id array
    object_id_type = ct.POINTER(ct.c_int32)
    # Create an object id array and get a pointer into its buffer.
    # NOTE: the numpy arrays must stay alive for the duration of the C call,
    # since the C side writes through these raw pointers.
    object_ids = np.zeros(image.shape, dtype=ct.c_int32)
    id_pointer = object_ids.ctypes.data_as(object_id_type)
    # Ditto for significant ancestors
    # (initialised to -3; presumably a "no significant ancestor" sentinel
    # understood by the C library — TODO confirm)
    sig_ancs = np.zeros(image.shape, dtype=ct.c_int32) -3
    sig_anc_pointer = sig_ancs.ctypes.data_as(object_id_type)
    # Get up/down tree functions if necessary: callers may pass either the
    # factory itself or an already-wrapped ctypes callback.
    if sig_nodes_function == up_tree:
        sig_nodes_function = up_tree()
    elif sig_nodes_function == down_tree:
        sig_nodes_function = down_tree()
    # Get sig test if necessary (same factory-or-instance convention)
    if sig_test == default_sig_test:
        sig_test = default_sig_test()
    # Create a parameters object mirroring the Python-side params
    mto_params = mt_class.MtParameters(bg_variance=params.bg_variance, gain=params.gain,
                                       move_factor=params.move_factor, alpha=params.alpha,
                                       verbosity=params.verbosity, min_distance=params.min_distance)
    # Create the MTO struct and a pointer.
    # Avoids bizarre memory management issues - creating it in C seems to go very wrong
    mto_struct = mt_class.MtObjectData(object_ids=id_pointer, mt=ct.pointer(mt),
                                       paras=ct.pointer(mto_params),
                                       significant_nodes=sig_nodes_function,
                                       node_significance_test=sig_test.test,
                                       closest_significant_ancestors=sig_anc_pointer)
    mto_pointer = ct.pointer(mto_struct)
    # Give the significance test a chance to precompute state from the struct
    sig_test.init_test(mto_pointer)
    # Set up the mt_objects c function interface and run the filter;
    # results are written into object_ids / sig_ancs through the pointers.
    mto_lib.mt_objects.argtypes = [ct.POINTER(mt_class.MtObjectData)]
    mto_lib.mt_objects(mto_pointer)
    return object_ids, sig_ancs
|
CarolineHaighREPO_NAMEmtobjectsPATH_START.@mtobjects_extracted@mtobjects-master@mtolib@tree_filtering.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/image/hoverlabel/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font used in image hover labels."""

    # class properties
    # --------------------
    _parent_path_str = "image.hoverlabel"
    _path_str = "image.hoverlabel.font"
    _valid_props = {
        "color", "colorsrc",
        "family", "familysrc",
        "lineposition", "linepositionsrc",
        "shadow", "shadowsrc",
        "size", "sizesrc",
        "style", "stylesrc",
        "textcase", "textcasesrc",
        "variant", "variantsrc",
        "weight", "weightsrc",
    }

    # All Font properties are simple pass-throughs to item access on the
    # underlying hierarchy type, so generate them with a small factory
    # instead of writing eighteen identical getter/setter pairs.
    def _make_property(_prop_name, _doc):
        def _getter(self):
            return self[_prop_name]

        def _setter(self, val):
            self[_prop_name] = val

        return property(_getter, _setter, doc=_doc)

    color = _make_property(
        "color",
        "Text color: a hex string, rgb/rgba, hsl/hsla or hsv/hsva string, "
        "a named CSS color, or a list/array of any of these.",
    )
    colorsrc = _make_property(
        "colorsrc",
        "Source reference on Chart Studio Cloud for `color` "
        "(string or plotly.grid_objs.Column).",
    )
    family = _make_property(
        "family",
        "HTML font family applied by the web browser; multiple families "
        "may be given comma-separated in order of preference. Chart Studio "
        "Cloud servers support a fixed set including Arial, Balto, Courier "
        "New, Droid Sans/Serif/Sans Mono, Gravitas One, Old Standard TT, "
        "Open Sans, Overpass, PT Sans Narrow, Raleway, Times New Roman. "
        "Non-empty string, or a list/array of such strings.",
    )
    familysrc = _make_property(
        "familysrc",
        "Source reference on Chart Studio Cloud for `family` "
        "(string or plotly.grid_objs.Column).",
    )
    lineposition = _make_property(
        "lineposition",
        "Kind of text decoration line(s): any '+'-joined combination of "
        "'under', 'over', 'through' (e.g. 'under+over'), or 'none'; also "
        "accepts a list/array of the above.",
    )
    linepositionsrc = _make_property(
        "linepositionsrc",
        "Source reference on Chart Studio Cloud for `lineposition` "
        "(string or plotly.grid_objs.Column).",
    )
    shadow = _make_property(
        "shadow",
        "Shape and color of the shadow behind text; 'auto' places a "
        "minimal shadow with contrast text color. See the CSS text-shadow "
        "documentation for the accepted syntax. String (numbers are "
        "converted), or a list/array of strings.",
    )
    shadowsrc = _make_property(
        "shadowsrc",
        "Source reference on Chart Studio Cloud for `shadow` "
        "(string or plotly.grid_objs.Column).",
    )
    size = _make_property(
        "size",
        "Font size: an int or float in [1, inf], or a list/array thereof.",
    )
    sizesrc = _make_property(
        "sizesrc",
        "Source reference on Chart Studio Cloud for `size` "
        "(string or plotly.grid_objs.Column).",
    )
    style = _make_property(
        "style",
        "Whether the font uses a normal or italic face: one of 'normal', "
        "'italic', or a list/array of these.",
    )
    stylesrc = _make_property(
        "stylesrc",
        "Source reference on Chart Studio Cloud for `style` "
        "(string or plotly.grid_objs.Column).",
    )
    textcase = _make_property(
        "textcase",
        "Capitalization of text: one of 'normal', 'word caps', 'upper', "
        "'lower', or a list/array of these.",
    )
    textcasesrc = _make_property(
        "textcasesrc",
        "Source reference on Chart Studio Cloud for `textcase` "
        "(string or plotly.grid_objs.Column).",
    )
    variant = _make_property(
        "variant",
        "Font variant: one of 'normal', 'small-caps', 'all-small-caps', "
        "'all-petite-caps', 'petite-caps', 'unicase', or a list/array of "
        "these.",
    )
    variantsrc = _make_property(
        "variantsrc",
        "Source reference on Chart Studio Cloud for `variant` "
        "(string or plotly.grid_objs.Column).",
    )
    weight = _make_property(
        "weight",
        "Font weight (boldness): an int in [1, 1000], or 'normal'/'bold', "
        "or a list/array of these.",
    )
    weightsrc = _make_property(
        "weightsrc",
        "Source reference on Chart Studio Cloud for `weight` "
        "(string or plotly.grid_objs.Column).",
    )

    # The factory is a build-time helper only; drop it from the class.
    del _make_property

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            `family`.
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        linepositionsrc
            Sets the source reference on Chart Studio Cloud for
            `lineposition`.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        shadowsrc
            Sets the source reference on Chart Studio Cloud for
            `shadow`.
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            `size`.
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        stylesrc
            Sets the source reference on Chart Studio Cloud for
            `style`.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        textcasesrc
            Sets the source reference on Chart Studio Cloud for
            `textcase`.
        variant
            Sets the variant of the font.
        variantsrc
            Sets the source reference on Chart Studio Cloud for
            `variant`.
        weight
            Sets the weight (or boldness) of the font.
        weightsrc
            Sets the source reference on Chart Studio Cloud for
            `weight`.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        lineposition=None,
        linepositionsrc=None,
        shadow=None,
        shadowsrc=None,
        size=None,
        sizesrc=None,
        style=None,
        stylesrc=None,
        textcase=None,
        textcasesrc=None,
        variant=None,
        variantsrc=None,
        weight=None,
        weightsrc=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.image.hoverlabel.Font`

        The remaining parameters correspond one-to-one to the class
        properties of the same name (see each property's docstring):
        color, colorsrc, family, familysrc, lineposition,
        linepositionsrc, shadow, shadowsrc, size, sizesrc, style,
        stylesrc, textcase, textcasesrc, variant, variantsrc, weight,
        weightsrc.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        # Internal construction path: adopt the parent and skip validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalise `arg` into a plain dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.image.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.image.hoverlabel.Font`"""
            )

        # Handle skip_invalid / _validate flags.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate properties; explicit keyword arguments take precedence
        # over the corresponding entries found in `arg`.
        explicit_values = (
            ("color", color), ("colorsrc", colorsrc),
            ("family", family), ("familysrc", familysrc),
            ("lineposition", lineposition), ("linepositionsrc", linepositionsrc),
            ("shadow", shadow), ("shadowsrc", shadowsrc),
            ("size", size), ("sizesrc", sizesrc),
            ("style", style), ("stylesrc", stylesrc),
            ("textcase", textcase), ("textcasesrc", textcasesrc),
            ("variant", variant), ("variantsrc", variantsrc),
            ("weight", weight), ("weightsrc", weightsrc),
        )
        for prop_name, explicit in explicit_values:
            candidate = arg.pop(prop_name, None)
            if explicit is not None:
                candidate = explicit
            if candidate is not None:
                self[prop_name] = candidate

        # Remaining entries in `arg` plus unknown kwargs go through the
        # generic processor (which honours skip_invalid).
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@image@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "rnn_generator.py",
"repo_name": "trivnguyen/florah",
"repo_path": "florah_extracted/florah-main/src/florah/models/rnn_model/rnn_generator.py",
"type": "Python"
}
|
from typing import Optional, Tuple
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from .. import base_modules, flows, transforms
from . import grud
class DataModule(base_modules.BaseFlowModule):
    """Flow module pairing the Recurrent-MAF architecture with the
    standard preprocessing transform."""

    arch_type = "RecurrentMAF"

    def __init__(
        self, model_hparams: Optional[dict] = None,
        transform_hparams: Optional[dict] = None,
        optimizer_hparams: Optional[dict] = None
    ) -> None:
        # Delegate everything to the generic flow module, fixing the
        # architecture and transform classes for this model family.
        super().__init__(
            RecurrentMAF,
            transforms.Preprocess,
            model_hparams,
            transform_hparams,
            optimizer_hparams,
        )
class TimeEmbedding(torch.nn.Module):
    r"""Learned cosine time embedding.

    Maps a time vector :math:`t` to :math:`\cos(W t + b)`, where the
    linear map :math:`W t + b` is learned and the output dimension is
    ``embed_channels``.
    """

    def __init__(
            self, time_channels: int, embed_channels: int) -> None:
        """
        Parameters
        ----------
        time_channels: int
            Number of time input channels
        embed_channels: int
            Number of embedded dimensions
        """
        super().__init__()
        # Kept as an attribute for downstream introspection.
        self.embed_channels = embed_channels
        self.linear_embed = torch.nn.Linear(time_channels, embed_channels)

    def forward(self, t: Tensor) -> Tensor:
        """Project the times linearly, then take the elementwise cosine."""
        projected = self.linear_embed(t)
        return torch.cos(projected)
class RecurrentMAF(torch.nn.Module):
    """
    Recurrent-MAF model for time series forecasting.

    A stack of recurrent layers encodes each input sequence into per-step
    context vectors; a masked autoregressive flow (MAF) conditioned on
    that context models the distribution of the outputs.

    Parameters
    ----------
    in_channels: int
        Number of input channels
    out_channels: int
        Number of output channels
    time_embed_channels: int
        Number of time embedding channels
    rnn_name: str
        Type of the recurrent layer to use
    rnn_hparams: dict
        Dictionary with extra kwargs for the recurrent layer
    num_layers: int
        Number of recurrent hidden layers
    hidden_features: int
        Number of RNN hidden channels
    num_layers_flows: int
        Number of MAF transformations
    hidden_features_flows: int
        Number of MAF hidden channels
    num_blocks: int
        Number of MADE blocks in each MAF transformation
    softplus: bool
        Deprecated.

    Attributes
    ----------
    rnn_name: str
        Type of the recurrent layer to use
    rnn_layer: torch.nn.Module
        Recurrent layer class
    embedding_net: torch.nn.Module
        Time embedding layer
    rnn: torch.nn.ModuleList
        List of recurrent layers
    maf_blocks: torch.nn.ModuleList
        List of MAF blocks
    activation: callable
        Activation function applied between recurrent layers
    """

    # Static dictionary with supported recurrent layer constructors
    RNN_LAYERS = {
        'RNN': torch.nn.RNN,
        'GRU': torch.nn.GRU,
        'LSTM': torch.nn.LSTM,
        'GRUD': grud.GRUD
    }
    # Default extra constructor kwargs per recurrent layer type.
    # NOTE: treated as read-only; __init__ copies before merging overrides.
    RNN_LAYERS_DEFAULT_ARGS = {
        'RNN': {},
        'GRU': {},
        'LSTM': {},
        'GRUD': {'delta_size': 1}
    }

    def __init__(
        self, in_channels: int, out_channels: int,
        hidden_features: int = 64, num_layers: int = 1,
        rnn_name: str = "GRU", rnn_hparams: Optional[dict] = None,
        time_embed_channels: Optional[int] = None,
        num_layers_flows: int = 1, hidden_features_flows: int = 64,
        num_blocks: int = 2, softplus: bool = False
    ) -> None:
        """
        Parameters
        ----------
        See class docstring. `softplus` is deprecated and ignored.

        Raises
        ------
        KeyError
            If `rnn_name` is not one of the supported layer types.
        """
        super().__init__()

        # get recurrent layer type to use
        self.rnn_name = rnn_name
        if rnn_name in self.RNN_LAYERS:
            self.rnn_layer = self.RNN_LAYERS[rnn_name]
        else:
            raise KeyError(
                f"Unknown model name \"{rnn_name}\"."\
                f"Available models are: {str(self.RNN_LAYERS.keys())}")

        # Time embedding: identity concatenates the raw (t_in, t_out) pair
        # (2 extra channels); otherwise a learned cosine embedding is used.
        if time_embed_channels is None:
            self.embedding_net = torch.nn.Identity()
            in_channels = in_channels + 2
        else:
            self.embedding_net = TimeEmbedding(2, time_embed_channels)
            in_channels = in_channels + time_embed_channels

        # Create RNN layers.
        self.rnn = torch.nn.ModuleList()
        # BUGFIX: copy the class-level defaults before merging user
        # overrides. The previous code aliased the shared
        # RNN_LAYERS_DEFAULT_ARGS entry and update() mutated it in place,
        # leaking one instance's rnn_hparams into every instance
        # constructed afterwards.
        default_rnn_hparams = dict(self.RNN_LAYERS_DEFAULT_ARGS[rnn_name])
        if rnn_hparams is not None:
            default_rnn_hparams.update(rnn_hparams)
        for i in range(num_layers):
            n_in = in_channels if i == 0 else hidden_features
            n_out = hidden_features
            self.rnn.append(
                self.rnn_layer(
                    n_in, n_out, batch_first=True, **default_rnn_hparams))

        # MAF blocks conditioned on the RNN hidden state
        self.maf_blocks = flows.build_maf(
            out_channels, hidden_features_flows, hidden_features,
            num_layers_flows, num_blocks)

        # activation applied between (but not after the last) RNN layers
        self.activation = F.relu

    def forward(
        self, x: Tensor, t_in: Tensor, t_out: Tensor,
        h0: Optional[Tuple] = None) -> Tuple[Tensor, Tuple]:
        r"""
        Forward pass.

        Parameters
        ----------
        x: Tensor (N_batch, L_padded, H_in)
            Input tensor where `N_batch` is the batch size, `L_padded` is
            the padded sequence length and `H_in` is the input dimension
        t_in: Tensor (N_batch, L_padded, H_in_t)
            Input time tensor
        t_out: Tensor (N_batch, L_padded, H_in_t)
            Output time tensor
        h0: Tuple of Tensor
            Tuple of initial hidden states, one per recurrent layer

        Returns
        -------
        (context, hidden_states): the per-step context sequence and the
        list of final hidden states from each recurrent layer.
        """
        hout = []  # output hidden states, one entry per RNN layer

        # time embedding, appended to the input features
        t_embed = self.embedding_net(torch.cat([t_in, t_out], dim=-1))
        x = torch.cat([x, t_embed], dim=-1)

        # GRUD additionally consumes the raw time difference between
        # consecutive steps (zero for the first step).
        # NOTE(review): the delta uses t_out[:, :-1] - t_in[:, :-1];
        # presumably aligned with how GRUD expects decay intervals —
        # confirm against the grud module.
        if self.rnn_name == "GRUD":
            t_delta = torch.cat([
                torch.zeros(t_in.shape[0], 1, 1, dtype=t_in.dtype, device=t_in.device),
                t_out[:, :-1] - t_in[:, :-1]
            ], dim=1)
        else:
            t_delta = None

        # iterate over all recurrent layers; activation between layers only
        for i in range(len(self.rnn)):
            if t_delta is not None:
                x, h = self.rnn[i](x, t_delta, h0[i] if h0 is not None else None)
            else:
                x, h = self.rnn[i](x, h0[i] if h0 is not None else None)
            if i != len(self.rnn) - 1:
                x = self.activation(x)
            hout.append(h)

        # return output sequence and hidden states
        return x, hout

    def log_prob(self, batch: Tuple[Tensor],
                 return_context: bool = False) -> Tensor:
        """Calculate the log-likelihood of a batch.

        `batch` is (x, y, t, seq_len, mask); padded steps are removed via
        `mask` before evaluating the flow.
        """
        x, y, t, seq_len, mask = batch
        t_in = t[:, :-1]
        t_out = t[:, 1:]

        # forward pass to obtain the per-step context
        context, _ = self(x, t_in, t_out)

        # flatten (batch, seq) into one axis before the MAF blocks
        context = context.flatten(0, 1)
        y = y.flatten(0, 1)
        mask = mask.flatten()

        # get log-likelihood P(y | context) on non-padded steps only
        log_prob = self._log_prob_from_context(y[mask], context=context[mask])
        if return_context:
            return log_prob, context
        else:
            return log_prob

    def sample(self, x: Tensor, t_in: Tensor, t_out: Tensor, num_samples: int,
               return_context: bool = False) -> Tensor:
        """Sample outputs conditioned on the input sequence.

        NOTE(review): `return_context` is currently unused here; kept for
        interface compatibility with log_prob.
        """
        # forward pass to obtain the per-step context, then flatten
        context, _ = self(x, t_in, t_out)
        context = context.flatten(0, 1)

        # draw samples from the conditional flow
        y = self._sample_from_context(num_samples, context=context)
        return y

    def _sample_from_context(
        self, num_samples: int, context: Optional[Tensor] = None
    ) -> Tensor:
        """Sample P(x | context) from the MAF."""
        return self.maf_blocks.sample(num_samples, context=context)

    def _log_prob_from_context(
        self, x: Tensor,
        context: Optional[Tensor] = None) -> Tensor:
        """Return MAF log-likelihood P(x | context)."""
        return self.maf_blocks.log_prob(x, context=context)
trivnguyenREPO_NAMEflorahPATH_START.@florah_extracted@florah-main@src@florah@models@rnn_model@rnn_generator.py@.PATH_END.py
|
{
"filename": "broadening.py",
"repo_name": "exosports/BART",
"repo_path": "BART_extracted/BART-master/scripts/broadening.py",
"type": "Python"
}
|
import sys, os
import numpy as np
import scipy.constants as sc
import ConfigParser
scriptsdir = os.path.dirname(os.path.realpath(__file__))
sys.path.append(scriptsdir + "/../code")
import makeatm as ma
def get_widths(cfile):
  """
  Calculate the max and min Lorentz and Doppler broadening HWHM for
  the given configuration file.

  Parameters:
  -----------
  cfile: String
     A BART configuration file.
  """
  # Read config:
  config = ConfigParser.SafeConfigParser()
  config.optionxform = str  # This one enable Uppercase in arguments
  config.read([cfile])
  defaults = dict(config.items("MCMC"))

  # Get min-max wavenumber. Prefer the wavelength keys; fall back to the
  # wavenumber keys when a key is absent (KeyError) or non-numeric
  # (ValueError). The previous bare `except:` hid every other failure.
  try:
    wnmin = 1.0/(float(defaults["wlhigh"])*float(defaults["wlfct"]))
  except (KeyError, ValueError):
    wnmin = float(defaults["wnlow"]) * float(defaults["wnfct"])
  try:
    wnmax = 1.0/(float(defaults["wllow"])*float(defaults["wlfct"]))
  except (KeyError, ValueError):
    wnmax = float(defaults["wnhigh"]) * float(defaults["wnfct"])

  # Read atmospheric file:
  molecs, pressure, temps, abun = ma.readatm(defaults["atmfile"])

  # Get min-max temperatures; explicit config bounds win over the
  # atmosphere's temperature range:
  try:
    tmin = float(defaults["tlow"])
  except (KeyError, ValueError):
    tmin = np.amin(temps)
  try:
    tmax = float(defaults["thigh"])
  except (KeyError, ValueError):
    tmax = np.amax(temps)

  # Get min-max pressures (converted to Barye):
  pmin = np.amin(pressure) * 1e6
  pmax = np.amax(pressure) * 1e6

  # Get masses:
  molfile = scriptsdir + "/../modules/transit/inputs/molecules.dat"
  ID, mol, mass, diam = readmol(molfile)

  # Keep only molecules from the atmospheric file:
  mask = np.zeros(len(mol), int)
  for i in np.arange(len(mol)):
    mask[i] = mol[i] in molecs
  mol  = mol [np.where(mask)]
  mass = mass[np.where(mask)] * sc.u * 1e3         # grams
  diam = diam[np.where(mask)] * sc.angstrom * 100  # cm

  # Sort molecules according to the order in the atmospheric file:
  isort = np.zeros(len(mol), int)
  for i in np.arange(len(mol)):
    isort[i] = np.where(np.asarray(mol) == molecs[i])[0][0]
  mol  = mol [isort]
  mass = mass[isort]
  diam = diam[isort]

  # Calculate minimum and maximum Doppler widths:
  dmin = Doppler(wnmin, tmin, np.amin(mass))
  dmax = Doppler(wnmax, tmax, np.amax(mass))

  iH2 = np.where(mol == 'H2')[0]
  iHe = np.where(mol == 'He')[0]
  # Calculate minimum and maximum Lorentz widths: narrowest at low pressure
  # and high temperature, widest at high pressure and low temperature.
  # NOTE(review): abun[-1]/abun[0] pick the last/first atmospheric layer's
  # abundances — confirm layer ordering against ma.readatm.
  lmin = Lorentz(pmin, tmax, mass, iH2, iHe, abun[-1], diam, True)
  lmax = Lorentz(pmax, tmin, mass, iH2, iHe, abun[ 0], diam, False)

  print("Doppler minimum and maximum HWHM (cm-1): {:.3e}, {:.3e}\n"
        "Lorentz minimum and maximum HWHM (cm-1): {:.3e}, {:.3e}".
        format(dmin, dmax, lmin, lmax))
def Lorentz(pressure, temperature, mass, iH2, iHe, abundance,
            diameter, min=True):
  """
  Calculate the Lorentz HWHM (in cm-1) for the given parameters.

  Parameters:
  -----------
  pressure: Float
     Pressure in Barye.
  temperature: Float
     Temperature in K.
  mass: 1D float array
     Array of species masses in grams.
  iH2: Integer
     Index in mass correspoding to H2.
  iHe: Integer
     Index in mass correspoding to helium.
  abundance: 1D float array
     Mole mixing ratio of the species.
  diameter: 1D float array
     Collisional diameter of the species in cm.
  min: Bool
     Flag to calculate the minimum (True) or maximum (False) from the species.
  """
  # NOTE: the keyword `min` shadows the builtin, but renaming it would
  # break keyword callers, so it is kept.
  extreme = np.amin if min else np.amax

  # Per-species collisional contributions from H2 and He:
  h2_term = (abundance[iH2] * ((diameter+diameter[iH2])*0.5)**2.0 *
             np.sqrt((1/mass + 1/mass[iH2])))
  he_term = (abundance[iHe] * ((diameter+diameter[iHe])*0.5)**2.0 *
             np.sqrt((1/mass + 1/mass[iHe])))

  # Take the minimum or maximum over the species:
  flim = extreme(h2_term + he_term)

  # Multiply by the remaining physical factors and return:
  return np.sqrt(2)/(sc.c*100) / np.sqrt(temperature*np.pi*(sc.k*1e7)) * pressure * flim
def Doppler(wavenumber, temp, mass):
  """
  Calculate the Doppler HWHM (in cm-1) for the given paramteres.

  Parameters:
  -----------
  wavenumber: Float
     The wavenumber in cm-1.
  temp: Float
     Temperature in Kelvin degrees.
  mass: Float
     Mass of the species in gr.
  """
  # Work in CGS units throughout:
  kB_cgs = sc.k * 1e7   # Boltzmann constant in erg/K
  c_cgs = sc.c * 100    # speed of light in cm/s
  # Thermal HWHM: (nu/c) * sqrt(2 ln(2) k T / m)
  return wavenumber/c_cgs * np.sqrt(2*np.log(2)*kB_cgs * temp/mass)
def readmol(molfile):
  """
  Read and extract species info from molfile.

  Parameters:
  -----------
  molfile: String
     Path to the molecular info file.

  Returns:
  --------
  ID: 1D integer array
     Universal ID for each species.
  mol: 1D string array
     Names of the species.
  mass: 1D float array
     Mass of the species in amu.
  diam: 1D float array
     Diameter of the species in angstroms.
  """
  # Read the file; the context manager guarantees the handle is closed
  # even if the parsing below raises (the previous open/readlines/close
  # sequence leaked the handle on error).
  with open(molfile, "r") as f:
    lines = f.readlines()

  # Find the line where the species table starts:
  for start in np.arange(len(lines)):
    if lines[start].startswith("# ID"):
      break
  # Skip the header line and the separator line that follows it:
  start += 2

  # Extract the species info until the first blank line:
  ID, mol, mass, diam = [], [], [], []
  while lines[start].strip() != "":
    line = lines[start].split()
    ID .append(line[0])
    mol .append(line[1])
    mass.append(line[2])
    diam.append(line[3])
    start += 1

  return (np.asarray(ID, int), np.asarray(mol),
          np.asarray(mass, np.double), np.asarray(diam, np.double))
|
exosportsREPO_NAMEBARTPATH_START.@BART_extracted@BART-master@scripts@broadening.py@.PATH_END.py
|
{
"filename": "triton.py",
"repo_name": "ultralytics/yolov5",
"repo_path": "yolov5_extracted/yolov5-master/utils/triton.py",
"type": "Python"
}
|
# Ultralytics YOLOv5 🚀, AGPL-3.0 license
"""Utils to interact with the Triton Inference Server."""
import typing
from urllib.parse import urlparse
import torch
class TritonRemoteModel:
    """
    A wrapper over a model served by the Triton Inference Server.

    It can be configured to communicate over GRPC or HTTP. It accepts Torch Tensors as input and returns them as
    outputs.
    """

    def __init__(self, url: str):
        """
        Keyword Arguments:
        url: Fully qualified address of the Triton server - for e.g. grpc://localhost:8000.
        """
        parsed_url = urlparse(url)
        if parsed_url.scheme == "grpc":
            from tritonclient.grpc import InferenceServerClient, InferInput

            self.client = InferenceServerClient(parsed_url.netloc)  # Triton GRPC client
            # The GRPC client returns protobuf-like objects (attribute access).
            # The first model in the repository index is assumed to be the one served.
            model_repository = self.client.get_model_repository_index()
            self.model_name = model_repository.models[0].name
            self.metadata = self.client.get_model_metadata(self.model_name, as_json=True)

            # Closure capturing self.metadata: builds one empty InferInput per
            # declared model input (name, shape, datatype).
            def create_input_placeholders() -> typing.List[InferInput]:
                return [
                    InferInput(i["name"], [int(s) for s in i["shape"]], i["datatype"]) for i in self.metadata["inputs"]
                ]

        else:
            from tritonclient.http import InferenceServerClient, InferInput

            self.client = InferenceServerClient(parsed_url.netloc)  # Triton HTTP client
            # The HTTP client returns plain dicts/lists (key access).
            model_repository = self.client.get_model_repository_index()
            self.model_name = model_repository[0]["name"]
            self.metadata = self.client.get_model_metadata(self.model_name)

            def create_input_placeholders() -> typing.List[InferInput]:
                return [
                    InferInput(i["name"], [int(s) for s in i["shape"]], i["datatype"]) for i in self.metadata["inputs"]
                ]

        # Stored so each __call__ can create fresh input placeholders.
        self._create_input_placeholders_fn = create_input_placeholders

    @property
    def runtime(self):
        """Returns the model runtime."""
        # GRPC metadata (as_json) and HTTP metadata expose either key.
        return self.metadata.get("backend", self.metadata.get("platform"))

    def __call__(self, *args, **kwargs) -> typing.Union[torch.Tensor, typing.Tuple[torch.Tensor, ...]]:
        """
        Invokes the model.

        Parameters can be provided via args or kwargs. args, if provided, are assumed to match the order of inputs of
        the model. kwargs are matched with the model input names.
        """
        inputs = self._create_inputs(*args, **kwargs)
        response = self.client.infer(model_name=self.model_name, inputs=inputs)
        result = []
        for output in self.metadata["outputs"]:
            tensor = torch.as_tensor(response.as_numpy(output["name"]))
            result.append(tensor)
        # Single-output models return a bare tensor; multi-output a list.
        return result[0] if len(result) == 1 else result

    def _create_inputs(self, *args, **kwargs):
        """Creates input tensors from args or kwargs, not both; raises error if none or both are provided."""
        args_len, kwargs_len = len(args), len(kwargs)
        if not args_len and not kwargs_len:
            raise RuntimeError("No inputs provided.")
        if args_len and kwargs_len:
            raise RuntimeError("Cannot specify args and kwargs at the same time")

        placeholders = self._create_input_placeholders_fn()
        if args_len:
            # Positional: order must match the model's declared inputs.
            if args_len != len(placeholders):
                raise RuntimeError(f"Expected {len(placeholders)} inputs, got {args_len}.")
            for input, value in zip(placeholders, args):
                input.set_data_from_numpy(value.cpu().numpy())
        else:
            # Keyword: matched by model input name; missing names raise KeyError.
            for input in placeholders:
                value = kwargs[input.name]
                input.set_data_from_numpy(value.cpu().numpy())
        return placeholders
|
ultralyticsREPO_NAMEyolov5PATH_START.@yolov5_extracted@yolov5-master@utils@triton.py@.PATH_END.py
|
{
"filename": "CARMATerm.py",
"repo_name": "ywx649999311/EzTao",
"repo_path": "EzTao_extracted/EzTao-master/src/eztao/carma/CARMATerm.py",
"type": "Python"
}
|
"""
A collection of GP kernels that express the autovariance structure of CARMA models using
celerite.
"""
import numpy as np
from celerite import terms
from numba import njit, float64, complex128
import warnings
__all__ = ["acf", "DRW_term", "DHO_term", "CARMA_term"]
@njit(complex128[:](complex128[:]))
def _compute_roots(coeffs):
    """Internal jitted function to compute polynomial roots.

    Takes coefficients ordered high->low (numpy convention) and returns the
    roots sorted ascending by real part, with near-real roots snapped onto
    the real axis.
    """
    # find roots using np and make roots that are almost real real
    # (imaginary parts below 1e-10 are treated as numerical noise)
    roots = np.roots(coeffs)
    roots[np.abs(roots.imag) < 1e-10] = roots[np.abs(roots.imag) < 1e-10].real
    roots = roots[roots.real.argsort()]  # ascending sort by real part
    return roots
@njit(float64[:](float64[:]))
def _compute_exp(params):
    """Internal jitted np.exp.

    NOTE(review): presumably wrapped in @njit so it can be called from other
    nopython-mode functions without boxing overhead — confirm.
    """
    return np.exp(params)
@njit(float64[:](float64[:], float64[:]))
def polymul(poly1, poly2):
    """Multiply two polynomials.

    Inputs are the coefficients of the polynomials ranked by order (high
    to low). Internally, this function operates from low order to high order.

    Args:
        poly1 (array(float)): Coefficients of the first polynomial, high->low.
        poly2 (array(float)): Coefficients of the second polynomial, high->low.

    Returns:
        array(float): Coefficients of the product, high->low, of length
        len(poly1) + len(poly2) - 1.
    """
    ## convert from high->low to low->high
    poly1 = poly1[::-1]
    poly2 = poly2[::-1]
    poly1_len = poly1.shape[0]
    poly2_len = poly2.shape[0]
    c = np.zeros(poly1_len + poly2_len - 1)
    # classic convolution of coefficient arrays
    for i in np.arange(poly1_len):
        for j in np.arange(poly2_len):
            c[i + j] += poly1[i] * poly2[j]
    ## convert back to high->low
    return c[::-1]
@njit(float64[:](float64[:]))
def fcoeffs2coeffs(fcoeffs):
    """Convert coeffs of a factored polynomial to the coeffs of the product.

    The input fcoeffs follow the notation (index) in Jones et al. (1981) with the last
    element being an additional multiplying factor (the coeff of the highest-order term
    in the final expanded polynomial). Pairs of entries (2p, 2p+1) are the linear and
    constant coefficients of one quadratic factor; with an odd count one extra linear
    factor precedes the pairs.

    :meta private:
    """
    size = fcoeffs.shape[0] - 1       # number of factor coefficients (excl. multiplier)
    odd = bool(size & 0x1)            # odd size -> one unpaired linear factor
    nPair = size // 2
    poly = fcoeffs[-1:]  # The coeff of highest order term in the product
    if odd:
        # fold in the single linear factor (x + fcoeffs[-2])
        poly = polymul(poly, np.array([1.0, fcoeffs[-2]]))
    for p in np.arange(nPair):
        # fold in one quadratic factor (x**2 + c1*x + c2) per pair
        poly = polymul(poly, np.array([1.0, fcoeffs[p * 2], fcoeffs[p * 2 + 1]]))
    # the returned is high->low
    return poly
def _roots2coeffs(roots):
"""Generate factored polynomial from roots
The notation (index) for the factored polynomial follows that in
Jones et al. (1981). Note: No multiplying is returned.
"""
coeffs = []
size = len(roots)
odd = bool(size & 0x1)
rootsComp = roots[roots.imag != 0]
rootsReal = roots[roots.imag == 0]
nCompPair = len(rootsComp) // 2
nRealPair = len(rootsReal) // 2
for i in range(nCompPair):
root1 = rootsComp[i]
root2 = rootsComp[i + 1]
coeffs.append(-(root1.real + root2.real))
coeffs.append((root1 * root2).real)
for i in range(nRealPair):
root1 = rootsReal[i]
root2 = rootsReal[i + 1]
coeffs.append(-(root1.real + root2.real))
coeffs.append((root1 * root2).real)
if odd:
coeffs.append(-rootsReal[-1].real)
return coeffs
@njit(complex128[:](complex128[:], float64[:], float64[:]))
def acf(arroots, arparam, maparam):
    """Get ACVF coefficients given CARMA parameters

    The CARMA noation (index) folows that in Brockwell et al. (2001).

    Args:
        arroots (array(complex)): AR roots in a numpy array
        arparam (array(float)): AR parameters in a numpy array
        maparam (array(float)): MA parameters in a numpy array

    Returns:
        array(complex): ACVF coefficients, each element correspond to a root.
    """
    p = arparam.shape[0]
    q = maparam.shape[0] - 1
    sigma = maparam[0]
    # MA param into Kelly's notation: normalize by the leading MA coeff
    # arparam = np.array(arparam)
    maparam = np.array([x / sigma for x in maparam])
    # init acf product terms
    num_left = np.zeros(p, dtype=np.complex128)
    num_right = np.zeros(p, dtype=np.complex128)
    # denominator starts from -2*Re(r_k), promoted to complex
    denom = -2 * arroots.real + np.zeros_like(arroots) * 1j
    # numerators: MA polynomial evaluated at +r_k and -r_k
    for k in range(q + 1):
        num_left += maparam[k] * np.power(arroots, k)
        num_right += maparam[k] * np.power(np.negative(arroots), k)
    # accumulate the cross-root product over all other roots; np.roll pairs
    # each root with every other root exactly once per offset j
    for j in range(1, p):
        root_idx = np.arange(p)
        root_k = arroots[np.roll(root_idx, j)]
        denom *= (root_k - arroots) * (np.conj(root_k) + arroots)
    return sigma**2 * num_left * num_right / denom
class DRW_term(terms.Term):
    r"""
    Damped Random Walk (DRW) term (kernel)

    .. math::
        k_{DRW}(\Delta t) = \sigma^2\,e^{-\Delta t/\tau}

    Parameterized by ``log_amp`` — the natural log of the RMS amplitude
    :math:`\sigma` — and ``log_tau`` — the natural log of the damping
    timescale :math:`\tau`.

    .. note::
        Conversions to other common DRW representations:
        :math:`\mathrm{SF_{\infty}^2} = 2\sigma^2`,
        :math:`\sigma^2 = \tau\,\sigma_{KBS}^2/2`, and
        :math:`\tau = 1/\alpha_1,\ \sigma_{KBS} = \beta_0`, where
        :math:`\alpha_1,\ \beta_0` are the AR/MA parameters of the
        equivalent CARMA(1,0) model (MacLeod et al. 2010;
        Kelly et al. 2009).
    """

    parameter_names = ("log_amp", "log_tau")

    def get_real_coefficients(self, params):
        """Map DRW parameters onto celerite's real-kernel coefficients.

        Args:
            params (array(float)): ``(log_amp, log_tau)``.

        Returns:
            Tuple ``(alpha_real, beta_real)``: the variance and the inverse
            damping timescale.
        """
        log_amp, log_tau = params
        alpha_real = np.exp(2 * log_amp)
        beta_real = 1 / np.exp(log_tau)
        return (alpha_real, beta_real)

    def get_perturb_amp(self):
        """Amplitude of the perturbing noise (beta_0) for the current parameters.

        Returns:
            The amplitude of the perturbing noise (beta_0) in the current DRW.
        """
        return self.perturb_amp(*self.get_parameter_vector())

    @staticmethod
    def perturb_amp(log_amp, log_tau):
        """Compute the amplitude of the perturbing noise (beta_0) in DRW.

        Args:
            log_amp (float): The natural log of the RMS amplitude.
            log_tau (float): The natural log of the characteristic timescale.

        Returns:
            The amplitude of the perturbing noise (beta_0) in the DRW
            specified by the input parameters.
        """
        return np.exp((2 * log_amp - np.log(1 / 2) - log_tau) / 2)

    def get_rms_amp(self):
        """RMS amplitude of this DRW process."""
        return np.exp(self.get_parameter("log_amp"))

    def get_carma_parameter(self):
        """DRW parameters in CARMA notation.

        Returns:
            [alpha_1, beta_0].
        """
        alpha_1 = 1 / np.exp(self.get_parameter("log_tau"))
        beta_0 = self.get_perturb_amp()
        return [alpha_1, beta_0]

    @property
    def p(self):
        # A DRW is a CARMA(1,0) process.
        return 1

    @property
    def q(self):
        return 0
class CARMA_term(terms.Term):
    """A general-purpose CARMA term (kernel)

    Args:
        log_arpars (array(float)): Natural log of the AR coefficients.
        log_mapars (array(float)): Natural log of the MA coefficients.
    """

    def __init__(self, log_arpars, log_mapars, *args, **kwargs):
        arpar_temp = "log_a{}"
        mapar_temp = "log_b{}"
        arpar_names = ("log_a1",)
        mapar_names = ("log_b0",)

        # set order & trigger roots/acf computation
        log_pars = np.append(log_arpars, log_mapars)
        self._p = len(log_arpars)
        self._q = len(log_mapars) - 1
        self._dim = self._p + self._q + 1
        self._compute(log_pars)

        # check if stationary: any AR root with positive real part is
        # non-stationary (warn only, do not raise).
        # NOTE(review): _compute above already set self._arroots to the same
        # value — this recomputation looks redundant; confirm before removing.
        self._arroots = _compute_roots(np.append([1 + 0j], self._pars[: self._p]))
        if (self._arroots.real > 0).any():
            print("Warning: CARMA process is not stationary!")

        # loop over par array to find out how many params
        for i in range(2, self._p + 1):
            arpar_names += (arpar_temp.format(i),)
        for i in range(1, self._q + 1):
            mapar_names += (mapar_temp.format(i),)

        # parameter_names must be set before super().__init__ so the base
        # Term can register the full parameter vector.
        self.parameter_names = arpar_names + mapar_names
        super().__init__(*log_pars, **kwargs)

    @property
    def p(self):
        # AR order of the CARMA(p,q) model.
        return self._p

    @property
    def q(self):
        # MA order of the CARMA(p,q) model.
        return self._q

    def _compute(self, params):
        """Compute important CARMA parameters.

        Exponentiates the log parameters, recomputes the AR roots and the
        ACVF coefficients, and caches a mask selecting complex roots.
        """
        self._pars = _compute_exp(params)
        self._arroots = _compute_roots(np.append([1 + 0j], self._pars[: self._p]))
        self.acf = acf(self._arroots, self._pars[: self._p], self._pars[self._p :])
        # True where the AR root is complex (handled by the complex kernel).
        self.mask = self._arroots.imag != 0

    def set_log_fcoeffs(self, log_fcoeffs):
        """Set kernel parameters

        Use coeffs of the factored polynomial to set CARMA paramters, note that the
        last input coeff is always the coeff for the highest-order MA differential.
        While performing the conversion, 1 is added to the AR coeffs to maintain the
        same formatting (AR polynomials always have the highest order coeff to be 1).

        Args:
            log_fcoeffs (array(float)): Natural log of the coefficients for the
                factored characteristic polynomial, with the last coeff being an
                additional multiplying factor on this polynomial.
        """
        if log_fcoeffs.shape[0] != (self._dim):
            raise ValueError("Dimension mismatch!")

        fcoeffs = _compute_exp(log_fcoeffs)
        # AR: append the implicit leading 1, drop it again after expansion.
        ARpars = fcoeffs2coeffs(np.append(fcoeffs[: self._p], [1]))[1:]
        # MA: expanded high->low, reversed into CARMA's low->high convention.
        MApars = fcoeffs2coeffs(fcoeffs[self._p :])[::-1]

        self.set_parameter_vector(np.log(np.append(ARpars, MApars)))

    def get_real_coefficients(self, params):
        """
        Get arrays of ``alpha_real`` and ``beta_real`` (coefficients of celerite's
        real kernel)

        Args:
            params (array(float)): Parameters of this kernel.

        Returns:
            Arrays of ``alpha_real`` and ``beta_real``, one for each.
        """
        # trigger re_compute & get celerite coeffs
        self._compute(params)
        # real AR roots map onto celerite's real (exponential) kernel
        acf_real = self.acf[~self.mask]
        roots_real = self._arroots[~self.mask]

        ar = acf_real[:].real
        cr = -roots_real[:].real

        return (ar, cr)

    def get_complex_coefficients(self, params):
        """
        Get arrays of ``alpha_complex_real``, ``alpha_complex_imag``,
        ``beta_complex_real`` and ``beta_complex_imag`` (coefficients of celerite's
        complex kernel)

        Args:
            params (array(float)): Parameters of this kernel.

        Returns:
            Arrays of ``alpha_complex_real``, ``alpha_complex_imag``,
            ``beta_complex_real`` and ``beta_complex_imag``, one for each.
        """
        # NOTE(review): unlike get_real_coefficients, this does NOT call
        # self._compute(params) — it relies on the real-coefficient path
        # having refreshed the cached acf/roots first; confirm against
        # celerite's get_all_coefficients call order.
        acf_complex = self.acf[self.mask]
        roots_complex = self._arroots[self.mask]

        # take every other root/coeff: conjugate pairs contribute one term
        ac = 2 * acf_complex[::2].real
        bc = 2 * acf_complex[::2].imag
        cc = -roots_complex[::2].real
        dc = -roots_complex[::2].imag

        return (ac, bc, cc, dc)

    def get_rms_amp(self):
        """Get the RMS amplitude of this CARMA kernel

        Returns:
            The RMS amplitude of this CARMA kernel.
        """
        log_pars = self.get_parameter_vector()
        return self.rms_amp(log_pars[: self.p], log_pars[self.p :])

    def get_carma_parameter(self):
        """Return CARMA parameters in the natural scale."""
        log_pars = self.get_parameter_vector()
        return _compute_exp(log_pars)

    @staticmethod
    def rms_amp(log_arpars, log_mapars):
        """Compute the RMS amplitude of a CARMA kernel

        Args:
            log_arpars (array(float)): Natural log of the AR coefficients.
            log_mapars (array(float)): Natural log of the MA coefficients.

        Returns:
            The RMS amplitude of the CARMA kernel specified by the input parameters.
        """
        _p = len(log_arpars)
        _pars = _compute_exp(np.append(log_arpars, log_mapars))
        _arroots = _compute_roots(np.append([1 + 0j], _pars[:_p]))
        _acf = acf(_arroots, _pars[:_p], _pars[_p:])

        # variance is the sum of the ACVF coefficients at lag zero
        return np.sqrt(np.abs(np.sum(_acf)))

    @staticmethod
    def carma2fcoeffs_log(log_arpars, log_mapars):
        """Get the representation of a CARMA kernel in the factored polynomial space

        Args:
            log_arpars (array(float)): Natural log of the AR coefficients.
            log_mapars (array(float)): Natural log of the MA coefficients.

        Returns:
            array(float): The coefficients (in natural log) of the factored polymoical
            for the CARMA kernel specified by the input parameters. The last coeff
            is a multiplying factor of the returned polynomial.
        """
        _p = len(log_arpars)
        _q = len(log_mapars) - 1
        _pars = _compute_exp(np.append(log_arpars, log_mapars))
        _arroots = _compute_roots(np.append([1 + 0j], _pars[:_p]))
        # MA coeffs stored low->high; reverse into numpy's high->low for roots
        _maroots = _compute_roots(np.array(_pars[_p:][::-1], dtype=np.complex128))
        ma_mult = _pars[-1:]  ## the multiplying factor
        ar_coeffs = _roots2coeffs(_arroots)

        if _q > 0:
            ma_coeffs = _roots2coeffs(_maroots)
            ma_coeffs.append(ma_mult[0])
        else:
            # q == 0: the MA side is just the multiplying factor
            ma_coeffs = ma_mult

        return np.log(np.append(ar_coeffs, ma_coeffs))

    @staticmethod
    def fcoeffs2carma_log(log_fcoeffs, p):
        """Get the representation of a CARMA kernel in the nominal CARMA parameter space

        Args:
            log_coeffs (array(float)): The array of coefficients for the factored
                polynomial with the last coeff being a multiplying factor of the
                polynomial.
            p (int): The p order of the CARMA kernel.

        Returns:
            Natural log of the AR and MA parameters in two separate arrays.
        """
        fcoeffs = np.exp(log_fcoeffs)
        # Append one to AR fcoeffs as the multiplying factor; MA foceffs has that included.
        # Index in CARMA for AR: high -> low; for MA: low -> high
        ARpars = fcoeffs2coeffs(np.append(fcoeffs[:p], [1]))[1:]
        MApars = fcoeffs2coeffs(fcoeffs[p:])[::-1]

        return np.log(ARpars), np.log(MApars)

    @staticmethod
    def carma2fcoeffs(log_arpars, log_mapars):
        """Get the representation of a CARMA kernel in the factored polynomial space

        A wrapper of `CARMA_term.carma2fcoeffs_log` for backward compatibility. This
        function will be deprecated in future releases.
        """
        warnings.warn("Use carma2fcoeffs_log instead", DeprecationWarning)
        log_fcoeffs = CARMA_term.carma2fcoeffs_log(log_arpars, log_mapars)
        return np.exp(log_fcoeffs)

    @staticmethod
    def fcoeffs2carma(log_fcoeffs, p):
        """Get the representation of a CARMA kernel in the nominal CARMA parameter space

        A wrapper of `CARMA_term.fcoeffs2carma_log` for backward compatibility. This
        function will be deprecated in future releases.
        """
        warnings.warn("Use fcoeffs2carma_log instead", DeprecationWarning)
        log_ar, log_ma = CARMA_term.fcoeffs2carma_log(log_fcoeffs, p)
        return np.exp(log_ar), np.exp(log_ma)
class DHO_term(CARMA_term):
    """Damped Harmonic Oscillator (DHO) term (kernel)

    A thin convenience subclass: a DHO is a CARMA(2,1) process, so this
    simply forwards the four named parameters to ``CARMA_term``.

    Args:
        log_a1 (float): Natural log of the DHO parameter a1.
        log_a2 (float): Natural log of the DHO parameter a2.
        log_b0 (float): Natural log of the DHO parameter b0.
        log_b1 (float): Natural log of the DHO parameter b1.
    """

    def __init__(self, log_a1, log_a2, log_b0, log_b1, *args, **kwargs):
        """Initiate the DHO term as a CARMA(2,1) kernel."""
        log_ar = [log_a1, log_a2]
        log_ma = [log_b0, log_b1]
        super(DHO_term, self).__init__(log_ar, log_ma, **kwargs)
|
ywx649999311REPO_NAMEEzTaoPATH_START.@EzTao_extracted@EzTao-master@src@eztao@carma@CARMATerm.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/contour/textfont/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``contour.textfont.style`` property."""

    def __init__(self, plotly_name="style", parent_name="contour.textfont", **kwargs):
        # Callers may override edit_type/values; otherwise use the defaults.
        edit_type = kwargs.pop("edit_type", "plot")
        values = kwargs.pop("values", ["normal", "italic"])
        super(StyleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@contour@textfont@_style.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "ZwickyTransientFacility/ztf_sim",
"repo_path": "ztf_sim_extracted/ztf_sim-master/ztf_sim/__init__.py",
"type": "Python"
}
|
from .constants import *
from .Fields import *
from .ObsLogger import *
from .ObservingProgram import *
from .Scheduler import *
from .SkyBrightness import *
from .TelescopeStateMachine import *
from .cadence import *
from .configuration import *
from .field_selection_functions import *
from .magnitudes import *
from .simulate import *
from .utils import *

import logging

# Bug fix: removed a stray no-op `set()` statement (dead code).

__version__ = "0.0.2dev"

# Library convention: attach a NullHandler so importing applications see no
# log output unless they configure logging themselves.
logging.getLogger(__name__).addHandler(logging.NullHandler())
|
ZwickyTransientFacilityREPO_NAMEztf_simPATH_START.@ztf_sim_extracted@ztf_sim-master@ztf_sim@__init__.py@.PATH_END.py
|
{
"filename": "colorlog.py",
"repo_name": "davidharvey1986/pyRRG",
"repo_path": "pyRRG_extracted/pyRRG-master/unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_vendor/pep517/colorlog.py",
"type": "Python"
}
|
"""Nicer log formatting with colours.
Code copied from Tornado, Apache licensed.
"""
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import sys
try:
import curses
except ImportError:
curses = None
def _stderr_supports_color():
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
class LogFormatter(logging.Formatter):
    """Log formatter with colour support

    Renders each record as the message, padding, then a right-aligned,
    optionally coloured "<level-initial>-<logger-name>" tag.
    """
    # Terminal colour codes per log level (tparm arguments).
    DEFAULT_COLORS = {
        logging.INFO: 2,  # Green
        logging.WARNING: 3,  # Yellow
        logging.ERROR: 1,  # Red
        logging.CRITICAL: 1,
    }

    def __init__(self, color=True, datefmt=None):
        r"""
        :arg bool color: Enables color support.
        :arg string fmt: Log message format.
        It will be applied to the attributes dict of log records. The
        text between ``%(color)s`` and ``%(end_color)s`` will be colored
        depending on the level if color support is on.
        :arg dict colors: color mappings from logging level to terminal color
        code
        :arg string datefmt: Datetime format.
        Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
        .. versionchanged:: 3.2
        Added ``fmt`` and ``datefmt`` arguments.
        """
        logging.Formatter.__init__(self, datefmt=datefmt)
        # maps levelno -> colour escape sequence
        self._colors = {}
        if color and _stderr_supports_color():
            # The curses module has some str/bytes confusion in
            # python3. Until version 3.2.3, most methods return
            # bytes, but only accept strings. In addition, we want to
            # output these strings with the logging module, which
            # works with unicode strings. The explicit calls to
            # unicode() below are harmless in python2 but will do the
            # right conversion in python 3.
            fg_color = (curses.tigetstr("setaf") or
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = str(fg_color, "ascii")

            for levelno, code in self.DEFAULT_COLORS.items():
                self._colors[levelno] = str(
                    curses.tparm(fg_color, code), "ascii")
            self._normal = str(curses.tigetstr("sgr0"), "ascii")

            # Measure the terminal width via curses.
            # NOTE(review): initscr()/endwin() briefly takes over the
            # terminal as a side effect of construction — confirm this is
            # acceptable for all call sites.
            scr = curses.initscr()
            self.termwidth = scr.getmaxyx()[1]
            curses.endwin()
        else:
            self._normal = ''
            # Default width is usually 80, but too wide is
            # worse than too narrow
            self.termwidth = 70

    def formatMessage(self, record):
        """Format one record: message, padding, coloured right-aligned tag."""
        mlen = len(record.message)
        right_text = '{initial}-{name}'.format(initial=record.levelname[0],
                                               name=record.name)
        # Pad so the tag lands at the right edge; fall back to one space
        # when the message is too long to fit.
        if mlen + len(right_text) < self.termwidth:
            space = ' ' * (self.termwidth - (mlen + len(right_text)))
        else:
            space = ' '

        if record.levelno in self._colors:
            start_color = self._colors[record.levelno]
            end_color = self._normal
        else:
            start_color = end_color = ''

        return record.message + space + start_color + right_text + end_color
def enable_colourful_output(level=logging.INFO):
    """Attach a colourised StreamHandler to the root logger and set its level."""
    root = logging.root
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(LogFormatter())
    root.addHandler(stream_handler)
    root.setLevel(level)
|
davidharvey1986REPO_NAMEpyRRGPATH_START.@pyRRG_extracted@pyRRG-master@unittests@bugFixPyRRG@lib@python3.7@site-packages@pip@_vendor@pep517@colorlog.py@.PATH_END.py
|
{
"filename": "gaussfitter2.py",
"repo_name": "saopicc/DDFacet",
"repo_path": "DDFacet_extracted/DDFacet-master/DDFacet/ToolsDir/gaussfitter2.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# gaussfitter.py
# created by Adam Ginsburg (adam.ginsburg@colorado.edu or keflavich@gmail.com) 3/17/08)
#
#% $Id$
#
#
# Copyright (C) 2002-2011
# The MeqTree Foundation &
# ASTRON (Netherlands Foundation for Research in Astronomy)
# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>,
# or write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from DDFacet.compatibility import range
from numpy import *
from scipy import optimize
from scipy import stats
def moments (data,circle,rotate,vheight):
    """Returns (height, amplitude, x, y, width_x, width_y, rotation angle)
    the gaussian parameters of a 2D distribution by calculating its
    moments. Depending on the input parameters, will only output
    a subset of the above.

    Parameters:
    -----------
    data: 2D array
        The image to characterize.
    circle: int
        If nonzero, return one width instead of (width_x, width_y).
    rotate: int
        If 1, append a 0.0 rotation-angle moment.
    vheight: int
        If truthy, estimate a constant background height (prepended to the
        output only when vheight == 1).
    """
    data = data.copy()
    data[data<0] = 0 #clamp the negatives to 0 to make sure the moments computation doesn't fall over
    total = data.sum()
    # X varies along axis 0, Y along axis 1 (numpy.indices convention)
    X, Y = indices(data.shape)
    x = (X*data).sum()/total
    y = (Y*data).sum()/total
    # Profile through the centroid along axis 0 (the x direction).
    # Bug fix: the second moment must be taken about the x centroid (this
    # profile varies along x); it previously subtracted y, which skewed
    # width estimates for off-diagonal sources.
    col = data[:, int(y)]
    width_x = sqrt(abs((arange(col.size)-x)**2*col).sum()/col.sum())
    # Profile through the centroid along axis 1 (the y direction);
    # similarly taken about the y centroid (previously subtracted x).
    row = data[int(x), :]
    width_y = sqrt(abs((arange(row.size)-y)**2*row).sum()/row.sum())
    width = ( width_x + width_y ) / 2.
    # Background height as the modal pixel value. atleast_1d keeps this
    # working on both old SciPy (array mode) and SciPy >= 1.11 (scalar mode).
    height = atleast_1d(stats.mode(data.ravel())[0])[0] if vheight else 0
    amplitude = data.max()-height
    mylist = [amplitude,x,y]
    if vheight==1:
        mylist = [height] + mylist
    if circle==0:
        mylist = mylist + [width_x,width_y]
    else:
        mylist = mylist + [width]
    if rotate==1:
        mylist = mylist + [0.] #rotation "moment" is just zero...
    return tuple(mylist)
def twodgaussian(inpars, circle, rotate, vheight):
    """Build and return a 2d gaussian function of the form:
        x' = cos(rota) * x - sin(rota) * y
        y' = sin(rota) * x + cos(rota) * y
        (rota should be in degrees)
        g = b + a exp ( - ( ((x-center_x)/width_x)**2 +
        ((y-center_y)/width_y)**2 ) / 2 )
    where x and y are the input parameters of the returned function,
    and all other parameters are specified by this function.

    The parameters are passed as a flat sequence:
        inpars = (height,amplitude,center_x,center_y,width_x,width_y,rota)
    Elements are omitted according to the flags:
        circle=1  - single width (circular gaussian) instead of width_x/width_y
        rotate=0  - no trailing rotation angle
        vheight=0 - no leading constant background height

    Raises:
        ValueError: if more parameters are supplied than the flags allow.
    """
    inpars_old = inpars
    inpars = list(inpars)
    # constant background term (only present when vheight is set)
    height = float(inpars.pop(0)) if vheight == 1 else 0.0
    amplitude = float(inpars.pop(0))
    center_x = float(inpars.pop(0))
    center_y = float(inpars.pop(0))
    if circle == 1:
        # circular gaussian: one width shared by both axes
        width_x = width_y = float(inpars.pop(0))
    else:
        width_x = float(inpars.pop(0))
        width_y = float(inpars.pop(0))
    if rotate == 1:
        rota = pi/180. * float(inpars.pop(0))
        # rotate the centre into the rotated frame once, up front
        rcen_x = center_x * cos(rota) - center_y * sin(rota)
        rcen_y = center_x * sin(rota) + center_y * cos(rota)
    else:
        rcen_x, rcen_y = center_x, center_y
    if inpars:
        # leftover parameters mean the caller's flags and list length disagree
        raise ValueError("There are still input parameters:" + str(inpars) + \
                " and you've input: " + str(inpars_old) + " circle=%d, rotate=%d, vheight=%d" % (circle,rotate,vheight) )

    def rotgauss(x,y):
        # evaluate the gaussian at (x, y), rotating the inputs if requested
        if rotate==1:
            xp = x * cos(rota) - y * sin(rota)
            yp = x * sin(rota) + y * cos(rota)
        else:
            xp, yp = x, y
        return height + amplitude * exp(
            -(((rcen_x - xp)/width_x)**2 + ((rcen_y - yp)/width_y)**2)/2.)

    return rotgauss
def gaussfit(data,err=None,params=[],autoderiv=1,return_all=0,circle=0,rotate=1,vheight=1):
    """
    Gaussian fitter with the ability to fit a variety of different forms of 2-dimensional gaussian.
    Input Parameters:
        data - 2-dimensional data array
        err=None - error array with same size as data array
        params=[] - initial input parameters for Gaussian function.
            (height, amplitude, x, y, width_x, width_y, rota)
            if not input, these will be determined from the moments of the system,
            assuming no rotation
        autoderiv=1 - use the autoderiv provided in the lmder.f function (the alternative
            is to us an analytic derivative with lmdif.f: this method is less robust)
        return_all=0 - Default is to return only the Gaussian parameters. See below for
            detail on output
        circle=0 - default is an elliptical gaussian (different x, y widths), but can reduce
            the input by one parameter if it's a circular gaussian
        rotate=1 - default allows rotation of the gaussian ellipse. Can remove last parameter
            by setting rotate=0
        vheight=1 - default allows a variable height-above-zero, i.e. an additive constant
            for the Gaussian function. Can remove first parameter by setting this to 0
    Output:
        Default output is a set of Gaussian parameters with the same shape as the input parameters
        Can also output the covariance matrix, 'infodict' that contains a lot more detail about
        the fit (see scipy.optimize.leastsq), and a message from leastsq telling what the exit
        status of the fitting routine was
        Warning: Does NOT necessarily output a rotation angle between 0 and 360 degrees.
    """
    # len() check instead of `params == []`: comparing an ndarray to a list
    # with == is elementwise and raises on truth-testing
    if len(params) == 0:
        params = (moments(data,circle,rotate,vheight))
    # `is None` instead of `== None`: with an ndarray err, `err == None` is an
    # elementwise comparison whose truth value raises ValueError, so passing
    # an error array used to crash here
    if err is None:
        errorfunction = lambda p: ravel((twodgaussian(p,circle,rotate,vheight)(*indices(data.shape)) - data))
    else:
        # weight the residuals by the per-pixel uncertainties
        errorfunction = lambda p: ravel((twodgaussian(p,circle,rotate,vheight)(*indices(data.shape)) - data)/err)
    if autoderiv == 0:
        # the analytic derivative, while not terribly difficult, is less efficient and useful. I only bothered
        # putting it here because I was instructed to do so for a class project - please ask if you would like
        # this feature implemented
        raise ValueError("I'm sorry, I haven't implemented this feature yet.")
    else:
        p, cov, infodict, errmsg, success = optimize.leastsq(errorfunction, params, full_output=1)
    if return_all == 0:
        return p
    elif return_all == 1:
        return p,cov,infodict,errmsg
|
saopiccREPO_NAMEDDFacetPATH_START.@DDFacet_extracted@DDFacet-master@DDFacet@ToolsDir@gaussfitter2.py@.PATH_END.py
|
{
"filename": "filter_test.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/tests/filter_test.py",
"type": "Python"
}
|
import numpy as np
import vaex
import pyarrow as pa
def test_set_active_range_and_trim(df_factory):
    """Trimming after set_active_range should shrink the cached filter mask."""
    frame = df_factory(x=np.arange(8))
    frame = frame[(frame.x % 2) == 0]  # keep only the even numbers
    assert len(frame) == 4
    frame.set_active_range(2, 6)  # the range is expressed in unfiltered rows
    assert frame._cached_filtered_length == 2
    # the filter mask is still the original, untrimmed one
    assert frame._selection_masks[vaex.dataframe.FILTER_SELECTION_NAME].count() == 4
    trimmed = frame.trim()
    assert trimmed._cached_filtered_length == 2
    assert trimmed._selection_masks[vaex.dataframe.FILTER_SELECTION_NAME].count() == 2
    assert trimmed.x.tolist() == [2, 4]
def test_filter_cache():
    """Slicing, repr and length queries must all reuse the cached filter."""
    evaluations = 0  # how often the filter function is actually evaluated

    def odd_filter(x):
        nonlocal evaluations
        evaluations += 1
        return (x % 2) == 1

    df = vaex.from_arrays(x=np.arange(10))
    df.add_function("odd", odd_filter)
    filtered = df[df.func.odd("x")]
    len(filtered)
    assert evaluations == 1
    # derived slices should not re-evaluate the filter
    slice_a = filtered[1:2]
    slice_b = filtered[2:4]
    assert evaluations == 1
    repr(filtered)
    assert evaluations == 1
    len(slice_a)
    len(slice_b)
    assert evaluations == 1
    # even a slice of a slice reuses the cache
    nested = slice_b[1:2]
    assert evaluations == 1
    len(nested)
    assert evaluations == 1
def test_filter_by_boolean_column():
    """A boolean column can be used directly as a filter expression."""
    frame = vaex.from_scalars(x=1, ok=True)
    filtered = frame[frame.ok]
    assert filtered[["x"]].x.tolist() == [1]
# def test_slice_no_compute(df_factory):
# df = df_factory(x=np.arange(8))
# df = df[(df.x % 2) == 0] # even numbers
# # len(df) # trigger cache
# df.count()
# # assert df.x.sum() == 0+2+4+6
# dfs = df[1:3]
# assert dfs._cached_filtered_length == 2
# assert dfs.x.sum() == 2+4
# dfs = df[1:2]
# assert dfs._cached_filtered_length == 1
# assert dfs.x.sum() == 2
def test_filter_after_dropna(df_factory):
    """A filter stacked on top of dropna() should see only the kept rows."""
    # the last two rows are missing in every column
    frame = df_factory(
        x=pa.array([10, 20, 30, None, None]),
        y=pa.array([1, 2, 3, None, None]),
        z=pa.array(['1', '2', '3', None, None]),
    )
    # first filter: drop the missing rows (on a column subset)
    frame = frame['x', 'y'].dropna()
    # second filter: stacked on top of the first one
    result = frame[frame.x > 10]
    assert result.x.tolist() == [20, 30]
    assert result.y.tolist() == [2, 3]
def test_filter_arrow_string_scalar():
    """Comparisons against pyarrow scalars should behave like python strings."""
    colors = vaex.from_arrays(x=['red', 'green', 'blue'])
    assert colors[colors.x == pa.scalar('red')].x.tolist() == ['red']
    assert colors[colors.x == pa.scalar('green')].shape == (1, 1)
    assert colors[colors.x != pa.scalar('blue')].shape
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@tests@filter_test.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "NannyML/nannyml",
"repo_path": "nannyml_extracted/nannyml-main/nannyml/drift/univariate/__init__.py",
"type": "Python"
}
|
# Author: Niels Nuyttens <niels@nannyml.com>
#
# License: Apache Software License 2.0
"""Univariate drift detection methods, considering a single column at a time.
Some methods are applicable exclusively to continuous or categorical columns, others are applicable to both.
This package currently holds an implementation for the following univariate drift detection methods:
- Kolmogorov-Smirnov statistic (continuous)
- Wasserstein distance (continuous)
- Chi-squared statistic (categorical)
- L-infinity distance (categorical)
- Jensen-Shannon distance
- Hellinger distance
For more information, check out the `tutorial`_ or the `deep dive`_.
For help selecting the correct univariate drift detection method for your use case, check the `method selection guide`_.
.. _tutorial:
https://nannyml.readthedocs.io/en/stable/tutorials/detecting_data_drift/univariate_drift_detection.html
.. _deep dive:
https://nannyml.readthedocs.io/en/stable/how_it_works/univariate_drift_detection.html
.. _method selection guide:
https://nannyml.readthedocs.io/en/stable/how_it_works/univariate_drift_comparison.html
"""
from .calculator import UnivariateDriftCalculator
from .methods import FeatureType, Method, MethodFactory
from .result import Result
|
NannyMLREPO_NAMEnannymlPATH_START.@nannyml_extracted@nannyml-main@nannyml@drift@univariate@__init__.py@.PATH_END.py
|
{
"filename": "_visible.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/slider/currentvalue/_visible.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VisibleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for ``layout.slider.currentvalue.visible``."""

    def __init__(
        self, plotly_name="visible", parent_name="layout.slider.currentvalue", **kwargs
    ):
        # use the default edit type unless the caller overrides it
        edit_type = kwargs.pop("edit_type", "arraydraw")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@slider@currentvalue@_visible.py@.PATH_END.py
|
{
"filename": "test_ext_fourier_components.py",
"repo_name": "quatrope/feets",
"repo_path": "feets_extracted/feets-master/tests/extractors/test_ext_fourier_components.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2017 Juan Cabral
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
"""feets.extractors.ext_fourier_components Tests"""
# =============================================================================
# IMPORTS
# =============================================================================
from feets import extractors
# =============================================================================
# Test cases
# =============================================================================
def test_fourier_optional_data(periodic_lc_werror):
    """Removing the error column must change the extracted Fourier features."""
    lc_no_error = periodic_lc_werror.copy()
    lc_no_error["error"] = None
    extractor = extractors.FourierComponents()
    with_error = extractor.extract(features={}, **periodic_lc_werror)
    without_error = extractor.extract(features={}, **lc_no_error)
    assert with_error != without_error
|
quatropeREPO_NAMEfeetsPATH_START.@feets_extracted@feets-master@tests@extractors@test_ext_fourier_components.py@.PATH_END.py
|
{
"filename": "test_outputs.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/frontends/exodus_ii/tests/test_outputs.py",
"type": "Python"
}
|
from numpy.testing import assert_array_equal, assert_equal
from yt.testing import requires_file
from yt.utilities.answer_testing.framework import (
GenericArrayTest,
data_dir_load,
requires_ds,
)
out = "ExodusII/out.e"


@requires_file(out)
def test_out():
    """Basic sanity checks on the out.e dataset."""
    ds = data_dir_load(out)
    # every variable is exposed on every mesh, in mesh-major order
    meshes = ("all", "connect1", "connect2")
    variables = ("conv_indicator", "conv_marker", "convected", "diffused")
    expected_fields = [(mesh, var) for mesh in meshes for var in variables]
    assert_equal(str(ds), "out.e")
    assert_equal(ds.dimensionality, 3)
    assert_equal(ds.current_time, 0.0)
    assert_array_equal(ds.parameters["nod_names"], ["convected", "diffused"])
    assert_equal(ds.parameters["num_meshes"], 2)
    assert_array_equal(ds.field_list, expected_fields)
out_s002 = "ExodusII/out.e-s002"


@requires_file(out_s002)
def test_out002():
    """The continuation dataset should expose the same fields at t=2."""
    ds = data_dir_load(out_s002)
    # same mesh/variable combinations as out.e, in mesh-major order
    meshes = ("all", "connect1", "connect2")
    variables = ("conv_indicator", "conv_marker", "convected", "diffused")
    expected_fields = [(mesh, var) for mesh in meshes for var in variables]
    assert_equal(str(ds), "out.e-s002")
    assert_equal(ds.dimensionality, 3)
    assert_equal(ds.current_time, 2.0)
    assert_array_equal(ds.field_list, expected_fields)
gold = "ExodusII/gold.e"


@requires_file(gold)
def test_gold():
    """The gold.e dataset exposes a single 'forced' variable."""
    ds = data_dir_load(gold)
    expected_fields = [("all", "forced"), ("connect1", "forced")]
    assert_equal(str(ds), "gold.e")
    assert_array_equal(ds.field_list, expected_fields)
big_data = "MOOSE_sample_data/mps_out.e"


@requires_ds(big_data)
def test_displacement_fields():
    """Check mesh connectivity coordinates under different displacement-field
    settings (scale factor and offset per mesh)."""
    displacement_dicts = [
        {"connect2": (5.0, [0.0, 0.0, 0.0])},
        {"connect1": (1.0, [1.0, 2.0, 3.0]), "connect2": (0.0, [0.0, 0.0, 0.0])},
    ]
    for disp in displacement_dicts:
        ds = data_dir_load(big_data, displacements=disp)
        for mesh in ds.index.meshes:
            # Bind the current mesh as a default argument: closures are
            # late-binding, so without this every yielded test evaluated
            # after the loop would see the mesh of the final iteration.
            def array_func(mesh=mesh):
                return mesh.connectivity_coords

            yield GenericArrayTest(ds, array_func, 12)
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@frontends@exodus_ii@tests@test_outputs.py@.PATH_END.py
|
{
"filename": "GP.py",
"repo_name": "markfortune/luas",
"repo_path": "luas_extracted/luas-main/src/luas/GP.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from functools import partial
import jax
from jax import jit
import jax.numpy as jnp
from jax.flatten_util import ravel_pytree
from .jax_convenience_fns import (
array_to_pytree_2D,
pytree_to_array_2D,
order_list,
large_hessian_calc,
transf_from_unbounded_params,
transf_to_unbounded_params,
varying_params_wrapper,
)
from typing import Any, Optional, Callable, Union, Dict, Tuple
from .luas_types import Scalar, PyTree, JAXArray, Kernel
__all__ = ["GP"]
# Ensure we are using double precision floats as JAX uses single precision by default
jax.config.update("jax_enable_x64", True)
class GP(object):
"""Gaussian process class specialised to make the analysis of 2D data sets simple and efficient.
Can be used with any :class:`Kernel` such as :class:`LuasKernel` to perform
the required log-likelihood calculations in addition to performing GP regression.
Support for calculating Laplace approximations of the posterior with respect to
the input parameters is also provided as it can be very useful when sampling large numbers of parameters
(such as for tuning the MCMC).
Must have two separate input dimensions which are used to compute the covariance matrix and may additionally
be used to compute the deterministic mean function. The observed data ``Y`` is assumed to have shape ``(N_l, N_t)``
and will be a parameter of most methods.
The first input ``x_l`` is the wavelength/vertical dimension of the observed data ``Y`` and is expected to
have shape ``(N_l,)`` or ``(d_l, N_l)`` where N_l is the number of rows of ``Y`` and ``d_l`` is the number
of regression variables along the wavelength/vertical dimension used for kernel inputs and/or mean function inputs.
Similarly ``x_t`` is assumed to lie along the time/horizontal dimension of the observed data with shape ``(N_t,)`` or
``(d_t, N_t)`` where ``N_t`` is the number of columns of ``Y`` and ``d_t`` is the number of regression variables
along the column dimension used as kernel inputs and/or mean function inputs.
Args:
kf (Kernel): Kernel object which has already been initialised with the desired kernel function.
x_l (JAXArray): Array containing wavelength/vertical dimension regression variable(s).
May be of shape ``(N_l,)`` or ``(d_l,N_l)`` for ``d_l`` different wavelength/vertical
regression variables.
x_t (JAXArray): Array containing time/horizontal dimension regression variable(s).
May be of shape ``(N_t,)`` or ``(d_t,N_t)`` for ``d_t`` different time/horizontal
regression variables.
mf (Callable, optional): The deterministic mean function, by default returns a JAXArray of zeros.
Needs to be in the format ``mf(params, x_l, x_t)`` and returns a JAXArray of shape ``(N_l, N_t)``.
matching the shape of the observed data Y.
logPrior (Callable, optional): Log prior function, by default returns zero.
Needs to be in the format ``logPrior(params)`` and return a scalar.
jit (bool, optional): Whether to ``jax.jit`` compile the likelihood, posterior and GP prediction
functions. If set to ``False`` then mean functions not written in ``JAX`` are supported and
can still be used with ``PyMC`` (but not ``NumPyro`` which requires JIT compilation).
Defaults to ``True``.
"""
def __init__(
self,
kf: Kernel,
x_l: JAXArray,
x_t: JAXArray,
mf: Optional[Callable] = None,
logPrior: Optional[Callable] = None,
jit: Optional[bool] = True,
):
# Initialise variables. Due to the way JAX's JIT compilation works,
# any variables initialised here should not be modified but instead
# a new GP object should be initialised.
self.kf = kf
self.x_l = x_l
self.x_t = x_t
# The accepted shapes for each dimension are (N_l,) and (d_l, N_l)
# so take the length of the last dimension to get the dimension of the
# wavelength and time dimensions
self.N_l = self.x_l.shape[-1]
self.N_t = self.x_t.shape[-1]
if mf is None:
# Mean function returns zeros by default
# Returns a zero array which can vary in shape depending on the inputs
# x_l and x_t which permits GP regression to predict unobserved points
# of different array sizes
self.mf = lambda p, x_l, x_t: jnp.zeros((x_l.shape[-1], x_t.shape[-1]))
else:
# Ensure mean function is of the form mf(p, x_l, x_t) and for the
# observed inputs x_l, x_t should return an array of the same shape
# as the observed data Y.
self.mf = mf
if logPrior is None:
# Log Prior function returns zero by default
self.logPrior = lambda p: 0.
else:
# Custom logPrior function which must take only a single argument p
self.logPrior = logPrior
if jit:
# Have to option to avoid JIT compiling which can sometimes be useful
# e.g. Using a mean function which is either in JAX but cannot be JIT compiled
# Or a mean function which contains code not written in JAX
# Note that NumPyro will perform JIT compilation anyway but PyMC will not
self.logL = jax.jit(self.logL)
self.logL_stored = jax.jit(self.logL_stored)
self.logP = jax.jit(self.logP)
self.logP_stored = jax.jit(self.logP_stored)
self.logL_hessianable = jax.jit(self.logL_hessianable)
self.logL_hessianable_stored = jax.jit(self.logL_hessianable_stored)
self.logP_hessianable = jax.jit(self.logP_hessianable)
self.logP_hessianable_stored = jax.jit(self.logP_hessianable_stored)
def calculate_stored_values(self, p: PyTree) -> PyTree:
"""Calculate a PyTree of stored values from the decomposition of the covariance matrix.
The values stored depend on the choice of Kernel object and are returned by its decomp_fn method.
E.g. for :class:`LuasKernel` this will include eigenvalues and matrices calculated from the
eigendecompositions of its component covariance matrices.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix.
Any mean function parameters included will not affect this function.
Returns:
PyTree: Stored values from the decomposition of the covariance matrix. The specific
values contained in this PyTree depend on the choice of Kernel object and are returned by the
decomp_fn method of each :class:`Kernel` class.
"""
return self.kf.decomp_fn(p, self.x_l, self.x_t, stored_values = {})
def logL(
self,
p: PyTree,
Y: JAXArray,
) -> Scalar:
"""Computes the log likelihood without returning any stored values from the
decomposition of the covariance matrix.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
Returns:
Scalar: The value of the log likelihood.
"""
# Calculate the residuals after subtraction of the deterministic mean function
R = Y - self.mf(p, self.x_l, self.x_t)
# Use the specific log likelihood calculation of the chosen Kernel object
# to compute the log likelihood and any stored values from the decomposition
# are also returned by default but not returned by this method
logL, stored_values = self.kf.logL(p, self.x_l, self.x_t, R, {})
return logL
def logL_stored(
self,
p: PyTree,
Y: JAXArray,
stored_values: PyTree,
) -> Tuple[Scalar, PyTree]:
"""Computes the log likelihood and also returns any stored values from the decomposition of
the covariance matrix. This allows time to be saved in future log likelihood calculations in
which some hyperparameters are either fixed or being sampled separately with Gibbs/Blocked Gibbs
sampling.
Note:
This function will not give correct second order derivatives/hessian values (e.g. calculated
using `jax.hessian`). Make sure to use `GP.logL_hessianable_stored` if any hessian calculations
are required.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
stored_values (PyTree): Stored values from the decomposition of the covariance matrix. The specific
values contained in this PyTree depend on the choice of :class:`Kernel` object and are returned by
``Kernel.decomp_fn``.
Returns:
(Scalar, PyTree): A tuple where the first element is the value of the log likelihood.
The second element is a PyTree which contains stored values from the decomposition of the
covariance matrix.
"""
R = Y - self.mf(p, self.x_l, self.x_t)
logL, stored_values = self.kf.logL(p, self.x_l, self.x_t, R, stored_values)
return logL, stored_values
def logP(
self,
p: PyTree,
Y: JAXArray,
) -> Scalar:
"""Computes the log posterior without returning any stored values from the
decomposition of the covariance matrix.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Also input to the logPrior function for the calculation of the log priors.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
Returns:
Scalar: The value of the log posterior.
"""
R = Y - self.mf(p, self.x_l, self.x_t)
logL, stored_values = self.kf.logL(p, self.x_l, self.x_t, R, {})
logPrior = self.logPrior(p)
logP = logPrior + logL
return logP
def logP_stored(
self,
p: PyTree,
Y: JAXArray,
stored_values: PyTree,
) -> Tuple[Scalar, PyTree]:
"""Computes the log posterior and also returns any stored values from the decomposition of the
covariance matrix. This allows time to be saved in future log likelihood calculations in
which some hyperparameters are either fixed or being sampled separately with Gibbs/Blocked Gibbs
sampling.
Note:
This function will not give correct second order derivatives/hessian values (e.g. calculated
using `jax.hessian`). Make sure to use `GP.logP_hessianable_stored` if any hessian calculations
are required.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Also input to the logPrior function for the calculation of the log priors.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
stored_values (PyTree): Stored values from the decomposition of the covariance matrix. The specific
values contained in this PyTree depend on the choice of :class:`Kernel` object and are returned by
``Kernel.decomp_fn``.
Returns:
(Scalar, PyTree): A tuple where the first element is the value of the log posterior.
The second element is a PyTree which contains stored values from the decomposition of the
covariance matrix.
"""
R = Y - self.mf(p, self.x_l, self.x_t)
logL, stored_values = self.kf.logL(p, self.x_l, self.x_t, R, stored_values)
logPrior = self.logPrior(p)
logP = logPrior + logL
return logP, stored_values
def predict(
self,
p: PyTree,
Y: JAXArray,
x_l_pred: Optional[JAXArray] = None,
x_t_pred: Optional[JAXArray] = None,
return_std_dev: Optional[bool] = True,
**kwargs,
) -> Tuple[JAXArray, JAXArray, JAXArray]:
r"""Performs GP regression and computes the GP predictive mean and the GP predictive
uncertainty as the standard devation at each location or else can return the full
covariance matrix. Requires the input kernel function(s) to have a ``wn`` keyword
argument that defines the kernel when white noise is included (``wn = True``) and
when white noise isn't included (``wn = False``).
Currently assumes the same input hyperparameters for both the observed and predicted
locations. The predicted locations ``x_l_pred`` and ``x_t_pred`` may deviate from
the observed locations ``x_l`` and ``x_t`` however.
The GP predictive mean is defined as:
.. math::
\mathbb{E}[\vec{y}_*] = \vec{\mu}_* + \mathbf{K}_*^T \mathbf{K}^{-1} \vec{r}
And the GP predictive covariance is given by:
.. math::
Var[\vec{y}_*] = \mathbf{K}_{**} - \mathbf{K}_*^T \mathbf{K}^{-1} \mathbf{K}_*
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
x_l_pred (JAXArray, optional): Prediction locations along the row dimension,
defaults to observed input locations.
x_t_pred (JAXArray, optional): Prediction locations along the column dimension,
defaults to observed input locations.
return_std_dev (bool, optional): If ``True`` will return the standard deviation ofuncertainty
at the predicted locations. Otherwise will return the full predictive covariance matrix.
Defaults to ``True``.
Returns:
(JAXArray, JAXArray, JAXArray): Returns a tuple of three elements, where the first element is
the GP predictive mean at the prediction locations, the second element is either the
standard deviation of the predictions if ``return_std_dev = True``, otherwise it will be
the full covariance matrix of the predicted values. The third element will be the mean function
evalulated at the prediction locations.
"""
# If no prediction locations specified, predict at observed locations
if x_l_pred is None:
x_l_pred = self.x_l
if x_t_pred is None:
x_t_pred = self.x_t
# Generate mean function and compute residuals
R = Y - self.mf(p, self.x_l, self.x_t)
M_pred = self.mf(p, x_l_pred, x_t_pred)
# Kernel object computes GP regression as the most efficient method depends on the form of
# the kernel function
gp_mean, pred_err = self.kf.predict(p, self.x_l, x_l_pred, self.x_t, x_t_pred, R, M_pred,
return_std_dev = return_std_dev, **kwargs)
return gp_mean, pred_err, M_pred
def sigma_clip(
self,
p: PyTree,
Y:JAXArray,
sigma: Scalar,
plot: Optional[bool] = True,
use_gp_mean: Optional[bool] = True,
) -> JAXArray:
"""Performs GP regression and replaces any outliers above a given number of standard deviations
with the GP predictive mean evaluated at those locations. If ``use_gp_mean = False`` then will instead
replace outliers with the mean function evaluated at each location.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
sigma (Scalar): Significance value in standard deviations above which outliers will be clipped.
plot (bool, optional): Whether to produce plots which visualise the outliers in the data.
use_gp_mean (bool, optional): Will replace outliers with values from the GP predictive mean if ``True``,
otherwise will replace with values from the mean function.
Returns:
JAXArray: The observed data with outliers cleaned.
"""
# First perform the GP regression with the predicted locations defaulting to the observed locations
gp_mean, sigma_diag, M = self.predict(p, Y)
# Residuals after subtraction of the GP mean are normalised based on their predicted uncertainties
res = Y - gp_mean
Z = jnp.abs(res/sigma_diag)
# Identify outliers above given significance level
outliers = Z > sigma
# Create a copy of the observed data with outliers replaced with the GP predictive mean values
Y_clean = jnp.array(Y.copy())
if use_gp_mean:
Y_clean = Y_clean.at[outliers].set(gp_mean[outliers])
else:
Y_clean = Y_clean.at[outliers].set(M[outliers])
print("Number of outliers clipped = ", (outliers).sum())
# Some convenient plots to visualise the locations of the outliers
if plot:
plt.title("Std. Dev. of Residuals")
plt.imshow(Z, aspect = 'auto')
plt.colorbar()
plt.show()
if outliers.sum() > 0:
plt.title("Locations of Outliers Removed")
plt.imshow(Y, aspect = 'auto')
y, x = jnp.where(outliers)
plt.scatter(x, y, color='red', marker='x')
plt.show()
return Y_clean
def plot(
self,
p: PyTree,
Y: JAXArray,
x_l_plot = None,
x_t_plot = None,
**kwargs,
) -> plt.Figure:
"""Visualises the fit to the data. Displays the observed data as well as the mean function,
the GP predictive mean (not including the mean function) and the residuals of the data
after subtraction of the GP predictive mean (including the mean function).
For a good fit to the data, the data minus the GP predictive mean should consist of
white noise with no remaining correlations. The GP predictive mean (not including the mean function)
should also just be fitting correlated noise and should not look like its fitting the mean function.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
x_l_plot (JAXArray, optional): The values on the y-axis used by ``plt.pcolormesh`` for the plot.
If not included will default to ``x_l`` if ``x_l`` is of shape ``(N_l,)`` or to ``x_l[0, :]``
if ``x_l`` is of shape ``(d_l, N_l)``.
x_t_plot (JAXArray, optional): The values on the x-axis used by ``plt.pcolormesh`` for the plot.
If not included will default to ``x_t`` if ``x_t`` is of shape ``(N_t,)`` or to ``x_t[0, :]``
if ``x_t`` is of shape ``(d_t, N_t)``.
Returns:
plt.Figure: The figure object containing the plot produced.
"""
# If no x and y axes for the plots specified, defaults to x_l, x_t
# If x_l or x_t contain multiple rows then pick the first row
if x_l_plot is None:
if self.x_l.ndim == 1:
x_l_plot = self.x_l
else:
x_l_plot = self.x_l[0, :]
if x_t_plot is None:
if self.x_t.ndim == 1:
x_t_plot = self.x_t
else:
x_t_plot = self.x_t[0, :]
# Perform GP regression at the observed data locations
gp_mean, gp_cov, M = self.predict(p, Y, **kwargs)
fig = plt.figure(figsize = (20, 5))
ax = fig.subplots(1, 4, sharey = True)
# First plot is just the observed data
ax[0].set_title("Data")
ax[0].pcolormesh(x_t_plot, x_l_plot, Y, shading = "nearest")
# Second plot is just the deterministic mean function
ax[1].set_title("Mean function")
ax[1].pcolormesh(x_t_plot, x_l_plot, M, shading = "nearest")
# Third plot is the GP mean fit to the data without the mean function included
ax[2].set_title("GP mean (excl. mean function)")
ax[2].pcolormesh(x_t_plot, x_l_plot, gp_mean - M, shading = "nearest")
# Final plot is the residuals of the observed data after subtraction of the GP
# predictive mean (including the deterministic mean function)
ax[3].set_title("Residual noise")
ax[3].pcolormesh(x_t_plot, x_l_plot, Y - gp_mean, shading = "nearest")
# Label axes
ax[0].set_ylabel(r'$x_l$')
for i in range(4):
ax[i].set_xlabel(r'$x_t$')
# pcolormesh defaults to having the y-axis decrease with height which is weird so invert it
plt.gca().invert_yaxis()
return fig
def autocorrelate(
self,
p: PyTree,
Y: JAXArray,
max_sep_l: Optional[int] = None,
max_sep_t: Optional[int] = None,
include_gp_mean: Optional[bool] = True,
mat: Optional[JAXArray] = None,
plot: Optional[bool] = True,
plot_kwargs: Optional[dict] = None,
zero_centre: Optional[bool] = False,
cov: Optional[bool] = False,
) -> Union[JAXArray, Tuple[JAXArray, plt.Figure]]:
"""Performs a quick (and approximate) 2D autocorrelation using ``jax.scipy.signal.correlate2d``
on the observed data after subtraction of the GP predictive mean to examine if there is any
remaining correlation in the residuals.
Note:
This function also assumes the data is evenly spaced in both dimensions. It is also
not an exact autocorrelation as the mean is not subtracted for each set of residuals and therefore
it is assumed the residuals always have mean zero. Also instead of dividing by the standard deviations
of the specific residuals being multiplied together, all values are divided by the overall variance
of the residuals. This can result in some values having correlation lying outside the interval [-1, 1]
but runs very efficiently and should be reasonably accurate unless considering correlations between
widely separated parts of the data. For this reason, by default only half the separation of the data
is visualised in the plots.
If ``include_gp_mean = False`` then shows the autocorrelation of the observed data minus
the mean function (without the GP predictive mean) which is useful for visualising
what kernel function to use when fitting with a GP.
Can also just input a general matrix ``mat`` to run an autocorrelation on, in which case the inputs
``p`` and ``Y`` are ignored.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
max_sep_l (int, optional): The maximum separation of wavelengths/rows to visualise the correlation of.
This is given as an integer referring to the number of rows apart to show. Defaults to half the
number of rows in the observed data ``Y``.
max_sep_t (int, optional): The maximum separation of time/columns to visualise the correlation of. This
is given as an integer referring to the number of columns apart to show. Defaults to half the
number of columns in the observed data ``Y``.
include_gp_mean (bool, optional): Whether to subtract the GP predictive mean from the observed data
when calculating the residuals. If ``False`` will still subtract the deterministic mean function.
Useful for visualising correlation in a data set to aid in kernel choice before fitting with a GP.
mat (JAXArray, optional): Instead of using the residuals of the observed data, providing a
matrix for this argument will calculate the autocorrelation of this given matrix instead.
plot (bool, optional): If ``True`` then will produce a plot visualising the autocorrelation.
Defaults to ``True``.
zero_centre (bool, optional): Whether to set the correlation at zero separation to 0.
Since the correlation at zero separation is always 1 it can make it hard to visualise
correlation values which are very small so setting this to ``True`` can aid visualisation.
Defaults to ``False``.
cov (bool, optional): If ``True`` will return the autocovariance matrix instead.
Returns:
JAXArray or (JAXArray, plt.Figure): Returns the autocorrelation matrix of the residuals. If
``plot = True`` then will return a tuple with the generated ``plt.Figure`` also added.
"""
# Sets the maximum separation to visualise the autocorrelation for
# Once separations are too large the autocorrelation becomes very noisy
# as it is based off of very few values
if mat is not None:
# If a general matrix mat is given, defaults to autocorrelation of half its length
# in each dimension
if max_sep_l is None:
max_sep_l = mat.shape[0]//2
if max_sep_t is None:
max_sep_t = mat.shape[1]//2
# Set residuals matrix equal to mat
res = mat
else:
# For a matrix of residuals will also default to half the length of the data in
# each dimension
if max_sep_l is None:
max_sep_l = self.N_l//2
if max_sep_t is None:
max_sep_t = self.N_t//2
if include_gp_mean:
# Perform GP regression
gp_mean, sigma_diag, M = self.predict(p, Y)
# Includes GP mean fit to data when subtracting from observed data
res = Y - gp_mean
else:
M = self.mf(p, self.x_l, self.x_t)
# Only calculates the observed data minus the deterministic mean function
res = Y - M
# Performs the autocorrelation of the res matrix
auto_corr = jax.scipy.signal.correlate2d(res, res)
# jax.scipy.signal.correlate2d will pad with zeros as necessary for the autocorrelation
# which will artificially reduce the strength of correlation at large separations.
# These lines autocorrelate a matrix of ones to divide auto_corr by so that a values
# with more zeros used for padding will be divided through by lower numbers
# This should normalise things correctly and ensures if the residuals are constant
# then they will produce a correlation of ones everywhere.
ones = jnp.ones_like(res)
auto_corr_ones = jax.scipy.signal.correlate2d(ones, ones)
auto_corr /= auto_corr_ones
# Find the centre of the auto_corr 2D array
n_l, n_t = auto_corr.shape
auto_corr_centre = ((n_l-1)//2, (n_t-1)//2)
if not cov:
# Unless calculating the autocovariance we divide by the variance at zero separation
auto_corr /= auto_corr[auto_corr_centre[0], auto_corr_centre[1]]
if zero_centre:
# Can be helpful to zero the centre (which will always show correlation = 1)
# to help visualise weaker correlation at non-zero separations
auto_corr = auto_corr.at[auto_corr_centre[0], auto_corr_centre[1]].set(0.)
if plot:
if mat is None and plot_kwargs is None:
# Calculate the x and y axes values assuming equal separation of data points
# It is assumed if giving values to plot_kwargs that this will be handled by the user
# First check if x_l and x_t contain multiple rows in which case pick the top row
if self.x_l.ndim > 1:
x_l_plot = self.x_l[:, 0]
else:
x_l_plot = self.x_l
if self.x_t.ndim > 1:
x_l_plot = self.x_t[:, 0]
else:
x_l_plot = self.x_t
# Calculates the average separation between points along each dimension
l_step = x_l_plot.ptp()/(self.N_l-1)
t_step = x_l_plot.ptp()/(self.N_t-1)
# Calculate the maximum separations in each dimension in values given in x_l and x_t
extent = [t_step*-(max_sep_t+0.5), t_step*(max_sep_t+0.5),
l_step*(max_sep_l+0.5), l_step*-(max_sep_l+0.5)]
else:
extent = None
# Select the correct range of separations to plot
l_plot_range = [auto_corr_centre[0]-max_sep_l, auto_corr_centre[0]+max_sep_l+1]
t_plot_range = [auto_corr_centre[1]-max_sep_t, auto_corr_centre[1]+max_sep_t+1]
if plot_kwargs:
fig = plt.imshow(auto_corr[l_plot_range[0]:l_plot_range[1], t_plot_range[0]:t_plot_range[1]],
**plot_kwargs)
else:
fig = plt.imshow(auto_corr[l_plot_range[0]:l_plot_range[1], t_plot_range[0]:t_plot_range[1]],
aspect = 'auto', interpolation = "none", extent = extent)
plt.xlabel(r"$\Delta$t")
plt.ylabel(r"$\Delta$l")
plt.colorbar()
return auto_corr, fig
else:
# If not plotting then just returns the autocorrelation matrix
return auto_corr
def logL_hessianable(
self,
p: PyTree,
Y: JAXArray,
) -> Scalar:
"""Computes the log likelihood without returning any stored values from the
decomposition of the covariance matrix. This function is slower for gradient calculations
than ``GP.logL`` but is more numerically stable for second-order derivative calculations as
required when calculating the hessian. This function still only returns the log likelihood
so ``jax.hessian`` must be applied to return the hessian of the log likelihood.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
Returns:
Scalar: The value of the log likelihood.
"""
# Subtract mean function from observed data
R = Y - self.mf(p, self.x_l, self.x_t)
# Calculate log likelihood and stored values from decomposition
logL, stored_values = self.kf.logL_hessianable(p, self.x_l, self.x_t, R, {})
return logL
def logL_hessianable_stored(
self,
p: PyTree,
Y: JAXArray,
stored_values: PyTree,
) -> Tuple[Scalar, PyTree]:
"""Computes the log likelihood and also returns any stored values from the
decomposition of the covariance matrix. This function is slower for gradient calculations
than ``GP.logL_stored`` but is more numerically stable for second-order derivative calculations as
required when calculating the hessian. This function still only returns the log likelihood
so jax.hessian must be applied to return the hessian of the log likelihood.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
stored_values (PyTree): Stored values from the decomposition of the covariance matrix. The specific
values contained in this PyTree depend on the choice of :class:`Kernel` object and are returned by
``Kernel.decomp_fn``.
Returns:
(Scalar, PyTree): A tuple where the first element is the value of the log likelihood.
The second element is a PyTree which contains stored values from the decomposition of the
covariance matrix.
"""
# Subtract mean function from observed data
R = Y - self.mf(p, self.x_l, self.x_t)
# Calculate log likelihood and stored values from decomposition
logL, stored_values = self.kf.logL_hessianable(p, self.x_l, self.x_t, R, {})
return logL, stored_values
def logP_hessianable(
self,
p: PyTree,
Y: JAXArray,
) -> Scalar:
"""Computes the log posterior without returning any stored values from the
decomposition of the covariance matrix. This function is slower for gradient calculations
than ``GP.logP`` but is more numerically stable for second-order derivative calculations as
required when calculating the hessian. This function still only returns the log posterior
so jax.hessian must be applied to return the hessian of the log posterior.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Also input to the logPrior function for the calculation of the log priors.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
Returns:
Scalar: The value of the log posterior.
"""
logPrior = self.logPrior(p)
R = Y - self.mf(p, self.x_l, self.x_t)
logL, stored_values = self.kf.logL_hessianable(p, self.x_l, self.x_t, R, {})
logP = logPrior + logL
return logP
def logP_hessianable_stored(
self,
p: PyTree,
Y: JAXArray,
stored_values: PyTree,
) -> Tuple[Scalar, PyTree]:
"""Computes the log posterior and also returns any stored values from the decomposition of the
covariance matrix.
Note:
This function is slower for gradient calculations than ``GP.logP_stored`` but is more numerically
stable for second-order derivative calculations as required when calculating the hessian.
This function still only returns the log posterior so ``jax.hessian`` must be applied to return
the hessian of the log posterior.
Args:
p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
in addition to any mean function parameters which may be needed to calculate the mean function.
Also input to the logPrior function for the calculation of the log priors.
Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
stored_values (PyTree): Stored values from the decomposition of the covariance matrix. The specific
values contained in this PyTree depend on the choice of :class:`Kernel` object and are returned by
``Kernel.decomp_fn``.
Returns:
(Scalar, PyTree): A tuple where the first element is the value of the log posterior.
The second element is a PyTree which contains stored values from the decomposition of the
covariance matrix.
"""
R = Y - self.mf(p, self.x_l, self.x_t)
logL, stored_values = self.kf.logL_hessianable(p, self.x_l, self.x_t, R, stored_values)
logPrior = self.logPrior(p)
logP = logPrior + logL
return logP, stored_values
    def laplace_approx(
        self,
        p: PyTree,
        Y: PyTree,
        regularise: Optional[bool] = True,
        regularise_const: Optional[Scalar] = 100.,
        vars: Optional[list] = None,
        fixed_vars: Optional[list] = None,
        return_array: Optional[bool] = False,
        large: Optional[bool] = False,
        large_block_size: Optional[int] = 50,
        large_jit: Optional[bool] = True,
        logP_fn: Optional[Callable] = None,
        hessian_mat: Optional[JAXArray] = None,
    ) -> Tuple[Union[PyTree, JAXArray], list]:
        r"""Computes the Laplace approximation at the location of ``p`` with options to regularise
        values which are poorly constrained. The parameters in ``p`` should be best-fit values of the posterior.

        The Laplace approximation is an estimate of the posterior distribution at the location of best-fit.
        It assumes the best-fit location is the mean of the Gaussian and calculates the covariance matrix
        based on approximating the value of the Hessian at the location of best-fit. By taking the negative
        inverse of the Hessian matrix this should give an approximate covariance matrix assuming the posterior
        is close to a Gaussian distribution. It is equivalent to a second-order Taylor series approximation of
        the posterior at the location of best-fit.

        The Laplace approximation is useful to get a quick approximation of the posterior without having to
        run an expensive MCMC calculation. Can also be useful for initialising MCMC inference with a good
        tuning matrix when large numbers of parameters which may contain strong correlations are being sampled.

        Note:
            This calculation can be memory intensive for large data sets with many free parameters and so
            setting ``large = True`` and ensuring ``large_block_size`` is a low integer can help reduce memory
            costs by breaking up the hessian calculation into blocks of rows.

        Args:
            p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
                in addition to any mean function parameters which may be needed to calculate the mean function.
                Also input to the ``logPrior`` function for the calculation of the log priors.
            Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
            regularise (bool, optional): Whether to add a regularisation constant to the diagonal of the hessian matrix
                corresponding to diagonals which are negative along the diagonals of the resulting covariance matrix.
                Defaults to ``True``.
            regularise_const (bool, optional): The constant added to diagonals of the hessian matrix to regularise it
                given that regularise is set to ``True``. Defaults to 100.
            vars (:obj:`list` of :obj:`str`, optional): The ``list`` of keys names corresponding to
                the parameters we want to calculate the Laplace approximation with respect to.
                The remaining parameters will be assumed to be fixed. If specified in addition to
                fixed_vars will raise an Exception.
            fixed_vars (:obj:`list` of :obj:`str`, optional): Alternative to vars, may specify instead
                the parameters being kept fixed which will not be marginalised over in the Laplace approximation.
                If specified in addition to vars will raise an Exception.
            return_array (bool, optional): Whether to return the approximated covariance matrix as a JAXArray or
                as a nested PyTree where e.g. the covariance between parameters named p1 and p2 is given by
                ``cov_mat[p1][p2]`` and ``cov_mat[p2][p1]``.
            large (bool, optional): Calculating the hessian matrix for large data sets with many parameters can be
                very memory intensive. If this is set to True then the hessian will be calculated in groups of rows
                instead of all at once which reduces the memory cost but can take significantly longer to run.
                The calculation is otherwise the same with no approximation made. Defaults to False.
            large_block_size (int, optional): If large is set to True and the hessian is being calculated in groups of rows
                can specify how many rows are being calculated simultaneously. Large numbers may calculate the overall hessian
                faster but at greater memory cost.
            large_jit (bool, optional): Whether to JIT compile the hessian function when ``large = True``,
                can speed up the calculation assuming the function can be JIT compiled. Defaults to ``True``.
            logP_fn (Callable, optional): Alternative log posterior function to take the hessian of. Must take
                the parameter PyTree ``p`` and the observed data ``Y`` as its two arguments. Defaults to
                ``GP.logP_hessianable``.
            hessian_mat (JAXArray, optional): Instead of calculating the hessian matrix (needed for the Laplace approximation)
                from the input parameters ``p`` and ``Y`` just provide the hessian matrix directly.
                Assumed to be a JAXArray and not a PyTree. The input parameters ``p`` and ``Y`` will be ignored.

        Returns:
            (JAXArray, :obj:`list` of :obj:`str`) or (PyTree, :obj:`list` of :obj:`str`): Returns a tuple of two elements,
            if ``return_array = True`` the first element will be the covariance matrix from the Laplace approximation
            as a JAXArray, otherwise it will be as a nested PyTree. The second element will be the order of the parameters
            in the returned covariance matrix if it is a JAXArray. This list is also returned when ``return_array = False``
            for consistency. The order of the list matches how ``jax.flatten_util.ravel_pytree`` will order keys from a PyTree.

        """
        # Default to the hessian-stable log posterior when no alternative is given
        if logP_fn is None:
            logP_fn = self.logP_hessianable

        # This function returns a PyTree p_vary which has only the (key, value) pairs of
        # parameters being varied
        # make_p is also returned which is a function with returns the parameters in p_vary
        # with all fixed parameters added back in
        p_vary, make_p = varying_params_wrapper(p, vars = vars, fixed_vars = fixed_vars)

        # Only compute the hessian if one has not been supplied directly via hessian_mat
        if hessian_mat is None:
            # Generate a wrapper function which takes only the parameters being varied and
            # calculates the log posterior (this avoids calculating derivatives of fixed parameters)
            logP_hessianable_wrapper = lambda p_vary: logP_fn(make_p(p_vary), Y)

            if large:
                # For large data sets and many free parameters, breaking up the hessian calculation
                # into blocks of rows of size large_block_size has a much lower memory cost
                # (return_array = True means the result is already a 2D JAXArray)
                hessian_mat = large_hessian_calc(logP_hessianable_wrapper, p_vary, block_size = large_block_size,
                                                 return_array = True, jit = large_jit)
            else:
                # If memory cost is not an issue then directly calculating the full hessian is faster
                hessian_mat = jax.hessian(logP_hessianable_wrapper)(p_vary)

                # For inverting to a covariance matrix we need to convert the nested PyTree returned by
                # jax.hessian into a matrix which we can do with this helper function from luas.jax_convenience_fns
                hessian_mat = pytree_to_array_2D(p_vary, hessian_mat)

        # Help symmetrise matrix which can help mitigate numerical errors
        hessian_mat = (hessian_mat + hessian_mat.T)/2.

        # Performs the actual Laplace approximation by inverting the negative hessian
        cov_mat = jnp.linalg.inv(-hessian_mat)

        if regularise:
            # Test if the diagonals of the covariance matrix are positive
            cov_diag = jnp.diag(cov_mat)
            neg_ind = cov_diag < 0.
            num_neg_diag_vals = neg_ind.sum()

            if num_neg_diag_vals == 0:
                print("No regularisation needed to remove negative values along diagonal of covariance matrix.")
            else:
                # Subtract regularise_const from the diagonal hessian elements which correspond to negative
                # values in the covariance matrix which will help to regularise for large enough regularise_const
                regularise_vec = regularise_const*neg_ind
                hessian_mat -= jnp.diag(regularise_vec)

                # Calculate the new Laplace approximation with regularisation
                cov_mat = jnp.linalg.inv(-hessian_mat)

                # Help to describe which values were regularised
                # Identifies which parameters the negative diagonal elements correspond to
                # (make_p_dict maps a flat array back onto the PyTree structure of p_vary)
                p_arr, make_p_dict = ravel_pytree(p_vary)
                regularised_values = make_p_dict(neg_ind)

                # Only include values which needed to be regularised when printing
                for par in p_vary.keys():
                    if not jnp.any(regularised_values[par]):
                        del regularised_values[par]

                # Check if there are any remaining negative values along the diagonal of the covariance matrix
                cov_diag = jnp.diag(cov_mat)
                neg_ind = cov_diag < 0.
                num_neg_diag_vals_remaining = neg_ind.sum()

                if num_neg_diag_vals_remaining > 0:
                    # Identify which parameters are still resulting in negatives along the diagonal
                    values_still_negative = make_p_dict(neg_ind)

                    # Only include values which still need to be regularised when printing
                    for par in p_vary.keys():
                        if not jnp.any(values_still_negative[par]):
                            del values_still_negative[par]

                    print(f"Initial number of negative values on diagonal of covariance matrix = {num_neg_diag_vals}\n" \
                          f"Corresponding to parameters: {regularised_values}.\n" \
                          f"Remaining number of negative values = {num_neg_diag_vals_remaining}\n" \
                          f"Corresponding to parameters: {values_still_negative}.\n"
                          f"Try increasing regularise_const to ensure the covariance matrix is positive definite " \
                          f"or double check that the input parameters are close to a best-fit location."
                         )
                else:
                    print(f"Initial number of negative values on diagonal of covariance matrix = {num_neg_diag_vals}\n" \
                          f"Corresponding to parameters: {regularised_values}.\n" \
                          f"No remaining negative values."
                         )

        # Generate the list which gives the order of the parameters in the covariance matrix
        ordered_param_list = order_list(list(p_vary.keys()))

        if return_array:
            return cov_mat, ordered_param_list
        else:
            # If returning a nested PyTree use array_to_pytree_2D to convert
            return array_to_pytree_2D(p_vary, cov_mat), ordered_param_list
    def laplace_approx_with_bounds(
        self,
        p: PyTree,
        Y: JAXArray,
        param_bounds: PyTree,
        vars: Optional[list] = None,
        fixed_vars: Optional[list] = None,
        large: Optional[bool] = False,
        large_block_size: Optional[int] = 50,
        return_array: Optional[bool] = False,
        large_jit: Optional[bool] = True,
        **kwargs,
    ) -> Tuple[Union[PyTree, JAXArray], list]:
        """Computes the Laplace approximation at the location of ``p`` but within the transformed
        parameter space used by ``PyMC`` and ``NumPyro`` to deal with parameters bounded by a lower and upper bound.

        Example:
            ``param_bounds`` should be of the form ``param_bounds[par] = [lower_bound, upper_bound]`` where
            ``lower_bound`` and ``upper_bound`` are of the same shape as ``p[par]``.

        See ``GP.laplace_approx`` for more details about the Laplace approximation.

        Args:
            p (PyTree): Pytree of hyperparameters used to calculate the covariance matrix
                in addition to any mean function parameters which may be needed to calculate the mean function.
                Also input to the ``logPrior`` function for the calculation of the log priors.
            Y (JAXArray): Observed data to fit, must be of shape ``(N_l, N_t)``.
            param_bounds (PyTree): Contains any bounds for the parameters in ``p``.
            vars (:obj:`list` of :obj:`str`, optional): The ``list`` of key names corresponding to
                the parameters we want to calculate the Laplace approximation with respect to.
                The remaining parameters will be assumed to be fixed. If specified in addition to
                fixed_vars will raise an Exception.
            fixed_vars (:obj:`list` of :obj:`str`, optional): Alternative to vars, may specify instead
                the parameters being kept fixed which will not be marginalised over in the Laplace approximation.
                If specified in addition to vars will raise an ``Exception``.
            large (bool, optional): Calculating the hessian matrix for large data sets with many parameters can be
                very memory intensive. If this is set to True then the hessian will be calculated in groups of rows
                instead of all at once which reduces the memory cost but can take significantly longer to run.
                The calculation is otherwise the same with no approximation made. Defaults to ``False``.
            large_block_size (int, optional): If large is set to True and the hessian is being calculated in groups of rows
                can specify how many rows are being calculated simultaneously. Large numbers may calculate the overall hessian
                faster but at greater memory cost.
            large_jit (bool, optional): Whether to JIT compile the hessian function when ``large = True``,
                can speed up the calculation assuming the function can be JIT compiled. Defaults to ``True``.
            return_array (bool, optional): Whether to return the approximated covariance matrix as a JAXArray or
                as a nested PyTree where e.g. the covariance between parameters named p1 and p2 is given by
                ``cov_mat[p1][p2]`` and ``cov_mat[p2][p1]``.

        Returns:
            (JAXArray, :obj:`list` of :obj:`str`) or (PyTree, :obj:`list` of :obj:`str`): Returns a tuple of two elements,
            if ``return_array = True`` the first element will be the covariance matrix from the Laplace approximation
            as a JAXArray, otherwise it will be as a nested PyTree. The second element will be the order of the parameters
            in the returned covariance matrix if it is a JAXArray. This list is also returned when ``return_array = False``
            for consistency. The order of the list matches how ``jax.flatten_util.ravel_pytree`` will order keys from a PyTree.

        """
        # This function returns a PyTree p_vary which has only the (key, value) pairs of
        # parameters being varied
        # make_p is also returned which is a function with returns the parameters in p_vary
        # with all fixed parameters added back in
        p_vary, make_p = varying_params_wrapper(p, vars = vars, fixed_vars = fixed_vars)

        # Transform the parameters being varied to the transformed values which are sampled by
        # PyMC and NumPyro
        p_transf = transf_to_unbounded_params(p_vary, param_bounds)

        # Create a function which returns the transformed values back to the full set of parameters
        # untransformed including fixed parameters
        def transf_back_to_p(p_transf):
            p_vary = transf_from_unbounded_params(p_transf, param_bounds)
            return make_p(p_vary)

        # Write a wrapper function which takes the transformed parameters and calculates the log Posterior
        pymc_logP_hessianable = lambda p_transf: self.logP_hessianable(transf_back_to_p(p_transf), Y)

        if large:
            # For large data sets and many free parameters, breaking up the hessian calculation
            # into blocks of rows of size large_block_size has a much lower memory cost
            # (return_array = False so the per-parameter correction loop below can index by key)
            hessian_mat = large_hessian_calc(pymc_logP_hessianable, p_transf, block_size = large_block_size,
                                             return_array = False, jit = large_jit)
        else:
            # If memory cost is not an issue then directly calculating the full hessian is faster
            hessian_mat = jax.hessian(pymc_logP_hessianable)(p_transf)

        # Loop over each parameter being varied
        for par in p_transf.keys():
            # Select just the bounded parameters
            if par in param_bounds.keys():
                # Add to the diagonal of the hessian an additional term
                # which is equal to the hessian of the jacobian of the transformation
                # performed by PyMC and NumPyro
                # This term is added to ensure the transformation these inference libraries perform
                # does not impact the choice of priors made
                exp_minus_p = jnp.exp(-p_transf[par])
                hessian_of_transform_jacobian = jnp.diag(-2*exp_minus_p/(1+exp_minus_p)**2)
                hessian_mat[par][par] += hessian_of_transform_jacobian

        # Convert hessian from a nested PyTree to a 2D JAXArray for GP.laplace_approx to be able
        # to invert to calculate the covariance matrix
        hessian_mat = pytree_to_array_2D(p_transf, hessian_mat)

        # Delegate inversion, regularisation and output formatting to GP.laplace_approx
        cov_mat, ordered_param_list = self.laplace_approx(p_transf, Y, hessian_mat = hessian_mat,
                                                          return_array = return_array, **kwargs)

        return cov_mat, ordered_param_list
|
markfortuneREPO_NAMEluasPATH_START.@luas_extracted@luas-main@src@luas@GP.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "pynucastro/pynucastro",
"repo_path": "pynucastro_extracted/pynucastro-main/pynucastro/nucdata/AtomicMassEvaluation/README.md",
"type": "Markdown"
}
|
# Atomic Mass Evaluation / Nubase
The Nubase / 2020 Atomic Mass Evaluations data were downloaded from:
https://www-nds.iaea.org/amdc/
The Atomic Mass Evaluation 2020 is published as Chinese Phys. C 45 (2021) 030002, 030003
The Nubase evaluation is published as Chinese Physics C 45 (2021) 030001
## Scripts
* `extract_spin_1.py` : this extracts the spins from the Nubase version of the data,
currently using `nubase_4.mas20`.
* `extract_mass_excess.py` : this extracts the mass excess from the Nubase version of
the data, currently using `nubase_4.mas20`.
|
pynucastroREPO_NAMEpynucastroPATH_START.@pynucastro_extracted@pynucastro-main@pynucastro@nucdata@AtomicMassEvaluation@README.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "hmuellergoe/mrbeam",
"repo_path": "mrbeam_extracted/mrbeam-main/README.md",
"type": "Markdown"
}
|
# mrbeam
This is the second release of the MrBeam software tool.
Caution! This is alpha quality software, do expect bugs and sparse documentation!
MrBeam is a software package written for VLBI imaging, in particular for sparse mm-VLBI data.
The main features of this release of MrBeam are as follows:
-> a general VLBI interface for regpy
-> a support for convex solvers and non-L2 spaces adding a more diverse set of solver options to ehtim
-> an implementation of multiscalar decompositions for imaging, in particular:
-> DoG-HiT (Mueller, Lobanov 2022a, https://ui.adsabs.harvard.edu/abs/2022arXiv220609501M/abstract)
-> DoB-CLEAN (Mueller, Lobanov 2023a, https://ui.adsabs.harvard.edu/abs/2023A%26A...672A..26M/abstract)
-> Dynamic polarimetry with the multiresolution support (Mueller, Lobanov 2023b, https://ui.adsabs.harvard.edu/abs/2023A%26A...673A.151M/abstract)
-> an extension point to neural networks
This is a light version of MrBeam. Expect much more to come in consecutive releases.
We added some jupyter-notebook tutorials for a quick start with MrBeam. Some tutorials are still under construction, but will be added soon.
MrBeam makes explicit use of:
-> ehtim (Chael, Johnson, Narayan et. al. 2016, https://ui.adsabs.harvard.edu/abs/2016ApJ...829...11C/abstract; Chael, Johnson, Bouman et. al. 2018, https://ui.adsabs.harvard.edu/abs/2018ApJ...857...23C/abstract; source code: https://github.com/achael/eht-imaging)
-> WISE (Mertens, Lobanov 2015, https://ui.adsabs.harvard.edu/abs/2015A%26A...574A..67M/abstract; source code: https://github.com/flomertens/wise), we provide a light version of libwise (lightwise)
-> regpy (https://num.math.uni-goettingen.de/regpy/), we provide a local version of regpy with minor changes to provide adaptability
Installation guide:
The installation is easiest with anaconda. Create a new environment with python 3 and install all dependencies first:
-> numpy
-> scipy
-> matplotlib
-> numba
-> joblib
-> astropy
-> ephem
-> future
-> h5py
-> pandas
-> skimage (install with conda: `conda install scikit-image`)
-> pynfft (install with conda: `conda install -c conda-forge pynfft`)
-> ehtplot (pip install from https://github.com/liamedeiros/ehtplot)
-> ehtim (pip install from https://github.com/achael/eht-imaging)
Download and unpack MrBeam, cd in the subfolders itreg, libwise_0.4.7_light, MSI and imagingbase and install the packages by "pip install .".
MrBeam is currently under development and in application to many sparse VLBI data analysis projects, such as for the EHT, RadioAstron and the EVN. Feel free to contribute.
|
hmuellergoeREPO_NAMEmrbeamPATH_START.@mrbeam_extracted@mrbeam-main@README.md@.PATH_END.py
|
{
"filename": "tool.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/tools/merriam_webster/tool.py",
"type": "Python"
}
|
"""Tool for the Merriam-Webster API."""
from typing import Optional
from langchain_core.callbacks import CallbackManagerForToolRun
from langchain_core.tools import BaseTool
from langchain_community.utilities.merriam_webster import MerriamWebsterAPIWrapper
class MerriamWebsterQueryRun(BaseTool):  # type: ignore[override]
    """Tool that searches the Merriam-Webster API."""

    name: str = "merriam_webster"
    # BUG FIX: the second sentence previously had no trailing space, so the
    # implicitly-concatenated description read "...of a word.Input should be..."
    description: str = (
        "A wrapper around Merriam-Webster. "
        "Useful for when you need to get the definition of a word. "
        "Input should be the word you want the definition of."
    )
    # Wrapper holding the Merriam-Webster API credentials and request logic
    api_wrapper: MerriamWebsterAPIWrapper

    def _run(
        self,
        query: str,
        run_manager: Optional[CallbackManagerForToolRun] = None,
    ) -> str:
        """Use the Merriam-Webster tool to look up *query* and return its definitions."""
        return self.api_wrapper.run(query)
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@tools@merriam_webster@tool.py@.PATH_END.py
|
{
"filename": "bendgratings.py",
"repo_name": "chandra-marx/marxs",
"repo_path": "marxs_extracted/marxs-main/marxs/design/bendgratings.py",
"type": "Python"
}
|
# Licensed under GPL version 3 - see LICENSE.rst
'''This module contains functions to modify gratings.
After flat simple gratings have been placed on a Rowland-torus, they can be
changed to more complicated geometries or set-ups. For example, placing just
the center of the grating on the Rowland-torus, can leave the edges to deviate.
In this case, gratings might be bend in one dimension or a more complex
arrangement of grating bars (a "chirp") could be specified. Conceptually, it
is often useful to think of this as a multi-step process for the simulation,
so in practice a chirp would have to be known in grating manufacturing.
To support this conceptual idea (place the gratings, then rotate it, then do X,
then chirp it) the functions in this module operate on exciting grating objects.
'''
import numpy as np
from scipy.optimize import minimize_scalar
from scipy.interpolate import RectBivariateSpline
from transforms3d.affines import decompose, decompose44
from marxs.math.geometry import Cylinder
from marxs.math.utils import h2e, e2h, norm_vector
from marxs.utils import generate_test_photons
from marxs.optics import OrderSelector
def bend_gratings(gratings, radius):
    '''Bend gratings to follow the Rowland circle.

    Gratings are bent in one direction (the dispersion direction) only.

    The numerical procedure used to calculate the bending takes the central ray as
    a fixed point, assuming that the central ray always goes to the correct position!

    Parameters
    ----------
    gratings : list
        List of gratings to be bent. Each grating (and its sub-elements)
        is modified in place: its geometry is replaced by a `Cylinder`.
    radius : float
        Radius of the newly bent gratings
    '''
    for e in gratings:
        # Decompose the 4x4 pose matrix into translation, rotation,
        # zoom (half-sizes along the axes) and shear.
        t, rot, z, s = decompose(e.geometry.pos4d)
        # Half opening angle so that the cylinder segment spans the same
        # extent as the former flat grating (z[1] = half-width).
        d_phi = np.arctan(z[1] / radius)
        c = Cylinder({'position': t - radius * h2e(e.geometry['e_x']),
                      'orientation': rot,
                      'zoom': [radius, radius, z[2]],
                      'phi_lim': [-d_phi, d_phi]})
        # Preserve grating-specific geometry parameters (e.g. groove angle).
        c._geometry = e.geometry._geometry
        e.geometry = c
        e.display['shape'] = 'surface'
        for e1 in e.elements:
            # can't be the same geometry, because groove_angle is part of _geometry and that's different
            # Maybe need to get that out again and make the geometry strictly the geometry
            # But for now, make a new cylinder of each of them
            # Even now, not sure that's needed, since intersect it run by FlatStack
            # NOTE(review): e.geometry was already replaced above, so
            # e.geometry['e_x'] here comes from the new cylinder, not the
            # original flat grating -- confirm this is intended.
            c = Cylinder({'position': t - radius * h2e(e.geometry['e_x']),
                          'orientation': rot,
                          'zoom': [radius, radius, z[2]],
                          'phi_lim': [-d_phi, d_phi]})
            c._geometry = e1.geometry._geometry
            e1.geometry = c
            e1.display['shape'] = 'surface'
class NumericalChirpFinder():
    '''Optimizer to determine optimal chirp by ray-tracing individual rays.

    This object passes a ray through the center of a grating with the
    specified energy and diffraction order and records the position of this
    central ray. Then, rays are passed through the grating at different
    positions. For each position, the grating period is optimized
    numerically, such that these test rays hit the same position as the
    central ray.

    The purpose of wrapping this in an object (as opposed to a simple
    function) is that certain settings, such as energy and order of
    diffraction, are fixed when the object is initialized.

    Assumes that the focal point (=position of the 0th order) is at the
    origin of the coordinate system.

    Parameters
    ----------
    detector : marxs element
        Element (e.g. a detector on the Rowland circle) that the photon is
        propagated to after the grating.
    order : int
        Diffraction order used for the optimization.
    energy : float
        Photon energy.
        NOTE(review): `energy` is stored but never written into the photon
        table in this class -- confirm it is applied elsewhere.
    d : float
        Nominal (base) grating period.
    colname : string
        Name of column in photon list that the detector ``detector`` writes in
    '''
    # Fractional (u, v) position on the grating of the current test ray.
    # NOTE(review): mutable class-level default, shared until set_uv is
    # called on the instance.
    uv = [0, 0]

    def __init__(self, detector, order, energy, d=0.0002,
                 colname='detcirc_phi'):
        self.photon = generate_test_photons(1)
        self.detector = detector
        self.energy = energy
        self.order = order
        self.base_d = d
        self.colname = colname

    def set_grat(self, grat):
        # Select the grating under study and trace the reference (goal) ray.
        self.grat = grat
        self.calc_goal()

    def set_uv(self, uv):
        # Move the test ray to fractional position (u, v) on the grating.
        self.uv = uv
        self.init_photon()

    def calc_goal(self):
        # Remember the original order selector so chirp_gratings can
        # restore it after the optimization.
        if not hasattr(self.grat, 'original_orderselector'):
            self.grat.original_orderselector = self.grat.order_selector
        self.grat.order_selector = OrderSelector([self.order])
        self.grat._d = self.base_d
        # Trace the central ray and record where it lands; this is the
        # target position for all off-center rays.
        self.set_uv([0., 0.])
        self.run_photon()
        self.goal = self.photon[self.colname][0]

    def posongrat(self):
        # Euclidean position on the (flat) grating for fractional (u, v).
        pos = h2e(self.grat.geometry['center'] +
                  self.uv[0] * self.grat.geometry['v_y'] +
                  self.uv[1] * self.grat.geometry['v_z'])
        return pos

    def init_photon(self):
        # Start slightly outside the grating and aim at the origin
        # (where the focal point is assumed to be).
        pos = self.posongrat()
        self.pos = e2h(1.1 * pos, 1)
        self.dir = norm_vector(- e2h(pos.reshape(1, 3), 0))
        self.reset_photon()

    def reset_photon(self):
        # Re-arm the single test photon for another trace.
        self.photon['pos'] = self.pos
        self.photon['dir'] = self.dir
        self.photon['probability'] = 1

    def run_photon(self):
        # Propagate the photon through grating and detector.
        self.reset_photon()
        self.photon = self.grat(self.photon)
        self.photon = self.detector(self.photon)

    def optimize_func(self, d):
        # Figure of merit: distance (in colname units) between this ray's
        # landing position and the central ray's, for period d * base_d.
        self.grat._d = d * self.base_d
        self.run_photon()
        return np.abs(self.photon[self.colname][0] - self.goal)

    def correction_on_d(self, uarray=np.array([-.999, 0, .999]),
                        varray=np.array([0])):
        # Multiplicative period correction on the (u, v) grid; 1.0 means
        # "keep the base period".
        corr = np.ones((len(uarray), len(varray)))
        for j, u in enumerate(uarray):
            for k, v in enumerate(varray):
                self.set_uv([u, v])
                corr[j, k] = minimize_scalar(self.optimize_func,
                                             bracket=(.99, 1., 1.01)).x
        return corr

    def __call__(self, grat, *args, **kwargs):
        self.set_grat(grat)
        return self.correction_on_d(*args, **kwargs)
class BendNumericalChirpFinder(NumericalChirpFinder):
    '''Chirp finder for gratings that have already been bent.

    Overrides the position calculation to work on a cylindrical (bent)
    grating surface instead of a flat one.
    '''
    def posongrat(self):
        # zoom[2] is the half-length along the cylinder axis.
        trans, rot, zoom, shear = decompose44(self.grat.geometry.pos4d)
        p_lim = self.grat.geometry.phi_limits
        p_center = np.mean(p_lim)
        # Not accounting for 2 pi wrap and crazy stuff
        p_half = (p_lim[1] - p_lim[0]) / 2
        # Map fractional (u, v) to (angle, axial position) on the cylinder.
        pos = h2e(self.grat.geometry.parametric_surface([p_center + self.uv[0] * p_half],
                                                        [self.uv[1] * zoom[2]]))
        return pos[0, 0, :]
def chirp_gratings(gratings, optimizer, d,
                   uarray=np.array([-.999, 0, .999]), varray=np.array([0])):
    '''Apply a position-dependent grating period ("chirp") to gratings.

    Each grating is modified in place: its ``_d`` attribute is replaced by
    a bound method that evaluates a fitted spline of the corrected period
    at the photon's intersection coordinates, and the original order
    selector (saved by the optimizer) is restored.

    Parameters
    ----------
    gratings : list
        Gratings to be modified in place.
    optimizer : callable
        Typically a `NumericalChirpFinder`; called as
        ``optimizer(grat, uarray, varray)`` and expected to return the
        multiplicative period correction on the (u, v) grid.
    d : float
        Nominal grating period that the corrections multiply.
    uarray, varray : np.array
        Fractional coordinates (roughly -1..1) on the grating at which the
        correction is evaluated.
    '''
    for grat in gratings:
        corr = optimizer(grat, uarray, varray)
        # NOTE(review): tiling the first column into three identical
        # columns assumes len(uarray) == 3 and discards any v dependence
        # of the correction -- confirm.
        corr = np.tile(corr[:, 0], (3, 1)).T
        ly = np.linalg.norm(grat.geometry['v_y'])
        lz = np.linalg.norm(grat.geometry['v_z'])
        grat.fitted_d_corr = corr
        # NOTE(review): uarray scales *both* spline axes; varray is not
        # used here -- verify this is intended.
        grat.spline = RectBivariateSpline(ly * uarray, lz * uarray,
                                          d * corr,
                                          bbox=[-ly, ly, -lz, lz],
                                          kx=2, ky=2)

        def func(self, intercoos):
            # Local period at the photon's intersection coordinates.
            return self.spline(intercoos[:, 0], intercoos[:, 1], grid=False)
        # Invoking the descriptor protocol to create a bound method
        # see https://stackoverflow.com/questions/972/adding-a-method-to-an-existing-object-instance
        grat._d = func.__get__(grat)
        grat.order_selector = grat.original_orderselector
|
chandra-marxREPO_NAMEmarxsPATH_START.@marxs_extracted@marxs-main@marxs@design@bendgratings.py@.PATH_END.py
|
{
"filename": "garbage.py",
"repo_name": "h5py/h5py",
"repo_path": "h5py_extracted/h5py-master/other/garbage.py",
"type": "Python"
}
|
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
"""
Demonstrates garbage messages printed to stderr for membership
testing, when performed in new threads.
"""
from threading import Thread
import h5py
def demonstrate():
    """Open an in-memory HDF5 file and run a membership test.

    Used to show spurious stderr output when called from a new thread.
    """
    with h5py.File('foo', 'w', driver='core') as f:
        contains_x = 'x' in f
        print(contains_x)
if __name__ == '__main__':
    # Run once in the main thread (no garbage expected) ...
    print("Main thread")
    demonstrate()
    # ... then once in a fresh thread, where the garbage output appears.
    thread = Thread(target=demonstrate)
    print("New thread")
    thread.start()
    thread.join()
|
h5pyREPO_NAMEh5pyPATH_START.@h5py_extracted@h5py-master@other@garbage.py@.PATH_END.py
|
{
"filename": "numpyctypes.py",
"repo_name": "IvS-KULeuven/IvSPythonRepository",
"repo_path": "IvSPythonRepository_extracted/IvSPythonRepository-master/aux/numpyctypes.py",
"type": "Python"
}
|
"""
Module to convert a numpy array to a ctypes struct.
This struct can then be passed to a native C library.
Author: Joris De Ridder
"""
import numpy as np
import ctypes as C
# Map numpy dtype character codes to the matching ctypes scalar types.
ctypesDict = {'d' : C.c_double,
              'b' : C.c_char,
              'h' : C.c_short,
              'i' : C.c_int,
              'l' : C.c_long,
              'q' : C.c_longlong,
              'B' : C.c_ubyte,
              'H' : C.c_ushort,
              'I' : C.c_uint,
              'L' : C.c_ulong,
              'Q' : C.c_ulonglong}


def c_ndarray(a, dtype = None, ndim = None, shape = None, requirements = None):
    """
    Returns a ctypes structure of the array 'a'
    containing the arrays info (data, shape, strides, ndim).
    A check is made to ensure that the array has the specified dtype
    and requirements.

    Example:

    >>> myArray = np.arange(10.0)
    >>> myCstruct = c_ndarray(myArray, dtype=np.double, ndim = 3, shape = (4,3,2),
    ...                       requirements = ['c_contiguous'])

    @param a: the numpy array to be converted
    @type a: ndarray
    @param dtype: the required dtype of the array, convert if it doesn't match
    @type dtype: numpy dtype
    @param ndim: the required number of axes of the array,
                 complain if it doesn't match
    @type ndim: integer
    @param shape: required shape of the array, complain if it doesn't match
    @type shape: tuple
    @param requirements: "ensurearray", "aligned", "fortran", "f_contiguous",
                         or "c_contiguous". Convert if it doesn't match.
    @type requirements: list
    @return: ctypes structure with the fields:
             - data: pointer to the data : the type is determined with the
               dtype of the array, and with ctypesDict.
             - shape: pointer to long array : size of each of the dimensions
             - strides: pointer to long array : strides in elements (not bytes)
    @rtype: ctypes structure
    @raise TypeError: if ndim/shape don't match or the dtype is unsupported
    """

    if not requirements:
        # Also allow derived classes of ndarray
        array = np.asanyarray(a, dtype=dtype)
    else:
        # Convert requirements to capital letter codes:
        # ('ensurearray' -> 'E'; 'aligned' -> 'A'
        #  'fortran', 'f_contiguous', 'f' -> 'F'
        #  'contiguous', 'c_contiguous', 'c' -> 'C')
        requirements = [x[0].upper() for x in requirements]
        subok = (0 if 'E' in requirements else 1)

        # Get an ndarray view of 'a' with the requested dtype without
        # copying data yet. Previously this used np.array(..., copy=False),
        # which raises under NumPy >= 2.0 whenever a copy *is* needed;
        # the as*array helpers copy only when necessary on all versions.
        if subok:
            array = np.asanyarray(a, dtype=dtype)
        else:
            array = np.asarray(a, dtype=dtype)

        # See if copying all data is really necessary.
        # Note: 'A' = (A)ny = only (F) if it was already (F)
        copychar = 'A'
        if 'F' in requirements:
            copychar = 'F'
        elif 'C' in requirements:
            copychar = 'C'

        for req in requirements:
            if req == 'E':
                # 'ensurearray' is not an ndarray flag (flags['E'] would
                # raise KeyError); it was already handled via subok above.
                continue
            if not array.flags[req]:
                array = array.copy(copychar)
                break

    # If required, check the number of axes and the shape of the array
    if ndim is not None:
        if array.ndim != ndim:
            raise TypeError("Array has wrong number of axes")

    if shape is not None:
        if array.shape != shape:
            raise TypeError("Array has wrong shape")

    # Define a class that serves as interface of an ndarray to ctypes.
    # Part of the type depends on the array's dtype.

    class ndarrayInterfaceToCtypes(C.Structure):
        pass

    typechar = array.dtype.char

    if typechar in ctypesDict:
        ndarrayInterfaceToCtypes._fields_ = \
            [("data", C.POINTER(ctypesDict[typechar])),
             ("shape", C.POINTER(C.c_long)),
             ("strides", C.POINTER(C.c_long))]
    else:
        raise TypeError("dtype of input ndarray not supported")

    # Instantiate the interface class and attach the ndarray's internal info.
    # Ctypes does automatic conversion between (c_long * #) arrays and
    # POINTER(c_long).
    ndarrayInterface = ndarrayInterfaceToCtypes()
    ndarrayInterface.data = array.ctypes.data_as(C.POINTER(ctypesDict[typechar]))
    ndarrayInterface.shape = (C.c_long * array.ndim)(*array.shape)
    # Convert strides from bytes to elements. Use integer (floor) division:
    # true division ('/=') yields floats, which cannot be stored in a
    # c_long array under Python 3 (TypeError).
    itemsize = array.dtype.itemsize
    ndarrayInterface.strides = (C.c_long * array.ndim)(
        *(s // itemsize for s in array.strides))

    return ndarrayInterface
|
IvS-KULeuvenREPO_NAMEIvSPythonRepositoryPATH_START.@IvSPythonRepository_extracted@IvSPythonRepository-master@aux@numpyctypes.py@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/choroplethmap/marker/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Line(_BaseTraceHierarchyType):
    """Styling of the border line drawn around choroplethmap marker points.

    NOTE(review): this module follows the repetitive structure of plotly's
    generated graph-object classes; structural changes presumably belong in
    the code generator rather than in this file -- confirm before editing.
    """

    # class properties
    # --------------------
    _parent_path_str = "choroplethmap.marker"
    _path_str = "choroplethmap.marker.line"
    _valid_props = {"color", "colorsrc", "width", "widthsrc"}

    # color
    # -----
    @property
    def color(self):
        """
        Sets the marker.line color. It accepts either a specific color
        or an array of numbers that are mapped to the colorscale
        relative to the max and min values of the array or relative to
        `marker.line.cmin` and `marker.line.cmax` if set.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen
          - A list or array of any of the above

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # colorsrc
    # --------
    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `color`.

        The 'colorsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    # width
    # -----
    @property
    def width(self):
        """
        Sets the width (in px) of the lines bounding the marker points.

        The 'width' property is a number and may be specified as:
          - An int or float in the interval [0, inf]
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    # widthsrc
    # --------
    @property
    def widthsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `width`.

        The 'widthsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["widthsrc"]

    @widthsrc.setter
    def widthsrc(self, val):
        self["widthsrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the marker.line color. It accepts either a
            specific color or an array of numbers that are mapped
            to the colorscale relative to the max and min values of
            the array or relative to `marker.line.cmin` and
            `marker.line.cmax` if set.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        width
            Sets the width (in px) of the lines bounding the marker
            points.
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            `width`.
        """

    def __init__(
        self, arg=None, color=None, colorsrc=None, width=None, widthsrc=None, **kwargs
    ):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.choroplethmap.marker.Line`
        color
            Sets the marker.line color. It accepts either a
            specific color or an array of numbers that are mapped
            to the colorscale relative to the max and min values of
            the array or relative to `marker.line.cmin` and
            `marker.line.cmax` if set.
        colorsrc
            Sets the source reference on Chart Studio Cloud for
            `color`.
        width
            Sets the width (in px) of the lines bounding the marker
            points.
        widthsrc
            Sets the source reference on Chart Studio Cloud for
            `width`.

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.choroplethmap.marker.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.choroplethmap.marker.Line`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("colorsrc", None)
        _v = colorsrc if colorsrc is not None else _v
        if _v is not None:
            self["colorsrc"] = _v
        _v = arg.pop("width", None)
        _v = width if width is not None else _v
        if _v is not None:
            self["width"] = _v
        _v = arg.pop("widthsrc", None)
        _v = widthsrc if widthsrc is not None else _v
        if _v is not None:
            self["widthsrc"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@choroplethmap@marker@_line.py@.PATH_END.py
|
{
"filename": "test_ccompiler_opt_conf.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/numpy/py3/numpy/distutils/tests/test_ccompiler_opt_conf.py",
"type": "Python"
}
|
import unittest
from os import sys, path
is_standalone = __name__ == '__main__' and __package__ is None
if is_standalone:
sys.path.append(path.abspath(path.join(path.dirname(__file__), "..")))
from ccompiler_opt import CCompilerOpt
else:
from numpy.distutils.ccompiler_opt import CCompilerOpt
# Compilers to exercise for each CPU architecture; every (arch, compiler)
# pair is fed to FakeCCompilerOpt.fake_info in TestConfFeatures.
arch_compilers = dict(
    x86 = ("gcc", "clang", "icc", "iccw", "msvc"),
    x64 = ("gcc", "clang", "icc", "iccw", "msvc"),
    ppc64 = ("gcc", "clang"),
    ppc64le = ("gcc", "clang"),
    armhf = ("gcc", "clang"),
    aarch64 = ("gcc", "clang"),
    narch = ("gcc",)
)
class FakeCCompilerOpt(CCompilerOpt):
    """CCompilerOpt stub that never touches a real compiler.

    ``fake_info`` is patched by the tests to simulate different
    (arch, compiler, extra_args) build targets.
    """
    fake_info = ("arch", "compiler", "extra_args")

    def __init__(self, *args, **kwargs):
        # Pass ccompiler=None to bypass distutils compiler detection.
        CCompilerOpt.__init__(self, None, **kwargs)

    def dist_compile(self, sources, flags, **kwargs):
        # Pretend compilation always succeeds and "produces" the sources.
        return sources

    def dist_info(self):
        return FakeCCompilerOpt.fake_info

    @staticmethod
    def dist_log(*args, stderr=False):
        # Silence all logging during the tests.
        pass
class _TestConfFeatures(FakeCCompilerOpt):
    """A hook to check the sanity of configured features
    - before it is called by the abstract class '_Feature'
    """

    def conf_features_partial(self):
        # Validate both the full feature table and the partial
        # (target-filtered) table before handing it to the base class.
        conf_all = self.conf_features
        for feature_name, feature in conf_all.items():
            self.test_feature(
                "attribute conf_features",
                conf_all, feature_name, feature
            )

        conf_partial = FakeCCompilerOpt.conf_features_partial(self)
        for feature_name, feature in conf_partial.items():
            self.test_feature(
                "conf_features_partial()",
                conf_partial, feature_name, feature
            )
        return conf_partial

    def test_feature(self, log, search_in, feature_name, feature_dict):
        # Context string so assertion failures identify the feature,
        # architecture and compiler being validated.
        error_msg = (
            "during validate '{}' within feature '{}', "
            "march '{}' and compiler '{}'\n>> "
        ).format(log, feature_name, self.cc_march, self.cc_name)

        if not feature_name.isupper():
            raise AssertionError(error_msg + "feature name must be in uppercase")

        for option, val in feature_dict.items():
            self.test_option_types(error_msg, option, val)
            self.test_duplicates(error_msg, option, val)

        self.test_implies(error_msg, search_in, feature_name, feature_dict)
        self.test_group(error_msg, search_in, feature_name, feature_dict)
        self.test_extra_checks(error_msg, search_in, feature_name, feature_dict)

    def test_option_types(self, error_msg, option, val):
        # Each option name is only valid with a specific value type;
        # unknown option names are rejected.
        for tp, available in (
            ((str, list), (
                "implies", "headers", "flags", "group", "detect", "extra_checks"
            )),
            ((str,), ("disable",)),
            ((int,), ("interest",)),
            ((bool,), ("implies_detect",)),
            ((bool, type(None)), ("autovec",)),
        ) :
            found_it = option in available
            if not found_it:
                continue
            if not isinstance(val, tp):
                error_tp = [t.__name__ for t in (*tp,)]
                error_tp = ' or '.join(error_tp)
                raise AssertionError(error_msg +
                    "expected '%s' type for option '%s' not '%s'" % (
                    error_tp, option, type(val).__name__
                ))
            break

        if not found_it:
            raise AssertionError(error_msg + "invalid option name '%s'" % option)

    def test_duplicates(self, error_msg, option, val):
        # List-valued options must not repeat entries.
        if option not in (
            "implies", "headers", "flags", "group", "detect", "extra_checks"
        ) : return

        if isinstance(val, str):
            val = val.split()

        if len(val) != len(set(val)):
            raise AssertionError(error_msg + "duplicated values in option '%s'" % option)

    def test_implies(self, error_msg, search_in, feature_name, feature_dict):
        # NOTE(review): this checks key "disabled" while the rest of the
        # class uses option name "disable" -- verify against upstream.
        if feature_dict.get("disabled") is not None:
            return
        implies = feature_dict.get("implies", "")
        if not implies:
            return
        if isinstance(implies, str):
            implies = implies.split()

        if feature_name in implies:
            raise AssertionError(error_msg + "feature implies itself")

        # Every implied feature must exist and must not be disabled.
        for impl in implies:
            impl_dict = search_in.get(impl)
            if impl_dict is not None:
                if "disable" in impl_dict:
                    raise AssertionError(error_msg + "implies disabled feature '%s'" % impl)
                continue
            raise AssertionError(error_msg + "implies non-exist feature '%s'" % impl)

    def test_group(self, error_msg, search_in, feature_name, feature_dict):
        if feature_dict.get("disabled") is not None:
            return
        group = feature_dict.get("group", "")
        if not group:
            return
        if isinstance(group, str):
            group = group.split()

        # Group names must not collide with enabled feature names.
        for f in group:
            impl_dict = search_in.get(f)
            if not impl_dict or "disable" in impl_dict:
                continue
            raise AssertionError(error_msg +
                "in option 'group', '%s' already exists as a feature name" % f
            )

    def test_extra_checks(self, error_msg, search_in, feature_name, feature_dict):
        if feature_dict.get("disabled") is not None:
            return
        extra_checks = feature_dict.get("extra_checks", "")
        if not extra_checks:
            return
        if isinstance(extra_checks, str):
            extra_checks = extra_checks.split()

        # Extra-check names must not collide with enabled feature names.
        for f in extra_checks:
            impl_dict = search_in.get(f)
            if not impl_dict or "disable" in impl_dict:
                continue
            raise AssertionError(error_msg +
                "in option 'extra_checks', extra test case '%s' already exists as a feature name" % f
            )
class TestConfFeatures(unittest.TestCase):
    """Validate the configured CPU features for every (arch, compiler) pair."""

    def __init__(self, methodName="runTest"):
        unittest.TestCase.__init__(self, methodName)
        self._setup()

    def _setup(self):
        # Disable the on-disk cache so each fake target starts clean.
        FakeCCompilerOpt.conf_nocache = True

    def test_features(self):
        for arch, compilers in arch_compilers.items():
            for cc in compilers:
                # Instantiating _TestConfFeatures triggers the validation
                # hooks for this simulated target.
                FakeCCompilerOpt.fake_info = (arch, cc, "")
                _TestConfFeatures()
# Allow running this file directly (outside the numpy test suite).
if is_standalone:
    unittest.main()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@numpy@py3@numpy@distutils@tests@test_ccompiler_opt_conf.py@.PATH_END.py
|
{
"filename": "test_psf_measure.py",
"repo_name": "quatrope/ProperImage",
"repo_path": "ProperImage_extracted/ProperImage-master/drafts/test_psf_measure.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# test_recoverstats.py
#
# Copyright 2016 Bruno S <bruno.sanchez.63@gmail.com>
#
import os
import shlex
import subprocess
import sys
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import stats
import sep
from astropy.convolution import convolve
from astropy.convolution import convolve_fft
from astropy.time import Time
from astropy.io import fits
from astropy.stats import sigma_clipped_stats
from astropy.stats import signal_to_noise_oir_ccd
from astropy.table import Table
from astropy.modeling import fitting
from astropy.modeling import models
from astropy.nddata.utils import extract_array
from photutils import psf
from photutils import daofind
from properimage import simtools
from properimage import propercoadd as pc
# =============================================================================
# PSF measure test by propercoadd
# =============================================================================
N = 512  # image side length in pixels
X_FWHM = 6
Y_FWHM = 7
theta = 78
t_exp = 1
max_fw = max(X_FWHM, Y_FWHM)
test_dir = os.path.abspath('../test_images/measure_psf')

# 7x7 grid of point sources, kept well away from the image borders.
x = np.linspace(6*max_fw, N-6*max_fw, 7)
y = np.linspace(6*max_fw, N-6*max_fw, 7)
xy = simtools.cartesian_product([x, y])

SN = 1000.  # S/N high enough to measure the PSF
weights = list(np.linspace(10, 100, len(xy)))
m = simtools.delta_point(N, center=False, xy=xy, weights=weights)
im = simtools.image(m, N, t_exp, X_FWHM, SN,
                    Y_FWHM=Y_FWHM, theta=theta, bkg_pdf='poisson')

sim = pc.SingleImage(im, imagefile=False, sim=True)
fitted_models = sim.fit_psf_sep()

# Summarize the fitted Gaussian models.
x_sds = [g.x_stddev for g in fitted_models]
y_sds = [g.y_stddev for g in fitted_models]
th = [g.theta*180/np.pi for g in fitted_models]
amplitudes = [g.amplitude for g in fitted_models]

# NOTE(review): the Gaussian FWHM factor is 2*sqrt(2*ln 2) ~= 2.3548;
# 2.335 looks like a typo -- confirm before relying on these numbers.
fwhm_x = 2.335*np.mean(x_sds)
fwhm_y = 2.335*np.mean(y_sds)
mean_th = round(np.mean(th))
fwhm = max(fwhm_x, fwhm_y)

print('X Fwhm = {}, Y Fwhm = {}, Mean Theta = {}'.format(fwhm_x, fwhm_y, mean_th))
# =============================================================================
# PSF spatially variant
# =============================================================================
# Pairwise inner products of the unit-sum PSF models form a covariance-like
# matrix; its leading eigenvectors give a basis for the spatially variant PSF
# (Karhunen-Loeve style expansion).
covMat = np.zeros(shape=(len(fitted_models), len(fitted_models)))
renders = [g.render() for g in fitted_models]

for i in range(len(fitted_models)):
    for j in range(len(fitted_models)):
        if i <= j:
            psfi_render = renders[i]
            psfj_render = renders[j]
            # The matrix is symmetric, so only the upper triangle is
            # computed and mirrored.
            inner = np.vdot(psfi_render.flatten()/np.sum(psfi_render),
                            psfj_render.flatten()/np.sum(psfj_render))
            covMat[i, j] = inner
            covMat[j, i] = inner

# (removed leftover debugger breakpoint: `import ipdb; ipdb.set_trace()`,
#  which unconditionally halted the script here)
valh, vech = np.linalg.eigh(covMat)

power = valh/np.sum(abs(valh))
cum = 0
cut = 0
# eigh returns eigenvalues in ascending order, so count from the end of the
# spectrum until 90% of the absolute spectral power is accumulated.
while cum < 0.90:
    cut -= 1
    cum += abs(power[cut])

# Build psf basis from the dominant eigenvectors.
N_psf_basis = abs(cut)
lambdas = valh[cut:]
xs = vech[:, cut:]
psf_basis = []
for i in range(N_psf_basis):
    psf_basis.append(np.tensordot(xs[:, i], renders, axes=[0, 0]))
# =============================================================================
# Manual test
# =============================================================================
runtest = False  # input('Run Manual test?')
if runtest:
    # NOTE(review): this branch references names that are never defined in
    # this script (`srcs`, `p_sizes`, `FWHM`) and would raise NameError if
    # `runtest` were True -- it looks like leftover draft code; verify.
    prf_model = models.Gaussian2D(x_stddev=1, y_stddev=1)
    fitter = fitting.LevMarLSQFitter()
    indices = np.indices(sim.bkg_sub_img.shape)
    model_fits = []

    # Keep only sources with a reasonable pixel footprint and clean flags.
    best_big = srcs['tnpix'] >= p_sizes[0]**2.
    best_small = srcs['tnpix'] <= p_sizes[2]**2.
    best_flag = srcs['flag'] < 31
    best_srcs = srcs[best_big & best_flag & best_small]

    fitshape = (4*FWHM, 4*FWHM)
    prf_model.x_mean = fitshape[0]/2.
    prf_model.y_mean = fitshape[1]/2.

    for row in best_srcs:
        position = (row['y'], row['x'])
        # Cut a stamp around the source and fit the 2D Gaussian to it.
        y = extract_array(indices[0], fitshape, position)
        x = extract_array(indices[1], fitshape, position)
        sub_array_data = extract_array(sim.bkg_sub_img,
                                       fitshape, position,
                                       fill_value=sim.bkg.globalrms)
        prf_model.x_mean = position[1]
        prf_model.y_mean = position[0]
        fit = fitter(prf_model, x, y, sub_array_data)
        print(row['x'], row['y'], row['flux'], row['tnpix'], row['a'], row['b'])
        print(fit)
        # Accept the fit only if the residual is below the background noise.
        res = sub_array_data - fit(x, y)
        if np.sum(res*res) < sim.bkg.globalrms*fitshape[0]**2:
            model_fits.append(fit)
            # Show fit, data and residual side by side for visual inspection.
            plt.subplot(131)
            plt.imshow(fit(x, y), interpolation='none')
            plt.title('fit')
            plt.subplot(132)
            plt.title('sub_array')
            plt.imshow(sub_array_data, interpolation='none')
            plt.subplot(133)
            plt.title('residual')
            plt.imshow(sub_array_data - fit(x, y), interpolation='none')
            plt.show()
        continue_loop = input('continue loop?')
        if not continue_loop: break
|
quatropeREPO_NAMEProperImagePATH_START.@ProperImage_extracted@ProperImage-master@drafts@test_psf_measure.py@.PATH_END.py
|
{
"filename": "star_parameters.py",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/pyorbit/common/star_parameters.py",
"type": "Python"
}
|
from pyorbit.subroutines.common import *
from pyorbit.common.abstract_common import *
from pyorbit.keywords_definitions import *
class CommonStarParameters(AbstractCommon):
''' This class must be used by each planet in the system
model_name is the way the planet is identified
'''
model_class = 'star_parameters'
parameters_dictionary = {
'radius': # Radius of the star, in Solar radii
{
'bounds': [0., 2.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 1.0,
'unit': 'solar_unit',
},
'mass': # Mass of the star, in Solar masses
{
'bounds': [0., 2.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 1.0,
'unit': 'solar_unit',
},
'density': # Density of the star, in Solar density units
{
'bounds': [0., 5.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 1.0,
'unit': 'solar_unit',
},
'i_star': # Inclination of the star
{
'bounds': [0., 180.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 90,
'unit': 'degree',
},
'cosi_star': # Inclination of the star
{
'bounds': [0., 1.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 1.,
'unit': 'unitary',
},
'v_sini': # Projected rotational velocity of the star
{
'bounds': [0., 200.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 1.6,
'unit': 'km/s',
},
'rotation_period': # Rotation period of the star
{
'bounds': [1., 1000.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 27,
'unit': 'days',
},
'activity_decay': # Rotation period of the star
{
'bounds': [10., 10000.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 1000,
'unit': 'days',
},
'temperature': # Effective temperature of the photosphere
{
'bounds': [2000., 11000.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 5777,
'unit': 'kelvin',
},
'line_contrast':
{
'bounds': [0., 100.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 50,
'unit': 'percentual',
},
'line_fwhm':
{
'bounds': [0., 12.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 6,
'unit': 'km/s',
},
'rv_center':
{
'bounds': [-3e2, 3e2],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 0.0,
'unit': 'km/s',
},
'veq_star': #equatorial velocity of the star
{
'bounds': [0.00, 70.],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 1.6,
'unit': 'km/s',
},
'alpha_rotation':
{
'bounds': [0.00, 1.00],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 0.6,
'unit': 'unit',
},
'convective_c1':
{
'bounds': [0.00, 5.00],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 0.0,
'unit': 'unit',
},
'convective_c2':
{
'bounds': [-5.00, 0.00],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 0.0,
'unit': 'unit',
},
'convective_c3':
{
'bounds': [-5.00, 5.00],
'priors': ['Uniform', []],
'spaces': 'Linear',
'fixed' : 0.0,
'unit': 'unit',
},
}
recenter_pams = OrderedSet()
    def __init__(self, *args, **kwargs):
        """Initialize the star-parameter container with its default flags.

        All switches start in the configuration where only the projected
        rotational velocity (v sin i) is used; ``initialize_model``
        updates them according to the keywords in the configuration input.
        """
        super(CommonStarParameters, self).__init__(*args, **kwargs)
        # Parametrization switches: which rotation-related quantities are
        # sampled directly and which are derived from the others.
        self.use_equatorial_velocity = False
        self.use_stellar_rotation_period = False
        self.use_stellar_inclination = False
        self.use_cosine_stellar_inclination = False
        self.use_differential_rotation = False
        self.use_stellar_radius = False
        self.use_projected_velocity = True
        # Presumably the number of convective_c* terms in use (0 = off)
        # -- confirm against the models that consume this attribute.
        self.convective_order = 0
        # Which of mass/radius/density is derived from the other two.
        self.compute_mass = True
        self.compute_radius = False
        self.compute_density = False
def initialize_model(self, mc, **kwargs):
    """ check if the stellar_rotation_period has to be used as parameter

    when the stellar rotation period is given as a prior, then the equatorial velocity
    is computed using the rotational period and the radius of the star.
    The stellar inclination must be used as a free parameter
    and the veq_sini as a prior to be checked a posteriori, as the determination of the
    stellar inclination from the veq_sini could bias its distribution
    From several experiments, I determined that PyDE/MCMC convergence is
    much more difficult if v_eq and i_star are left as free parameters and
    the output is compared with too many priors
    """

    # Any of the accepted keyword aliases can switch on sampling of the
    # stellar rotation period.
    for keyword in keywords_stellar_rotation:
        self.use_stellar_rotation_period = kwargs.get(keyword, self.use_stellar_rotation_period)

    if self.use_stellar_rotation_period:
        # Sample P_rot, R_star and i_star; the equatorial and projected
        # velocities become derived quantities.
        self.use_equatorial_velocity = False
        self.use_stellar_inclination = True
        self.use_stellar_radius = True
        self.use_projected_velocity = False

    """ check if the differential rotation should be included in the model"""
    for keyword in keywords_differential_rotation:
        self.use_differential_rotation = kwargs.get(keyword, self.use_differential_rotation)

    if self.use_differential_rotation and not self.use_stellar_rotation_period:
        # Differential rotation needs the true equatorial velocity and the
        # stellar inclination rather than the projected velocity alone.
        self.use_equatorial_velocity = True
        self.use_stellar_inclination = True
        self.use_projected_velocity = False

    """ The user can force the use of the equatorial velocity, the stellar inclination,
        and the stellar radius, by activating the corresponding flags in the model"""
    self.use_equatorial_velocity = kwargs.get('use_equatorial_velocity', self.use_equatorial_velocity)
    self.use_stellar_inclination = kwargs.get('use_stellar_inclination', self.use_stellar_inclination)
    self.use_cosine_stellar_inclination = kwargs.get('use_cosine_stellar_inclination', False) # Directly addressed here for back-compatibility
    if self.use_cosine_stellar_inclination:
        # Sampling cos(i_star) implies the inclination is in the model.
        self.use_stellar_inclination = True
    self.use_stellar_radius = kwargs.get('use_stellar_radius', self.use_stellar_radius)
    self.use_projected_velocity = kwargs.get('use_projected_velocity', self.use_projected_velocity)

    """ the user can decide how to deal with the mass-radius-density correlation
        Density and (sometimes) radius can be involved in transit fit, while there is no way to measure the
        mass from the star from radial velocities or photometry, so the mass should have lower priority
        as a free parameter.
    """
    self.compute_mass = kwargs.get('compute_mass', self.compute_mass)
    self.compute_radius = kwargs.get('compute_radius', self.compute_radius)
    self.compute_density = kwargs.get('compute_density', self.compute_density)

    try:
        # When a multivariate prior on stellar parameters is in use, the
        # density is always the derived quantity.
        multivariate_pams = self.multivariate_pams
        if len(multivariate_pams) > 0:
            self.compute_density = True
    except AttributeError:
        pass

    # Enforce mutual exclusivity of the three compute_* flags: density takes
    # priority over radius, and radius over mass (each True flag zeroes the
    # other two; earlier checks cannot be re-enabled by later ones).
    if self.compute_density:
        self.compute_radius = False
        self.compute_mass = False
    if self.compute_radius:
        self.compute_density = False
        self.compute_mass = False
    if self.compute_mass:
        self.compute_density = False
        self.compute_radius = False
    if not (self.compute_mass or self.compute_radius or self.compute_density):
        self.compute_mass = True

    self.convective_order = kwargs.get('convective_order', self.convective_order)

    # P_rot, R_star, i_star and v_eq are mutually dependent: leaving all four
    # free would over-parametrize the model, so abort.
    if self.use_equatorial_velocity and self.use_stellar_inclination and self.use_stellar_rotation_period and self.use_stellar_radius:
        print('Possible source of unexpected behaviour, I will quit')
        print('These parameters are correlated and should not be all free simultaneously:')
        print('- stellar rotation period ')
        print('- stellar radius ')
        print('- stellar inclination ')
        print('- equatorial velocity')
        print()
        quit()
def define_derived_parameters(self):
    """Register transformations for stellar parameters derived from the sampled ones.

    Each branch adds one derived parameter (i_star, veq_star, v_sini,
    rotation_period, mass, radius, or density) together with the indexes of
    the sampler parameters it depends on, but only when its inputs are being
    sampled and the parameter itself is not already provided. Bounds and a
    uniform prior are attached to every parameter added this way.
    """

    derived_list = []

    # i_star derived from cos(i_star).
    if self.use_cosine_stellar_inclination and \
            'cosi_star' in self.sampler_parameters and \
            'i_star' not in self.parameter_index:

        pam00_index = self.sampler_parameters['cosi_star']

        self.transformation['i_star'] = get_var_arccosine
        self.parameter_index['i_star'] = pam00_index

        derived_list.append('i_star')

    # veq_star derived from rotation period and stellar radius.
    if not self.use_equatorial_velocity and \
            'rotation_period' in self.sampler_parameters and \
            'radius' in self.sampler_parameters and \
            'veq_star' not in self.parameter_index:

        pam00_index = self.sampler_parameters['rotation_period']
        pam01_index = self.sampler_parameters['radius']

        self.transformation['veq_star'] = get_2var_prot_rstar_veq
        self.parameter_index['veq_star'] = [pam00_index, pam01_index]

        derived_list.append('veq_star')

    # v_sini derived from rotation period, radius and inclination.
    # NOTE(review): this branch and the next guard on sampler_parameters for
    # v_sini, while the veq_star-based branches below guard on
    # parameter_index -- confirm the asymmetry is intended.
    if not self.use_equatorial_velocity and \
            'rotation_period' in self.sampler_parameters and \
            'radius' in self.sampler_parameters and \
            'i_star' in self.sampler_parameters and \
            'v_sini' not in self.sampler_parameters:

        pam00_index = self.sampler_parameters['rotation_period']
        pam01_index = self.sampler_parameters['radius']
        pam02_index = self.sampler_parameters['i_star']

        self.transformation['v_sini'] = get_3var_prot_rstar_istar_veq
        self.parameter_index['v_sini'] = [pam00_index, pam01_index, pam02_index]

        derived_list.append('v_sini')

    # v_sini derived from rotation period, radius and cos(i_star).
    if not self.use_equatorial_velocity and \
            'rotation_period' in self.sampler_parameters and \
            'radius' in self.sampler_parameters and \
            'cosi_star' in self.sampler_parameters and \
            'v_sini' not in self.sampler_parameters:

        pam00_index = self.sampler_parameters['rotation_period']
        pam01_index = self.sampler_parameters['radius']
        pam02_index = self.sampler_parameters['cosi_star']

        self.transformation['v_sini'] = get_3var_prot_rstar_cosistar_veq
        self.parameter_index['v_sini'] = [pam00_index, pam01_index, pam02_index]

        derived_list.append('v_sini')

    # v_sini derived from equatorial velocity and inclination.
    if 'veq_star' in self.sampler_parameters and \
            'i_star' in self.sampler_parameters and \
            'v_sini' not in self.parameter_index:

        pam00_index = self.sampler_parameters['veq_star']
        pam01_index = self.sampler_parameters['i_star']

        self.transformation['v_sini'] = get_2var_veq_istar_vsini
        self.parameter_index['v_sini'] = [pam00_index, pam01_index]

        derived_list.append('v_sini')

    # v_sini derived from equatorial velocity and cos(i_star).
    if 'veq_star' in self.sampler_parameters and \
            'cosi_star' in self.sampler_parameters and \
            'v_sini' not in self.parameter_index:

        pam00_index = self.sampler_parameters['veq_star']
        pam01_index = self.sampler_parameters['cosi_star']

        self.transformation['v_sini'] = get_2var_veq_cosi_vsini
        self.parameter_index['v_sini'] = [pam00_index, pam01_index]

        derived_list.append('v_sini')

    # Rotation period derived from equatorial velocity and radius.
    if 'veq_star' in self.sampler_parameters and \
            'radius' in self.sampler_parameters and \
            'rotation_period' not in self.parameter_index:

        pam00_index = self.sampler_parameters['veq_star']
        pam01_index = self.sampler_parameters['radius']

        self.transformation['rotation_period'] = get_2var_veq_radius_rot
        self.parameter_index['rotation_period'] = [pam00_index, pam01_index]

        derived_list.append('rotation_period')

    # Mass derived from density and radius.
    if 'density' in self.sampler_parameters and \
            'radius' in self.sampler_parameters and \
            'mass' not in self.parameter_index:

        pam00_index = self.sampler_parameters['density']
        pam01_index = self.sampler_parameters['radius']

        self.transformation['mass'] = get_2var_mass
        self.parameter_index['mass'] = [pam00_index, pam01_index]

        derived_list.append('mass')

    # Density derived from mass and radius.
    if 'mass' in self.sampler_parameters and \
            'radius' in self.sampler_parameters and \
            'density' not in self.parameter_index:

        pam00_index = self.sampler_parameters['mass']
        pam01_index = self.sampler_parameters['radius']

        self.transformation['density'] = get_2var_rho
        self.parameter_index['density'] = [pam00_index, pam01_index]

        derived_list.append('density')

    # Radius derived from mass and density.
    if 'mass' in self.sampler_parameters and \
            'density' in self.sampler_parameters and \
            'radius' not in self.parameter_index:

        pam00_index = self.sampler_parameters['mass']
        pam01_index = self.sampler_parameters['density']

        self.transformation['radius'] = get_2var_radius
        self.parameter_index['radius'] = [pam00_index, pam01_index]

        derived_list.append('radius')

    # Attach default bounds and a uniform prior to every newly derived
    # parameter that does not have them yet.
    for pam in derived_list:
        if pam not in self.bounds:
            self.bounds[pam] = self.default_bounds[pam]

        if pam not in self.prior_pams:
            # After the block above, pam is always in self.bounds, so the
            # else branch below is effectively unreachable.
            if pam in self.bounds:
                self.prior_pams[pam] = self.bounds[pam]
            else:
                self.prior_pams[pam] = self.default_bounds[pam]

            self.prior_kind[pam] = 'Uniform'

    return
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@pyorbit@common@star_parameters.py@.PATH_END.py
|
{
"filename": "iso_ames.py",
"repo_name": "tomasstolker/species",
"repo_path": "species_extracted/species-main/species/data/isochrone_data/iso_ames.py",
"type": "Python"
}
|
from pathlib import Path
import h5py
import pooch
from typeguard import typechecked
from species.data.isochrone_data.iso_manual import add_manual
@typechecked
def add_ames(database: h5py._hl.files.File, input_path: str) -> None:
    """
    Function for adding the AMES-Cond and AMES-Dusty
    isochrone data to the database.

    Parameters
    ----------
    database : h5py._hl.files.File
        Database.
    input_path : str
        Folder where the data is located.

    Returns
    -------
    NoneType
        None
    """

    url_list = [
        "https://home.strw.leidenuniv.nl/~stolker/species/"
        "model.AMES-Cond-2000.M-0.0.MKO.Vega",
        "https://home.strw.leidenuniv.nl/~stolker/species/"
        "model.AMES-dusty.M-0.0.MKO.Vega",
    ]

    file_hash = [
        "fc04e6f7c02982bb3187b55cdefc2464e3f1564fb8026a8958967cb889f0f581",
        "c7ba32ae10111c9ca692bf75154edac70b050c06cae211b421e1473725d6380c",
    ]

    iso_tags = ["ames-cond", "ames-dusty"]

    # Download each isochrone grid when it is not cached yet, then ingest it.
    for url_item, known_hash, iso_tag in zip(url_list, file_hash, iso_tags):
        input_file = url_item.split("/")[-1]
        data_file = Path(input_path) / input_file

        if not data_file.exists():
            print()

            pooch.retrieve(
                url=url_item,
                known_hash=known_hash,
                fname=input_file,
                path=input_path,
                progressbar=True,
            )

        add_manual(
            database=database,
            tag=iso_tag,
            file_name=str(data_file),
            model_name="ames",
        )
|
tomasstolkerREPO_NAMEspeciesPATH_START.@species_extracted@species-main@species@data@isochrone_data@iso_ames.py@.PATH_END.py
|
{
"filename": "wavelets.py",
"repo_name": "PynPoint/PynPoint",
"repo_path": "PynPoint_extracted/PynPoint-main/pynpoint/util/wavelets.py",
"type": "Python"
}
|
"""
Wrapper utilities around the wavelet functions of the mlpy CWT implementation (see continuous.py)
"""
import numpy as np
from numba import jit
from typeguard import typechecked
from scipy.special import gamma, hermite
from scipy.signal import medfilt
from statsmodels.robust import mad
from pynpoint.util.continuous import autoscales, cwt, icwt
# from pynpoint.util.continuous import fourier_from_scales
# This function cannot by @typechecked because of a compatibility issue with numba
@jit(cache=True, nopython=True)
def _fast_zeros(soft: bool,
                spectrum: np.ndarray,
                uthresh: float) -> np.ndarray:
    """
    Fast numba method to modify values in the wavelet space by using a hard or soft threshold
    function.

    Parameters
    ----------
    soft : bool
        If True the soft threshold function will be used, otherwise a hard threshold is applied.
    spectrum : numpy.ndarray
        The input 2D wavelet space (modified in place).
    uthresh : float
        Threshold used by the threshold function.

    Returns
    -------
    numpy.ndarray
        Modified spectrum.
    """
    n_rows, n_cols = spectrum.shape

    if soft:
        # Soft threshold: coefficients above the threshold are shrunk towards
        # zero by uthresh (based on their real part); the rest are zeroed.
        for row in range(n_rows):
            for col in range(n_cols):
                real_part = spectrum[row, col].real
                if abs(spectrum[row, col]) > uthresh:
                    spectrum[row, col] = np.sign(real_part) * (abs(real_part) - uthresh)
                else:
                    spectrum[row, col] = 0
    else:
        # Hard threshold: zero every coefficient whose magnitude is below
        # the threshold.
        for row in range(n_rows):
            for col in range(n_cols):
                if abs(spectrum[row, col]) < uthresh:
                    spectrum[row, col] = 0

    return spectrum
class WaveletAnalysisCapsule:
    """
    Capsule class to process one 1d time series using the CWT and wavelet de-noising by wavelet
    shrinkage.
    """

    @typechecked
    def __init__(self,
                 signal_in: np.ndarray,
                 wavelet_in: str = 'dog',
                 order: int = 2,
                 padding: str = 'none',
                 frequency_resolution: float = 0.5) -> None:
        """
        Parameters
        ----------
        signal_in : numpy.ndarray
            1D input signal.
        wavelet_in : str
            Wavelet function ('dog' or 'morlet').
        order : int
            Order of the wavelet function.
        padding : str
            Padding method ('zero', 'mirror', or 'none').
        frequency_resolution : float
            Wavelet space resolution in scale/frequency.

        Returns
        -------
        NoneType
            None
        """

        # save input data
        self.m_supported_wavelets = ['dog', 'morlet']

        # check supported wavelets
        if wavelet_in not in self.m_supported_wavelets:
            raise ValueError(f'Wavelet {wavelet_in} is not supported')

        # Reconstruction constants per supported wavelet order.
        # NOTE(review): presumably the C_delta reconstruction factors from
        # Torrence & Compo (1998) -- confirm against that reference.
        if wavelet_in == 'dog':
            self._m_c_reconstructions = {2: 3.5987,
                                         4: 2.4014,
                                         6: 1.9212,
                                         8: 1.6467,
                                         12: 1.3307,
                                         16: 1.1464,
                                         20: 1.0222,
                                         30: 0.8312,
                                         40: 0.7183,
                                         60: 0.5853}
        elif wavelet_in == 'morlet':
            self._m_c_reconstructions = {5: 0.9484,
                                         6: 0.7784,
                                         7: 0.6616,
                                         8: 0.5758,
                                         10: 0.4579,
                                         12: 0.3804,
                                         14: 0.3254,
                                         16: 0.2844,
                                         20: 0.2272}

        self.m_wavelet = wavelet_in

        if padding not in ['none', 'zero', 'mirror']:
            raise ValueError('Padding can only be none, zero or mirror')

        # Work on a zero-mean copy of the signal; the mean is stored and
        # added back in get_signal().
        self._m_data = signal_in - np.ones(len(signal_in)) * np.mean(signal_in)
        self.m_padding = padding
        self.__pad_signal()
        # Length of the (possibly padded) working signal.
        self._m_data_size = len(self._m_data)
        self._m_data_mean = np.mean(signal_in)

        if order not in self._m_c_reconstructions:
            raise ValueError('Wavelet ' + str(wavelet_in) + ' does not support order '
                             + str(order) + ". \n Only orders: " +
                             str(sorted(self._m_c_reconstructions.keys())).strip('[]') +
                             " are supported")
        self.m_order = order
        self._m_c_final_reconstruction = self._m_c_reconstructions[order]

        # create scales for wavelet transform
        self._m_scales = autoscales(N=self._m_data_size,
                                    dt=1,
                                    dj=frequency_resolution,
                                    wf=wavelet_in,
                                    p=order)

        self._m_number_of_scales = len(self._m_scales)
        self._m_frequency_resolution = frequency_resolution

        # 2D CWT coefficient array; filled by compute_cwt().
        self.m_spectrum = None

    # --- functions for reconstruction value

    @staticmethod
    @typechecked
    def _morlet_function(omega0: float,
                         x_in: float) -> np.complex128:
        """
        Value of the Morlet mother wavelet at x_in.

        Returns
        -------
        numpy.complex128
            Morlet function.
        """
        return np.pi**(-0.25) * np.exp(1j * omega0 * x_in) * np.exp(-x_in**2/2.0)

    @staticmethod
    @typechecked
    def _dog_function(order: int,
                      x_in: float) -> float:
        """
        Value of the DOG (derivative of Gaussian) mother wavelet at x_in.

        Returns
        -------
        float
            DOG function.
        """
        # NOTE(review): indexing the poly1d returned by hermite() yields a
        # polynomial *coefficient*, not an evaluation. For the only call site
        # (x_in = 0) the index is 0 and H_order(0) equals the constant
        # coefficient, so the result is correct there -- confirm before using
        # with x_in != 0.
        p_hpoly = hermite(order)[int(x_in / np.power(2, 0.5))]

        herm = p_hpoly / (np.power(2, float(order) / 2))

        return ((-1)**(order+1)) / np.sqrt(gamma(order + 0.5)) * herm

    @typechecked
    def __pad_signal(self) -> None:
        """
        Pad the internal signal on both sides by half of its length, either
        with zeros or with mirrored copies of the signal halves. With 'none'
        the signal is left untouched.

        Returns
        -------
        NoneType
            None
        """
        padding_length = int(len(self._m_data) * 0.5)

        if self.m_padding == 'zero':
            new_data = np.append(self._m_data, np.zeros(padding_length, dtype=np.float64))
            self._m_data = np.append(np.zeros(padding_length, dtype=np.float64), new_data)

        elif self.m_padding == 'mirror':
            left_half_signal = self._m_data[:padding_length]
            right_half_signal = self._m_data[padding_length:]

            new_data = np.append(self._m_data, right_half_signal[::-1])
            self._m_data = np.append(left_half_signal[::-1], new_data)

    @typechecked
    def __compute_reconstruction_factor(self) -> float:
        """
        Computes the reconstruction factor.

        Returns
        -------
        float
            Reconstruction factor.
        """
        freq_res = self._m_frequency_resolution
        wavelet = self.m_wavelet
        order = self.m_order

        # Value of the mother wavelet at x = 0.
        if wavelet == 'morlet':
            zero_function = self._morlet_function(order, 0)
        else:
            zero_function = self._dog_function(order, 0)

        c_delta = self._m_c_final_reconstruction

        reconstruction_factor = freq_res/(c_delta * zero_function)

        return reconstruction_factor.real

    @typechecked
    def compute_cwt(self) -> None:
        """
        Compute the wavelet space of the given input signal.

        Returns
        -------
        NoneType
            None
        """
        self.m_spectrum = cwt(self._m_data,
                              dt=1,
                              scales=self._m_scales,
                              wf=self.m_wavelet,
                              p=self.m_order)

    @typechecked
    def update_signal(self) -> None:
        """
        Updates the internal signal by the reconstruction of the current wavelet space.

        Returns
        -------
        NoneType
            None
        """
        self._m_data = icwt(self.m_spectrum, scales=self._m_scales)
        # The inverse CWT needs rescaling by the wavelet-dependent
        # reconstruction factor to recover the original amplitude.
        reconstruction_factor = self.__compute_reconstruction_factor()
        self._m_data *= reconstruction_factor

    @typechecked
    def denoise_spectrum(self,
                         soft: bool = False) -> None:
        """
        Applies wavelet shrinkage on the current wavelet space (m_spectrum) by either a hard of
        soft threshold function.

        Parameters
        ----------
        soft : bool
            If True a soft threshold is used, hard otherwise.

        Returns
        -------
        NoneType
            None
        """
        if self.m_padding != 'none':
            # Estimate the noise only from the central (unpadded) part of the
            # smallest scale.
            noise_length_4 = len(self._m_data) // 4
            noise_spectrum = self.m_spectrum[0, noise_length_4: (noise_length_4 * 3)].real
        else:
            noise_spectrum = self.m_spectrum[0, :].real

        # Threshold from the MAD-based noise estimate: sigma * sqrt(2 ln N).
        sigma = mad(noise_spectrum)
        uthresh = sigma*np.sqrt(2.0*np.log(len(noise_spectrum)))

        self.m_spectrum = _fast_zeros(soft, self.m_spectrum, uthresh)

    @typechecked
    def median_filter(self) -> None:
        """
        Applies a median filter on the internal 1d signal. Can be useful for cosmic ray correction
        after temporal de-noising. The kernel size of 19 samples is hard-coded.

        Returns
        -------
        NoneType
            None
        """
        self._m_data = medfilt(self._m_data, 19)

    @typechecked
    def get_signal(self) -> np.ndarray:
        """
        Returns the current version of the 1d signal. Use update_signal() in advance in order to get
        the current reconstruction of the wavelet space. Removes padded values as well.

        Returns
        -------
        numpy.ndarray
            Current version of the 1D signal.
        """
        # Add the mean (removed in __init__) back to the signal.
        tmp_data = self._m_data + np.ones(len(self._m_data)) * self._m_data_mean

        if self.m_padding == 'none':
            return tmp_data

        # Strip the padding: keep the central half of the padded signal.
        return tmp_data[len(self._m_data) // 4: 3 * (len(self._m_data) // 4)]
# def __transform_period(self,
# period):
#
# tmp_y = fourier_from_scales(self._m_scales,
# self.m_wavelet,
# self.m_order)
#
# def __transformation(x):
# return np.log2(x + 1) * tmp_y[-1] / np.log2(tmp_y[-1] + 1)
#
# cutoff_scaled = __transformation(period)
#
# scale_new = tmp_y[-1] - tmp_y[0]
# scale_old = self.m_spectrum.shape[0]
#
# factor = scale_old / scale_new
# cutoff_scaled *= factor
#
# return cutoff_scaled
# ----- plotting functions --------
# def __plot_or_save_spectrum(self):
# plt.close()
#
# plt.figure(figsize=(8, 6))
# plt.subplot(1, 1, 1)
#
# tmp_y = fourier_from_scales(self._m_scales,
# self.m_wavelet,
# self.m_order)
#
# tmp_x = np.arange(0, self._m_data_size + 1, 1)
#
# scaled_spec = copy.deepcopy(self.m_spectrum.real)
# for i, _ in enumerate(scaled_spec):
# scaled_spec[i] /= np.sqrt(self._m_scales[i])
#
# plt.imshow(abs(scaled_spec),
# aspect='auto',
# extent=[tmp_x[0],
# tmp_x[-1],
# tmp_y[0],
# tmp_y[-1]],
# cmap=plt.get_cmap("gist_ncar"),
# origin='lower')
#
# # COI first part (only for DOG) with padding
#
# inner_frequency = 2.*np.pi/np.sqrt(self.m_order + 0.5)
# coi = np.append(np.zeros(len(tmp_x)/4),
# tmp_x[0:len(tmp_x) / 4])
# coi = np.append(coi,
# tmp_x[0:len(tmp_x) / 4][::-1])
# coi = np.append(coi,
# np.zeros(len(tmp_x) / 4))
#
# plt.plot(np.arange(0, len(coi), 1.0),
# inner_frequency * coi / np.sqrt(2),
# color="white")
#
# plt.ylim([tmp_y[0],
# tmp_y[-1]])
#
# plt.fill_between(np.arange(0, len(coi), 1.0),
# inner_frequency * coi / np.sqrt(2),
# np.ones(len(coi)) * tmp_y[-1],
# facecolor="none",
# edgecolor='white',
# alpha=0.4,
# hatch="x")
#
# plt.yscale('log', basey=2)
# plt.ylabel("Period in [s]")
# plt.xlabel("Time in [s]")
# plt.title("Spectrum computed with CWT using '" + str(self.m_wavelet) +
# "' wavelet of order " + str(self.m_order))
#
# def plot_spectrum(self):
# """
# Shows a plot of the current wavelet space.
# :return: None
# """
#
# self.__plot_or_save_spectrum()
# plt.show()
#
# def save_spectrum(self,
# location):
# """
# Saves a plot of the current wavelet space to a given location.
# :param location: Save location
# :type location: str
# :return: None
# """
# self.__plot_or_save_spectrum()
# plt.savefig(location)
# plt.close()
#
# def __plot_or_save_signal(self):
# plt.close()
# plt.plot(self._m_data)
# plt.title("Signal")
# plt.ylabel("Value of the function")
# plt.xlim([0, self._m_data_size])
# plt.xlabel("Time in [s]")
#
# def plot_signal(self):
# """
# Plot the current signal.
# :return: None
# """
# self.__plot_or_save_signal()
# plt.show()
#
# def save_signal(self,
# location):
# """
# Saves a plot of the current signal to a given location.
# :param location: Save location
# :type location: str
# :return: None
# """
# self.__plot_or_save_signal()
# plt.savefig(location)
|
PynPointREPO_NAMEPynPointPATH_START.@PynPoint_extracted@PynPoint-main@pynpoint@util@wavelets.py@.PATH_END.py
|
{
"filename": "unsupervised_base.py",
"repo_name": "mavrix93/LightCurvesClassifier",
"repo_path": "LightCurvesClassifier_extracted/LightCurvesClassifier-master/lcc/stars_processing/utilities/unsupervised_base.py",
"type": "Python"
}
|
from matplotlib import pyplot as plt
import numpy as np
from lcc.stars_processing.utilities.base_decider import BaseDecider
from lcc.entities.exceptions import QueryInputError
class UnsupervisedBase(BaseDecider):
    """Base decider backed by an unsupervised sklearn-style classifier.

    Parameters
    ----------
    classifier : type
        Class of the underlying classifier; instantiated with ``params``.
    params : dict
        Keyword arguments passed to the classifier constructor.
    threshold : float
        Accepted for API compatibility.
        NOTE(review): not stored or used by this base class -- confirm
        whether subclasses rely on it.
    """

    def __init__(self, classifier, params, threshold=0.5, **kwargs):
        super(UnsupervisedBase, self).__init__(**kwargs)
        self.classifier = classifier(**params)

    def learn(self, coords):
        """Fit the classifier on coordinates, dropping rows with None or NaN.

        Raises QueryInputError if no valid coordinate rows remain.
        """
        # BUG FIX: the previous check used `np.NaN in c`, which relies on
        # identity/equality with the np.nan singleton and misses NaN values
        # created elsewhere (np.NaN was also removed in NumPy 2.0). NaN is
        # the only value for which `v == v` is False, so this catches every
        # NaN regardless of its origin.
        coords = [c for c in coords
                  if all(v is not None and v == v for v in c)]
        if coords:
            self.X = np.array(coords)
            self.classifier.fit(coords)
        else:
            raise QueryInputError("No coordinates for learning")

    def evaluate(self, star_coords):
        """Return the classifier predictions for the given coordinates."""
        return self.classifier.predict(star_coords)
|
mavrix93REPO_NAMELightCurvesClassifierPATH_START.@LightCurvesClassifier_extracted@LightCurvesClassifier-master@lcc@stars_processing@utilities@unsupervised_base.py@.PATH_END.py
|
{
"filename": "CODE_OF_CONDUCT.md",
"repo_name": "PetroFit/petrofit",
"repo_path": "petrofit_extracted/petrofit-main/CODE_OF_CONDUCT.md",
"type": "Markdown"
}
|
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
|
PetroFitREPO_NAMEpetrofitPATH_START.@petrofit_extracted@petrofit-main@CODE_OF_CONDUCT.md@.PATH_END.py
|
{
"filename": "MspImagePlugin.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/PIL/MspImagePlugin.py",
"type": "Python"
}
|
#
# The Python Imaging Library.
#
# MSP file handling
#
# This is the format used by the Paint program in Windows 1 and 2.
#
# History:
# 95-09-05 fl Created
# 97-01-03 fl Read/write MSP images
# 17-02-21 es Fixed RLE interpretation
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1995-97.
# Copyright (c) Eric Soroos 2017.
#
# See the README file for information on usage and redistribution.
#
# More info on this format: https://archive.org/details/gg243631
# Page 313:
# Figure 205. Windows Paint Version 1: "DanM" Format
# Figure 206. Windows Paint Version 2: "LinS" Format. Used in Windows V2.03
#
# See also: http://www.fileformat.info/format/mspaint/egff.htm
from . import Image, ImageFile
from ._binary import i16le as i16, o16le as o16, i8
import struct
import io
__version__ = "0.1"
#
# read MSP files
def _accept(prefix):
return prefix[:4] in [b"DanM", b"LinS"]
##
# Image plugin for Windows MSP images. This plugin supports both
# uncompressed (Windows 1.0).
class MspImageFile(ImageFile.ImageFile):
    """Image plugin for Windows Paint MSP images (versions 1 and 2)."""

    format = "MSP"
    format_description = "Windows Paint"

    def _open(self):
        """Parse the 32-byte MSP header and set up the decoder tile."""
        header = self.fp.read(32)
        if not _accept(header):
            raise SyntaxError("not an MSP file")

        # All sixteen 16-bit header words XOR to zero in a valid file.
        checksum = 0
        for offset in range(0, 32, 2):
            checksum = checksum ^ i16(header[offset:offset + 2])
        if checksum != 0:
            raise SyntaxError("bad MSP checksum")

        self.mode = "1"
        self.size = i16(header[4:]), i16(header[6:])

        if header[:4] == b"DanM":
            # Version 1: raw, uncompressed 1-bit data after the header.
            self.tile = [("raw", (0, 0) + self.size, 32, ("1", 0, 1))]
        else:
            # Version 2: RLE-compressed data, handled by the "MSP" decoder.
            self.tile = [("MSP", (0, 0) + self.size, 32, None)]
class MspDecoder(ImageFile.PyDecoder):
    """RLE decoder for version-2 ("LinS") MSP files.

    The algorithm is from http://www.fileformat.info/format/mspaint/egff.htm
    (cc-by-attribution -- that page is taken from the Encyclopedia of
    Graphics File Formats and is licensed by O'Reilly under the Creative
    Common/Attribution license).

    For RLE encoded files, the 32-byte header is followed by a scan line
    map, encoded as one 16-bit word of encoded byte length per line.

    NOTE: the encoded length of a line can be 0. This was not handled in the
    previous version of this decoder, and there's no mention of how to
    handle it in the documentation. From the few examples seen, it is
    assumed to be a fill of the background color, in this case, white.

    Pseudocode of the decoder:
        Read a BYTE value as the RunType
        If the RunType value is zero
            Read next byte as the RunCount
            Read the next byte as the RunValue
            Write the RunValue byte RunCount times
        If the RunType value is non-zero
            Use this value as the RunCount
            Read and write the next RunCount bytes literally

    e.g.:
        0x00 03 ff 05 00 01 02 03 04
    would yield the bytes:
        0xff ff ff 00 01 02 03 04

    which are then interpreted as a bit packed mode '1' image.
    """

    _pulls_fd = True

    def decode(self, buffer):
        """Decode the full RLE stream from self.fd into a packed 1-bit image.

        Returns (0, 0) to signal that all data was consumed.
        Raises IOError on truncated or corrupted input.
        """
        img = io.BytesIO()
        # A zero-length row means "background": a full row of 0xff bits.
        blank_line = bytearray((0xff,)*((self.state.xsize+7)//8))
        try:
            self.fd.seek(32)
            rowmap = struct.unpack_from("<%dH" % (self.state.ysize),
                                        self.fd.read(self.state.ysize*2))
        except struct.error:
            raise IOError("Truncated MSP file in row map")

        for x, rowlen in enumerate(rowmap):
            try:
                if rowlen == 0:
                    img.write(blank_line)
                    continue
                row = self.fd.read(rowlen)
                if len(row) != rowlen:
                    # BUG FIX: the format arguments were previously passed as
                    # a second argument to IOError instead of being
                    # %-formatted into the message, so the error text was
                    # never interpolated.
                    raise IOError("Truncated MSP file, expected %d bytes on row %d"
                                  % (rowlen, x))
                idx = 0
                while idx < rowlen:
                    runtype = i8(row[idx])
                    idx += 1
                    if runtype == 0:
                        # Encoded run: next two bytes are count and value.
                        (runcount, runval) = struct.unpack("Bc", row[idx:idx+2])
                        img.write(runval * runcount)
                        idx += 2
                    else:
                        # Literal run of `runtype` bytes copied verbatim.
                        runcount = runtype
                        img.write(row[idx:idx+runcount])
                        idx += runcount

            except struct.error:
                raise IOError("Corrupted MSP file in row %d" % x)

        self.set_as_raw(img.getvalue(), ("1", 0, 1))

        return 0, 0
Image.register_decoder('MSP', MspDecoder)
#
# write MSP files (uncompressed only)
def _save(im, fp, filename):
if im.mode != "1":
raise IOError("cannot write mode %s as MSP" % im.mode)
# create MSP header
header = [0] * 16
header[0], header[1] = i16(b"Da"), i16(b"nM") # version 1
header[2], header[3] = im.size
header[4], header[5] = 1, 1
header[6], header[7] = 1, 1
header[8], header[9] = im.size
checksum = 0
for h in header:
checksum = checksum ^ h
header[12] = checksum # FIXME: is this the right field?
# header
for h in header:
fp.write(o16(h))
# image body
ImageFile._save(im, fp, [("raw", (0, 0)+im.size, 32, ("1", 0, 1))])
#
# registry

# Hook the format into PIL: sniffing via _accept, opening via MspImageFile,
# saving via _save, and the .msp file extension.
Image.register_open(MspImageFile.format, MspImageFile, _accept)
Image.register_save(MspImageFile.format, _save)

Image.register_extension(MspImageFile.format, ".msp")
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@PIL@MspImagePlugin.py@.PATH_END.py
|
{
"filename": "_namelengthsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/mesh3d/hoverlabel/_namelengthsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NamelengthsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `namelengthsrc` attribute of `mesh3d.hoverlabel`."""

    def __init__(
        self, plotly_name="namelengthsrc", parent_name="mesh3d.hoverlabel", **kwargs
    ):
        # Pop overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(NamelengthsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@mesh3d@hoverlabel@_namelengthsrc.py@.PATH_END.py
|
{
"filename": "_optional_dependencies.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/utils/_optional_dependencies.py",
"type": "Python"
}
|
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
def check_matplotlib_support(caller_name):
    """Ensure matplotlib is importable, raising a helpful error otherwise.

    Plot utilities (e.g. the various Display plotting helpers) should import
    matplotlib lazily and call this check before doing any work.

    Parameters
    ----------
    caller_name : str
        The name of the caller that requires matplotlib.

    Raises
    ------
    ImportError
        If matplotlib is not installed, with an actionable message.
    """
    try:
        import matplotlib  # noqa
    except ImportError as e:
        message = (
            "{} requires matplotlib. You can install matplotlib with "
            "`pip install matplotlib`".format(caller_name)
        )
        raise ImportError(message) from e
def check_pandas_support(caller_name):
    """Return the pandas module, raising a helpful error if it is absent.

    Utilities such as :func:`fetch_openml` should import pandas lazily and
    call this helper before any computation.

    Parameters
    ----------
    caller_name : str
        The name of the caller that requires pandas.

    Returns
    -------
    pandas
        The pandas package.

    Raises
    ------
    ImportError
        If pandas is not installed.
    """
    try:
        import pandas  # noqa
    except ImportError as e:
        raise ImportError("{} requires pandas.".format(caller_name)) from e
    return pandas
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@utils@_optional_dependencies.py@.PATH_END.py
|
{
"filename": "imagenet10.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/datasets/classify/imagenet10.md",
"type": "Markdown"
}
|
---
comments: true
description: Discover ImageNet10 a compact version of ImageNet for rapid model testing and CI checks. Perfect for quick evaluations in computer vision tasks.
keywords: ImageNet10, ImageNet, Ultralytics, CI tests, sanity checks, training pipelines, computer vision, deep learning, dataset
---
# ImageNet10 Dataset
The [ImageNet10](https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenet10.zip) dataset is a small-scale subset of the [ImageNet](https://www.image-net.org/) database, developed by [Ultralytics](https://www.ultralytics.com/) and designed for CI tests, sanity checks, and fast testing of training pipelines. This dataset is composed of the first image in the training set and the first image from the validation set of the first 10 classes in ImageNet. Although significantly smaller, it retains the structure and diversity of the original ImageNet dataset.
## Key Features
- ImageNet10 is a compact version of ImageNet, with 20 images representing the first 10 classes of the original dataset.
- The dataset is organized according to the WordNet hierarchy, mirroring the structure of the full ImageNet dataset.
- It is ideally suited for CI tests, sanity checks, and rapid testing of training pipelines in [computer vision](https://www.ultralytics.com/glossary/computer-vision-cv) tasks.
- Although not designed for model benchmarking, it can provide a quick indication of a model's basic functionality and correctness.
## Dataset Structure
The ImageNet10 dataset, like the original ImageNet, is organized using the WordNet hierarchy. Each of the 10 classes in ImageNet10 is described by a synset (a collection of synonymous terms). The images in ImageNet10 are annotated with one or more synsets, providing a compact resource for testing models to recognize various objects and their relationships.
## Applications
The ImageNet10 dataset is useful for quickly testing and debugging computer vision models and pipelines. Its small size allows for rapid iteration, making it ideal for continuous integration tests and sanity checks. It can also be used for fast preliminary testing of new models or changes to existing models before moving on to full-scale testing with the complete ImageNet dataset.
## Usage
To test a deep learning model on the ImageNet10 dataset with an image size of 224x224, you can use the following code snippets. For a comprehensive list of available arguments, refer to the model [Training](../../modes/train.md) page.
!!! example "Test Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagenet10", epochs=5, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagenet10 model=yolo11n-cls.pt epochs=5 imgsz=224
```
## Sample Images and Annotations
The ImageNet10 dataset contains a subset of images from the original ImageNet dataset. These images are chosen to represent the first 10 classes in the dataset, providing a diverse yet compact dataset for quick testing and evaluation.
 The example showcases the variety and complexity of the images in the ImageNet10 dataset, highlighting its usefulness for sanity checks and quick testing of computer vision models.
## Citations and Acknowledgments
If you use the ImageNet10 dataset in your research or development work, please cite the original ImageNet paper:
!!! quote ""
=== "BibTeX"
```bibtex
@article{ILSVRC15,
author = {Olga Russakovsky and Jia Deng and Hao Su and Jonathan Krause and Sanjeev Satheesh and Sean Ma and Zhiheng Huang and Andrej Karpathy and Aditya Khosla and Michael Bernstein and Alexander C. Berg and Li Fei-Fei},
title={ImageNet Large Scale Visual Recognition Challenge},
year={2015},
journal={International Journal of Computer Vision (IJCV)},
volume={115},
number={3},
pages={211-252}
}
```
We would like to acknowledge the ImageNet team, led by Olga Russakovsky, Jia Deng, and Li Fei-Fei, for creating and maintaining the ImageNet dataset. The ImageNet10 dataset, while a compact subset, is a valuable resource for quick testing and debugging in the [machine learning](https://www.ultralytics.com/glossary/machine-learning-ml) and computer vision research community. For more information about the ImageNet dataset and its creators, visit the [ImageNet website](https://www.image-net.org/).
## FAQ
### What is the ImageNet10 dataset and how is it different from the full ImageNet dataset?
The [ImageNet10](https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenet10.zip) dataset is a compact subset of the original [ImageNet](https://www.image-net.org/) database, created by Ultralytics for rapid CI tests, sanity checks, and training pipeline evaluations. ImageNet10 comprises only 20 images, representing the first image in the training and validation sets of the first 10 classes in ImageNet. Despite its small size, it maintains the structure and diversity of the full dataset, making it ideal for quick testing but not for benchmarking models.
### How can I use the ImageNet10 dataset to test my deep learning model?
To test your deep learning model on the ImageNet10 dataset with an image size of 224x224, use the following code snippets.
!!! example "Test Example"
=== "Python"
```python
from ultralytics import YOLO
# Load a model
model = YOLO("yolo11n-cls.pt") # load a pretrained model (recommended for training)
# Train the model
results = model.train(data="imagenet10", epochs=5, imgsz=224)
```
=== "CLI"
```bash
# Start training from a pretrained *.pt model
yolo classify train data=imagenet10 model=yolo11n-cls.pt epochs=5 imgsz=224
```
Refer to the [Training](../../modes/train.md) page for a comprehensive list of available arguments.
### Why should I use the ImageNet10 dataset for CI tests and sanity checks?
The ImageNet10 dataset is designed specifically for CI tests, sanity checks, and quick evaluations in [deep learning](https://www.ultralytics.com/glossary/deep-learning-dl) pipelines. Its small size allows for rapid iteration and testing, making it perfect for continuous integration processes where speed is crucial. By maintaining the structural complexity and diversity of the original ImageNet dataset, ImageNet10 provides a reliable indication of a model's basic functionality and correctness without the overhead of processing a large dataset.
### What are the main features of the ImageNet10 dataset?
The ImageNet10 dataset has several key features:
- **Compact Size**: With only 20 images, it allows for rapid testing and debugging.
- **Structured Organization**: Follows the WordNet hierarchy, similar to the full ImageNet dataset.
- **CI and Sanity Checks**: Ideally suited for continuous integration tests and sanity checks.
- **Not for Benchmarking**: While useful for quick model evaluations, it is not designed for extensive benchmarking.
### Where can I download the ImageNet10 dataset?
You can download the ImageNet10 dataset from the [Ultralytics GitHub releases page](https://github.com/ultralytics/assets/releases/download/v0.0.0/imagenet10.zip). For more detailed information about its structure and applications, refer to the [ImageNet10 Dataset](imagenet10.md) page.
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@datasets@classify@imagenet10.md@.PATH_END.py
|
{
"filename": "plotlib.py",
"repo_name": "catketchup/lens_rot_bias",
"repo_path": "lens_rot_bias_extracted/lens_rot_bias-main/plotlib.py",
"type": "Python"
}
|
import matplotlib as mpl
from cycler import cycler
# common style for all plots
# High-resolution figures for crisp inline/notebook rendering.
mpl.rcParams['figure.dpi'] = 180
# Uniform 12 pt text for body text and tick labels.
mpl.rcParams['font.size'] = 12
mpl.rcParams['xtick.labelsize'] = 12
mpl.rcParams['ytick.labelsize'] = 12
# Use matplotlib's built-in mathtext instead of an external LaTeX install.
mpl.rcParams['text.usetex'] = False
# Ticks point inwards and are drawn on all four sides of the axes.
mpl.rcParams['xtick.direction'] = 'in'
mpl.rcParams['ytick.direction'] = 'in'
mpl.rcParams['xtick.top'] = True
mpl.rcParams['ytick.right'] = True
mpl.rcParams['lines.linewidth'] = 1
# Color rotation for successive lines: blue, green, red, cyan, magenta,
# yellow, black.
mpl.rcParams['axes.prop_cycle'] = cycler(color='bgrcmyk')
mpl.rcParams['font.family'] = 'serif'
def setup_axis(ax, xlabel=None, ylabel=None, xscale=None, yscale=None,
               fs=18, lbl_fs=None, title=None):
    """Apply labels, scales and a title to *ax* and return it.

    Any argument left falsy is skipped. *lbl_fs* (label/title font size)
    falls back to *fs* when not given; *fs* itself is otherwise unused.
    """
    if lbl_fs is None:
        lbl_fs = fs
    # (setter, value, takes-a-fontsize) triples, applied in the original
    # order: xlabel, ylabel, xscale, yscale, title.
    for setter, value, sized in (
        (ax.set_xlabel, xlabel, True),
        (ax.set_ylabel, ylabel, True),
        (ax.set_xscale, xscale, False),
        (ax.set_yscale, yscale, False),
        (ax.set_title, title, True),
    ):
        if not value:
            continue
        if sized:
            setter(value, fontsize=lbl_fs)
        else:
            setter(value)
    return ax
def texify(text):
    """Wrap *text* in an upright-roman TeX group, using '~' for spaces."""
    return r"${\rm %s}$" % text.replace(" ", "~")
|
catketchupREPO_NAMElens_rot_biasPATH_START.@lens_rot_bias_extracted@lens_rot_bias-main@plotlib.py@.PATH_END.py
|
{
"filename": "mosfit.ipynb",
"repo_name": "guillochon/MOSFiT",
"repo_path": "MOSFiT_extracted/MOSFiT-master/jupyter/mosfit.ipynb",
"type": "Jupyter Notebook"
}
|
## <span style="color:red">Important: Before running this notebook, make sure the mosfit, corner, matplotlib, and seaborn libraries are installed in your Python environment (install them with conda or pip).</span>
### Load the data from `walkers.json`, the main output file produced by the last MOSFiT run.
```python
%matplotlib inline
%config InlineBackend.figure_format='retina'
import corner
import json
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from tqdm import tqdm_notebook
from collections import OrderedDict
from mosfit.plotting import bandcolorf
sns.reset_orig()
plt.rcParams["font.family"] = "serif"
plt.rcParams.update({'font.size': 14})
with open('../products/walkers.json', 'r', encoding = 'utf-8') as f:
data = json.loads(f.read())
if 'name' not in data:
data = data[list(data.keys())[0]]
photo = data['photometry']
model = data['models'][0]
real_data = len([x for x in photo if 'band' in x and 'magnitude' in x and (
'realization' not in x or 'simulated' in x)]) > 0
band_attr = ['band', 'instrument', 'telescope', 'system', 'bandset']
band_list = list(set([tuple(x.get(y, '')
for y in band_attr) for x in photo
if 'band' in x and 'magnitude' in x]))
real_band_list = list(set([tuple(x.get(y, '')
for y in band_attr) for x in photo
if 'band' in x and 'magnitude' in x and (
'realization' not in x or 'simulated' in x)]))
xray_instrument_attr = ['instrument', 'telescope']
xray_instrument_list = list(set([tuple(x.get(y, '')
for y in xray_instrument_attr) for x in photo
if 'instrument' in x and 'countrate' in x]))
real_band_list = list(set([tuple(x.get(y, '')
for y in band_attr) for x in photo
if 'band' in x and 'magnitude' in x and (
'realization' not in x or 'simulated' in x)]))
real_xray_instrument_list = list(set([tuple(x.get(y, '')
for y in xray_instrument_attr) for x in photo
if 'instrument' in x and 'countrate' in x and (
'realization' not in x or 'simulated' in x)]))
```
### First, plot the comparison between the observed magnitudes and those predicted by the model.
```python
# Uncomment line below to only plot from the specified instruments.
# inst_exclusive_list = ['UVOT']
fig = plt.figure(figsize=(12,8))
plt.gca().invert_yaxis()
plt.gca().set_xlabel('MJD')
plt.gca().set_ylabel('Apparent Magnitude')
used_bands = []
for full_band in tqdm_notebook(band_list, desc='Photo', leave=False):
(band, inst, tele, syst, bset) = full_band
try:
inst_exclusive_list
except:
pass
else:
if inst not in inst_exclusive_list:
continue
extra_nice = ', '.join(list(filter(None, OrderedDict.fromkeys((inst, syst, bset)).keys())))
nice_name = band + ((' [' + extra_nice + ']') if extra_nice else '')
realizations = [[] for x in range(len(model['realizations']))]
for ph in photo:
rn = ph.get('realization', None)
si = ph.get('simulated', False)
if rn and not si:
if tuple(ph.get(y, '') for y in band_attr) == full_band:
realizations[int(rn) - 1].append((
float(ph['time']), float(ph['magnitude']), [
float(ph.get('e_lower_magnitude', ph.get('e_magnitude', 0.0))),
float(ph.get('e_upper_magnitude', ph.get('e_magnitude', 0.0)))],
ph.get('upperlimit')))
numrz = np.sum([1 for x in realizations if len(x)])
for rz in realizations:
if not len(rz):
continue
xs, ys, vs, us = zip(*rz)
label = '' if full_band in used_bands or full_band in real_band_list else nice_name
if max(vs) == 0.0:
plt.plot(xs, ys, color=bandcolorf(band),
label=label, linewidth=0.5)
else:
xs = np.array(xs)
ymi = np.array(ys) - np.array([np.inf if u else v[0] for v, u in zip(vs, us)])
yma = np.array(ys) + np.array([v[1] for v in vs])
plt.fill_between(xs, ymi, yma, color=bandcolorf(band), edgecolor=None,
label=label, alpha=1.0/numrz, linewidth=0.0)
plt.plot(xs, ys, color=bandcolorf(band),
label=label, alpha=1.0, linewidth=0.5)
if label:
used_bands = list(set(used_bands + [full_band]))
if real_data:
for s in range(2):
if s == 0:
cond = False
symb = 'o'
else:
cond = True
symb = 'v'
vec = [(float(x['time']), float(x['magnitude']),
0.0 if 'upperlimit' in x else float(x.get('e_lower_magnitude', x.get('e_magnitude', 0.0))),
float(x.get('e_upper_magnitude', x.get('e_magnitude', 0.0)))) for x in photo
if 'magnitude' in x and ('realization' not in x or 'simulated' in x) and
'host' not in x and 'includeshost' not in x and
x.get('upperlimit', False) == cond and
tuple(x.get(y, '') for y in band_attr) == full_band]
if not len(vec):
continue
xs, ys, yls, yus = zip(*vec)
label = nice_name if full_band not in used_bands else ''
plt.errorbar(xs, ys, yerr=(yus, yls), color=bandcolorf(band), fmt=symb,
label=label,
markeredgecolor='black', markeredgewidth=1, capsize=1,
elinewidth=1.5, capthick=2, zorder=10)
plt.errorbar(xs, ys, yerr=(yus, yls), color='k', fmt=symb, capsize=2,
elinewidth=2.5, capthick=3, zorder=5)
if label:
used_bands = list(set(used_bands + [full_band]))
plt.margins(0.02, 0.1)
plt.legend()
plt.show()
fig.savefig('../products/lc.pdf')
```
HBox(children=(FloatProgress(value=0.0, description='Photo', max=4.0, style=ProgressStyle(description_width='i…

### Then, plot observations reported as count rates (e.g. X-ray observations).
```python
# Count-rate (e.g. X-ray) light curves: model realizations are drawn as
# shaded bands (or thin lines when no uncertainties are present), observed
# data as circles (detections) or triangles (upper limits).
fig = plt.figure(figsize=(12, 8))
ax = plt.gca()
used_instruments = []

# Assign one color per instrument, cycling the axes' own color sequence.
color = {}
for i in xray_instrument_list:
    color[i[0]] = next(ax._get_lines.prop_cycler)['color']  # using this to match MOSFiT color scheme

for all_instrument_attr in tqdm_notebook(xray_instrument_list, desc='Photo', leave=False):
    (inst, telscp) = all_instrument_attr
    # Optionally restrict plotting to instruments named in
    # `inst_exclusive_list` (defined, if at all, in an earlier cell).
    # Narrowed the bare `except:` to NameError: only the "list was never
    # defined" case should be silently ignored.
    try:
        inst_exclusive_list
    except NameError:
        pass
    else:
        if inst not in inst_exclusive_list:
            continue
    # NOTE(review): `syst` is not defined in this cell -- it leaks in from
    # the band-magnitude cell above; confirm the intended legend text.
    extra_nice = ', '.join(list(filter(None, OrderedDict.fromkeys((inst, syst)).keys())))
    nice_name = telscp + (('[' + extra_nice + ']') if extra_nice else '')

    # Gather (time, countrate, lower error, upper error) per realization.
    # (The previous `alias` bookkeeping was never read and has been removed.)
    realizations = [[] for x in range(len(model['realizations']))]
    for ph in photo:
        rn = ph.get('realization', None)
        if rn and tuple(ph.get(y, '') for y in xray_instrument_attr) == all_instrument_attr:
            realizations[int(rn) - 1].append((float(ph['time']),
                                              float(ph['countrate']),
                                              float(ph.get('e_lower_countrate', 0.0)),
                                              float(ph.get('e_upper_countrate', 0.0))))

    for rz in realizations:
        if not len(rz):
            continue
        xs, ys, vls, vus = zip(*rz)
        label = ('' if all_instrument_attr in used_instruments or
                 all_instrument_attr in real_xray_instrument_list else nice_name)
        # BUGFIX: this previously tested `max(vs)`, a stale variable left
        # over from the magnitude light-curve cell (NameError when this
        # cell is run alone); test this realization's own errors instead.
        if max(vls + vus) == 0.0:
            # No uncertainty information: draw a thin line.
            plt.plot(xs, ys, color=color[inst],
                     label=label, linewidth=0.5)
        else:
            xs = np.array(xs)
            ymi = np.array(ys) - np.array(vls)
            yma = np.array(ys) + np.array(vus)
            plt.fill_between(xs, ymi, yma, color=color[inst],
                             label=label, alpha=2.0/len(realizations))
        if label:
            used_instruments = list(set(used_instruments + [all_instrument_attr]))

    if real_data:
        # Overplot observations: s == 0 -> detections (circles),
        # s == 1 -> upper limits (triangles).
        for s in range(2):
            if s == 0:
                cond = False
                symb = 'o'
            else:
                cond = True
                symb = 'v'
            # NOTE(review): `x['time'][0]` takes the first element of the
            # time entry (first character if it is a string) -- confirm the
            # expected schema for count-rate photometry.
            vec = [(float(x['time'][0]), float(x['countrate']), float(x.get('e_countrate', 0.0))) for x in photo
                   if 'countrate' in x and 'realization' not in x and
                   'host' not in x and 'includeshost' not in x and
                   x.get('upperlimit', False) == cond and
                   tuple(x.get(y, '') for y in xray_instrument_attr) == all_instrument_attr]
            if not len(vec):
                continue
            xs, ys, yes = zip(*vec)
            label = nice_name if all_instrument_attr not in used_instruments else ''
            # Colored markers on top of a black "shadow" for contrast.
            plt.errorbar(xs, ys, yerr=yes, color=color[inst], fmt=symb,
                         label=label,
                         markeredgecolor='black', markeredgewidth=1, capsize=5,
                         elinewidth=2, capthick=2, zorder=10)
            plt.errorbar(xs, ys, yerr=yes, color='k', fmt=symb, capsize=6,
                         elinewidth=3, capthick=3, zorder=5)
            if label:
                used_instruments = list(set(used_instruments + [all_instrument_attr]))

plt.gca().set_xlabel('MJD')
plt.gca().set_ylabel('countrates')
#plt.xlim(55200, 56000)
plt.yscale('log')
#plt.ylim(1e-320,1e20)
plt.margins(0.1,0.1)
plt.legend(loc = 'best')
plt.show()
fig.savefig('../products/lc_xrays.pdf')
```
### If MOSFiT was run with the `-c` flag, this cell will plot the parameter values over the full chain.
```python
with open('../products/chain.json', 'r', encoding = 'utf-8') as f:
all_chain = np.array(json.load(f))
param_names = all_chain[1]
all_chain = np.asarray(all_chain[0])
print(np.shape(all_chain))
nparam = len(all_chain[0,0,:]);
fig = plt.figure(figsize=(4. * np.ceil(nparam / 4.), 8));
for pi, param in enumerate(range(nparam)):
my_chain = all_chain[:, :, param]
ax = fig.add_subplot(np.ceil(nparam / 4.), 4, pi + 1);
ax.plot(my_chain.T);
ax.plot(np.mean(my_chain, axis=0), color='k');
plt.tight_layout()
```
(16, 10, 8)

### This cell produces a corner plot of all the parameters.
```python
import logging
logging.disable(logging.WARNING)
# Construct walker arrays for corner
corner_input = []
pars = [x for x in model['setup'] if model['setup'][x].get('kind') == 'parameter' and
'min_value' in model['setup'][x] and 'max_value' in model['setup'][x]]
weights = []
for realization in model['realizations']:
par_vals = realization['parameters']
if 'weight' in realization:
weights.append(float(realization['weight']))
var_names = ['$' + ('\\log\\, ' if par_vals[x].get('log') else '') +
par_vals[x]['latex'] + '$' for x in par_vals if x in pars and 'fraction' in par_vals[x]]
corner_input.append([np.log10(par_vals[x]['value']) if
par_vals[x].get('log') else par_vals[x]['value'] for x in par_vals
if x in pars and 'fraction' in par_vals[x]])
weights = weights if len(weights) else None
ranges = [0.999 for x in range(len(corner_input[0]))]
cfig = corner.corner(corner_input, labels=var_names, quantiles=[0.16, 0.5, 0.84],
show_titles=True, weights=weights, range=ranges)
cfig.savefig('../products/corner.pdf')
```
['codeltalambda', 'codeltatime', 'fnickel', 'kappagamma', 'lumdist', 'mejecta', 'nhhost', 'redshift', 'temperature', 'texplosion', 'variance', 'vejecta']

### The following cell plots the full chain corner plot
```python
import logging
logging.disable(logging.WARNING)
par_dict = model['realizations'][0]['parameters']
# Construct chain walker arrays for corner
corner_input = []
with open('../products/chain.json', 'r', encoding = 'utf-8') as f:
all_chain = np.array(json.load(f))
par_names = all_chain[1]
all_chain = np.asarray(all_chain[0])
all_chain = np.reshape(all_chain,(-1,len(par_names)))
par_labels = []
for i,par_name in enumerate(par_names):
par_labels.append('$' + ('\\log\\, ' if par_dict[par_name].get('log') else '') +
par_dict[par_name]['latex'] + '$')
if par_dict[par_name].get('log'):
all_chain[:,i] = np.log10(all_chain[:,i])
cfig = corner.corner(all_chain[-1000:,:], labels=par_labels, quantiles=[0.16, 0.5, 0.84],
show_titles=True)
cfig.savefig('../products/corner.pdf')
```

### The above cells are a demonstration of visualizations of MOSFiT outputs and are not intended to show the full scope of possible outputs, the user is encouraged to experiment with their own notebooks for their projects!
```python
```
|
guillochonREPO_NAMEMOSFiTPATH_START.@MOSFiT_extracted@MOSFiT-master@jupyter@mosfit.ipynb@.PATH_END.py
|
{
"filename": "_opacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/marker/_opacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``scatter.marker.opacity`` (a number in [0, 1])."""

    def __init__(self, plotly_name="opacity", parent_name="scatter.marker", **kwargs):
        # Defaults, popped from kwargs so explicit caller values win.
        defaults = {
            "anim": True,
            "array_ok": True,
            "edit_type": "style",
            "max": 1,
            "min": 0,
        }
        resolved = {key: kwargs.pop(key, value) for key, value in defaults.items()}
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **resolved,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@marker@_opacity.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "florpi/sunbird",
"repo_path": "sunbird_extracted/sunbird-main/sunbird/emulators/loss/__init__.py",
"type": "Python"
}
|
from .gaussian import MultivariateGaussianNLLLoss, GaussianNLoglike, get_cholesky_decomposition_covariance
from .weighted import WeightedL1Loss, WeightedMSELoss
|
florpiREPO_NAMEsunbirdPATH_START.@sunbird_extracted@sunbird-main@sunbird@emulators@loss@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/tools/test/__init__.py",
"type": "Python"
}
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools for testing."""
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@tools@test@__init__.py@.PATH_END.py
|
{
"filename": "_showexponent.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/mesh3d/colorbar/_showexponent.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowexponentValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the enumerated ``mesh3d.colorbar.showexponent`` property."""

    def __init__(
        self, plotly_name="showexponent", parent_name="mesh3d.colorbar", **kwargs
    ):
        # Pull defaults out of kwargs first so explicit overrides win.
        edit_type = kwargs.pop("edit_type", "colorbars")
        allowed_values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowexponentValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed_values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@mesh3d@colorbar@_showexponent.py@.PATH_END.py
|
{
"filename": "FLAG_casa_backup_flag_table.py",
"repo_name": "IanHeywood/oxkat",
"repo_path": "oxkat_extracted/oxkat-master/oxkat/FLAG_casa_backup_flag_table.py",
"type": "Python"
}
|
# ian.heywood@physics.ox.ac.uk
#
# set versionname=... on command line call to CASA
# can also specify csv mslist=...,... otherwise project_info.p will be
# used and the operation will proceed on all available target Measurement Sets.
#
# versionname must be supplied
#
import os
import sys
# Load project metadata (defines target_ms, among others) produced by an
# earlier pipeline step; executed inside the CASA Python 2 interpreter.
execfile('oxkat/casa_read_project_info.py')

mslist = False

# Parse key=value arguments from the CASA command line:
#   versionname=...  (required) name under which to save the flag state
#   mslist=a,b,c     (optional) comma-separated Measurement Sets to process
# Note: `args` is assigned but unused; the loop reads sys.argv directly.
args = sys.argv
for item in sys.argv:
    parts = item.split('=')
    if parts[0] == 'versionname':
        versionname = parts[1]
    if parts[0] == 'mslist':
        mslist = parts[1].split(',')

# No explicit mslist supplied: fall back to every target Measurement Set
# listed in the project info.
if not mslist:
    mslist = []
    for targ in target_ms:
        mslist.append(targ)

# Back up the current flag state of each existing MS under versionname
# using CASA's flagmanager task; report any missing MS instead of failing.
for myms in mslist:
    if os.path.isdir(myms):
        flagmanager(vis=myms,
            mode='save',
            versionname=versionname)
    else:
        print(myms+' not found')
|
IanHeywoodREPO_NAMEoxkatPATH_START.@oxkat_extracted@oxkat-master@oxkat@FLAG_casa_backup_flag_table.py@.PATH_END.py
|
{
"filename": "inset_locator.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/mpl_toolkits/axes_grid1/inset_locator.py",
"type": "Python"
}
|
"""
A collection of functions and objects for creating or placing inset axes.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib import docstring
import six
from matplotlib.offsetbox import AnchoredOffsetbox
from matplotlib.patches import Patch, Rectangle
from matplotlib.path import Path
from matplotlib.transforms import Bbox, BboxTransformTo
from matplotlib.transforms import IdentityTransform, TransformedBbox
from . import axes_size as Size
from .parasite_axes import HostAxes
class InsetPosition(object):
    @docstring.dedent_interpd
    def __init__(self, parent, lbwh):
        """
        An object for positioning an inset axes.

        This is created by specifying the normalized coordinates in the axes,
        instead of the figure.

        Parameters
        ----------
        parent : `matplotlib.axes.Axes`
            Axes to use for normalizing coordinates.

        lbwh : iterable of four floats
            The left edge, bottom edge, width, and height of the inset axes, in
            units of the normalized coordinate of the *parent* axes.

        See Also
        --------
        :meth:`matplotlib.axes.Axes.set_axes_locator`

        Examples
        --------
        The following bounds the inset axes to a box with 20%% of the parent
        axes's height and 40%% of the width. The size of the axes specified
        ([0, 0, 1, 1]) ensures that the axes completely fills the bounding box:

        >>> parent_axes = plt.gca()
        >>> ax_ins = plt.axes([0, 0, 1, 1])
        >>> ip = InsetPosition(ax, [0.5, 0.1, 0.4, 0.2])
        >>> ax_ins.set_axes_locator(ip)
        """
        self.parent = parent
        self.lbwh = lbwh

    def __call__(self, ax, renderer):
        # Map the normalized [left, bottom, width, height] box through the
        # parent axes' current (post-layout) position in figure space.
        parent_to_figure = BboxTransformTo(
            self.parent.get_position(original=False))
        return TransformedBbox(Bbox.from_bounds(*self.lbwh), parent_to_figure)
class AnchoredLocatorBase(AnchoredOffsetbox):
    """Common base for axes locators that anchor an inset to a bbox.

    Subclasses provide ``get_extent``; an instance is used as an axes
    locator, i.e. called with ``(ax, renderer)`` and returning the axes
    position as a Bbox in figure coordinates.
    """

    def __init__(self, bbox_to_anchor, offsetbox, loc,
                 borderpad=0.5, bbox_transform=None):
        # Note: *offsetbox* is accepted but ignored; the child is fixed to
        # None and the internal pad to 0.
        super(AnchoredLocatorBase, self).__init__(
            loc, pad=0., child=None, borderpad=borderpad,
            bbox_to_anchor=bbox_to_anchor, bbox_transform=bbox_transform
        )

    def draw(self, renderer):
        # A locator is never drawn as an artist.
        raise RuntimeError("No draw method should be called")

    def __call__(self, ax, renderer):
        # Compute the inset's canvas-pixel bounding box via the anchored
        # offset machinery, then convert it to figure coordinates.
        self.axes = ax
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        # Must run before get_offset so the offset function is current.
        self._update_offset_func(renderer, fontsize)
        width, height, xdescent, ydescent = self.get_extent(renderer)
        px, py = self.get_offset(width, height, 0, 0, renderer)
        bbox_canvas = Bbox.from_bounds(px, py, width, height)
        tr = ax.figure.transFigure.inverted()
        bb = TransformedBbox(bbox_canvas, tr)
        return bb
class AnchoredSizeLocator(AnchoredLocatorBase):
    """Locator that sizes an inset from explicit x/y sizes (axes_size)."""

    def __init__(self, bbox_to_anchor, x_size, y_size, loc,
                 borderpad=0.5, bbox_transform=None):
        super(AnchoredSizeLocator, self).__init__(
            bbox_to_anchor, None, loc,
            borderpad=borderpad, bbox_transform=bbox_transform
        )
        # Size.from_any normalizes floats/strings/Size instances to Size.
        self.x_size = Size.from_any(x_size)
        self.y_size = Size.from_any(y_size)

    def get_extent(self, renderer):
        # Bounds of the anchor bbox in pixels; the relative part of each
        # Size scales against this.
        x, y, w, h = self.get_bbox_to_anchor().bounds
        # Pixels per 72 points, i.e. the renderer's dpi.
        dpi = renderer.points_to_pixels(72.)
        # Each Size yields (relative factor, absolute part); the absolute
        # part is multiplied by dpi -- presumably it is expressed in
        # inches; confirm against axes_size.
        r, a = self.x_size.get_size(renderer)
        width = w*r + a*dpi
        r, a = self.y_size.get_size(renderer)
        height = h*r + a*dpi
        xd, yd = 0, 0
        # Pad by self.pad font-size units on every side.
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize
        return width+2*pad, height+2*pad, xd+pad, yd+pad
class AnchoredZoomLocator(AnchoredLocatorBase):
    """Locator that sizes an inset as a zoomed view of the parent's data."""

    def __init__(self, parent_axes, zoom, loc,
                 borderpad=0.5,
                 bbox_to_anchor=None,
                 bbox_transform=None):
        self.parent_axes = parent_axes
        self.zoom = zoom
        # Default anchor: the parent axes' own bounding box.
        if bbox_to_anchor is None:
            bbox_to_anchor = parent_axes.bbox
        super(AnchoredZoomLocator, self).__init__(
            bbox_to_anchor, None, loc, borderpad=borderpad,
            bbox_transform=bbox_transform)

    def get_extent(self, renderer):
        # Pixel size of the inset's current view limits as seen through the
        # parent axes' data transform, scaled by the zoom factor.
        bb = TransformedBbox(self.axes.viewLim,
                             self.parent_axes.transData)
        x, y, w, h = bb.bounds
        fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
        pad = self.pad * fontsize
        # abs() guards against inverted (descending) axis limits.
        return abs(w*self.zoom)+2*pad, abs(h*self.zoom)+2*pad, pad, pad
class BboxPatch(Patch):
    @docstring.dedent_interpd
    def __init__(self, bbox, **kwargs):
        """
        Patch showing the shape bounded by a Bbox.

        Parameters
        ----------
        bbox : `matplotlib.transforms.Bbox`
            Bbox to use for the extents of this patch.

        **kwargs
            Patch properties. Valid arguments include:
            %(Patch)s
        """
        # The patch is drawn in raw canvas coordinates; a caller-supplied
        # transform would conflict with that.
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        kwargs["transform"] = IdentityTransform()
        Patch.__init__(self, **kwargs)
        self.bbox = bbox

    def get_path(self):
        # Trace the bbox outline from the lower-left corner and close the
        # polygon (the final (0, 0) vertex is ignored by CLOSEPOLY).
        x0, y0, x1, y1 = self.bbox.extents
        verts = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0), (0, 0)]
        codes = [Path.MOVETO] + [Path.LINETO] * 4 + [Path.CLOSEPOLY]
        return Path(verts, codes)
    get_path.__doc__ = Patch.get_path.__doc__
class BboxConnector(Patch):
    """A patch drawing a straight line between corners of two bboxes."""

    @staticmethod
    def get_bbox_edge_pos(bbox, loc):
        """
        Helper function to obtain the location of a corner of a bbox

        Parameters
        ----------
        bbox : `matplotlib.transforms.Bbox`

        loc : {1, 2, 3, 4}
            Corner of *bbox*. Valid values are::

                'upper right'  : 1,
                'upper left'   : 2,
                'lower left'   : 3,
                'lower right'  : 4

        Returns
        -------
        x, y : float
            Coordinates of the corner specified by *loc*.
        """
        x0, y0, x1, y1 = bbox.extents
        if loc == 1:
            return x1, y1
        elif loc == 2:
            return x0, y1
        elif loc == 3:
            return x0, y0
        elif loc == 4:
            return x1, y0

    @staticmethod
    def connect_bbox(bbox1, bbox2, loc1, loc2=None):
        """
        Helper function to obtain a Path from one bbox to another.

        Parameters
        ----------
        bbox1, bbox2 : `matplotlib.transforms.Bbox`
            Bounding boxes to connect.

        loc1 : {1, 2, 3, 4}
            Corner of *bbox1* to use. Valid values are::

                'upper right'  : 1,
                'upper left'   : 2,
                'lower left'   : 3,
                'lower right'  : 4

        loc2 : {1, 2, 3, 4}, optional
            Corner of *bbox2* to use. If None, defaults to *loc1*.
            Valid values are::

                'upper right'  : 1,
                'upper left'   : 2,
                'lower left'   : 3,
                'lower right'  : 4

        Returns
        -------
        path : `matplotlib.path.Path`
            A line segment from the *loc1* corner of *bbox1* to the *loc2*
            corner of *bbox2*.
        """
        # Rectangles are converted to the unit bbox carrying the
        # rectangle's own transform.
        if isinstance(bbox1, Rectangle):
            # BUGFIX: was ``bbox1.get_transfrom()`` (typo), which raised
            # AttributeError whenever a Rectangle was passed as *bbox1*.
            transform = bbox1.get_transform()
            bbox1 = Bbox.from_bounds(0, 0, 1, 1)
            bbox1 = TransformedBbox(bbox1, transform)

        if isinstance(bbox2, Rectangle):
            transform = bbox2.get_transform()
            bbox2 = Bbox.from_bounds(0, 0, 1, 1)
            bbox2 = TransformedBbox(bbox2, transform)

        if loc2 is None:
            loc2 = loc1

        x1, y1 = BboxConnector.get_bbox_edge_pos(bbox1, loc1)
        x2, y2 = BboxConnector.get_bbox_edge_pos(bbox2, loc2)

        verts = [[x1, y1], [x2, y2]]
        codes = [Path.MOVETO, Path.LINETO]
        return Path(verts, codes)

    @docstring.dedent_interpd
    def __init__(self, bbox1, bbox2, loc1, loc2=None, **kwargs):
        """
        Connect two bboxes with a straight line.

        Parameters
        ----------
        bbox1, bbox2 : `matplotlib.transforms.Bbox`
            Bounding boxes to connect.

        loc1 : {1, 2, 3, 4}
            Corner of *bbox1* to draw the line. Valid values are::

                'upper right'  : 1,
                'upper left'   : 2,
                'lower left'   : 3,
                'lower right'  : 4

        loc2 : {1, 2, 3, 4}, optional
            Corner of *bbox2* to draw the line. If None, defaults to *loc1*.
            Valid values are::

                'upper right'  : 1,
                'upper left'   : 2,
                'lower left'   : 3,
                'lower right'  : 4

        **kwargs
            Patch properties for the line drawn. Valid arguments include:
            %(Patch)s
        """
        # Drawn in raw canvas coordinates; a caller transform is invalid.
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        kwargs["transform"] = IdentityTransform()
        Patch.__init__(self, fill=False, **kwargs)
        self.bbox1 = bbox1
        self.bbox2 = bbox2
        self.loc1 = loc1
        self.loc2 = loc2

    def get_path(self):
        return self.connect_bbox(self.bbox1, self.bbox2,
                                 self.loc1, self.loc2)
    get_path.__doc__ = Patch.get_path.__doc__
class BboxConnectorPatch(BboxConnector):
    @docstring.dedent_interpd
    def __init__(self, bbox1, bbox2, loc1a, loc2a, loc1b, loc2b, **kwargs):
        """
        Connect two bboxes with a quadrilateral.

        The quadrilateral has four sides: the two connector lines
        (*loc1a* of *bbox1* to *loc2a* of *bbox2*, and *loc1b* of *bbox1*
        to *loc2b* of *bbox2*) plus the segment between the two chosen
        corners on each bbox.

        Parameters
        ----------
        bbox1, bbox2 : `matplotlib.transforms.Bbox`
            Bounding boxes to connect.

        loc1a, loc2a : {1, 2, 3, 4}
            Corners of *bbox1* and *bbox2* to draw the first line.
            Valid values are::

                'upper right' : 1,
                'upper left' : 2,
                'lower left' : 3,
                'lower right' : 4

        loc1b, loc2b : {1, 2, 3, 4}
            Corners of *bbox1* and *bbox2* to draw the second line.
            Valid values are::

                'upper right' : 1,
                'upper left' : 2,
                'lower left' : 3,
                'lower right' : 4

        **kwargs
            Patch properties for the line drawn:

            %(Patch)s
        """
        if "transform" in kwargs:
            raise ValueError("transform should not be set")
        # The "a" corners are handled by the base class; only the "b"
        # corners need storing here.
        BboxConnector.__init__(self, bbox1, bbox2, loc1a, loc2a, **kwargs)
        self.loc1b = loc1b
        self.loc2b = loc2b

    def get_path(self):
        # First edge runs bbox1 -> bbox2, second runs back bbox2 -> bbox1;
        # closing onto the first vertex yields the quadrilateral outline.
        forward = self.connect_bbox(self.bbox1, self.bbox2,
                                    self.loc1, self.loc2)
        backward = self.connect_bbox(self.bbox2, self.bbox1,
                                     self.loc2b, self.loc1b)
        outline = list(forward.vertices)
        outline.extend(backward.vertices)
        outline.append(forward.vertices[0])
        return Path(outline)
    get_path.__doc__ = BboxConnector.get_path.__doc__
def _add_inset_axes(parent_axes, inset_axes):
"""Helper function to add an inset axes and disable navigation in it"""
parent_axes.figure.add_axes(inset_axes)
inset_axes.set_navigate(False)
@docstring.dedent_interpd
def inset_axes(parent_axes, width, height, loc=1,
               bbox_to_anchor=None, bbox_transform=None,
               axes_class=None,
               axes_kwargs=None,
               borderpad=0.5):
    """
    Create an inset axes with a given width and height.

    Both sizes used can be specified either in inches or percentage of the
    parent axes.

    Parameters
    ----------
    parent_axes : `matplotlib.axes.Axes`
        Axes to place the inset axes.

    width, height : float or str
        Size of the inset axes to create.

    loc : int or string, optional, default to 1
        Location to place the inset axes. The valid locations are::

            'upper right'  : 1,
            'upper left'   : 2,
            'lower left'   : 3,
            'lower right'  : 4,
            'right'        : 5,
            'center left'  : 6,
            'center right' : 7,
            'lower center' : 8,
            'upper center' : 9,
            'center'       : 10

    bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
        Bbox that the inset axes will be anchored. Can be a tuple of
        [left, bottom, width, height], or a tuple of [left, bottom].

    bbox_transform : `matplotlib.transforms.Transform`, optional
        Transformation for the bbox. if None, `parent_axes.transAxes` is used.

    axes_class : `matplotlib.axes.Axes` type, optional
        If specified, the inset axes created with be created with this class's
        constructor.

    axes_kwargs : dict, optional
        Keyworded arguments to pass to the constructor of the inset axes.
        Valid arguments include:

        %(Axes)s

    borderpad : float, optional
        Padding between inset axes and the bbox_to_anchor. Defaults to 0.5.

    Returns
    -------
    inset_axes : `axes_class`
        Inset axes object created.
    """
    cls = HostAxes if axes_class is None else axes_class
    extra = {} if axes_kwargs is None else axes_kwargs
    new_inset = cls(parent_axes.figure, parent_axes.get_position(), **extra)
    anchor = parent_axes.bbox if bbox_to_anchor is None else bbox_to_anchor
    # The locator positions and sizes the inset lazily, at draw time.
    new_inset.set_axes_locator(
        AnchoredSizeLocator(anchor, width, height, loc=loc,
                            bbox_transform=bbox_transform,
                            borderpad=borderpad))
    _add_inset_axes(parent_axes, new_inset)
    return new_inset
@docstring.dedent_interpd
def zoomed_inset_axes(parent_axes, zoom, loc=1,
                      bbox_to_anchor=None, bbox_transform=None,
                      axes_class=None,
                      axes_kwargs=None,
                      borderpad=0.5):
    """
    Create an anchored inset axes by scaling a parent axes.

    Parameters
    ----------
    parent_axes : `matplotlib.axes.Axes`
        Axes to place the inset axes.

    zoom : float
        Scaling factor of the data axes. *zoom* > 1 will enlargen the
        coordinates (i.e., "zoomed in"), while *zoom* < 1 will shrink the
        coordinates (i.e., "zoomed out").

    loc : int or string, optional, default to 1
        Location to place the inset axes. The valid locations are::

            'upper right'  : 1,
            'upper left'   : 2,
            'lower left'   : 3,
            'lower right'  : 4,
            'right'        : 5,
            'center left'  : 6,
            'center right' : 7,
            'lower center' : 8,
            'upper center' : 9,
            'center'       : 10

    bbox_to_anchor : tuple or `matplotlib.transforms.BboxBase`, optional
        Bbox that the inset axes will be anchored. Can be a tuple of
        [left, bottom, width, height], or a tuple of [left, bottom].

    bbox_transform : `matplotlib.transforms.Transform`, optional
        Transformation for the bbox. if None, `parent_axes.transAxes` is used.

    axes_class : `matplotlib.axes.Axes` type, optional
        If specified, the inset axes created with be created with this class's
        constructor.

    axes_kwargs : dict, optional
        Keyworded arguments to pass to the constructor of the inset axes.
        Valid arguments include:

        %(Axes)s

    borderpad : float, optional
        Padding between inset axes and the bbox_to_anchor. Defaults to 0.5.

    Returns
    -------
    inset_axes : `axes_class`
        Inset axes object created.
    """
    cls = HostAxes if axes_class is None else axes_class
    extra = {} if axes_kwargs is None else axes_kwargs
    new_inset = cls(parent_axes.figure, parent_axes.get_position(), **extra)
    # The zoom locator derives the inset's position from the parent's data
    # limits at draw time.
    new_inset.set_axes_locator(
        AnchoredZoomLocator(parent_axes, zoom=zoom, loc=loc,
                            bbox_to_anchor=bbox_to_anchor,
                            bbox_transform=bbox_transform,
                            borderpad=borderpad))
    _add_inset_axes(parent_axes, new_inset)
    return new_inset
@docstring.dedent_interpd
def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs):
    """
    Draw a box to mark the location of an area represented by an inset axes.

    This function draws a box in *parent_axes* at the bounding box of
    *inset_axes*, and shows a connection with the inset axes by drawing lines
    at the corners, giving a "zoomed in" effect.

    Parameters
    ----------
    parent_axes : `matplotlib.axes.Axes`
        Axes which contains the area of the inset axes.

    inset_axes : `matplotlib.axes.Axes`
        The inset axes.

    loc1, loc2 : {1, 2, 3, 4}
        Corners to use for connecting the inset axes and the area in the
        parent axes.

    **kwargs
        Patch properties for the lines and box drawn:

        %(Patch)s

    Returns
    -------
    pp : `matplotlib.patches.Patch`
        The patch drawn to represent the area of the inset axes.

    p1, p2 : `matplotlib.patches.Patch`
        The patches connecting two corners of the inset axes and its area.
    """
    # The marked region is the inset's view limits, expressed in the
    # parent's data coordinates.
    rect = TransformedBbox(inset_axes.viewLim, parent_axes.transData)
    pp = BboxPatch(rect, fill=False, **kwargs)
    parent_axes.add_patch(pp)

    connectors = []
    for corner in (loc1, loc2):
        conn = BboxConnector(inset_axes.bbox, rect, loc1=corner, **kwargs)
        inset_axes.add_patch(conn)
        # Connectors run outside the inset axes, so clipping must be off.
        conn.set_clip_on(False)
        connectors.append(conn)
    p1, p2 = connectors
    return pp, p1, p2
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@mpl_toolkits@axes_grid1@inset_locator.py@.PATH_END.py
|
{
"filename": "test_basic.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/fft/tests/test_basic.py",
"type": "Python"
}
|
import queue
import threading
import multiprocessing
import numpy as np
import pytest
from numpy.random import random
from numpy.testing import assert_array_almost_equal, assert_allclose
from pytest import raises as assert_raises
import scipy.fft as fft
from scipy.conftest import array_api_compatible
from scipy._lib._array_api import (
array_namespace, xp_size, xp_assert_close, xp_assert_equal
)
pytestmark = [array_api_compatible, pytest.mark.usefixtures("skip_xp_backends")]
skip_xp_backends = pytest.mark.skip_xp_backends
# Expected input dtypes. Note that `scipy.fft` is more flexible for numpy,
# but for C2C transforms like `fft.fft`, the array API standard only mandates
# that complex dtypes should work, float32/float64 aren't guaranteed to.
def get_expected_input_dtype(func, xp):
    """Return the dtype (from namespace *xp*) a given FFT function expects.

    C2C and complex-input transforms map to ``complex128``; real-input
    transforms map to ``float64``.  Raises ``ValueError`` for anything else.
    """
    complex_input_funcs = (fft.fft, fft.fftn, fft.fft2,
                           fft.ifft, fft.ifftn, fft.ifft2,
                           fft.hfft, fft.hfftn, fft.hfft2,
                           fft.irfft, fft.irfftn, fft.irfft2)
    real_input_funcs = (fft.rfft, fft.rfftn, fft.rfft2,
                        fft.ihfft, fft.ihfftn, fft.ihfft2)
    if func in complex_input_funcs:
        return xp.complex128
    if func in real_input_funcs:
        return xp.float64
    raise ValueError(f'Unknown FFT function: {func}')
def fft1(x):
    """Naive O(n^2) reference DFT of a 1-D array *x*."""
    n = len(x)
    k = np.arange(n)
    # Outer product k*m scaled by -2*pi*i/n gives the full DFT exponent
    # matrix; summing along axis 1 evaluates each output bin.
    exponent = np.outer(k, -2j * np.pi * k / n)
    return (x * np.exp(exponent)).sum(axis=1)
class TestFFT:
    """Round-trip, normalization and dtype tests for the scipy.fft wrappers.

    Each test receives ``xp``, the array namespace under test, via the
    ``array_api_compatible`` fixture declared in ``pytestmark``.
    """
    def test_identity(self, xp):
        # ifft(fft(x)) and irfft(rfft(x), n) must reproduce the input.
        maxlen = 512
        x = xp.asarray(random(maxlen) + 1j*random(maxlen))
        xr = xp.asarray(random(maxlen))
        # Check some powers of 2 and some primes
        for i in [1, 2, 16, 128, 512, 53, 149, 281, 397]:
            xp_assert_close(fft.ifft(fft.fft(x[0:i])), x[0:i])
            xp_assert_close(fft.irfft(fft.rfft(xr[0:i]), i), xr[0:i])
    @skip_xp_backends(np_only=True, reason='significant overhead for some backends')
    def test_identity_extensive(self, xp):
        # Same round-trip identity, but over every length 1..maxlen-1.
        maxlen = 512
        x = xp.asarray(random(maxlen) + 1j*random(maxlen))
        xr = xp.asarray(random(maxlen))
        for i in range(1, maxlen):
            xp_assert_close(fft.ifft(fft.fft(x[0:i])), x[0:i])
            xp_assert_close(fft.irfft(fft.rfft(xr[0:i]), i), xr[0:i])
    def test_fft(self, xp):
        # Compare against the naive O(n^2) reference DFT `fft1` under each
        # normalization mode.
        x = random(30) + 1j*random(30)
        expect = xp.asarray(fft1(x))
        x = xp.asarray(x)
        xp_assert_close(fft.fft(x), expect)
        xp_assert_close(fft.fft(x, norm="backward"), expect)
        xp_assert_close(fft.fft(x, norm="ortho"),
                        expect / xp.sqrt(xp.asarray(30, dtype=xp.float64)),)
        xp_assert_close(fft.fft(x, norm="forward"), expect / 30)
    @skip_xp_backends(np_only=True, reason='some backends allow `n=0`')
    def test_fft_n(self, xp):
        # n=0 must be rejected.
        x = xp.asarray([1, 2, 3], dtype=xp.complex128)
        assert_raises(ValueError, fft.fft, x, 0)
    def test_ifft(self, xp):
        x = xp.asarray(random(30) + 1j*random(30))
        xp_assert_close(fft.ifft(fft.fft(x)), x)
        for norm in ["backward", "ortho", "forward"]:
            xp_assert_close(fft.ifft(fft.fft(x, norm=norm), norm=norm), x)
    def test_fft2(self, xp):
        # fft2 must equal fft applied along axis 1 then axis 0.
        x = xp.asarray(random((30, 20)) + 1j*random((30, 20)))
        expect = fft.fft(fft.fft(x, axis=1), axis=0)
        xp_assert_close(fft.fft2(x), expect)
        xp_assert_close(fft.fft2(x, norm="backward"), expect)
        xp_assert_close(fft.fft2(x, norm="ortho"),
                        expect / xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64)))
        xp_assert_close(fft.fft2(x, norm="forward"), expect / (30 * 20))
    def test_ifft2(self, xp):
        x = xp.asarray(random((30, 20)) + 1j*random((30, 20)))
        expect = fft.ifft(fft.ifft(x, axis=1), axis=0)
        xp_assert_close(fft.ifft2(x), expect)
        xp_assert_close(fft.ifft2(x, norm="backward"), expect)
        xp_assert_close(fft.ifft2(x, norm="ortho"),
                        expect * xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64)))
        xp_assert_close(fft.ifft2(x, norm="forward"), expect * (30 * 20))
    def test_fftn(self, xp):
        x = xp.asarray(random((30, 20, 10)) + 1j*random((30, 20, 10)))
        expect = fft.fft(fft.fft(fft.fft(x, axis=2), axis=1), axis=0)
        xp_assert_close(fft.fftn(x), expect)
        xp_assert_close(fft.fftn(x, norm="backward"), expect)
        xp_assert_close(fft.fftn(x, norm="ortho"),
                        expect / xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64)))
        xp_assert_close(fft.fftn(x, norm="forward"), expect / (30 * 20 * 10))
    def test_ifftn(self, xp):
        x = xp.asarray(random((30, 20, 10)) + 1j*random((30, 20, 10)))
        expect = fft.ifft(fft.ifft(fft.ifft(x, axis=2), axis=1), axis=0)
        xp_assert_close(fft.ifftn(x), expect, rtol=1e-7)
        xp_assert_close(fft.ifftn(x, norm="backward"), expect, rtol=1e-7)
        xp_assert_close(
            fft.ifftn(x, norm="ortho"),
            fft.ifftn(x) * xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64))
        )
        xp_assert_close(fft.ifftn(x, norm="forward"),
                        expect * (30 * 20 * 10),
                        rtol=1e-7)
    def test_rfft(self, xp):
        # rfft equals the first n//2 + 1 bins of the complex fft.
        x = xp.asarray(random(29), dtype=xp.float64)
        for n in [xp_size(x), 2*xp_size(x)]:
            for norm in [None, "backward", "ortho", "forward"]:
                xp_assert_close(fft.rfft(x, n=n, norm=norm),
                                fft.fft(xp.asarray(x, dtype=xp.complex128),
                                        n=n, norm=norm)[:(n//2 + 1)])
            xp_assert_close(
                fft.rfft(x, n=n, norm="ortho"),
                fft.rfft(x, n=n) / xp.sqrt(xp.asarray(n, dtype=xp.float64))
            )
    def test_irfft(self, xp):
        x = xp.asarray(random(30))
        xp_assert_close(fft.irfft(fft.rfft(x)), x)
        for norm in ["backward", "ortho", "forward"]:
            xp_assert_close(fft.irfft(fft.rfft(x, norm=norm), norm=norm), x)
    def test_rfft2(self, xp):
        x = xp.asarray(random((30, 20)), dtype=xp.float64)
        expect = fft.fft2(xp.asarray(x, dtype=xp.complex128))[:, :11]
        xp_assert_close(fft.rfft2(x), expect)
        xp_assert_close(fft.rfft2(x, norm="backward"), expect)
        xp_assert_close(fft.rfft2(x, norm="ortho"),
                        expect / xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64)))
        xp_assert_close(fft.rfft2(x, norm="forward"), expect / (30 * 20))
    def test_irfft2(self, xp):
        x = xp.asarray(random((30, 20)))
        xp_assert_close(fft.irfft2(fft.rfft2(x)), x)
        for norm in ["backward", "ortho", "forward"]:
            xp_assert_close(fft.irfft2(fft.rfft2(x, norm=norm), norm=norm), x)
    def test_rfftn(self, xp):
        x = xp.asarray(random((30, 20, 10)), dtype=xp.float64)
        expect = fft.fftn(xp.asarray(x, dtype=xp.complex128))[:, :, :6]
        xp_assert_close(fft.rfftn(x), expect)
        xp_assert_close(fft.rfftn(x, norm="backward"), expect)
        xp_assert_close(fft.rfftn(x, norm="ortho"),
                        expect / xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64)))
        xp_assert_close(fft.rfftn(x, norm="forward"), expect / (30 * 20 * 10))
    def test_irfftn(self, xp):
        x = xp.asarray(random((30, 20, 10)))
        xp_assert_close(fft.irfftn(fft.rfftn(x)), x)
        for norm in ["backward", "ortho", "forward"]:
            xp_assert_close(fft.irfftn(fft.rfftn(x, norm=norm), norm=norm), x)
    def test_hfft(self, xp):
        # Build a 16-sample Hermitian-symmetric spectrum; hfft of it must
        # equal the real part of the full 30-sample fft.
        x = random(14) + 1j*random(14)
        x_herm = np.concatenate((random(1), x, random(1)))
        x = np.concatenate((x_herm, x[::-1].conj()))
        x = xp.asarray(x)
        x_herm = xp.asarray(x_herm)
        expect = xp.real(fft.fft(x))
        xp_assert_close(fft.hfft(x_herm), expect)
        xp_assert_close(fft.hfft(x_herm, norm="backward"), expect)
        xp_assert_close(fft.hfft(x_herm, norm="ortho"),
                        expect / xp.sqrt(xp.asarray(30, dtype=xp.float64)))
        xp_assert_close(fft.hfft(x_herm, norm="forward"), expect / 30)
    def test_ihfft(self, xp):
        x = random(14) + 1j*random(14)
        x_herm = np.concatenate((random(1), x, random(1)))
        x = np.concatenate((x_herm, x[::-1].conj()))
        x = xp.asarray(x)
        x_herm = xp.asarray(x_herm)
        xp_assert_close(fft.ihfft(fft.hfft(x_herm)), x_herm)
        for norm in ["backward", "ortho", "forward"]:
            xp_assert_close(fft.ihfft(fft.hfft(x_herm, norm=norm), norm=norm), x_herm)
    def test_hfft2(self, xp):
        x = xp.asarray(random((30, 20)))
        xp_assert_close(fft.hfft2(fft.ihfft2(x)), x)
        for norm in ["backward", "ortho", "forward"]:
            xp_assert_close(fft.hfft2(fft.ihfft2(x, norm=norm), norm=norm), x)
    def test_ihfft2(self, xp):
        x = xp.asarray(random((30, 20)), dtype=xp.float64)
        expect = fft.ifft2(xp.asarray(x, dtype=xp.complex128))[:, :11]
        xp_assert_close(fft.ihfft2(x), expect)
        xp_assert_close(fft.ihfft2(x, norm="backward"), expect)
        xp_assert_close(
            fft.ihfft2(x, norm="ortho"),
            expect * xp.sqrt(xp.asarray(30 * 20, dtype=xp.float64))
        )
        xp_assert_close(fft.ihfft2(x, norm="forward"), expect * (30 * 20))
    def test_hfftn(self, xp):
        x = xp.asarray(random((30, 20, 10)))
        xp_assert_close(fft.hfftn(fft.ihfftn(x)), x)
        for norm in ["backward", "ortho", "forward"]:
            xp_assert_close(fft.hfftn(fft.ihfftn(x, norm=norm), norm=norm), x)
    def test_ihfftn(self, xp):
        x = xp.asarray(random((30, 20, 10)), dtype=xp.float64)
        expect = fft.ifftn(xp.asarray(x, dtype=xp.complex128))[:, :, :6]
        xp_assert_close(expect, fft.ihfftn(x))
        xp_assert_close(expect, fft.ihfftn(x, norm="backward"))
        xp_assert_close(
            fft.ihfftn(x, norm="ortho"),
            expect * xp.sqrt(xp.asarray(30 * 20 * 10, dtype=xp.float64))
        )
        xp_assert_close(fft.ihfftn(x, norm="forward"), expect * (30 * 20 * 10))
    def _check_axes(self, op, xp):
        # Transforming a permuted array must equal permuting the transform
        # performed along the correspondingly permuted axes.
        dtype = get_expected_input_dtype(op, xp)
        x = xp.asarray(random((30, 20, 10)), dtype=dtype)
        axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
        xp_test = array_namespace(x)
        for a in axes:
            op_tr = op(xp_test.permute_dims(x, axes=a))
            tr_op = xp_test.permute_dims(op(x, axes=a), axes=a)
            xp_assert_close(op_tr, tr_op)
    @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn, fft.rfftn, fft.irfftn])
    def test_axes_standard(self, op, xp):
        self._check_axes(op, xp)
    @pytest.mark.parametrize("op", [fft.hfftn, fft.ihfftn])
    def test_axes_non_standard(self, op, xp):
        self._check_axes(op, xp)
    @pytest.mark.parametrize("op", [fft.fftn, fft.ifftn,
                                    fft.rfftn, fft.irfftn])
    def test_axes_subset_with_shape_standard(self, op, xp):
        dtype = get_expected_input_dtype(op, xp)
        x = xp.asarray(random((16, 8, 4)), dtype=dtype)
        axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)]
        xp_test = array_namespace(x)
        for a in axes:
            # different shape on the first two axes
            shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax]
                           for ax in range(x.ndim)])
            # transform only the first two axes
            op_tr = op(xp_test.permute_dims(x, axes=a),
                       s=shape[:2], axes=(0, 1))
            tr_op = xp_test.permute_dims(op(x, s=shape[:2], axes=a[:2]),
                                         axes=a)
            xp_assert_close(op_tr, tr_op)
    @pytest.mark.parametrize("op", [fft.fft2, fft.ifft2,
                                    fft.rfft2, fft.irfft2,
                                    fft.hfft2, fft.ihfft2,
                                    fft.hfftn, fft.ihfftn])
    def test_axes_subset_with_shape_non_standard(self, op, xp):
        dtype = get_expected_input_dtype(op, xp)
        x = xp.asarray(random((16, 8, 4)), dtype=dtype)
        axes = [(0, 1, 2), (0, 2, 1), (1, 2, 0)]
        xp_test = array_namespace(x)
        for a in axes:
            # different shape on the first two axes
            shape = tuple([2*x.shape[ax] if ax in a[:2] else x.shape[ax]
                           for ax in range(x.ndim)])
            # transform only the first two axes
            op_tr = op(xp_test.permute_dims(x, axes=a), s=shape[:2], axes=(0, 1))
            tr_op = xp_test.permute_dims(op(x, s=shape[:2], axes=a[:2]), axes=a)
            xp_assert_close(op_tr, tr_op)
    def test_all_1d_norm_preserving(self, xp):
        # verify that round-trip transforms are norm-preserving
        x = xp.asarray(random(30), dtype=xp.float64)
        xp_test = array_namespace(x)
        x_norm = xp_test.linalg.vector_norm(x)
        n = xp_size(x) * 2
        func_pairs = [(fft.rfft, fft.irfft),
                      # hfft: order so the first function takes x.size samples
                      # (necessary for comparison to x_norm above)
                      (fft.ihfft, fft.hfft),
                      # functions that expect complex dtypes at the end
                      (fft.fft, fft.ifft),
                      ]
        for forw, back in func_pairs:
            if forw == fft.fft:
                # switch to a complex input (and its norm) for the C2C pair
                x = xp.asarray(x, dtype=xp.complex128)
                x_norm = xp_test.linalg.vector_norm(x)
            for n in [xp_size(x), 2*xp_size(x)]:
                for norm in ['backward', 'ortho', 'forward']:
                    tmp = forw(x, n=n, norm=norm)
                    tmp = back(tmp, n=n, norm=norm)
                    xp_assert_close(xp_test.linalg.vector_norm(tmp), x_norm)
    @skip_xp_backends(np_only=True)
    @pytest.mark.parametrize("dtype", [np.float16, np.longdouble])
    def test_dtypes_nonstandard(self, dtype):
        x = random(30).astype(dtype)
        out_dtypes = {np.float16: np.complex64, np.longdouble: np.clongdouble}
        x_complex = x.astype(out_dtypes[dtype])
        res_fft = fft.ifft(fft.fft(x))
        res_rfft = fft.irfft(fft.rfft(x))
        res_hfft = fft.hfft(fft.ihfft(x), x.shape[0])
        # Check both numerical results and exact dtype matches
        assert_array_almost_equal(res_fft, x_complex)
        assert_array_almost_equal(res_rfft, x)
        assert_array_almost_equal(res_hfft, x)
        assert res_fft.dtype == x_complex.dtype
        assert res_rfft.dtype == np.result_type(np.float32, x.dtype)
        assert res_hfft.dtype == np.result_type(np.float32, x.dtype)
    @pytest.mark.parametrize("dtype", ["float32", "float64"])
    def test_dtypes_real(self, dtype, xp):
        x = xp.asarray(random(30), dtype=getattr(xp, dtype))
        res_rfft = fft.irfft(fft.rfft(x))
        res_hfft = fft.hfft(fft.ihfft(x), x.shape[0])
        # Check both numerical results and exact dtype matches
        xp_assert_close(res_rfft, x)
        xp_assert_close(res_hfft, x)
    @pytest.mark.parametrize("dtype", ["complex64", "complex128"])
    def test_dtypes_complex(self, dtype, xp):
        rng = np.random.default_rng(1234)
        x = xp.asarray(rng.random(30), dtype=getattr(xp, dtype))
        res_fft = fft.ifft(fft.fft(x))
        # Check both numerical results and exact dtype matches
        xp_assert_close(res_fft, x)
    @skip_xp_backends(np_only=True,
                      reason='array-likes only supported for NumPy backend')
    @pytest.mark.parametrize("op", [fft.fft, fft.ifft,
                                    fft.fft2, fft.ifft2,
                                    fft.fftn, fft.ifftn,
                                    fft.rfft, fft.irfft,
                                    fft.rfft2, fft.irfft2,
                                    fft.rfftn, fft.irfftn,
                                    fft.hfft, fft.ihfft,
                                    fft.hfft2, fft.ihfft2,
                                    fft.hfftn, fft.ihfftn,])
    def test_array_like(self, xp, op):
        # Plain nested lists must be accepted and match xp.asarray input.
        x = [[[1.0, 1.0], [1.0, 1.0]],
             [[1.0, 1.0], [1.0, 1.0]],
             [[1.0, 1.0], [1.0, 1.0]]]
        xp_assert_close(op(x), op(xp.asarray(x)))
@skip_xp_backends(np_only=True)
@pytest.mark.parametrize(
    "dtype",
    [np.float32, np.float64, np.longdouble,
     np.complex64, np.complex128, np.clongdouble])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
    "fft",
    [fft.fft, fft.fft2, fft.fftn,
     fft.ifft, fft.ifft2, fft.ifftn])
def test_fft_with_order(dtype, order, fft):
    """FFT/IFFT must produce identical results for C-ordered, Fortran-ordered
    and non-contiguous inputs."""
    rng = np.random.RandomState(42)
    base = rng.rand(8, 7, 13).astype(dtype, copy=False)
    if order == 'F':
        contig, alt = base, np.asfortranarray(base)
    else:
        # A reversed view is non-contiguous; compare it against a contiguous
        # copy of the same values.
        alt = base[::-1]
        contig = np.ascontiguousarray(base[::-1])

    name = fft.__name__
    if name.endswith(('fft2', 'fftn')):
        axes_list = [(0, 1), (1, 2), (0, 2)]
        if name.endswith('fftn'):
            axes_list.extend([(0,), (1,), (2,), None])
        for ax in axes_list:
            assert_array_almost_equal(fft(contig, axes=ax), fft(alt, axes=ax))
    elif name.endswith('fft'):
        for axis in range(3):
            assert_array_almost_equal(fft(contig, axis=axis),
                                      fft(alt, axis=axis))
    else:
        raise ValueError
@skip_xp_backends(cpu_only=True)
class TestFFTThreadSafe:
    """Run each transform from many threads at once and check every thread
    gets the same result as a single-threaded call."""
    # Number of concurrent worker threads per test.
    threads = 16
    # Input size for each transform.
    input_shape = (800, 200)
    def _test_mtsame(self, func, *args, xp=None):
        """Call ``func(*args)`` from ``self.threads`` threads and compare each
        result against a single serial evaluation."""
        def worker(args, q):
            q.put(func(*args))
        q = queue.Queue()
        expected = func(*args)
        # Spin off a bunch of threads to call the same function simultaneously
        t = [threading.Thread(target=worker, args=(args, q))
             for i in range(self.threads)]
        [x.start() for x in t]
        [x.join() for x in t]
        # Make sure all threads returned the correct value
        for i in range(self.threads):
            xp_assert_equal(
                q.get(timeout=5), expected,
                err_msg='Function returned wrong value in multithreaded context'
            )
    def test_fft(self, xp):
        a = xp.ones(self.input_shape, dtype=xp.complex128)
        self._test_mtsame(fft.fft, a, xp=xp)
    def test_ifft(self, xp):
        a = xp.full(self.input_shape, 1+0j)
        self._test_mtsame(fft.ifft, a, xp=xp)
    def test_rfft(self, xp):
        a = xp.ones(self.input_shape)
        self._test_mtsame(fft.rfft, a, xp=xp)
    def test_irfft(self, xp):
        a = xp.full(self.input_shape, 1+0j)
        self._test_mtsame(fft.irfft, a, xp=xp)
    def test_hfft(self, xp):
        a = xp.ones(self.input_shape, dtype=xp.complex64)
        self._test_mtsame(fft.hfft, a, xp=xp)
    def test_ihfft(self, xp):
        a = xp.ones(self.input_shape)
        self._test_mtsame(fft.ihfft, a, xp=xp)
@skip_xp_backends(np_only=True)
@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft])
def test_multiprocess(func):
    """fft must keep working in forked worker processes (gh-10422)."""
    expect = func(np.ones(100))
    with multiprocessing.Pool(2) as pool:
        results = pool.map(func, [np.ones(100) for _ in range(4)])
    for result in results:
        assert_allclose(result, expect)
class TestIRFFTN:
def test_not_last_axis_success(self, xp):
ar, ai = np.random.random((2, 16, 8, 32))
a = ar + 1j*ai
a = xp.asarray(a)
axes = (-2,)
# Should not raise error
fft.irfftn(a, axes=axes)
@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.rfft, fft.irfft,
                                  fft.fftn, fft.ifftn,
                                  fft.rfftn, fft.irfftn, fft.hfft, fft.ihfft])
def test_non_standard_params(func, xp):
    """Non-NumPy backends must reject NumPy-specific params like *workers*."""
    is_real_input = func in [fft.rfft, fft.rfftn, fft.ihfft]
    dtype = xp.float64 if is_real_input else xp.complex128

    if xp.__name__ != 'numpy':
        x = xp.asarray([1, 2, 3], dtype=dtype)
        # A plain call must succeed...
        func(x)
        # ...but passing `workers` must fail on non-NumPy backends.
        assert_raises(ValueError, func, x, workers=2)
        # `plan` param is not tested since SciPy does not use it currently
        # but should be tested if it comes into use
@pytest.mark.parametrize("dtype", ['float32', 'float64'])
@pytest.mark.parametrize("func", [fft.fft, fft.ifft, fft.irfft,
                                  fft.fftn, fft.ifftn,
                                  fft.irfftn, fft.hfft,])
def test_real_input(func, dtype, xp):
    """Complex-input transforms must accept real-valued input without error."""
    data = xp.asarray([1, 2, 3], dtype=getattr(xp, dtype))
    # The call itself is the assertion: it must not raise.
    func(data)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@fft@tests@test_basic.py@.PATH_END.py
|
{
"filename": "numpycompat.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/utils/compat/numpycompat.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This is a collection of monkey patches and workarounds for bugs in
earlier versions of Numpy.
"""
import numpy as np
from astropy.utils import minversion
__all__ = [
    "COPY_IF_NEEDED",
    "NUMPY_LT_1_24",
    "NUMPY_LT_1_25",
    "NUMPY_LT_1_26",
    "NUMPY_LT_2_0",
    "NUMPY_LT_2_1",
    "NUMPY_LT_2_2",
    "NUMPY_LT_2_3",
]

# TODO: It might also be nice to have aliases to these named for specific
# features/bugs we're checking for (ex:
# astropy.table.table._BROKEN_UNICODE_TABLE_SORT)


def _older_than(version):
    """Return True when the installed numpy predates *version*."""
    return not minversion(np, version)


# Each flag is True when the running numpy is older than the named release.
NUMPY_LT_1_24 = _older_than("1.24")
NUMPY_LT_1_25 = _older_than("1.25")
NUMPY_LT_1_26 = _older_than("1.26")
NUMPY_LT_2_0 = _older_than("2.0")
NUMPY_LT_2_1 = _older_than("2.1.0.dev")
NUMPY_LT_2_2 = _older_than("2.2.0.dev0")
NUMPY_LT_2_3 = _older_than("2.3.0.dev0")

# Sentinel for a "copy only if needed" argument: None on numpy >= 2.0,
# False on older releases (presumably for the `copy=` keyword of
# np.array/np.asarray — confirm against callers).
COPY_IF_NEEDED = None if not NUMPY_LT_2_0 else False
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@utils@compat@numpycompat.py@.PATH_END.py
|
{
"filename": "copy.py",
"repo_name": "ashleychontos/pySYD",
"repo_path": "pySYD_extracted/pySYD-master/dev/dev/copy.py",
"type": "Python"
}
|
# DEVELOPMENT BRANCH ---> SOURCE PACKAGE
# Sync the development-branch modules into the installed pysyd package,
# keeping each package file's header lines intact.
print('\n\n COPYING FROM DEVELOPMENT BRANCH ---> SOURCE PACKAGE \n\n')
from pysyd import utils
cont = utils._ask_yesno('continue? ')
if cont:
    import os
    import glob
    import shutil
    scripts = ['cli','models','pipeline','plots','target','utils']
    # cli.py keeps a longer header (32 lines) than the other modules (18).
    rows, rows_cli = 18, 32
    _ROOT, _ = os.path.split(os.path.abspath(os.getcwd()))
    package = os.path.join(os.path.split(_ROOT)[0], 'pysyd')
    # copy scripts from dev -> src: preserve each package header, swap body
    for script in scripts:
        n = rows_cli if script == 'cli' else rows
        dst_path = os.path.join(package, '%s.py' % script)
        with open(dst_path, "r") as f:
            header = f.readlines()[:n]
        with open(os.path.join(_ROOT, '%s.py' % script), "r") as f:
            body = f.readlines()[n:]
        with open(dst_path, "w") as f:
            f.writelines(header + body)
    # version module differs between branches — copy it wholesale
    shutil.copy(os.path.join(_ROOT, 'version.py'),
                os.path.join(package, 'version.py'))
    # refresh bundled data files and dictionaries
    for pattern, subdir in ((os.path.join(_ROOT, 'info', 'data', '*'), 'data'),
                            (os.path.join(_ROOT, 'dicts', '*'), 'dicts')):
        for src in glob.glob(pattern):
            dst = os.path.join(package, subdir, os.path.split(src)[-1])
            shutil.copy(src, dst)
|
ashleychontosREPO_NAMEpySYDPATH_START.@pySYD_extracted@pySYD-master@dev@dev@copy.py@.PATH_END.py
|
{
"filename": "extract_mgcls_dino_representation.py",
"repo_name": "SKA-INAF/sclassifier",
"repo_path": "sclassifier_extracted/sclassifier-master/macros/extract_mgcls_dino_representation.py",
"type": "Python"
}
|
import sys
# - IMPORT LUSTUFKA MODULES
sys.path.insert(1, '/home/riggi/Software/Sources/mgcls_dino')
import utils
import vision_transformer as vits
#################################
import os
import argparse
import json
import warnings
import numpy as np
## ASTRO ####
from astropy.io import fits
from astropy.io.fits.verify import VerifyWarning
warnings.simplefilter('ignore', category=VerifyWarning)
from astropy.stats import sigma_clip
from astropy.visualization import ZScaleInterval
## IMAGE PROC ###
from PIL import Image
## TORCH ####
import torch
from torch import nn
import torch.distributed as dist
import torch.backends.cudnn as cudnn
from torchvision import datasets
from torchvision import transforms as pth_transforms
from torchvision import models as torchvision_models
from torch.utils.data import Dataset, DataLoader
######################################
### DATASET
######################################
class AstroImageDataset(Dataset):
""" Dataset to load astro images in FITS format """
def __init__(self, filename, transform, in_chans=1, apply_zscale=False, norm_range=(0.,1.), to_uint8=False, set_zero_to_min=False):
self.filename= filename
self.__read_filelist()
self.transform = transform
self.clip_data= False
self.in_chans= in_chans
self.apply_zscale= apply_zscale
self.norm_range= norm_range
self.to_uint8= to_uint8
self.set_zero_to_min= set_zero_to_min
print("self.apply_zscale")
print(self.apply_zscale)
def __getitem__(self, idx):
""" Override getitem method """
# - Get label at inder idx
class_id= self.datalist[idx]['id']
# - Get object identifier
sname= self.datalist[idx]['sname']
# - Load PIL image at index
image_pil, is_good_data= self.load_image(idx)
if image_pil is None:
print("WARN: Failed to load image ...")
is_good_data= False
# - Convert image for the model
image_tensor= self.transform(image_pil)
return image_tensor, class_id, sname, is_good_data
def __read_filelist(self):
""" Read input json filelist """
fp= open(self.filename, "r")
self.datalist= json.load(fp)["data"]
def __get_clipped_data(self, data, sigma_low=5, sigma_up=30):
""" Apply sigma clipping to input data and return transformed data """
# - Find NaNs pixels
cond= np.logical_and(data!=0, np.isfinite(data))
data_1d= data[cond]
# - Clip all pixels that are below sigma clip
res= sigma_clip(data_1d, sigma_lower=sigma_low, sigma_upper=sigma_up, masked=True, return_bounds=True)
thr_low= res[1]
thr_up= res[2]
data_clipped= np.copy(data)
data_clipped[data_clipped<thr_low]= thr_low
data_clipped[data_clipped>thr_up]= thr_up
# - Set NaNs to 0
data_clipped[~cond]= 0
return data_clipped
def __get_zscaled_data(self, data, contrast=0.25):
""" Apply sigma clipping to input data and return transformed data """
# - Find NaNs pixels
cond= np.logical_and(data!=0, np.isfinite(data))
# - Apply zscale transform
transform= ZScaleInterval(contrast=contrast)
data_transf= transform(data)
# - Set NaNs to 0
data_transf[~cond]= 0
return data_transf
def __read_fits(self, filename):
    """ Read a FITS image and return (transformed 2D array, is_good_data flag). """
    is_good_data= True

    # - Read FITS data (primary HDU only)
    # NOTE(review): the HDUList is never closed here — confirm whether the
    # leaked file handle matters for large datasets.
    data= fits.open(filename)[0].data

    # - Select "valid" pixels: optionally treat exact zeros as blank too
    if self.set_zero_to_min:
        cond= np.logical_and(data!=0, np.isfinite(data))
    else:
        cond= np.isfinite(data)

    data_1d= data[cond]
    if data_1d.size==0:
        is_good_data= False
        print("WARN: All NAN image, setting image to 0...")
        data[~cond]= 0
        # NOTE(review): this early return casts to uint8 regardless of
        # self.to_uint8 — confirm that is intended.
        return data.astype(np.uint8), is_good_data
        #return None

    # - Replace blank/NaN pixels with the minimum valid value
    data_min= np.min(data_1d)
    data[~cond]= data_min
    data_transf= data

    print("== DATA MIN/MAX ==")
    print(data_transf.min())
    print(data_transf.max())

    # - Clip data? (sigma clipping, bounds hard-coded here)
    if self.clip_data:
        data_clipped= self.__get_clipped_data(data_transf, sigma_low=5, sigma_up=30)
        data_transf= data_clipped

    # - Apply zscale stretch
    if self.apply_zscale:
        print("Apply zscale stretch ...")
        data_stretched= self.__get_zscaled_data(data_transf, contrast=0.25)
        data_transf= data_stretched

    # - Convert to uint8
    #data_transf= (data_transf*255.).astype(np.uint8)

    # - Normalize to the configured output range (min-max rescale)
    data_min= data_transf.min()
    data_max= data_transf.max()
    norm_min= self.norm_range[0]
    norm_max= self.norm_range[1]
    if norm_min==data_min and norm_max==data_max:
        print("INFO: Data already normalized in range (%f,%f)" % (norm_min, norm_max))
    else:
        data_norm= (data_transf-data_min)/(data_max-data_min) * (norm_max-norm_min) + norm_min
        data_transf= data_norm

    print("== DATA MIN/MAX (AFTER TRANSF) ==")
    print(data_transf.min())
    print(data_transf.max())

    # - Convert to uint8 (after normalization, so norm_range should span >1)
    if self.to_uint8:
        data_transf= data_transf.astype(np.uint8)

    return data_transf, is_good_data
def __transform_data(self, data):
    """ Transform numpy data and return (transformed array, is_good_data flag). """
    # NOTE(review): this duplicates the post-read pipeline of __read_fits —
    # consider factoring the shared logic into one helper.
    is_good_data= True
    data_transf= np.copy(data)

    # - Select "valid" pixels: optionally treat exact zeros as blank too
    if self.set_zero_to_min:
        cond= np.logical_and(data_transf!=0, np.isfinite(data_transf))
    else:
        cond= np.isfinite(data_transf)

    data_1d= data_transf[cond]
    if data_1d.size==0:
        is_good_data= False
        print("WARN: All NAN image, setting image to 0...")
        data_transf[~cond]= 0
        # NOTE(review): this early return casts to uint8 regardless of
        # self.to_uint8 — confirm that is intended.
        return data_transf.astype(np.uint8), is_good_data
        #return None

    # - Replace blank/NaN pixels with the minimum valid value
    data_min= np.min(data_1d)
    data_transf[~cond]= data_min

    print("== DATA MIN/MAX ==")
    print(data_transf.min())
    print(data_transf.max())

    # - Clip data? (sigma clipping, bounds hard-coded here)
    if self.clip_data:
        data_clipped= self.__get_clipped_data(data_transf, sigma_low=5, sigma_up=30)
        data_transf= data_clipped

    # - Apply zscale stretch
    if self.apply_zscale:
        print("Apply zscale stretch ...")
        data_stretched= self.__get_zscaled_data(data_transf, contrast=0.25)
        data_transf= data_stretched

    # - Convert to uint8
    #data_transf= (data_transf*255.).astype(np.uint8)

    # - Normalize to the configured output range (min-max rescale)
    data_min= data_transf.min()
    data_max= data_transf.max()
    norm_min= self.norm_range[0]
    norm_max= self.norm_range[1]
    if norm_min==data_min and norm_max==data_max:
        print("INFO: Data already normalized in range (%f,%f)" % (norm_min, norm_max))
    else:
        data_norm= (data_transf-data_min)/(data_max-data_min) * (norm_max-norm_min) + norm_min
        data_transf= data_norm

    print("== DATA MIN/MAX (AFTER TRANSF) ==")
    print(data_transf.min())
    print(data_transf.max())

    # - Convert to uint8 (after normalization)
    if self.to_uint8:
        data_transf= data_transf.astype(np.uint8)

    return data_transf, is_good_data
def load_image(self, idx):
    """ Load the image for sample idx and return (PIL.Image, is_good_data flag). """
    # - Get image path (first file listed for this entry)
    item = self.datalist[idx]
    image_path = item["filepaths"][0]
    image_ext = os.path.splitext(image_path)[1]
    print("INFO: Reading image %s ..." % (image_path))

    # - Read image as numpy array
    is_good_data = True
    if image_ext == '.fits':
        # BUGFIX: the original referenced an undefined name `filename`
        # (NameError at runtime); use image_path, and close the HDUList.
        with fits.open(image_path) as hdul:
            data = hdul[0].data
    else:
        data = np.asarray(Image.open(image_path))

    # - Transform numpy array (blank-pixel handling, clipping, zscale,
    #   range normalization per dataset options)
    data, is_good_data = self.__transform_data(data)

    # - Convert numpy to PIL image
    image = Image.fromarray(data)

    # - Convert to RGB image when a 3-channel input is requested
    if self.in_chans == 3:
        image = image.convert("RGB")

    print("--> image.shape")
    print(np.asarray(image).shape)

    return image, is_good_data
def load_image_info(self, idx):
    """ Return the metadata dict for the sample at the given index. """
    info = self.datalist[idx]
    return info
def __len__(self):
    """ Dataset length: number of entries in the json filelist. """
    n_entries = len(self.datalist)
    return n_entries
def get_sample_size(self):
    """ Return the number of samples (same as len(self)). """
    n_samples = len(self.datalist)
    return n_samples
def write_ascii(data, filename, header=''):
    """ Write a 2D numpy array to an ascii file, one space-separated row per line.

    Parameters:
        data: 2D numpy array to write (nothing is written when empty)
        filename: output file path (overwritten)
        header: optional header line written verbatim before the data
    """
    # - Skip if data is empty
    if data.size <= 0:
        print("WARN: Empty data given, no file will be written!")
        return

    # - Use a context manager so the file is closed (and flushed) even if a
    #   write raises; the original leaked the handle on error.
    with open(filename, 'wt') as fout:
        if header:
            fout.write(header)
            fout.write('\n')

        # - One row per line, fields separated by single spaces
        for row in data:
            fout.write(' '.join(map(str, row)))
            fout.write('\n')
###########################
## ARGS
###########################
def get_args():
    """This function parses and return arguments passed in"""
    parser = argparse.ArgumentParser(description="Parse args.")

    # - Input options
    parser.add_argument('-datalist','--datalist', dest='datalist', required=True, type=str, help='Input data json filelist')
    parser.add_argument('--data_path', default='/path/to/imagenet/', type=str)

    # - Data options
    parser.add_argument('--imgsize', default=224, type=int, help='Image resize size in pixels')
    parser.add_argument('--nmax', default=-1, type=int, help='Number of images to read and process in input file (-1=all)')
    parser.add_argument('--zscale', dest='zscale', action='store_true',help='Apply zscale transform (default=false)')
    parser.set_defaults(zscale=False)
    parser.add_argument('--norm_min', default=0., type=float, help='Norm min (default=0)')
    parser.add_argument('--norm_max', default=1., type=float, help='Norm max (default=1)')
    parser.add_argument('--to_uint8', dest='to_uint8', action='store_true',help='Convert to uint8 (default=false)')
    parser.set_defaults(to_uint8=False)
    parser.add_argument('--in_chans', default = 1, type = int, help = 'Length of subset of dataset to use.')
    # BUGFIX: dest was 'shift_zero_to_min', so passing --set_zero_to_min had no
    # effect on args.set_zero_to_min (the attribute main() actually reads);
    # the flag was silently ignored. dest now matches the consumer.
    parser.add_argument('--set_zero_to_min', dest='set_zero_to_min', action='store_true',help='Set blank pixels to min>0 (default=false)')
    parser.set_defaults(set_zero_to_min=False)
    parser.add_argument('--center_crop', dest='center_crop', action='store_true', help='Center crop image to fixed desired size in pixel, specified in crop_size option (default=no)')
    parser.set_defaults(center_crop=False)
    parser.add_argument('-crop_size', '--crop_size', dest='crop_size', required=False, type=int, default=224, action='store',help='Crop size in pixels (default=224)')

    # - Model options
    parser.add_argument('--batch_size_per_gpu', default=1, type=int, help='Per-GPU batch-size')
    parser.add_argument('--pretrained_weights', default='', type=str, help="Path to pretrained weights to evaluate.")
    parser.add_argument('--use_cuda', default=True, type=utils.bool_flag, help="Should we store the features on GPU? We recommend setting this to False if you encounter OOM")
    parser.add_argument('--arch', default='vit_small', type=str, help='Architecture')
    parser.add_argument('--patch_size', default=16, type=int, help='Patch resolution of the model.')
    parser.add_argument("--checkpoint_key", default="teacher", type=str, help='Key to use in the checkpoint (example: "teacher")')
    parser.add_argument('--dump_features', default=None, help='Path where to save computed features, empty for no saving')
    parser.add_argument('--num_workers', default=0, type=int, help='Number of data loading workers per GPU.')

    # - Outfile option
    parser.add_argument('-outfile','--outfile', dest='outfile', required=False, type=str, default='featdata.dat', help='Output filename (.dat) of feature data')

    args = parser.parse_args()

    return args
##############
## MAIN ##
##############
def main():
    """Main function: parse args, build the model, extract features for every
    image in the filelist, and write them to an ascii table."""

    #===========================
    #==   PARSE ARGS
    #===========================
    print("INFO: Get script args ...")
    try:
        args= get_args()
    except Exception as ex:
        # NOTE(review): `logger` is used here but not visibly defined in this
        # file — confirm it is imported/configured elsewhere.
        logger.error("Failed to get and parse options (err=%s)",str(ex))
        return 1

    # - Read args
    datalist= args.datalist

    # - Data options
    imgsize= args.imgsize
    nmax= args.nmax
    print("args.zscale")
    print(args.zscale)

    #===========================
    #==   BUILD MODEL
    #===========================
    print("INFO: Build network %s ..." % (args.arch))
    if "vit" in args.arch:
        # DINO ViT backbone with no classification head (num_classes=0)
        model = vits.__dict__[args.arch](patch_size=args.patch_size, num_classes=0, in_chans=args.in_chans)
        print(f"Model {args.arch} {args.patch_size}x{args.patch_size} built.")
    elif "xcit" in args.arch:
        model = torch.hub.load('facebookresearch/xcit:main', args.arch, num_classes=0)
    elif args.arch in torchvision_models.__dict__.keys():
        model = torchvision_models.__dict__[args.arch](num_classes=0)
        if args.in_chans != 3:
            # Replace the stem conv to accept a non-RGB channel count
            model.conv1 = nn.Conv2d(args.in_chans, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
            # NOTE(review): nesting inferred — confirm this resnet18 override
            # (hard-coded 1 input channel) is meant to apply only when
            # in_chans != 3.
            if args.arch == "resnet18": #after converting the checkpoint keys to Torchvision names
                model.conv1 = nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(2, 2), padding=(3, 3), bias=False)
        model.fc = nn.Identity()
    else:
        print(f"Architecture {args.arch} non supported")
        return 1

    if args.use_cuda:
        model.cuda()

    print("model")
    print(model)

    print("INFO: Load pretrained weights from file %s ..." % (args.pretrained_weights))
    utils.load_pretrained_weights(model, args.pretrained_weights, args.checkpoint_key, args.arch, args.patch_size)
    model.eval()

    #===========================
    #==   SET DATA LOADER
    #===========================
    # - Set data transform (normalization constants kept for reference but
    #   currently not applied — Normalize is commented out below)
    data_mean= (0.485, 0.456, 0.406)
    data_std= (0.229, 0.224, 0.225)
    #data_mean= (0.0, 0.0, 0.0)
    #data_std= (1.0, 1.0, 1.0)

    tlist= []
    if args.center_crop:
        tlist.append( pth_transforms.CenterCrop(args.crop_size) )
    tlist.append( pth_transforms.Resize(imgsize, interpolation=3) )
    tlist.append( pth_transforms.ToTensor() )
    #tlist.append( pth_transforms.Normalize(data_mean, data_std) )
    transform= pth_transforms.Compose(tlist)

    # - Set dataset
    dataset= AstroImageDataset(
        filename=datalist,
        transform=transform,
        in_chans=args.in_chans,
        apply_zscale=args.zscale,
        norm_range=(args.norm_min, args.norm_max),
        to_uint8=args.to_uint8,
        set_zero_to_min=args.set_zero_to_min
    )

    # - Sequential loader (shuffle=False) so features stay aligned with the
    #   input filelist order
    data_loader= torch.utils.data.DataLoader(
        dataset,
        shuffle=False,
        batch_size=args.batch_size_per_gpu,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    print(f"Data loaded with {len(dataset)} imgs.")

    #===========================
    #==   EXTRACT FEATURES
    #===========================
    nsamples= len(dataset)
    feature_list= []
    sname_list= []
    classid_list= []

    iterator= iter(data_loader)

    for i in range(nsamples):
        # - Stop looping?
        if nmax!=-1 and i>=nmax:
            print("INFO: Max number of samples (%d) reached, exit loop..." % (nmax))
            break

        # - Load data from loader: (image_tensor, class_id, sname, is_good_data)
        ret= next(iterator)
        if ret is None:
            print("Failed to read image %d, skipping ..." % (i+1))
            continue

        imgs= ret[0]
        class_ids= ret[1]
        sname = ret[2]
        is_good_data= ret[3]
        if not is_good_data:
            print("Bad data image %d, skipping ..." % (i+1))
            continue

        # - Run inference (no gradients needed for feature extraction)
        with torch.no_grad():
            feats = model(imgs)

        print("class_ids")
        print(class_ids)

        # - Keep only the first element of the batch (batch_size_per_gpu=1)
        features_numpy= feats[0].cpu().numpy()
        class_ids_numpy= class_ids[0].cpu().numpy()

        if i==0:
            print("feats.shape")
            print(feats.shape)
            print("features_numpy.shape")
            print(features_numpy.shape)

        # - Append to main list
        feature_list.append(features_numpy)
        sname_list.append(sname)
        classid_list.append(class_ids_numpy)
        #feature_list.extend(features_numpy)
        #sname_list.extend(sname)
        #classid_list.extend(class_ids_numpy)

    #===========================
    #==   SAVE FEATURES
    #===========================
    # - Write selected feature data table: one row per image,
    #   columns = sname, z1..zN features, class id
    print("INFO: Writin feature data to file %s ..." % (args.outfile))
    N= len(feature_list)
    nfeats= feature_list[0].shape[0]
    print("INFO: N=%d, nfeats=%d" % (N, nfeats))

    featdata_arr= np.array(feature_list)
    snames_arr= np.array(sname_list).reshape(N,1)
    classids_arr= np.array(classid_list).reshape(N,1)

    outdata= np.concatenate(
        (snames_arr, featdata_arr, classids_arr),
        axis=1
    )

    # - Build the header: "# sname z1 z2 ... zN id"
    znames_counter= list(range(1,nfeats+1))
    znames= '{}{}'.format('z',' z'.join(str(item) for item in znames_counter))
    head= '{} {} {}'.format("# sname",znames,"id")

    write_ascii(outdata, args.outfile, head)

    return 0
###################
## MAIN EXEC ##
###################
if __name__ == "__main__":
    # Script entry point: propagate main()'s return code as the exit status.
    sys.exit(main())
|
SKA-INAFREPO_NAMEsclassifierPATH_START.@sclassifier_extracted@sclassifier-master@macros@extract_mgcls_dino_representation.py@.PATH_END.py
|
{
"filename": "most.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/ipac/irsa/most.py",
"type": "Python"
}
|
import io
import re
import tarfile
import warnings
from bs4 import BeautifulSoup
from astropy.io import votable, fits
from astropy.table import Table
from astroquery.query import BaseQuery
from astroquery.utils import class_or_instance
from astroquery.exceptions import InvalidQueryError, NoResultsWarning
from . import conf
__all__ = ["Most", "MostClass"]
class MostClass(BaseQuery):
    URL = conf.most_server
    TIMEOUT = conf.timeout

    def _validate_name_input_type(self, params):
        """
        Validate required parameters when ``input_type='name_input'``.

        Parameters
        ----------
        params : `dict`
            Dictionary of query parameters to validate.

        Raises
        ------
        ValueError
            If ``obj_name`` is missing or falsy.
        """
        obj_name = params.get("obj_name", False)
        if not obj_name:
            raise ValueError("When input type is 'name_input' key 'obj_name' is required.")
def _validate_nafid_input_type(self, params):
    """
    Validate required parameters when ``input_type='naifid_input'``.

    Parameters
    ----------
    params : `dict`
        Dictionary of query parameters to validate.

    Raises
    ------
    ValueError
        If ``obj_nafid`` is missing or falsy.
    """
    obj_nafid = params.get("obj_nafid", False)
    if not obj_nafid:
        raise ValueError("When input type is 'nafid_input' key 'obj_nafid' is required.")
def _validate_mpc_input_type(self, params):
    """
    Validate required parameters when ``input_type='mpc_input'``.

    Parameters
    ----------
    params : `dict`
        Dictionary of query parameters to validate.

    Raises
    ------
    ValueError
        If ``obj_type`` is missing or invalid, or ``mpc_data`` is missing.
    """
    obj_type = params.get("obj_type", False)
    if not obj_type:
        raise ValueError("When input type is 'mpc_input' key 'obj_type' is required.")
    if obj_type not in ("Asteroid", "Comet"):
        raise ValueError("Object type is case sensitive and must be one of: `Asteroid` or `Comet`")

    mpc_data = params.get("mpc_data", False)
    if not mpc_data:
        raise ValueError("When input type is 'mpc_input' key 'mpc_data' is required.")
def _validate_manual_input_type(self, params):
    """
    Validate required parameters when ``input_type='manual_input'``.

    Parameters
    ----------
    params : `dict`
        Dictionary of query parameters to validate. May be mutated: a
        default ``body_designation`` is filled in when missing.

    Raises
    ------
    ValueError
        If the input does not have the minimum required parameters set to
        an at least truthy value.
    """
    obj_type = params.get("obj_type", False)
    if not obj_type:
        raise ValueError("When input type is 'manual_input' key 'obj_type' is required.")
    if obj_type not in ("Asteroid", "Comet"):
        raise ValueError("Object type is case sensitive and must be one of: 'Asteroid' or 'Comet'")

    # MOST always needs at least eccentricity plus a distance element whose
    # keyword name depends on the object type.
    if not params.get("eccentricity", False):
        raise ValueError("When input_type is 'manual_input', 'eccentricity' is required.")

    distance_key, missing_msg = {
        "Asteroid": ("semimajor_axis", "When obj_type is 'Asteroid', 'semimajor_axis' is required."),
        "Comet": ("perih_dist", "When obj_type is 'Comet', 'perih_dist' is required."),
    }[obj_type]
    if not params.get(distance_key, False):
        raise ValueError(missing_msg)

    # This seemingly can be whatever; synthesize a designation when absent.
    if not params.get("body_designation", False):
        params["body_designation"] = "Test" + params["obj_type"]
def _validate_input(self, params):
    """
    Validate the minimum required set of parameters, for a given input
    type, are at least truthy.

    Always checks ``catalog`` and ``input_type``, then dispatches to the
    validator matching the declared input type.

    Parameters
    ----------
    params : `dict`
        Dictionary of query parameters to validate.

    Raises
    ------
    ValueError
        If the input does not have the minimum required parameters set to
        an at least truthy value.
    """
    if params.get("catalog", None) is None:
        raise ValueError("Which catalog is being queried is always required.")

    input_type = params.get("input_type", None)
    if input_type is None:
        raise ValueError("Input type is always required.")

    # Dispatch table: one validator per supported input type.
    validators = {
        "name_input": self._validate_name_input_type,
        "nafid_input": self._validate_nafid_input_type,
        "mpc_input": self._validate_mpc_input_type,
        "manual_input": self._validate_manual_input_type,
    }
    if input_type not in validators:
        raise ValueError(
            "Unrecognized 'input_type'. Expected `name_input`, `nafid_input` "
            f"`mpc_input` or `manual_input`, got {input_type} instead."
        )
    validators[input_type](params)
def _parse_full_regular_response(self, response, withTarballs=False):
    """
    Parses the response when output type is set to ``"Regular"`` or ``"Full"``.

    Parameters
    ----------
    response : `requests.models.Response`
        Query response.
    withTarballs : `bool`, optional
        Parse the links to FITS and region tarballs from the response. By
        default, set to False.

    Returns
    -------
    retdict : `dict`
        Dictionary containing the keys ``results``, ``metadata`` and ``region``.
        Optionally can contain keys ``fits_tarball`` and ``region_tarball``.
        The ``results`` and ``metadata`` are an `astropy.table.Table` object
        containing the links to image and region files and minimum object
        metadata, while ``metadata`` contains the image metadata and object
        positions. The ``region`` key contains a link to the DS9 region file
        representing the matched object trajectory and search boxes. When
        existing, ``fits_tarball`` and ``region_tarball`` are links to the
        tarball archives of the fits and region images.
    """
    retdict = {}
    html = BeautifulSoup(response.content, "html5lib")
    # The page exposes its products as "Download ..." anchors; the parsing
    # below relies on their on-page order.
    download_tags = html.find_all("a", string=re.compile(".*Download.*"))

    # this is "Download Results Table (above)"
    results_response = self._request("GET", download_tags[0]["href"])
    retdict["results"] = Table.read(results_response.text, format="ipac")

    # this is "Download Image Metadata with Matched Object position Table"
    imgmet_response = self._request("GET", download_tags[1]["href"])
    retdict["metadata"] = Table.read(imgmet_response.text, format="ipac")

    # this is "Download DS9 Region File with the Orbital Path", it's a link
    # to a DS9 region file (returned as a URL, not fetched)
    # regions_response = self._request("GET", download_tags[2]["href"])
    retdict["region"] = download_tags[2]["href"]

    if withTarballs:
        # NOTE(review): tarball links are taken from the tail of the anchor
        # list — confirm the last anchor is always the FITS tarball and the
        # one before it the region tarball.
        retdict["fits_tarball"] = download_tags[-1]["href"]
        retdict["region_tarball"] = download_tags[-2]["href"]

    return retdict
@class_or_instance
def list_catalogs(self):
    """Returns a list of queriable catalogs."""
    response = self._request("GET", conf.most_interface_url, timeout=self.TIMEOUT)
    page = BeautifulSoup(response.content, "html5lib")

    # Catalog names live in the options of the page's first <select> element.
    options = page.find("select").find_all("option")
    catalogs = [option.string for option in options]

    # The Internal-Use-only datasets are free to search in MOST.
    # The way it is supposed to work is that the images will not be accessible,
    # so drop the separator entry if present.
    internal_marker = "--- Internal use only:"
    if internal_marker in catalogs:
        catalogs.remove(internal_marker)

    return catalogs
def get_images(self, catalog="wise_merge", input_mode="name_input", ephem_step=0.25,
               obs_begin=None, obs_end=None, obj_name=None, obj_nafid=None, obj_type=None,
               mpc_data=None, body_designation=None, epoch=None, eccentricity=None,
               inclination=None, arg_perihelion=None, ascend_node=None, semimajor_axis=None,
               mean_anomaly=None, perih_dist=None, perih_time=None, get_query_payload=False,
               save=False, savedir=''):
    """Gets images containing the specified object or orbit.

    Parameters are case sensitive.
    See module help for more details.

    Parameters
    ----------
    catalog : str
        Catalog to query.
        Required.
        Default ``"wise_merge"``.
    input_mode : str
        Input mode. One of ``"name_input"``, ``"naifid_input"``,
        ``"mpc_input"`` or ``"manual_input"``.
        Required.
        Default: ``"name_input"``.
    ephem_step : float
        Size of the steps (in days) at which the object ephemeris is evaluated.
        Required.
        Default: 0.25
    obs_begin : str or None
        UTC of the start of observations in ``YYYY-MM-DD``. When ``None``
        queries all available data in the catalog which can be slow.
        Optional.
        Default: ``None``.
    obs_end : str or None
        UTC of the end of observations in ``YYYY-MM-DD``. When ``None``
        queries all available data in the catalog, can be slow.
        Optional.
        Default: ``None``.
    obj_name : str or None
        Object name.
        Required when input mode is ``"name_input"``.
    obj_nafid : str or None
        Object NAIFD.
        Required when input mode is ``"naifid_input"``.
    obj_type : str or None
        Object type, ``"Asteroid"`` or ``"Comet"``.
        Required when input mode is ``"mpc_input"`` or ``"manual_input"``.
    mpc_data : str or None
        MPC formatted object string.
        Required when input mode is ``"mpc_input"``.
    body_designation : str or None
        Name of the object described by the given orbital parameters. Does
        not have to be a real name. Will default to ``"TestAsteroid"`` or
        ``"TestComet"`` depending on selected object type.
        Required when input mode is ``"manual_input"``.
    epoch : str or None
        Epoch in MJD.
        Required when input mode is ``"manual_input"``.
    eccentricity : float or None
        Eccentricity (0-1).
        Required when input mode is ``"manual_input"``.
    inclination : float or None
        Inclination (0-180 degrees).
        Required when input mode is ``"manual_input"``.
    arg_perihelion : str or None
        Argument of perihelion (0-360 degrees).
        Required when input mode is ``"manual_input"``.
    ascend_node : float or None
        Longitude of the ascending node (0-360).
        Required when input mode is ``"manual_input"``.
    semimajor_axis : float or None
        Semimajor axis (AU).
        Required when input mode is ``"manual_input"`` and object type is
        ``"Asteroid"``.
    mean_anomaly : str or None
        Mean anomaly (degrees).
        Required when input mode is ``"manual_input"`` and object type is
        ``"Asteroid"``.
    perih_dist : float or None
        Perihelion distance (AU).
        Required when input mode is ``"manual_input"`` and object type is
        ``"Comet"``.
    perih_time : str or None
        Perihelion time (YYYY+MM+DD+HH:MM:SS).
        Required when input mode is ``"manual_input"`` and object type is
        ``"Comet"``.
    get_query_payload : bool
        Return the query parameters as a dictionary. Useful for debugging.
        Optional.
        Default: ``False``
    save : bool
        Whether to save the file to a local directory.
    savedir : str
        The location to save the local file if you want to save it
        somewhere other than `~astroquery.query.BaseQuery.cache_location`

    Returns
    -------
    images : list
        A list of `~astropy.io.fits.HDUList` objects.
    """
    # We insist on output_mode being regular so that it executes quicker,
    # and we insist on tarballs so the download is quicker. We ignore
    # whatever else user provides, but leave the parameters as arguments to
    # keep the same signatures for doc purposes.
    queryres = self.query_object(
        catalog=catalog,
        input_mode=input_mode,
        obs_begin=obs_begin,
        obs_end=obs_end,
        ephem_step=ephem_step,
        obj_name=obj_name,
        obj_nafid=obj_nafid,
        obj_type=obj_type,
        mpc_data=mpc_data,
        body_designation=body_designation,
        epoch=epoch,
        eccentricity=eccentricity,
        inclination=inclination,
        arg_perihelion=arg_perihelion,
        ascend_node=ascend_node,
        semimajor_axis=semimajor_axis,
        mean_anomaly=mean_anomaly,
        perih_dist=perih_dist,
        perih_time=perih_time,
        get_query_payload=get_query_payload,
        output_mode="Regular",
        with_tarballs=True,
    )

    if queryres is None:
        # A warning will already be issued by query_object so no need to
        # raise a new one here.
        return None

    # Download the FITS tarball and unpack it in memory.
    response = self._request("GET", queryres["fits_tarball"],
                             save=save, savedir=savedir)
    archive = tarfile.open(fileobj=io.BytesIO(response.content))

    # Open every FITS member of the archive as an HDUList.
    images = []
    for name in archive.getnames():
        if ".fits" in name:
            fileobj = archive.extractfile(name)
            fitsfile = fits.open(fileobj)
            images.append(fitsfile)

    return images
@class_or_instance
def query_object(self, catalog="wise_merge", input_mode="name_input", output_mode="Regular",
                 ephem_step=0.25, with_tarballs=False, obs_begin=None, obs_end=None,
                 obj_name=None, obj_nafid=None, obj_type=None, mpc_data=None,
                 body_designation=None, epoch=None, eccentricity=None, inclination=None,
                 arg_perihelion=None, ascend_node=None, semimajor_axis=None, mean_anomaly=None,
                 perih_dist=None, perih_time=None, get_query_payload=False):
    """
    Query the MOST interface using specified parameters and/or default
    query values.

    MOST service takes an object/orbit, depending on the input mode,
    evaluates its ephemerides in the given time range, and returns
    a combination of image identifiers, image metadata and/or ephemerides
    depending on the output mode.

    The required and optional query parameters vary depending on the query
    input type. Provided parameters that do not match the given input type
    will be ignored. Certain parameters are always required input to the
    service. For these the provided default values match the defaults of
    the online MOST interface.

    Parameters are case sensitive.
    See module help for more details.

    Parameters
    ----------
    catalog : str
        Catalog to query.
        Required.
        Default ``"wise_merge"``.
    input_mode : str
        Input mode. One of ``"name_input"``, ``"naifid_input"``,
        ``"mpc_input"`` or ``"manual_input"``.
        Required.
        Default: ``"name_input"``.
    output_mode : str
        Output mode. One of ``"Regular"``, ``"Full"``, ``"Brief"``,
        ``"Gator"`` or ``"VOTable"``.
        Required.
        Default: ``"Regular"``
    ephem_step : float
        Size of the steps (in days) at which the object ephemeris is evaluated.
        Required.
        Default: 0.25
    with_tarballs : bool
        Return links to tarballs of found FITS and Region files.
        Optional, only when output mode is ``"Regular"`` or ``"Full"``.
        Default: ``False``
    obs_begin : str or None
        UTC of the start of observations in ``YYYY-MM-DD``. When ``None``
        queries all available data in the catalog which can be slow.
        Optional.
        Default: ``None``.
    obs_end : str or None
        UTC of the end of observations in ``YYYY-MM-DD``. When ``None``
        queries all available data in the catalog, can be slow.
        Optional.
        Default: ``None``
    obj_name : str or None
        Object name.
        Required when input mode is ``"name_input"``.
    obj_nafid : str or None
        Object NAIFD.
        Required when input mode is ``"naifid_input"``.
    obj_type : str or None
        Object type, ``"Asteroid"`` or ``"Comet"``.
        Required when input mode is ``"mpc_input"`` or ``"manual_input"``.
    mpc_data : str or None
        MPC formatted object string.
        Required when input mode is ``"mpc_input"``.
    body_designation : str or None
        Name of the object described by the given orbital parameters. Does
        not have to be a real name. Will default to ``"TestAsteroid"`` or
        ``"TestComet"`` depending on selected object type.
        Required when input mode is ``"manual_input"``.
    epoch : str or None
        Epoch in MJD.
        Required when input mode is ``"manual_input"``.
    eccentricity : float or None
        Eccentricity (0-1).
        Required when input mode is ``"manual_input"``.
    inclination : float or None
        Inclination (0-180 degrees).
        Required when input mode is ``"manual_input"``.
    arg_perihelion : str or None
        Argument of perihelion (0-360 degrees).
        Required when input mode is ``"manual_input"``.
    ascend_node : float or None
        Longitude of the ascending node (0-360).
        Required when input mode is ``"manual_input"``.
    semimajor_axis : float or None
        Semimajor axis (AU).
        Required when input mode is ``"manual_input"`` and object type is
        ``"Asteroid"``.
    mean_anomaly : str or None
        Mean anomaly (degrees).
        Required when input mode is ``"manual_input"`` and object type is
        ``"Asteroid"``.
    perih_dist : float or None
        Perihelion distance (AU).
        Required when input mode is ``"manual_input"`` and object type is
        ``"Comet"``.
    perih_time : str or None
        Perihelion time (YYYY+MM+DD+HH:MM:SS).
        Required when input mode is ``"manual_input"`` and object type is
        ``"Comet"``.
    get_query_payload : bool
        Return the query parameters as a dictionary. Useful for debugging.
        Optional.
        Default: ``False``

    Returns
    -------
    query_results : `~astropy.table.Table`, `~astropy.io.votable.tree.VOTableFile` or `dict`
        Results of the query. Content depends on the selected output mode.
        In ``"Full"`` or ``"Regular"`` output mode returns a dictionary
        containing at least ``results``, ``metadata`` and ``region`` keys,
        and optionally ``fits_tarball`` and ``region_tarball`` keys. When
        in ``"Brief"`` or ``"Gator"`` an `~astropy.table.Table` object and
        in ``"VOTable"`` an `~astropy.io.votable.tree.VOTableFile`. See
        module help for more details on the content of these tables.
    """
    # This is a map between the keyword names used by the MOST cgi-bin
    # service and their more user-friendly names. For example,
    # input_type -> input_mode or fits_region_files --> with tarballs
    qparams = {
        "catalog": catalog,
        "input_type": input_mode,
        "output_mode": output_mode,
        "obs_begin": obs_begin,
        "obs_end": obs_end,
        "ephem_step": ephem_step,
        "fits_region_files": "on" if with_tarballs else "",
        "obj_name": obj_name,
        "obj_nafid": obj_nafid,
        "obj_type": obj_type,
        "mpc_data": mpc_data,
        "body_designation": body_designation,
        "epoch": epoch,
        "eccentricity": eccentricity,
        "inclination": inclination,
        "arg_perihelion": arg_perihelion,
        "ascend_node": ascend_node,
        "semimajor_axis": semimajor_axis,
        "mean_anomaly": mean_anomaly,
        "perih_dist": perih_dist,
        "perih_time": perih_time,
    }

    if get_query_payload:
        return qparams

    self._validate_input(qparams)
    response = self._request("POST", self.URL,
                             data=qparams, timeout=self.TIMEOUT)

    # service unreachable or some other reason
    response.raise_for_status()

    # MOST is not a REST API, so a bad query still returns HTTP 200; errors
    # have to be detected by scanning the returned page text instead.
    if "MOST: *** error:" in response.text:
        raise InvalidQueryError(response.text)

    # presume that response is HTML to simplify conditions
    if "Number of Matched Image Frames = 0" in response.text:
        warnings.warn("Number of Matched Image Frames = 0", NoResultsWarning)
        return None

    if qparams["output_mode"] in ("Brief", "Gator"):
        return Table.read(response.text, format="ipac")
    elif qparams["output_mode"] == "VOTable":
        matches = votable.parse(io.BytesIO(response.content))
        if matches.get_table_by_index(0).nrows == 0:
            warnings.warn("Number of Matched Image Frames = 0", NoResultsWarning)
            return Table()
        return matches
    else:
        # "Regular"/"Full": scrape the result page for the data products
        return self._parse_full_regular_response(response, qparams["fits_region_files"])
Most = MostClass()
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@ipac@irsa@most.py@.PATH_END.py
|
{
"filename": "sparse_csgraph.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/benchmarks/benchmarks/sparse_csgraph.py",
"type": "Python"
}
|
"""benchmarks for the scipy.sparse.csgraph module"""
import numpy as np
import scipy.sparse
from .common import Benchmark, safe_import
with safe_import():
from scipy.sparse.csgraph import laplacian
class Laplacian(Benchmark):
    """Benchmark scipy.sparse.csgraph.laplacian over size, storage format
    and normalization."""

    params = [
        [30, 300, 900],
        ['dense', 'coo', 'csc', 'csr', 'dia'],
        [True, False]
    ]
    param_names = ['n', 'format', 'normed']

    def setup(self, n, format, normed):
        # Build a reproducible banded random matrix: 18 diagonals at offsets
        # -9..-1 and 1..9, then convert to the requested storage format.
        band_values = scipy.sparse.rand(9, n, density=0.5, random_state=42).toarray()
        band_values = np.vstack((band_values, band_values))
        offsets = list(range(-9, 0)) + list(range(1, 10))
        banded = scipy.sparse.spdiags(band_values, offsets, n, n)
        self.A = banded.toarray() if format == 'dense' else banded.asformat(format)

    def time_laplacian(self, n, format, normed):
        # Time a single graph-Laplacian evaluation on the prepared matrix.
        laplacian(self.A, normed=normed)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@benchmarks@benchmarks@sparse_csgraph.py@.PATH_END.py
|
{
"filename": "test_encode_basestring_ascii.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/simplejson/py2/simplejson/tests/test_encode_basestring_ascii.py",
"type": "Python"
}
|
from unittest import TestCase
import simplejson.encoder
from simplejson.compat import b
# (input, expected JSON-escaped output) pairs shared by the tests below;
# exercised against both the pure-Python and the C implementation of
# encode_basestring_ascii.
CASES = [
    (u'/\\"\ucafe\ubabe\uab98\ufcde\ubcda\uef4a\x08\x0c\n\r\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?', '"/\\\\\\"\\ucafe\\ubabe\\uab98\\ufcde\\ubcda\\uef4a\\b\\f\\n\\r\\t`1~!@#$%^&*()_+-=[]{}|;:\',./<>?"'),
    (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
    (u'controls', '"controls"'),
    (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
    (u'{"object with 1 member":["array with 1 element"]}', '"{\\"object with 1 member\\":[\\"array with 1 element\\"]}"'),
    (u' s p a c e d ', '" s p a c e d "'),
    (u'\U0001d120', '"\\ud834\\udd20"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    (b('\xce\xb1\xce\xa9'), '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    (b('\xce\xb1\xce\xa9'), '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    (u'\u03b1\u03a9', '"\\u03b1\\u03a9"'),
    (u"`1~!@#$%^&*()_+-={':[,]}|;.</>?", '"`1~!@#$%^&*()_+-={\':[,]}|;.</>?"'),
    (u'\x08\x0c\n\r\t', '"\\b\\f\\n\\r\\t"'),
    (u'\u0123\u4567\u89ab\ucdef\uabcd\uef4a', '"\\u0123\\u4567\\u89ab\\ucdef\\uabcd\\uef4a"'),
]
class TestEncodeBaseStringAscii(TestCase):
    # Runs the shared CASES table through both implementations of
    # encode_basestring_ascii, plus a sort_keys smoke test of the encoder.

    def test_py_encode_basestring_ascii(self):
        # Pure-Python implementation: always present.
        self._test_encode_basestring_ascii(simplejson.encoder.py_encode_basestring_ascii)

    def test_c_encode_basestring_ascii(self):
        # The C speedup may be absent (built without extensions); skip then.
        if not simplejson.encoder.c_encode_basestring_ascii:
            return
        self._test_encode_basestring_ascii(simplejson.encoder.c_encode_basestring_ascii)

    def _test_encode_basestring_ascii(self, encode_basestring_ascii):
        # Shared driver: check every (input, expected) pair in CASES.
        fname = encode_basestring_ascii.__name__
        for input_string, expect in CASES:
            result = encode_basestring_ascii(input_string)
            #self.assertEqual(result, expect,
            #    '{0!r} != {1!r} for {2}({3!r})'.format(
            #        result, expect, fname, input_string))
            self.assertEqual(result, expect,
                '%r != %r for %s(%r)' % (result, expect, fname, input_string))

    def test_sorted_dict(self):
        # sort_keys must order object members alphabetically.
        items = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
        s = simplejson.dumps(dict(items), sort_keys=True)
        self.assertEqual(s, '{"five": 5, "four": 4, "one": 1, "three": 3, "two": 2}')
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@simplejson@py2@simplejson@tests@test_encode_basestring_ascii.py@.PATH_END.py
|
{
"filename": "_height.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/annotation/_height.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class HeightValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``layout.annotation.height`` property."""

    def __init__(self, plotly_name="height", parent_name="layout.annotation", **kwargs):
        # Fall back to plotly's defaults unless the caller overrides them.
        edit_type = kwargs.pop("edit_type", "calc+arraydraw")
        minimum = kwargs.pop("min", 1)
        super(HeightValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@annotation@_height.py@.PATH_END.py
|
{
"filename": "config.py",
"repo_name": "mlujnie/simple",
"repo_path": "simple_extracted/simple-main/simple/config.py",
"type": "Python"
}
|
class Config():
    """Locate the lognormal_galaxies installation.

    Attributes
    ----------
    lognormal_galaxies_path : str
        Installation directory of lognormal_galaxies.
    """

    def __init__(self):
        # Hard-coded default pointing at the author's cluster install.
        self.lognormal_galaxies_path = (
            '/u/majaln/intensity-mapping/code/mock/lognormal_galaxies/')
|
mlujnieREPO_NAMEsimplePATH_START.@simple_extracted@simple-main@simple@config.py@.PATH_END.py
|
{
"filename": "price_mcewen.py",
"repo_name": "astro-informatics/s2fft",
"repo_path": "s2fft_extracted/s2fft-main/s2fft/recursions/price_mcewen.py",
"type": "Python"
}
|
import warnings
from functools import partial
from typing import List
import jax.lax as lax
import jax.numpy as jnp
import numpy as np
from jax import jit
from s2fft.sampling import s2_samples as samples
# NOTE(review): this silences *all* warnings process-wide at import time,
# not just those raised by the recursions below — confirm this is intended.
warnings.filterwarnings("ignore")
def generate_precomputes(
    L: int,
    spin: int = 0,
    sampling: str = "mw",
    nside: int = None,
    forward: bool = False,
    L_lower: int = 0,
) -> List[np.ndarray]:
    r"""
    Compute recursion coefficients with :math:`\mathcal{O}(L^2)` memory overhead.

    In practice one could compute these on-the-fly but the memory overhead is
    negligible and well worth the acceleration.

    Args:
        L (int): Harmonic band-limit.
        spin (int, optional): Harmonic spin. Defaults to 0.
        sampling (str, optional): Sampling scheme. Supported sampling schemes include
            {"mw", "mwss", "dh", "healpix"}. Defaults to "mw".
        nside (int, optional): HEALPix Nside resolution parameter. Only required
            if sampling="healpix". Defaults to None.
        forward (bool, optional): Whether to provide forward or inverse shift.
            Defaults to False.
        L_lower (int, optional): Harmonic lower-bound. Transform will only be computed
            for :math:`\texttt{L_lower} \leq \ell < \texttt{L}`. Defaults to 0.

    Returns:
        List[np.ndarray]: List of precomputed coefficient arrays
        [lrenorm, vsign, cpi, cp2, indices].

    Note:
        TODO: this function should be optimised.
    """
    mm = -spin
    L0 = L_lower
    # Correct for mw to mwss conversion
    if forward and sampling.lower() in ["mw", "mwss"]:
        sampling = "mwss"
        beta = samples.thetas(2 * L, "mwss")[1:-1]
    else:
        beta = samples.thetas(L, sampling, nside)
    ntheta = len(beta)  # Number of theta samples
    el = np.arange(L0, L)

    # Trigonometric constant adopted throughout
    t = np.tan(-beta / 2.0)
    lt = np.log(np.abs(t))
    c2 = np.cos(beta / 2.0)

    # Indexing boundaries
    half_slices = [el + mm + 1, el - mm + 1]

    # Vectors with indexing -L < m < L adopted throughout
    cpi = np.zeros((L + 1, L - L0), dtype=np.float64)
    cp2 = np.zeros((L + 1, L - L0), dtype=np.float64)
    log_first_row = np.zeros((2 * L + 1, ntheta, L - L0), dtype=np.float64)

    # Populate vectors for first row: log-magnitude of the first Wigner-d
    # row, built up recursively in m.
    log_first_row[0] = np.einsum("l,t->tl", 2.0 * el, np.log(np.abs(c2)))

    for i in range(2, L + abs(mm) + 2):
        ratio = (2 * el + 2 - i) / (i - 1)
        for j in range(ntheta):
            log_first_row[i - 1, j] = (
                log_first_row[i - 2, j] + np.log(ratio) / 2 + lt[j]
            )

    # Initialising coefficients cp(m)= cplus(l-m).
    cpi[0] = 2.0 / np.sqrt(2 * el)
    for m in range(2, L + 1):
        cpi[m - 1] = 2.0 / np.sqrt(m * (2 * el + 1 - m))
        cp2[m - 1] = cpi[m - 1] / cpi[m - 2]

    # Align each ell-column so indexing starts at the right offset.
    for k in range(L0, L):
        cpi[:, k - L0] = np.roll(cpi[:, k - L0], (L - k - 1), axis=-1)
        cp2[:, k - L0] = np.roll(cp2[:, k - L0], (L - k - 1), axis=-1)

    # Then evaluate the negative half row and reflect using
    # Wigner-d symmetry relation.

    # Perform precomputations (these can be done offline)
    msign = np.hstack(((-1) ** (abs(np.arange(L - 1))), np.ones(L)))
    lsign = (-1) ** abs(mm + el)
    vsign = np.einsum("m,l->ml", msign, lsign)
    vsign[: L - 1] *= (-1) ** abs(mm + 1 + L)

    # Per-(theta, ell) log-renormalisation factors taken from the first-row
    # values at the two half-slice boundaries.
    lrenorm = np.zeros((2, ntheta, L - L0), dtype=np.float64)
    for i in range(2):
        for j in range(ntheta):
            for k in range(L0, L):
                lrenorm[i, j, k - L0] = log_first_row[
                    half_slices[i][k - L0] - 1, j, k - L0
                ]

    indices = np.repeat(np.expand_dims(np.arange(L0, L), 0), ntheta, axis=0)
    return [lrenorm, vsign, cpi, cp2, indices]
@partial(jit, static_argnums=(0, 2, 3, 4, 5))
def generate_precomputes_jax(
    L: int,
    spin: int = 0,
    sampling: str = "mw",
    nside: int = None,
    forward: bool = False,
    L_lower: int = 0,
    betas: jnp.ndarray = None,
) -> List[jnp.ndarray]:
    r"""
    Compute recursion coefficients with :math:`\mathcal{O}(L^2)` memory overhead.

    In practice one could compute these on-the-fly but the memory overhead is
    negligible and well worth the acceleration. JAX implementation of
    :func:`~generate_precomputes`.

    Args:
        L (int): Harmonic band-limit.
        spin (int, optional): Harmonic spin. Defaults to 0.
        sampling (str, optional): Sampling scheme. Supported sampling schemes include
            {"mw", "mwss", "dh", "healpix"}. Defaults to "mw".
        nside (int, optional): HEALPix Nside resolution parameter. Only required
            if sampling="healpix". Defaults to None.
        forward (bool, optional): Whether to provide forward or inverse shift.
            Defaults to False.
        L_lower (int, optional): Harmonic lower-bound. Transform will only be computed
            for :math:`\texttt{L_lower} \leq \ell < \texttt{L}`. Defaults to 0.
        betas (jnp.ndarray, optional): Array of polar angles in radians. If None,
            the angles are derived from the sampling scheme. Defaults to None.

    Returns:
        List[jnp.ndarray]: List of precomputed coefficient arrays
        [lrenorm, vsign, cpi, cp2, indices].
    """
    mm = -spin
    L0 = L_lower
    # Correct for mw to mwss conversion
    if betas is None:
        if forward and sampling.lower() in ["mw", "mwss"]:
            sampling = "mwss"
            beta = samples.thetas(2 * L, "mwss")[1:-1]
        else:
            beta = samples.thetas(L, sampling, nside)
    else:
        beta = betas
    ntheta = len(beta)  # Number of theta samples
    el = jnp.arange(L0, L)

    # Trigonometric constant adopted throughout
    t = jnp.tan(-beta / 2.0)
    lt = jnp.log(jnp.abs(t))
    c2 = jnp.cos(beta / 2.0)

    # Indexing boundaries
    half_slices = [el + mm + 1, el - mm + 1]

    # Vectors with indexing -L < m < L adopted throughout
    cpi = jnp.zeros((L + 1, L - L0), dtype=jnp.float64)
    cp2 = jnp.zeros((L + 1, L - L0), dtype=jnp.float64)

    # Initialising coefficients cp(m)= cplus(l-m).
    cpi = cpi.at[0].add(2.0 / jnp.sqrt(2 * el))

    def cpi_cp2_loop(m, args):
        # One step of the cplus-coefficient recursion and its ratio.
        cpi, cp2 = args
        cpi = cpi.at[m - 1].add(2.0 / jnp.sqrt(m * (2 * el + 1 - m)))
        cp2 = cp2.at[m - 1].add(cpi[m - 1] / cpi[m - 2])
        return cpi, cp2

    cpi, cp2 = lax.fori_loop(2, L + 1, cpi_cp2_loop, (cpi, cp2))

    def cpi_cp2_roll_loop(m, args):
        # Align each ell-column so indexing starts at the right offset.
        cpi, cp2 = args
        cpi = cpi.at[:, m - L0].set(jnp.roll(cpi[:, m - L0], (L - m - 1), axis=-1))
        cp2 = cp2.at[:, m - L0].set(jnp.roll(cp2[:, m - L0], (L - m - 1), axis=-1))
        return cpi, cp2

    cpi, cp2 = lax.fori_loop(L0, L, cpi_cp2_roll_loop, (cpi, cp2))

    # Then evaluate the negative half row and reflect using
    # Wigner-d symmetry relation.

    # Perform precomputations (these can be done offline)
    msign = jnp.hstack(((-1) ** (abs(jnp.arange(L - 1))), jnp.ones(L)))
    lsign = (-1) ** abs(mm + el)
    vsign = jnp.einsum("m,l->ml", msign, lsign, optimize=True)
    vsign = vsign.at[: L - 1].multiply((-1) ** abs(mm + 1 + L))

    # Populate vectors for first row
    lrenorm = jnp.zeros((2, ntheta, L - L0), dtype=jnp.float64)
    log_first_row_iter = jnp.einsum(
        "l,t->tl", 2.0 * el, jnp.log(jnp.abs(c2)), optimize=True
    )

    # Log-ratio increments for the first-row recursion.  Index 1 divides by
    # zero, but the fori_loop below starts at 2 so it is never read; any
    # residual non-finite entries in lrenorm are zeroed at the end.
    ratio_update = jnp.arange(2 * L + 1)
    ratio = jnp.repeat(jnp.expand_dims(2 * el + 2, -1), 2 * L + 1, axis=-1)
    ratio -= ratio_update
    ratio /= ratio_update - 1
    ratio = jnp.log(jnp.swapaxes(ratio, 0, 1)) / 2

    for ind in range(2):
        lrenorm = lrenorm.at[ind].set(
            jnp.where(1 == half_slices[ind], log_first_row_iter, lrenorm[ind])
        )

    def renorm_m_loop(i, args):
        # Advance the first-row log-magnitude by one m and capture it where
        # it coincides with either half-slice boundary.
        log_first_row_iter, lrenorm = args
        log_first_row_iter += ratio[i]
        log_first_row_iter = jnp.swapaxes(log_first_row_iter, 0, 1)
        log_first_row_iter += lt
        log_first_row_iter = jnp.swapaxes(log_first_row_iter, 0, 1)
        for ind in range(2):
            lrenorm = lrenorm.at[ind].set(
                jnp.where(i == half_slices[ind], log_first_row_iter, lrenorm[ind])
            )
        return log_first_row_iter, lrenorm

    _, lrenorm = lax.fori_loop(
        2, L + abs(mm) + 2, renorm_m_loop, (log_first_row_iter, lrenorm)
    )

    indices = jnp.repeat(jnp.expand_dims(jnp.arange(L0, L), 0), ntheta, axis=0)

    # Remove redundant nans:
    # - in forward pass these are not accessed, so are irrelevant.
    # - in backward pass the adjoint computation otherwise accumulates these
    #   nans into grads if not explicitly set to zero.
    lrenorm = jnp.nan_to_num(lrenorm, nan=0.0, posinf=0.0, neginf=0.0)
    cpi = jnp.nan_to_num(cpi, nan=0.0, posinf=0.0, neginf=0.0)
    cp2 = jnp.nan_to_num(cp2, nan=0.0, posinf=0.0, neginf=0.0)
    return [lrenorm, vsign, cpi, cp2, indices]
def generate_precomputes_wigner(
    L: int,
    N: int,
    sampling: str = "mw",
    nside: int = None,
    forward: bool = False,
    reality: bool = False,
    L_lower: int = 0,
) -> List[List[np.ndarray]]:
    r"""
    Wrap :func:`~generate_precomputes` over all azimuthal orders of a Wigner
    transform on SO(3).

    One set of recursion coefficients is generated per order
    :math:`n \in [-N+1, N)`, or :math:`[0, N)` when ``reality`` is set.

    Args:
        L (int): Harmonic band-limit.
        N (int): Azimuthal band-limit.
        sampling (str, optional): Sampling scheme. Supported sampling schemes
            include {"mw", "mwss", "dh", "healpix"}. Defaults to "mw".
        nside (int, optional): HEALPix Nside resolution parameter. Only required
            if sampling="healpix". Defaults to None.
        forward (bool, optional): Whether to provide forward or inverse shift.
            Defaults to False.
        reality (bool, optional): Whether the signal on the sphere is real,
            in which case the negative orders are skipped. Defaults to False.
        L_lower (int, optional): Harmonic lower-bound. Defaults to 0.

    Returns:
        List[List[np.ndarray]]: 2N-1 (or N, if ``reality``) lists of
        precomputed coefficient arrays, one per order n.

    Note:
        TODO: this function should be optimised.
    """
    first_n = 0 if reality else 1 - N
    return [
        generate_precomputes(L, -n, sampling, nside, forward, L_lower)
        for n in range(first_n, N)
    ]
@partial(jit, static_argnums=(0, 1, 2, 3, 4, 5, 6))
def generate_precomputes_wigner_jax(
    L: int,
    N: int,
    sampling: str = "mw",
    nside: int = None,
    forward: bool = False,
    reality: bool = False,
    L_lower: int = 0,
) -> List[List[jnp.ndarray]]:
    r"""
    Wrap :func:`~generate_precomputes_jax` over all azimuthal orders of a
    Wigner transform on SO(3). JAX implementation of
    :func:`~generate_precomputes_wigner`.

    Args:
        L (int): Harmonic band-limit.
        N (int): Azimuthal band-limit.
        sampling (str, optional): Sampling scheme. Supported sampling schemes
            include {"mw", "mwss", "dh", "healpix"}. Defaults to "mw".
        nside (int, optional): HEALPix Nside resolution parameter. Only required
            if sampling="healpix". Defaults to None.
        forward (bool, optional): Whether to provide forward or inverse shift.
            Defaults to False.
        reality (bool, optional): Whether the signal on the sphere is real,
            in which case the negative orders are skipped. Defaults to False.
        L_lower (int, optional): Harmonic lower-bound. Defaults to 0.

    Returns:
        List[List[jnp.ndarray]]: 2N-1 length List of Lists of precomputed
        coefficient arrays.
    """
    n_start_ind = 0 if reality else -N + 1
    lrenorm = []
    vsign = []
    cpi = []
    cp2 = []
    indices = []
    for n in range(n_start_ind, N):
        lr, vs, ci, c2, idx = generate_precomputes_jax(
            L, -n, sampling, nside, forward, L_lower
        )
        lrenorm.append(lr)
        vsign.append(vs)
        if not cpi:
            # cpi/cp2/indices do not depend on the order n — keep one copy.
            cpi.append(ci)
            cp2.append(c2)
            indices.append(idx)
    return [
        jnp.asarray(lrenorm),
        jnp.asarray(vsign),
        jnp.asarray(cpi),
        jnp.asarray(cp2),
        jnp.asarray(indices),
    ]
def compute_all_slices(
    beta: np.ndarray, L: int, spin: int, precomps=None
) -> np.ndarray:
    r"""
    Compute a particular slice :math:`m^{\prime}`, denoted `mm`,
    of the complete Wigner-d matrix for all sampled polar angles
    :math:`\beta` and all :math:`\ell` using Price & McEwen recursion.

    The recursion runs over :math:`m` for a fixed :math:`m^{\prime}`; negative
    :math:`m` are evaluated directly and positive :math:`m` follow from the
    Wigner-d symmetry relation.  On-the-fly renormalisation keeps the
    iterants :math:`\sim \mathcal{O}(1)` to avoid over/under-flows.

    The slice :math:`d^\ell_{m, m^{\prime}}(\beta)` is indexed for
    :math:`-L < m < L` by `dl[L - 1 - m, \beta, \ell]`.

    Args:
        beta (np.ndarray): Array of polar angles in radians.
        L (int): Harmonic band-limit.
        spin (int, optional): Harmonic spin. Defaults to 0.
        precomps (List[np.ndarray], optional): Precomputed recursion
            coefficients [lrenorm, vsign, cpi, cp2, indices] as returned by
            :func:`~generate_precomputes`. If None they are computed here.

    Returns:
        np.ndarray: Wigner-d matrix mm slice of dimension
        :math:`[2L-1, n_{\theta}, n_{\ell}]`.
    """
    # Indexing boundaries and constants
    mm = -spin
    ntheta = len(beta)
    lims = [0, -1]
    el = np.arange(L)

    # Trigonometric constants adopted throughout
    c = np.cos(beta)
    s = np.sin(beta)
    omc = 1.0 - c

    # Indexing boundaries
    half_slices = [el + mm + 1, el - mm + 1]

    dl_test = np.zeros((2 * L - 1, ntheta, L), dtype=np.float64)
    if precomps is None:
        # BUGFIX: generate_precomputes takes (L, spin, ...) and returns
        # exactly [lrenorm, vsign, cpi, cp2, indices]; the previous call
        # passed (beta, L, mm) and unpacked 7 values, so this branch could
        # never have worked.
        # NOTE(review): generate_precomputes derives its own theta grid from
        # (L, sampling="mw"), so `beta` must correspond to MW sampling here;
        # pass `precomps` explicitly for any other grid.
        lrenorm, vsign, cpi, cp2, indices = generate_precomputes(L, spin)
    else:
        lrenorm, vsign, cpi, cp2, indices = precomps

    for i in range(2):
        lind = L - 1
        sind = lims[i]
        sgn = (-1) ** (i)
        dl_iter = np.ones((2, ntheta, L), dtype=np.float64)

        # Recursion coefficient lambda for the seeding step (m = 1).
        lamb = (
            np.einsum("l,t->tl", el + 1, omc)
            + np.einsum("l,t->tl", 2 - L + el, c)
            - half_slices[i]
        )
        lamb = np.einsum("tl,t->tl", lamb, 1 / s)

        dl_iter[1, :, lind:] = np.einsum(
            "l,tl->tl",
            cpi[0, lind:],
            dl_iter[0, :, lind:] * lamb[:, lind:],
        )

        # Seed the first two recursion entries, undoing the renormalisation.
        dl_test[sind, :, lind:] = (
            dl_iter[0, :, lind:] * vsign[sind, lind:] * np.exp(lrenorm[i, :, lind:])
        )
        dl_test[sind + sgn, :, lind - 1 :] = (
            dl_iter[1, :, lind - 1 :]
            * vsign[sind + sgn, lind - 1 :]
            * np.exp(lrenorm[i, :, lind - 1 :])
        )

        dl_entry = np.zeros((ntheta, L), dtype=np.float64)
        for m in range(2, L):
            # Only ell columns with |m| <= ell participate at this step.
            index = indices >= L - m - 1
            lamb = (
                np.einsum("l,t->tl", el + 1, omc)
                + np.einsum("l,t->tl", m - L + el + 1, c)
                - half_slices[i]
            )
            lamb = np.einsum("tl,t->tl", lamb, 1 / s)

            dl_entry = np.where(
                index,
                np.einsum("l,tl->tl", cpi[m - 1], dl_iter[1] * lamb)
                - np.einsum("l,tl->tl", cp2[m - 1], dl_iter[0]),
                dl_entry,
            )
            dl_entry[:, -(m + 1)] = 1

            dl_test[sind + sgn * m] = np.where(
                index,
                dl_entry * vsign[sind + sgn * m] * np.exp(lrenorm[i]),
                dl_test[sind + sgn * m],
            )

            # On-the-fly renormalisation keeps iterants ~O(1); the running
            # log-magnitude is accumulated into lrenorm.
            bigi = 1.0 / abs(dl_entry)
            lbig = np.log(abs(dl_entry))
            dl_iter[0] = np.where(index, bigi * dl_iter[1], dl_iter[0])
            dl_iter[1] = np.where(index, bigi * dl_entry, dl_iter[1])
            lrenorm[i] = np.where(index, lrenorm[i] + lbig, lrenorm[i])

    return dl_test
@partial(jit, static_argnums=(1, 3, 4, 5))
def compute_all_slices_jax(
    beta: jnp.ndarray,
    L: int,
    spin: int,
    sampling: str = "mw",
    forward: bool = False,
    nside: int = None,
    precomps=None,
) -> jnp.ndarray:
    r"""
    Compute a particular slice :math:`m^{\prime}`, denoted `mm`,
    of the complete Wigner-d matrix for all sampled polar angles
    :math:`\beta` and all :math:`\ell` using Price & McEwen recursion.

    The Wigner-d slice for all :math:`\ell` (`el`) and :math:`\beta` is
    computed recursively over :math:`m` labelled 'm' at a specific
    :math:`m^{\prime}`. The Price & McEwen recursion is analytically correct
    from :math:`-\ell < m < \ell` however numerically it can become unstable for
    :math:`m > 0`. To avoid this we compute :math:`d_{m,
    m^{\prime}}^{\ell}(\beta)` for negative :math:`m` and then evaluate
    :math:`d_{m, -m^{\prime}}^{\ell}(\beta) = (-1)^{m-m^{\prime}} d_{-m,
    m^{\prime}}^{\ell}(\beta)` which we can again evaluate using the same recursion.

    On-the-fly renormalisation is implemented to avoid potential over/under-flows,
    within any given iteration of the recursion the iterants are :math:`\sim \mathcal{O}(1)`.

    The Wigner-d slice :math:`d^\ell_{m, m^{\prime}}(\beta)` is indexed for
    :math:`-L < m < L` by `dl[L - 1 - m, \beta, \ell]`. This implementation has
    computational scaling :math:`\mathcal{O}(L)` and typically requires :math:`\sim 2L`
    operations.

    Args:
        beta (jnp.ndarray): Array of polar angles in radians.
        L (int): Harmonic band-limit.
        spin (int, optional): Harmonic spin. Defaults to 0.
        sampling (str, optional): Sampling scheme, forwarded to
            :func:`~generate_precomputes_jax` when precomps is None.
            Defaults to "mw".
        forward (bool, optional): Forward/inverse shift, forwarded to
            :func:`~generate_precomputes_jax` when precomps is None.
            Defaults to False.
        nside (int, optional): HEALPix Nside, forwarded to
            :func:`~generate_precomputes_jax` when precomps is None.
            Defaults to None.
        precomps (List[jnp.ndarray], optional): Precomputed recursion
            coefficients [lrenorm, vsign, cpi, cp2, indices] with memory
            overhead :math:`\mathcal{O}(L^2)`, which is minimal.

    Returns:
        jnp.ndarray: Wigner-d matrix mm slice of dimension :math:`[2L-1, n_{\theta}, n_{\ell}]`.
    """
    # Indexing boundaries and constants
    mm = -spin
    ntheta = len(beta)
    lims = [0, -1]

    # Trigonometric constant adopted throughout
    c = jnp.cos(beta)
    s = jnp.sin(beta)
    omc = 1.0 - c
    el = jnp.arange(L)

    # Indexing boundaries
    half_slices = [el + mm + 1, el - mm + 1]

    dl_test = jnp.zeros((2 * L - 1, ntheta, L), dtype=jnp.float64)
    if precomps is None:
        # Compute the coefficients on-the-fly for the supplied beta grid.
        lrenorm, vsign, cpi, cp2, indices = generate_precomputes_jax(
            L, spin, sampling, nside, forward, 0, beta
        )
    else:
        lrenorm, vsign, cpi, cp2, indices = precomps

    for i in range(2):
        lind = L - 1
        sind = lims[i]
        sgn = (-1) ** (i)
        dl_iter = jnp.ones((2, ntheta, L), dtype=jnp.float64)

        # Recursion coefficient lambda for the seeding step (m = 1).
        lamb = (
            jnp.einsum("l,t->tl", el + 1, omc, optimize=True)
            + jnp.einsum("l,t->tl", 2 - L + el, c, optimize=True)
            - half_slices[i]
        )
        lamb = jnp.einsum("tl,t->tl", lamb, 1 / s, optimize=True)

        dl_iter = dl_iter.at[1, :, lind:].set(
            jnp.einsum(
                "l,tl->tl",
                cpi[0, lind:],
                dl_iter[0, :, lind:] * lamb[:, lind:],
            )
        )

        # Seed the first two recursion entries, undoing the renormalisation.
        dl_test = dl_test.at[sind, :, lind:].set(
            dl_iter[0, :, lind:] * vsign[sind, lind:] * jnp.exp(lrenorm[i, :, lind:])
        )
        dl_test = dl_test.at[sind + sgn, :, lind - 1 :].set(
            dl_iter[1, :, lind - 1 :]
            * vsign[sind + sgn, lind - 1 :]
            * jnp.exp(lrenorm[i, :, lind - 1 :])
        )

        dl_entry = jnp.zeros((ntheta, L), dtype=jnp.float64)

        def pm_recursion_step(m, args):
            # Single recursion step in m with on-the-fly renormalisation.
            dl_test, dl_entry, dl_iter, lrenorm, indices, omc, c, s = args
            # Only ell columns with |m| <= ell participate at this step.
            index = indices >= L - m - 1
            lamb = (
                jnp.einsum("l,t->tl", el + 1, omc, optimize=True)
                + jnp.einsum("l,t->tl", m - L + el + 1, c, optimize=True)
                - half_slices[i]
            )
            lamb = jnp.einsum("tl,t->tl", lamb, 1 / s, optimize=True)
            dl_entry = jnp.where(
                index,
                jnp.einsum("l,tl->tl", cpi[m - 1], dl_iter[1] * lamb, optimize=True)
                - jnp.einsum("l,tl->tl", cp2[m - 1], dl_iter[0], optimize=True),
                dl_entry,
            )
            dl_entry = dl_entry.at[:, -(m + 1)].set(1)

            dl_test = dl_test.at[sind + sgn * m].set(
                jnp.where(
                    index,
                    dl_entry * vsign[sind + sgn * m] * jnp.exp(lrenorm[i]),
                    dl_test[sind + sgn * m],
                )
            )

            # Renormalise iterants to ~O(1), accumulating the log-magnitude.
            bigi = 1.0 / abs(dl_entry)
            lbig = jnp.log(abs(dl_entry))

            dl_iter = dl_iter.at[0].set(jnp.where(index, bigi * dl_iter[1], dl_iter[0]))
            dl_iter = dl_iter.at[1].set(jnp.where(index, bigi * dl_entry, dl_iter[1]))
            lrenorm = lrenorm.at[i].set(jnp.where(index, lrenorm[i] + lbig, lrenorm[i]))
            return dl_test, dl_entry, dl_iter, lrenorm, indices, omc, c, s

        dl_test, dl_entry, dl_iter, lrenorm, indices, omc, c, s = lax.fori_loop(
            2,
            L,
            pm_recursion_step,
            (dl_test, dl_entry, dl_iter, lrenorm, indices, omc, c, s),
        )
    return dl_test
|
astro-informaticsREPO_NAMEs2fftPATH_START.@s2fft_extracted@s2fft-main@s2fft@recursions@price_mcewen.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "radiocosmology/driftscan",
"repo_path": "driftscan_extracted/driftscan-master/drift/__init__.py",
"type": "Python"
}
|
"""Modelling for transit radio telescopes.
The existing code is mostly focussed on interferometers but can also be used
for multi-beam transit telescopes.
Submodules
==========
.. autosummary::
:toctree: _autosummary
core
pipeline
scripts
telescope
util
"""
from importlib.metadata import PackageNotFoundError, version

# Expose the installed package version; when running from an uninstalled
# source tree, __version__ is simply left undefined.
try:
    __version__ = version("driftscan")
except PackageNotFoundError:
    # package is not installed
    pass

# Keep the module namespace clean.
del version, PackageNotFoundError
|
radiocosmologyREPO_NAMEdriftscanPATH_START.@driftscan_extracted@driftscan-master@drift@__init__.py@.PATH_END.py
|
{
"filename": "test_miri_lrs_slit_spec3.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/regtest/test_miri_lrs_slit_spec3.py",
"type": "Python"
}
|
""" Test of the spec3 pipeline using MIRI LRS fixed-slit exposures.
This takes an association and generates the level 3 products."""
import pytest
import numpy as np
from gwcs import wcstools
import asdf
from astropy.io.fits.diff import FITSDiff
from jwst.stpipe import Step
from jwst import datamodels
@pytest.fixture(
    scope="module",
    params=["default_wcs", "user_wcs", "user_wcs+shape", "user_wcs+shape1"]
)
def run_pipeline(rtdata_module, request):
    """
    Run the calwebb_spec3 pipeline on an ASN of nodded MIRI LRS
    fixed-slit exposures using different options for the WCS and output
    image shape for the resample step.

    The first iteration ("default_wcs") creates an output WCS for the combined
    product based on the built-in WCS's in the inputs.

    The second iteration ("user_wcs") creates a user-supplied WCS input file
    using the WCS from the default product, just to prove that you get the
    identical result when using a user-supplied WCS.

    The third iteration ("user_wcs+shape") uses the same user-supplied WCS and also
    specifies the output 2D file shape, using a shape that is identical to the
    default. Hence all of the first 3 iterations should produce identical results
    and therefore are compared to a single set of truth files.

    The fourth iteration ("user_wcs+shape1") uses the same user-specified WCS and
    specifies an output 2D file shape that is 1 pixel larger than the default in
    both axes. Hence the resulting s2d product needs a separate (larger) truth file.
    Meanwhile, the x1d product from this iteration should still be identical to
    the first 3, because the extra row and column of the 2D data file are ignored
    during extraction.
    """
    rtdata = rtdata_module

    # Get the spec3 ASN and its members
    rtdata.get_asn("miri/lrs/jw01530-o005_20221202t204827_spec3_00001_asn.json")
    root_file = "jw01530-o005_t004_miri_p750l_"

    args = [
        "calwebb_spec3",
        rtdata.input
    ]

    # Record the current parametrization so the test below can pick the
    # matching truth file.
    rtdata.custom_wcs_mode = request.param
    if request.param != "default_wcs":
        # Get the s2d product that was just created using "default_wcs"
        # (relies on the "default_wcs" parametrization having run first).
        default_s2d = root_file + "s2d.fits"
        dm = datamodels.open(default_s2d)

        # Create a user-supplied WCS file that is identical to the default WCS
        af = asdf.AsdfFile({"wcs": dm.meta.wcs})
        wcs_file = default_s2d[:-8] + 'wcs.asdf'
        af.write_to(wcs_file)
        args.append(f"--steps.resample_spec.output_wcs={wcs_file}")

        if request.param == "user_wcs+shape":
            # Same shape as the default product (output_shape is x,y order).
            output_shape = ','.join(map(str, dm.data.shape[::-1]))
            args.append(f"--steps.resample_spec.output_shape={output_shape}")
        elif request.param == "user_wcs+shape1":
            # One pixel larger than the default in both axes.
            output_shape = ','.join(map(str, (d + 1 for d in dm.data.shape[::-1])))
            args.append(f"--steps.resample_spec.output_shape={output_shape}")
            output_file = root_file + 'shape1.fits'
            args.append(f"--steps.resample_spec.output_file={output_file}")

    # Run the calwebb_spec3 pipeline; save results from intermediate steps
    Step.from_cmdline(args)
@pytest.mark.bigdata
@pytest.mark.parametrize("suffix", ["s2d", "x1d"])
def test_miri_lrs_slit_spec3(run_pipeline, rtdata_module, fitsdiff_default_kwargs, suffix):
    """Regression test of the calwebb_spec3 pipeline on MIRI
    LRS fixed-slit data using along-slit-nod pattern for
    background subtraction."""

    # Run the pipeline and retrieve outputs
    rtdata = rtdata_module
    if rtdata.custom_wcs_mode == 'user_wcs+shape1' and suffix == "s2d":
        output = f"jw01530-o005_t004_miri_p750l_shape1_{suffix}.fits"
    else:
        output = f"jw01530-o005_t004_miri_p750l_{suffix}.fits"
    rtdata.output = output

    # Get the truth files
    rtdata.get_truth(f"truth/test_miri_lrs_slit_spec3/{output}")

    # Compare the results
    diff = FITSDiff(rtdata.output, rtdata.truth, **fitsdiff_default_kwargs)
    assert diff.identical, diff.report()

    # BUGFIX: was `if output == "s2d":` — `output` is the full filename, so
    # the wavelength comparison below was never executed.
    if suffix == "s2d":
        # Compare the wavelengths computed from the WCS against the stored
        # wavelength arrays.
        tolerance = 1e-03
        dmt = datamodels.open(rtdata.truth)
        dmr = datamodels.open(rtdata.output)
        if isinstance(dmt, datamodels.MultiSlitModel):
            # BUGFIX: the original built lists of (wcs, wavelength) tuples
            # and used them as slit indices, which cannot work; match truth
            # and result slits by name instead.
            for slit_t in dmt.slits:
                slit_r = next(s for s in dmr.slits if s.name == slit_t.name)
                w = slit_t.meta.wcs
                x, y = wcstools.grid_from_bounding_box(w.bounding_box, step=(1, 1), center=True)
                _, _, wave = w(x, y)
                wlr = slit_r.wavelength
                assert np.all(np.isclose(wave, wlr, atol=tolerance))
        else:
            w = dmt.meta.wcs
            x, y = wcstools.grid_from_bounding_box(w.bounding_box, step=(1, 1), center=True)
            _, _, wave = w(x, y)
            wlr = dmr.wavelength
            assert np.all(np.isclose(wave, wlr, atol=tolerance))
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@regtest@test_miri_lrs_slit_spec3.py@.PATH_END.py
|
{
"filename": "rhoqso_test.py",
"repo_name": "gkulkarni/QLF",
"repo_path": "QLF_extracted/QLF-master/rhoqso_test.py",
"type": "Python"
}
|
import numpy as np
import matplotlib as mpl
mpl.use('Agg')  # non-interactive backend: render straight to files
# Render all text with LaTeX, Computer Modern serif, 22 pt.
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['font.serif'] = 'cm'
mpl.rcParams['font.size'] = '22'
import matplotlib.pyplot as plt
from numpy.polynomial import Chebyshev as T
# Evolution parameters for the double power-law QLF (see lfParams):
# p_log10phiStar and p_alpha are Chebyshev coefficients in (1+z);
# p_MStar holds ordinary-polynomial (np.polyval) coefficients in (1+z);
# p_beta packs (h, f0, z0, a, b) for the double power-law form of beta.
# Commented-out values are alternative fits kept for reference.
p_log10phiStar = [-7.73388053, 1.06477161, -0.11304974]
# p_MStar = [-17.84979944, -4.90153699, 0.49748768, -0.01925119]
p_MStar = [-0.07700476, 0.99497536, -4.84378342, -18.34728712] #[-45.54987107, 66.51882126, -37.01499714, -18.47003438]
p_alpha = [-3.22779072, -0.27456505]
p_beta = [-2.42666272, 0.91088261, 3.48830563, 9.96182361, -0.1530638]
# p_log10phiStar = np.array([-8.73893404, 1.62542678, -0.14453768])
# p_MStar = np.array([-18.46966787, -4.8580583 , 0.57781389, -0.02531129])
# p_alpha = np.array([-4.45250745, 0.20357239])
# p_beta = np.array([-2.33424594, 1.19579103, 3.01011911, 8.31975455, -1.02475454])
def lfParams(z, pPhiStar, pMStar, pAlpha, pBeta):
    """Evaluate the four double power-law LF parameters at redshift z.

    pPhiStar and pAlpha are Chebyshev coefficients in (1+z); pMStar is an
    ordinary polynomial in (1+z); pBeta packs (h, f0, z0, a, b) for the
    double power-law redshift evolution of beta.
    """
    x = 1.0 + z
    log10phiStar = T(pPhiStar)(x)
    mStar = np.polyval(pMStar, x)
    alpha = T(pAlpha)(x)
    h, f0, z0, a, b = pBeta
    zeta = np.log10(x / (1.0 + z0))
    beta = h + f0 / (10.0**(a * zeta) + 10.0**(b * zeta))
    return log10phiStar, mStar, alpha, beta
def phi(z, m, *params):
    """Double power-law QSO luminosity function at redshift z, magnitude m.

    `params` is (pPhiStar, pMStar, pAlpha, pBeta), forwarded to lfParams.
    Vectorises over m when m is an ndarray.
    """
    log10phiStar, mStar, alpha, beta = lfParams(z, *params)
    dm = m - mStar
    denom = 10.0**(0.4 * (alpha + 1) * dm) + 10.0**(0.4 * (beta + 1) * dm)
    return 10.0**log10phiStar / denom
def dlfParamsdz(z, pPhiStar, pMStar, pAlpha, pBeta):
    """Analytic d/dz of the four LF parameters returned by lfParams.

    Returns (dlog10phiStar/dz, dmStar/dz, dalpha/dz, dbeta/dz).
    """
    dlog10phiStardz = T.deriv(T(pPhiStar))(1 + z)
    # CONSISTENCY FIX: lfParams evaluates mStar with np.polyval(pMStar, 1+z)
    # (an ordinary polynomial), so its derivative must be the ordinary
    # polynomial derivative — the previous Chebyshev derivative did not
    # match the forward model.
    dmStardz = np.polyval(np.polyder(pMStar), 1.0 + z)
    dalphadz = T.deriv(T(pAlpha))(1 + z)
    h, f0, z0, a, b = pBeta
    zeta = np.log10((1.0 + z) / (1.0 + z0))
    # Equivalent to -f0*(a*10**(a*zeta)+b*10**(b*zeta)) / ((1+z)*(...)**2):
    # the 10**((x-1)*zeta)/(1+z0) form folds in the 1/(1+z) factor via
    # 10**(-zeta) = (1+z0)/(1+z).
    dbetadz = (-f0*(a*10.0**((a-1)*zeta)/(1.0+z0)
                    + b*10.0**((b-1)*zeta)/(1.0+z0))/
               (10.0**(a*zeta) + 10.0**(b*zeta))**2)
    return dlog10phiStardz, dmStardz, dalphadz, dbetadz
def dphidphiStar(z, m, *params):
    # d(phi)/d(log10 phiStar): phi is proportional to 10**log10phiStar,
    # so the derivative is simply phi * ln(10).
    return phi(z, m, *params) * np.log(10.0)
def dphidalpha(z, m, *params):
    # Partial derivative of phi with respect to the slope alpha.
    # NOTE: the local `phi` deliberately shadows the module-level function.
    log10phiStar, mStar, alpha, beta = lfParams(z, *params)
    phi = 10.0**log10phiStar / (10.0**(0.4*(alpha+1)*(m-mStar)) +
                                10.0**(0.4*(beta+1)*(m-mStar)))
    # Chain rule on the alpha term of the denominator brings down
    # ln(10) * 0.4 * (m - mStar).
    d = (- phi * np.log(10.0) * 0.4 * (m-mStar) * 10.0**(0.4*(alpha+1)*(m-mStar)) /
         (10.0**(0.4*(alpha+1)*(m-mStar)) + 10.0**(0.4*(beta+1)*(m-mStar))))
    return d
def dphidbeta(z, m, *params):
    # Partial derivative of phi with respect to the slope beta; mirrors
    # dphidalpha with the beta term of the denominator differentiated.
    log10phiStar, mStar, alpha, beta = lfParams(z, *params)
    phi = 10.0**log10phiStar / (10.0**(0.4*(alpha+1)*(m-mStar)) +
                                10.0**(0.4*(beta+1)*(m-mStar)))
    d = (- phi * np.log(10.0) * 0.4 * (m-mStar) * 10.0**(0.4*(beta+1)*(m-mStar)) /
         (10.0**(0.4*(alpha+1)*(m-mStar)) + 10.0**(0.4*(beta+1)*(m-mStar))))
    return d
def dphidMStar(z, m, *params):
    # Partial derivative of phi with respect to the break magnitude mStar:
    # both denominator terms depend on (m - mStar).
    log10phiStar, mStar, alpha, beta = lfParams(z, *params)
    phi = 10.0**log10phiStar / (10.0**(0.4*(alpha+1)*(m-mStar)) +
                                10.0**(0.4*(beta+1)*(m-mStar)))
    # d/dmStar of each 10**(0.4*(x+1)*(m-mStar)) term.
    d1 = 10.0**(0.4*(alpha+1)*(m-mStar)) * np.log(10.0) * (-0.4*(alpha+1))
    d2 = 10.0**(0.4*(beta+1)*(m-mStar)) * np.log(10.0) * (-0.4*(beta+1))
    d = (- phi * (d1+d2) /
         (10.0**(0.4*(alpha+1)*(m-mStar)) + 10.0**(0.4*(beta+1)*(m-mStar))))
    return d
def dphidz(z, m, *params):
    # Total d(phi)/dz via the chain rule over the four LF parameters.
    dphiStardz, dmStardz, dalphadz, dbetadz = dlfParamsdz(z, *params)
    return (dphidphiStar(z, m, *params)*dphiStardz +
            dphidMStar(z, m, *params)*dmStardz +
            dphidalpha(z, m, *params)*dalphadz +
            dphidbeta(z, m, *params)*dbetadz)
def plotLF(*params):
    """Plot the luminosity function phi(M) at z = 5, 7, ..., 13 to lf.pdf."""
    m = np.linspace(-55., -10., num=500)
    fig = plt.figure(figsize=(7, 7), dpi=100)
    ax = fig.add_subplot(1, 1, 1)
    ax.tick_params('both', which='major', length=7, width=1)
    ax.tick_params('both', which='minor', length=5, width=1)
    ax.set_ylabel(r'$\phi$')
    ax.set_xlabel(r'$M$')
    ax.set_yscale('log')
    for z in range(5, 15, 2):
        p = [phi(z, x, *params) for x in m]
        plt.plot(m, p, lw=2, label='$z='+str(z)+'$')
    leg = plt.legend(loc='lower left', fontsize=10, handlelength=3,
                     frameon=False, framealpha=0.0, labelspacing=.1,
                     handletextpad=0.1, borderpad=0.01, scatterpoints=1)
    # Reversed x-axis: bright magnitudes to the right.
    plt.xlim(-10,-55)
    plt.savefig('lf.pdf', bbox_inches='tight')
    return
def rhoqso(mlim, z, *params):
    """Comoving number density of quasars brighter than mlim at redshift z.

    Integrates phi over magnitude from -35 down to mlim by the trapezoid
    rule on a 1000-point grid.
    """
    grid = np.linspace(-35.0, mlim, num=1000)
    return np.trapz(phi(z, grid, *params), grid)
def drhoqsodz(mlim, z, *params):
    # d(rhoqso)/dz: same magnitude integral as rhoqso, with dphi/dz as the
    # integrand.
    m = np.linspace(-35.0, mlim, num=1000)
    farr = dphidz(z, m, *params)
    return np.trapz(farr, m)
def plotRhoQso(zmin, zmax):
    """Plot rhoqso(z) for limiting magnitudes -18, -21, -24, -27 to
    rhoqso_test.pdf.

    NOTE(review): the zmin/zmax arguments are immediately overwritten with
    0 and 15 below, so callers cannot actually control the range — confirm
    whether that is intended.
    """
    fig = plt.figure(figsize=(7, 10), dpi=100)
    ax = fig.add_subplot(1, 1, 1)
    ax.tick_params('both', which='major', length=7, width=1)
    ax.tick_params('both', which='minor', length=5, width=1)
    # ax.set_ylabel(r'$\rho(z, M_{1450} < M_\mathrm{lim})$ [cMpc$^{-3}$]')
    ax.set_ylabel(r'$\rho_\mathrm{qso}$ [cMpc$^{-3}$]')
    ax.set_xlabel('$z$')
    zmin = 0
    zmax = 15
    ax.set_xlim(zmin, zmax)
    zc = np.linspace(zmin, zmax, num=500)
    # One curve per limiting magnitude; inline annotations kept commented.
    rho = [rhoqso(-18, x, p_log10phiStar, p_MStar, p_alpha, p_beta) for x in zc]
    ax.plot(zc, rho, c='k', lw=2)
    #plt.text(5.0, 3e-5, '$M<-18$', fontsize=16, rotation=-61)
    rho = [rhoqso(-21, x, p_log10phiStar, p_MStar, p_alpha, p_beta) for x in zc]
    ax.plot(zc, rho, c='k', lw=2)
    #plt.text(4.5, 1.4e-6, '$M<-21$', fontsize=16, rotation=-63)
    rho = [rhoqso(-24, x, p_log10phiStar, p_MStar, p_alpha, p_beta) for x in zc]
    ax.plot(zc, rho, c='k', lw=2)
    #plt.text(4.2, 6.0e-8, '$M<-24$', fontsize=16, rotation=-64)
    rho = [rhoqso(-27, x, p_log10phiStar, p_MStar, p_alpha, p_beta) for x in zc]
    ax.plot(zc, rho, c='k', lw=2)
    #plt.text(4.5, 6.0e-10, '$M<-27$', fontsize=16, rotation=-62)
    ax.set_yscale('log')
    ax.set_ylim(1.0e-40, 1.0e-3)
    plt.savefig('rhoqso_test.pdf',bbox_inches='tight')
    return
def plotdRhoQsodz(zmin, zmax):
    """Plot |d rho_qso/dz| versus redshift for several magnitude limits.

    Positive values are drawn in black and negative values (sign-flipped so
    they can share the log axis) in tomato.  One curve pair per limiting
    magnitude in (-18, -21, -24, -27), evaluated with the module-level
    best-fit parameters.  Writes the figure to 'drhoqsodz_test.pdf'.

    Parameters
    ----------
    zmin, zmax : float
        Redshift range of the plot.  (Bug fix: these arguments were
        previously shadowed by hard-coded 0 and 15 and silently ignored.)
    """
    fig = plt.figure(figsize=(7, 10), dpi=100)
    ax = fig.add_subplot(1, 1, 1)
    ax.tick_params('both', which='major', length=7, width=1)
    ax.tick_params('both', which='minor', length=5, width=1)
    ax.set_ylabel(r'$d\rho_\mathrm{qso}/dz$ [cMpc$^{-3}$]')
    ax.set_xlabel('$z$')
    ax.set_xlim(zmin, zmax)
    zc = np.linspace(zmin, zmax, num=500)
    mlims = (-18, -21, -24, -27)
    for mlim in mlims:
        rho = np.array([drhoqsodz(mlim, x, p_log10phiStar, p_MStar,
                                  p_alpha, p_beta) for x in zc])
        rhop = np.where(rho > 0.0, rho, 0.0)
        rhon = np.where(rho < 0.0, -rho, 0.0)
        # Label only the last pair so the legend has one entry per sign.
        if mlim == mlims[-1]:
            ax.plot(zc, rhop, c='k', lw=2, label=r'positive values')
            ax.plot(zc, rhon, c='tomato', lw=2, label=r'negative values')
        else:
            ax.plot(zc, rhop, c='k', lw=2)
            ax.plot(zc, rhon, c='tomato', lw=2)
    ax.set_yscale('log')
    ax.set_ylim(1.0e-20, 1.0e30)
    plt.legend(loc='upper left', fontsize=14, handlelength=3,
               frameon=False, framealpha=0.0, labelspacing=.1,
               handletextpad=0.1, borderpad=0.5, scatterpoints=1)
    plt.savefig('drhoqsodz_test.pdf', bbox_inches='tight')
    return
def plotParams(zmin, zmax):
    """Plot the redshift evolution of the four LF parameters in a 2x2 grid.

    Panels: log10(phi*), M*, alpha (each via the evolution model ``T``
    evaluated at 1+z) and beta (a double power law in (1+z) evaluated
    inline from p_beta).  Writes the figure to 'evolutionFC.pdf'.

    Parameters
    ----------
    zmin, zmax : float
        Redshift range of all panels.  (Bug fix: these arguments were
        previously shadowed by hard-coded 0 and 15 and silently ignored.)
    """
    mpl.rcParams['font.size'] = '14'
    fig = plt.figure(figsize=(6, 6), dpi=100)
    # Manual margin geometry so that panel size stays fixed.
    K = 4
    nplots_x = 2
    nplots_y = 2
    factor = 2.0          # size of one side of one panel
    lbdim = 0.5 * factor  # size of left/bottom margin
    trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.1         # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb, bottom=lb, right=tr, top=tr,
                        wspace=whspace, hspace=whspace)
    zc = np.linspace(zmin, zmax, num=500)
    # Panel 1: log10(phi*)
    ax = fig.add_subplot(nplots_x, nplots_y, 1)
    ax.set_xlim(zmin, zmax)
    ax.set_ylim(-50, 0)
    ax.plot(zc, T(p_log10phiStar)(1+zc), c='k', lw=2)
    ax.set_ylabel(r'$\log_{10}\phi_*$')
    ax.set_xticklabels('')
    # Panel 2: M* (y axis on the right)
    ax = fig.add_subplot(nplots_x, nplots_y, 2)
    ax.yaxis.tick_right()
    ax.yaxis.set_ticks_position('both')
    ax.yaxis.set_label_position('right')
    ax.set_xlim(zmin, zmax)
    ax.set_ylim(-150, -20)
    ax.plot(zc, T(p_MStar)(1+zc), c='k', lw=2)
    ax.set_ylabel(r'$M_*$')
    ax.set_xticklabels('')
    # Panel 3: alpha
    ax = fig.add_subplot(nplots_x, nplots_y, 3)
    ax.set_xlim(zmin, zmax)
    ax.set_ylim(-8, -3)
    ax.plot(zc, T(p_alpha)(1+zc), c='k', lw=2)
    ax.set_ylabel(r'$\alpha$')
    # Panel 4: beta follows a double power law in (1+z), so it is
    # evaluated directly from its parameter tuple rather than via T.
    ax = fig.add_subplot(nplots_x, nplots_y, 4)
    ax.yaxis.tick_right()
    ax.yaxis.set_ticks_position('both')
    ax.yaxis.set_label_position('right')
    ax.set_xlim(zmin, zmax)
    ax.set_ylim(-2.6, -1.5)
    h, f0, z0, a, b = p_beta
    zeta = np.log10((1.0+zc)/(1.0+z0))
    beta = h + f0/(10.0**(a*zeta) + 10.0**(b*zeta))
    ax.plot(zc, beta, c='k', lw=2)
    ax.set_ylabel(r'$\beta$')
    plt.savefig('evolutionFC.pdf', bbox_inches='tight')
    # Restore the global font size used elsewhere in the module.
    mpl.rcParams['font.size'] = '22'
    return
# Script driver: regenerate the cumulative QSO density figure over the
# full redshift range.  The derivative plot is kept for reference but
# disabled by default.
plotRhoQso(0, 15)
# plotdRhoQsodz(0, 15)
|
gkulkarniREPO_NAMEQLFPATH_START.@QLF_extracted@QLF-master@rhoqso_test.py@.PATH_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/histogram2dcontour/_legendgrouptitle.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Legendgrouptitle(_BaseTraceHierarchyType):
    """Legend group title settings for a histogram2dcontour trace."""

    # Location of this node within the figure hierarchy.
    _parent_path_str = "histogram2dcontour"
    _path_str = "histogram2dcontour.legendgrouptitle"
    _valid_props = {"font", "text"}

    @property
    def font(self):
        """
        Sets this legend group's title font.

        The 'font' property is an instance of Font that may be
        specified as:
          - An instance of :class:`plotly.graph_objs.histogram2dcontour.legendgrouptitle.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties include 'color', 'family',
            'lineposition', 'shadow', 'size', 'style', 'textcase',
            'variant' and 'weight'; see the Font constructor for the
            meaning of each.

        Returns
        -------
        plotly.graph_objs.histogram2dcontour.legendgrouptitle.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        # Validation/coercion happens in the base class item assignment.
        self["font"] = val

    @property
    def text(self):
        """
        Sets the title of the legend group.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.
        """

    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Legendgrouptitle object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.histogram2dcon
            tour.Legendgrouptitle`
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.

        Returns
        -------
        Legendgrouptitle
        """
        super(Legendgrouptitle, self).__init__("legendgrouptitle")

        # Fast construction path used when a parent node re-creates us.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize `arg` into a plain dict we can consume below.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.histogram2dcontour.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram2dcontour.Legendgrouptitle`"""
            )

        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Explicit keyword arguments take precedence over entries in `arg`;
        # only non-None values are assigned.
        for prop_name, explicit in (("font", font), ("text", text)):
            value = arg.pop(prop_name, None)
            if explicit is not None:
                value = explicit
            if value is not None:
                self[prop_name] = value

        # Forward anything left over (unknown keys obey skip_invalid).
        self._process_kwargs(**dict(arg, **kwargs))

        # Construction finished; restore strict validation.
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@histogram2dcontour@_legendgrouptitle.py@.PATH_END.py
|
{
"filename": "covariance_run_EFIGI-like_with_lensing.py",
"repo_name": "DrexelLenser/Lenser",
"repo_path": "Lenser_extracted/Lenser-master/examples/covariance/covariance_run_EFIGI-like_with_lensing.py",
"type": "Python"
}
|
from covariance import Covariance
import numpy as np
from scipy.special import gamma
"""Covariance run 4: EFIGI-like with lensing fields"""
# Generate non-fit parameters.
# .. These values should be motivated to reflect actual data
# .. Postage stamp size
Nx = 255
Ny = 255
# .. Standard galaxy size (in pixels)
a = 100.
# .. I0
I0 = 5.e4
# .. noise1 and noise2
noise1 = 2.
gain = 4.75
noise2 = 1/np.sqrt(gain)
# .. Background
background = 0.
# Lensing parameters
# .. We will choose gamma1 and gamma2 and then get psi,ij.
# We will set kappa = 0 (we can arbitrarily make this choice due to the
# mass-sheet degeneracy)
# .. kappa
kappa = 0.
# .. gamma1
gamma1 = (0.05)/np.sqrt(2)
# .. gamma2
gamma2 = (0.05)/np.sqrt(2)
# .. psi,11
psi11 = kappa + gamma1
# .. psi,12
psi12 = gamma2
# .. psi,22
psi22 = kappa - gamma1
# .. We have to be careful when generating the flexion, because not all of psi,ijj
# are independent from one another. We do the following:
# (i). Choose F1 and F2
# (ii). Use F1 and F2 to calculate the angle of flexion, phi_F
# (iii). Assume a particular analytic lens model, which in this case is a
# singular isothermal sphere (SIS). This allows us to relate first and
# section flexion in an analytic way. We then use F1, F2, and phi_F to
# get G1 and G2
# (iv). Use F1, F2, G1, and G2 to get psi,ijk
# .. F1
F1 = (1.e-3)/np.sqrt(2)
# .. F2
F2 = (1.e-3)/np.sqrt(2)
# .. phi_F
# .. .. angle of flexion
phi_F = np.arctan2(F2,F1)
# .. G1
G1 = -((3*np.cos(3*phi_F))/np.cos(phi_F))*F1
# .. G2
G2 = -((3*np.sin(3*phi_F))/np.sin(phi_F))*F2
# .. psi,111
psi111 = (1./2.)*(3.*F1 + G1)
# .. psi,112
psi112 = (1./2.)*(F2 + G2)
# .. psi,122
psi122 = (1./2.)*(F1 - G1)
# .. psi,222
psi222 = (1./2.)*(3.*F2 - G2)
# Shape parameters
# .. Centroid (will be dithered within a pixel in Covariance)
xc = 0.5
yc = 0.5
# .. ns
ns = 4.
# .. phi
phi = np.pi/6
# .. q
# .. .. Axis ratio will be a function of both intrinsic ellipticity and shear
# .. .. We choose intrinsic ellipticity to have a magnitude of 0.2 (Schneider 1996)
eps_s = 0.2
q = (1+abs(eps_s))/(1-abs(eps_s))
# .. .. Let us calculate the "observed q" i.e. the q that Lenser will reconstruct.
# This q will be different from the above q, because nonzero shear will add
# to the intrinsic ellipticity.
# .. .. .. Get the components of the intrinsic ellipticity.
eps_s1, eps_s2 = eps_s*np.cos(2.*phi), eps_s*np.sin(2.*phi)
# .. .. .. Approximate observed ellipticity as eps = eps_s + gamma
eps1 = eps_s1 + gamma1
eps2 = eps_s2 + gamma2
eps = np.sqrt(eps1**2. + eps2**2.)
# .. .. .. Get observed q
q_obs = (1+abs(eps))/(1-abs(eps))
# .. .. Now let us get the "observed" phi. By the same token as for q, the orientation
# angle will be different from the intrinsic one in the presence of nonzero shear
phi_obs = np.arctan2(eps2,eps1)/2
# .. rs
rs = a/(np.sqrt(((1+q_obs**2.)/2)))*np.sqrt(gamma(2.*ns)/gamma(4.*ns))
# Gather list of fiducial parameter values
# will differ from actual input parameters in presence of nonzero shear
# i.e. q and phi change when shear is introduced
fid_params = np.array((0.5, 0.5, # Centroid dithered from 0 to 1, so the fiducial value is trivially 0.5
ns, rs,
q_obs, phi_obs,
psi111, psi112, psi122, psi222))
# Run Covariance
Cov4 = Covariance(Nx=Nx, Ny=Ny,
xc=xc, yc=yc, ns=ns, rs=rs, q=q, phi=phi,
psi2=[psi11,psi12,psi22],
psi3=[psi111,psi112,psi122,psi222],
marg=np.array((1,1,1,1,1,1,0,0,0,1,1,1,1)),
I0=I0, noise1=noise1, noise2=noise2, background=background,
N_iter=100,
fid_params=fid_params,
stamp_col_label='EFIGI-like_with_lensing')
# Simulate the stamp collection
Cov4.simulateGals()
# Run Lenser on this stamp collection
Cov4.lenserRun()
# Compute the covariance matrix for this stamp collection
Cov4_mat = Cov4.computeCovMat()
# Compute the 1-sigma uncertainty on each parameter
print(np.round(Cov4.error(Cov4_mat),7))
# Plot
Cov4.plot_error_matrix(Cov4_mat)
|
DrexelLenserREPO_NAMELenserPATH_START.@Lenser_extracted@Lenser-master@examples@covariance@covariance_run_EFIGI-like_with_lensing.py@.PATH_END.py
|
{
"filename": "test_enable_iterative_imputer.py",
"repo_name": "scikit-learn/scikit-learn",
"repo_path": "scikit-learn_extracted/scikit-learn-main/sklearn/experimental/tests/test_enable_iterative_imputer.py",
"type": "Python"
}
|
"""Tests for making sure experimental imports work as expected."""
import textwrap
import pytest
from sklearn.utils._testing import assert_run_python_script_without_output
from sklearn.utils.fixes import _IS_WASM
@pytest.mark.xfail(_IS_WASM, reason="cannot start subprocess")
def test_imports_strategies():
    """Check every import strategy for the experimental IterativeImputer gate.

    Each scenario runs in a fresh subprocess: Python caches imported
    modules, and manually purging sys.modules is discouraged and
    error-prone, so a child interpreter is the only reliable way to keep
    the cases independent.
    """
    warn_pattern = "IterativeImputer is experimental"

    # Importing the enabler first (optionally after other sklearn modules)
    # must succeed without output matching the experimental warning.
    working_scripts = [
        """
    from sklearn.experimental import enable_iterative_imputer
    from sklearn.impute import IterativeImputer
    """,
        """
    import sklearn.ensemble
    from sklearn.experimental import enable_iterative_imputer
    from sklearn.impute import IterativeImputer
    """,
    ]
    for script in working_scripts:
        assert_run_python_script_without_output(
            textwrap.dedent(script), pattern=warn_pattern
        )

    # Without the explicit enabler import, IterativeImputer must stay
    # hidden and raise an ImportError mentioning the experimental status.
    failing_script = f"""
    import pytest
    with pytest.raises(ImportError, match={warn_pattern!r}):
        from sklearn.impute import IterativeImputer
    import sklearn.experimental
    with pytest.raises(ImportError, match={warn_pattern!r}):
        from sklearn.impute import IterativeImputer
    """
    assert_run_python_script_without_output(
        textwrap.dedent(failing_script), pattern=warn_pattern
    )
|
scikit-learnREPO_NAMEscikit-learnPATH_START.@scikit-learn_extracted@scikit-learn-main@sklearn@experimental@tests@test_enable_iterative_imputer.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.