metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "tools.py",
"repo_name": "s-ilic/ECLAIR",
"repo_path": "ECLAIR_extracted/ECLAIR-master/likelihoods/CMB/Planck/PR4/hillipop_TTTEEE/tools.py",
"type": "Python"
}
|
# ------------------------------------------------------------------------------------------------
# Hillipop External Tools
# ------------------------------------------------------------------------------------------------
import os
import astropy.io.fits as fits
import numpy as np
import scipy.ndimage as nd
from numpy.linalg import *
# Labels for the temperature/polarization spectrum modes (TT, EE, TE, ET).
tagnames = ["TT", "EE", "TE", "ET"]
# ------------------------------------------------------------------------------------------------
# def create_bin_file(filename, lbinTT, lbinEE, lbinBB, lbinTE, lbinET)
# def SG(l, cl, nsm=5, lcut=0)
# def convert_to_stdev(sigma)
# def ctr_level(histo2d, lvl)
# class Bins(object)
# ------------------------------------------------------------------------------------------------
def create_bin_file(filename, lbinTT, lbinEE, lbinBB, lbinTE, lbinET):
    """
    Write a FITS file holding one LMIN/LMAX bin table per spectrum mode.

    lbin = [(lmin,lmax)] for each 15 cross-spectra
    """

    def _bin_table(lbin):
        # Split the (lmin, lmax) pairs into two double-precision columns.
        low = np.array([bounds[0] for bounds in lbin])
        high = np.array([bounds[1] for bounds in lbin])
        cols = [
            fits.Column(name="LMIN", array=low, format="1D"),
            fits.Column(name="LMAX", array=high, format="1D"),
        ]
        return fits.BinTableHDU.from_columns(cols, header=fits.Header())

    # Primary HDU first, then one binary table per mode (TT, EE, BB, TE, ET).
    hdus = [fits.PrimaryHDU(header=fits.Header())]
    for lbin in (lbinTT, lbinEE, lbinBB, lbinTE, lbinET):
        hdus.append(_bin_table(lbin))
    fits.HDUList(hdus).writeto(filename, overwrite=True)
# smooth cls before Cov computation
def SG(l, cl, nsm=5, lcut=0):
    """Smooth a spectrum with a 1D Gaussian filter above multipole `lcut`.

    Values below `lcut` are left untouched.  The `l` argument is kept for
    interface compatibility and is not used.
    """
    out = np.copy(cl)
    # Keep a margin of 2*nsm samples below lcut (when available) so the filter
    # sees real context there and edge effects do not leak into the kept range.
    shift = 2 * nsm if lcut >= 2 * nsm else 0
    smoothed = nd.gaussian_filter1d(out[max(0, lcut - shift):], nsm)
    out[lcut:] = smoothed[shift:]
    return out
def convert_to_stdev(sigma):
    """
    Given a grid of likelihood values, convert them to cumulative
    standard deviation. This is useful for drawing contours from a
    grid of likelihoods.
    """
    original_shape = sigma.shape
    flat = sigma.ravel()
    # Sort in decreasing order, accumulate, normalize, then scatter the
    # cumulative values back to their original grid positions.
    order = np.argsort(flat)[::-1]
    inverse_order = np.argsort(order)
    cumulative = np.cumsum(flat[order])
    cumulative = cumulative / cumulative[-1]
    return cumulative[inverse_order].reshape(original_shape)
def ctr_level(histo2d, lvl):
    """
    Extract the contours for the 2d plots
    """
    # Ascending sort of a float copy of the histogram values.
    flat = np.sort(histo2d.flatten() * 1.0)
    # Normalized cumulative sum taken from the largest value downwards.
    cum = np.cumsum(flat[::-1])
    cum /= cum[-1]
    # For each requested level, index back (from the top) into the sorted values.
    idx = np.searchsorted(cum, lvl)
    return flat[-idx]
# ------------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------
# Binning
# ------------------------------------------------------------------------------------------------
class Bins(object):
    """
    Multipole binning scheme.

    lmins : list of integers
        Lower bound of the bins
    lmaxs : list of integers
        Upper bound of the bins
    """

    def __init__(self, lmins, lmaxs):
        if len(lmins) != len(lmaxs):
            raise ValueError("Incoherent inputs")
        lmins = np.asarray(lmins)
        lmaxs = np.asarray(lmaxs)
        # Keep only bins whose bounds both start at l >= 2.
        keep = (lmins >= 2) & (lmaxs >= 2)
        self.lmins = lmins[keep]
        self.lmaxs = lmaxs[keep]
        self._derive_ext()

    @classmethod
    def fromdeltal(cls, lmin, lmax, delta_ell):
        # Build contiguous bins of constant width delta_ell.
        nbins = (lmax - lmin + 1) // delta_ell
        starts = lmin + delta_ell * np.arange(nbins)
        return cls(starts, starts + delta_ell - 1)

    def _derive_ext(self):
        # Validate bounds and refresh the derived quantities.
        if np.any(self.lmins > self.lmaxs):
            raise ValueError("Incoherent inputs")
        self.lmin = self.lmins.min()
        self.lmax = self.lmaxs.max()
        if self.lmin < 1:
            raise ValueError("Input lmin is less than 1.")
        if self.lmax < self.lmin:
            raise ValueError("Input lmax is less than lmin.")
        self.nbins = len(self.lmins)
        self.lbin = 0.5 * (self.lmins + self.lmaxs)  # bin centers
        self.dl = self.lmaxs - self.lmins + 1        # bin widths

    def bins(self):
        """Return the (lmins, lmaxs) bound arrays."""
        return self.lmins, self.lmaxs

    def cut_binning(self, lmin, lmax):
        """Keep only the bins fully contained in [lmin, lmax]."""
        inside = np.flatnonzero((self.lmins >= lmin) & (self.lmaxs <= lmax))
        self.lmins = self.lmins[inside]
        self.lmaxs = self.lmaxs[inside]
        self._derive_ext()

    def _bin_operators(self, Dl=False, cov=False):
        # Per-multipole weight: l(l+1)/2pi when Dl, otherwise flat.
        if Dl:
            ells = np.arange(self.lmax + 1)
            weight = ells * (ells + 1) / (2 * np.pi)
        else:
            weight = np.ones(self.lmax + 1)
        # p bins a spectrum (weighted average); q is the corresponding unbinning
        # operator, with an extra 1/width when binning a covariance.
        p = np.zeros((self.nbins, self.lmax + 1))
        q = np.zeros((self.lmax + 1, self.nbins))
        for ib in range(self.nbins):
            lo = self.lmins[ib]
            hi = self.lmaxs[ib]
            width = hi - lo + 1
            sl = slice(lo, hi + 1)
            p[ib, sl] = weight[sl] / width
            q[sl, ib] = (1 / weight[sl] / width) if cov else (1 / weight[sl])
        return p, q

    def bin_spectra(self, spectra, Dl=False):
        """
        Average spectra with defined bins
        can be weighted by `l(l+1)/2pi`.
        Return Cb
        """
        spectra = np.asarray(spectra)
        # Truncate the operator if the spectra stop short of self.lmax.
        minlmax = min(spectra.shape[-1] - 1, self.lmax)
        p, _ = self._bin_operators(Dl=Dl)
        return np.dot(spectra[..., : minlmax + 1], p.T[: minlmax + 1, ...])

    def bin_covariance(self, clcov):
        """
        Average covariance with defined bins
        """
        p, q = self._bin_operators(cov=True)
        return np.matmul(p, np.matmul(clcov, q))
|
s-ilicREPO_NAMEECLAIRPATH_START.@ECLAIR_extracted@ECLAIR-master@likelihoods@CMB@Planck@PR4@hillipop_TTTEEE@tools.py@.PATH_END.py
|
{
"filename": "t-test.md",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/doc/unconverted/python/t-test.md",
"type": "Markdown"
}
|
---
jupyter:
jupytext:
notebook_metadata_filter: all
text_representation:
extension: .md
format_name: markdown
format_version: '1.1'
jupytext_version: 1.1.1
kernelspec:
display_name: Python 2
language: python
name: python2
plotly:
description: Learn how to perform a one sample and two sample t-test using Python.
display_as: statistics
has_thumbnail: false
language: python
layout: base
name: T-Test
order: 7
page_type: example_index
permalink: python/t-test/
thumbnail: /images/static-image
---
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Imports
The tutorial below imports [NumPy](http://www.numpy.org/), [Pandas](https://plot.ly/pandas/intro-to-pandas-tutorial/), and [SciPy](https://www.scipy.org/).
```python
import plotly.plotly as py
import plotly.graph_objs as go
from plotly.tools import FigureFactory as FF
import numpy as np
import pandas as pd
import scipy
```
#### Generate Data
Let us generate some random data from the `Normal Distribution`. We will sample 50 points from a normal distribution with mean $\mu = 0$ and variance $\sigma^2 = 1$ and from another with mean $\mu = 2$ and variance $\sigma^2 = 1$.
```python
data1 = np.random.normal(0, 1, size=50)
data2 = np.random.normal(2, 1, size=50)
```
The two normal probability distribution functions (p.d.f) stacked on top of each other look like this:
```python
x = np.linspace(-4, 4, 160)
y1 = scipy.stats.norm.pdf(x)
y2 = scipy.stats.norm.pdf(x, loc=2)
trace1 = go.Scatter(
x = x,
y = y1,
mode = 'lines+markers',
name='Mean of 0'
)
trace2 = go.Scatter(
x = x,
y = y2,
mode = 'lines+markers',
name='Mean of 2'
)
data = [trace1, trace2]
py.iplot(data, filename='normal-dists-plot')
```
#### One Sample T Test
A `One Sample T-Test` is a statistical test used to evaluate the null hypothesis that the mean $m$ of a 1D sample dataset of independent observations is equal to the true mean $\mu$ of the population from which the data is sampled. In other words, our null hypothesis is that
$$
\begin{align*}
m = \mu
\end{align*}
$$
For our T-test, we will be using a significance level of `0.05`. On the matter of doing ethical science, it is good practice to always state the chosen significance level for a given test _before_ actually conducting the test. This is meant to ensure that the analyst does not modify the significance level for the purpose of achieving a desired outcome.
For more information on the choice of 0.05 for a significance level, check out [this page](http://www.investopedia.com/exam-guide/cfa-level-1/quantitative-methods/hypothesis-testing.asp).
```python
true_mu = 0
onesample_results = scipy.stats.ttest_1samp(data1, true_mu)
matrix_onesample = [
['', 'Test Statistic', 'p-value'],
['Sample Data', onesample_results[0], onesample_results[1]]
]
onesample_table = FF.create_table(matrix_onesample, index=True)
py.iplot(onesample_table, filename='onesample-table')
```
Since our p-value is greater than our significance level of $0.05$, we do not have sufficient evidence to reject the null hypothesis. This is our expected result because the data was collected from a normal distribution.
#### Two Sample T Test
If we have two independently sampled datasets (with equal variance) and are interested in exploring the question of whether the true means $\mu_1$ and $\mu_2$ are identical, that is, if the data were sampled from the same population, we would use a `Two Sample T-Test`.
Typically when a researcher in a field is interested in the effect of a given test variable between two populations, they will take one sample from each population and will note them as the experimental group and the control group. The experimental group is the sample which will receive the variable being tested, while the control group will not.
This test variable is observed (eg. blood pressure) for all the subjects and a two sided t-test can be used to investigate if the two groups of subjects were sampled from populations with the same true mean, i.e. "Does the drug have an effect?"
```python
twosample_results = scipy.stats.ttest_ind(data1, data2)
matrix_twosample = [
['', 'Test Statistic', 'p-value'],
['Sample Data', twosample_results[0], twosample_results[1]]
]
twosample_table = FF.create_table(matrix_twosample, index=True)
py.iplot(twosample_table, filename='twosample-table')
```
Since our p-value is much less than our significance level of 0.05, we have strong evidence to reject the null hypothesis of identical means. This is in alignment with our setup, since we sampled from two different normal pdfs with different means.
```python
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'python-T-Test.ipynb', 'python/t-test/', 'T-Test | plotly',
'Learn how to perform a one sample and two sample t-test using Python.',
title='T-Test in Python. | plotly',
name='T-Test',
language='python',
page_type='example_index', has_thumbnail='false', display_as='statistics', order=7,
ipynb= '~notebook_demo/115')
```
```python
```
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@doc@unconverted@python@t-test.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/packages/vaex-jupyter/vaex/jupyter/vendor/__init__.py",
"type": "Python"
}
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@packages@vaex-jupyter@vaex@jupyter@vendor@__init__.py@.PATH_END.py
|
|
{
"filename": "evaluation.py",
"repo_name": "DLR-RM/stable-baselines3",
"repo_path": "stable-baselines3_extracted/stable-baselines3-master/stable_baselines3/common/evaluation.py",
"type": "Python"
}
|
import warnings
from typing import Any, Callable, Optional, Union
import gymnasium as gym
import numpy as np
from stable_baselines3.common import type_aliases
from stable_baselines3.common.vec_env import DummyVecEnv, VecEnv, VecMonitor, is_vecenv_wrapped
def evaluate_policy(
    model: "type_aliases.PolicyPredictor",
    env: Union[gym.Env, VecEnv],
    n_eval_episodes: int = 10,
    deterministic: bool = True,
    render: bool = False,
    callback: Optional[Callable[[dict[str, Any], dict[str, Any]], None]] = None,
    reward_threshold: Optional[float] = None,
    return_episode_rewards: bool = False,
    warn: bool = True,
) -> Union[tuple[float, float], tuple[list[float], list[int]]]:
    """
    Runs policy for ``n_eval_episodes`` episodes and returns average reward.
    If a vector env is passed in, this divides the episodes to evaluate onto the
    different elements of the vector env. This static division of work is done to
    remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more
    details and discussion.

    .. note::
        If environment has not been wrapped with ``Monitor`` wrapper, reward and
        episode lengths are counted as it appears with ``env.step`` calls. If
        the environment contains wrappers that modify rewards or episode lengths
        (e.g. reward scaling, early episode reset), these will affect the evaluation
        results as well. You can avoid this by wrapping environment with ``Monitor``
        wrapper before anything else.

    :param model: The RL agent you want to evaluate. This can be any object
        that implements a `predict` method, such as an RL algorithm (``BaseAlgorithm``)
        or policy (``BasePolicy``).
    :param env: The gym environment or ``VecEnv`` environment.
    :param n_eval_episodes: Number of episode to evaluate the agent
    :param deterministic: Whether to use deterministic or stochastic actions
    :param render: Whether to render the environment or not
    :param callback: callback function to do additional checks,
        called after each step. Gets locals() and globals() passed as parameters.
    :param reward_threshold: Minimum expected reward per episode,
        this will raise an error if the performance is not met
    :param return_episode_rewards: If True, a list of rewards and episode lengths
        per episode will be returned instead of the mean.
    :param warn: If True (default), warns user about lack of a Monitor wrapper in the
        evaluation environment.
    :return: Mean reward per episode, std of reward per episode.
        Returns ([float], [int]) when ``return_episode_rewards`` is True, first
        list containing per-episode rewards and second containing per-episode lengths
        (in number of steps).
    """
    # NOTE: locals() are handed to the user callback below, so the local
    # variable names in this function are part of its de-facto interface.
    is_monitor_wrapped = False
    # Avoid circular import
    from stable_baselines3.common.monitor import Monitor

    if not isinstance(env, VecEnv):
        # Wrap a plain gym env so the rest of the code can assume a VecEnv.
        env = DummyVecEnv([lambda: env])  # type: ignore[list-item, return-value]

    is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]

    if not is_monitor_wrapped and warn:
        warnings.warn(
            "Evaluation environment is not wrapped with a ``Monitor`` wrapper. "
            "This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. "
            "Consider wrapping environment first with ``Monitor`` wrapper.",
            UserWarning,
        )

    n_envs = env.num_envs
    episode_rewards = []
    episode_lengths = []

    # Per-sub-env counters of completed episodes.
    episode_counts = np.zeros(n_envs, dtype="int")
    # Divides episodes among different sub environments in the vector as evenly as possible
    episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype="int")

    # Running reward/length accumulators for the episode in progress per sub-env.
    current_rewards = np.zeros(n_envs)
    current_lengths = np.zeros(n_envs, dtype="int")
    observations = env.reset()
    states = None
    episode_starts = np.ones((env.num_envs,), dtype=bool)
    while (episode_counts < episode_count_targets).any():
        actions, states = model.predict(
            observations,  # type: ignore[arg-type]
            state=states,
            episode_start=episode_starts,
            deterministic=deterministic,
        )
        new_observations, rewards, dones, infos = env.step(actions)
        current_rewards += rewards
        current_lengths += 1
        for i in range(n_envs):
            if episode_counts[i] < episode_count_targets[i]:
                # unpack values so that the callback can access the local variables
                reward = rewards[i]
                done = dones[i]
                info = infos[i]
                episode_starts[i] = done

                if callback is not None:
                    callback(locals(), globals())

                if dones[i]:
                    if is_monitor_wrapped:
                        # Atari wrapper can send a "done" signal when
                        # the agent loses a life, but it does not correspond
                        # to the true end of episode
                        if "episode" in info.keys():
                            # Do not trust "done" with episode endings.
                            # Monitor wrapper includes "episode" key in info if environment
                            # has been wrapped with it. Use those rewards instead.
                            episode_rewards.append(info["episode"]["r"])
                            episode_lengths.append(info["episode"]["l"])
                            # Only increment at the real end of an episode
                            episode_counts[i] += 1
                    else:
                        episode_rewards.append(current_rewards[i])
                        episode_lengths.append(current_lengths[i])
                        episode_counts[i] += 1
                    # Reset the accumulators for the next episode on this sub-env.
                    current_rewards[i] = 0
                    current_lengths[i] = 0

        observations = new_observations

        if render:
            env.render()

    mean_reward = np.mean(episode_rewards)
    std_reward = np.std(episode_rewards)
    if reward_threshold is not None:
        assert mean_reward > reward_threshold, "Mean reward below threshold: " f"{mean_reward:.2f} < {reward_threshold:.2f}"
    if return_episode_rewards:
        return episode_rewards, episode_lengths
    return mean_reward, std_reward
|
DLR-RMREPO_NAMEstable-baselines3PATH_START.@stable-baselines3_extracted@stable-baselines3-master@stable_baselines3@common@evaluation.py@.PATH_END.py
|
{
"filename": "test_output.py",
"repo_name": "rmjarvis/Piff",
"repo_path": "Piff_extracted/Piff-main/tests/test_output.py",
"type": "Python"
}
|
# Copyright (c) 2016 by Mike Jarvis and the other collaborators on GitHub at
# https://github.com/rmjarvis/Piff All rights reserved.
#
# Piff is free software: Redistribution and use in source and binary forms
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import os
import shutil
import numpy as np
import piff
from piff_test_helper import timer
@timer
def test_ensure_dir():
    """Check that piff.util.ensure_dir creates the parent directory of a
    target path, and that calling it again is a harmless no-op.
    """
    target_dir = os.path.join('output', 'test_dir')
    # Start from a clean slate.
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
    assert not os.path.exists(target_dir)

    target_file = os.path.join(target_dir, 'test_file')
    # ensure_dir should create the directory needed to write target_file.
    piff.util.ensure_dir(target_file)
    assert os.path.exists(target_dir)
    assert os.path.isdir(target_dir)

    # write something to the file
    with open(target_file, 'w') as fout:
        fout.write('test')

    # A second call must leave the existing file untouched.
    piff.util.ensure_dir(target_file)
    assert os.path.exists(target_dir)
    assert os.path.isdir(target_dir)
    with open(target_file) as fin:
        content = fin.read()
    print(content)
    assert content == 'test'
@timer
def test_base_output():
    """Exercise the base Output class directly.

    A bit gratuitous, since no one should ever call these, but check that the
    trivial implementation (or NotImplementedErrors) behave as expected.
    """
    config = { 'file_name' : 'dummy_file' }
    output = piff.Output()
    # parseKwargs is a pass-through in the base class.
    assert output.parseKwargs(config) == config
    # write and read are abstract in the base class.
    np.testing.assert_raises(NotImplementedError, output.write, None)
    np.testing.assert_raises(NotImplementedError, output.read)
@timer
def test_invalid():
    """Check error handling for invalid output types and type registration."""
    # An unknown output type is rejected.
    config = { 'type': 'invalid' }
    with np.testing.assert_raises(ValueError):
        piff.Output.process(config)

    # Subclasses without a usable _type_name are not registered.
    class NoOutput1(piff.Output):
        pass
    assert NoOutput1 not in piff.Output.valid_output_types.values()

    class NoOutput2(piff.Output):
        _type_name = None
    assert NoOutput2 not in piff.Output.valid_output_types.values()

    # A valid _type_name registers the class under that name.
    class ValidOutput1(piff.Output):
        _type_name = 'valid'
    assert ValidOutput1 in piff.Output.valid_output_types.values()
    assert piff.Output.valid_output_types['valid'] == ValidOutput1

    # Re-using a registered name raises, whether set explicitly...
    with np.testing.assert_raises(ValueError):
        class ValidOutput2(piff.Output):
            _type_name = 'valid'

    # ... or inherited from an already-registered parent.
    with np.testing.assert_raises(ValueError):
        class ValidOutput3(ValidOutput1):
            pass
if __name__ == '__main__':
    # Allow running the tests directly without pytest.
    test_ensure_dir()
    test_base_output()
    test_invalid()
|
rmjarvisREPO_NAMEPiffPATH_START.@Piff_extracted@Piff-main@tests@test_output.py@.PATH_END.py
|
{
"filename": "test_simulations_rt1d_const_flux.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/tests/test_simulations_rt1d_const_flux.py",
"type": "Python"
}
|
"""
test_const_ionization.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Thu Oct 16 14:46:48 MDT 2014
Description:
"""
import ares
import numpy as np
from ares.physics.CrossSections import PhotoIonizationCrossSection as sigma
# Seconds-per-year conversion constant taken from ares.
s_per_yr = ares.physics.Constants.s_per_yr

# Parameters for a single-cell, plane-parallel RT test problem with a
# monochromatic source placed just above the HI ionization threshold.
pars = \
{
    'problem_type': 0,
    'grid_cells': 1,
    'initial_ionization': [1.-1e-6, 1e-6],
    #'initial_temperature': 1e4,# make cold so collisional ionization is negligible
    'isothermal': False,
    'stop_time': 10.0,
    'plane_parallel': True,
    'recombination': False,  # To match analytical solution
    'source_type': 'toy',
    'source_qdot': 1e4,  # solver fails when this is large (like 1e10)
    'source_lifetime': 1e10,
    'source_E': [13.60000001],
    'source_LE': [1.0],
    'secondary_ionization': 0,
    'collisional_ionization': 0,
    'logdtDataDump': 0.5,
    'initial_timestep': 1e-15,
}
def test(rtol=1e-2):
    """Compare the numerical RaySegment ionization history against the
    analytic exponential solution for a constant ionizing flux.

    Parameters
    ----------
    rtol : float
        Relative tolerance allowed between numerical and analytic x_HII.
    """
    # Numerical solution
    sim = ares.simulations.RaySegment(**pars)
    sim.run()
    t, xHII = sim.get_cell_evolution(field='h_2')

    # Analytic solution: exponential approach to full ionization,
    # x(t) = 1 - (1 - x0) * exp(-Gamma * t), with Gamma = qdot * sigma0.
    sigma0 = sigma(pars['source_E'][0])
    qdot = pars['source_qdot']
    Gamma = qdot * sigma0
    xi0 = pars['initial_ionization'][1]
    C = 1. - xi0

    def xi(t, Gamma=Gamma):
        return 1. - C * np.exp(-Gamma * t)

    # xi is a pure NumPy expression, so evaluate it vectorized instead of
    # mapping element by element.  (Also dropped an unused `err` local that
    # duplicated the tolerance check done by np.allclose below.)
    xHII_anyl = xi(np.asarray(t))

    # Only test accuracy at somewhat later times (skip t == 0).
    mask = t > 0
    assert np.allclose(xHII[mask], xHII_anyl[mask], rtol=rtol, atol=0)
if __name__ == '__main__':
    # Allow running this test directly without pytest.
    test()
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@tests@test_simulations_rt1d_const_flux.py@.PATH_END.py
|
{
"filename": "_reversescale.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/marker/line/_reversescale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ReversescaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the `bar.marker.line.reversescale` property."""

    def __init__(
        self, plotly_name="reversescale", parent_name="bar.marker.line", **kwargs
    ):
        # Default edit type is "plot" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "plot")
        super(ReversescaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@marker@line@_reversescale.py@.PATH_END.py
|
{
"filename": "xla_metadata_test.py",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/tests/xla_metadata_test.py",
"type": "Python"
}
|
# Copyright 2024 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests whether the frontend attributes added by the context manager are
correctly propagated to the jaxpr and mlir.
"""
from absl.testing import absltest
import jax
from jax._src import config
from jax._src import test_util as jtu
from jax._src.lax import lax
from jax.experimental.xla_metadata import set_xla_metadata
import jax.numpy as jnp
# Parse JAX config flags from the absl command line before tests run.
config.parse_flags_with_absl()
class XlaMetadataTest(jtu.JaxTestCase):
    """Checks that ``set_xla_metadata`` attaches frontend attributes to jaxpr
    equations and to the lowered StableHLO/MHLO text, across jit, control
    flow, grad, and vmap transformations."""

    def test_f_jitted(self):
        @jax.jit
        def f(a, b):
            with set_xla_metadata(a="b"):
                return a + b

        f_jaxpr = jax.make_jaxpr(f)(1, 2)
        eqns = f_jaxpr.eqns
        # Equations traced inside the context carry the attribute dict.
        for eq in eqns[1:]:
            self.assertDictEqual(eq.ctx.attributes, {"a": "b"})
        f_lowered_text = f.lower(1.0, 2.0).as_text()
        self.assertIn('mhlo.frontend_attributes = {a = "b"}', f_lowered_text)

    def test_f_jitted_bool_attributes(self):
        @jax.jit
        def f(a, b):
            with set_xla_metadata(a=True):
                return a + b

        # Boolean values are serialized to the strings "true"/"false".
        f_lowered_text = f.lower(1.0, 2.0).as_text()
        self.assertIn('mhlo.frontend_attributes = {a = "true"}', f_lowered_text)

    def test_f_jitted_int_attributes(self):
        @jax.jit
        def f(a, b):
            with set_xla_metadata(a=10):
                return a + b

        # Integer values are serialized to their decimal string form.
        f_lowered_text = f.lower(1.0, 2.0).as_text()
        self.assertIn('mhlo.frontend_attributes = {a = "10"}', f_lowered_text)

    def test_f_nonjitted(self):
        def f_add(a, b):
            return lax.add(a, b)

        arg1 = jnp.arange(2)
        with set_xla_metadata(a="b"):
            self.assertIn(
                'mhlo.frontend_attributes = {a = "b"}',
                jax.jit(f_add).lower(arg1, arg1).as_text(),
            )

    def test_f_attributes_overwrite(self):
        @jax.jit
        def g(a, b):
            return a * b

        with set_xla_metadata(a="b"):
            # The innermost context wins for a key set at both levels.
            @jax.jit
            def f(a, b):
                with set_xla_metadata(a="c"):
                    return a + b

            f_lowered_text = f.lower(1.0, 2.0).as_text()
            self.assertIn('mhlo.frontend_attributes = {a = "c"}', f_lowered_text)
            self.assertIn(
                'mhlo.frontend_attributes = {a = "b"}', g.lower(1.0, 2.0).as_text()
            )
        # Outside the context manager no attributes are attached.
        # NOTE(review): block extents reconstructed from stripped indentation —
        # this assert is assumed to sit OUTSIDE the ``with`` above (otherwise it
        # would contradict the preceding assertIn); confirm against upstream.
        self.assertNotIn("mhlo.frontend_attributes", g.lower(1.0, 2.0).as_text())

    def test_f_attributes_merge(self):
        with set_xla_metadata(key1="val1"):
            # Distinct keys from nested contexts are merged.
            @jax.jit
            def f(a, b):
                with set_xla_metadata(key2="val2"):
                    return a + b

            f_lowered_text = f.lower(1.0, 2.0).as_text()
            self.assertIn(
                'mhlo.frontend_attributes = {key1 = "val1", key2 = "val2"}',
                f_lowered_text,
            )

    def test_attr_caching_jit(self):
        @jax.jit
        def f_add_jit(a, b):
            return a + b

        with set_xla_metadata(b="c"):
            f_add_lowered1 = f_add_jit.lower(2.0, 3.0).as_text()
        # Expect no attributes in the mlir.
        f_add_lowered2 = f_add_jit.lower(1.0, 2.0).as_text()
        with set_xla_metadata(c="d"):
            f_add_lowered3 = f_add_jit.lower(4.0, 5.0).as_text()
        # Each lowering reflects only the context active when it was produced,
        # i.e. attributes are not leaked through the jit cache.
        self.assertIn('mhlo.frontend_attributes = {b = "c"}', f_add_lowered1)
        self.assertNotIn("mhlo.frontend_attributes = {}", f_add_lowered2)
        self.assertNotIn('mhlo.frontend_attributes = {b = "c"}', f_add_lowered2)
        self.assertNotIn('mhlo.frontend_attributes = {c = "d"}', f_add_lowered2)
        self.assertIn('mhlo.frontend_attributes = {c = "d"}', f_add_lowered3)

    def test_attr_caching_nonjit(self):
        def f_add(a, b):
            return lax.add(a, b)

        arg1 = jnp.arange(2)
        arg2 = jnp.arange(2) + 1
        arg3 = jnp.arange(2) + 2
        with set_xla_metadata(b="c"):
            self.assertIn(
                'mhlo.frontend_attributes = {b = "c"}',
                jax.jit(f_add).lower(arg1, arg1).as_text(),
            )
        # Expect no attributes in the jaxpr.
        self.assertNotIn(
            "mhlo.frontend_attributes",
            jax.jit(f_add).lower(arg2, arg2).as_text(),
        )
        with set_xla_metadata(c="d"):
            self.assertIn(
                'mhlo.frontend_attributes = {c = "d"}',
                jax.jit(f_add).lower(arg3, arg3).as_text(),
            )

    def test_axpy(self):
        @jax.jit
        def axpy(a, x, y):
            with set_xla_metadata(a="b"):
                return a * x + y

        # Both ops traced inside the context carry the attribute.
        for line in axpy.lower(1.0, 2.0, 3.0).as_text().split("\n"):
            if "stablehlo.multiply" in line:
                self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
            if "stablehlo.add" in line:
                self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)

    def test_while(self):
        @jax.jit
        def f(a):
            with set_xla_metadata(a="b"):
                return jax.lax.while_loop(lambda x: x < 10, lambda x: x + 1, a)

        self.assertIn(
            'mhlo.frontend_attributes = {a = "b"}', f.lower(1.0).as_text()
        )

    def test_while_condition_body(self):
        @jax.jit
        def f_condition(x):
            with set_xla_metadata(a="b"):
                return x < 10

        @jax.jit
        def f_body(x):
            with set_xla_metadata(a="c"):
                return x + 1

        @jax.jit
        def while_fn(a):
            return jax.lax.while_loop(f_condition, f_body, a)

        # Condition and body keep their own (different) attribute values.
        for line in while_fn.lower(1.0).as_text().split("\n"):
            if "stablehlo.compare" in line:
                self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
            if "stablehlo.add" in line:
                self.assertIn('mhlo.frontend_attributes = {a = "c"}', line)

    def test_nested_jit(self):
        @jax.jit
        def f(x, y):
            with set_xla_metadata(a="b"):
                z = x * y

                @jax.jit
                def g(z):
                    with set_xla_metadata(c="d"):
                        return z**2 + 1

                return g(z)

        # Attributes from the outer and inner contexts are merged on inner ops.
        self.assertIn(
            'mhlo.frontend_attributes = {a = "b", c = "d"}',
            f.lower(1.0, 2.0).as_text(),
        )

    def test_grad(self):
        @jax.jit
        def f(x, y):
            with set_xla_metadata(a="b"):
                return jax.grad(lambda x: x**3 + y**2 + jnp.sin(x))(x)

        f_jaxpr = jax.make_jaxpr(f)(1.0, 2.0)
        eqns = f_jaxpr.eqns
        for eq in eqns[1:]:
            self.assertDictEqual(eq.ctx.attributes, {"a": "b"})
        self.assertIn(
            'mhlo.frontend_attributes = {a = "b"}', f.lower(1.0, 2.).as_text()
        )

    def test_grad_outside_ctx(self):
        @jax.jit
        def f(x):
            with set_xla_metadata(a="b"):
                return x**3 + x**2 + jnp.sin(x)

        # Attributes propagate to the ops generated by differentiation.
        grad_fn = jax.jit(jax.grad(f))
        for line in grad_fn.lower(1.0).as_text().split("\n"):
            if "stablehlo.cosine" in line:
                self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)
            if "call @integer_pow" in line:
                self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)

    def test_vmap(self):
        dct = {"a": 0.0, "b": jnp.arange(5.0)}

        @jax.jit
        def f(dct, x):
            with set_xla_metadata(a="b"):
                return dct["a"] + dct["b"] + x

        with set_xla_metadata(a="d"):
            f_vmap = jax.vmap(f, in_axes=({"a": None, "b": 0}, None))
            f_jaxpr = jax.make_jaxpr(f_vmap)(dct, 1.0)
            eqns = f_jaxpr.eqns
            for eq in eqns[1:]:
                self.assertDictEqual(eq.ctx.attributes, {"a": "d"})

        @jax.jit
        def f2(x, y):
            with set_xla_metadata(a="b"):
                return (x + y, y * 2.0)

        f_vmap_jaxpr = jax.make_jaxpr(jax.vmap(f2, in_axes=(0, None)))
        self.assertIn(
            'mhlo.frontend_attributes = {a = "b"}',
            f_vmap_jaxpr.lower(jnp.arange(5.0), 1.0).as_text(),
        )

    def test_multiple_instructions(self):
        @jax.jit
        def f(x, a):
            y = jnp.matmul(x, x)
            with set_xla_metadata(a="b"):
                return y + a

        for line in f.lower(jnp.arange(5.0), 1.0).as_text().split("\n"):
            # matmul doesn't have attributes
            if "stablehlo.dot_general" in line:
                self.assertNotIn('mhlo.frontend_attributes = {a = "b"}', line)
            if "stablehlo.add" in line:
                self.assertIn('mhlo.frontend_attributes = {a = "b"}', line)

    def test_softmax(self):
        @jax.jit
        def f(x):
            with set_xla_metadata(a="b"):
                return jax.nn.softmax(x)

        self.assertIn(
            'mhlo.frontend_attributes = {a = "b"}', f.lower(jnp.arange(5.0)).as_text()
        )
if __name__ == "__main__":
    # Use JAX's test loader when invoked directly.
    absltest.main(testLoader=jtu.JaxTestLoader())
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@xla_metadata_test.py@.PATH_END.py
|
{
"filename": "_tickfont.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/carpet/aaxis/_tickfont.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "carpet.aaxis"
_path_str = "carpet.aaxis.tickfont"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
"Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
"Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# lineposition
# ------------
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
# shadow
# ------
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# style
# -----
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
# textcase
# --------
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
# variant
# -------
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
# weight
# ------
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Tickfont object
Sets the tick font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.carpet.aaxis.Tickfont`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.carpet.aaxis.Tickfont
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.aaxis.Tickfont`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("lineposition", None)
_v = lineposition if lineposition is not None else _v
if _v is not None:
self["lineposition"] = _v
_v = arg.pop("shadow", None)
_v = shadow if shadow is not None else _v
if _v is not None:
self["shadow"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
_v = arg.pop("style", None)
_v = style if style is not None else _v
if _v is not None:
self["style"] = _v
_v = arg.pop("textcase", None)
_v = textcase if textcase is not None else _v
if _v is not None:
self["textcase"] = _v
_v = arg.pop("variant", None)
_v = variant if variant is not None else _v
if _v is not None:
self["variant"] = _v
_v = arg.pop("weight", None)
_v = weight if weight is not None else _v
if _v is not None:
self["weight"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@carpet@aaxis@_tickfont.py@.PATH_END.py
|
{
"filename": "test_svc.py",
"repo_name": "spicy-oil/hfs_fit",
"repo_path": "hfs_fit_extracted/hfs_fit-master/tests/test_svc.py",
"type": "Python"
}
|
"""Integration tests for the service as a whole."""
import os
from unittest.mock import patch
from numpy import testing
from hfs_fit import hfs
<<<<<<< HEAD
@patch('hfs_fit.hfs_fit.get_user_levels')
@patch('hfs_fit.hfs_fit.get_user_wavenumber')
@patch('hfs_fit.hfs_fit.get_user_noise')
=======
@patch('hfs_fit.get_user_levels')
@patch('hfs_fit.get_user_wavenumber')
@patch('hfs_fit.get_user_noise')
>>>>>>> b10b16cc946083ac2d3cf37242d37dcf2391a94d
def test_hfs(mock_user_noise, mock_user_wavenumber, mock_user_levels):
"""Run a full test of the script."""
# setup
mock_user_levels.return_value = 2, 2
mock_user_noise.return_value = 37945, 37975
mock_user_wavenumber.return_value = 'z5S2', 'a5P2', 37978, 37980
# run svc
obj = hfs('tests/sample_spectrum.txt', 'tests/fitLog.xlsx', nuclearSpin = 3.5)
obj.NewFit()
obj.PlotGuess()
obj.Optimise(2)
# validate
testing.assert_almost_equal(obj.SNR, 52.386236188012326)
testing.assert_almost_equal(obj.normFactor, 3.90336975182)
testing.assert_almost_equal(obj.relIntensities[0], 0.16923077)
testing.assert_almost_equal(obj.relIntensities[-2], 0.26923077)
testing.assert_almost_equal(obj.relIntensities[-1], 1.)
testing.assert_almost_equal(obj.fitParams[0], -5.03268524e-02)
testing.assert_almost_equal(obj.fitParams[-2], 3.79790274e+04, decimal=3)
<<<<<<< HEAD
=======
>>>>>>> b10b16cc946083ac2d3cf37242d37dcf2391a94d
|
spicy-oilREPO_NAMEhfs_fitPATH_START.@hfs_fit_extracted@hfs_fit-master@tests@test_svc.py@.PATH_END.py
|
{
"filename": "geometry.md",
"repo_name": "EranOfek/AstroPack",
"repo_path": "AstroPack_extracted/AstroPack-main/matlab/util/+tools/+math/+geometry/geometry.md",
"type": "Markdown"
}
|
# Overview
# Usage
# Notes
# Known Issues
# See Also
|
EranOfekREPO_NAMEAstroPackPATH_START.@AstroPack_extracted@AstroPack-main@matlab@util@+tools@+math@+geometry@geometry.md@.PATH_END.py
|
{
"filename": "_idssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/densitymapbox/_idssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="idssrc", parent_name="densitymapbox", **kwargs):
super(IdssrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "info"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@densitymapbox@_idssrc.py@.PATH_END.py
|
{
"filename": "rates_only_mle.py",
"repo_name": "Swift-BAT/NITRATES",
"repo_path": "NITRATES_extracted/NITRATES-main/nitrates/archive/rates_only_mle.py",
"type": "Python"
}
|
import numpy as np
from astropy.io import fits
from astropy.table import Table, vstack
from scipy import optimize, stats
import argparse
import os
from ..lib.logllh_ebins_funcs import get_cnt_ebins_normed, log_pois_prob
from ..response.ray_trace_funcs import ray_trace_square
from ..lib.drm_funcs import get_ebin_ind_edges, DRMs
from ..lib.event2dpi_funcs import det2dpis, mask_detxy
from ..lib.trans_func import get_pb_absortion
def get_abs_cor_rates(imx, imy, drm):
drm_emids = (drm[1].data["ENERG_LO"] + drm[1].data["ENERG_HI"]) / 2.0
absorbs = get_pb_absortion(drm_emids, imx, imy)
abs_cor = (2.0 - absorbs) / (absorbs)
return abs_cor
def profiled_bkg_llh(
data_cnts, sig_rate, sdt, off_cnts, odt, off_cnts_err, ret_f=False
):
sig2 = off_cnts_err**2
d_i = np.sqrt(
(sdt * sig2 - odt * off_cnts + (odt**2) * sig_rate) ** 2
- 4.0
* (odt**2)
* (sdt * sig2 * sig_rate - data_cnts * sig2 - odt * off_cnts * sig_rate)
)
f_i = (-(sdt * sig2 - odt * off_cnts + sig_rate * (odt**2)) + d_i) / (
2.0 * odt**2
)
llh = (
sdt * (sig_rate + f_i)
- data_cnts * np.log(sdt * (sig_rate + f_i))
+ np.square(off_cnts - odt * f_i) / (2.0 * sig2)
- data_cnts * (1.0 - np.log(data_cnts))
)
if ret_f:
return np.sum(llh), f_i
return np.sum(llh)
def rates_llh(data, nsig, sig_e_normed, bkg_cnts, bkg_err, sig_dt, bkg_dt, ret_f=False):
sig_rates = (nsig / sig_dt) * sig_e_normed
if ret_f:
llh, f = profiled_bkg_llh(
data, sig_rates, sig_dt, bkg_cnts, bkg_dt, bkg_err, ret_f=ret_f
)
return llh, f
llh = profiled_bkg_llh(
data, sig_rates, sig_dt, bkg_cnts, bkg_dt, bkg_err, ret_f=ret_f
)
return llh
def rate_llh2min(theta, data, bkg_cnts, bkg_err, sig_dt, bkg_dt, cnorm_obj):
nsig = theta[0]
ind = theta[1]
if (ind < -1) or (ind > 3):
return np.inf
cnt_ebn = cnorm_obj(ind)
llh = rates_llh(data, nsig, cnt_ebn, bkg_cnts, bkg_err, sig_dt, bkg_dt)
return llh
class cnts_norm_intp(object):
def __init__(self, cnt_ebins_norm_ind_mat, ind_ax):
self.ind_ax = ind_ax
self.cnt_ebins_norm_ind_mat = cnt_ebins_norm_ind_mat
self.ind0 = np.min(ind_ax)
self.ind1 = np.max(ind_ax)
def __call__(self, ind):
if (ind <= self.ind0) or (ind >= self.ind1):
return np.nan * np.ones(np.shape(self.cnt_ebins_norm_ind_mat)[1])
ind_ind0 = np.argmin(np.abs(ind - self.ind_ax))
ind_ind1 = ind_ind0 + 1 if ind > self.ind_ax[ind_ind0] else ind_ind0 - 1
A0 = np.abs(ind - self.ind_ax[ind_ind1]) / np.abs(
self.ind_ax[ind_ind0] - self.ind_ax[ind_ind1]
)
A1 = 1 - A0
cnts_norm = (
A0 * self.cnt_ebins_norm_ind_mat[ind_ind0]
+ A1 * self.cnt_ebins_norm_ind_mat[ind_ind1]
)
return cnts_norm
def get_cnts_intp_obj(ind_ax, drm, ebin_ind_edges, abs_cor):
nebins = len(ebin_ind_edges)
cnt_ebins_norm_ind_mat = np.zeros((len(ind_ax), nebins))
for i in range(len(ind_ax)):
cnt_ebins_norm_ind_mat[i] = get_cnt_ebins_normed(
ind_ax[i], drm, ebin_ind_edges, abs_cor=abs_cor
)
intp_obj = cnts_norm_intp(cnt_ebins_norm_ind_mat, ind_ax)
return intp_obj
def get_cnts_per_tbins(t_bins0, t_bins1, ebins0, ebins1, ev_data, dmask):
ntbins = len(t_bins0)
nebins = len(ebins0)
cnts_per_tbin = np.zeros((ntbins, nebins))
for i in range(ntbins):
sig_bl = (ev_data["TIME"] >= t_bins0[i]) & (ev_data["TIME"] < (t_bins1[i]))
sig_data = ev_data[sig_bl]
sig_data_dpis = det2dpis(sig_data, ebins0, ebins1)
cnts_per_tbin[i] = np.array(
[np.sum(dpi[(dmask == 0)]) for dpi in sig_data_dpis]
)
return cnts_per_tbin
def get_cnts(ev, t_bins0, t_bins1, ebin_inds, nebins):
ntbins = len(t_bins0)
cnts = np.zeros((ntbins, nebins))
for i in range(ntbins):
blt = (ev["TIME"] >= t_bins0[i]) & (ev["TIME"] < t_bins1[i])
ebin_inds_ = ebin_inds[blt]
for j in range(nebins):
cnts[i, j] = np.sum(ebin_inds_ == j)
return cnts
def double_up_tbins(cnts_per_tbin, t_bins0, t_bins1):
dt = t_bins1[0] - t_bins0[0]
new_cnts = cnts_per_tbin[1:] + cnts_per_tbin[:-1]
t_bins0 = t_bins0[:-1]
t_bins1 = t_bins0 + 2.0 * dt
return new_cnts, t_bins0, t_bins1
def main(args):
ebins0 = np.array([14.0, 20.0, 26.0, 36.3, 51.1, 70.9, 91.7, 118.2, 151.4])
ebins1 = np.append(ebins0[1:], [194.9])
nebins = len(ebins0)
ev_data = fits.open(args.evf)[1].data
dmask = fits.open(args.dmask)[0].data
good_dt0 = args.bkgt0 - args.trigtime - 1.0
good_dt1 = 20.0
trig_time = args.trigtime
good_t0 = trig_time + good_dt0
good_t1 = trig_time + good_dt1
mask_vals = mask_detxy(dmask, ev_data)
bl_ev = (
(ev_data["TIME"] > good_t0)
& (ev_data["TIME"] < good_t1)
& (ev_data["EVENT_FLAGS"] < 1)
& (mask_vals == 0)
& (ev_data["ENERGY"] <= 194.9)
& (ev_data["ENERGY"] >= 14.0)
)
ev_data0 = ev_data[bl_ev]
ebins = np.append(ebins0, [ebins1[-1]])
ebin_ind = np.digitize(ev_data0["ENERGY"], ebins) - 1
bkg_bl = (ev_data0["TIME"] > args.bkgt0) & (
ev_data0["TIME"] < (args.bkgt0 + args.bkgdt)
)
bkg_data = ev_data0[bkg_bl]
bkg_data_dpis = det2dpis(bkg_data, ebins0, ebins1)
bkg_cnts = np.array([np.sum(dpi[(dmask == 0)]) for dpi in bkg_data_dpis])
print(bkg_cnts)
print(bkg_cnts / args.bkgdt)
bkg_err = 1.1 * np.sqrt(bkg_cnts)
tstep = 0.064
bin_size = 0.128
t_bins0 = np.arange(-15.008, 15.008, tstep) + trig_time
t_bins1 = t_bins0 + bin_size
ntbins = len(t_bins0)
print(ntbins)
# cnts_per_tbin = get_cnts_per_tbins(t_bins0, t_bins1, ebins0, ebins1,\
# ev_data0, dmask)
cnts_per_tbin = get_cnts(ev_data0, t_bins0, t_bins1, ebin_ind, nebins)
drm_dir = args.obsid
drm_obj = DRMs(drm_dir)
drm00 = drm_obj.get_drm(0.0, 0.0)
abs_cor = get_abs_cor_rates(0.0, 0.0, drm00)
ebin_ind_edges = get_ebin_ind_edges(drm00, ebins0, ebins1)
ind_ax = np.linspace(-1.5, 3.5, 20 * 5 + 1)
cnts_intp = get_cnts_intp_obj(ind_ax, drm00, ebin_ind_edges, abs_cor)
names = ["tstart", "tstop", "bkg_llh", "sig_llh", "Nsig", "plaw_ind"]
N_dbl_dt = args.ndbl
cnts_norm = cnts_intp(1.0)
tabs = []
for ii in range(N_dbl_dt):
tab = Table()
bkg_llh_tbins = np.zeros(ntbins)
bf_bkg_rates = np.zeros((ntbins, nebins))
for i in range(ntbins):
bkg_llh_tbins[i], bf_bkg_rates[i] = rates_llh(
cnts_per_tbin[i],
0.0,
cnts_norm,
bkg_cnts,
bkg_err,
bin_size,
args.bkgdt,
ret_f=True,
)
bf_nsigs = np.zeros(ntbins)
bf_inds = np.zeros(ntbins)
llhs = np.zeros(ntbins)
for i in range(ntbins):
x0 = [1.0, 1.0]
_args = (
cnts_per_tbin[i],
bkg_cnts,
bkg_err,
bin_size,
args.bkgdt,
cnts_intp,
)
# res = rate_llh2min(x0, *args)
res = optimize.fmin(
rate_llh2min, x0, args=_args, disp=False, full_output=True
)
bf_nsigs[i] = res[0][0]
bf_inds[i] = res[0][1]
llhs[i] = res[1]
tab["tstart"] = t_bins0
tab["tstop"] = t_bins1
tab["bkg_llh"] = bkg_llh_tbins
tab["sig_llh"] = llhs
tab["Nsig"] = bf_nsigs
tab["plaw_ind"] = bf_inds
tabs.append(tab)
# cnts_per_tbin, t_bins0, t_bins1 =\
# double_up_tbins(cnts_per_tbin, t_bins0, t_bins1)
# tstep = t_bins1[0] - t_bins0[0]
# ntbins -= 1
tstep *= 2
bin_size *= 2
t_bins0 = np.arange(-15.008, 15.008, tstep) + trig_time
t_bins1 = t_bins0 + bin_size
ntbins = len(t_bins0)
print(ntbins)
cnts_per_tbin = get_cnts(ev_data0, t_bins0, t_bins1, ebin_ind, nebins)
tab = vstack(tabs)
fname = os.path.join(args.obsid, "rate_llhs_trigtime_%.1f_.fits" % (args.trigtime))
dt_tot = np.max(tab["tstop"]) - np.min(tab["tstart"])
llhrs = tab["bkg_llh"] - tab["sig_llh"]
exps = tab["tstop"] - tab["tstart"]
pvals = stats.chi2.sf(2.0 * llhrs, 1)
Nexps = args.ndbl + 1
for i in range(Nexps):
bl_exp = np.isclose(exps, 0.128 * (2 ** (i)))
pvals[bl_exp] *= np.sum(bl_exp) / dt_tot
pvals = 1.0 - np.exp(-pvals)
tab["pval"] = pvals
tab.write(fname)
return
def cli():
parser = argparse.ArgumentParser()
parser.add_argument(
"--obsid", type=str, help="Obsid as a string, as it appears in file names"
)
parser.add_argument("--evf", type=str, help="Event File Name")
parser.add_argument("--e0", type=float, help="Min energy", default=14.0)
parser.add_argument("--e1", type=float, help="Max energy", default=194.9)
parser.add_argument("--dmask", type=str, help="Detmask fname")
parser.add_argument("--trigtime", type=float, help="Trigger time in MET seconds")
parser.add_argument("--bkgt0", type=float, help="Bkg start time in MET seconds")
parser.add_argument("--bkgdt", type=float, help="Bkg duration time in seconds")
parser.add_argument(
"--ndbl",
type=int,
help="Number of times to double the time bin duration",
default=5,
)
args = parser.parse_args()
return args
if __name__ == "__main__":
args = cli()
main(args)
|
Swift-BATREPO_NAMENITRATESPATH_START.@NITRATES_extracted@NITRATES-main@nitrates@archive@rates_only_mle.py@.PATH_END.py
|
{
"filename": "feature_request.md",
"repo_name": "AndrewAnnex/SpiceyPy",
"repo_path": "SpiceyPy_extracted/SpiceyPy-main/.github/ISSUE_TEMPLATE/feature_request.md",
"type": "Markdown"
}
|
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---
**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
**Describe the solution you'd like**
A clear and concise description of what you want to happen.
**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.
**Additional context**
Add any other context or screenshots about the feature request here.
|
AndrewAnnexREPO_NAMESpiceyPyPATH_START.@SpiceyPy_extracted@SpiceyPy-main@.github@ISSUE_TEMPLATE@feature_request.md@.PATH_END.py
|
{
"filename": "cheblib.py",
"repo_name": "venkateshgopinath/FAlCon-DNS",
"repo_path": "FAlCon-DNS_extracted/FAlCon-DNS-main/python/annulus/cheblib.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import glob, os
import numpy as np
from scipy.fftpack import dct, idct, fft, ifft
def chebgrid(nr, a, b):
"""
This function defines a Gauss-Lobatto grid from a to b.
>>> r_icb = 0.5 ; r_cmb = 1.5; n_r_max=65
>>> rr = chebgrid(n_r_max, r_icb, r_cmb)
:param nr: number of radial grid points
:type nr: int
:param a: lower limit of the Gauss-Lobatto grid
:type a: float
:param b: upper limit of the Gauss-Lobatto grid
:type b: float
:returns: the Gauss-Lobatto grid
:rtype: numpy.ndarray
"""
rst = (a+b)/(b-a)
rr = 0.5*(rst+np.cos(np.pi*(1.-np.arange(nr+1.)/nr)))*(b-a)
return rr
def matder(nr, z1, z2):
"""
This function calculates the derivative in Chebyshev space.
>>> r_icb = 0.5 ; r_cmb = 1.5; n_r_max=65
>>> d1 = matder(n_r_max, r_icb, r_cmb)
>>> # Chebyshev grid and data
>>> rr = chebgrid(n_r_max, r_icb, r_cmb)
>>> f = sin(rr)
>>> # Radial derivative
>>> df = dot(d1, f)
:param nr: number of radial grid points
:type nr: int
:param z1: lower limit of the Gauss-Lobatto grid
:type z1: float
:param z2: upper limit of the Gauss-Lobatto grid
:type z2: float
:returns: a matrix of dimension (nr,nr) to calculate the derivatives
:rtype: numpy.ndarray
"""
nrp = nr+1
w1 = np.zeros((nrp, nrp), dtype='Float64')
zl = z2-z1
for i in range(nrp):
for j in range(nrp):
w1[i, j] = spdel(i, j, nr, zl)
return w1
def intcheb(f, nr, z1, z2):
"""
This function integrates an input function f defined on the Gauss-Lobatto grid.
>>> print(intcheb(f, 65, 0.5, 1.5))
:param f: an input array
:type: numpy.ndarray
:param nr: number of radial grid points
:type nr: int
:param z1: lower limit of the Gauss-Lobatto grid
:type z1: float
:param z2: upper limit of the Gauss-Lobatto grid
:type z2: float
:returns: the integrated quantity
:rtype: float
"""
#fn = np.abs(np.real(dct(f,1))*0.5*np.sqrt(2.0/np.real(nr-1)))
fn = np.real(chebtransform(nr,f))
int = 0.
for i in range(0, nr, 2):
if i==0 or i==nr-1:
int = int-0.5*(z2-z1)/(i**2.-1.)*fn[i]
else:
int = int-(z2-z1)/(i**2.-1.)*fn[i]
int = int * np.sqrt(2.0/np.real(nr-1.))
return int
def spdel(kr, jr, nr, zl):
if kr != nr :
fac = 1.
k = kr
j = jr
else:
fac = -1.
k = 0.
j = nr-jr
spdel = fac*dnum(k, j, nr)/den(k, j, nr)
return -spdel*(2./zl)
def dnum(k, j, nr):
if k == 0:
if (j == 0 or j == nr):
dnum = 0.5
a = nr % 2
if a == 1:
dnum = -dnum
if j == 0:
dnum = 1./3.*float(nr*nr)+1./6.
return dnum
dnum = 0.5*(float(nr)+0.5)*((float(nr)+0.5)+(1./np.tan(np.pi*float(j) \
/float(2.*nr)))**2)+1./8.-0.25/(np.sin(np.pi*float(j)/ \
float(2*nr))**2) - 0.5*float(nr*nr)
return dnum
dnum = ff(k+j, nr)+ff(k-j, nr)
return dnum
def ff(i, nr):
if i == 0:
return 0
ff = float(nr)*0.5/np.tan(np.pi*float(i)/float(2.*nr))
a = i % 2
if a == 0:
ff = -ff
return ff
def den(k, j, nr):
if k == 0:
den = 0.5*float(nr)
a = j % 2
if a == 1:
den = -den
if (j == 0 or j == nr):
den = 1.
return den
den = float(nr)*np.sin(np.pi*float(k)/float(nr))
if (j == 0 or j == nr):
den = 2.*den
return den
def scanDir(pattern, tfix=None):
"""
This function sorts the files which match a given input pattern from the oldest
to the most recent one (in the current working directory)
>>> dat = scanDir('log.*')
>>> print(log)
:param pattern: a classical regexp pattern
:type pattern: str
:param tfix: in case you want to add only the files that are more recent than
a certain date, use tfix (computer 1970 format!!)
:type tfix: float
:returns: a list of files that match the input pattern
:rtype: list
"""
dat = [(os.stat(i).st_mtime, i) for i in glob.glob(pattern)]
dat.sort()
if tfix is not None:
out = []
for i in dat:
if i[0] > tfix:
out.append(i[1])
else:
out = [i[1] for i in dat]
return out
def spat_spec(data, nm, np):
out = fft(data, nm)
return out/(np)
def spec_spat2(data, n):
out = ifft(data, n)
return out.real
def spec_spat(data, n, axis=0):
out = np.fft.irfft(data, axis=axis, n=n)*n
return out.real
def chebforward(varFR,Nm_max,Nr_max):
varFC = np.zeros((Nm_max+1,Nr_max),dtype=np.complex128)
for i in range(0,Nm_max+1):
varFC[i][:] = dct(varFR[i][:],1)
return varFC
def chebinverse(varFC,Nm_max,Nr_max_ref,Nr_max_cur):
varFR = np.zeros((Nm_max+1,Nr_max_ref),dtype=np.complex128)
for i in range(0,Nm_max+1):
varFR[i][:] = idct(varFC[i][:],1)/(2.0*(Nr_max_cur-1))
return varFR
def padding(ttFC,Nm_max_ref,Nr_max_ref,Nm_max_cur,Nr_max_cur):
var_comp = np.zeros((Nm_max_ref+1,Nr_max_ref),dtype=np.complex128)
if (Nm_max_ref>Nm_max_cur) and (Nr_max_ref>Nr_max_cur) :
for i in range(0,Nm_max_cur+1):
for j in range(0,Nr_max_cur):
var_comp[i,j]=ttFC[i,j]
for i in range(Nm_max_cur+1,Nm_max_ref+1):
for j in range(Nr_max_cur,Nr_max_ref):
var_comp[i,j]=0.0
elif (Nm_max_ref==Nm_max_cur) and (Nr_max_ref>Nr_max_cur):
for i in range(0,Nm_max_cur+1):
for j in range(0,Nr_max_cur):
var_comp[i,j]=ttFC[i,j]
for i in range(0,Nm_max_cur+1):
for j in range(Nr_max_cur,Nr_max_ref):
var_comp[i,j]=0.0
elif (Nm_max_ref>Nm_max_cur) and (Nr_max_ref==Nr_max_cur):
for i in range(0,Nm_max_cur+1):
for j in range(0,Nr_max_cur):
var_comp[i,j]=ttFC[i,j]
for i in range(Nm_max_cur+1,Nm_max_ref+1):
for j in range(0,Nr_max_cur):
var_comp[i,j]=0.0
elif (Nm_max_ref==Nm_max_cur) and (Nr_max_ref==Nr_max_cur):
var_comp = ttFC
return var_comp
def calc_L2norm(varFR_comp,varFR_ref,Nm_max_ref,Nr_max_ref,rmin,rmax):
var_diff_phi=np.zeros(Nr_max_ref)
varp=np.zeros(Nr_max_ref)
rr= chebgrid(Nr_max_ref-1,rmin,rmax)
for j in range(0,Nr_max_ref-0):
var_diff_phi[j]=0.0
for i in range(1,Nm_max_ref+1):
var_diff_phi[j] = var_diff_phi[j] + abs((varFR_comp[i,j] - varFR_ref[i,j])*(varFR_comp[i,j]-varFR_ref[i,j]))
var_diff_phi[j] = 2*var_diff_phi[j] + abs((varFR_comp[0,j] - varFR_ref[0,j])*(varFR_comp[0,j]-varFR_ref[0,j]))
varp = rr*var_diff_phi
L2_error = np.sqrt((1.0/(np.pi*(rmax**2.0-rmin**2.0)))*(2.0*np.pi*intcheb(varp, Nr_max_ref, rmin, rmax)))
return L2_error
def calc_rel_L2norm(varFR_comp,varFR_ref,Nm_max_ref,Nr_max_ref,rmin,rmax):
var_diff_phi=np.zeros(Nr_max_ref)
varp=np.zeros(Nr_max_ref)
denom=np.zeros(Nr_max_ref)
rr= chebgrid(Nr_max_ref-1,rmin,rmax)
for j in range(0,Nr_max_ref):
var_diff_phi[j]=0.0
denom[j]=0.0
for i in range(1,Nm_max_ref+1):
var_diff_phi[j] = var_diff_phi[j] + abs((varFR_comp[i,j] - varFR_ref[i,j])*(varFR_comp[i,j]-varFR_ref[i,j]))
denom[j] = denom[j] + abs((varFR_ref[i,j])*(varFR_ref[i,j]))
var_diff_phi[j] = 2*var_diff_phi[j] + abs((varFR_comp[0,j] - varFR_ref[0,j])*(varFR_comp[0,j]-varFR_ref[0,j]))
denom[j] = 2*denom[j] + abs((varFR_ref[0,j])*(varFR_ref[0,j]))
varp = rr*var_diff_phi
denom = np.real(np.multiply(rr,denom))
rel_L2_error = np.sqrt((intcheb(varp, Nr_max_ref, rmin, rmax))/(intcheb(denom, Nr_max_ref, rmin, rmax)))
#rel_L2_error = np.sqrt((2.0*np.pi*intcheb(var_diff_phi, Nr_max_ref, rmin, rmax))/(2.0*np.pi*intcheb(denom, Nr_max_ref, rmin, rmax)))
#rel_L2_error = np.sqrt((1.0/(np.pi*(rmax**2.0-rmin**2.0)))*(4.0*np.pi*intcheb(varp, Nr_max_ref, rmin, rmax)))
return rel_L2_error
def calc_combine_error(var1_comp,var1_ref,var2_comp,var2_ref,var3_comp,var3_ref,Nm_max_ref,Nr_max_ref,rmin,rmax):
var_diff_phi1=np.zeros(Nr_max_ref)
denom1=np.zeros(Nr_max_ref)
var_diff_phi2=np.zeros(Nr_max_ref)
denom2=np.zeros(Nr_max_ref)
var_diff_phi3=np.zeros(Nr_max_ref)
denom3=np.zeros(Nr_max_ref)
rr= chebgrid(Nr_max_ref-1,rmin,rmax)
for j in range(0,Nr_max_ref):
var_diff_phi1[j]=0.0
denom1[j]=0.0
var_diff_phi2[j]=0.0
denom2[j]=0.0
var_diff_phi3[j]=0.0
denom3[j]=0.0
for i in range(1,Nm_max_ref+1):
var_diff_phi1[j] = var_diff_phi1[j] + abs((var1_comp[i,j] - var1_ref[i,j])*(var1_comp[i,j]-var1_ref[i,j]))
denom1[j] = denom1[j] + abs((var1_ref[i,j])*(var1_ref[i,j]))
var_diff_phi2[j] = var_diff_phi2[j] + abs((var2_comp[i,j] - var2_ref[i,j])*(var2_comp[i,j]-var2_ref[i,j]))
denom2[j] = denom2[j] + abs((var2_ref[i,j])*(var2_ref[i,j]))
var_diff_phi3[j] = var_diff_phi3[j] + abs((var3_comp[i,j] - var3_ref[i,j])*(var3_comp[i,j]-var3_ref[i,j]))
denom3[j] = denom3[j] + abs((var3_ref[i,j])*(var3_ref[i,j]))
var_diff_phi1[j] = 2*var_diff_phi1[j] + abs((var1_comp[0,j] - var1_ref[0,j])*(var1_comp[0,j]-var1_ref[0,j]))
denom1[j] = 2*denom1[j] + abs((var1_ref[0,j])*(var1_ref[0,j]))
var_diff_phi2[j] = 2*var_diff_phi2[j] + abs((var2_comp[0,j] - var2_ref[0,j])*(var2_comp[0,j]-var2_ref[0,j]))
denom2[j] = 2*denom2[j] + abs((var2_ref[0,j])*(var2_ref[0,j]))
var_diff_phi3[j] = 2*var_diff_phi3[j] + abs((var3_comp[0,j] - var3_ref[0,j])*(var3_comp[0,j]-var3_ref[0,j]))
denom3[j] = 2*denom3[j] + abs((var3_ref[0,j])*(var3_ref[0,j]))
var_diff_phi1 = np.real(np.multiply(rr,var_diff_phi1))
denom1 = np.real(np.multiply(rr,denom1))
n1=2.0*np.pi*intcheb(var_diff_phi1, Nr_max_ref, rmin, rmax)
d1=2.0*np.pi*intcheb(denom1, Nr_max_ref, rmin, rmax)
var_diff_phi2 = np.real(np.multiply(rr,var_diff_phi2))
denom2 = np.real(np.multiply(rr,denom2))
n2=2.0*np.pi*intcheb(var_diff_phi2, Nr_max_ref, rmin, rmax)
d2=2.0*np.pi*intcheb(denom2, Nr_max_ref, rmin, rmax)
var_diff_phi3 = np.real(np.multiply(rr,var_diff_phi3))
denom3 = np.real(np.multiply(rr,denom3))
n3=2.0*np.pi*intcheb(var_diff_phi3, Nr_max_ref, rmin, rmax)
d3=2.0*np.pi*intcheb(denom3, Nr_max_ref, rmin, rmax)
combine_error = np.sqrt(n1/d1 + n2/d2 + n3/d3)
return combine_error
def calc_L2norm_two(var1FR_comp,var1FR_ref,var2FR_comp,var2FR_ref,Nm_max_ref,Nr_max_ref,rmin,rmax):
    """Combined L2 norm of the difference of two Fourier-space fields.

    Sums |comp - ref|^2 over the azimuthal modes of both fields at every
    radial point, weights by r, and integrates over radius on the
    Chebyshev grid.  Returns sqrt(2*pi * integral).
    """
    rr = chebgrid(Nr_max_ref - 1, rmin, rmax)
    var_diff_phi = np.zeros(Nr_max_ref)
    for j in range(Nr_max_ref):
        acc = 0.0
        for i in range(Nm_max_ref + 1):
            d1 = np.abs(var1FR_comp[i, j] - var1FR_ref[i, j])
            d2 = np.abs(var2FR_comp[i, j] - var2FR_ref[i, j])
            acc = acc + d1 * d1 + d2 * d2
        var_diff_phi[j] = acc
    # Radial weight r for the area element, then Chebyshev integration
    var_diff_phi = np.real(np.multiply(rr, var_diff_phi))
    L2_error = np.sqrt(2.0 * np.pi * intcheb(var_diff_phi, Nr_max_ref, rmin, rmax))
    return L2_error
def calc_rmsvel(var1FR_ref,var2FR_ref,Nm_max_ref,Nr_max_ref,rmin,rmax): # From output variables from code in Fourier-real space
    """RMS velocity computed from two Fourier-real-space velocity components.

    The kinetic energy is obtained by summing |v|^2 over azimuthal modes,
    weighting by r and integrating in radius; the RMS velocity follows by
    dividing by the annulus area pi*(rmax^2 - rmin^2).
    """
    rr = chebgrid(Nr_max_ref - 1, rmin, rmax)
    var1 = np.zeros(Nr_max_ref)
    var2 = np.zeros(Nr_max_ref)
    for j in range(Nr_max_ref):
        s1 = 0.0
        s2 = 0.0
        for i in range(Nm_max_ref + 1):
            s1 = s1 + np.abs(var1FR_ref[i, j] * var1FR_ref[i, j])
            s2 = s2 + np.abs(var2FR_ref[i, j] * var2FR_ref[i, j])
        var1[j] = s1
        var2[j] = s2
    var1 = np.real(np.multiply(rr, var1))
    var2 = np.real(np.multiply(rr, var2))
    Ekin = (2.0 * np.pi * intcheb(var1, Nr_max_ref, rmin, rmax)) + (2.0 * np.pi * intcheb(var2, Nr_max_ref, rmin, rmax))
    rmsvel = np.sqrt(2.0 * Ekin / (np.pi * (rmax ** 2.0 - rmin ** 2.0)))
    return rmsvel
def calc_rmsvel2(var1_r,var2_r,Nm_max,Nr_max,rmin,rmax): # From variables in physical space and taking Fourier transform
    """RMS velocity and kinetic energy from physical-space velocity fields.

    The fields are FFT'd in phi column by column; by Parseval's theorem the
    phi-integral of |v|^2 becomes a sum over all Np_max Fourier modes (hence
    the 2*pi prefactor and no extra factor for conjugate modes).

    Parameters
    ----------
    var1_r, var2_r : (3*Nm_max, Nr_max) real arrays
        Velocity components on the physical (phi, r) grid.
    Nm_max, Nr_max : int
        Azimuthal / radial resolution.
    rmin, rmax : float
        Annulus radii.

    Returns
    -------
    (rmsvel, Ekin) tuple of floats.
    """
    Np_max = 3 * Nm_max
    var1_FR = np.zeros((Np_max, Nr_max), dtype=complex)
    var2_FR = np.zeros((Np_max, Nr_max), dtype=complex)
    # Normalised FFT in phi at each radial level
    for i in range(Nr_max):
        var1_FR[:, i] = fft(var1_r[:, i]) / Np_max
        var2_FR[:, i] = fft(var2_r[:, i]) / Np_max
    # Previously two truncated (Nm_max+1, Nr_max) copies were built here but
    # never used; that dead work has been removed.
    rr = chebgrid(Nr_max - 1, rmin, rmax)
    var1 = np.zeros(Nr_max)
    var2 = np.zeros(Nr_max)
    for j in range(Nr_max):
        for i in range(Np_max):
            var1[j] = var1[j] + np.abs(var1_FR[i, j] * var1_FR[i, j])
            var2[j] = var2[j] + np.abs(var2_FR[i, j] * var2_FR[i, j])
    var1 = np.real(np.multiply(rr, var1))
    var2 = np.real(np.multiply(rr, var2))
    Ekin = (0.5 * ((2.0 * np.pi * intcheb(var1, Nr_max, rmin, rmax)) + (2.0 * np.pi * intcheb(var2, Nr_max, rmin, rmax))))
    rmsvel = np.sqrt(2.0 * Ekin / (np.pi * (rmax ** 2.0 - rmin ** 2.0)))
    return rmsvel, Ekin
def calc_rmsvel3(var1_r,var2_r,Nm_max,Nr_max,rmin,rmax): # From variables in physical space using np.trapz
    """RMS velocity and kinetic energy via direct trapezoidal integration in phi."""
    Np_max = 3 * Nm_max
    rr = chebgrid(Nr_max - 1, rmin, rmax)
    # Uniform phi grid built cumulatively, phi[0] = 0
    dphi = 2.0 * np.pi / (Np_max)
    phi = np.zeros(Np_max)
    for i in range(1, Np_max):
        phi[i] = phi[i - 1] + dphi
    var1 = np.zeros(Nr_max)
    var2 = np.zeros(Nr_max)
    for j in range(Nr_max):
        var1[j] = np.trapz(var1_r[:, j] * var1_r[:, j], phi)
        var2[j] = np.trapz(var2_r[:, j] * var2_r[:, j], phi)
    # Weight by r, then integrate radially on the Chebyshev grid
    var1 = np.real(np.multiply(rr, var1))
    var2 = np.real(np.multiply(rr, var2))
    Ekin = 0.5 * ((intcheb(var1, Nr_max, rmin, rmax)) + (intcheb(var2, Nr_max, rmin, rmax)))
    rmsvel = np.sqrt(2.0 * Ekin / (np.pi * (rmax ** 2.0 - rmin ** 2.0)))
    return rmsvel, Ekin
def calc_rmsvel_phi(var1FR_ref,var2FR_ref,Nm_max_ref,Nr_max_ref,rmin,rmax):
    """Radial profile of the phi-averaged RMS velocity.

    At each radial point the mode amplitudes of both components are summed
    in quadrature and normalised by the local circumference 2*pi*r.
    """
    rr = chebgrid(Nr_max_ref - 1, rmin, rmax)
    var = np.zeros(Nr_max_ref)
    for j in range(Nr_max_ref):
        total = 0.0
        for i in range(Nm_max_ref + 1):
            total = total + np.abs(var1FR_ref[i, j]) * np.abs(var1FR_ref[i, j]) + np.abs(var2FR_ref[i, j]) * np.abs(var2FR_ref[i, j])
        var[j] = np.sqrt(total / (2.0 * np.pi * rr[j]))
    return var
def time_avg(var_ref, time, Nr_max_ref, nsnaps):
    """Time-average each radial point of a time series of snapshots.

    Parameters
    ----------
    var_ref : (nsnaps, Nr_max_ref) array (may be complex)
        Snapshots of the field, one row per time.
    time : 1-D array of snapshot times.
    Nr_max_ref : int
        Number of radial points to average (columns of ``var_ref``).
    nsnaps : int
        Number of snapshots; ``time[nsnaps-1] - time[0]`` is the window.

    Returns
    -------
    Complex array of length ``Nr_max_ref`` with the trapezoidal time average.
    """
    # NumPy 2.0 renamed trapz -> trapezoid; support both.
    try:
        trapz = np.trapezoid
    except AttributeError:
        trapz = np.trapz
    fac = time[nsnaps - 1] - time[0]
    # One vectorized trapezoidal integration over the time axis replaces the
    # old per-column Python loop (which also redundantly zeroed each entry).
    var = trapz(var_ref[:, :Nr_max_ref], time, axis=0) / fac
    return np.asarray(var, dtype=complex)
def calc_maxnorm(var_comp, var_ref, Nm_max_ref, Nr_max_ref):
    """Maximum (L-infinity) norm of the difference between two fields.

    ``Nm_max_ref`` and ``Nr_max_ref`` are unused but kept so the signature
    matches the other ``calc_*norm`` helpers in this module.  The large
    slab of commented-out experimental variants has been removed.
    """
    max_error = np.abs(var_comp - var_ref).max()
    return max_error
def calc_maxnorm_omg(var_comp, var_ref, Nm_max_ref, Nr_max_ref):
    """Max-norm of the imaginary part of (var_comp - var_ref).

    Only interior radial points j = 1 .. Nr_max_ref-2 are compared; the two
    boundary points are excluded (their entries stay zero).

    Bug fix: the original function computed ``max_error`` but never returned
    it, so every caller received ``None``.
    """
    diff = np.zeros((Nm_max_ref + 1, Nr_max_ref))
    diff[:, 1:Nr_max_ref - 1] = np.abs(
        np.imag(var_comp[:Nm_max_ref + 1, 1:Nr_max_ref - 1]
                - var_ref[:Nm_max_ref + 1, 1:Nr_max_ref - 1])
    )
    max_error = diff.max()
    return max_error
def chebtransform(Nr_max,f):
    """Forward Chebyshev transform of f sampled on Nr_max grid points.

    Implements the fast transform by building the even extension of the
    (reversed) samples and taking one FFT of length 2*Nr_max-2, then
    rescaling into the normalisation used by chebinvtran/chebinvtranD1/D2.
    Returns the complex coefficient array ft of length Nr_max.
    """
    f00=np.zeros([2*Nr_max-2],dtype=complex)
    ff=np.zeros([2*Nr_max-2],dtype=complex)
    f2=np.zeros([Nr_max],dtype=complex)
    ft=np.zeros([Nr_max],dtype=complex)
    # Reverse so the grid ordering matches the DCT convention below
    f0 = f[::-1]
    # Pre-processing: even extension [f0, mirrored interior of f]
    f00[0:Nr_max]=f0[0:Nr_max]
    f00[Nr_max:2*Nr_max-2]=f[1:Nr_max-1]
    ff[:] = fft(f00) # Execute dft
    # Post-processing: fold the FFT output into DCT-I coefficients
    ff=ff/(2*Nr_max-2)
    f2[0]=ff[0]
    f2[Nr_max-1]=ff[Nr_max-1]
    f2[1:Nr_max-1]=2*ff[1:Nr_max-1]
    # Rescale; the two end coefficients carry double weight in this scheme
    fac=np.sqrt(2./(Nr_max-1))
    ft=f2/fac
    ft[0]=2*ft[0]
    ft[Nr_max-1]=2*ft[Nr_max-1]
    return ft
def chebinvtran(Nr_max, fc):
    """Inverse Chebyshev transform via a type-I inverse DCT.

    Parameters
    ----------
    Nr_max : int
        Number of radial grid points (and coefficients).
    fc : length-Nr_max coefficient array, normalised as produced by
        ``chebtransform``.

    Returns
    -------
    Physical-space values on the Chebyshev grid, reversed to match the
    grid ordering used by ``chebtransform``.

    Cleanup: the original pre-allocated four complex work arrays
    (fin/ff/f2/ft) that were immediately overwritten or never used.
    """
    fac = np.sqrt(2. / (Nr_max - 1))
    # 0.5*fac undoes the normalisation applied in chebtransform
    fin = fac * idct(fc, 1) * 0.5
    return fin[::-1]
def chebinvtranD1(Nr_max,ft):
    """Evaluate the first radial derivative from Chebyshev coefficients.

    Applies the standard backward recurrence to turn the coefficients ft
    (normalised as produced by chebtransform) into derivative coefficients
    beta1, then performs a fast FFT-based inverse Chebyshev transform.
    Returns the derivative on the Nr_max grid points, reversed to match
    the grid ordering used elsewhere in this module.
    """
    f2r=np.zeros([2*Nr_max-2],dtype=complex)
    f2c=np.zeros([2*Nr_max-2],dtype=complex)
    df=np.zeros([Nr_max],dtype=complex)
    beta1=np.zeros([Nr_max],dtype=complex)
    f=ft
    fac = np.sqrt(2./(Nr_max-1))
    # Recurrence for the 1st derivative coefficients
    beta1[Nr_max-1] = 0.0
    beta1[Nr_max-2] = 2. * (Nr_max-1) * f[Nr_max-1]
    for i in range(Nr_max-2,0,-1):
        beta1[i-1] = beta1[i+1] + 4. * (i) * f[i]
    # Undo the transform normalisation; end coefficients carry half weight
    beta1=beta1*fac
    beta1[0]=beta1[0]/2
    beta1[Nr_max-1]=beta1[Nr_max-1]/2
    # Fast inverse Chebyshev transform
    # Pre-processing: pack coefficients into the even-extension FFT layout
    f2c[0]=beta1[0]
    f2c[1:Nr_max-1]=beta1[1:Nr_max-1]/2
    f2c[Nr_max:2*Nr_max-2]=beta1[Nr_max-2:0:-1]/2
    f2r = fft(f2c)
    # First Nr_max FFT outputs are the grid values; reverse for grid order
    df[0:Nr_max]=f2r[0:Nr_max]
    df=df[::-1]
    return df
def chebinvtranD2(Nr_max,ft):
    """Evaluate the second radial derivative from Chebyshev coefficients.

    Applies the derivative recurrence twice (ft -> beta1 -> beta2) and then
    performs the same FFT-based inverse Chebyshev transform as
    chebinvtranD1.  Returns the second derivative on the Nr_max grid
    points, reversed to match the grid ordering used elsewhere.
    """
    f2r=np.zeros([2*Nr_max-2],dtype=complex)
    f2c=np.zeros([2*Nr_max-2],dtype=complex)
    d2f=np.zeros([Nr_max],dtype=complex)
    beta1=np.zeros([Nr_max],dtype=complex)
    beta2=np.zeros([Nr_max],dtype=complex)
    f=ft
    fac = np.sqrt(2./(Nr_max-1))
    # Recurrence for the 1st derivative coefficients
    beta1[Nr_max-1] = 0.0
    beta1[Nr_max-2] = 2. * (Nr_max-1) * f[Nr_max-1]
    for i in range(Nr_max-2,0,-1):
        beta1[i-1] = beta1[i+1] + 4. * (i) * f[i]
    # Recurrence for the 2nd derivative coefficients
    beta2[Nr_max-1] = 0.0
    beta2[Nr_max-2] = 0.0
    for i in range(Nr_max-2,0,-1):
        beta2[i-1] = beta2[i+1] + 4. * (i) * beta1[i]
    # Normalise both coefficient sets; end coefficients carry half weight.
    # NOTE(review): beta1 is normalised here but only beta2 feeds the
    # inverse transform below — beta1's rescaling looks redundant; confirm.
    beta1=beta1*fac
    beta1[0]=beta1[0]/2
    beta1[Nr_max-1]=beta1[Nr_max-1]/2
    beta2=beta2*fac
    beta2[0]=beta2[0]/2
    beta2[Nr_max-1]=beta2[Nr_max-1]/2
    # Fast inverse Chebyshev transform
    # Pre-processing: pack coefficients into the even-extension FFT layout
    f2c[0]=beta2[0]
    f2c[1:Nr_max-1]=beta2[1:Nr_max-1]/2
    f2c[Nr_max:2*Nr_max-2]=beta2[Nr_max-2:0:-1]/2
    f2r = fft(f2c) # Execute dft
    d2f[0:Nr_max]=f2r[0:Nr_max]
    d2f=d2f[::-1]
    return d2f
# NOTES
# Note that when we do a integral in phi direction we get 2*pi as a prefactor due to Parseval's theorem
|
venkateshgopinathREPO_NAMEFAlCon-DNSPATH_START.@FAlCon-DNS_extracted@FAlCon-DNS-main@python@annulus@cheblib.py@.PATH_END.py
|
{
"filename": "context.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/tests/unit/services/externalBrokers/context.py",
"type": "Python"
}
|
"""Import at the start of tests so that imported packages get resolved properly.
"""
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../common')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../common/src')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../services')))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../../services/externalBrokers')))
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@tests@unit@services@externalBrokers@context.py@.PATH_END.py
|
{
"filename": "nn.py",
"repo_name": "probabilists/lampe",
"repo_path": "lampe_extracted/lampe-master/lampe/nn.py",
"type": "Python"
}
|
r"""Neural networks, layers and modules."""
__all__ = ["MLP", "ResMLP"]
import torch.nn as nn
from torch import Tensor
from typing import Sequence
from zuko.nn import MLP
class Residual(nn.Module):
    r"""Wraps a module :math:`f` into a skip connection.

    .. math:: y = x + f(x)

    Arguments:
        f: A function :math:`f`.
    """

    def __init__(self, f: nn.Module):
        super().__init__()
        self.f = f

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({self.f})"

    def forward(self, x: Tensor) -> Tensor:
        # Additive skip connection around the wrapped module.
        return self.f(x) + x
class ResMLP(nn.Sequential):
    r"""Creates a residual multi-layer perceptron (ResMLP).

    A ResMLP is a series of residual blocks where each block is a (shallow) MLP.
    Using residual blocks instead of regular non-linear functions prevents the
    gradients from vanishing, which allows for deeper networks.

    Arguments:
        in_features: The number of input features.
        out_features: The number of output features.
        hidden_features: The numbers of hidden features.
        kwargs: Keyword arguments passed to :class:`MLP`.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        hidden_features: Sequence[int] = (64, 64),
        **kwargs,
    ):
        widths = (in_features, *hidden_features, out_features)
        layers = []
        # Pair up consecutive widths; add a Linear only when the width
        # changes, and follow every stage with a same-width residual MLP.
        for src, dst in zip(widths[:-1], widths[1:]):
            if src != dst:
                layers.append(nn.Linear(src, dst))
            layers.append(Residual(MLP(dst, dst, [dst], **kwargs)))
        # Drop the trailing residual block so the network ends cleanly.
        super().__init__(*layers[:-1])

        self.in_features = in_features
        self.out_features = out_features
|
probabilistsREPO_NAMElampePATH_START.@lampe_extracted@lampe-master@lampe@nn.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "bd-j/prospector",
"repo_path": "prospector_extracted/prospector-main/prospect/fitting/__init__.py",
"type": "Python"
}
|
from .ensemble import run_emcee_sampler, restart_emcee_sampler
from .minimizer import reinitialize
from .nested import run_nested_sampler
from .fitting import fit_model, lnprobfn, run_minimize

# Public API of prospect.fitting; the legacy sampler entry points are kept
# exported for backwards compatibility only.
__all__ = ["fit_model", "lnprobfn",
           # below should all be removed/deprecated
           "run_emcee_sampler", "restart_emcee_sampler",
           "run_nested_sampler",
           "run_minimize", "reinitialize"]
|
bd-jREPO_NAMEprospectorPATH_START.@prospector_extracted@prospector-main@prospect@fitting@__init__.py@.PATH_END.py
|
{
"filename": "_title.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/carpet/aaxis/_title.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
    # NOTE: auto-generated plotly graph-object wrapper (carpet.aaxis.title);
    # regenerate via the plotly codegen rather than editing by hand.

    # class properties
    # --------------------
    _parent_path_str = "carpet.aaxis"
    _path_str = "carpet.aaxis.title"
    _valid_props = {"font", "offset", "text"}

    # font
    # ----
    @property
    def font(self):
        """
        Sets this axis' title font. Note that the title's font used to
        be set by the now deprecated `titlefont` attribute.

        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.carpet.aaxis.title.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor

            Supported dict properties:

                color
                family
                    HTML font family - the typeface that will be
                    applied by the web browser. The web browser
                    will only be able to apply a font if it is
                    available on the system which it operates.
                    Provide multiple font families, separated by
                    commas, to indicate the preference in which to
                    apply fonts if they aren't available on the
                    system. The Chart Studio Cloud (at
                    https://chart-studio.plotly.com or on-premise)
                    generates images on a server, where only a
                    select number of fonts are installed and
                    supported. These include "Arial", "Balto",
                    "Courier New", "Droid Sans", "Droid Serif",
                    "Droid Sans Mono", "Gravitas One", "Old
                    Standard TT", "Open Sans", "Overpass", "PT Sans
                    Narrow", "Raleway", "Times New Roman".
                lineposition
                    Sets the kind of decoration line(s) with text,
                    such as an "under", "over" or "through" as well
                    as combinations e.g. "under+over", etc.
                shadow
                    Sets the shape and color of the shadow behind
                    text. "auto" places minimal shadow and applies
                    contrast text font color. See
                    https://developer.mozilla.org/en-
                    US/docs/Web/CSS/text-shadow for additional
                    options.
                size
                style
                    Sets whether a font should be styled with a
                    normal or italic face from its family.
                textcase
                    Sets capitalization of text. It can be used to
                    make text appear in all-uppercase or all-
                    lowercase, or with each word capitalized.
                variant
                    Sets the variant of the font.
                weight
                    Sets the weight (or boldness) of the font.

        Returns
        -------
        plotly.graph_objs.carpet.aaxis.title.Font
        """
        return self["font"]

    @font.setter
    def font(self, val):
        self["font"] = val

    # offset
    # ------
    @property
    def offset(self):
        """
        An additional amount by which to offset the title from the tick
        labels, given in pixels. Note that this used to be set by the
        now deprecated `titleoffset` attribute.

        The 'offset' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["offset"]

    @offset.setter
    def offset(self, val):
        self["offset"] = val

    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of this axis. Note that before the existence of
        `title.text`, the title's contents used to be defined as the
        `title` attribute itself. This behavior has been deprecated.

        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        font
            Sets this axis' title font. Note that the title's font
            used to be set by the now deprecated `titlefont`
            attribute.
        offset
            An additional amount by which to offset the title from
            the tick labels, given in pixels. Note that this used
            to be set by the now deprecated `titleoffset`
            attribute.
        text
            Sets the title of this axis. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.
        """

    def __init__(self, arg=None, font=None, offset=None, text=None, **kwargs):
        """
        Construct a new Title object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.carpet.aaxis.Title`
        font
            Sets this axis' title font. Note that the title's font
            used to be set by the now deprecated `titlefont`
            attribute.
        offset
            An additional amount by which to offset the title from
            the tick labels, given in pixels. Note that this used
            to be set by the now deprecated `titleoffset`
            attribute.
        text
            Sets the title of this axis. Note that before the
            existence of `title.text`, the title's contents used to
            be defined as the `title` attribute itself. This
            behavior has been deprecated.

        Returns
        -------
        Title
        """
        super(Title, self).__init__("title")

        # Internal construction path used by the parent hierarchy.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.carpet.aaxis.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.aaxis.Title`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword arguments take precedence over entries in `arg`.
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("offset", None)
        _v = offset if offset is not None else _v
        if _v is not None:
            self["offset"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@carpet@aaxis@_title.py@.PATH_END.py
|
{
"filename": "run.py",
"repo_name": "ratt-ru/Stimela-classic",
"repo_path": "Stimela-classic_extracted/Stimela-classic-master/stimela/cargo/cab/casa47_applycal/src/run.py",
"type": "Python"
}
|
import os
import sys
import logging
import Crasa.Crasa as crasa
import yaml
import glob
import shutil
CONFIG = os.environ["CONFIG"]
INPUT = os.environ["INPUT"]
OUTPUT = os.environ["OUTPUT"]
MSDIR = os.environ["MSDIR"]
with open(CONFIG, "r") as _std:
cab = yaml.safe_load(_std)
junk = cab["junk"]
args = {}
for param in cab['parameters']:
name = param['name']
value = param['value']
if value is None:
continue
args[name] = value
task = crasa.CasaTask(cab["binary"], **args)
try:
task.run()
finally:
for item in junk:
for dest in [OUTPUT, MSDIR]: # these are the only writable volumes in the container
items = glob.glob("{dest}/{item}".format(**locals()))
for f in items:
if os.path.isfile(f):
os.remove(f)
elif os.path.isdir(f):
shutil.rmtree(f)
# Leave other types
|
ratt-ruREPO_NAMEStimela-classicPATH_START.@Stimela-classic_extracted@Stimela-classic-master@stimela@cargo@cab@casa47_applycal@src@run.py@.PATH_END.py
|
{
"filename": "_xanchor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/streamtube/colorbar/_xanchor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the `streamtube.colorbar.xanchor` property."""

    def __init__(
        self, plotly_name="xanchor", parent_name="streamtube.colorbar", **kwargs
    ):
        # Pull the overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["left", "center", "right"])
        super(XanchorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@streamtube@colorbar@_xanchor.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Jammy2211/PyAutoLens",
"repo_path": "PyAutoLens_extracted/PyAutoLens-main/autolens/point/__init__.py",
"type": "Python"
}
|
Jammy2211REPO_NAMEPyAutoLensPATH_START.@PyAutoLens_extracted@PyAutoLens-main@autolens@point@__init__.py@.PATH_END.py
|
|
{
"filename": "fernandes_2019_broken_powerlaw.py",
"repo_name": "GijsMulders/epos",
"repo_path": "epos_extracted/epos-master/EPOS/scriptdir/papers/fernandes_2019_broken_powerlaw.py",
"type": "Python"
}
|
#! /usr/bin/env ipython
'''
This script is meant to reproduce some of the results/figures from
Fernandes et al. 2019 (AJ submitted), in particular the 3 and 4 parameter
solutions in Table 1 and 2 and Figure 4, (9), and 10
Text output will appear in the terminal.
Symmetric:
./paper_fernandes_2019_broken_powerlaw.py sym
Best-fit values
pps= 0.476 +0.0798 -0.0704
P break= 1.54e+03 +948 -381
p1= 0.652 +0.197 -0.17
m1= -0.442 +0.0526 -0.0544
posterior per bin
eta= 26.6% +6.1% -5.4%
Asymmetric:
./paper_fernandes_2019_broken_powerlaw.py
Best-fit values
pps= 0.462 +0.0804 -0.0701
P break= 2.04e+03 +1.16e+03 -1.13e+03
p1= 0.641 +0.272 -0.146
p2= -1.29 +0.96 -1.17
m1= -0.439 +0.0538 -0.0536
posterior per bin
eta= 27.4% +7.0% -5.4%
Plots will be generated in the png/ subfolder
png/fernandes2019_rv/occurrence/posterior_x.png
png/fernandes2019_rv/occurrence/posterior_y.png
png/fernandes2019_rv_sym/mcmc/triangle.png
Note that results may vary depending on the random seed, version of the code used,
and external dependencies.
Future versions of the code may employ different default parameters.
This script is compatible with version 1.1 of EPOS
If you have trouble running this script or EPOS, please consult the online documentation,
try running the test and example scripts, or contact the first author
'''
import sys
import EPOS
from EPOS import cgs

# Command-line switches select between the fit variants.
Symmetric= 'sym' in sys.argv
Single= 'single' in sys.argv
if Single: suffix='_single'
elif Symmetric: suffix='_sym'
else: suffix=''
# NOTE(review): 'suffix' is assigned but never used below — the epos name
# rebuilds the '_sym' tag inline; confirm whether suffix can be removed.

''' initialize the EPOS class '''
#Msini = True converts the mass distirbution to an msini distribution
epos= EPOS.epos(name='fernandes2019_rv{}'.format('_sym' if Symmetric else ''),
    RV = True, MC = False, Msini = True)

''' load the exoplanets and completeness from Mayor+ 2011'''
obs, survey= EPOS.rv.Mayor2011()
epos.set_observation(**obs)
epos.set_survey(**survey)

''' Define a double broken power-law as a planet population '''
if Single:
    epos.set_parametric(EPOS.fitfunctions.brokenpowerlaw2D_yonly)
elif Symmetric:
    epos.set_parametric(EPOS.fitfunctions.brokenpowerlaw2D_symmetric)
else:
    epos.set_parametric(EPOS.fitfunctions.brokenpowerlaw2D)

''' Parameter initial guess and fitting ranges
Note:
- brokenpowerlaw2D uses 6 parameters, indicated with the is2D keyword
- 'pps' is a normalization factor for the planet occurrence rate (planets-per-star)
- parameters a_M and b_M are not fitted
- dx is the range in walker initial positions for parameters that change sign (+/-)
'''
epos.fitpars.add('pps', 1.0, min=1e-3)
if Single:
    epos.fitpars.add('p1', 1.0, min=0, max=3, is2D=True)
else:
    epos.fitpars.add('P break', 1e3, min=100,max=7e3,is2D=True)
    epos.fitpars.add('p1', 1.0, min=0, max=3, is2D=True)
    if not Symmetric:
        epos.fitpars.add('p2', -0.5, min=-3, max=0, is2D=True)
epos.fitpars.add('M break', 10.0, fixed=True, is2D=True)
epos.fitpars.add('a_M', 0.0, fixed=True, is2D=True)
epos.fitpars.add('m1', -0.5, fixed=False, dx=0.1, is2D=True)

''' define the simulated range (trim) and the range compared to observations (zoom)'''
epos.set_ranges(xtrim=[1,1e5],ytrim=[10,1e5],xzoom=[10,1e4],yzoom=[30,6000],
    Occ=True, UnitTicks=True)

''' Run the simulation once '''
EPOS.run.once(epos)

''' run an MCMC chain on multiple cores or read in a previously saved run'''
EPOS.run.mcmc(epos, nMC=1000, nwalkers=50, nburn=200, threads=20, Saved=True)

''' define bins where occurrence is calculated '''
# Period bin from Kepler's third law (0.1-100 au); mass bin 0.1-13 Mjup in Mearth
Pbin= [365.24 * au**1.5 for au in [0.1,100.]]
Mbin= [Mj * cgs.Mjup/cgs.Mearth for Mj in [0.1,13.]]
epos.set_bins(xbins=[Pbin], ybins=[Mbin])

''' Calculate the occurrence rates '''
EPOS.occurrence.all(epos)

''' Adjust plot parameters'''
epos.plotpars['occrange']= [2e-4,2.]
#epos.xtrim[1]= 4e5

''' plot everything '''
EPOS.plot.survey.all(epos)
EPOS.plot.input.all(epos)
EPOS.plot.output.all(epos)
EPOS.plot.mcmc.all(epos)
EPOS.plot.occurrence.all(epos)
|
GijsMuldersREPO_NAMEeposPATH_START.@epos_extracted@epos-master@EPOS@scriptdir@papers@fernandes_2019_broken_powerlaw.py@.PATH_END.py
|
{
"filename": "analyze.py",
"repo_name": "jmd-dk/concept",
"repo_path": "concept_extracted/concept-master/test/fluid_pressure/analyze.py",
"type": "Python"
}
|
# This file has to be run in pure Python mode!

# Imports from the CO𝘕CEPT code
from commons import *
from snapshot import load
import species
plt = get_matplotlib().pyplot

# Absolute path and name of this test
this_dir = os.path.dirname(os.path.realpath(__file__))
this_test = os.path.basename(os.path.dirname(this_dir))

# Read in data from the CO𝘕CEPT snapshots
species.allow_similarly_named_components = True
fluids = []
times = []
for fname in sorted(
    glob(f'{this_dir}/output/snapshot_t=*'),
    key=(lambda s: s[(s.index('=') + 1):]),
):
    snapshot = load(fname, compare_params=False)
    fluids.append(snapshot.components[0])
    times.append(float(re.search(f'snapshot_t=(.*){unit_time}', fname).group(1)))
gridsize = fluids[0].gridsize
N_snapshots = len(fluids)

# Sort data chronologically
order = np.argsort(times)
times = [times[o] for o in order]
fluids = [fluids[o] for o in order]

# Use precise times
times = output_times['t']['snapshot']

# Begin analysis
masterprint(f'Analysing {this_test} data ...')

# Extract hidden parameters
w = user_params['_w']
T = user_params['_T']
A = user_params['_A']
ρ0 = user_params['_ρ0']

# Plot the analytical standing-wave solution against each snapshot
fig_file = f'{this_dir}/result.png'
fig, axes = plt.subplots(N_snapshots, sharex=True, sharey=True, figsize=(8, 3*N_snapshots))
x_values = [boxsize*i/gridsize for i in range(gridsize)]
ρ = []
ρ_snapshot = []
for ax, fluid, t in zip(axes, fluids, times):
    ρ.append(asarray([ρ0 + A*sin(x/boxsize*2*π)*cos(t/T*2*π) for x in x_values]))
    ρ_snapshot.append(fluid.ϱ.grid_noghosts[:gridsize, 0, 0])
    ax.plot([0, boxsize], [ρ0 ]*2, 'k:' )
    ax.plot([0, boxsize], [ρ0 + A]*2, 'k--')
    ax.plot([0, boxsize], [ρ0 - A]*2, 'k--')
    ax.plot(x_values, ρ[-1] , '-', label='Analytical solution')
    ax.plot(x_values, ρ_snapshot[-1], '.', markersize=10, alpha=0.7, label='Simulation')
    ax.set_ylabel(
        r'$\varrho$ $\mathrm{{[{}\,m_{{\odot}}\,{}^{{-3}}]}}$'
        .format(
            significant_figures(
                1/units.m_sun,
                3,
                fmt='TeX',
                incl_zeros=False,
            ),
            unit_length,
        )
    )
    ax.set_title(rf'$t={t:.3g}\,\mathrm{{{unit_time}}}$')
axes[ 0].set_xlim(0, boxsize)
axes[-1].set_xlabel(rf'$x\,\mathrm{{[{unit_length}]}}$')
axes[ 0].legend()
fig.tight_layout()
fig.savefig(fig_file, dpi=150)

# Fluid elements in yz-slices should all have the same values
for fluid, t in zip(fluids, times):
    for fluidscalar in fluid.iterate_fluidscalars():
        # NOTE(review): varnum is assigned but not used below
        varnum = fluidscalar.varnum
        grid = fluidscalar.grid_noghosts[:gridsize, :gridsize, :gridsize]
        for i in range(gridsize):
            yz_slice = grid[i, :, :]
            yz_mean = np.mean(yz_slice)
            # Relative std unless the mean is exactly zero
            if not isclose(
                np.std(yz_slice) if yz_mean == 0 else np.std(yz_slice)/yz_mean,
                0,
                rel_tol=0,
                abs_tol=1e+1*machine_ϵ,
            ):
                abort(
                    f'Non-uniformities have emerged at t = {t} {unit_time} '
                    f'in yz-slices of fluid scalar variable {fluidscalar}.\n'
                    f'See "{fig_file}" for a visualization.'
                )

# Compare ρ from the snapshots to the analytical solution
abs_tol = 1e-2*A
for ρ_i, ρ_snapshot_i, t in zip(ρ, ρ_snapshot, times):
    if not isclose(
        np.mean(abs(ρ_i - ρ_snapshot_i)),
        0,
        rel_tol=0,
        abs_tol=abs_tol,
    ):
        abort(
            f'Fluid evolution differs from the analytical solution '
            f'at t = {t} {unit_time}.\n'
            f'See "{fig_file}" for a visualization.'
        )

# Done analysing
masterprint('done')
|
jmd-dkREPO_NAMEconceptPATH_START.@concept_extracted@concept-master@test@fluid_pressure@analyze.py@.PATH_END.py
|
{
"filename": "multinest.py",
"repo_name": "vallis/libstempo",
"repo_path": "libstempo_extracted/libstempo-master/libstempo/multinest.py",
"type": "Python"
}
|
import math
import os
import re
from ctypes import CFUNCTYPE, POINTER, c_bool, c_double, c_int, c_void_p, cdll, create_string_buffer
import numpy as N
from numpy.ctypeslib import as_array
# Load the MultiNest shared library: first from the system search path,
# then fall back to the copy shipped alongside this module.  Only the
# loader error (OSError) is caught — a bare `except:` previously hid
# unrelated failures such as KeyboardInterrupt.
try:
    lib = cdll.LoadLibrary("libnest3.so")
except OSError:
    lib = cdll.LoadLibrary(os.path.dirname(__file__) + "/libnest3.so")
# if we want to do OS X version detection:
# import platform
# if platform.system() == 'Darwin'
# '.'.join(platform.mac_ver().split('.')[:2]) --> 10.X
# libstempo.multinest.run borrows heavily from Johannes Buchner's pymultinest;
# it requires MultiNest v3.2 patched with cwrapper.f90
def run(
    LogLikelihood,
    Prior,
    n_dims,
    n_params=None,
    n_clustering_params=None,
    wrapped_params=None,
    importance_nested_sampling=True,
    multimodal=True,
    const_efficiency_mode=False,
    n_live_points=400,
    evidence_tolerance=0.5,
    sampling_efficiency=0.8,
    n_iter_before_update=100,
    null_log_evidence=-1e90,
    max_modes=100,
    mode_tolerance=-1e90,
    outputfiles_basename="./multinest-",
    seed=-1,
    verbose=False,
    resume=True,
    context=None,
    write_output=True,
    log_zero=-1e100,
    max_iter=0,
    init_MPI=True,
    dump_callback=None,
):
    """
    Runs MultiNest

    The most important parameters are the two log-probability functions Prior
    and LogLikelihood. They are called by MultiNest.

    Prior should transform the unit cube into the parameter cube. Here
    is an example for a uniform prior::

        def Prior(cube, ndim, nparams):
            for i in range(ndim):
                cube[i] = cube[i] * 10 * math.pi

    The LogLikelihood function gets this parameter cube and should
    return the logarithm of the likelihood.
    Here is the example for the eggbox problem::

        def Loglike(cube, ndim, nparams):
            chi = 1.
            for i in range(ndim):
                chi *= math.cos(cube[i] / 2.)
            return math.pow(2. + chi, 5)

    Some of the parameters are explained below. Otherwise consult the
    MultiNest documentation.

    @param importance_nested_sampling:
        If True, Multinest will use Importance Nested Sampling (INS). Read http://arxiv.org/abs/1306.2144
        for more details on INS. Please read the MultiNest README file before using the INS in MultiNest v3.0.

    @param n_params:
        Total no. of parameters, should be equal to ndims in most cases
        but if you need to store some additional
        parameters with the actual parameters then you need to pass
        them through the likelihood routine.

    @param sampling_efficiency:
        defines the sampling efficiency. 0.8 and 0.3 are recommended
        for parameter estimation & evidence evalutation
        respectively.
        use 'parameter' or 'model' to select the respective default
        values

    @param mode_tolerance:
        MultiNest can find multiple modes & also specify which samples belong to which mode. It might be
        desirable to have separate samples & mode statistics for modes with local log-evidence value greater than a
        particular value in which case Ztol should be set to that value. If there isn't any particularly interesting
        Ztol value, then Ztol should be set to a very large negative number (e.g. -1e90).

    @param evidence_tolerance:
        A value of 0.5 should give good enough accuracy.

    @param n_clustering_params:
        If mmodal is T, MultiNest will attempt to separate out the
        modes. Mode separation is done through a clustering
        algorithm. Mode separation can be done on all the parameters
        (in which case nCdims should be set to ndims) & it
        can also be done on a subset of parameters (in which case
        nCdims < ndims) which might be advantageous as
        clustering is less accurate as the dimensionality increases.
        If nCdims < ndims then mode separation is done on
        the first nCdims parameters.

    @param null_log_evidence:
        If mmodal is T, MultiNest can find multiple modes & also specify
        which samples belong to which mode. It might be
        desirable to have separate samples & mode statistics for modes
        with local log-evidence value greater than a
        particular value in which case nullZ should be set to that
        value. If there isn't any particulrly interesting
        nullZ value, then nullZ should be set to a very large negative
        number (e.g. -1.d90).

    @param init_MPI:
        initialize MPI routines?, relevant only if compiling with MPI

    @param log_zero:
        points with loglike < logZero will be ignored by MultiNest

    @param max_iter:
        maximum number of iterations. 0 is unlimited.

    @param write_output:
        write output files? This is required for analysis.

    @param dump_callback:
        a callback function for dumping the current status
    """
    # Fill in dependent defaults
    if n_params is None:
        n_params = n_dims
    if n_clustering_params is None:
        n_clustering_params = n_dims
    if wrapped_params is None:
        wrapped_params = [0] * n_dims

    # ctypes array of per-parameter periodic-wrapping flags
    WrappedType = c_int * len(wrapped_params)
    wraps = WrappedType(*wrapped_params)

    if sampling_efficiency == "parameter":
        sampling_efficiency = 0.8
    if sampling_efficiency == "model":
        sampling_efficiency = 0.3

    # MV 20130923
    # C callback signatures expected by the patched cwrapper.f90
    loglike_type = CFUNCTYPE(c_double, POINTER(c_double), c_int, c_int, c_void_p)
    dumper_type = CFUNCTYPE(
        c_void_p,
        c_int,
        c_int,
        c_int,
        POINTER(c_double),
        POINTER(c_double),
        POINTER(c_double),
        c_double,
        c_double,
        c_double,
        c_void_p,
    )

    if hasattr(LogLikelihood, "loglike") and hasattr(Prior, "remap") and hasattr(Prior, "prior"):
        # libstempo.like-style objects: Prior maps the unit cube itself.
        # NOTE(review): Prior.premap is called below but only remap/prior
        # are checked by hasattr — confirm premap always exists here.

        def loglike(cube, ndim, nparams, nullcontext):
            # we're not using context with libstempo.like objects
            pprior = Prior.premap(cube)

            # mappers are supposed to throw a ValueError if they get out of range
            try:
                pars = Prior.remap(cube)
            except ValueError:
                return -N.inf

            prior = pprior * Prior.prior(pars)

            return -N.inf if not prior else math.log(prior) + LogLikelihood.loglike(pars)

    else:

        def loglike(cube, ndim, nparams, nullcontext):
            # it's actually easier to use the context, if any, at the Python level
            # and pass a null pointer to MultiNest...
            args = [cube, ndim, nparams] + ([] if context is None else context)

            if Prior:
                Prior(*args)

            return LogLikelihood(*args)

    def dumper(nSamples, nlive, nPar, physLive, posterior, paramConstr, maxLogLike, logZ, logZerr, nullcontext):
        if dump_callback:
            # It's not clear to me what the desired PyMultiNest dumper callback
            # syntax is... but this should pass back the right numpy arrays,
            # without copies. Untested!
            pc = as_array(paramConstr, shape=(nPar, 4))

            dump_callback(
                nSamples,
                nlive,
                nPar,
                as_array(physLive, shape=(nPar + 1, nlive)).T,
                as_array(posterior, shape=(nPar + 2, nSamples)).T,
                (pc[0, :], pc[1, :], pc[2, :], pc[3, :]),  # (mean,std,bestfit,map)
                maxLogLike,
                logZ,
                logZerr,
            )

    # MV 20130923: currently we support only multinest 3.2 (24 parameters),
    # but it would not be a problem to build up the parameter list dynamically
    lib.run(
        c_bool(importance_nested_sampling),
        c_bool(multimodal),
        c_bool(const_efficiency_mode),
        c_int(n_live_points),
        c_double(evidence_tolerance),
        c_double(sampling_efficiency),
        c_int(n_dims),
        c_int(n_params),
        c_int(n_clustering_params),
        c_int(max_modes),
        c_int(n_iter_before_update),
        c_double(mode_tolerance),
        create_string_buffer(outputfiles_basename.encode()),  # MV 20130923: need a regular C string
        c_int(seed),
        wraps,
        c_bool(verbose),
        c_bool(resume),
        c_bool(write_output),
        c_bool(init_MPI),
        c_double(log_zero),
        c_int(max_iter),
        loglike_type(loglike),
        dumper_type(dumper),
        c_void_p(0),
    )
class multinestdata(dict):
    # Container for a loaded MultiNest/emcee run: a dict of per-parameter
    # multinestpar entries, with extra attributes (data, parnames, dirname,
    # ev, comment, tempo, ...) attached by the loaders below.
    pass
class multinestpar(object):
    # Simple attribute bag for a single parameter (val, err, offset, ml, ...).
    pass
# where are the multinest files?
def _findfiles(multinestrun, dirname, suffix="-post_equal_weights.dat"):
# try chains/multinestrun-...
# chains/multinestrun/multinestrun-...
root = [dirname + "/", dirname + "/" + multinestrun]
# and if multinestrun is something like pulsar-model,
# try chains/pulsar/model/pulsar-model-...
if "-" in multinestrun:
tokens = multinestrun.split("-")[:-1]
pulsar, model = "-".join(tokens[:-1]), tokens[-1]
root.append(dirname + "/" + pulsar + "/" + model)
return filter(lambda r: os.path.isfile(r + "/" + multinestrun + suffix), root)
def _getcomment(ret, filename):
try:
ret.comment = open(filename, "r").read()
except IOError:
pass
def _getmeta(ret, filename):
    # Load the run's "-meta.npy" structured array (parameter names, tempo
    # values/errors, optional per-parameter offsets and ML values) and fill
    # `ret` with one multinestpar per parameter, plus ret.tempo for the
    # tempo-fit values. A missing meta file is silently ignored.
    try:
        meta = N.load(filename)
    except IOError:
        return
    ret.parnames = list(meta["name"])
    ret.tempopars = list(meta["val"])  # somewhat legacy?
    ret.tempo = {}
    # row index of the maximum-likelihood sample
    # NOTE(review): assumes the last column of ret.data holds the loglike —
    # confirm against the loaders' file formats
    ml = N.argmax(ret.data[:, -1])
    for i, par in enumerate(ret.parnames):
        ret[par] = multinestpar()
        try:
            # if the meta array carries an "offset" field, add it back to the
            # posterior mean; accessing a missing field raises ValueError
            ret[par].val, ret[par].err = N.mean(ret.data[:, i]) + meta["offset"][i], math.sqrt(N.var(ret.data[:, i]))
            ret[par].offset = meta["offset"][i]
        except ValueError:
            # no "offset" field: report the raw posterior mean
            ret[par].val, ret[par].err = N.mean(ret.data[:, i]), math.sqrt(N.var(ret.data[:, i]))
        if "ml" in meta.dtype.names:
            ret[par].ml = meta["ml"][i]
        else:
            # fall back to the best sample in the chain (offset-corrected)
            ret[par].ml = ret.data[ml, i] + (meta["offset"][i] if "offset" in meta.dtype.names else 0)
        ret.tempo[par] = multinestpar()
        ret.tempo[par].val, ret.tempo[par].err = meta["val"][i], meta["err"][i]
def load_mcmc(mcrun, dirname="."):
    """Load an MCMC run `mcrun` from `dirname`.

    Returns a multinestdata with the full chain in .data, plus metadata and
    comment attached when the corresponding files exist.
    """
    # materialize as a list: _findfiles may yield an iterator, and root[0]
    # below must work (raising IndexError if no run directory was found)
    root = list(_findfiles(mcrun, dirname, "-chain.npy"))
    ret = multinestdata()
    ret.dirname = root[0]
    alldata = N.load("{0}/{1}-chain.npy".format(root[0], mcrun))
    # keep all the steps
    ret.data = alldata[:, :]
    _getmeta(ret, "{0}/{1}-meta.npy".format(root[0], mcrun))
    _getcomment(ret, "{0}/{1}-comment.txt".format(root[0], mcrun))
    return ret
def load_emcee(emceerun, dirname=".", chains=False):
    """Load an emcee run `emceerun` from `dirname`.

    .data holds the last iteration of the walker cloud; with chains=True the
    full (nsteps, nwalkers, npar) array is also attached as .chains.
    """
    # materialize as a list: _findfiles may yield an iterator, and root[0]
    # below must work (raising IndexError if no run directory was found)
    root = list(_findfiles(emceerun, dirname, "-chain.npy"))
    ret = multinestdata()
    ret.dirname = root[0]
    alldata = N.load("{0}/{1}-chain.npy".format(root[0], emceerun))
    # keep the last iteration of the walker cloud
    ret.data = alldata[:, -1, :]
    if chains:
        ret.chains = alldata
    _getmeta(ret, "{0}/{1}-meta.npy".format(root[0], emceerun))
    _getcomment(ret, "{0}/{1}-comment.txt".format(root[0], emceerun))
    return ret
def load(multinestrun, dirname="."):
    """Load a MultiNest run `multinestrun` from `dirname`.

    Falls back to unpacking a <run>.tar.gz archive into a temporary
    directory (removed again before returning). Returns a multinestdata with
    the equal-weight posterior samples in .data (trailing loglike column
    dropped), the global evidence in .ev when available, plus metadata and
    comment.
    """
    root = list(_findfiles(multinestrun, dirname, "-post_equal_weights.dat"))
    if not root:
        # try to find a tar.gz archive
        import tarfile
        import tempfile
        root = list(_findfiles(multinestrun, dirname, ".tar.gz"))
        # context manager closes the (streamed) archive after extraction
        with tarfile.open("{0}/{1}.tar.gz".format(root[0], multinestrun), mode="r|gz") as tar:
            root = [tempfile.mkdtemp(prefix="/tmp/")]
            tar.extractall(path=root[0])
    ret = multinestdata()
    ret.dirname = root[0]
    # get data (drop the trailing loglike column)
    ret.data = N.loadtxt("{0}/{1}-post_equal_weights.dat".format(root[0], multinestrun))[:, :-1]
    # get evidence
    try:
        with open("{0}/{1}-stats.dat".format(root[0], multinestrun), "r") as f:
            lines = f.readlines()
        try:
            ret.ev = float(re.search(r"Global Evidence:\s*(\S*)\s*\+/-\s*(\S*)", lines[0]).group(1))
        except (AttributeError, ValueError):
            # newer MultiNest versions changed the stats-file header; the
            # first pattern then fails to match (re.search returns None)
            ret.ev = float(re.search(r"Global Log-Evidence :\s*(\S*)\s*\+/-\s*(\S*)", lines[0]).group(1))
    except IOError:
        pass
    # get metadata
    _getmeta(ret, "{0}/{1}-meta.npy".format(root[0], multinestrun))
    _getcomment(ret, "{0}/{1}-comment.txt".format(root[0], multinestrun))
    if root[0][:4] == "/tmp":
        # clean up the temporary extraction directory
        import shutil
        shutil.rmtree(root[0])
    return ret
def compress(rootname):
    """Bundle the MultiNest output files for `rootname` into <root>.tar.gz
    next to them, then delete the originals (including the importance-
    sampling files, which are not archived).
    """
    import os
    dirname, filename = os.path.dirname(rootname), os.path.basename(rootname)
    # MultiNest roots conventionally end with '-'; strip it for the archive name
    if filename[-1] == "-":
        filename = filename[:-1]
    files = [
        filename + "-" + ending
        for ending in (
            ".txt",
            "phys_live.points",
            "stats.dat",
            "ev.dat",
            "post_equal_weights.dat",
            "summary.txt",
            "live.points",
            "post_separate.dat",
            "meta.npy",
            "resume.dat",
            "comment.txt",
        )
    ]
    cd = os.getcwd()
    os.chdir(dirname)
    try:
        # NOTE(review): filename is interpolated into a shell command; run
        # names containing spaces or shell metacharacters would break this
        os.system("tar zcf {0}.tar.gz {1}".format(filename, " ".join(files)))
        files_exclude = [filename + "-" + ending for ending in ("IS.iterinfo", "IS.points", "IS.ptprob")]
        for f in files + files_exclude:
            if os.path.isfile(f):
                os.unlink(f)
    finally:
        # always restore the working directory, even if tar/unlink raised
        os.chdir(cd)
|
vallisREPO_NAMElibstempoPATH_START.@libstempo_extracted@libstempo-master@libstempo@multinest.py@.PATH_END.py
|
{
"filename": "xcode.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/styles/xcode.py",
"type": "Python"
}
|
"""
pygments.styles.xcode
~~~~~~~~~~~~~~~~~~~~~
Style similar to the `Xcode` default theme.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Literal
__all__ = ['XcodeStyle']
class XcodeStyle(Style):
    """
    Style similar to the Xcode default colouring theme.
    """
    name = 'xcode'
    styles = {
        Comment: '#177500',
        Comment.Preproc: '#633820',
        String: '#C41A16',
        String.Char: '#2300CE',
        Operator: '#000000',
        Keyword: '#A90D91',
        Name: '#000000',
        Name.Attribute: '#836C28',
        Name.Class: '#3F6E75',
        Name.Function: '#000000',
        Name.Builtin: '#A90D91',
        # In Obj-C code this token is used to colour Cocoa types
        Name.Builtin.Pseudo: '#5B269A',
        Name.Variable: '#000000',
        Name.Tag: '#000000',
        Name.Decorator: '#000000',
        # Workaround for a BUG here: lexer treats multiline method signatures as labels
        Name.Label: '#000000',
        Literal: '#1C01CE',
        Number: '#1C01CE',
        Error: '#000000',
    }
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@styles@xcode.py@.PATH_END.py
|
{
"filename": "test_spherical_convolution.py",
"repo_name": "neuraloperator/neuraloperator",
"repo_path": "neuraloperator_extracted/neuraloperator-main/neuralop/layers/tests/test_spherical_convolution.py",
"type": "Python"
}
|
import pytest
import torch
from tltorch import FactorizedTensor
from ..spherical_convolution import SphericalConv
from ..spherical_convolution import SHT
@pytest.mark.parametrize('factorization', ['ComplexDense', 'ComplexCP', 'ComplexTucker', 'ComplexTT'])
@pytest.mark.parametrize('implementation', ['factorized', 'reconstructed'])
def test_SphericalConv(factorization, implementation):
    """Test for SphericalConv (2D only).

    Checks that a factorized conv and a dense conv sharing the same weight
    agree, that resolution up/down-scaling produces the expected output
    sizes, and that changing the SHT grid between layers round-trips shapes.
    """
    n_modes = (6, 6)
    # A dense conv loaded with the factorized conv's weight must match it.
    factorized_conv = SphericalConv(
        3, 3, n_modes, bias=False, implementation=implementation, factorization=factorization)
    dense_conv = SphericalConv(
        3, 3, n_modes, bias=False, implementation='reconstructed', factorization=None)
    dense_conv.weight = FactorizedTensor.from_tensor(
        factorized_conv.weight.to_tensor(), rank=None, factorization='ComplexDense')
    inp = torch.randn(2, 3, 12, 12)
    torch.testing.assert_close(dense_conv(inp), factorized_conv(inp))
    # Downsampling halves the spatial dimensions.
    layer = SphericalConv(3, 4, n_modes, resolution_scaling_factor=0.5)
    out = layer(torch.randn(2, 3, 12, 12))
    assert list(out.shape[2:]) == [6, 6]
    # Upsampling doubles them; channel count follows out_channels.
    layer = SphericalConv(3, 4, n_modes, resolution_scaling_factor=2)
    out = layer(torch.randn(2, 3, 12, 12))
    assert out.shape[1] == 4  # Check out channels
    assert list(out.shape[2:]) == [24, 24]
    # Changing the SHT grid and back preserves the spatial shape.
    block_0 = SphericalConv(4, 4, n_modes, sht_grids=["equiangular", "legendre-gauss"])
    block_1 = SphericalConv(4, 4, n_modes, sht_grids=["legendre-gauss", "equiangular"])
    inp = torch.randn(2, 4, 12, 12)
    out = block_1(block_0(inp))
    assert out.shape[2:] == inp.shape[2:]
    out = block_1.transform(block_0.transform(inp))
    assert out.shape[2:] == inp.shape[2:]
@pytest.mark.parametrize('grid', ['equiangular', 'legendre-gauss'])
def test_sht(grid):
    """Round-trip test: isht -> sht -> isht must reproduce the signal."""
    nlat = 16
    nlon = 2 * nlat
    batch_size = 2
    # equiangular grids only resolve half the azimuthal modes
    mmax = nlat // 2 if grid == "equiangular" else nlat
    lmax = mmax
    norm = 'ortho'
    sht_handle = SHT(dtype=torch.float32)
    # random bandlimited spectrum (the buffer is exactly (lmax, mmax), so it
    # is fully populated with random coefficients)
    coeffs = torch.randn(batch_size, lmax, mmax, dtype=torch.complex64)
    signal = sht_handle.isht(coeffs, s=(nlat, nlon), grid=grid, norm=norm).to(torch.float32)
    coeffs = sht_handle.sht(signal, s=(lmax, mmax), grid=grid, norm=norm)
    rec = sht_handle.isht(coeffs, s=(nlat, nlon), grid=grid, norm=norm)
    torch.testing.assert_close(signal, rec, rtol=1e-4, atol=1e-4)
|
neuraloperatorREPO_NAMEneuraloperatorPATH_START.@neuraloperator_extracted@neuraloperator-main@neuralop@layers@tests@test_spherical_convolution.py@.PATH_END.py
|
{
"filename": "download_sdo_jsoc.py",
"repo_name": "RobertJaro/InstrumentToInstrument",
"repo_path": "InstrumentToInstrument_extracted/InstrumentToInstrument-master/itipy/download/download_sdo_jsoc.py",
"type": "Python"
}
|
import argparse
import glob
import os
import shutil
from datetime import timedelta, datetime
import drms
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
# Command-line interface: where to store files, which registered JSOC email
# to export with, and which SDO channels (AIA EUV bands + HMI 6173) to fetch.
parser = argparse.ArgumentParser(description='Download SDO data from JSOC')
parser.add_argument('--download_dir', type=str, help='path to the download directory.')
parser.add_argument('--email', type=str, help='registered email address for JSOC.')
parser.add_argument('--channels', '-channels', nargs='+', required=False,
                    default=['171', '193', '211', '304', '6173'],
                    help='subset of channels to load. The order must match the input channels of the model.')
args = parser.parse_args()
download_dir = args.download_dir
channels = args.channels
# 6173 is the HMI magnetogram; everything else is exported as AIA EUV
euv_channels = [c for c in channels if c != '6173']
# one subdirectory per channel
[os.makedirs(os.path.join(download_dir, str(c)), exist_ok=True) for c in channels]
client = drms.Client(verbose=True, email=args.email)
def round_date(t):
    """Round datetime `t` to the nearest full hour (>= 30 min rounds up)."""
    floored = t.replace(second=0, microsecond=0, minute=0)
    return floored + timedelta(hours=1) if t.minute >= 30 else floored
def download(ds):
    # Export the JSOC dataset `ds` as url-tar of FITS files, unpack it, and
    # file each FITS into <download_dir>/<channel>/<rounded-hour date>.fits.
    # Uses module globals: client, download_dir.
    r = client.export(ds, method='url-tar', protocol='fits')
    r.wait()
    download_result = r.download(download_dir)
    # unpack every downloaded archive and remove it
    # NOTE(review): assumes the drms download result exposes the local paths
    # in its `download` column — confirm against the drms version in use
    for f in download_result.download:
        shutil.unpack_archive(f, os.path.join(download_dir))
        os.remove(f)
    for f in glob.glob(os.path.join(download_dir, '*.fits')):
        # NOTE(review): assumes names like <series>.<...>.<date>.<channel>.fits
        # with hmi timestamps ending in '_TAI' and aia ones ending in 'Z'
        f_info = os.path.basename(f).split('.')
        channel = f_info[3]
        if f_info[0] == 'hmi':
            channel = '6173'
            date = round_date(parse(f_info[2][:-4].replace('_', 'T')))
        else:
            date = round_date(parse(f_info[2][:-1]))
        shutil.move(f, os.path.join(download_dir, str(channel), date.isoformat('T', timespec='hours') + '.fits'))
    # drop any remaining stray files in the top-level download dir
    [os.remove(f) for f in glob.glob(os.path.join(download_dir, '*.*'))]
def download_month(year, month):
    """Download one calendar month of data at the 6-hour cadence."""
    start = datetime(year, month, 1, 0, 0, 0)
    # last sample of the month: first of next month minus one 6 h step
    end = start + relativedelta(months=1) - timedelta(hours=6)
    download_date_range(start, end)
def download_date_range(tstart, tend):
    """Download AIA EUV (and HMI magnetograms, when requested) at 6 h
    cadence over [tstart, tend]."""
    t0 = tstart.isoformat('_', timespec='seconds')
    t1 = tend.isoformat('_', timespec='seconds')
    print('Download AIA: %s -- %s' % (t0, t1))
    download('aia.lev1_euv_12s[%sZ-%sZ@6h][%s]{image}' % (t0, t1, ','.join(euv_channels)))
    if '6173' in channels:
        print('Download HMI: %s -- %s' % (t0, t1))
        download('hmi.M_720s[%sZ-%sZ@6h]{magnetogram}' % (t0, t1))
# Walk from the start of SDO science operations to today in 30-day windows,
# downloading each window in turn.
tstart = datetime(2010, 5, 1)
tend = datetime.now()
td = timedelta(days=30)
dates = [tstart + i * td for i in range((tend - tstart) // td)]
for d in dates:
    download_date_range(d, d + td)
|
RobertJaroREPO_NAMEInstrumentToInstrumentPATH_START.@InstrumentToInstrument_extracted@InstrumentToInstrument-master@itipy@download@download_sdo_jsoc.py@.PATH_END.py
|
{
"filename": "test_nirspec.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/extract_2d/tests/test_nirspec.py",
"type": "Python"
}
|
import numpy as np
import pytest
from astropy.io import fits
from astropy.table import Table
from stdatamodels.jwst.datamodels import ImageModel, CubeModel, MultiSlitModel, SlitModel
from jwst.assign_wcs import AssignWcsStep
from jwst.extract_2d.extract_2d_step import Extract2dStep
# WCS keywords, borrowed from NIRCam grism tests
# (reference pointing, pixel scale, and rotation written into the SCI header
# of the synthetic rate files built below)
WCS_KEYS = {'wcsaxes': 2, 'ra_ref': 53.1490299775, 'dec_ref': -27.8168745624,
            'v2_ref': 86.103458, 'v3_ref': -493.227512, 'roll_ref': 45.04234459270135,
            'v3i_yang': 0.0, 'vparity': -1,
            'crpix1': 1024.5, 'crpix2': 1024.5,
            'crval1': 53.1490299775, 'crval2': -27.8168745624,
            'cdelt1': 1.81661111111111e-05, 'cdelt2': 1.8303611111111e-05,
            'ctype1': 'RA---TAN', 'ctype2': 'DEC--TAN',
            'pc1_1': -0.707688557183348, 'pc1_2': 0.7065245261360363,
            'pc2_1': 0.7065245261360363, 'pc2_2': 1.75306861111111e-05,
            'cunit1': 'deg', 'cunit2': 'deg'}
def create_nirspec_hdul(detector='NRS1', grating='G395M', filter_name='F290LP',
                        exptype='NRS_MSASPEC', subarray='FULL', slit=None, nint=1,
                        wcskeys=None):
    """Build a minimal in-memory NIRSpec exposure (primary HDU + SCI HDU).

    Headers carry just enough instrument, subarray, and WCS metadata for
    AssignWcsStep/Extract2dStep to run; SCI data is all ones, and becomes
    3-D (nint planes) when nint > 1.
    """
    if wcskeys is None:
        wcskeys = WCS_KEYS.copy()
    hdul = fits.HDUList()
    phdu = fits.PrimaryHDU()
    phdu.header['TELESCOP'] = 'JWST'
    phdu.header['INSTRUME'] = 'NIRSPEC'
    phdu.header['DETECTOR'] = detector
    phdu.header['FILTER'] = filter_name
    phdu.header['GRATING'] = grating
    phdu.header['PROGRAM'] = '01234'
    phdu.header['TIME-OBS'] = '8:59:37'
    phdu.header['DATE-OBS'] = '2023-01-05'
    phdu.header['EXP_TYPE'] = exptype
    phdu.header['PATT_NUM'] = 1
    phdu.header['SUBARRAY'] = subarray
    phdu.header['XOFFSET'] = 0.0
    phdu.header['YOFFSET'] = 0.0
    # subarray geometry: S200A1 fixed-slit strip, BOTS SUB2048 strip, or full frame
    if subarray == 'SUBS200A1':
        phdu.header['SUBSIZE1'] = 2048
        phdu.header['SUBSIZE2'] = 64
        phdu.header['SUBSTRT1'] = 1
        phdu.header['SUBSTRT2'] = 1041
    elif subarray == 'SUB2048':
        phdu.header['SUBSIZE1'] = 2048
        phdu.header['SUBSIZE2'] = 32
        phdu.header['SUBSTRT1'] = 1
        phdu.header['SUBSTRT2'] = 946
    else:
        phdu.header['SUBSIZE1'] = 2048
        phdu.header['SUBSIZE2'] = 2048
        phdu.header['SUBSTRT1'] = 1
        phdu.header['SUBSTRT2'] = 1
    # MSA exposures need an MSA metadata id/file; fixed slits need FXD_SLIT
    if exptype == 'NRS_MSASPEC':
        phdu.header['MSAMETID'] = 1
        phdu.header['MSAMETFL'] = 'test_msa_01.fits'
    if slit is not None:
        phdu.header['FXD_SLIT'] = slit
        phdu.header['APERNAME'] = f'NRS_{slit}_SLIT'
    scihdu = fits.ImageHDU()
    scihdu.header['EXTNAME'] = "SCI"
    scihdu.header.update(wcskeys)
    # cube for multi-integration (BOTS) data, image otherwise
    if nint > 1:
        scihdu.data = np.ones((nint, phdu.header['SUBSIZE2'], phdu.header['SUBSIZE1']))
    else:
        scihdu.data = np.ones((phdu.header['SUBSIZE2'], phdu.header['SUBSIZE1']))
    hdul.append(phdu)
    hdul.append(scihdu)
    return hdul
def create_msa_hdul():
    """Build a minimal MSA metadata HDUList with SHUTTER_INFO and
    SOURCE_INFO tables for one MSA slitlet (id 12) and one fixed slit."""
    # Two point sources, one in MSA, one fixed slit.
    # Source locations for the fixed slit are placeholders, not realistic.
    shutter_data = {
        'slitlet_id': [12, 12, 12, 100],
        'msa_metadata_id': [1, 1, 1, 1],
        'shutter_quadrant': [4, 4, 4, 0],
        'shutter_row': [251, 251, 251, 0],
        'shutter_column': [22, 23, 24, 0],
        'source_id': [1, 1, 1, 2],
        'background': ['Y', 'N', 'Y', 'N'],
        'shutter_state': ['OPEN', 'OPEN', 'OPEN', 'OPEN'],
        'estimated_source_in_shutter_x': [np.nan, 0.18283921, np.nan, 0.5],
        'estimated_source_in_shutter_y': [np.nan, 0.31907734, np.nan, 0.5],
        'dither_point_index': [1, 1, 1, 1],
        'primary_source': ['N', 'Y', 'N', 'Y'],
        'fixed_slit': ['NONE', 'NONE', 'NONE', 'S200A1']}
    source_data = {
        'program': [95065, 95065],
        'source_id': [1, 2],
        'source_name': ['95065_1', '95065_2'],
        'alias': ['2122', '2123'],
        'ra': [53.139904, 53.15],
        'dec': [-27.805002, -27.81],
        'preimage_id': ['95065001_000', '95065001_000'],
        'stellarity': [1.0, 1.0]}
    shutter_table = Table(shutter_data)
    source_table = Table(source_data)
    hdul = fits.HDUList()
    hdul.append(fits.PrimaryHDU())
    hdul.append(fits.ImageHDU())
    hdul.append(fits.table_to_hdu(shutter_table))
    hdul.append(fits.table_to_hdu(source_table))
    # extension names expected by the MSA metadata readers
    hdul[2].name = 'SHUTTER_INFO'
    hdul[3].name = 'SOURCE_INFO'
    return hdul
@pytest.fixture
def nirspec_msa_rate(tmp_path):
    """Write a full-frame NIRSpec MSA rate file (MSAMETFL pointing into
    tmp_path) and return its path."""
    hdul = create_nirspec_hdul()
    hdul[0].header['MSAMETFL'] = str(tmp_path / 'test_msa_01.fits')
    out = tmp_path / 'test_nrs_msa_rate.fits'
    hdul.writeto(str(out), overwrite=True)
    hdul.close()
    return str(out)
@pytest.fixture
def nirspec_fs_rate(tmp_path):
    """Write a NIRSpec fixed-slit (S200A1, SUBS200A1 subarray) rate file and
    return its path."""
    hdul = create_nirspec_hdul(
        exptype='NRS_FIXEDSLIT', subarray='SUBS200A1', slit='S200A1')
    out = tmp_path / 'test_nrs_fs_rate.fits'
    hdul.writeto(str(out), overwrite=True)
    hdul.close()
    return str(out)
@pytest.fixture
def nirspec_bots_rateints(tmp_path):
    """Write a 3-integration NIRSpec BOTS (S1600A1, SUB2048) rateints file
    and return its path."""
    hdul = create_nirspec_hdul(
        exptype='NRS_BRIGHTOBJ', subarray='SUB2048', slit='S1600A1', nint=3)
    out = tmp_path / 'test_nrs_bots_rateints.fits'
    hdul.writeto(str(out), overwrite=True)
    hdul.close()
    return str(out)
@pytest.fixture
def nirspec_msa_metfl(tmp_path):
    """Write the MSA metadata file that the MSA rate fixture points at."""
    meta = create_msa_hdul()
    out = tmp_path / 'test_msa_01.fits'
    meta.writeto(str(out), overwrite=True)
    meta.close()
    return str(out)
def test_extract_2d_nirspec_msa_fs(nirspec_msa_rate, nirspec_msa_metfl):
    """MSA exposure whose metadata also allocates a fixed slit: extract_2d
    must produce both the MSA slitlet and the S200A1 fixed slit."""
    model = ImageModel(nirspec_msa_rate)
    result = AssignWcsStep.call(model)
    result = Extract2dStep.call(result)
    assert isinstance(result, MultiSlitModel)
    # there should be 2 slits extracted: one MSA, one FS
    assert len(result.slits) == 2
    # the MSA slit has an integer name, slitlet_id matches name
    assert result.slits[0].name == '12'
    assert result.slits[0].slitlet_id == 12
    assert result.slits[0].data.shape == (31, 1355)
    # the FS slit has a string name, slitlet_id matches shutter ID
    assert result.slits[1].name == 'S200A1'
    assert result.slits[1].slitlet_id == 0
    assert result.slits[1].data.shape == (45, 1254)
    model.close()
    result.close()
def test_extract_2d_nirspec_fs(nirspec_fs_rate):
    """Fixed-slit exposure: one S200A1 slit is extracted, and missing dither
    metadata must zero out the source offsets instead of failing."""
    model = ImageModel(nirspec_fs_rate)
    model_wcs = AssignWcsStep.call(model)
    result = Extract2dStep.call(model_wcs)
    assert isinstance(result, MultiSlitModel)
    # there should be 1 slit extracted: FS, S200A1
    assert len(result.slits) == 1
    # the FS slit has a string name, slitlet_id matches shutter ID
    assert result.slits[0].name == 'S200A1'
    assert result.slits[0].slitlet_id == 0
    assert result.slits[0].data.shape == (45, 1254)
    # ensure x_offset, y_offset become zero when dither information is missing
    model_wcs.meta.dither = None
    result = Extract2dStep.call(model_wcs)
    assert result.slits[0].source_xpos == 0.0
    assert result.slits[0].source_ypos == 0.0
    # same when the dither entry exists but its offsets are None
    model_wcs.meta.dither = {"x_offset": None, "y_offset": None}
    result = Extract2dStep.call(model_wcs)
    assert result.slits[0].source_xpos == 0.0
    assert result.slits[0].source_ypos == 0.0
    model.close()
    model_wcs.close()
    result.close()
def test_extract_2d_nirspec_bots(nirspec_bots_rateints):
    """BOTS (bright-object time series) exposure: extract_2d returns a
    single SlitModel cube rather than a MultiSlitModel."""
    model = CubeModel(nirspec_bots_rateints)
    result = AssignWcsStep.call(model)
    result = Extract2dStep.call(result)
    # output is a single slit
    assert isinstance(result, SlitModel)
    # the BOTS slit has a string name, slitlet_id matches shutter ID
    assert result.name == 'S1600A1'
    # 3 integrations are preserved in the extracted cube
    assert result.data.shape == (3, 28, 1300)
    model.close()
    result.close()
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@extract_2d@tests@test_nirspec.py@.PATH_END.py
|
{
"filename": "computeOccurrence-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaseline_kic/.ipynb_checkpoints/computeOccurrence-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import os
import requests
import pandas as pd
from astropy.io import fits
# cStringIO exists only on Python 2; fall back to io.StringIO on Python 3
try:
    from cStringIO import StringIO
except ImportError:
    from io import StringIO
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gamma
from scipy.optimize import minimize
from scipy.interpolate import RectBivariateSpline
import emcee
import corner
import scipy.io as sio
from ipywidgets import FloatProgress
from IPython.display import display
import time
import os.path
from os import path
```
```python
# Analysis configuration: input catalogs, the period/radius box being
# studied, grid resolution, emcee sampler settings, which rate model to fit,
# and which planet-radius column to use.
stellarCatalog = "../stellarCatalogs/dr25_stellar_supp_clean_GK.txt"
pcCatalog = "koiCatalogs/dr25_GK_PCs.csv"
period_rng = (50, 400)
n_period = 57
rp_rng = (0.75, 2.5)
n_rp = 61
# for quick tests
# nWalkers = 6
# nBurnin = 200
# nMcmc = 1000
# for production runs
nWalkers = 16
nBurnin = 1000
nMcmc = 5000
model = "dualPowerLaw"
whichRadii = "kic"
```
```python
def rateModel(x, y, xRange, yRange, theta, model):
    """Evaluate the planet occurrence-rate density at period(s) x and
    radius(radii) y for the parameter vector `theta` of the given `model`.

    All models share a normalized dual power law in period and radius; the
    "Gap" variants subtract a Gaussian valley in radius whose center may
    drift with log-period. Raises ValueError for an unknown model name.
    """
    gap = None  # (depth, width, offset, slope) when a valley term applies
    if model == "dualPowerLaw":
        f0, alpha, beta = theta
    elif model == "dualPowerLawGap":
        f0, alpha, beta, gd, gw, gapOffset, gapSlope = theta
        gap = (gd, gw, gapOffset, gapSlope)
    elif model == "dualPowerLawGapFixedSlope":
        # constant-radius valley, to match radius marginals
        f0, alpha, beta, gd, gw, gapOffset = theta
        gap = (gd, gw, gapOffset, 0)
    elif model == "dualPowerLawFixedValley":
        # valley shape frozen at previously fitted values
        f0, alpha, beta = theta
        gap = (0.29297043, 0.14683756, 0.29125824, 0)
    else:
        raise ValueError('Bad model name');
    ap1 = alpha + 1
    bp1 = beta + 1
    # power laws normalized so the double integral over the ranges is f0
    xTerm = ap1 * (x**alpha) / (xRange[1]**ap1 - xRange[0]**ap1)
    yTerm = bp1 * (y**beta) / (yRange[1]**bp1 - yRange[0]**bp1)
    if gap is None:
        return f0 * xTerm * yTerm
    gd, gw, gapOffset, gapSlope = gap
    # Gaussian valley centered on a (possibly period-dependent) radius
    gapModel = 10**(gapSlope * np.log10(x) + gapOffset)
    gapDist2 = (gapModel - y)**2
    return f0 * xTerm * (yTerm - gd * np.exp(-gapDist2 / (2 * gw * gw)))
def getModelLabels(model):
    # TeX axis labels (e.g. for corner plots) matching each model's theta.
    # NOTE(review): the label order [F, beta, alpha] differs from the theta
    # order [f0, alpha, beta] used by rateModel/initRateModel — confirm the
    # intended mapping before relying on these labels.
    if model == "dualPowerLaw":
        return [r"$F$", r"$\beta$", r"$\alpha$"]
    elif model == "dualPowerLawGap":
        return [r"$F$", r"$\beta$", r"$\alpha$", r"$d_g$", r"$w_g$", r"$o_g$", r"$s_g$"]
    elif model == "dualPowerLawGapFixedSlope":
        return [r"$F$", r"$\beta$", r"$\alpha$", r"$d_g$", r"$w_g$", r"$o_g$"]
    elif model == "dualPowerLawFixedValley":
        return [r"$F$", r"$\beta$", r"$\alpha$"]
    else:
        raise ValueError('Bad model name');
def initRateModel(model):
    """Return the default starting parameter vector theta for `model`.

    Raises ValueError for an unknown model name.
    """
    # the plain and fixed-valley models share the same 3-parameter start
    if model in ("dualPowerLaw", "dualPowerLawFixedValley"):
        return [0.75, -0.53218, -0.5]
    if model == "dualPowerLawGap":
        # f0, alpha, beta, gap depth/width/offset/slope
        return [0.75, -0.69, -0.1, 0.22, 0.1, 0.26, 0.0]
    if model == "dualPowerLawGapFixedSlope":
        # as above, but the gap slope is fixed inside rateModel
        return [0.75, -0.69, -0.1, 0.22, 0.1, 0.26]
    raise ValueError('Bad model name');
def lnPoisprior(theta, model):
    # Log of the (unnormalized) flat box prior for each model's theta:
    # returns 1.0 inside the bounds (an arbitrary positive constant — only
    # relative posterior values matter) and -inf outside. Bounds mix
    # inclusive and exclusive ends; unknown model names raise ValueError.
    if model == "dualPowerLaw":
        if 0.0 <= theta[0] <= 5 \
        and -5.0 <= theta[1] <= 5.0 \
        and -5.0 <= theta[2] <= 5.0:
            return 1.0
    elif model == "dualPowerLawGap":
        if 0.0 <= theta[0] <= 5 \
        and -5.0 <= theta[1] <= 5.0 \
        and -5.0 <= theta[2] <= 5.0 \
        and 0 <= theta[3] < 5 \
        and 0.1 <= theta[4] < 0.3 \
        and 0.2 <= theta[5] < 0.4 \
        and -0.0 <= theta[6] < 0.05:
            return 1.0
    elif model == "dualPowerLawGapFixedSlope":
        if 0.0 <= theta[0] <= 5 \
        and -5.0 <= theta[1] <= 5.0 \
        and -5.0 <= theta[2] <= 5.0 \
        and 0 <= theta[3] < 0.6 \
        and 0.1 <= theta[4] < 0.3 \
        and 0.2 <= theta[5] < 0.4:
            return 1.0
    elif model == "dualPowerLawFixedValley":
        if 0.0 <= theta[0] <= 5 \
        and -5.0 <= theta[1] <= 5.0 \
        and -5.0 <= theta[2] <= 5.0:
            return 1.0
    else:
        raise ValueError('Bad model name');
    # reached when the model name is valid but theta is out of bounds
    # print(theta)
    return -np.inf
```
```python
def medianAndErrorbars(data):
    """Summarize posterior samples as median with +/- 1-sigma error bars.

    For 1-D input returns [median, err_plus, err_minus]; for 2-D input
    (samples along axis 0) returns one (median, err_plus, err_minus) tuple
    per column.
    """
    lo, med, hi = np.percentile(data, [16, 50, 84], axis=0)
    if data.ndim > 1:
        return [(m, h - m, m - l) for l, m, h in zip(lo, med, hi)]
    return [med, hi - med, med - lo]
def printMedianAndErrorbars(data):
    """Format a 1-D sample set as 'median^{+plus}_{-minus}' (TeX style);
    multi-dimensional input just prints a warning and returns None."""
    stats = medianAndErrorbars(data)
    if data.ndim > 1:
        print("printMedianAndErrorbars only works for 1D arrays")
        return None
    med, eplus, eminus = stats
    return "{:.3f}".format(med) + "^{+" + "{:.3f}".format(eplus) + "}_{-" + "{:.3f}".format(eminus) + "}"
```
```python
```
```python
```
```python
from scipy.integrate import romb
def integrate2DGrid(g, dx, dy):
    """Integrate a sampled 2-D grid with nested Romberg quadrature.

    Each side must have 2**n + 1 samples (a Romberg requirement); dx and dy
    are the sample spacings along the two axes.
    """
    ny, nx = g.shape
    if ny % 2 == 0 or nx % 2 == 0:
        raise ValueError('integrate2DGrid requires a grid with odd number of points on a side');
    # integrate along the last axis first, then the remaining axis
    return romb(romb(g, dx), dy)
def integrateRateModel(periodRange, rpRange, theta, model):
    # Integrate the rate model over [periodRange] x [rpRange] using Romberg
    # quadrature on a 33x33 grid. theta may be a single parameter vector or
    # a 2-D array of vectors (one integral per row, with a progress bar for
    # long runs).
    # NOTE(review): the model is normalized with the notebook-global
    # period_rng/rp_rng while the integration grid spans the *arguments*;
    # this permits integrating a sub-box of the full range — confirm intent.
    nPts = 2**5+1 # must be 2**n + 1
    pGrid, rGrid = np.meshgrid(np.linspace(periodRange[0], periodRange[1], nPts),
                               np.linspace(rpRange[0], rpRange[1], nPts),
                               indexing="ij")
    dp = (pGrid[1,0]-pGrid[0,0])
    dr = (rGrid[0,1]-rGrid[0,0])
    if theta.ndim == 1:
        y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta, model)
        return integrate2DGrid(y, dp, dr)
    else: # assume first dimension is array of thetas
        ret = np.zeros(theta.shape[0])
        # show a progress bar only for long sample arrays
        if len(ret) > 100:
            f = FloatProgress(min=0, max=len(ret))
            display(f)
        for i in range(len(ret)):
            y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta[i,:], model)
            ret[i] = integrate2DGrid(y, dp, dr)
            if len(ret) > 100:
                f.value += 1
        return ret
def integratePopTimesComp(periodRange, rpRange, theta, model, compGrid):
    # Expected number of detections: integrate rate-model x completeness
    # over the box, sampled at compGrid's resolution (which must therefore
    # be 2**n + 1 per side for integrate2DGrid / Romberg).
    # NOTE(review): like integrateRateModel, the model is normalized with
    # the global period_rng/rp_rng, not the range arguments — confirm.
    nP = compGrid.shape[0]
    nR = compGrid.shape[1]
    pGrid, rGrid = np.meshgrid(np.linspace(periodRange[0], periodRange[1], nP),
                               np.linspace(rpRange[0], rpRange[1], nR),
                               indexing="ij")
    dp = (pGrid[1,0]-pGrid[0,0])
    dr = (rGrid[0,1]-rGrid[0,0])
    y = rateModel(pGrid, rGrid, period_rng, rp_rng, theta, model)*compGrid
    return integrate2DGrid(y, dp, dr)
```
```python
# population inference functions
def lnlike(theta):
    # Inhomogeneous-Poisson log-likelihood: sum of log rate densities at the
    # observed KOIs minus the expected number of detections (rate times
    # completeness, midpoint-summed over the grid cells).
    # Uses notebook globals: period_grid, rp_grid, period_rng, rp_rng,
    # summedCompleteness, model, and (defined elsewhere in the notebook —
    # not visible here) vol, koi_periods, koi_rps.
    pop = rateModel(period_grid, rp_grid, period_rng, rp_rng, theta, model) * summedCompleteness
    pop = 0.5 * (pop[:-1, :-1] + pop[1:, 1:])
    norm = np.sum(pop * vol)
    ll = np.sum(np.log(rateModel(koi_periods, koi_rps, period_rng, rp_rng, theta, model))) - norm
    return ll if np.isfinite(ll) else -np.inf
# Log-posterior. The priors are uniform boxes, so inside the bounds this is
# just the log-likelihood; outside it is -inf.
def lnprob(theta):
    bound_check = lnPoisprior(theta, model)
    return lnlike(theta) if np.isfinite(bound_check) else -np.inf
# Negative log-likelihood for optimizers (which minimize); non-finite
# likelihoods map to a large penalty instead of NaN/inf.
def nll(theta):
    value = lnlike(theta)
    return 1e15 if not np.isfinite(value) else -value
```
```python
# population analysis functions
# We'll reuse these functions to plot all of our results.
def make_plot(pop_comp, x0, x, y, ax):
    # Marginalize a stack of 2-D rate models over the `y` axis and draw the
    # median curve with 1- and 2-sigma credible bands onto `ax`.
    # NOTE(review): axis layout of pop_comp (samples, y-axis, x-axis) is
    # inferred from the calls in plot_results — confirm.
    # print("in make_plot, pop_comp:")
    # print(pop_comp.shape)
    # midpoint values along the marginalized axis ...
    pop = 0.5 * (pop_comp[:, 1:] + pop_comp[:, :-1])
    # print("pop:")
    # print(pop.shape)
    # ... times the bin widths: trapezoid-style integral over y
    pop = np.sum(pop * np.diff(y)[None, :, None], axis=1)
    # per-x percentiles across the posterior samples (2-sigma, 1-sigma, median)
    a, b, c, d, e = np.percentile(pop * np.diff(x)[0], [2.5, 16, 50, 84, 97.5], axis=0)
    ax.fill_between(x0, a, e, color="k", alpha=0.1, edgecolor="none")
    ax.fill_between(x0, b, d, color="k", alpha=0.3, edgecolor="none")
    ax.plot(x0, c, "k", lw=1)
def plot_results(samples):
    """Plot observed and intrinsic radius/period distributions for a set of
    posterior samples. Returns (gamma_earth, figure), where gamma_earth is
    the rate density evaluated at Earth's period and radius for each sample.

    Uses notebook globals: period_grid, rp_grid, period_rng, rp_rng, model,
    summedCompleteness, period, rp, and (defined elsewhere) koi_rps,
    koi_periods.
    """
    # Loop through the samples and compute the list of population models.
    samples = np.atleast_2d(samples)
    pop = np.empty((len(samples), period_grid.shape[0], period_grid.shape[1]))
    gamma_earth = np.empty((len(samples)))
    for i, p in enumerate(samples):
        pop[i] = rateModel(period_grid, rp_grid, period_rng, rp_rng, p, model)
        # NOTE(review): the factor 365. presumably rescales the per-day rate
        # density at P = 365.25 d — confirm the intended units
        gamma_earth[i] = rateModel(365.25, 1.0, period_rng, rp_rng, p, model) * 365.
    fig, axes = plt.subplots(2, 2, figsize=(10, 8))
    fig.subplots_adjust(wspace=0.4, hspace=0.4)
    # Integrate over period.
    dx = 0.25
    x = np.arange(rp_rng[0], rp_rng[1] + dx, dx)
    n, _ = np.histogram(koi_rps, x)
    fsize = 18
    # Plot the observed radius distribution.
    ax = axes[0, 0]
    make_plot(pop * summedCompleteness[None, :, :], rp, x, period, ax)
    ax.errorbar(0.5*(x[:-1]+x[1:]), n, yerr=np.sqrt(n), fmt=".k",
                capsize=0)
    ax.set_xlim(rp_rng[0], rp_rng[1])
    ax.set_xlabel("$R_p\,[R_\oplus]$", fontsize = fsize)
    ax.set_ylabel("# of detected planets", fontsize = fsize)
    # Plot the true radius distribution.
    ax = axes[0, 1]
    make_plot(pop, rp, x, period, ax)
    ax.set_xlim(rp_rng[0], rp_rng[1])
    ax.set_ylim(0, 0.37)
    ax.set_xlabel("$R_p\,[R_\oplus]$", fontsize = fsize)
    ax.set_ylabel("$\mathrm{d}N / \mathrm{d}R$; $\Delta R = 0.25\,R_\oplus$", fontsize = fsize)
    # Integrate over period.
    dx = 31.25
    x = np.arange(period_rng[0], period_rng[1] + dx, dx)
    n, _ = np.histogram(koi_periods, x)
    # Plot the observed period distribution.
    ax = axes[1, 0]
    make_plot(np.swapaxes(pop * summedCompleteness[None, :, :], 1, 2), period, x, rp, ax)
    ax.errorbar(0.5*(x[:-1]+x[1:]), n, yerr=np.sqrt(n), fmt=".k",
                capsize=0)
    ax.set_xlim(period_rng[0], period_rng[1])
    ax.set_ylim(0, 79)
    ax.set_xlabel("$P\,[\mathrm{days}]$", fontsize = fsize)
    ax.set_ylabel("# of detected planets", fontsize = fsize)
    # Plot the true period distribution.
    ax = axes[1, 1]
    make_plot(np.swapaxes(pop, 1, 2), period, x, rp, ax)
    ax.set_xlim(period_rng[0], period_rng[1])
    ax.set_ylim(0, 0.27)
    ax.set_xlabel("$P\,[\mathrm{days}]$", fontsize = fsize)
    ax.set_ylabel("$\mathrm{d}N / \mathrm{d}P$; $\Delta P = 31.25\,\mathrm{days}$", fontsize = fsize)
    return gamma_earth, fig
```
```python
def getRadii(catalog):
    """Select the planet-radius column of `catalog` according to the
    notebook-global `whichRadii` switch; unknown values raise ValueError."""
    selectors = {
        "corrected": lambda c: c.corrected_prad,
        "corrected Minus 1Sigma": lambda c: c.corrected_prad - c.corrected_prad_err1,
        "kic": lambda c: c.koi_prad,
    }
    try:
        selector = selectors[whichRadii]
    except KeyError:
        raise ValueError('Bad whichRadii string');
    return selector(catalog)
```
```python
# Load the stellar sample and the planet-candidate catalog, then keep only
# the PCs falling inside the period/radius box under study.
stellarTargets = pd.read_csv(stellarCatalog)
base_kois = pd.read_csv(pcCatalog)
m = (period_rng[0] <= base_kois.koi_period) & (base_kois.koi_period <= period_rng[1])
thisRadii = getRadii(base_kois)
# also require a finite radius inside the radius range
m &= np.isfinite(thisRadii) & (rp_rng[0] <= thisRadii) & (thisRadii <= rp_rng[1])
kois = pd.DataFrame(base_kois[m])
allKois = kois
```
```python
```
```python
# Regular period/radius grid on which the rate model and completeness are
# evaluated (indexing="ij": rows follow period, columns follow radius).
period = np.linspace(period_rng[0], period_rng[1], n_period)
rp = np.linspace(rp_rng[0], rp_rng[1], n_rp)
period_grid, rp_grid = np.meshgrid(period, rp, indexing="ij")
periodShape = period_grid.shape
```
```python
# Load the precomputed completeness products for the stellar sample: HDU 0
# holds the summed probability grids, HDU 1 the list of KIC ids included.
inputgrid = "../completenessContours/out_sc0_GK_baseline_kic.fits.gz"
hdulist = fits.open(inputgrid)
cumulative_array = hdulist[0].data
kiclist = np.asarray(hdulist[1].data, dtype=np.int32)
# NOTE(review): layer 0 -> detection probability, layer 1 -> detection times
# vetting; inferred from the variable names — confirm against the
# contour-generation code.
probdet = np.transpose(cumulative_array[0])
probtot = np.transpose(cumulative_array[1])
prihdr = hdulist[0].header
# grid geometry of the completeness file, read from its header
min_comp_period = prihdr["MINPER"]
max_comp_period = prihdr["MAXPER"]
n_comp_period = prihdr["NPER"]
min_comp_rp = prihdr["MINRP"]
max_comp_rp = prihdr["MAXRP"]
n_comp_rp = prihdr["NRP"]
# print "KIC list length" + '{:6d}'.format(kiclist.size)
period_want = np.linspace(min_comp_period, max_comp_period, n_comp_period)
rp_want = np.linspace(min_comp_rp, max_comp_rp, n_comp_rp)
period_want2d, rp_want2d = np.meshgrid(period_want, rp_want)
# interpolate the numerical grids onto the period_grid, rp_grid space
#print("size probtot = " + str(np.shape(probtot)))
#print("size period_want = " + str(np.shape(period_want)))
#print("size rp_want = " + str(np.shape(rp_want)))
numCompVeInterp = RectBivariateSpline(period_want, rp_want, probtot)
numProbDetInterp = RectBivariateSpline(period_want, rp_want, probdet)
```
```python
```
```python
# Evaluate the interpolated completeness products on the analysis grid.
summedCompleteness = numCompVeInterp(period, rp)
summedProbDet = numProbDetInterp(period, rp)
```
```python
# Plot the summed detection*vetting efficiency with the KOI sample overlaid
# (point size/color encode reliability); dashed box marks the 50-200 d,
# 1-2 Re region used later.
# contourLevels = np.arange(1e-2, 1, 5e-2)
contourLevels = [0.01, 0.05, 0.1, 0.25, 0.5, 0.75, 1.0]
fig, ax = plt.subplots(figsize=(15,10));
plt.pcolor(period_grid, rp_grid, summedProbDet, cmap="BuGn")
# Contours show the per-star average (summed grid / number of KICs).
c = plt.contour(period_grid, rp_grid, summedProbDet / kiclist.size, contourLevels,
                colors="k", alpha=0.8)
scf = plt.scatter(kois.koi_period, getRadii(kois), cmap="plasma",
                  c=kois.reliability, edgecolors='k', s=100*kois.totalReliability, alpha = 1.0)
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Instrumental FP Reliability");
#c = plt.contour(period_grid, rp_grid, numCompVe / kiclist.size,
#                colors="k", alpha=0.8)
plt.ylim(0.5, 2.5)
plt.xlim(50, 400)
plt.clabel(c, fontsize=12, inline=1, fmt="%.3f")
plt.title("Summed detection*vetting efficiency, " + whichRadii + " radii", fontsize = 18)
plt.xlabel("period [days]", fontsize = 18)
plt.ylabel("$R_p \, [R_\oplus]$", fontsize = 18);
plt.plot([200, 200], [1, 2], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle='--', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle='--', linewidth=1)
```
[<matplotlib.lines.Line2D at 0x7f8f097a7510>]

```python
# Publication figure: summed completeness with KOIs + radius error bars;
# dotted box is the F1 region, dashed box the zeta-Earth region.
# contourLevels = np.arange(1e-2, 1, 5e-2)
contourLevels = np.arange(1e-3, 1e-2, 1e-3)
contourLevels = np.insert(contourLevels, 0, [1e-4, 5e-4])
fig, ax = plt.subplots(figsize=(15,10));
plt.pcolor(period_grid, rp_grid, summedCompleteness, cmap="BuGn")
c = plt.contour(period_grid, rp_grid, summedCompleteness / kiclist.size, contourLevels,
                colors="k", alpha=0.8)
# err2 is the (negative) lower uncertainty, hence the sign flip.
ax.errorbar(kois.koi_period, getRadii(kois),
            yerr = [-kois.koi_prad_err2, kois.koi_prad_err1],
            fmt="none", ecolor="k", alpha = 0.15, marker = None);
scf = plt.scatter(kois.koi_period, getRadii(kois), cmap="plasma",
                  c=kois.totalReliability, edgecolors='k', s=100*kois.totalReliability, alpha = 1.0)
cbh = plt.colorbar(scf);
cbh.ax.set_ylabel("Reliability", fontsize = 24);
#c = plt.contour(period_grid, rp_grid, numCompVe / kiclist.size,
#                colors="k", alpha=0.8)
plt.ylim(0.75, 2.5)
plt.xlim(50, 400)
plt.clabel(c, fontsize=12, inline=1, fmt="%.4f")
# plt.title("DR25 PC Average detection*vetting efficiency", fontsize = 18)
plt.tick_params(labelsize = 18)
plt.xlabel("period [days]", fontsize = 24)
plt.ylabel("$R_p \, [R_\oplus]$", fontsize = 24);
plt.plot([200, 200], [1, 2], color='k', linestyle=':', linewidth=1)
plt.plot([50, 200], [1, 1], color='k', linestyle=':', linewidth=1)
plt.plot([50, 200], [2, 2], color='k', linestyle=':', linewidth=1)
plt.plot([0.8*365, 0.8*365], [0.8, 1.2], color='k', linestyle='--', linewidth=1)
plt.plot([1.2*365, 1.2*365], [0.8, 1.2], color='k', linestyle='--', linewidth=1)
plt.plot([0.8*365, 1.2*365], [0.8, 0.8], color='k', linestyle='--', linewidth=1)
plt.plot([0.8*365, 1.2*365], [1.2, 1.2], color='k', linestyle='--', linewidth=1)
plt.text(180, 1.05, "$F_1$", fontsize = 24)
plt.text(300, 0.85, "$\zeta_{\oplus}$", fontsize = 24)
plt.savefig("summedCompleteness.pdf",bbox_inches='tight')
```

```python
# Quick check: upper period bound (days) of the zeta-Earth box drawn above.
1.2*365
```
438.0
```python
```
Compute a basic occurrence rate without reliability
```python
# Maximum-likelihood fit of the occurrence-rate model to the full KOI sample
# (no reliability weighting). Bounds depend on the chosen model's parameters.
kois = allKois
if model == "dualPowerLaw":
    bounds = [(0, 5), (-5, 5), (-5, 5)]
elif model == "dualPowerLawGap":
    bounds = [(0, 5), (-5, 5), (-5, 5), (0, 5), (0.0, 0.3), (0.2, 0.4), (-0.2, 0.2)]
elif model == "dualPowerLawGapFixedSlope":
    bounds = [(0, 5), (-5, 5), (-5, 5), (0, 5), (0.0, 0.3), (0.2, 0.4)]
elif model == "dualPowerLawFixedValley":
    bounds = [(0, 5), (-5, 5), (-5, 5)]
# The ln-likelihood function given at the top of this post.
koi_periods = np.array(kois.koi_period)
koi_rps = np.array(getRadii(kois))
# koi_rps = getRadii(kois)
# Cell areas of the (period, radius) grid, used by the likelihood integral.
vol = np.diff(period_grid, axis=0)[:, :-1] * np.diff(rp_grid, axis=1)[:-1, :]
theta_0 = initRateModel(model)
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
print(r.x)
ge, fig = plot_results(r.x);
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: divide by zero encountered in log
import sys
[ 0.66267151 -0.56815132 -0.49815819]

```python
# Rate density at Earth's period/radius from the best fit, scaled by 365
# (presumably converting the per-day density to a per-ln-period rate — TODO confirm).
rateModel(365.25, 1.0, period_rng, rp_rng, r.x, model)*365
```
0.32419644456826213
```python
##################################################################
postName = "occurenceRatePosteriors/occurenceRatePosteriors_noreliability.npy"
if path.exists(postName):
samples_noreliability = np.load(postName)
ndim = samples_noreliability.shape[1]
else:
ndim, nwalkers = len(r.x), nWalkers
pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=8)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, nBurnin)
sampler.reset()
# Production.
start_time = time.time()
pos, _, _ = sampler.run_mcmc(pos, nMcmc)
print("--- %s seconds ---" % (time.time() - start_time))
samples_noreliability = sampler.flatchain
np.save(postName, samples_noreliability)
```
--- 18.1960279942 seconds ---
```python
```
```python
# Corner plot of the no-reliability posterior and the derived Gamma_Earth.
##################################################################
##################################################################
corner.corner(samples_noreliability, labels=getModelLabels(model));
##################################################################
gamma_earth_no_reliability, fig = plot_results(samples_noreliability)
print(np.mean(gamma_earth_no_reliability))
##################################################################
```
0.3489529957190273


```python
# Report median +/- 1-sigma for the no-reliability fit parameters.
print("F = " + printMedianAndErrorbars(samples_noreliability[:,0]))
print("radius exp (alpha) = " + printMedianAndErrorbars(samples_noreliability[:,2]))
print("period exp (beta) = " + printMedianAndErrorbars(samples_noreliability[:,1]))
```
F = 0.672^{+0.115}_{-0.095}
radius exp (alpha) = -0.517^{+0.398}_{-0.390}
period exp (beta) = -0.559^{+0.150}_{-0.151}
```python
# Histogram of log10(Gamma_Earth) without reliability, plus summary numbers.
plt.hist(np.log10(gamma_earth_no_reliability), 50, histtype="step", color="k", density=True)
plt.gca().set_yticklabels([])
plt.title("the rate of Earth analogs: " + str(10**np.mean(np.log10(gamma_earth_no_reliability))))
plt.xlabel(r"$\log_{10}\Gamma_\oplus = \left. \log_{10}\mathrm{d}N / \mathrm{d}\ln P \, \mathrm{d}\ln R_p \right |_\oplus$");
print("Mean Gamma_Earth = {0}".format(10**np.mean(np.log10(gamma_earth_no_reliability))))
print("Gamma at p=365 days, r=1Re without reliability = " + printMedianAndErrorbars(gamma_earth_no_reliability))
```
Mean Gamma_Earth = 0.329138178584
Gamma at p=365 days, r=1Re without reliability = 0.331^{+0.133}_{-0.098}

```python
# Integrated occurrence over two boxes (no reliability weighting).
F1Dist_nr = integrateRateModel([50.,200.], [1., 2.], samples_noreliability, model)
print("1-2Re, 50-200 Days without reliability = " + printMedianAndErrorbars(F1Dist_nr))
F1Dist_nr = integrateRateModel([50.,300.], [0.75, 2.5], samples_noreliability, model)
print("0.75-2.5Re, 50-300 Days without reliability = " + printMedianAndErrorbars(F1Dist_nr))
```
FloatProgress(value=0.0, max=80000.0)
1-2Re, 50-200 Days without reliability = 0.217^{+0.032}_{-0.028}
FloatProgress(value=0.0, max=80000.0)
0.75-2.5Re, 50-300 Days without reliability = 0.537^{+0.085}_{-0.070}
Compute an occurrence rate with reliability
```python
# Reliability-weighted posterior: bootstrap over nTrials random KOI
# subsamples (each KOI kept with probability totalReliability), run a short
# MCMC per trial, and concatenate all chains. Cached to disk.
nTrials = 100
postName = "occurenceRatePosteriors/occurenceRatePosteriors.npy"
# postName = "occurenceRatePosteriors/occurenceRatePosteriors_FAReliability.npy"
if path.exists(postName):
    allSamples = np.load(postName)
    ndim = allSamples.shape[1]
else:
    f = FloatProgress(min=0, max=nTrials)
    display(f)
    allKois = kois
    for mCount in range(nTrials):
        # randomly select kois
        koiSelect = (np.random.rand(len(allKois)) < allKois.totalReliability)
        # koiSelect = (np.random.rand(len(allKois)) < allKois.reliability)
        kois = allKois[koiSelect]
        # print(str(mCount) + " of " + str(nTrials) + ", selected " + str(len(kois))
        #       + " kois out of " + str(len(allKois)) + " after reliability cut")
        koi_periods = np.array(kois.koi_period)
        koi_rps = np.array(getRadii(kois))
        # Re-fit the ML solution for this subsample, then sample around it.
        theta_0 = initRateModel(model)
        r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
        ##################################################################
        ndim, nwalkers = len(r.x), 2*len(r.x)
        pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
        sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
        # Burn in.
        pos, _, _ = sampler.run_mcmc(pos, 400)
        sampler.reset()
        # Production.
        pos, _, _ = sampler.run_mcmc(pos, 2000)
        samples = sampler.flatchain
        if mCount == 0:
            allSamples = samples
        else:
            allSamples = np.concatenate((allSamples, samples))
        f.value += 1
    np.save(postName, allSamples)
```
FloatProgress(value=0.0)
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: divide by zero encountered in log
import sys
```python
```
```python
# Corner plot of the reliability-weighted posterior.
corner.corner(allSamples, labels=getModelLabels(model));
plt.savefig("mcmcSampleDist.pdf",bbox_inches='tight')
```

```python
# Side-by-side parameter summaries, with and without reliability weighting.
modelLabels = getModelLabels(model)
for i in range(0,ndim):
    print("MCMC no reliability " + modelLabels[i] + "=" + printMedianAndErrorbars(samples_noreliability[:,i]))
for i in range(0,ndim):
    print("MCMC with reliability " + modelLabels[i] + "=" + printMedianAndErrorbars(allSamples[:,i]))
```
MCMC no reliability $F$=0.672^{+0.115}_{-0.095}
MCMC no reliability $\beta$=-0.559^{+0.150}_{-0.151}
MCMC no reliability $\alpha$=-0.517^{+0.398}_{-0.390}
MCMC with reliability $F$=0.483^{+0.097}_{-0.078}
MCMC with reliability $\beta$=-0.876^{+0.193}_{-0.196}
MCMC with reliability $\alpha$=-0.373^{+0.459}_{-0.444}
```python
# Final results plot from a thinned (every 10th) posterior sample.
gamma_earth, fig = plot_results(allSamples[:-1:10,:])
plt.savefig("planetResults.pdf",bbox_inches='tight')
```

```python
# Contour map of the fitted rate density at the posterior-median parameters.
fig, ax = plt.subplots(figsize=(10,5));
rateGrid = rateModel(period_grid, rp_grid, period_rng, rp_rng, np.median(allSamples, 0), model)
CS = ax.contour(period_grid, rp_grid, rateGrid);
ax.clabel(CS, inline=1, fontsize=10);
plt.xlabel("Period", fontsize = 18);
plt.ylabel("Radius", fontsize = 18);
plt.title("Occurrence Rate Fit", fontsize = 24);
```

```python
# Compare log10(Gamma_Earth) with (black) and without (blue) reliability.
plt.hist(np.log10(gamma_earth), 50, histtype="step", color="k", density=True)
plt.hist(np.log10(gamma_earth_no_reliability), 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.title("the rate of Earth analogs: " + str(round(10**np.mean(np.log10(gamma_earth)), 3))
          + "/" + str(round(10**np.mean(np.log10(gamma_earth_no_reliability)), 3)))
plt.xlabel(r"$\log_{10}\Gamma_\oplus = \left. \log_{10}\mathrm{d}N / \mathrm{d}\ln P \, \mathrm{d}\ln R_p \right |_\oplus$");
```

```python
# Same comparison in linear Gamma_Earth.
plt.hist(gamma_earth, 50, histtype="step", color="k", density=True)
plt.hist(gamma_earth_no_reliability, 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.title("the rate of Earth analogs: " + str(round(np.median(gamma_earth), 3))
          + "/" + str(round(np.median(gamma_earth_no_reliability), 3)))
plt.xlabel(r"$\Gamma_\oplus$");
```

```python
# Headline Gamma_Earth numbers, with and without reliability.
print("Gamma at p=365 days, r=1Re = " + printMedianAndErrorbars(gamma_earth))
print("Gamma at p=365 days, r=1Re without reliability = " + printMedianAndErrorbars(gamma_earth_no_reliability))
```
Gamma at p=365 days, r=1Re = 0.171^{+0.091}_{-0.061}
Gamma at p=365 days, r=1Re without reliability = 0.331^{+0.133}_{-0.098}
```python
# F1 box (50-200 d, 1-2 Re) occurrence distributions for both posteriors.
F1Dist = integrateRateModel([50.,200.], [1., 2.], allSamples[:-1:10,:], model)
F1Dist_nr = integrateRateModel([50.,200.], [1., 2.], samples_noreliability, model)
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)
```python
# Plot the F1 distributions; dashed grey lines mark literature comparison values.
greyLevel = "0.7"
plt.hist(F1Dist, 50, histtype="step", color="k", density=True);
plt.hist(F1Dist_nr, 50, histtype="step", color="b", density=True);
plt.title("Distribution for 50-200 days, 1-2 $R_\oplus$", fontsize=18);
plt.plot([0.34, 0.34], [0, 10], color=greyLevel, linestyle='--', linewidth=1)
plt.plot([0.23, 0.23], [0, 10], color=greyLevel, linestyle='--', linewidth=1)
plt.savefig("f1Dist.pdf",bbox_inches='tight')
```

```python
# F1 summaries: at the posterior-median parameters and as full distributions.
print("median theta: 1-2Re, 50-200 Days = " + str(integrateRateModel([50.,200.],
      [1., 2.], np.median(allSamples, 0), model)))
print("median theta: 1-2Re, 50-200 Days without reliability = " + str(integrateRateModel([50.,200.],
      [1., 2.], np.median(samples_noreliability, 0), model)))
print("1-2Re, 50-200 Days = " + printMedianAndErrorbars(F1Dist))
print("1-2Re, 50-200 Days without reliability = " + printMedianAndErrorbars(F1Dist_nr))
```
median theta: 1-2Re, 50-200 Days = 0.17813683305122982
median theta: 1-2Re, 50-200 Days without reliability = 0.21837012191322197
1-2Re, 50-200 Days = 0.177^{+0.029}_{-0.026}
1-2Re, 50-200 Days without reliability = 0.217^{+0.032}_{-0.028}
```python
# zeta-Earth box (0.8-1.2 yr, 0.8-1.2 Re) occurrence distributions.
zetaDist = integrateRateModel([.8*365.25,1.2*365.25],
                              [0.8,1.2], allSamples[:-1:10,:], model)
zetaDist_nr = integrateRateModel([.8*365.25,1.2*365.25],
                                 [0.8,1.2], samples_noreliability, model)
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)
```python
# Plot zeta-Earth distributions; grey dashes mark comparison values.
plt.hist(zetaDist, 50, histtype="step", color="k", density=True);
plt.hist(zetaDist_nr, 50, histtype="step", color="b", density=True);
plt.title("Distribution of $\zeta_\oplus$", fontsize=18);
plt.plot([0.1, 0.1], [0, 40], color=greyLevel, linestyle='--', linewidth=1)
plt.plot([0.03, 0.03], [0, 40], color=greyLevel, linestyle='--', linewidth=1)
plt.savefig("zetaEarthDist.pdf",bbox_inches='tight')
```

```python
# zeta-Earth summaries.
print("median theta: zeta-Earth = " + str(integrateRateModel([.8*365.25,1.2*365.25],
      [0.8,1.2], np.median(allSamples, 0), model)))
print("median theta: zeta-Earth without reliability = " + str(integrateRateModel([.8*365.25,1.2*365.25],
      [0.8,1.2], np.median(samples_noreliability, 0), model)))
print("zeta-Earth = " + printMedianAndErrorbars(zetaDist))
print("zeta-Earth without reliability = " + printMedianAndErrorbars(zetaDist_nr))
```
median theta: zeta-Earth = 0.02818063108861002
median theta: zeta-Earth without reliability = 0.053965456771412526
zeta-Earth = 0.028^{+0.015}_{-0.010}
zeta-Earth without reliability = 0.054^{+0.022}_{-0.016}
```python
# SAG13 habitable-zone box (237-860 d, 0.5-1.5 Re) distributions.
sag13HZDist = integrateRateModel([237,860], [0.5,1.5], allSamples[:-1:10,:], model)
plt.hist(sag13HZDist, 50, histtype="step", color="k", density=True);
sag13HZDist_nr = integrateRateModel([237,860], [0.5,1.5], samples_noreliability, model)
plt.hist(sag13HZDist_nr, 50, histtype="step", color="b", density=True);
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)

```python
# SAG13 HZ summaries.
print("median theta: SAG13 HZ = " + str(integrateRateModel([237,860],
      [0.5,1.5], np.median(allSamples, 0), model)))
print("median theta: SAG13 HZ without reliability = " + str(integrateRateModel([237,860],
      [0.5,1.5], np.median(samples_noreliability, 0), model)))
print("SAG13 HZ = " + printMedianAndErrorbars(sag13HZDist))
print("SAG13 HZ without reliability = " + printMedianAndErrorbars(sag13HZDist_nr))
```
median theta: SAG13 HZ = 0.2353931163939619
median theta: SAG13 HZ without reliability = 0.4960269584511472
SAG13 HZ = 0.233^{+0.150}_{-0.091}
SAG13 HZ without reliability = 0.495^{+0.243}_{-0.165}
```python
# Hsu & Ford HZ box (237-500 d, 1.0-1.75 Re) distributions.
hsuFordDist = integrateRateModel([237,500], [1.0,1.75], allSamples[:-1:10,:], model)
plt.hist(hsuFordDist, 50, histtype="step", color="k", density=True);
hsuFordDist_nr = integrateRateModel([237,500], [1.0,1.75], samples_noreliability, model)
plt.hist(hsuFordDist_nr, 50, histtype="step", color="b", density=True);
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)

```python
# Hsu & Ford HZ summaries.
print("median theta: Hsu and Ford HZ = " + str(integrateRateModel([237,500], [1.0,1.75], np.median(allSamples, 0), model)))
print("median theta: Hsu and Ford HZ without reliability = " + str(integrateRateModel([237,500], [1.0,1.75], np.median(samples_noreliability, 0), model)))
print("Hsu and Ford HZ = " + printMedianAndErrorbars(hsuFordDist))
print("Hsu and Ford HZ without reliability = " + printMedianAndErrorbars(hsuFordDist_nr))
```
median theta: Hsu and Ford HZ = 0.08626443957561757
median theta: Hsu and Ford HZ without reliability = 0.15657596298872278
Hsu and Ford HZ = 0.086^{+0.033}_{-0.024}
Hsu and Ford HZ without reliability = 0.156^{+0.044}_{-0.036}
```python
# Zink HZ box (0.61-2.216 yr, 0.72-1.7 Re) distributions.
habDist = integrateRateModel([0.61*365,2.216*365], [0.72,1.7], allSamples[:-1:10,:], model)
plt.hist(habDist, 50, histtype="step", color="k", density=True);
habDist_nr = integrateRateModel([0.61*365,2.216*365], [0.72,1.7], samples_noreliability, model)
plt.hist(habDist_nr, 50, histtype="step", color="b", density=True);
```
FloatProgress(value=0.0, max=120000.0)
FloatProgress(value=0.0, max=80000.0)

```python
# Zink HZ summaries.
print("median theta: Zink HZ = " + str(integrateRateModel([0.61*365,2.216*365], [0.72,1.7], np.median(allSamples, 0), model)))
print("median theta: Zink HZ without reliability = " + str(integrateRateModel([0.61*365,2.216*365], [0.72,1.7], np.median(samples_noreliability, 0), model)))
print("Zink HZ = " + printMedianAndErrorbars(habDist))
print("Zink HZ without reliability = " + printMedianAndErrorbars(habDist_nr))
```
median theta: Zink HZ = 0.21156397376359976
median theta: Zink HZ without reliability = 0.42332436832068776
Zink HZ = 0.210^{+0.107}_{-0.071}
Zink HZ without reliability = 0.422^{+0.160}_{-0.119}
```python
# Compare the two reliability measures across the full KOI sample.
plt.hist(allKois.reliability, 30, histtype="step", color="b", density=True);
plt.hist(allKois.totalReliability, 30, histtype="step", color="k", density=True);
```

```javascript
%%javascript
IPython.notebook.save_notebook()
```
<IPython.core.display.Javascript object>
```bash
%%bash -s "$model"
jupyter nbconvert --to html computeOccurrence.ipynb
mv computeOccurrence.html htmlArchive/computeOccurrence_$1.html
```
[NbConvertApp] Converting notebook computeOccurrence.ipynb to html
[NbConvertApp] Writing 1367505 bytes to computeOccurrence.html
```python
```
```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaseline_kic@.ipynb_checkpoints@computeOccurrence-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "runatm.py",
"repo_name": "dzesmin/TEA",
"repo_path": "TEA_extracted/TEA-master/tea/runatm.py",
"type": "Python"
}
|
#! /usr/bin/env python
############################# BEGIN FRONTMATTER ################################
# #
# TEA - calculates Thermochemical Equilibrium Abundances of chemical species #
# #
# TEA is part of the PhD dissertation work of Dr. Jasmina #
# Blecic, who developed it with coding assistance from #
# undergraduate M. Oliver Bowman and under the advice of #
# Prof. Joseph Harrington at the University of Central Florida, #
# Orlando, Florida, USA. #
# #
# Copyright (C) 2014-2016 University of Central Florida #
# #
# This program is reproducible-research software: you can #
# redistribute it and/or modify it under the terms of the #
# Reproducible Research Software License as published by #
# Prof. Joseph Harrington at the University of Central Florida, #
# either version 0.3 of the License, or (at your option) any later #
# version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# Reproducible Research Software License for more details. #
# #
# You should have received a copy of the Reproducible Research #
# Software License along with this program. If not, see #
# <http://planets.ucf.edu/resources/reproducible/>. The license's #
# preamble explains the situation, concepts, and reasons surrounding #
# reproducible research, and answers some common questions. #
# #
# This project was started with the support of the NASA Earth and #
# Space Science Fellowship Program, grant NNX12AL83H, held by #
# Jasmina Blecic, Principal Investigator Joseph Harrington, and the #
# NASA Science Mission Directorate Planetary Atmospheres Program, #
# grant NNX12AI69G. #
# #
# See the file ACKNOWLEDGING in the top-level TEA directory for #
# instructions on how to acknowledge TEA in publications. #
# #
# Visit our Github site: #
# https://github.com/dzesmin/TEA/ #
# #
# Reach us directly at: #
# Jasmina Blecic <jasmina@nyu.edu> #
# #
############################## END FRONTMATTER #################################
import numpy as np
import sys
import ntpath
import os
import shutil
import time
import multiprocessing as mp
import ctypes
import warnings
import six
import readconf as rc
import iterate as it
import format as form
import makeheader as mh
import readatm as ra
import balance as bal
import warnings
warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning)
location_TEA = os.path.realpath(os.path.dirname(__file__) + "/..") + "/"
# =============================================================================
# This program runs TEA over a pre-atm file that contains multiple T-P points.
# It prints on screen the current T-P line from the pre-atm file, the iteration
# number at which the set precision (tolerance error) is accomplished and if
# maximum iteration is reached informs the user that the minimization is done.
# Example:
# Layer 100:
# 5
# The solution has converged to the given tolerance error.
#
# The program is executed with in-shell inputs:
# runatm.py <MULTITP_INPUT_FILE_PATH> <DIRECTORY_NAME>
# Example: ../TEA/tea/runatm.py ../TEA/tea/doc/examples/multiTP/atm_inputs/multiTP_Example.atm example_multiTP
# =============================================================================
def worker(pressure, temp, b, free_energy, heat, stoich_arr, guess,
           maxiter, verb, times, xtol, savefiles, start, end, abn, n):
    """
    Multiprocessing thermochemical-equilibrium calculation.

    Runs the TEA iteration for pre-atm layers in [start, end) and stores
    the normalized abundances into row q of the shared output array `abn`.

    Parameters
    ----------
    pressure, temp: per-layer pressure and temperature arrays.
    b: per-layer elemental abundances.
    free_energy, heat: thermochemical data (from mh.read_gdata).
    stoich_arr: stoichiometric coefficients per species.
    guess: initial (x, x_bar) abundance guess; carried layer to layer.
    maxiter, verb, times, xtol, savefiles: TEA configuration values.
    start, end: layer-index range handled by this process.
    abn: shared (n_runs, nspec) output array.
    n: index of this process.

    Note: also reads the module globals ncpu, location_out, desc, speclist
    and atom_name set by the top-level script below.
    """
    # Switch off verbosity if using more than one CPU
    #if ncpu > 1 and n != 0:
    if ncpu > 1:
        verb, times = 0, False
    save_info = None
    for q in np.arange(start, end):
        if verb >= 1:
            print('\nLayer {:d}:'.format(q+1))
        # Free energies g/RT at this layer's temperature.
        g_RT = mh.calc_gRT(free_energy, heat, temp[q])
        if savefiles:
            save_info = location_out, desc, speclist, temp[q]
            hfolder = location_out + desc + "/headers/"
            mh.write_header(hfolder, desc, temp[q], pressure[q], speclist,
                            atom_name, stoich_arr, b[q], g_RT)
        # Execute main TEA loop for the current line, run iterate.py
        y, x, delta, y_bar, x_bar, delta_bar = it.iterate(pressure[q],
            stoich_arr, b[q], g_RT, maxiter, verb, times, guess, xtol, save_info)
        # Seed the next layer with this layer's converged solution.
        guess = x, x_bar
        abn[q] = x/x_bar
# ---------------------- Top-level script: setup ----------------------
tstart = time.time()

# Read configuration-file parameters:
TEApars, PREATpars = rc.readcfg()
maxiter, savefiles, verb, times, abun_file, location_out, xtol, ncpu = TEApars

# Print license
if verb>=1:
    print("\n\
================= Thermal Equilibrium Abundances (TEA) =================\n\
A program to calculate species abundances under thermochemical equilibrium.\n\
\n\
Copyright (C) 2014-2016 University of Central Florida.\n\
\n\
This program is reproducible-research software. See the Reproducible\n\
Research Software License that accompanies the code.\n\
\n\
Direct contact: \n\
Jasmina Blecic <jasmina@nyu.edu> \n\
========================================================================\n")

# Correct directory names
if location_out[-1] != '/':
    location_out += '/'

# Retrieve pre-atm file
infile = sys.argv[1:][0]
# Retrieve current output directory name given by user
desc = sys.argv[1:][1]

# Check if config file exists in the working directory
TEA_config = 'TEA.cfg'
try:
    f = open(TEA_config)
except IOError:
    print("\nConfig file is missing. Place TEA.cfg in the working directory.\n")

# If input file does not exist break
try:
    f = open(infile)
except:
    raise IOError ("\nPre-atmospheric file does not exist.\n")
# Set up locations of necessary scripts and directories of files
thermo_dir = location_TEA + "lib/gdata"

# Output/inputs directories are only created when results are being saved
# (verbosity 2 or savefiles); inputs_dir/out_dir are undefined otherwise.
if verb==2 or savefiles==True:
    inputs_dir = location_out + desc + "/inputs/"
    out_dir = location_out + desc + "/results/"
    if os.path.exists(out_dir):
        six.moves.input(" Output directory " + str(location_out + desc) +
                        "/\n already exists.\n"
                        " Press enter to continue and overwrite existing files,\n"
                        " or quit and choose another output name.\n")
    # Create directories
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    if not os.path.exists(inputs_dir):
        os.makedirs(inputs_dir)
    # Inform user if TEA.cfg file already exists in inputs/ directory
    if os.path.isfile(inputs_dir + TEA_config):
        print(" " + str(TEA_config) + " overwritten in inputs/ directory.")
    # Copy TEA.cfg file to current inputs directory
    shutil.copy2(TEA_config, inputs_dir + TEA_config)
    # Inform user if abundances file already exists in inputs/ directory
    head, abun_filename = ntpath.split(abun_file)
    if os.path.isfile(inputs_dir + abun_filename):
        print(" " + str(abun_filename) + " overwritten in inputs/ directory.")
    # Copy abundances file to inputs/ directory
    shutil.copy2(abun_file, inputs_dir + abun_filename)
    # Inform user if pre-atm file already exists in inputs/ directory
    head, preatm_filename = ntpath.split(infile)
    if os.path.isfile(inputs_dir + preatm_filename):
        print(" pre-atm file " + str(preatm_filename) +
              " overwritten in inputs/ directory.")
    else:
        # Copy pre-atm file to inputs/ directory
        shutil.copy2(infile, inputs_dir + preatm_filename)

# Read pre-atm file
n_runs, speclist, pres_arr, temp_arr, atom_arr, atom_name, end_head = \
    ra.readatm(infile)
# Number of output species:
nspec = np.size(speclist)

# Correct species list for only species found in thermo_dir
gdata_files = os.listdir(thermo_dir)
good_spec = []
for i in np.arange(nspec):
    spec_file = speclist[i] + '.txt'
    if spec_file in gdata_files:
        good_spec = np.append(good_spec, speclist[i])
    else:
        # Warn and drop species with no thermochemical data file.
        print('Species ' + speclist[i] + ' does not exist in /' \
              + thermo_dir.split("/")[-1] + ' ! IGNORED THIS SPECIES.')
# Update list of valid species
speclist = np.copy(good_spec)
# =================== Start writing final atm file ===================
# Open final atm file for writing, keep open to add new lines
# If running in multiprocessor mode with verbosity zero, suppress savefiles
fout_name = desc + '.tea'
if verb==2 or savefiles==True:
    fout_name = out_dir + desc + '.tea'
fout = open(fout_name, 'w+')

# Write a header file
fout.write(
    "# This is a final TEA output file with calculated abundances (mixing "
    "fractions) for all listed species."
    "\n# Units: pressure (bar), temperature (K), abundance (unitless).\n\n")
fout.write('#SPECIES\n')

# Write corrected species list into pre-atm file and continue
for i in np.arange(nspec):
    fout.write(speclist[i] + ' ')
fout.write("\n\n")
fout.write("#TEADATA\n")

# Write data header from the pre-atm file into each column of atm file
fout.write('#Pressure'.ljust(11) + ' ')
fout.write('Temp'.ljust(8) + ' ')
for i in np.arange(nspec):
    fout.write(speclist[i].ljust(10)+' ')
fout.write('\n')

# Times / speed check for pre-loop runtime
if times:
    tnew = time.time()
    elapsed = tnew - tstart
    print("\npre-loop: " + str(elapsed))
# Suppress warning that ctypeslib will throw
with warnings.catch_warnings():
    warnings.simplefilter("ignore")
    # Allocate abundances matrix for all species and all T-Ps
    # (shared memory so worker processes can write into it directly).
    sm_abn = mp.Array(ctypes.c_double, n_runs*nspec)
    abn = np.ctypeslib.as_array(sm_abn.get_obj()).reshape((n_runs, nspec))

# Bound ncpu to the machine capacity
ncpu = np.clip(ncpu, 1, mp.cpu_count())
chunksize = int(n_runs/float(ncpu)+1)

# Load gdata
free_energy, heat = mh.read_gdata(speclist, thermo_dir)
stoich_arr, elem_arr = mh.read_stoich(speclist)
temp_arr = np.array(temp_arr, np.double)
pres_arr = np.array(pres_arr, np.double)
atom_arr = np.array(atom_arr, np.double)

# Use only elements with non-null stoichiometric values
eidx = np.in1d(atom_name, elem_arr)
atom_arr = atom_arr[:,eidx]
atom_name = atom_name[eidx]

# Sort stoich_arr according to atom_name
sidx = np.zeros(len(atom_name), int)
for i in np.arange(len(atom_name)):
    sidx[i] = np.where(elem_arr == atom_name[i])[0][0]
stoich_arr = stoich_arr[:,sidx]

# Time / speed testing for balance.py
if times:
    ini = time.time()

# Initial abundances guess
guess = bal.balance(stoich_arr, atom_arr[0], verb)

# Retrieve balance runtime
if times:
    fin = time.time()
    elapsed = fin - ini
    print("balance.py: " + str(elapsed))
# ============== Execute TEA for each T-P ==============
# Loop over all lines in pre-atm file and execute TEA loop
# Each process handles a contiguous chunk of layers.
processes = []
for n in np.arange(ncpu):
    start = n * chunksize
    end = np.amin(((n+1) * chunksize, n_runs))
    proc = mp.Process(target=worker, args=(pres_arr, temp_arr, atom_arr,
        free_energy, heat, stoich_arr, guess, maxiter, verb, times,
        xtol, savefiles, start, end, abn, n))
    processes.append(proc)
    proc.start()

# Make sure all processes finish their work
for n in np.arange(ncpu):
    processes[n].join()

# Write layers output
for q in np.arange(n_runs):
    fout.write("{:.4e} {:7.2f} ".format(pres_arr[q], temp_arr[q]))
    for i in np.arange(nspec):
        fout.write('{:1.4e} '.format(abn[q,i]))
    fout.write('\n')

# Close atm file
fout.close()

# Print on-screen
if verb >= 1:
    print("\n Species abundances calculated.\n Created TEA atmospheric file.")

# Time / speed testing
if verb > 1:
    tend = time.time()
    elapsed = tend - tstart
    print("Overall run time: " + str(elapsed) + " seconds")
|
dzesminREPO_NAMETEAPATH_START.@TEA_extracted@TEA-master@tea@runatm.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/bar/marker/line/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# On Python < 3.7 (no module-level __getattr__) or during static type
# checking, import every validator eagerly; otherwise register them for
# lazy relative import to keep package import time low.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._widthsrc import WidthsrcValidator
    from ._width import WidthValidator
    from ._reversescale import ReversescaleValidator
    from ._colorsrc import ColorsrcValidator
    from ._colorscale import ColorscaleValidator
    from ._coloraxis import ColoraxisValidator
    from ._color import ColorValidator
    from ._cmin import CminValidator
    from ._cmid import CmidValidator
    from ._cmax import CmaxValidator
    from ._cauto import CautoValidator
    from ._autocolorscale import AutocolorscaleValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._widthsrc.WidthsrcValidator",
            "._width.WidthValidator",
            "._reversescale.ReversescaleValidator",
            "._colorsrc.ColorsrcValidator",
            "._colorscale.ColorscaleValidator",
            "._coloraxis.ColoraxisValidator",
            "._color.ColorValidator",
            "._cmin.CminValidator",
            "._cmid.CmidValidator",
            "._cmax.CmaxValidator",
            "._cauto.CautoValidator",
            "._autocolorscale.AutocolorscaleValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@bar@marker@line@__init__.py@.PATH_END.py
|
{
"filename": "_opacity.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/violin/_opacity.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacityValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``violin.opacity`` property."""

    def __init__(self, plotly_name="opacity", parent_name="violin", **kwargs):
        # Generated defaults; callers may override any of them via kwargs.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("max", 1)
        kwargs.setdefault("min", 0)
        super(OpacityValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@violin@_opacity.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/streamtube/hoverlabel/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for ``streamtube.hoverlabel.font.shadow``."""

    def __init__(
        self, plotly_name="shadow", parent_name="streamtube.hoverlabel.font", **kwargs
    ):
        # Generated defaults; callers may override any of them via kwargs.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "none")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@streamtube@hoverlabel@font@_shadow.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/volume/contour/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``volume.contour.color`` (default edit_type "calc")."""

    def __init__(self, plotly_name="color", parent_name="volume.contour", **kwargs):
        # Default only applies when the caller did not pass edit_type.
        kwargs.setdefault("edit_type", "calc")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@volume@contour@_color.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/legend/title/font/__init__.py",
"type": "Python"
}
|
import sys
# Python < 3.7 lacks module-level __getattr__ (PEP 562), so the validator
# submodules must be imported eagerly.  On newer interpreters the imports are
# deferred until first attribute access to keep plotly's import time down.
if sys.version_info < (3, 7):
    from ._size import SizeValidator
    from ._family import FamilyValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import
    # relative_import wires up lazy __getattr__/__dir__ for the listed names.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._size.SizeValidator", "._family.FamilyValidator", "._color.ColorValidator"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@legend@title@font@__init__.py@.PATH_END.py
|
{
"filename": "_ticktext.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/colorbar/_ticktext.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for ``isosurface.colorbar.ticktext``."""

    def __init__(
        self, plotly_name="ticktext", parent_name="isosurface.colorbar", **kwargs
    ):
        # Schema default; caller-supplied edit_type takes precedence.
        kwargs.setdefault("edit_type", "calc")
        super(TicktextValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@colorbar@_ticktext.py@.PATH_END.py
|
{
"filename": "run_cosmotest.py",
"repo_name": "phirling/pyc2ray",
"repo_path": "pyc2ray_extracted/pyc2ray-main/test/archive/244Mpc_test/run_cosmotest.py",
"type": "Python"
}
|
# Driver script: pyc2ray reionization test on the 244 Mpc N-body box.
# Steps through a list of redshift slices, raytracing between each pair.
import sys
# Make the in-tree pyc2ray package importable when run from test/archive/244Mpc_test.
sys.path.append("../../")
import numpy as np, time
import pyc2ray as pc2r
from pyc2ray.utils.other_utils import get_redshifts_from_output
from astropy import units as u
# ======================================================================
# Example 2 for pyc2ray: Cosmological simulation from N-body
# ======================================================================
# TODO: find a way to replace this line
import tools21cm as t2c
# Fixes the comoving box size assumed by tools21cm's helper routines.
t2c.set_sim_constants(boxsize_cMpc=244)
# Global parameters
num_steps_between_slices = 2 # Number of timesteps between redshift slices
paramfile = sys.argv[1] # Name of the parameter file
N = 250 # Mesh size
use_octa = True # Determines which raytracing algorithm to use
# Raytracing Parameters
max_subbox = 100 # Maximum subbox when using C2Ray raytracing
r_RT = 40 # When using C2Ray raytracing, sets the subbox size. When using OCTA, sets the octahedron size
# Create C2Ray object
#sim = pc2r.C2Ray_CubeP3M(paramfile=paramfile, Nmesh=N, use_gpu=use_octa)
sim = pc2r.C2Ray_244Test(paramfile=paramfile, Nmesh=N, use_gpu=use_octa)
# Get redshift list (test case)
#zred_array = np.loadtxt(sim.inputs_basename+'redshifts.txt', dtype=float)
zred_array = np.loadtxt(sim.inputs_basename+'redshifts_checkpoints.txt', dtype=float)
# check for resume simulation
# On resume, restart at the slice whose redshift is closest to the checkpointed sim.zred.
if(sim.resume):
    i_start = np.argmin(np.abs(zred_array - sim.zred))
else:
    i_start = 0
# Measure time
tinit = time.time()
# NOTE(review): prev_i_zdens / prev_i_zsourc appear unused in this script — confirm before removing.
prev_i_zdens, prev_i_zsourc = -1, -1
# Loop over redshifts
for k in range(i_start, len(zred_array)-1):
    zi = zred_array[k] # Start redshift
    zf = zred_array[k+1] # End redshift
    pc2r.printlog(f"\n=================================", sim.logfile)
    pc2r.printlog(f"Doing redshift {zi:.3f} to {zf:.3f}", sim.logfile)
    pc2r.printlog(f"=================================\n", sim.logfile)
    # Compute timestep of current redshift slice
    dt = sim.set_timestep(zi, zf, num_steps_between_slices)
    # Write output
    sim.write_output(zi)
    # Read input files
    sim.read_density(z=zi)
    # Read source files
    # NOTE(review): mass='hm' presumably selects halo-mass weighting — confirm against read_sources.
    srcpos, normflux = sim.read_sources(file='%ssources_hdf5/%.3f-coarsest_wsubgrid_sources.hdf5' %(sim.inputs_basename, zi), mass='hm', ts=num_steps_between_slices*dt)
    # Set redshift to current slice redshift
    sim.zred = zi
    # Loop over timesteps
    for t in range(num_steps_between_slices):
        tnow = time.time()
        pc2r.printlog(f"\n --- Timestep {t+1:n}. Redshift: z = {sim.zred : .3f} Wall clock time: {tnow - tinit : .3f} seconds --- \n", sim.logfile)
        # Evolve Cosmology: increment redshift and scale physical quantities (density, proper cell size, etc.)
        sim.cosmo_evolve(dt)
        # Evolve the simulation: raytrace -> photoionization rates -> chemistry -> until convergence
        sim.evolve3D(dt, normflux, srcpos, r_RT, max_subbox)
    # Evolve cosmology over final half time step to reach the correct time for next slice (see note in c2ray_base.py)
    sim.cosmo_evolve(0)
    # Write final output
    sim.write_output(zf)
|
phirlingREPO_NAMEpyc2rayPATH_START.@pyc2ray_extracted@pyc2ray-main@test@archive@244Mpc_test@run_cosmotest.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "njcuk9999/apero-drs",
"repo_path": "apero-drs_extracted/apero-drs-main/setup/README.md",
"type": "Markdown"
}
|
# APERO setup
For full details see
|
njcuk9999REPO_NAMEapero-drsPATH_START.@apero-drs_extracted@apero-drs-main@setup@README.md@.PATH_END.py
|
{
"filename": "tracer_model.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/ImSim/tracer_model.py",
"type": "Python"
}
|
from lenstronomy.ImSim.image_model import ImageModel
from lenstronomy.LightModel.light_model import LightModel
from lenstronomy.LensModel.lens_model import LensModel
from lenstronomy.ImSim.image2source_mapping import Image2SourceMapping
from lenstronomy.Util import util
import numpy as np
class TracerModelSource(ImageModel):
    """Tracer model class, inherits ImageModel.

    Predicts an observed tracer map as the surface-brightness-weighted,
    PSF-convolved average of a tracer source profile:
    conv(tracer * brightness) / conv(brightness).
    """
    def __init__(
        self,
        data_class,
        psf_class=None,
        lens_model_class=None,
        source_model_class=None,
        lens_light_model_class=None,
        point_source_class=None,
        extinction_class=None,
        tracer_source_class=None,
        kwargs_numerics=None,
        likelihood_mask=None,
        psf_error_map_bool_list=None,
        kwargs_pixelbased=None,
        tracer_partition=None,
        tracer_type="LINEAR",
    ):
        """
        :param data_class: ImageData() instance
        :param psf_class: PSF() instance
        :param lens_model_class: LensModel() instance
        :param source_model_class: LightModel() instance
        :param lens_light_model_class: LightModel() instance
        :param point_source_class: PointSource() instance
        :param tracer_source_class: LightModel() instance describing the tracers of the source
        :param kwargs_numerics: keyword arguments passed to the Numerics module
        :param likelihood_mask: 2d boolean array of pixels to be counted in the likelihood calculation/linear
            optimization
        :param psf_error_map_bool_list: list of boolean of length of point source models.
            Indicates whether PSF error map is used for the point source model stated as the index.
        :param kwargs_pixelbased: keyword arguments with various settings related to the pixel-based solver
            (see SLITronomy documentation) being applied to the point sources.
        :param tracer_partition: in case of tracer models for specific sub-parts of the surface brightness model
            [[list of light profiles, list of tracer profiles], [list of light profiles, list of tracer profiles], [...], ...]
        :type tracer_partition: None or list
        :param tracer_type: LINEAR or LOG. If the tracer is in log units, it is converted to linear units, summed,
            and then converted back to log units.
        :type tracer_type: str
        """
        # Default: every pixel participates in the likelihood.
        if likelihood_mask is None:
            likelihood_mask = np.ones_like(data_class.data)
        self.likelihood_mask = np.array(likelihood_mask, dtype=bool)
        self._mask1d = util.image2array(self.likelihood_mask)
        # Default partition: a single [light, tracer] pair with k=None,
        # i.e. all light profiles paired with all tracer profiles.
        if tracer_partition is None:
            tracer_partition = [[None, None]]
        self._tracer_partition = tracer_partition
        if tracer_type not in ["LINEAR", "LOG"]:
            raise Exception(
                "Unknown input tracer_type: {0}. Only two tracer types are currently supported: LINEAR and LOG. Please convert your tracer to linear/log units.".format(
                    tracer_type
                )
            )
        self._tracer_type = tracer_type
        super(TracerModelSource, self).__init__(
            data_class,
            psf_class=psf_class,
            lens_model_class=lens_model_class,
            source_model_class=source_model_class,
            lens_light_model_class=lens_light_model_class,
            point_source_class=point_source_class,
            extinction_class=extinction_class,
            kwargs_numerics=kwargs_numerics,
            kwargs_pixelbased=kwargs_pixelbased,
        )
        if psf_error_map_bool_list is None:
            psf_error_map_bool_list = [True] * len(
                self.PointSource.point_source_type_list
            )
        self._psf_error_map_bool_list = psf_error_map_bool_list
        # Empty models stand in for absent components so the mapping below is
        # always well-defined.
        if tracer_source_class is None:
            tracer_source_class = LightModel(light_model_list=[])
        if lens_model_class is None:
            lens_model_class = LensModel(lens_model_list=[])
        self.tracer_mapping = Image2SourceMapping(
            lens_model=lens_model_class, source_model=tracer_source_class
        )
        self.tracer_source_class = tracer_source_class
    def tracer_model(
        self,
        kwargs_tracer_source,
        kwargs_lens,
        kwargs_source,
        kwargs_extinction=None,
        kwargs_special=None,
        de_lensed=False,
    ):
        """Tracer model as a convolved surface brightness weighted quantity conv(tracer
        * surface brightness) / conv(surface brightness)

        :param kwargs_tracer_source: list of keyword argument dicts for the tracer source profiles
        :param kwargs_lens: list of keyword argument dicts for the lens model
        :param kwargs_source: list of keyword argument dicts for the source light model
        :return: model predicted observed tracer component
        """
        tracer_brightness_conv = np.zeros_like(self.Data.data)
        source_light_conv = np.zeros_like(self.Data.data)
        # Accumulate each [light, tracer] pairing of the partition separately,
        # then form the weighted average at the end.
        for [k_light, k_tracer] in self._tracer_partition:
            source_light_k = self._source_surface_brightness_analytical_numerics(
                kwargs_source,
                kwargs_lens,
                kwargs_extinction,
                kwargs_special=kwargs_special,
                de_lensed=de_lensed,
                k=k_light,
            )
            source_light_conv_k = self.ImageNumerics.re_size_convolve(
                source_light_k, unconvolved=False
            )
            # Floor the convolved brightness at 1e-20 so the final division
            # cannot hit zero.
            # NOTE(review): the numerator below uses the unclamped
            # source_light_k — confirm zero-brightness pixels behave as intended.
            source_light_conv_k[source_light_conv_k < 10 ** (-20)] = 10 ** (-20)
            tracer_k = self._tracer_model_source(
                kwargs_tracer_source, kwargs_lens, de_lensed=de_lensed, k=k_tracer
            )
            if self._tracer_type == "LINEAR":
                tracer_brightness_conv_k = self.ImageNumerics.re_size_convolve(
                    tracer_k * source_light_k, unconvolved=False
                )
                tracer_brightness_conv += tracer_brightness_conv_k
            if self._tracer_type == "LOG":
                # LOG tracers are converted to linear units before the
                # brightness-weighted sum, and back to log below.
                lin_tracer_k = 10 ** (tracer_k)
                lin_tracer_brightness_conv_k = self.ImageNumerics.re_size_convolve(
                    lin_tracer_k * source_light_k, unconvolved=False
                )
                tracer_brightness_conv += lin_tracer_brightness_conv_k
            source_light_conv += source_light_conv_k
        if self._tracer_type == "LINEAR":
            return tracer_brightness_conv / source_light_conv
        if self._tracer_type == "LOG":
            return np.log10(tracer_brightness_conv / source_light_conv)
    def _tracer_model_source(
        self, kwargs_tracer_source, kwargs_lens, de_lensed=False, k=None
    ):
        """Evaluate the tracer source profiles on the image-plane grid.

        :param kwargs_tracer_source: list of keyword argument dicts for the tracer source profiles
        :param kwargs_lens: list of keyword argument dicts for the lens model
        :return: tracer values evaluated on the numerics grid
        """
        ra_grid, dec_grid = self.ImageNumerics.coordinates_evaluate
        # de_lensed=True evaluates the profiles directly in the source plane;
        # otherwise the grid is ray-traced through the lens model first.
        if de_lensed is True:
            source_light = self.tracer_source_class.surface_brightness(
                ra_grid, dec_grid, kwargs_tracer_source, k=k
            )
        else:
            source_light = self.tracer_mapping.image_flux_joint(
                ra_grid, dec_grid, kwargs_lens, kwargs_tracer_source, k=k
            )
        return source_light
    def likelihood_data_given_model(
        self,
        kwargs_tracer_source,
        kwargs_lens,
        kwargs_source,
        kwargs_extinction=None,
        kwargs_special=None,
    ):
        """Log-likelihood of the imaging data given the tracer model,
        evaluated within ``likelihood_mask``.

        :return: log-likelihood value
        """
        model = self.tracer_model(
            kwargs_tracer_source,
            kwargs_lens,
            kwargs_source,
            kwargs_extinction,
            kwargs_special,
        )
        log_likelihood = self.Data.log_likelihood(
            model, self.likelihood_mask, additional_error_map=0
        )
        return log_likelihood
    @property
    def num_data_evaluate(self):
        """Number of data points to be used in the linear solver :return:"""
        return int(np.sum(self.likelihood_mask))
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@ImSim@tracer_model.py@.PATH_END.py
|
{
"filename": "_ticklabelstep.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/marker/colorbar/_ticklabelstep.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabelstepValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for ``scattergeo.marker.colorbar.ticklabelstep``.

    Schema defaults: ``min=1``, ``edit_type="calc"``.
    """

    def __init__(
        self,
        plotly_name="ticklabelstep",
        parent_name="scattergeo.marker.colorbar",
        **kwargs,
    ):
        # Fill schema defaults without clobbering caller overrides.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("min", 1)
        super(TicklabelstepValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@marker@colorbar@_ticklabelstep.py@.PATH_END.py
|
{
"filename": "SymTensor.py",
"repo_name": "LLNL/spheral",
"repo_path": "spheral_extracted/spheral-main/src/PYB11/Geometry/SymTensor.py",
"type": "Python"
}
|
from PYB11Generator import *
#-------------------------------------------------------------------------------
# SymTensor (rank 2) template
#-------------------------------------------------------------------------------
@PYB11template("ndim")
class SymTensor:
    "Spheral geometric symmetric tensor (rank 2: %(ndim)sx%(ndim)s) class"
    # PYB11 convention: bare strings are docstrings consumed by the binding
    # generator, and method bodies are intentionally empty unless a
    # @PYB11implementation lambda supplies the C++ body.  %(ndim)s is
    # substituted when the template is instantiated (see bottom of file).
    # Static attributes
    nDimensions = PYB11readonly(static=True, doc="Number of dimensions", returnpolicy="copy")
    numElements = PYB11readonly(static=True, doc="Number of elements stored in the type", returnpolicy="copy")
    zero = PYB11readonly(static=True, doc="The zero value equivalent", returnpolicy="copy")
    one = PYB11readonly(static=True, doc="The unit value equivalent", returnpolicy="copy")
    # Constructors
    def pyinit0(self):
        "Default constructor"
    def pyinit1(self,
                rhs = "const Dim<%(ndim)s>::Tensor"):
        "Copy constructor (tensor)"
    def pyinit2(self,
                rhs = "const Dim<%(ndim)s>::SymTensor"):
        "Copy constructor (symmetric tensor)"
    # Element-wise constructors covering the 1-D, 2-D and 3-D element counts.
    def pyinit3(self,
                xx="double"):
        "Construct with element values."
    def pyinit4(self,
                xx="double", xy="double",
                yx="double", yy="double"):
        "Construct with element values."
    def pyinit5(self,
                xx="double", xy="double", xz="double",
                yx="double", yy="double", yz="double",
                zx="double", zy="double", zz="double"):
        "Construct with element values."
    # Sequence methods
    @PYB11implementation("[](const Dim<%(ndim)s>::SymTensor&) { return Dim<%(ndim)s>::SymTensor::numElements; }")
    def __len__(self):
        "The size (number of elements) of the SymTensor."
    # Bounds-checked element access; raises IndexError on the Python side.
    @PYB11implementation("[](const Dim<%(ndim)s>::SymTensor &s, size_t i) { if (i >= Dim<%(ndim)s>::SymTensor::numElements) throw py::index_error(); return s[i]; }")
    @PYB11returnpolicy("reference_internal")
    def __getitem__(self):
        "Python indexing to get an element."
    @PYB11implementation("[](Dim<%(ndim)s>::SymTensor &s, size_t i, double v) { if (i >= Dim<%(ndim)s>::SymTensor::numElements) throw py::index_error(); s[i] = v; }")
    def __setitem__(self):
        "Python indexing to set an element."
    # keep_alive<0,1> ties the iterator's lifetime to the tensor it iterates.
    @PYB11implementation("[](const Dim<%(ndim)s>::SymTensor &s) { return py::make_iterator(s.begin(), s.end()); }, py::keep_alive<0,1>()")
    def __iter__(self):
        "Python iteration through a SymTensor."
    @PYB11const
    @PYB11returnpolicy("reference_internal")
    def __call__(self,
                 row="Dim<%(ndim)s>::SymTensor::size_type",
                 col="Dim<%(ndim)s>::SymTensor::size_type"):
        "Extract the (row, column) element."
        return "double"
    # Overload of __call__ acting as a (row, col, value) element setter.
    @PYB11pycppname("__call__")
    @PYB11implementation("[](Dim<%(ndim)s>::SymTensor& self, Dim<%(ndim)s>::SymTensor::size_type row, Dim<%(ndim)s>::SymTensor::size_type col, double val) { self(row,col) = val; }")
    def assignCall(self,
                   row="Dim<%(ndim)s>::SymTensor::size_type",
                   col="Dim<%(ndim)s>::SymTensor::size_type",
                   val="double"):
        return "void"
    # String representation
    @PYB11implementation("""
[](const Dim<%(ndim)s>::SymTensor& self) {
  std::string result = "SymTensor" + std::to_string(%(ndim)s) + "d(";
  for (auto val: self) result += (" " + std::to_string(val) + " ");
  result += ")";
  return result;
}""")
    def __repr__(self):
        return
    # Operators: bare declarations tell PYB11 to bind the corresponding C++
    # operator overloads; the argument string gives the rhs type.
    def __neg__(self):
        return
    def __add__(self, rhs="Dim<%(ndim)s>::SymTensor()"):
        return
    def __sub__(self, rhs="Dim<%(ndim)s>::SymTensor()"):
        return
    def __mul__(self, rhs="Dim<%(ndim)s>::SymTensor()"):
        return
    def __iadd__(self, rhs="Dim<%(ndim)s>::SymTensor()"):
        return
    def __isub__(self, rhs="Dim<%(ndim)s>::SymTensor()"):
        return
    @PYB11pycppname("__add__")
    def __add__T(self, rhs="Dim<%(ndim)s>::Tensor()"):
        return
    @PYB11pycppname("__sub__")
    def __sub__T(self, rhs="Dim<%(ndim)s>::Tensor()"):
        return
    @PYB11pycppname("__mul__")
    def __mul__T(self, rhs="Dim<%(ndim)s>::Tensor()"):
        return
    @PYB11pycppname("__mul__")
    def __mul__f(self, rhs="double()"):
        return
    @PYB11pycppname("__rmul__")
    def __rmul__f(self, rhs="double()"):
        return
    @PYB11pycppname("__truediv__")
    def __truediv__f(self, rhs="double()"):
        return
    @PYB11pycppname("__imul__")
    def __imul__f(self, rhs="double()"):
        return
    @PYB11pycppname("__itruediv__")
    def __itruediv__f(self, rhs="double()"):
        return
    @PYB11pycppname("__mul__")
    def __mul__V(self, rhs="Dim<%(ndim)s>::Vector()"):
        return
    # Comparison
    def __eq__(self):
        return
    def __ne__(self):
        return
    def __lt__(self):
        return
    def __gt__(self):
        return
    def __le__(self):
        return
    def __ge__(self):
        return
    # Methods
    def getRow(self):
        "Extract the indexed row as a Vector%(ndim)sd."
    def getColumn(self):
        "Extract the indexed column as a Vector%(ndim)sd."
    def Zero(self):
        "Zero out the elements."
    def Identity(self):
        "Set equal to the I tensor."
    def Symmetric(self):
        "Return a SymTensor with the symmetric portion of this tensor."
    def SkewSymmetric(self):
        "Return a Tensor with the skew-symmetric portion of this tensor."
    def Transpose(self):
        "Return the transpose of this Tensor."
    def Inverse(self):
        "Return the inverse of the tensor."
    def diagonalElements(self):
        "Return a Vector%(ndim)sd with the diagonal elements of this tensor."
    def Trace(self):
        "Compute the trace (sum of diagonal elements)."
    def Determinant(self):
        "Compute the determinant of the tensor."
    @PYB11const
    def dot(self, rhs="const Dim<%(ndim)s>::Vector&"):
        "Product with a Vector%(ndim)sd."
        return "Dim<%(ndim)s>::Vector"
    @PYB11const
    @PYB11pycppname("dot")
    def dot2(self, rhs="const Dim<%(ndim)s>::Tensor&"):
        "Product with a Tensor%(ndim)sd."
        return "Dim<%(ndim)s>::Tensor"
    @PYB11const
    @PYB11pycppname("dot")
    def dot3(self, rhs="const Dim<%(ndim)s>::SymTensor&"):
        "Product with a SymTensor%(ndim)sd."
        return "Dim<%(ndim)s>::Tensor"
    @PYB11const
    def doubledot(self, rhs="const Dim<%(ndim)s>::Tensor&"):
        "Double dot contraction with another tensor (returns a scalar)."
        return "double"
    @PYB11const
    @PYB11pycppname("doubledot")
    def doubledot2(self, rhs="const Dim<%(ndim)s>::SymTensor&"):
        "Double dot contraction with a SymTensor (returns a scalar)."
        return "double"
    def selfDoubledot(self):
        "Double dot contraction with ourself (returns a scalar)."
    def square(self):
        "Compute the product with ourself as a matrix product."
    def squareElements(self):
        "Returns the element-wise square of the tensor."
    def eigenValues(self):
        "Return a Vector%(ndim)sd with the eigenvalues of this tensor."
    def rotationalTransform(self):
        "Apply the given rotational transform to the tensor."
    def maxAbsElement(self):
        "Return the maximum of the absolute values of the elements."
    # Methods special to symmetric tensor (not in tensor).
    def cube(self):
        "Cube power of this symmetric tensor."
    def sqrt(self):
        "Sqrt of the symmetric tensor."
    def cuberoot(self):
        "Cube root of the symmetric tensor."
    def pow(self):
        "Raise the symmetric tensor to an arbitrary power."
    def eigenVectors(self):
        "Return an EigenStruct with the eigenvalues and eigenvectors."
    # Properties
    # All nine named elements are exposed even in 1-D/2-D instantiations;
    # the generated getters/setters map onto the C++ accessors of each Dim.
    xx = PYB11property("double", "xx", "xx", doc="The xx element.")
    xy = PYB11property("double", "xy", "xy", doc="The xy element.")
    xz = PYB11property("double", "xz", "xz", doc="The xz element.")
    yx = PYB11property("double", "yx", "yx", doc="The yx element.")
    yy = PYB11property("double", "yy", "yy", doc="The yy element.")
    yz = PYB11property("double", "yz", "yz", doc="The yz element.")
    zx = PYB11property("double", "zx", "zx", doc="The zx element.")
    zy = PYB11property("double", "zy", "zy", doc="The zy element.")
    zz = PYB11property("double", "zz", "zz", doc="The zz element.")
#-------------------------------------------------------------------------------
# SymTensor instantiations.
#-------------------------------------------------------------------------------
# Concrete bindings of the SymTensor template for 1, 2 and 3 dimensions.
# NOTE(review): template_parameters = ("1") is a parenthesised string, not a
# 1-tuple ("1",); PYB11TemplateClass appears to accept either form — confirm.
SymTensor1d = PYB11TemplateClass(SymTensor,
                                 template_parameters = ("1"),
                                 cppname = "Dim<1>::SymTensor",
                                 pyname = "SymTensor1d")
SymTensor2d = PYB11TemplateClass(SymTensor,
                                 template_parameters = ("2"),
                                 cppname = "Dim<2>::SymTensor",
                                 pyname = "SymTensor2d")
SymTensor3d = PYB11TemplateClass(SymTensor,
                                 template_parameters = ("3"),
                                 cppname = "Dim<3>::SymTensor",
                                 pyname = "SymTensor3d")
|
LLNLREPO_NAMEspheralPATH_START.@spheral_extracted@spheral-main@src@PYB11@Geometry@SymTensor.py@.PATH_END.py
|
{
"filename": "_z.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/cone/lightposition/_z.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``cone.lightposition.z``.

    Schema defaults: ``min=-100000``, ``max=100000``, ``edit_type="calc"``.
    """

    def __init__(self, plotly_name="z", parent_name="cone.lightposition", **kwargs):
        # Apply every schema default that the caller did not override.
        for opt, default in (("edit_type", "calc"), ("max", 100000), ("min", -100000)):
            kwargs.setdefault(opt, default)
        super(ZValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@cone@lightposition@_z.py@.PATH_END.py
|
{
"filename": "painter.py",
"repo_name": "rainwoodman/gaepsi2",
"repo_path": "gaepsi2_extracted/gaepsi2-master/gaepsi2/painter.py",
"type": "Python"
}
|
from . import _painter
import sharedmem
import numpy
def paint(pos, sml, data, shape, mask=None, np=0):
    """ Use SPH kernel to splat particles to an image.
    Parameters
    ----------
    pos : array_like
        (..., >2) position of particles. Only two first
        column is used. In device coordinate
    data : array_like
        (Nc, ...) or (...). Weight to use for painting.
        Nc channels will be produces on the device.
        if the array is 1d, Nc = 1
    sml : array_like
        smoothing length (half of effective size).
        In device coordinate; only correct in isotropic
        cameras
    shape : list, tuple
        (w[0], w[1]) the size of the device.
        shall enclose pos[..., 0] and pos[..., 1]
    mask : array_like, boolean
        If provided, elements with False will not be painted.
    np : int
        number of multiprocessing. 0 for single-processing.
        None for all available cores.
    Returns
    -------
    image: array_like
        (Nc, shape[0], shape[1])
    Notes
    -----
    Remember to transpose for imshow and pmesh to correct put x horizontaly.
    """
    # A single 1-d weight array is promoted to one channel (Nc = 1).
    if len(numpy.shape(data)) == 1:
        data = [data]
    with sharedmem.MapReduce(np=np) as pool:
        # One accumulation buffer per worker rank so concurrent painters never
        # write to the same image; serial mode needs only one buffer.
        if pool.np > 0: nbuf = pool.np
        else: nbuf = 1
        buf = sharedmem.empty([nbuf, len(data)] + list(shape), dtype='f4')
        buf[:] = 0
        # Particles are processed in fixed-size chunks to bound per-task work.
        chunksize = 1024 * 8
        def work(i):
            # Paint particles [i, i + chunksize) into this worker's own buffer.
            sl = slice(i, i + chunksize)
            datas = [d[sl] for d in data]
            if mask is not None: masks = mask[sl]
            else: masks = None
            # NOTE(review): _painter.paint is the C-extension splatter;
            # signature assumed (pos, sml, data, out_image, mask) — confirm.
            _painter.paint(pos[sl], sml[sl], numpy.array(datas),
                buf[pool.local.rank], masks)
        pool.map(work, range(0, len(pos), chunksize))
    # Reduce the per-worker images into the final (Nc, w0, w1) array.
    return numpy.sum(buf, axis=0)
|
rainwoodmanREPO_NAMEgaepsi2PATH_START.@gaepsi2_extracted@gaepsi2-master@gaepsi2@painter.py@.PATH_END.py
|
{
"filename": "modern_treasury.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/document_loaders/modern_treasury.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
# Import only for static type checkers; at runtime the class is resolved
# lazily through __getattr__ below.
if TYPE_CHECKING:
    from langchain_community.document_loaders import ModernTreasuryLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"ModernTreasuryLoader": "langchain_community.document_loaders"}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    # PEP 562 module hook: resolves the deprecated name from
    # langchain_community and emits a deprecation warning on access.
    return _import_attribute(name)
# The old public name is still advertised for backwards compatibility.
__all__ = [
    "ModernTreasuryLoader",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@document_loaders@modern_treasury.py@.PATH_END.py
|
{
"filename": "_basic.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/fft/_basic.py",
"type": "Python"
}
|
from scipy._lib.uarray import generate_multimethod, Dispatchable
import numpy as np
def _x_replacer(args, kwargs, dispatchables):
"""
uarray argument replacer to replace the transform input array (``x``)
"""
if len(args) > 0:
return (dispatchables[0],) + args[1:], kwargs
kw = kwargs.copy()
kw['x'] = dispatchables[0]
return args, kw
def _dispatch(func):
    """Decorator turning ``func`` into a uarray multimethod.

    Every ``scipy.fft`` multimethod shares the same domain and the same
    ``x`` argument replacer.
    """
    multimethod = generate_multimethod(
        func, _x_replacer, domain="numpy.scipy.fft"
    )
    return multimethod
@_dispatch
def fft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
        plan=None):
    """
    Compute the 1-D discrete Fourier Transform.
    This function computes the 1-D *n*-point discrete Fourier
    Transform (DFT) with the efficient Fast Fourier Transform (FFT)
    algorithm [1]_.
    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If `n` is not given,
        the length of the input along the axis specified by `axis` is used.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is
        used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode. Default is "backward", meaning no normalization on
        the forward transforms and scaling by ``1/n`` on the `ifft`.
        "forward" instead applies the ``1/n`` factor on the forward transform.
        For ``norm="ortho"``, both directions are scaled by ``1/sqrt(n)``.
        .. versionadded:: 1.6.0
           ``norm={"forward", "backward"}`` options were added
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See the notes below for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``. See below for more
        details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.
        .. versionadded:: 1.5.0
    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
    Raises
    ------
    IndexError
        if `axes` is larger than the last axis of `x`.
    See Also
    --------
    ifft : The inverse of `fft`.
    fft2 : The 2-D FFT.
    fftn : The N-D FFT.
    rfftn : The N-D FFT of real input.
    fftfreq : Frequency bins for given FFT parameters.
    next_fast_len : Size to pad input to for most efficient transforms
    Notes
    -----
    FFT (Fast Fourier Transform) refers to a way the discrete Fourier Transform
    (DFT) can be calculated efficiently, by using symmetries in the calculated
    terms. The symmetry is highest when `n` is a power of 2, and the transform
    is therefore most efficient for these sizes. For poorly factorizable sizes,
    `scipy.fft` uses Bluestein's algorithm [2]_ and so is never worse than
    O(`n` log `n`). Further performance improvements may be seen by zero-padding
    the input using `next_fast_len`.
    If ``x`` is a 1d array, then the `fft` is equivalent to ::
        y[k] = np.sum(x * np.exp(-2j * np.pi * k * np.arange(n)/n))
    The frequency term ``f=k/n`` is found at ``y[k]``. At ``y[n/2]`` we reach
    the Nyquist frequency and wrap around to the negative-frequency terms. So,
    for an 8-point transform, the frequencies of the result are
    [0, 1, 2, 3, -4, -3, -2, -1]. To rearrange the fft output so that the
    zero-frequency component is centered, like [-4, -3, -2, -1, 0, 1, 2, 3],
    use `fftshift`.
    Transforms can be done in single, double, or extended precision (long
    double) floating point. Half precision inputs will be converted to single
    precision and non-floating-point inputs will be converted to double
    precision.
    If the data type of ``x`` is real, a "real FFT" algorithm is automatically
    used, which roughly halves the computation time. To increase efficiency
    a little further, use `rfft`, which does the same calculation, but only
    outputs half of the symmetrical spectrum. If the data are both real and
    symmetrical, the `dct` can again double the efficiency, by generating
    half of the spectrum from half of the signal.
    When ``overwrite_x=True`` is specified, the memory referenced by ``x`` may
    be used by the implementation in any way. This may include reusing the
    memory for the result, but this is in no way guaranteed. You should not
    rely on the contents of ``x`` after the transform as this may change in
    future without warning.
    The ``workers`` argument specifies the maximum number of parallel jobs to
    split the FFT computation into. This will execute independent 1-D
    FFTs within ``x``. So, ``x`` must be at least 2-D and the
    non-transformed axes must be large enough to split into chunks. If ``x`` is
    too small, fewer jobs may be used than requested.
    References
    ----------
    .. [1] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
           machine calculation of complex Fourier series," *Math. Comput.*
           19: 297-301.
    .. [2] Bluestein, L., 1970, "A linear filtering approach to the
           computation of discrete Fourier transform". *IEEE Transactions on
           Audio and Electroacoustics.* 18 (4): 451-455.
    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> scipy.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
    array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,
            2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,
           -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,
            1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])
    In this example, real input has an FFT which is Hermitian, i.e., symmetric
    in the real part and anti-symmetric in the imaginary part:
    >>> from scipy.fft import fft, fftfreq, fftshift
    >>> import matplotlib.pyplot as plt
    >>> t = np.arange(256)
    >>> sp = fftshift(fft(np.sin(t)))
    >>> freq = fftshift(fftfreq(t.shape[-1]))
    >>> plt.plot(freq, sp.real, freq, sp.imag)
    [<matplotlib.lines.Line2D object at 0x...>,
     <matplotlib.lines.Line2D object at 0x...>]
    >>> plt.show()
    """
    # uarray multimethod stub: no transform happens here.  The returned tuple
    # only marks ``x`` as the dispatchable array argument so a registered
    # backend can be selected to perform the actual FFT.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ifft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 1-D inverse discrete Fourier Transform.

    This function computes the inverse of the 1-D *n*-point
    discrete Fourier transform computed by `fft`. In other words,
    ``ifft(fft(x)) == x`` to within numerical accuracy.

    The input should be ordered in the same way as is returned by `fft`,
    i.e.,

    * ``x[0]`` should contain the zero frequency term,
    * ``x[1:n//2]`` should contain the positive-frequency terms,
    * ``x[n//2 + 1:]`` should contain the negative-frequency terms, in
      increasing order starting from the most negative frequency.

    For an even number of input points, ``x[n//2]`` represents the sum of
    the values at the positive and negative Nyquist frequencies, as the two
    are aliased together. See `fft` for details.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    n : int, optional
        Length of the transformed axis of the output.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If `n` is not given,
        the length of the input along the axis specified by `axis` is used.
        See notes about padding issues.
    axis : int, optional
        Axis over which to compute the inverse DFT. If not given, the last
        axis is used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.

    Raises
    ------
    IndexError
        If `axes` is larger than the last axis of `x`.

    See Also
    --------
    fft : The 1-D (forward) FFT, of which `ifft` is the inverse.
    ifft2 : The 2-D inverse FFT.
    ifftn : The N-D inverse FFT.

    Notes
    -----
    If the input parameter `n` is larger than the size of the input, the input
    is padded by appending zeros at the end. Even though this is the common
    approach, it might lead to surprising results. If a different padding is
    desired, it must be performed before calling `ifft`.

    If ``x`` is a 1-D array, then the `ifft` is equivalent to ::

        y[k] = np.sum(x * np.exp(2j * np.pi * k * np.arange(n)/n)) / len(x)

    As with `fft`, `ifft` has support for all floating point types and is
    optimized for real input.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> scipy.fft.ifft([0, 4, 0, 0])
    array([ 1.+0.j,  0.+1.j, -1.+0.j,  0.-1.j]) # may vary

    Create and plot a band-limited signal with random phases:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> t = np.arange(400)
    >>> n = np.zeros((400,), dtype=complex)
    >>> n[40:60] = np.exp(1j*rng.uniform(0, 2*np.pi, (20,)))
    >>> s = scipy.fft.ifft(n)
    >>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
    [<matplotlib.lines.Line2D object at ...>, <matplotlib.lines.Line2D object at ...>]
    >>> plt.legend(('real', 'imaginary'))
    <matplotlib.legend.Legend object at ...>
    >>> plt.show()

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def rfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 1-D discrete Fourier Transform for real input.

    This function computes the 1-D *n*-point discrete Fourier
    Transform (DFT) of a real-valued array by means of an efficient algorithm
    called the Fast Fourier Transform (FFT).

    Parameters
    ----------
    x : array_like
        Input array
    n : int, optional
        Number of points along transformation axis in the input to use.
        If `n` is smaller than the length of the input, the input is cropped.
        If it is larger, the input is padded with zeros. If `n` is not given,
        the length of the input along the axis specified by `axis` is used.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is
        used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        If `n` is even, the length of the transformed axis is ``(n/2)+1``.
        If `n` is odd, the length is ``(n+1)/2``.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    irfft : The inverse of `rfft`.
    fft : The 1-D FFT of general (complex) input.
    fftn : The N-D FFT.
    rfft2 : The 2-D FFT of real input.
    rfftn : The N-D FFT of real input.

    Notes
    -----
    When the DFT is computed for purely real input, the output is
    Hermitian-symmetric, i.e., the negative frequency terms are just the complex
    conjugates of the corresponding positive-frequency terms, and the
    negative-frequency terms are therefore redundant. This function does not
    compute the negative frequency terms, and the length of the transformed
    axis of the output is therefore ``n//2 + 1``.

    When ``X = rfft(x)`` and fs is the sampling frequency, ``X[0]`` contains
    the zero-frequency term 0*fs, which is real due to Hermitian symmetry.

    If `n` is even, ``A[-1]`` contains the term representing both positive
    and negative Nyquist frequency (+fs/2 and -fs/2), and must also be purely
    real. If `n` is odd, there is no term at fs/2; ``A[-1]`` contains
    the largest positive frequency (fs/2*(n-1)/n), and is complex in the
    general case.

    If the input `a` contains an imaginary part, it is silently discarded.

    Examples
    --------
    >>> import scipy.fft
    >>> scipy.fft.fft([0, 1, 0, 0])
    array([ 1.+0.j,  0.-1.j, -1.+0.j,  0.+1.j]) # may vary
    >>> scipy.fft.rfft([0, 1, 0, 0])
    array([ 1.+0.j,  0.-1.j, -1.+0.j]) # may vary

    Notice how the final element of the `fft` output is the complex conjugate
    of the second element, for real input. For `rfft`, this symmetry is
    exploited to compute only the non-negative frequency terms.

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def irfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Computes the inverse of `rfft`.

    This function computes the inverse of the 1-D *n*-point
    discrete Fourier Transform of real input computed by `rfft`.
    In other words, ``irfft(rfft(x), len(x)) == x`` to within numerical
    accuracy. (See Notes below for why ``len(a)`` is necessary here.)

    The input is expected to be in the form returned by `rfft`, i.e., the
    real zero-frequency term followed by the complex positive frequency terms
    in order of increasing frequency. Since the discrete Fourier Transform of
    real input is Hermitian-symmetric, the negative frequency terms are taken
    to be the complex conjugates of the corresponding positive frequency terms.

    Parameters
    ----------
    x : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output.
        For `n` output points, ``n//2+1`` input points are necessary. If the
        input is longer than this, it is cropped. If it is shorter than this,
        it is padded with zeros. If `n` is not given, it is taken to be
        ``2*(m-1)``, where ``m`` is the length of the input along the axis
        specified by `axis`.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the last
        axis is used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is `n`, or, if `n` is not given,
        ``2*(m-1)`` where ``m`` is the length of the transformed axis of the
        input. To get an odd number of output points, `n` must be specified.

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `x`.

    See Also
    --------
    rfft : The 1-D FFT of real input, of which `irfft` is inverse.
    fft : The 1-D FFT.
    irfft2 : The inverse of the 2-D FFT of real input.
    irfftn : The inverse of the N-D FFT of real input.

    Notes
    -----
    Returns the real valued `n`-point inverse discrete Fourier transform
    of `x`, where `x` contains the non-negative frequency terms of a
    Hermitian-symmetric sequence. `n` is the length of the result, not the
    input.

    If you specify an `n` such that `a` must be zero-padded or truncated, the
    extra/removed values will be added/removed at high frequencies. One can
    thus resample a series to `m` points via Fourier interpolation by:
    ``a_resamp = irfft(rfft(a), m)``.

    The default value of `n` assumes an even output length. By the Hermitian
    symmetry, the last imaginary component must be 0 and so is ignored. To
    avoid losing information, the correct length of the real input *must* be
    given.

    Examples
    --------
    >>> import scipy.fft
    >>> scipy.fft.ifft([1, -1j, -1, 1j])
    array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
    >>> scipy.fft.irfft([1, -1j, -1])
    array([0., 1., 0., 0.])

    Notice how the last term in the input to the ordinary `ifft` is the
    complex conjugate of the second term, and the output has zero imaginary
    part everywhere. When calling `irfft`, the negative frequencies are not
    specified, and the output array is purely real.

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def hfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
    spectrum.

    Parameters
    ----------
    x : array_like
        The input array.
    n : int, optional
        Length of the transformed axis of the output. For `n` output
        points, ``n//2 + 1`` input points are necessary. If the input is
        longer than this, it is cropped. If it is shorter than this, it is
        padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``,
        where ``m`` is the length of the input along the axis specified by
        `axis`.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last
        axis is used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is `n`, or, if `n` is not given,
        ``2*m - 2``, where ``m`` is the length of the transformed axis of
        the input. To get an odd number of output points, `n` must be
        specified, for instance, as ``2*m - 1`` in the typical case,

    Raises
    ------
    IndexError
        If `axis` is larger than the last axis of `a`.

    See Also
    --------
    rfft : Compute the 1-D FFT for real input.
    ihfft : The inverse of `hfft`.
    hfftn : Compute the N-D FFT of a Hermitian signal.

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So, here, it's `hfft`, for
    which you must supply the length of the result if it is to be odd.

    * even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error.

    Examples
    --------
    >>> from scipy.fft import fft, hfft
    >>> import numpy as np
    >>> a = 2 * np.pi * np.arange(10) / 10
    >>> signal = np.cos(a) + 3j * np.sin(3 * a)
    >>> fft(signal).round(10)
    array([ -0.+0.j,   5.+0.j,  -0.+0.j,  15.-0.j,   0.+0.j,   0.+0.j,
            -0.+0.j, -15.-0.j,   0.+0.j,   5.+0.j])
    >>> hfft(signal[:6]).round(10) # Input first half of signal
    array([  0.,   5.,   0.,  15.,  -0.,   0.,   0., -15.,  -0.,   5.])
    >>> hfft(signal, 10)  # Input entire signal and truncate
    array([  0.,   5.,   0.,  15.,  -0.,   0.,   0., -15.,  -0.,   5.])

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ihfft(x, n=None, axis=-1, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the inverse FFT of a signal that has Hermitian symmetry.

    Parameters
    ----------
    x : array_like
        Input array.
    n : int, optional
        Length of the inverse FFT, the number of points along
        transformation axis in the input to use. If `n` is smaller than
        the length of the input, the input is cropped. If it is larger,
        the input is padded with zeros. If `n` is not given, the length of
        the input along the axis specified by `axis` is used.
    axis : int, optional
        Axis over which to compute the inverse FFT. If not given, the last
        axis is used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axis
        indicated by `axis`, or the last one if `axis` is not specified.
        The length of the transformed axis is ``n//2 + 1``.

    See Also
    --------
    hfft, irfft

    Notes
    -----
    `hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
    opposite case: here, the signal has Hermitian symmetry in the time
    domain and is real in the frequency domain. So, here, it's `hfft`, for
    which you must supply the length of the result if it is to be odd:

    * even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error.

    Examples
    --------
    >>> from scipy.fft import ifft, ihfft
    >>> import numpy as np
    >>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
    >>> ifft(spectrum)
    array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary
    >>> ihfft(spectrum)
    array([ 1.-0.j,  2.-0.j,  3.-0.j,  4.-0.j]) # may vary

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def fftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the N-D discrete Fourier Transform.

    This function computes the N-D discrete Fourier Transform over
    any number of axes in an M-D array by means of the Fast Fourier
    Transform (FFT).

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``fft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        if `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `x`,
        as explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    ifftn : The inverse of `fftn`, the inverse N-D FFT.
    fft : The 1-D FFT, with definitions and conventions used.
    rfftn : The N-D FFT of real input.
    fft2 : The 2-D FFT.
    fftshift : Shifts zero-frequency terms to centre of array.

    Notes
    -----
    The output, analogously to `fft`, contains the term for zero frequency in
    the low-order corner of all axes, the positive frequency terms in the
    first half of all axes, the term for the Nyquist frequency in the middle
    of all axes and the negative frequency terms in the second half of all
    axes, in order of decreasingly negative frequency.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.mgrid[:3, :3, :3][0]
    >>> scipy.fft.fftn(x, axes=(1, 2))
    array([[[ 0.+0.j,   0.+0.j,   0.+0.j], # may vary
            [ 0.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j]],
           [[ 9.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j]],
           [[18.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j],
            [ 0.+0.j,   0.+0.j,   0.+0.j]]])
    >>> scipy.fft.fftn(x, (2, 2), axes=(0, 1))
    array([[[ 2.+0.j,  2.+0.j,  2.+0.j], # may vary
            [ 0.+0.j,  0.+0.j,  0.+0.j]],
           [[-2.+0.j, -2.+0.j, -2.+0.j],
            [ 0.+0.j,  0.+0.j,  0.+0.j]]])

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> [X, Y] = np.meshgrid(2 * np.pi * np.arange(200) / 12,
    ...                      2 * np.pi * np.arange(200) / 34)
    >>> S = np.sin(X) + np.cos(Y) + rng.uniform(0, 1, X.shape)
    >>> FS = scipy.fft.fftn(S)
    >>> plt.imshow(np.log(np.abs(scipy.fft.fftshift(FS))**2))
    <matplotlib.image.AxesImage object at 0x...>
    >>> plt.show()

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ifftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D inverse discrete Fourier Transform.

    This function computes the inverse of the N-D discrete
    Fourier Transform over any number of axes in an M-D array by
    means of the Fast Fourier Transform (FFT). In other words,
    ``ifftn(fftn(x)) == x`` to within numerical accuracy.

    The input, analogously to `ifft`, should be ordered in the same way as is
    returned by `fftn`, i.e., it should have the term for zero frequency
    in all axes in the low-order corner, the positive frequency terms in the
    first half of all axes, the term for the Nyquist frequency in the middle
    of all axes and the negative frequency terms in the second half of all
    axes, in order of decreasingly negative frequency.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``ifft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        if `s` is not given, the shape of the input along the axes specified
        by `axes` is used. See notes for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the IFFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` or `x`,
        as explained in the parameters section above.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    fftn : The forward N-D FFT, of which `ifftn` is the inverse.
    ifft : The 1-D inverse FFT.
    ifft2 : The 2-D inverse FFT.
    ifftshift : Undoes `fftshift`, shifts zero-frequency terms to beginning
        of array.

    Notes
    -----
    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension. Although this is the common
    approach, it might lead to surprising results. If another form of zero
    padding is desired, it must be performed before `ifftn` is called.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.eye(4)
    >>> scipy.fft.ifftn(scipy.fft.fftn(x, axes=(0,)), axes=(1,))
    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j],
           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j]])

    Create and plot an image with band-limited frequency content:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.default_rng()
    >>> n = np.zeros((200,200), dtype=complex)
    >>> n[60:80, 20:40] = np.exp(1j*rng.uniform(0, 2*np.pi, (20, 20)))
    >>> im = scipy.fft.ifftn(n).real
    >>> plt.imshow(im)
    <matplotlib.image.AxesImage object at 0x...>
    >>> plt.show()

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def fft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
         plan=None):
    """
    Compute the 2-D discrete Fourier Transform

    This function computes the N-D discrete Fourier Transform
    over any axes in an M-D array by means of the
    Fast Fourier Transform (FFT). By default, the transform is computed over
    the last two axes of the input array, i.e., a 2-dimensional FFT.

    Parameters
    ----------
    x : array_like
        Input array, can be complex
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        This corresponds to ``n`` for ``fft(x, n)``.
        Along each axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        if `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last two axes are
        used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    ifft2 : The inverse 2-D FFT.
    fft : The 1-D FFT.
    fftn : The N-D FFT.
    fftshift : Shifts zero-frequency terms to the center of the array.
        For 2-D input, swaps first and third quadrants, and second
        and fourth quadrants.

    Notes
    -----
    `fft2` is just `fftn` with a different default for `axes`.

    The output, analogously to `fft`, contains the term for zero frequency in
    the low-order corner of the transformed axes, the positive frequency terms
    in the first half of these axes, the term for the Nyquist frequency in the
    middle of the axes and the negative frequency terms in the second half of
    the axes, in order of decreasingly negative frequency.

    See `fftn` for details and a plotting example, and `fft` for
    definitions and conventions used.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.mgrid[:5, :5][0]
    >>> scipy.fft.fft2(x)
    array([[ 50.  +0.j        ,   0.  +0.j        ,   0.  +0.j        , # may vary
              0.  +0.j        ,   0.  +0.j        ],
           [-12.5+17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
              0.  +0.j        ,   0.  +0.j        ],
           [-12.5 +4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
              0.  +0.j        ,   0.  +0.j        ],
           [-12.5 -4.0614962j ,   0.  +0.j        ,   0.  +0.j        ,
              0.  +0.j        ,   0.  +0.j        ],
           [-12.5-17.20477401j,   0.  +0.j        ,   0.  +0.j        ,
              0.  +0.j        ,   0.  +0.j        ]])

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ifft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D inverse discrete Fourier Transform.

    This function computes the inverse of the 2-D discrete Fourier
    Transform over any number of axes in an M-D array by means of
    the Fast Fourier Transform (FFT). In other words, ``ifft2(fft2(x)) == x``
    to within numerical accuracy. By default, the inverse transform is
    computed over the last two axes of the input array.

    The input, analogously to `ifft`, should be ordered in the same way as is
    returned by `fft2`, i.e., it should have the term for zero frequency
    in the low-order corner of the two axes, the positive frequency terms in
    the first half of these axes, the term for the Nyquist frequency in the
    middle of the axes and the negative frequency terms in the second half of
    both axes, in order of decreasingly negative frequency.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    s : sequence of ints, optional
        Shape (length of each axis) of the output (``s[0]`` refers to axis 0,
        ``s[1]`` to axis 1, etc.). This corresponds to `n` for ``ifft(x, n)``.
        Along each axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        if `s` is not given, the shape of the input along the axes specified
        by `axes` is used. See notes for issue on `ifft` zero padding.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last two
        axes are used.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or the last two axes if `axes` is not given.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length, or `axes` not given and
        ``len(s) != 2``.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    fft2 : The forward 2-D FFT, of which `ifft2` is the inverse.
    ifftn : The inverse of the N-D FFT.
    fft : The 1-D FFT.
    ifft : The 1-D inverse FFT.

    Notes
    -----
    `ifft2` is just `ifftn` with a different default for `axes`.

    See `ifftn` for details and a plotting example, and `fft` for
    definition and conventions used.

    Zero-padding, analogously with `ifft`, is performed by appending zeros to
    the input along the specified dimension. Although this is the common
    approach, it might lead to surprising results. If another form of zero
    padding is desired, it must be performed before `ifft2` is called.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = 4 * np.eye(4)
    >>> scipy.fft.ifft2(x)
    array([[1.+0.j,  0.+0.j,  0.+0.j,  0.+0.j], # may vary
           [0.+0.j,  0.+0.j,  0.+0.j,  1.+0.j],
           [0.+0.j,  0.+0.j,  1.+0.j,  0.+0.j],
           [0.+0.j,  1.+0.j,  0.+0.j,  0.+0.j]])

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def rfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D discrete Fourier Transform for real input.

    This function computes the N-D discrete Fourier Transform over
    any number of axes in an M-D real array by means of the Fast
    Fourier Transform (FFT). By default, all axes are transformed, with the
    real transform performed over the last axis, while the remaining
    transforms are complex.

    Parameters
    ----------
    x : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input.
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.).
        The final element of `s` corresponds to `n` for ``rfft(x, n)``, while
        for the remaining axes, it corresponds to `n` for ``fft(x, n)``.
        Along any axis, if the given shape is smaller than that of the input,
        the input is cropped. If it is larger, the input is padded with zeros.
        if `s` is not given, the shape of the input along the axes specified
        by `axes` is used.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. If not given, the last ``len(s)``
        axes are used, or all axes if `s` is also not specified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If negative,
        the value wraps around from ``os.cpu_count()``.
        See :func:`~scipy.fft.fft` for more details.
    plan : object, optional
        This argument is reserved for passing in a precomputed plan provided
        by downstream FFT vendors. It is currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along the axes
        indicated by `axes`, or by a combination of `s` and `x`,
        as explained in the parameters section above.
        The length of the last axis transformed will be ``s[-1]//2+1``,
        while the remaining transformed axes will have lengths according to
        `s`, or unchanged from the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    irfftn : The inverse of `rfftn`, i.e., the inverse of the N-D FFT
        of real input.
    fft : The 1-D FFT, with definitions and conventions used.
    rfft : The 1-D FFT of real input.
    fftn : The N-D FFT.
    rfft2 : The 2-D FFT of real input.

    Notes
    -----
    The transform for real input is performed over the last transformation
    axis, as by `rfft`, then the transform over the remaining axes is
    performed as by `fftn`. The order of the output is as for `rfft` for the
    final transformation axis, and as for `fftn` for the remaining
    transformation axes.

    See `fft` for details, definitions and conventions used.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((2, 2, 2))
    >>> scipy.fft.rfftn(x)
    array([[[8.+0.j,  0.+0.j], # may vary
            [0.+0.j,  0.+0.j]],
           [[0.+0.j,  0.+0.j],
            [0.+0.j,  0.+0.j]]])
    >>> scipy.fft.rfftn(x, axes=(2, 0))
    array([[[4.+0.j,  0.+0.j], # may vary
            [4.+0.j,  0.+0.j]],
           [[0.+0.j,  0.+0.j],
            [0.+0.j,  0.+0.j]]])

    """
    # Dispatch stub: the returned tuple marks ``x`` as the Dispatchable
    # ndarray argument; the actual transform is supplied by the backend
    # selected through the ``_dispatch`` decorator.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def rfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D FFT of a real array.

    This is `rfftn` with different default behavior (the last two axes);
    see `rfftn` for full details.

    Parameters
    ----------
    x : array
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape of the FFT.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Default: the last two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
    plan : object, optional
        Reserved for precomputed plans from downstream FFT vendors; it is
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the real 2-D FFT.

    See Also
    --------
    irfft2 : The inverse of the 2-D FFT of real input.
    rfft : The 1-D FFT of real input.
    rfftn : The N-D FFT of real input.
    """
    # Multimethod stub: only ``x`` participates in backend dispatch; the
    # registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def irfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the inverse of `rfftn`.

    Computes the inverse of the N-D discrete Fourier Transform of real
    input over any number of axes, such that
    ``irfftn(rfftn(x), x.shape) == x`` to within numerical accuracy.
    (``x.shape`` is necessary for the same reason ``len(a)`` is for
    `irfft`: the Hermitian-symmetric input only determines the final axis
    length up to parity.)

    The input is expected in the format returned by `rfftn`: `irfft`
    ordering along the final transformation axis, `ifftn` ordering along
    all the others.

    Parameters
    ----------
    x : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output (``s[0]``
        refers to axis 0, ``s[1]`` to axis 1, etc.). This is also the
        number of input points used per axis, except along the last axis,
        where ``s[-1]//2+1`` input points are used. Smaller shapes crop
        the input; larger shapes zero-pad it. If not given, the input
        shape along `axes` is used, with the last axis taken as
        ``2*(m-1)`` for an input of length ``m``.
    axes : sequence of ints, optional
        Axes over which to compute the inverse FFT. Defaults to the last
        ``len(s)`` axes, or all axes if `s` is also unspecified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
    plan : object, optional
        Reserved for precomputed plans from downstream FFT vendors; it is
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along `axes`, with
        axis lengths given by `s`, or inferred from the input (the final
        transformed axis then has length ``2*(m-1)``). To get an odd
        number of output points in the final axis, `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    rfftn : The forward N-D FFT of real input, of which `irfftn` is the
        inverse.
    fft : The 1-D FFT, with definitions and conventions used.
    irfft : The inverse of the 1-D FFT of real input.
    irfft2 : The inverse of the 2-D FFT of real input.

    Notes
    -----
    The default value of `s` assumes an even output length in the final
    transformation axis. Under Hermitian symmetry the last imaginary
    component along that axis must be 0 and is ignored; to avoid losing
    information, the correct length of the real input *must* be given.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.zeros((3, 2, 2))
    >>> x[0, 0, 0] = 3 * 2 * 2
    >>> scipy.fft.irfftn(x)
    array([[[1., 1.],
            [1., 1.]],
           [[1., 1.],
            [1., 1.]],
           [[1., 1.],
            [1., 1.]]])
    """
    # Multimethod stub: only ``x`` participates in backend dispatch; the
    # registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def irfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the inverse of `rfft2`.

    This is `irfftn` with different defaults (the last two axes); see
    `irfftn` for full details.

    Parameters
    ----------
    x : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the real output to the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse FFT. Default: the last
        two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
    plan : object, optional
        Reserved for precomputed plans from downstream FFT vendors; it is
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    rfft2 : The 2-D FFT of real input.
    irfft : The inverse of the 1-D FFT of real input.
    irfftn : The inverse of the N-D FFT of real input.
    """
    # Multimethod stub: only ``x`` participates in backend dispatch; the
    # registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def hfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the N-D FFT of Hermitian symmetric complex input, i.e., a
    signal with a real spectrum.

    Computes the transform over any number of axes such that
    ``ihfftn(hfftn(x, s)) == x`` to within numerical accuracy, where ``s``
    is ``x.shape`` with ``s[-1] = x.shape[-1] * 2 - 1`` (necessary for the
    same reason ``x.shape`` is necessary for `irfft`).

    A 1-D signal ``x`` has a real spectrum when it satisfies the Hermitian
    property::

        x[i] == np.conj(x[-i]) for all i

    which generalizes to higher dimensions by reflecting over each axis in
    turn::

        x[i, j, k, ...] == np.conj(x[-i, -j, -k, ...]) for all i, j, k, ...

    This should not be confused with a Hermitian *matrix*, whose transpose
    is its own conjugate: ``x[i, j] == np.conj(x[j, i])``.

    Parameters
    ----------
    x : array_like
        Input array.
    s : sequence of ints, optional
        Shape (length of each transformed axis) of the output (``s[0]``
        refers to axis 0, etc.). This is also the number of input points
        used per axis, except along the last axis, where ``s[-1]//2+1``
        input points are used. Smaller shapes crop the input; larger
        shapes zero-pad it. If not given, the input shape along `axes` is
        used, with the last axis taken as ``2*(m-1)`` for an input of
        length ``m``.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Defaults to the last
        ``len(s)`` axes, or all axes if `s` is also unspecified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
    plan : object, optional
        Reserved for precomputed plans from downstream FFT vendors; it is
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The truncated or zero-padded input, transformed along `axes`, with
        axis lengths given by `s`, or inferred from the input (the final
        transformed axis then has length ``2*(m-1)``). To get an odd
        number of output points in the final axis, `s` must be specified.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    ihfftn : The inverse N-D FFT with real spectrum. Inverse of `hfftn`.
    fft : The 1-D FFT, with definitions and conventions used.
    rfft : Forward FFT of real input.

    Notes
    -----
    The default value of `s` assumes an even output length in the final
    transformation axis. Under Hermitian symmetry the last imaginary
    component along that axis must be 0 and is ignored; to avoid losing
    information, the correct length of the real input *must* be given.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((3, 2, 2))
    >>> scipy.fft.hfftn(x)
    array([[[12.,  0.],
            [ 0.,  0.]],
           [[ 0.,  0.],
            [ 0.,  0.]],
           [[ 0.,  0.],
            [ 0.,  0.]]])
    """
    # Multimethod stub: only ``x`` participates in backend dispatch; the
    # registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def hfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
          plan=None):
    """
    Compute the 2-D FFT of a Hermitian complex array.

    This is `hfftn` with different default behavior (the last two axes);
    see `hfftn` for full details.

    Parameters
    ----------
    x : array
        Input array, taken to be Hermitian complex.
    s : sequence of ints, optional
        Shape of the real output.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Default: the last two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See `fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
    plan : object, optional
        Reserved for precomputed plans from downstream FFT vendors; it is
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The real result of the 2-D Hermitian complex real FFT.

    See Also
    --------
    hfftn : Compute the N-D discrete Fourier Transform for Hermitian
        complex input.
    """
    # Multimethod stub: only ``x`` participates in backend dispatch; the
    # registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ihfftn(x, s=None, axes=None, norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the N-D inverse discrete Fourier Transform for a real
    spectrum.

    All axes are transformed by default, with the real transform (as by
    `ihfft`) performed over the last axis and complex transforms (as by
    `ifftn`) over the remaining ones. The order of the output is the
    positive part of the Hermitian output signal, in the same format as
    `rfft`, so the last transformed axis has length ``s[-1]//2+1``.

    Parameters
    ----------
    x : array_like
        Input array, taken to be real.
    s : sequence of ints, optional
        Shape (length along each transformed axis) to use from the input
        (``s[0]`` refers to axis 0, ``s[1]`` to axis 1, etc.). Smaller
        shapes crop the input; larger shapes zero-pad it. Defaults to the
        input shape along `axes`.
    axes : sequence of ints, optional
        Axes over which to compute the FFT. Defaults to the last
        ``len(s)`` axes, or all axes if `s` is also unspecified.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
    plan : object, optional
        Reserved for precomputed plans from downstream FFT vendors; it is
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : complex ndarray
        The truncated or zero-padded input, transformed along `axes`. The
        last transformed axis has length ``s[-1]//2+1``; the remaining
        transformed axes have lengths according to `s`, or unchanged from
        the input.

    Raises
    ------
    ValueError
        If `s` and `axes` have different length.
    IndexError
        If an element of `axes` is larger than the number of axes of `x`.

    See Also
    --------
    hfftn : The forward N-D FFT of Hermitian input.
    hfft : The 1-D FFT of Hermitian input.
    fft : The 1-D FFT, with definitions and conventions used.
    fftn : The N-D FFT.
    hfft2 : The 2-D FFT of Hermitian input.

    Examples
    --------
    >>> import scipy.fft
    >>> import numpy as np
    >>> x = np.ones((2, 2, 2))
    >>> scipy.fft.ihfftn(x)
    array([[[1.+0.j, 0.+0.j], # may vary
            [0.+0.j, 0.+0.j]],
           [[0.+0.j, 0.+0.j],
            [0.+0.j, 0.+0.j]]])
    >>> scipy.fft.ihfftn(x, axes=(2, 0))
    array([[[1.+0.j, 0.+0.j], # may vary
            [1.+0.j, 0.+0.j]],
           [[0.+0.j, 0.+0.j],
            [0.+0.j, 0.+0.j]]])
    """
    # Multimethod stub: only ``x`` participates in backend dispatch; the
    # registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
@_dispatch
def ihfft2(x, s=None, axes=(-2, -1), norm=None, overwrite_x=False, workers=None, *,
           plan=None):
    """
    Compute the 2-D inverse FFT of a real spectrum.

    This is `ihfftn` with different defaults (the last two axes); see
    `ihfftn` for full details.

    Parameters
    ----------
    x : array_like
        The input array.
    s : sequence of ints, optional
        Shape of the real input to the inverse FFT.
    axes : sequence of ints, optional
        The axes over which to compute the inverse FFT. Default: the last
        two axes.
    norm : {"backward", "ortho", "forward"}, optional
        Normalization mode (see `fft`). Default is "backward".
    overwrite_x : bool, optional
        If True, the contents of `x` can be destroyed; the default is False.
        See :func:`fft` for more details.
    workers : int, optional
        Maximum number of workers to use for parallel computation. If
        negative, the value wraps around from ``os.cpu_count()``.
    plan : object, optional
        Reserved for precomputed plans from downstream FFT vendors; it is
        currently not used in SciPy.

        .. versionadded:: 1.5.0

    Returns
    -------
    out : ndarray
        The result of the inverse real 2-D FFT.

    See Also
    --------
    ihfftn : Compute the inverse of the N-D FFT of Hermitian input.
    """
    # Multimethod stub: only ``x`` participates in backend dispatch; the
    # registered backend performs the actual transform.
    return (Dispatchable(x, np.ndarray),)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@fft@_basic.py@.PATH_END.py
|
{
"filename": "test_model.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/statsmodels/tsa/arima/tests/test_model.py",
"type": "Python"
}
|
"""
Tests for ARIMA model.
Tests are primarily limited to checking that the model is constructed correctly
and that it is calling the appropriate parameter estimators correctly. Tests of
correctness of parameter estimation routines are left to the individual
estimators' test functions.
Author: Chad Fulton
License: BSD-3
"""
from statsmodels.compat.platform import PLATFORM_WIN32
import io
import numpy as np
import pandas as pd
import pytest
from numpy.testing import assert_equal, assert_allclose, assert_raises, assert_
from statsmodels.datasets import macrodata
from statsmodels.tsa.arima.model import ARIMA
from statsmodels.tsa.arima.estimators.yule_walker import yule_walker
from statsmodels.tsa.arima.estimators.burg import burg
from statsmodels.tsa.arima.estimators.hannan_rissanen import hannan_rissanen
from statsmodels.tsa.arima.estimators.innovations import (
innovations, innovations_mle)
from statsmodels.tsa.arima.estimators.statespace import statespace
# Shared fixture: quarterly US macro data with an explicit quarterly date
# index, used as the endogenous series throughout the tests below.
dta = macrodata.load_pandas().data
dta.index = pd.date_range(start='1959-01-01', end='2009-07-01', freq='QS')
def test_default_trend():
    """Default trend is a constant for d=0 models and none for integrated ones."""
    series = dta['infl'].iloc[:50]

    # Non-integrated model: the default trend is a constant, so an
    # intercept column of ones is created as exog.
    model = ARIMA(series)
    assert_equal(model._spec_arima.trend_order, 0)
    assert_allclose(model.exog, np.ones((model.nobs, 1)))

    # Integrated model (d=1): the default is no trend and no exog.
    model = ARIMA(series, order=(0, 1, 0))
    assert_equal(model._spec_arima.trend_order, None)
    assert_equal(model.exog, None)
def test_invalid():
    """Invalid `ARIMA`-specific options raise ValueError.

    Only covers options specific to `ARIMA`; invalid options that would
    already raise in SARIMAXSpecification are out of scope here.
    """
    endog = dta['infl'].iloc[:50]
    mod = ARIMA(endog, order=(1, 0, 0))
    # Need valid method
    assert_raises(ValueError, mod.fit, method='not_a_method')
    # Can only use certain methods with fixed parameters
    # (e.g. 'statespace' and 'hannan-rissanen')
    with mod.fix_params({'ar.L1': 0.5}):
        assert_raises(ValueError, mod.fit, method='yule_walker')
    # Cannot override model-level values in fit
    assert_raises(ValueError, mod.fit, method='statespace', method_kwargs={
        'enforce_stationarity': False})
    # start_params only valid for MLE methods
    assert_raises(ValueError, mod.fit, method='yule_walker',
                  start_params=[0.5, 1.])
    # has_exog and gls=False with non-statespace method
    mod2 = ARIMA(endog, order=(1, 0, 0), trend='c')
    assert_raises(ValueError, mod2.fit, method='yule_walker', gls=False)
    # non-stationary parameters
    mod3 = ARIMA(np.arange(100) * 1.0, order=(1, 0, 0), trend='n')
    assert_raises(ValueError, mod3.fit, method='hannan_rissanen')
    # non-invertible parameters
    mod3 = ARIMA(np.arange(20) * 1.0, order=(0, 0, 1), trend='n')
    assert_raises(ValueError, mod3.fit, method='hannan_rissanen')
def test_yule_walker():
    """ARIMA.fit(method='yule_walker') matches the standalone estimator."""
    series = dta['infl'].iloc[:50]

    # AR(2) without a trend, since a trend term would trigger GLS
    # estimation and the comparison would no longer be a pure YW fit.
    expected, _ = yule_walker(series, ar_order=2, demean=False)
    result = ARIMA(series, order=(2, 0, 0), trend='n').fit(method='yule_walker')
    assert_allclose(result.params, expected.params)
def test_burg():
    # Test for basic use of Burg estimation (comparison against the
    # standalone `burg` estimator).
    endog = dta['infl'].iloc[:50]
    # AR(2), no trend (since trend would imply GLS estimation)
    desired_p, _ = burg(endog, ar_order=2, demean=False)
    mod = ARIMA(endog, order=(2, 0, 0), trend='n')
    res = mod.fit(method='burg')
    assert_allclose(res.params, desired_p.params)
def test_hannan_rissanen():
    """ARIMA.fit(method='hannan_rissanen') matches the standalone estimator."""
    series = dta['infl'].diff().iloc[1:101]

    # ARMA(1, 1) without a trend, since a trend would trigger GLS estimation.
    expected, _ = hannan_rissanen(
        series, ar_order=1, ma_order=1, demean=False)
    result = ARIMA(series, order=(1, 0, 1), trend='n').fit(
        method='hannan_rissanen')
    assert_allclose(result.params, expected.params)
def test_innovations():
    # Test for basic use of innovations estimation (comparison against the
    # standalone `innovations` estimator).
    endog = dta['infl'].iloc[:50]
    # MA(2), no trend (since trend would imply GLS estimation)
    desired_p, _ = innovations(endog, ma_order=2, demean=False)
    mod = ARIMA(endog, order=(0, 0, 2), trend='n')
    res = mod.fit(method='innovations')
    # `innovations` returns estimates for every order up to ma_order;
    # compare against the highest-order (last) entry.
    assert_allclose(res.params, desired_p[-1].params)
def test_innovations_mle():
    # Test for basic use of innovations MLE estimation (comparison against
    # the standalone `innovations_mle` estimator).
    endog = dta['infl'].iloc[:100]
    # ARMA(1, 1), no trend (since trend would imply GLS estimation)
    desired_p, _ = innovations_mle(
        endog, order=(1, 0, 1), demean=False)
    mod = ARIMA(endog, order=(1, 0, 1), trend='n')
    res = mod.fit(method='innovations_mle')
    # Note: atol is required only due to precision issues on Windows
    assert_allclose(res.params, desired_p.params, atol=1e-5)
    # SARMA(1, 0)x(1, 0)4, no trend (since trend would imply GLS estimation)
    desired_p, _ = innovations_mle(
        endog, order=(1, 0, 0), seasonal_order=(1, 0, 0, 4), demean=False)
    mod = ARIMA(endog, order=(1, 0, 0), seasonal_order=(1, 0, 0, 4), trend='n')
    res = mod.fit(method='innovations_mle')
    # Note: atol is required only due to precision issues on Windows
    assert_allclose(res.params, desired_p.params, atol=1e-5)
def test_statespace():
    # Test for basic use of state-space estimation (comparison against the
    # standalone `statespace` estimator).
    endog = dta['infl'].iloc[:100]
    # ARMA(1, 1), no trend
    desired_p, _ = statespace(endog, order=(1, 0, 1),
                              include_constant=False)
    mod = ARIMA(endog, order=(1, 0, 1), trend='n')
    res = mod.fit(method='statespace')
    # Note: tol changes required due to precision issues on Windows
    rtol = 1e-7 if not PLATFORM_WIN32 else 1e-3
    assert_allclose(res.params, desired_p.params, rtol=rtol, atol=1e-4)
    # ARMA(1, 2), with trend
    desired_p, _ = statespace(endog, order=(1, 0, 2),
                              include_constant=True)
    mod = ARIMA(endog, order=(1, 0, 2), trend='c')
    res = mod.fit(method='statespace')
    # Note: atol is required only due to precision issues on Windows
    assert_allclose(res.params, desired_p.params, atol=1e-4)
    # SARMA(1, 0)x(1, 0)4, no trend
    desired_p, _spec = statespace(endog, order=(1, 0, 0),
                                  seasonal_order=(1, 0, 0, 4),
                                  include_constant=False)
    mod = ARIMA(endog, order=(1, 0, 0), seasonal_order=(1, 0, 0, 4), trend='n')
    res = mod.fit(method='statespace')
    # Note: atol is required only due to precision issues on Windows
    assert_allclose(res.params, desired_p.params, atol=1e-4)
def test_low_memory():
    """low_memory=True gives the same fit while dropping stored output."""
    series = dta['infl'].iloc[:50]
    model = ARIMA(series, order=(1, 0, 0), concentrate_scale=True)

    full = model.fit()
    slim = model.fit(low_memory=True)

    # Same parameter estimates and log-likelihood either way.
    assert_allclose(slim.params, full.params)
    assert_allclose(slim.llf, full.llf)

    # The low_memory fit must not permanently change the model's own
    # memory-conservation setting.
    assert_equal(model.ssm.memory_conserve, 0)

    # Spot-check that the memory-hungry outputs were in fact not stored.
    assert_(slim.llf_obs is None)
    assert_(slim.predicted_state is None)
    assert_(slim.filtered_state is None)
    assert_(slim.smoothed_state is None)
def check_cloned(mod, endog, exog=None):
    """Assert that ``mod.clone(endog, exog)`` is equivalent to ``mod``.

    Compares sample size, index, parameter dimension, starting parameters,
    the log-likelihood evaluated at the starting parameters, and the
    ``concentrate_scale`` setting.
    """
    mod_c = mod.clone(endog, exog=exog)
    assert_allclose(mod.nobs, mod_c.nobs)
    assert_(mod._index.equals(mod_c._index))
    assert_equal(mod.k_params, mod_c.k_params)
    assert_allclose(mod.start_params, mod_c.start_params)
    p = mod.start_params
    assert_allclose(mod.loglike(p), mod_c.loglike(p))
    assert_allclose(mod.concentrate_scale, mod_c.concentrate_scale)
def test_clone():
    """ARIMA.clone reproduces the model across trend/exog/scale variations."""
    series = dta['infl'].iloc[:50]
    regressors = np.arange(series.shape[0])

    # Plain models, pandas and ndarray inputs.
    check_cloned(ARIMA(series), series)
    check_cloned(ARIMA(series.values), series.values)

    # Deterministic trend variants.
    for trend in ('c', 't', 'ct'):
        check_cloned(ARIMA(series, trend=trend), series)

    # Exogenous regressors, alone and combined with a constant.
    check_cloned(ARIMA(series, exog=regressors), series, exog=regressors)
    check_cloned(ARIMA(series, exog=regressors, trend='c'), series,
                 exog=regressors)

    # Concentrated scale.
    check_cloned(ARIMA(series, exog=regressors, trend='c',
                       concentrate_scale=True), series, exog=regressors)

    # Higher order; a different dataset avoids warnings about
    # non-invertible starting parameters.
    series = dta['realgdp'].iloc[:100]
    regressors = np.arange(series.shape[0])
    check_cloned(ARIMA(series, order=(2, 1, 1), seasonal_order=(1, 1, 2, 4),
                       exog=regressors, trend=[0, 0, 1],
                       concentrate_scale=True), series, exog=regressors)
def test_constant_integrated_model_error():
    """Trends that integration would annihilate must raise at construction."""
    ones = np.ones(100)
    # (order, seasonal_order, trend) combinations where the trend order is
    # too low relative to the total order of integration.
    cases = [
        dict(order=(1, 1, 0), trend='c'),
        dict(order=(1, 0, 0), seasonal_order=(1, 1, 0, 6), trend='c'),
        dict(order=(1, 2, 0), trend='t'),
        dict(order=(1, 1, 0), seasonal_order=(1, 1, 0, 6), trend='t'),
    ]
    for kwargs in cases:
        with pytest.raises(ValueError, match="In models with integration"):
            ARIMA(ones, **kwargs)
def test_forecast():
    """Out-of-sample forecasts agree with filtering NaN-extended data."""
    values = dta['infl'].iloc[:100].values
    model = ARIMA(values[:50], order=(1, 1, 0), trend='t')
    result = model.filter([0.2, 0.3, 1.0])

    # Cloning onto the full series with the second half masked means the
    # fitted values over that span are one-step-ahead forecasts.
    masked = values.copy()
    masked[50:] = np.nan
    masked_result = model.clone(masked).filter(result.params)

    assert_allclose(result.forecast(50), masked_result.fittedvalues[-50:])
def test_forecast_with_exog():
    """Forecasts with out-of-sample exog agree with filtering NaN-extended data."""
    endog = dta['infl'].iloc[:100].values
    exog = np.arange(len(endog))**2
    mod = ARIMA(endog[:50], order=(1, 1, 0), exog=exog[:50], trend='t')
    res = mod.filter([0.2, 0.05, 0.3, 1.0])

    # Clone onto the full sample with the second half of endog masked: the
    # fitted values over the masked span are one-step-ahead forecasts and
    # must match forecasting with the out-of-sample exog supplied directly.
    # (Removed leftover debugging `print(...)` statements.)
    endog2 = endog.copy()
    endog2[50:] = np.nan
    mod2 = mod.clone(endog2, exog=exog)
    res2 = mod2.filter(res.params)
    assert_allclose(res.forecast(50, exog=exog[50:]), res2.fittedvalues[-50:])
def test_append():
    """append() gives the same llf as refiltering the full sample."""
    values = dta['infl'].iloc[:100].values

    fitted = ARIMA(values[:50], trend='c').fit()
    extended = fitted.append(values[50:])

    # Filtering the full sample at the extended parameters must reproduce
    # the extended results' log-likelihood.
    refiltered = ARIMA(values).filter(extended.params)
    assert_allclose(refiltered.llf, extended.llf)
def test_append_with_exog():
    """append() with exogenous regressors matches refiltering the full sample."""
    values = dta['infl'].iloc[:100].values
    regressors = np.arange(len(values))

    fitted = ARIMA(values[:50], exog=regressors[:50], trend='c').fit()
    extended = fitted.append(values[50:], exog=regressors[50:])

    refiltered = ARIMA(values, exog=regressors, trend='c').filter(
        extended.params)
    assert_allclose(refiltered.llf, extended.llf)
def test_append_with_exog_and_trend():
    """append() with exog and a 'ct' trend matches refiltering the full sample."""
    values = dta['infl'].iloc[:100].values
    regressors = np.arange(len(values))**2

    fitted = ARIMA(values[:50], exog=regressors[:50], trend='ct').fit()
    extended = fitted.append(values[50:], exog=regressors[50:])

    refiltered = ARIMA(values, exog=regressors, trend='ct').filter(
        extended.params)
    assert_allclose(refiltered.llf, extended.llf)
def test_append_with_exog_pandas():
    """append() works the same when endog and exog are pandas objects."""
    values = dta['infl'].iloc[:100]
    regressors = pd.Series(np.arange(len(values)), index=values.index)

    fitted = ARIMA(values.iloc[:50], exog=regressors.iloc[:50],
                   trend='c').fit()
    extended = fitted.append(values.iloc[50:], exog=regressors.iloc[50:])

    refiltered = ARIMA(values, exog=regressors, trend='c').filter(
        extended.params)
    assert_allclose(refiltered.llf, extended.llf)
def test_cov_type_none():
    """cov_type='none' yields an all-NaN parameter covariance matrix."""
    values = dta['infl'].iloc[:100].values
    result = ARIMA(values[:50], trend='c').fit(cov_type='none')
    assert_allclose(result.cov_params(), np.nan)
def test_nonstationary_gls_error():
    # GH-6540: fitting this high-order model with enforced stationarity and
    # invertibility disabled must raise a clear ValueError about the AR
    # roots rather than failing later inside the estimation.
    endog = pd.read_csv(
        io.StringIO(
            """\
data\n
9.112\n9.102\n9.103\n9.099\n9.094\n9.090\n9.108\n9.088\n9.091\n9.083\n9.095\n
9.090\n9.098\n9.093\n9.087\n9.088\n9.083\n9.095\n9.077\n9.082\n9.082\n9.081\n
9.081\n9.079\n9.088\n9.096\n9.081\n9.098\n9.081\n9.094\n9.091\n9.095\n9.097\n
9.108\n9.104\n9.098\n9.085\n9.093\n9.094\n9.092\n9.093\n9.106\n9.097\n9.108\n
9.100\n9.106\n9.114\n9.111\n9.097\n9.099\n9.108\n9.108\n9.110\n9.101\n9.111\n
9.114\n9.111\n9.126\n9.124\n9.112\n9.120\n9.142\n9.136\n9.131\n9.106\n9.112\n
9.119\n9.125\n9.123\n9.138\n9.133\n9.133\n9.137\n9.133\n9.138\n9.136\n9.128\n
9.127\n9.143\n9.128\n9.135\n9.133\n9.131\n9.136\n9.120\n9.127\n9.130\n9.116\n
9.132\n9.128\n9.119\n9.119\n9.110\n9.132\n9.130\n9.124\n9.130\n9.135\n9.135\n
9.119\n9.119\n9.136\n9.126\n9.122\n9.119\n9.123\n9.121\n9.130\n9.121\n9.119\n
9.106\n9.118\n9.124\n9.121\n9.127\n9.113\n9.118\n9.103\n9.112\n9.110\n9.111\n
9.108\n9.113\n9.117\n9.111\n9.100\n9.106\n9.109\n9.113\n9.110\n9.101\n9.113\n
9.111\n9.101\n9.097\n9.102\n9.100\n9.110\n9.110\n9.096\n9.095\n9.090\n9.104\n
9.097\n9.099\n9.095\n9.096\n9.085\n9.097\n9.098\n9.090\n9.080\n9.093\n9.085\n
9.075\n9.067\n9.072\n9.062\n9.068\n9.053\n9.051\n9.049\n9.052\n9.059\n9.070\n
9.058\n9.074\n9.063\n9.057\n9.062\n9.058\n9.049\n9.047\n9.062\n9.052\n9.052\n
9.044\n9.060\n9.062\n9.055\n9.058\n9.054\n9.044\n9.047\n9.050\n9.048\n9.041\n
9.055\n9.051\n9.028\n9.030\n9.029\n9.027\n9.016\n9.023\n9.031\n9.042\n9.035\n
"""
        ),
        index_col=None,
    )
    mod = ARIMA(
        endog,
        order=(18, 0, 39),
        enforce_stationarity=False,
        enforce_invertibility=False,
    )
    with pytest.raises(ValueError, match="Roots of the autoregressive"):
        mod.fit(method="hannan_rissanen", low_memory=True, cov_type="none")
@pytest.mark.parametrize(
    "ar_order, ma_order, fixed_params",
    [
        (1, 1, {}),
        (1, 1, {'ar.L1': 0}),
        (2, 3, {'ar.L2': -1, 'ma.L1': 2}),
        ([0, 1], 0, {'ar.L2': 0}),
        ([1, 5], [0, 0, 1], {'ar.L5': -10, 'ma.L3': 5}),
    ]
)
def test_hannan_rissanen_with_fixed_params(ar_order, ma_order, fixed_params):
    # Test for basic uses of Hannan-Rissanen estimation with fixed parameters
    endog = dta['infl'].diff().iloc[1:101]
    desired_p, _ = hannan_rissanen(
        endog, ar_order=ar_order, ma_order=ma_order,
        demean=False, fixed_params=fixed_params
    )
    # no constant or trend (since constant or trend would imply GLS estimation)
    mod = ARIMA(endog, order=(ar_order, 0, ma_order), trend='n',
                enforce_stationarity=False, enforce_invertibility=False)
    # Fixing parameters on the model must be equivalent to passing
    # `fixed_params` directly to the standalone estimator.
    with mod.fix_params(fixed_params):
        res = mod.fit(method='hannan_rissanen')
    assert_allclose(res.params, desired_p.params)
@pytest.mark.parametrize(
    "random_state_type", [7, np.random.RandomState, np.random.default_rng]
)
def test_reproducible_simulation(random_state_type):
    """Two simulations with equivalent `random_state` values are identical."""
    x = np.random.randn(100)
    res = ARIMA(x, order=(1, 0, 0)).fit()

    def get_random_state(val):
        # Re-create an equivalent random state from the parametrized value:
        # an int seed is passed through as-is, a generator class is seeded.
        # (Fixed: the helper previously ignored its argument and re-read the
        # enclosing variable, duplicating the hard-coded seed 7.)
        if isinstance(val, int):
            return val
        return val(7)

    sim1 = res.simulate(1, random_state=get_random_state(random_state_type))
    sim2 = res.simulate(1, random_state=get_random_state(random_state_type))
    assert_allclose(sim1, sim2)
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@statsmodels@tsa@arima@tests@test_model.py@.PATH_END.py
|
{
"filename": "utility_functions.py",
"repo_name": "eogarvin/MLCCS",
"repo_path": "MLCCS_extracted/MLCCS-main/ml_spectroscopy/utility_functions.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 13 09:49:10 2021
@author: emily
"""
## LIBRARIES
import numpy as np
import pandas as pd
from PyAstronomy import pyasl
from math import sqrt
from scipy.stats import t
# My modules
from ml_spectroscopy.config import path_init
from ml_spectroscopy.crosscorrNorm import crosscorrRVnorm
from sklearn.metrics import confusion_matrix
## ACTIVE SUBDIR
# Project root directory, resolved from the user's configuration.
subdir = path_init()
#subdir = "C:/Users/emily/Documents/ML_spectroscopy_thesis/"
# PATHS
# Conventional numbered sub-folders under the project root.
code_path = subdir + "50_code/"
data_path = subdir + "30_data/DataSets/"
plot_path = subdir + "60_plots/"
## Utility functions
################################################################################
## Create a small bootstrap to find the scaling values relative to the variance
################################################################################
def scale_miniboot(planet, noise, B, N, Replace=True):
    """Bootstrap the noise-to-planet standard-deviation ratio.

    Draws ``B`` bootstrap resamples of ``N`` rows each from *noise* and
    *planet* (with replacement by default), computes the per-row ratio of
    their standard deviations, and summarizes it across resamples.

    Returns
    -------
    tuple ``(alpha, alphameans, alphasigmas)`` where ``alpha`` is the grand
    mean of the per-resample mean ratios, and the two arrays hold the mean
    and standard deviation of the ratio within each resample.
    """
    noise_df = pd.DataFrame(noise)
    planet_df = pd.DataFrame(planet)
    boot_means = np.zeros(B)
    boot_sigmas = np.zeros(B)
    for b in range(B):
        # Resample noise first, then planet (order matters for reproducibility
        # when the global NumPy RNG is seeded by the caller).
        noise_draw = noise_df.sample(N, replace=Replace)
        planet_draw = planet_df.sample(N, replace=Replace)
        ratio = np.array(noise_draw.std(axis='columns')) / np.array(planet_draw.std(axis='columns'))
        boot_means[b] = np.mean(ratio)
        boot_sigmas[b] = np.std(ratio)
    return np.mean(boot_means), boot_means, boot_sigmas
#####################################################################################
## Function for line by line adjustment of alpha based on the signal to noise ratio
#####################################################################################
def scale_SNR(planet, noise, M, N0, N, step):
    """Map cross-correlation S/N as a function of the planet scaling factor.

    For ``M`` randomly chosen rows (spectra), the planet template is scaled
    by each factor in ``arange(N0, N, step)``, added to the noise spectrum,
    and cross-correlated against the template with PyAstronomy's
    ``crosscorrRV``; the S/N is max(CCF of combination) / std(CCF of noise).

    Returns the (M, n_alpha) S/N grid and the array of scaling factors.
    """
    # Rows are sampled from the overlap of the two tables.
    minsize=min(planet.shape[0], noise.shape[0])
    alpha=np.arange(N0,N,step)
    beta0=pd.DataFrame(np.arange(0,minsize))
    # M distinct row indices, drawn without replacement.
    beta=np.array(beta0.sample(M, replace=False))
    SNR=np.zeros((len(beta),len(alpha)))
    for j in range(0,M):
        it=0
        for i in alpha:
            # beta[[j]] is a 1x1 fancy-indexed array; int() extracts the row index.
            planetarray=np.array(planet.iloc[int(beta[[j]]),:])
            noisearray=np.array(noise.iloc[int(beta[[j]]),:])
            # Column labels are wavelengths stored as strings — convert once per
            # iteration (NOTE(review): loop-invariant, could be hoisted).
            planetwl=pd.to_numeric(planet.columns)
            noisewl=pd.to_numeric(noise.columns)
            combination=i*planetarray+noisearray
            # cc1: noise-only CCF (noise floor); cc2: CCF of planet+noise.
            rv1, cc1 = pyasl.crosscorrRV(noisewl, noisearray, planetwl, planetarray, -2000., 2000., 2000./2000., skipedge=70, mode='doppler')
            rv2, cc2 = pyasl.crosscorrRV(noisewl, combination, planetwl, planetarray, -2000., 2000., 2000./2000., skipedge=70, mode='doppler')
            SNR[j,it]=np.max(cc2)/np.std(cc1)
            it=it+1
    return SNR, alpha
#####################################################################################
## t tests on average of data series
#####################################################################################
def t_test_onSeriesMean(data):
    """Two-sided one-sample t-test that the column means of *data* average to zero.

    The statistic is computed over the per-column means of *data* (n = number
    of columns, df = n - 1).

    Returns a dict with the two-sided p-value ('Pval') and the t statistic
    ('t-stat').
    """
    col_means = data.mean(axis=0)
    n = len(col_means.index)
    dof = n - 1
    # t statistic for H0: mean of the column means == 0
    tstat = (col_means.mean() - 0) / (col_means.std() / sqrt(n))
    pval = (1 - t.cdf(abs(tstat), dof)) * 2
    return {'Pval': pval, 't-stat': tstat}
def t_test_onCCF_max(data, alpha):
    """Two-sided t-test of each row's maximum against that row's mean.

    Parameters
    ----------
    data : pd.DataFrame
        One cross-correlation function per row; columns are RV shifts.
    alpha : float
        Significance level; p <= alpha is classified as a detection (1).

    Returns
    -------
    dict with 'Y_pred' (list of 0/1 labels), 'Pval' (two-sided p-values)
    and 't-stat' (per-row t statistics).
    """
    sqrtn = np.sqrt(len(data.columns))
    xbar = data.mean(axis=1)
    sigmabar = data.std(axis=1)
    maxtotest = data.max(axis=1)
    df = len(data.columns) - 1
    tstat = (maxtotest - xbar) / (sigmabar / sqrtn)
    p = (1 - t.cdf(abs(tstat), df)) * 2
    # Bug fix: the old if/elif pair left the label variable unassigned when
    # p[i] was NaN, silently re-appending the previous row's label (or raising
    # NameError on the first row).  NaN p-values now map to 0 (no detection).
    ypred = [1 if pv <= alpha else 0 for pv in np.asarray(p)]
    return {'Y_pred': ypred, 'Pval': p, 't-stat': tstat}
# =============================================================================
#
# def t_test_onCCF_rv0(data, alpha=0.05):
#
# sqrtn = np.sqrt(len(data.columns))
# xbar = data.mean(axis=1)
# sigmabar = data.std(axis=1)
# #alpha = 0.05
# df = len(data.columns) - 1
# tstat = (xbar - data[0]) / (sigmabar / sqrtn)
# #cv = t.ppf(1.0 - alpha, df)
# p = (1 - t.cdf(abs(tstat), df)) * 2
#
# ypred=[]
# for i in range(0,len(p)):
# if p[i]<=alpha:
# yt=1
# elif p[i]>alpha:
# yt=0
# ypred.append(yt)
#
# return {'Y_pred': ypred,'Pval': p, 't-stat': tstat}
#
#
# =============================================================================
def t_test_onCCF_rv0(data, alpha):
    """Two-sided t-test of the CCF value at RV = 0 against the row mean.

    Parameters
    ----------
    data : pd.DataFrame
        One CCF per row; must contain a column labelled 0 (RV = 0).
    alpha : float
        Significance level.

    Returns
    -------
    dict with p-value based labels ('Y_predp'), critical-value based labels
    ('Y_predt'), the two-sided p-values ('Pval') and t statistics ('t-stat').
    """
    sqrtn = np.sqrt(len(data.columns))
    xbar = data.mean(axis=1)
    sigmabar = data.std(axis=1)
    # df = N - 2 as in the original implementation.
    df = len(data.columns) - 2
    tstat = (data[0] - xbar) / (sigmabar / sqrtn)
    cv = t.ppf((1.0 - alpha / 2), df)
    p = (1 - t.cdf(abs(tstat), df)) * 2
    # Bug fix: the original if/elif chains left the label variable unassigned
    # for NaN p-values / t-statistics, re-appending the previous row's label
    # (or raising NameError on row 0).  NaN now maps to 0 (no detection).
    ypred = [1 if pv < alpha else 0 for pv in np.asarray(p)]
    ypredt = [1 if abs(ts) > cv else 0 for ts in np.asarray(tstat)]
    return {'Y_predp': ypred, 'Y_predt': ypredt, 'Pval': p, 't-stat': tstat}
def t_test_onCCF_rv0_onesided(data, alpha):
    """One-sided t-test of the CCF value at RV = 0 exceeding the row mean.

    Same contract as ``t_test_onCCF_rv0`` but one-sided (upper tail): only
    an excess at RV = 0 counts as a detection, with df = N - 1.
    """
    sqrtn = np.sqrt(len(data.columns))
    xbar = data.mean(axis=1)
    sigmabar = data.std(axis=1)
    df = len(data.columns) - 1
    tstat = (data[0] - xbar) / (sigmabar / sqrtn)
    cv = t.ppf(1.0 - alpha, df)
    # One-sided p-value (upper tail).
    p = (1 - t.cdf(tstat, df))
    # Bug fix: as in t_test_onCCF_rv0, NaN statistics previously re-used the
    # previous row's label (if/elif without else); they now map to 0.
    ypred = [1 if pv < alpha else 0 for pv in np.asarray(p)]
    ypredt = [1 if ts > cv else 0 for ts in np.asarray(tstat)]
    return {'Y_predp': ypred, 'Y_predt': ypredt, 'Pval': p, 't-stat': tstat}
## SNR function
def test_onCCF_rv0_SNR(data, snr):
sigmabar = data.drop(data[range(-200, 200)], axis=1).std(axis=1)
stat = data[0] / sigmabar
ypredstat = []
for i in range(0, len(stat)):
if stat[i] <= snr:
yt = 0
elif (stat[i]) > snr:
yt = 1
ypredstat.append(yt)
return {'Y_pred': ypredstat, 'SNR': stat}
## SNR function with lower RV steps
def test_onCCF_rv0_SNR_drv(data, drv, snr):
sigmabar = data.drop(data[range(-200, 200, drv)], axis=1).std(axis=1)
stat = data[0] / sigmabar
ypredstat = []
for i in range(0, len(stat)):
if stat[i] <= snr:
yt = 0
elif (stat[i]) > snr:
yt = 1
ypredstat.append(yt)
return {'Y_pred': ypredstat, 'SNR': stat}
## SNR function
def test_onCCF_rv0_SNR_CO(data, snr):
sigmabar = data.drop(data[range(-750, 750)], axis=1).std(axis=1)
stat = data[0] / sigmabar
ypredstat = []
for i in range(0, len(stat)):
if stat[i] <= snr:
yt = 0
elif (stat[i]) > snr:
yt = 1
ypredstat.append(yt)
return {'Y_pred': ypredstat, 'SNR': stat}
def test_onCCF_rv0_SNR_autocorrel(data,template,snr):
    """S/N classification at RV = 0 with the template autocorrelation removed.

    Builds the template's autocorrelation function via ``crosscorrRVnorm``,
    rescales it to each data row's peak amplitude, subtracts it outside the
    central window when estimating the noise, and classifies each row by
    data[0] / sigma.  Returns a dict with 'Y_pred' (0/1 labels) and 'SNR'.
    """
    # Everything left of the "tempP" column is the template spectrum;
    # columns are wavelengths stored as strings.
    TempCol = template.columns.get_loc("tempP")
    tf = template.drop(template.columns[TempCol:], axis=1)
    tw = pd.to_numeric(tf.columns)
    tf = np.array(tf).flatten()
    # Autocorrelation: cross-correlate the template against itself.
    df = tf
    dw = tw
    cc1, rv1 = crosscorrRVnorm(dw, df, tw, tf, -2000, 2000, 1, mode="doppler", skipedge=100, edgeTapering=None)
    # NOTE(review): the reshape hard-codes 4000 lags — assumes the
    # [-2000, 2000) grid with step 1; confirm against crosscorrRVnorm.
    cc0=pd.DataFrame(np.reshape(cc1, [1,4000]), columns=rv1)
    # Scale the autocorrelation to each row's |CCF(0)|.
    ratio=np.array((abs(data[0])))/np.array((cc0[0])) # good
    cc=np.tile(np.array((cc0)), (len(ratio),1))*np.transpose([ratio] * cc0.shape[1])
    cc=pd.DataFrame(cc, columns=rv1)
    # Noise of the autocorrelation-subtracted residual outside the central
    # window.  NOTE(review): ``data.drop(data[range(-200,200)], axis=1)``
    # drops by column *labels* while ``cc.drop(cc.columns[range(-200,200)],
    # axis=1)`` drops by column *positions* — these only agree if cc's
    # columns -200..199 sit at positions -200..199 is not guaranteed; verify.
    sigmabar = (np.array(data.drop(data[range(-200,200)], axis=1))-np.array(cc.drop(cc.columns[range(-200,200)], axis=1))).std(axis=1)
    mubar = (np.array(data.drop(data[range(-200,200)], axis=1))-np.array(cc.drop(cc.columns[range(-200,200)], axis=1))).mean(axis=1)
    #sigmabar = data.drop(data[range(-200,200)], axis=1).std(axis=1)
    # NOTE(review): sqrtn and mubar are computed but unused by the final statistic.
    sqrtn = np.sqrt(len(data.drop(data[range(-200,200)], axis=1).columns))
    #xbar = X_test.mean(axis=1)
    #sigmabar = X_test.drop(data[range(-200,200)], axis=1).std(axis=1)
    #alpha = 0.05
    #stat = (data[0]) / (sigmabar / sqrtn)
    #stat = (data[0]-mubar) / sigmabar
    stat = data[0] / sigmabar
    ypredstat=[]
    for i in range(0,len(stat)):
        # NOTE(review): if stat[i] is NaN neither branch assigns yt, so the
        # previous row's label is re-appended (NameError on the first row).
        if stat[i]<=snr:
            yt=0
        elif (stat[i])>snr:
            yt=1
        ypredstat.append(yt)
    return {'Y_pred': ypredstat, 'SNR': stat}
# =============================================================================
# get the keys from a dictionary
# =============================================================================
def DictKeystoList(dictionary_object):
    """Return the keys of *dictionary_object* as a list (insertion order).

    Idiom fix: replaces the manual accumulation loop with the equivalent
    ``list(...)`` construction.
    """
    return list(dictionary_object.keys())
# =============================================================================
# flatten a list
# =============================================================================
def flatten(t):
    """Flatten one nesting level: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sub in t:
        flat.extend(sub)
    return flat
# =============================================================================
# average a list
# =============================================================================
def Average(lst):
    """Arithmetic mean of a non-empty list of numbers."""
    count = len(lst)
    return sum(lst) / count
# =============================================================================
# grid search threshold
# =============================================================================
def grid_search0(hyperparams, prob_Y, data_test):
    """Grid-search a probability threshold under a false-discovery constraint.

    For each candidate threshold in *hyperparams*, labels are predicted as
    ``prob_Y >= threshold`` and the false-discovery rate fp / (fp + tp) is
    recorded.  The first threshold whose FDR drops below 0.05 is selected
    and its confusion-matrix counts returned; if none qualifies, all results
    are NaN.

    Returns a dict with 'optim_score' (selected threshold) and the binary
    confusion-matrix counts 'tn', 'fp', 'fn', 'tp'.
    """
    store = np.full(len(hyperparams), np.nan)
    for i in range(len(hyperparams)):
        newpred = list(map(int, prob_Y >= hyperparams[i]))
        # Perf fix: build the confusion matrix once per threshold instead of
        # three times.  ravel() order for binary labels is (tn, fp, fn, tp).
        cm = confusion_matrix(data_test, newpred).ravel()
        store[i] = cm[1] / (cm[1] + cm[3])
    try:
        optim = int(np.argwhere(store < 0.05)[0])
        optimal_hyperparam = hyperparams[optim]
        optim_pred = list(map(int, prob_Y >= hyperparams[optim]))
        tn, fp, fn, tp = confusion_matrix(data_test, optim_pred).ravel()
    except IndexError:
        # No threshold reached FDR < 0.05.
        optimal_hyperparam = np.nan
        tn, fp, fn, tp = np.nan, np.nan, np.nan, np.nan
    return {'optim_score': optimal_hyperparam, 'tn': tn, 'fp': fp, 'fn': fn, 'tp': tp}
|
eogarvinREPO_NAMEMLCCSPATH_START.@MLCCS_extracted@MLCCS-main@ml_spectroscopy@utility_functions.py@.PATH_END.py
|
{
"filename": "test_red_corr_metrics.ipynb",
"repo_name": "HERA-Team/hera_qm",
"repo_path": "hera_qm_extracted/hera_qm-main/hera_qm/scripts/test_red_corr_metrics.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib notebook
import numpy as np
from hera_cal import omni, utils
reload(utils)
import hera_qm.ant_metrics as ant_metrics
reload(ant_metrics)
from hera_cal.data import DATA_PATH
from hera_cal.redcal import get_pos_reds
import sys
from pyuvdata import UVData
```
```python
def red_corr_metrics(data, pols, antpols, ants, reds, xants=[],
                     rawMetric=False, crossPol=False):
    """Calculate the modified Z-Score over all redundant groups for each antenna.
    Calculates the extent to which baselines involving an antenna do not correlate
    with others they are nominally redundant with.
    Arguments:
    data -- data for all polarizations in a format that can support data.get_data(i,j,pol)
    pols -- List of visibility polarizations (e.g. ['xx','xy','yx','yy']).
    antpols -- List of antenna polarizations (e.g. ['x', 'y'])
    ants -- List of all antenna indices.
    reds -- List of lists of tuples of antenna numbers that make up redundant baseline groups.
    xants -- list of antennas in the (ant,antpol) format that should be ignored.
    rawMetric -- return the raw power correlations instead of the modified z-score
    crossPol -- return results only when the two visibility polarizations differ by a single flip
    Returns:
    powerRedMetric -- a dictionary indexed by (ant,antpol) of the modified z-scores of the mean
    power correlations inside redundant baseline groups that the antenna participates in.
    Very small numbers are probably bad antennas.

    NOTE(review): exploratory notebook version — it never returns a value and
    the correlation accumulation into antCorrs/antCounts is commented out.
    """
    # Compute power correlations and assign them to each antenna
    autoPower = compute_median_auto_power_dict(data, pols, reds)
    antCorrs = {(ant, antpol): 0.0 for ant in ants for antpol in antpols if
                (ant, antpol) not in xants}
    antCounts = deepcopy(antCorrs)
    for pol0 in pols:
        for pol1 in pols:
            iscrossed_i = (pol0[0] != pol1[0])
            iscrossed_j = (pol0[1] != pol1[1])
            onlyOnePolCrossed = (iscrossed_i ^ iscrossed_j)
            # This function can instead record correlations for antennas whose counterpart are pol-swapped
            if (not crossPol and (pol0 is pol1)) or (crossPol and onlyOnePolCrossed):
                for bls in reds:
                    data_shape = data.get_data(bls[0][0], bls[0][1], pol0).shape
                    data_array_shape = (len(bls), data_shape[0], data_shape[1])
                    # correlation_array = np.zeros(corr_shape, dtype=np.complex128)
                    data_array = np.zeros(data_array_shape, np.complex128)
                    data_array1 = np.zeros(data_array_shape, np.complex128)
                    # NOTE(review): 'antopols2' is misspelled — the appends below
                    # target 'antpols2', which is never defined (NameError).
                    antpols1, antopols2 = [], []
                    for n, (ant0_i, ant0_j) in enumerate(bls):
                        data_array[n] = data.get_data(ant0_i, ant0_j, pol0)
                        data_array1[n] = data.get_data(ant0_i, ant0_j, pol1)
                        antpols1.append((ant0_i, pol0[0]))
                        antpols1.append((ant0_j, pol0[1]))
                        antpols2.append((ant0_i, pol1[0]))
                        antpols2.append((ant0_j, pol1[1]))
                    # Take the tensor dot over the times axis, data_arry is (nbls, ntimes, nfreqs)
                    # NOTE(review): .reshape(0,2,1,3) looks like it was meant to be
                    # .transpose([0,2,1,3]) (the corrected cell below uses transpose
                    # with axes=[[1],[1]]); reshape with these args will fail.
                    corr_array = np.tensordot(data_array, data_array1.conj(), axes=[[0],[0]]).reshape(0,2,1,3)
                    corr_array = np.median(corr_array, axis=(2,3))
                    # Normalize by the geometric mean of the per-baseline autos.
                    autos = np.sqrt(np.diagonal(corr_array, axis1=0, axis2=1).copy())
                    corr_array /= autos[:, None]
                    corr_array /= autos[None, :]
                    # for (ant1_i, ant1_j) in bls[n + 1:]:
                    #     data1 = data.get_data(ant1_i, ant1_j, pol1)
                    #     corr = np.median(np.abs(np.mean(data0 * data1.conj(),
                    #                                     axis=0)))
                    #     corr /= np.sqrt(autoPower[ant0_i, ant0_j, pol0] *
                    #                     autoPower[ant1_i, ant1_j, pol1])
                    #     antsInvolved = [(ant0_i, pol0[0]), (ant0_j, pol0[1]),
                    #                     (ant1_i, pol1[0]), (ant1_j, pol1[1])]
                    #     if not np.any([(ant, antpol) in xants for ant, antpol
                    #                    in antsInvolved]):
                    #         # Only record the crossed antenna if i or j is crossed
                    #         if crossPol and iscrossed_i:
                    #             antsInvolved = [(ant0_i, pol0[0]),
                    #                             (ant1_i, pol1[0])]
                    #         elif crossPol and iscrossed_j:
                    #             antsInvolved = [(ant0_j, pol0[1]), (ant1_j, pol1[1])]
                    #         for ant, antpol in antsInvolved:
                    #             antCorrs[(ant, antpol)] += corr
                    #             antCounts[(ant, antpol)] += 1
    # Compute average and return
    for key, count in antCounts.items():
        if count > 0:
            antCorrs[key] /= count
        else:
            # Was not found in reds, should not have a valid metric.
            antCorrs[key] = np.NaN
```
```python
verbose = True
pols = ['xx','xy','yx','yy']
JD = '2457757.47316'
dataFileList = [DATA_PATH + '/zen.2457698.40355.xx.HH.uvcA',
DATA_PATH + '/zen.2457698.40355.yy.HH.uvcA',
DATA_PATH + '/zen.2457698.40355.xy.HH.uvcA',
DATA_PATH + '/zen.2457698.40355.yx.HH.uvcA']
freqs = np.arange(.1,.2,.1/1024)
sys.path.append(DATA_PATH)
uvd = UVData()
uvd.read_miriad(dataFileList[0])
aa = utils.get_aa_from_uv(uvd)
info = omni.aa_to_info(aa, pols=[pols[-1][0]], crosspols=[pols[-1]])
reds = info.get_reds()
metricsJSONFilename = JD+'.metrics.json'
```
```python
am = ant_metrics.AntennaMetrics(dataFileList, reds, fileformat='miriad')
```
```python
%prun red_corr = am.red_corr_metrics(rawMetric=True)
```
```python
am.data.get_data(reds[0][0][0], reds[0][0][1], 'xx').shape
```
(1, 1024)
```python
am.data.data_array.shape
```
(190, 1, 1024, 4)
```python
pol0='xx'
pol1='xx'
for bls in [reds[0]]:
data_shape = am.data.get_data(bls[0][0], bls[0][1], pol0).shape
data_array_shape = (len(bls), data_shape[0], data_shape[1])
# correlation_array = np.zeros(corr_shape, dtype=np.complex128)
data_array = np.zeros(data_array_shape, np.complex128)
data_array1 = np.zeros(data_array_shape, np.complex128)
antpols1, antpols2 = [], []
for n, (ant0_i, ant0_j) in enumerate(bls):
data_array[n] = am.data.get_data(ant0_i, ant0_j, pol0)
data_array1[n] = am.data.get_data(ant0_i, ant0_j, pol1)
antpols1.append((ant0_i, pol0[0]))
antpols1.append((ant0_j, pol0[1]))
antpols2.append((ant0_i, pol1[0]))
antpols2.append((ant0_j, pol1[1]))
corr_array = np.tensordot(data_array, data_array1.conj(), axes=[[1],[1]]).transpose([0,2,1,3])
corr_array = np.median(corr_array, axis=(2,3))
autos = np.sqrt(np.diagonal(corr_array, axis1=0, axis2=1).copy())
corr_array /= autos[:, None]
corr_array /= autos[None, :]
```
divide by zero encountered in divide
invalid value encountered in divide
divide by zero encountered in divide
invalid value encountered in divide
```python
c
```
(9, 1, 1024)
```python
```
|
HERA-TeamREPO_NAMEhera_qmPATH_START.@hera_qm_extracted@hera_qm-main@hera_qm@scripts@test_red_corr_metrics.ipynb@.PATH_END.py
|
{
"filename": "baby_agi.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/cookbook/baby_agi.ipynb",
"type": "Jupyter Notebook"
}
|
# BabyAGI User Guide
This notebook demonstrates how to implement [BabyAGI](https://github.com/yoheinakajima/babyagi/tree/main) by [Yohei Nakajima](https://twitter.com/yoheinakajima). BabyAGI is an AI agent that can generate and pretend to execute tasks based on a given objective.
This guide will help you understand the components to create your own recursive agents.
Although BabyAGI uses specific vectorstores/model providers (Pinecone, OpenAI), one of the benefits of implementing it with LangChain is that you can easily swap those out for different options. In this implementation we use a FAISS vectorstore (because it runs locally and is free).
## Install and Import Required Modules
```python
from typing import Optional
from langchain_experimental.autonomous_agents import BabyAGI
from langchain_openai import OpenAI, OpenAIEmbeddings
```
## Connect to the Vector Store
Depending on what vectorstore you use, this step may look different.
```python
from langchain.docstore import InMemoryDocstore
from langchain_community.vectorstores import FAISS
```
```python
# Define your embedding model
embeddings_model = OpenAIEmbeddings()
# Initialize the vectorstore as empty
import faiss
embedding_size = 1536
index = faiss.IndexFlatL2(embedding_size)
vectorstore = FAISS(embeddings_model.embed_query, index, InMemoryDocstore({}), {})
```
### Run the BabyAGI
Now it's time to create the BabyAGI controller and watch it try to accomplish your objective.
```python
OBJECTIVE = "Write a weather report for SF today"
```
```python
llm = OpenAI(temperature=0)
```
```python
# Logging of LLMChains
verbose = False
# If None, will keep on going forever
max_iterations: Optional[int] = 3
baby_agi = BabyAGI.from_llm(
llm=llm, vectorstore=vectorstore, verbose=verbose, max_iterations=max_iterations
)
```
```python
baby_agi({"objective": OBJECTIVE})
```
[95m[1m
*****TASK LIST*****
[0m[0m
1: Make a todo list
[92m[1m
*****NEXT TASK*****
[0m[0m
1: Make a todo list
[93m[1m
*****TASK RESULT*****
[0m[0m
1. Check the weather forecast for San Francisco today
2. Make note of the temperature, humidity, wind speed, and other relevant weather conditions
3. Write a weather report summarizing the forecast
4. Check for any weather alerts or warnings
5. Share the report with the relevant stakeholders
[95m[1m
*****TASK LIST*****
[0m[0m
2: Check the current temperature in San Francisco
3: Check the current humidity in San Francisco
4: Check the current wind speed in San Francisco
5: Check for any weather alerts or warnings in San Francisco
6: Check the forecast for the next 24 hours in San Francisco
7: Check the forecast for the next 48 hours in San Francisco
8: Check the forecast for the next 72 hours in San Francisco
9: Check the forecast for the next week in San Francisco
10: Check the forecast for the next month in San Francisco
11: Check the forecast for the next 3 months in San Francisco
1: Write a weather report for SF today
[92m[1m
*****NEXT TASK*****
[0m[0m
2: Check the current temperature in San Francisco
[93m[1m
*****TASK RESULT*****
[0m[0m
I will check the current temperature in San Francisco. I will use an online weather service to get the most up-to-date information.
[95m[1m
*****TASK LIST*****
[0m[0m
3: Check the current UV index in San Francisco.
4: Check the current air quality in San Francisco.
5: Check the current precipitation levels in San Francisco.
6: Check the current cloud cover in San Francisco.
7: Check the current barometric pressure in San Francisco.
8: Check the current dew point in San Francisco.
9: Check the current wind direction in San Francisco.
10: Check the current humidity levels in San Francisco.
1: Check the current temperature in San Francisco to the average temperature for this time of year.
2: Check the current visibility in San Francisco.
11: Write a weather report for SF today.
[92m[1m
*****NEXT TASK*****
[0m[0m
3: Check the current UV index in San Francisco.
[93m[1m
*****TASK RESULT*****
[0m[0m
The current UV index in San Francisco is moderate. The UV index is expected to remain at moderate levels throughout the day. It is recommended to wear sunscreen and protective clothing when outdoors.
[91m[1m
*****TASK ENDING*****
[0m[0m
{'objective': 'Write a weather report for SF today'}
```python
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@cookbook@baby_agi.ipynb@.PATH_END.py
|
{
"filename": "parser.py",
"repo_name": "duvall3/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/rat/parser.py",
"type": "Python"
}
|
'''Code for parsing ROOT-style selector strings, as are used in TTree::Draw.'''
import operator
import itertools
def unpack_attributes(identifier):
    '''Converts an identifier string into a list of attribute parts.

    >>> unpack_attributes('mc.numPE')
    ['mc', 'numPE']
    >>> unpack_attributes('mc.particle.pos.X()')
    ['mc', 'particle', 'pos', 'X()']
    '''
    parts = identifier.split('.')
    return parts
def is_loopable(obj):
    '''Returns true if this an object we should loop over by default
    when evaluating attribute lookups.  This includes lists and
    tuples, but not strings or TVector3.

    >>> is_loopable('a')
    False
    >>> is_loopable([1,2,3])
    True
    >>> is_loopable(1)
    False
    >>> is_loopable((1,2,3))
    True
    '''
    # Probe general iterability first (EAFP); strings are iterable but are
    # excluded explicitly below.
    try:
        iter(obj)
        iterable = True
    except TypeError:
        iterable = False
    # NOTE: ``unicode`` makes this Python-2-only; under Python 3 this line
    # would raise NameError.
    if isinstance(obj, (str, unicode)):
        return False
    # Class-name check avoids importing ROOT just to exclude TVector3.
    elif obj.__class__.__name__.endswith('TVector3'):
        return False
    elif iterable:
        return True
    else:
        return False
def merge_holes(list_of_lists):
    '''Combine a list of lists such that the final list has the same
    length as the longest list, and it contains all the non-null entries
    of the individual lists.

    At most one input list may hold a non-None value at any given slot;
    competing values trigger an assertion failure.

    >>> merge_holes([[None, 1, 2], [0]])
    [0, 1, 2]
    '''
    max_len = max(map(len, list_of_lists))
    merged = [None] * max_len
    # Merge all these lists into one list. Require that
    # for each slot there is either zero or one entry.
    # Portability fix: range() instead of Python-2-only xrange(); behavior is
    # identical under both interpreters.
    for i in range(max_len):
        # Filter out non-null
        non_null = [ entry[i] for entry in list_of_lists
                     if i < len(entry) and entry[i] is not None ]
        if len(non_null) == 0:
            merged[i] = None
        elif len(non_null) == 1:
            merged[i] = non_null[0]
        else:
            assert False, 'Two attributes have slot #%d' % i
    return merged
def sum_non_null(a, b):
    '''If ``a`` and ``b`` are not ``None``, return ``a + b``.  Otherwise
    return ``None``.'''
    if a is not None and b is not None:
        return a + b
    return None
def create_evaluation_tree(*selectors):
    '''Returns an AttributeNode tree that evaluates the list of selector strings
    and returns the values in the same order.

    >>> str(create_evaluation_tree('mc.numPE', 'ev.qPE', 'ev.cut'))
    '(mc : (numPE : 0), ev : (qPE : 1, cut : 2))'
    '''
    root = AttributeNode('')
    all_parts = [ unpack_attributes(selector) for selector in selectors ]
    for slot, selector_parts in enumerate(all_parts):
        node = root
        optional = False
        for part in selector_parts:
            if part.endswith('?'):
                optional = True
            # All parts after the first one with a ? need to have a ?
            if optional and not part.endswith('?'):
                part += '?'
            child = node.find_child(part)
            if child is None:
                child = AttributeNode(part)
                node.child.append(child)
            node = child
        # node is the leaf
        node.slot = slot
    # Bug fix: collapse the synthetic '' root only when it has exactly one
    # child.  The previous test was ``len(root.child) == 0``, whose branch
    # could never succeed for real selectors and raised IndexError when
    # called with no selectors at all.
    if len(root.child) == 1:
        return root.child[0]
    else:
        return root
class AttributeNode(object):
    '''Represents a prefix-tree of attribute lookups.

    Each node holds one component of a dotted selector path; leaves carry a
    ``slot`` index into the flat tuple of selector results.  Note: written
    for Python 2 (``eval`` relies on the builtin ``reduce`` and on ``map``
    returning a list).
    '''
    def __init__(self, attribute, slot=None, child=None):
        '''
        ``attribute``: str
            Name of attribute
        ``slot``: integer or None
            If this is a leaf node, it needs a slot number
            that indicates its index in the top-level array of
            attribute values.  If this node has children,
            ``slot`` should be set to None.
        ``child``: list of AttributeNode objects
            The list is copied.  Defaults to empty list.
        '''
        self.attribute = attribute
        self.slot = slot
        if child is None:
            self.child = []
        else:
            self.child = list(child)
        # A node is either a leaf (slot set) or an inner node (children) — never both.
        assert not (slot is not None and len(self.child) > 0)
    def flatten(self):
        '''Returns a list of strings of full attribute lookup strings
        sorted by their slot ID number.  ``None`` will fill empty
        slots.

        Child attribute lookups are joined to their parents with periods.

        >>> AttributeNode('a', slot=2).flatten()
        [None, None, 'a']
        >>> AttributeNode('a', child=[AttributeNode('b',0),AttributeNode('c',3)]).flatten()
        ['a.b', None, None, 'a.c']
        '''
        if self.slot is not None:
            full_attributes = [None] * (self.slot + 1)
            full_attributes[self.slot] = self.attribute
            return full_attributes
        elif len(self.child) == 0:
            return []
        else:
            child_attributes = [ child.flatten() for child in self.child ]
            max_len = max(map(len, child_attributes))
            # NOTE(review): this assignment is dead — the return below builds
            # its own list via merge_holes().
            full_attributes = [None] * max_len
            # The synthetic '' root contributes no prefix.
            if self.attribute == '':
                prefix = ''
            else:
                prefix = self.attribute + '.'
            return [ sum_non_null(prefix, element)
                     for element in
                     merge_holes(child_attributes) ]
    def get(self, obj):
        '''Returns an iterator over the immediate values for the attribute
        represented by this node.  If getattr(obj,self.attribute) is a list
        or tuple, the returned iterator will return each element separately.

        >>> class Struct(object): pass
        >>> a = Struct()
        >>> a.foo = 1
        >>> a.bar = [2,3]
        >>> list(AttributeNode('foo').get(a))
        [1]
        >>> list(AttributeNode('bar').get(a))
        [2, 3]
        >>> list(AttributeNode('@bar').get(a))
        [[2, 3]]
        >>> list(AttributeNode('split()').get('a b c'))
        [['a', 'b', 'c']]
        '''
        attribute = self.attribute
        iterate_list = True
        call_function = False
        none_is_one_element = False
        # Identify special kinds of attributes:
        #   '@name'  -> yield the container itself instead of its elements
        #   'name?'  -> missing/empty value yields a single None
        #   'name()' -> call the attribute and yield its return value
        if attribute.startswith('@'):
            iterate_list = False
            attribute = attribute[1:]
        if attribute.endswith('?'):
            none_is_one_element = True
            attribute = attribute[:-1]
        if attribute.endswith('()'):
            call_function = True
            attribute = attribute[:-2]
        if self.attribute == '':
            yield obj # Pass through empty attribute
        elif obj is not None:
            value = getattr(obj, attribute)
            # PyROOT does not return a None when it gives you a null ptr anymore
            # Have to convert to a boolean to test!
            if not bool(value) and 'RAT' in value.__class__.__name__:
                value = None
            if is_loopable(value) and iterate_list:
                if len(value) == 0 and none_is_one_element:
                    yield None
                else:
                    for element in value:
                        yield element
            else:
                if call_function:
                    yield value()
                else:
                    yield value
        elif none_is_one_element:
            yield None
        else:
            pass # equivalent to zero length list
    def eval(self, obj):
        '''Returns a list of lists of attribute contents for this tree, given
        the slot numbers of the leaf nodes.

        Lists are implicitly looped over, which is why this function returns
        a list of attribute evaluation lists.  When the children of a node
        return different numbers of entries, the parent will return
        the Cartesian product of these values.

        >>> class Struct(object): pass
        >>> obj = Struct()
        >>> obj.a = Struct()
        >>> obj.a.b = 4
        >>> obj.a.c = 'test'
        >>> tree = AttributeNode('a', child=[AttributeNode('b',0),AttributeNode('c',1)])
        >>> tree.eval(obj)
        [[4, 'test']]
        >>> obj.a.b = [4,5]
        >>> obj.a.c = ['a','b','c']
        >>> tree.eval(obj)
        [[4, 'a'], [4, 'b'], [4, 'c'], [5, 'a'], [5, 'b'], [5, 'c']]
        '''
        content_list = []
        if self.slot is not None:
            # Leaf: one row per yielded value, with the value in this slot.
            for v in self.get(obj):
                values = [None] * (self.slot + 1)
                values[self.slot] = v
                content_list.append(values)
        elif len(self.child) == 0:
            pass
        else:
            for v in self.get(obj):
                child_content_lists = [ child.eval(v) for child in self.child ]
                # Python-2 map() returns a list here; reduce is the py2 builtin.
                nentries_each = map(len, child_content_lists)
                nentries_total = reduce(operator.mul, nentries_each)
                # Guard against a combinatorial explosion of the Cartesian product.
                if nentries_total > 1000000:
                    raise Exception("parser: Adding %d rows for one event to "
                                    "ntuple \"%s\"! Aborting to prevent memory "
                                    "explosion."
                                    % (nentries_total, str(self)))
                for value_lists in itertools.product(*child_content_lists):
                    content_list.append(merge_holes(value_lists))
        return content_list
    def find_child(self, child_attr):
        '''Returns AttributeNode for child with attribute ``child_attr`` or
        ``None`` if it does not exist.

        >>> root = AttributeNode('a', child=[AttributeNode('b',0),AttributeNode('c',1)])
        >>> str(root.find_child('b'))
        'b : 0'
        >>> str(root.find_child('d'))
        'None'
        '''
        for child in self.child:
            if child.attribute == child_attr:
                return child
        return None
    def __str__(self):
        '''Returns strings representation of this object which recursively
        stringifies all children.

        Examples:

        >>> str(AttributeNode('a'))
        'a : ()'
        >>> str(AttributeNode('a', slot=0))
        'a : 0'
        >>> str(AttributeNode('a', child=[AttributeNode('b',0),AttributeNode('c',1)]))
        'a : (b : 0, c : 1)'
        >>> str(AttributeNode('', child=[AttributeNode('b',0),AttributeNode('c',1)]))
        '(b : 0, c : 1)'
        '''
        if self.slot is None:
            child_str = ', '.join(map(str, self.child))
            if self.attribute == '':
                return '(%s)' % child_str
            return '%s : (%s)' % (self.attribute, child_str)
        else:
            return '%s : %d' % (self.attribute, self.slot)
|
duvall3REPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@rat@parser.py@.PATH_END.py
|
{
"filename": "test_subhalo.py",
"repo_name": "Jammy2211/PyAutoLens",
"repo_path": "PyAutoLens_extracted/PyAutoLens-main/test_autolens/lens/test_subhalo.py",
"type": "Python"
}
|
import autofit as af
import autolens as al
# def test__detection_array_from():
# samples_list = [
# [
# [
# af.mock.MockSamples(log_likelihood_list=[1.0]),
# af.mock.MockSamples(log_likelihood_list=[2.0]),
# ],
# [
# af.mock.MockSamples(log_likelihood_list=[3.0]),
# af.mock.MockSamples(log_likelihood_list=[4.0]),
# ],
# ],
# ]
#
# grid_search_result_with_subhalo = af.GridSearchResult(
# lower_limits_lists=[[1.0, 2.0], [3.0, 4.0]],
# samples=samples_list,
# grid_priors=[[1, 2], [3, 4]],
# )
#
# subhalo_result = al.subhalo.SubhaloGridSearchResult(
# subhalo_grid_search_result=grid_search_result_with_subhalo,
# fit_imaging_no_subhalo=None,
# samples_no_subhalo=None,
# )
#
# detection_array = subhalo_result.detection_array_from(
# use_log_evidences=False, relative_to_no_subhalo=False, remove_zeros=False
# )
#
# print(detection_array)
|
Jammy2211REPO_NAMEPyAutoLensPATH_START.@PyAutoLens_extracted@PyAutoLens-main@test_autolens@lens@test_subhalo.py@.PATH_END.py
|
{
"filename": "MultiplePlotAxes.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/examples/MultiplePlotAxes.py",
"type": "Python"
}
|
"""
Demonstrates a way to put multiple axes around a single plot.
(This will eventually become a built-in feature of PlotItem)
"""
import pyqtgraph as pg
pg.mkQApp()
pw = pg.PlotWidget()
pw.show()
pw.setWindowTitle('pyqtgraph example: MultiplePlotAxes')
p1 = pw.plotItem
p1.setLabels(left='axis 1')
## create a new ViewBox, link the right axis to its coordinate system
p2 = pg.ViewBox()
p1.showAxis('right')
p1.scene().addItem(p2)
p1.getAxis('right').linkToView(p2)
p2.setXLink(p1)
p1.getAxis('right').setLabel('axis2', color='#0000ff')
## create third ViewBox.
## this time we need to create a new axis as well.
p3 = pg.ViewBox()
ax3 = pg.AxisItem('right')
p1.layout.addItem(ax3, 2, 3)
p1.scene().addItem(p3)
ax3.linkToView(p3)
p3.setXLink(p1)
ax3.setZValue(-10000)
ax3.setLabel('axis 3', color='#ff0000')
## Handle view resizing
def updateViews():
    """Keep the auxiliary ViewBoxes (p2, p3) geometrically in sync with p1.

    Connected to ``p1.vb.sigResized`` below; reads the module-level plot
    objects rather than taking parameters.
    """
    ## view has resized; update auxiliary views to match
    global p1, p2, p3
    p2.setGeometry(p1.vb.sceneBoundingRect())
    p3.setGeometry(p1.vb.sceneBoundingRect())
    ## need to re-update linked axes since this was called
    ## incorrectly while views had different shapes.
    ## (probably this should be handled in ViewBox.resizeEvent)
    p2.linkedViewChanged(p1.vb, p2.XAxis)
    p3.linkedViewChanged(p1.vb, p3.XAxis)
updateViews()
p1.vb.sigResized.connect(updateViews)
p1.plot([1,2,4,8,16,32])
p2.addItem(pg.PlotCurveItem([10,20,40,80,40,20], pen='b'))
p3.addItem(pg.PlotCurveItem([3200,1600,800,400,200,100], pen='r'))
if __name__ == '__main__':
pg.exec()
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@examples@MultiplePlotAxes.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "kimakan/FaintCOS",
"repo_path": "FaintCOS_extracted/FaintCOS-master/README.md",
"type": "Markdown"
}
|
FaintCOS: Improved background subtraction and co-adding code for CALCOS
Authors: Kirill Makan, Gabor Worseck
We used the following version of the software
- python 3.7.3 with astropy 4.0.1, scipy 1.3.1, numpy 1.17.3
- CALCOS 3.3.9
- GCC 7.4.0 (required for the calculation of the Feldman & Cousins uncertainties, see Section 5)
There is no guarantee that FaintCOS will work properly with other versions.
We caution that the default reduction parameters, such as primary science apertures (PSA) and
pulse height amplitude (PHA) limits, are optimized for faint point sources. For the extended
sources, you most likely have to adapt these parameters. Please, use "plot_datasets_info.py" to
check 2D spectra and PHA distributions (see Sections 1 and 6).
CONTENT:
1. INSTRUCTIONS
2. PRODUCED FILES
3. FIT TABLE COLUMNS
4. REDUCTION PARAMETERS
5. CALCULATED UNCERTAINTIES
6. OPTIONAL CODE
-------------------------------------------------------------------------------------
1. INSTRUCTIONS
-------------------------------------------------------------------------------------
- Download uncalibrated darkframes and associated calibration/reference files
from https://archive.stsci.edu/hst/search.php
For this, make the following changes in the standard form:
Target Name: "DARK"
Resolver: Don't Resolve
Select Imagers: COS
Start Time: "> YYYY-MM-DD" (earliest science data start
time - DARK_EXPSTART_INTERVAL, see Section 4).
Uncheck "Science" and select "Calibration"
User-specified field 1: select "Start Time" and write "< YYYY-MM-DD" (latest science
data start time + DARK_EXPSTART_INTERVAL, see Section 4).
User-specified field 2: select "Instrument Config" and write "COS/FUV"
Search for the darkframes by clicking "Search".
All available darks should be now listed in a table. Click "Mark all" and then
"Submit marked data for retrieval from STDADS". A new window will open and show
the retrieval form. Uncheck "Calibrated" and select "Used Reference Files" and
"Uncalibrated". Send the retrieval request anonymously or, if you are registered,
with your STScI ID.
- Download uncalibrated science data and associated calibration/reference files
from https://archive.stsci.edu/hst/search.php
Make sure to uncheck "Calibrated" and select "Used Reference Files" and
"Uncalibrated" in the retrieval form after you have selected the datasets.
- Put all calibration/reference files from darkframes AND science
data in one folder (e.g. "calib"). You can easily spot the reference files,
because they do not contain a dataset ID in their file name.
- Define 'lref' as described in "COS Data Handbook (Version 4.0)" Section 3.6.1
example:
export lref="/home/user/Documents/calib/"
- Create a new folder for scientific data for every object and resolution (low and medium
resolution data should be ALWAYS in different folders)
- Check the defined PSA and PHA limits in the faintcos_config.py (add new
definitions if necessary). Later (after CALCOS), you can run "plot_datasets_info.py" to
check the PSA position and PHA distribution.
- Run "pre_calcos.py" with the path to the uncalibrated darkframes (rawtag files) as
an argument
example:
python pre_calcos.py /home/user/Documents/darkframes/
!!!WARNING!!! ALL CODES SHOULD RUN IN THE SAME AstroConda ENVIRONMENT AS CALCOS!!!
- Copy "exec_calcos_for_darks.py" into the folder with the uncalibrated darkframes
and then run it. It will reduce all darkframes and put reduced files in the
folder "reduced". (This process can take a while)
- Put the reduced darkframes files (corrtag files) in a separate folder and define
'ldark' = path_to_reduced_darkframes, as you defined 'lref'.
- Run 'pre_calcos.py" with the path to the folder with uncalibrated data for
a single target and resolution (M or L).
example:
python pre_calcos.py /home/user/Documents/data/QSO-231145-141752/
- Reduce the scientific data with CALCOS
- Set the switches (binning, co-adding, wavelength range etc.) in the 'faintcos_config.py'
(see Section 4 below)
- Run 'post_calcos.py' with the path to the REDUCED science data (corrtags and 1dx
files) as a command-line argument (it is the working folder for the pipeline).
example:
python post_calcos.py /home/user/Documents/data/reduced_QSO-231145-141752/
- All visits and exposures will be listed after you run post_calcos.py. If you
want to reduce the listed data, then proceed with the reduction.
- After post_calcos.py is done, the reduced files appear in the working folder
The default version of the code calculates the statistical count errors according
to Kraft et al. 1991.
If you wish to use the more correct Feldman & Cousins 1998 confidence limits instead:
- Install CustomConfLim module by running in the CustomConfLimits folder:
"python setup.py build"
"python setup.py install"
- Set FELDMAN_COUSINS=True in the faintcos_config.py file
-------------------------------------------------------------------------------------
2. PRODUCED FILES
-------------------------------------------------------------------------------------
"DATASET_dataset_sum.fits"
Co-added spectrum for a single data set. Similar to the standard CALCOS x1dsum.fits
file but with the improved data reduction.
"TARGETNAME_DATASET_Npx_binned.fits":
Binned spectrum of a single dataset in N pixels bins. Number of binned pixels
is defined in post_calcos.py with BIN_PX. This file will only be produced if the
switch "BIN_VISIT" is set to "True". If there are several data sets in the working
folder then the pipeline produces new file for every data set.
"TARGETNAME_spectrum.fits":
Co-added and binned spectrum of all data sets in the working folder in BIN_SIZE bins.
The width of the bins in angstroms is defined in post_calcos.py with BIN_SIZE. This
file will only be produced if the switch "COADD_ALL_VISITS" is set to "True".
WARNING!!! Make sure that every data set in the working folder is for the same
target and resolution (M or L)!!!
------------------------------------------------------------------------------------
3. DESCRIPTION OF THE COLUMNS
------------------------------------------------------------------------------------
|Column Name            |Units              |Description                             |
|:----------------------|:------------------|:--------------------------------------|
|WAVELENGTH |angstrom |Wavelength scale |
|FLUX |ergs/s/cm^2/A |Calibrated flux |
|FLUX_ERR_UP |ergs/s/cm^2/A |Upper error estimate |
|FLUX_ERR_DOWN |ergs/s/cm^2/A |Lower error estimate |
|GCOUNTS |counts |Gross counts |
|BACKGROUND |counts |Dark current + Scattered light |
|BKG_ERR_UP |counts |Upper sys. error for BACKGROUND |
|BKG_ERR_DOWN |counts |Lower sys. error for BACKGROUND |
|DARK_CURRENT |counts |Estimated dark current model |
|DARK_CURRENT_ERR |counts |Sys. error of DARK_CURRENT |
|DQ | |Data quality flag |
|EXPTIME |seconds |Pixel exposure time |
|CALIB |cnts cm^2 A/erg |Flux calibration factor, lin. interpolated NET/FLUX from 1dx files |
|FLAT_CORR | |Flatfield and deadtime factor, (GROSS - BACKGROUND)/NET from 1dx files |
|LYA_SCATTER |counts |Estimated contamination by Lya, according to Worseck et al. 2016 |
|LYA_SCATTER_ERR_UP |counts |Upper error for LYA_SCATTER |
|LYA_SCATTER_ERR_DOWN |counts |Lower error for LYA_SCATTER |
-------------------------------------------------------------------------------------
4. REDUCTION PARAMETERS (faintcos_config.py)
-------------------------------------------------------------------------------------
PHA_OPT_ELEM_SEGMENT: Lower and upper threshold for the valid counts
(see "COS Data Handbook (Version 4.0)" Section 3.4.9)
This parameter should be chosen according to the PHA distribution
of the detected photons in the PSA, since it changes with time
(see Figure 1.9 in "COS Data Handbook (Version 4.0)"
and Worseck et al. 2016).
DARK_EXPSTART_INTERVAL: For every exposure the code searches for darkframes in the
time period +/-DARK_EXPSTART_INTERVAL around the exposure
date. The value should be given in days. Standard value is
30 days. Can be increased, if there are not enough darkframes
in this time interval.
MIN_DARKS: Minimum number of darks that will be selected.
KS_THRESHOLD: Kolmogorov-Smirnov test threshold for the comparison of the
cumulative PHA distributions (science vs. darkframe). Only
darkframes with KS-test lower than KS_THRESHOLD will be selected.
If the number of selected darks is lower than MIN_DARKS
than the KS_THRESHOLD will be increased by KS_STEP until
sufficient number of darkframes is reached.
KS_STEP: KS_THRESHOLD will be increased with this value if the number
of selected darkframes is lower than MIN_DARKS
BKG_AV: The width of the window in pixels for the central moving
average to calculate the dark current model. The central
moving average is applied on the PSA of the stacked darkframes,
which were selected with the KS-Test. Since the algorithm uses
central moving average, BKG_AV must be an odd number!
BIN_SIZE: Bin size of the co-added spectrum in Angstrom. It is relevant
only if COADD_ALL_VISITS = True.
BIN_PX: Bin size of the binned spectrum for every data set in pixels. It
is relevant only if BIN_VISIT = True.
CUSTOM_INTERVAL: If TRUE, the co-add routine will use custom wavelength region
for the final co-add of all data sets in the working folder. The
regions is defined by WAVE_MIN and WAVE_MAX in Angstrom. It works
only for the total co-add of all data sets (COADD_ALL_VISITS = True).
If FALSE, the co-add routine will use max. and min. wavelength of
all available data.
BIN_DATASET: If TRUE, the spectra for every data set will be binned in BIN_PX pixels
and stored in the working folder as TARGETNAME_DATASET_Npx_binned.fits
COADD_ALL_DATASETS: If TRUE, the spectra of all data sets in the working folder will be
co-added to a single spectrum with binning size BIN_SIZE Angstrom.
!!!FOR THIS TO WORK PROPERLY, MAKE SURE TO HAVE SPECTRA FOR THE SAME
OBJECT AND RESOLUTION (M OR L) IN THE WORKING FOLDER!!!
FELDMAN_COUSINS: if TRUE, the pipeline will use the algorithm from Feldman & Cousins 1998
to calculate the statistical count errors in the Poissonian regime.
Otherwise it uses the algorithm from Kraft et al. 1991.
For this to work, you need to install the custom C library. (see INSTRUCTIONS)
-------------------------------------------------------------------------------------
5. CALCULATED UNCERTAINTIES
-------------------------------------------------------------------------------------
We provide two methods for the calculation of the 1\sigma uncertainties.
- The Bayesian method (Kraft et al. 1991, http://articles.adsabs.harvard.edu/pdf/1991ApJ...374..344K)
with the shortest 68.26% confidence interval. Kraft et al. (1991) provide only the confidence
limits. We modified it by assuming that the measured signal is N-B (N - measured counts,
B - background counts).
- The frequentist method (Feldman & Cousins 1998, https://arxiv.org/abs/physics/9711021),
which use likelihood ratio ordering for the measured signal (N-B). For the unphysical case N<B,
we use Monte Carlo simulations to calculate the limits.
Then, the calculated uncertainties are ERR_UP = upper_limit - (N-B) and
ERR_DOWN = (N-B) - lower_limit. The uncertainties for the unphysical cases where the signal
is negative (the measured counts N is smaller than the background B) should be treated with caution.
For this, we suggest to use the "sensitivity" as defined by Feldman & Cousins (1998) which is the upper
limit on the Poisson-distributed background with no signal. FaintCOS does not provide sensitivity calculations.
-------------------------------------------------------------------------------------
6. OPTIONAL CODE
-------------------------------------------------------------------------------------
"exec_calcos_for_darks.py": executes CALCOS on dark frames and puts the reduced corrtag files
into the "/reduced/" folder. Copy this code into the folder with
the downloaded dark frame "rawtag" files and run it.
"plot_datasets_info.py": plots the stacked 2D spectra from the "corrtag" files for each dataset
and segment (FUVA/FUVB). Additionally, it plots the histogram for the PHA
of the counts in the primary science aperture. The primary science aperture
is indicated with dashed red lines in the 2D spectrum.
Run the code with the path to the corrtags and 1dx files as command-line argument.
The code creates a new subdirectory "/datasets_info" where it stores the resulting
2D spectra. It also creates a .txt file with a table for all corrtags in the directory.
|
kimakanREPO_NAMEFaintCOSPATH_START.@FaintCOS_extracted@FaintCOS-master@README.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/astro/tests/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2018-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for :mod:`gwpy.astro`
"""
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@astro@tests@__init__.py@.PATH_END.py
|
{
"filename": "_z.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/contours/y/project/_z.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for the ``z`` property of ``surface.contours.y.project``."""

    def __init__(
        self, plotly_name="z", parent_name="surface.contours.y.project", **kwargs
    ):
        # Pop the edit type first so an explicit caller override wins;
        # "calc" is the default for this property.
        edit_type = kwargs.pop("edit_type", "calc")
        super(ZValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@contours@y@project@_z.py@.PATH_END.py
|
{
"filename": "_gridcolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/ternary/caxis/_gridcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class GridcolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``gridcolor`` property of ``layout.ternary.caxis``."""

    def __init__(
        self, plotly_name="gridcolor", parent_name="layout.ternary.caxis", **kwargs
    ):
        # Pop the edit type first so an explicit caller override wins;
        # "plot" is the default for this property.
        edit_type = kwargs.pop("edit_type", "plot")
        super(GridcolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@ternary@caxis@_gridcolor.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/textfont/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``shadow`` property of ``scattercarpet.textfont``."""

    def __init__(
        self, plotly_name="shadow", parent_name="scattercarpet.textfont", **kwargs
    ):
        # Defaults: arrays of strings are accepted, edit type is "calc";
        # both may be overridden through kwargs.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattercarpet@textfont@_shadow.py@.PATH_END.py
|
{
"filename": "7DT_Routine.py",
"repo_name": "SilverRon/gppy",
"repo_path": "gppy_extracted/gppy-main/7DT_Routine.py",
"type": "Python"
}
|
# %% [markdown]
# # IMSNG_Routine.ipynb
# - Automatically Process the image data from GECKO facilities and search for transients in the subtracted images with `gpPy`
# - Author: Gregory S.H. Paek (23.04.24)
# %% [markdown]
# ## Library
# %%
from __future__ import print_function, division, absolute_import
import os, sys, glob, subprocess
import numpy as np
import astropy.io.ascii as ascii
import matplotlib.pyplot as plt
plt.ioff()
from astropy.nddata import CCDData
from preprocess import calib
from util import tool
from astropy.io import fits
from astropy.table import Table, vstack
from astropy import units as u
from ccdproc import ImageFileCollection
import warnings
warnings.filterwarnings(action='ignore')
# from itertools import product
from itertools import repeat
import multiprocessing
import time
# Wall-clock timestamp (local time) marking the start of this pipeline run.
start_localtime = time.strftime('%Y-%m-%d %H:%M:%S (%Z)', time.localtime())
# %%
# plot setting
import matplotlib.pyplot as plt
import matplotlib as mpl
from IPython.core.interactiveshell import InteractiveShell
# Show only the last expression of each cell when run interactively.
InteractiveShell.ast_node_interactivity = "last_expr"
mpl.rcParams["axes.titlesize"] = 14
mpl.rcParams["axes.labelsize"] = 20
plt.rcParams['savefig.dpi'] = 500
plt.rc('font', family='serif')
# %% [markdown]
# ## Ready
# %%
# Observatory name: first CLI argument, otherwise prompt interactively.
try:
    obs = (sys.argv[1]).upper()
except:
    # No (or unusable) CLI argument -> ask the user.
    obs = input(f"7DT## (e.g. 7DT01):").upper()
# obs = '7DT01'
print(f'# Observatory : {obs.upper()}')
# Number of worker cores for the multiprocessing steps; defaults to 2.
try:
    ncores = int(sys.argv[2])
except:
    ncores = 2
print(f"- Number of Cores: {ncores}")
# %% [markdown]
# ### Path
# %%
# Hard-coded server paths for the working area and per-observatory folders.
path_base = '/data4/gecko/factory'
path_ref = f'{path_base}/ref_frames/{obs.upper()}'
path_factory = f'{path_base}/{obs.lower()}'
path_save = f'/data6/bkgdata/{obs.upper()}'
path_log = f'/home/paek/log/{obs.lower()}.log'
path_keys = '/home/paek/table'
#------------------------------------------------------------
path_gal = '/data6/IMSNG/IMSNGgalaxies'
path_refcat = '/data4/gecko/factory/ref_frames/LOAO'
#------------------------------------------------------------
# path_config = '/home/paek/config'
path_config = './config'
path_default_gphot = f'{path_config}/gphot.{obs.lower()}.config'
path_mframe = f'/data3/paek/factory/master_frames'
path_calib = f'{path_base}/calib'
#------------------------------------------------------------
# Codes
#------------------------------------------------------------
# Photometry / transient-search helper scripts invoked later in the pipeline.
path_phot_sg = './phot/gregoryphot_2021.py'
path_phot_mp = './phot/gregoryphot_mp_2021.py'
path_phot_sub = './phot/gregoryphot_sub_2021.py'
path_find = './phot/gregoryfind_bulk_mp_2021.py'
#------------------------------------------------------------
path_obsdata = '/data6/obsdata'
path_raw = f'{path_obsdata}/{obs.upper()}'
# Raw nightly folders; names starting with "2" (i.e. year 2xxx).
rawlist = sorted(glob.glob(path_raw+'/2*'))
#------------------------------------------------------------
path_obs = '/home/paek/table/obs.dat'
path_changehdr = '/home/paek/table/changehdr.dat'
path_alltarget = '/home/paek/table/alltarget.dat'
# CCD properties (pixel scale, FoV, ...) for this observatory.
ccdinfo = tool.getccdinfo(obs, path_obs)
# %% [markdown]
# - Folder setting
# %%
# Create the working / reference / output folders on first run.
if not os.path.exists(path_base):
    os.makedirs(path_base)
if not os.path.exists(path_ref):
    os.makedirs(path_ref)
if not os.path.exists(path_factory):
    os.makedirs(path_factory)
if not os.path.exists(path_save):
    os.makedirs(path_save)
# %% [markdown]
# ### Table
# %%
# Processing log (one entry per already-processed night) and lookup tables.
logtbl = ascii.read(path_log)
datalist = np.copy(logtbl['date'])
obstbl = ascii.read(path_obs)
hdrtbl = ascii.read(path_changehdr)
alltbl = ascii.read(path_alltarget)
keytbl = ascii.read(f'{path_keys}/keys.dat')
# %% [markdown]
# ## Process Summary Status
# %%
# Per-step status/timing table, filled in as the pipeline progresses.
protbl = Table()
protbl['process'] = ['master_frame', 'pre_process', 'astrometry', 'cr_removal', 'defringe', 'photometry', 'image_stack', 'photometry_com', 'subtraction', 'photometry_sub', 'transient_search', 'total']
protbl['status'] = False
protbl['time'] = 0.0 * u.second
# %% [markdown]
# ## Main Body
# %%
# Nights present in the raw folder but absent from the log are "new" data.
newlist = [i for i in rawlist if (i not in datalist) & (i+'/' not in datalist)]
if len(newlist) == 0:
    print('No new data')
    sys.exit()
else:
    print(newlist)
# path = newlist[0]
# Process the most recent unprocessed night.
path = newlist[-1]
tdict = dict()
starttime = time.time()
path_data = '{}/{}'.format(path_factory, os.path.basename(path))
# Remove old folder and re-copy folder
rmcom = 'rm -rf {}'.format(path_data)
print(rmcom)
os.system(rmcom)
cpcom = 'cp -r {} {}'.format(path, path_data)
print(cpcom)
os.system(cpcom)
# %% [markdown]
# ### Header Correction
# %%
# Snapshot of the raw headers before any correction is applied.
ic0 = ImageFileCollection(path_data, keywords='*')
ic0.summary.write(f'{path_data}/hdr.raw.dat', format='ascii.tab', overwrite=True)
obsinfo = calib.getobsinfo(obs, obstbl)
calib.correcthdr_routine(path_data, hdrtbl, obs)
print("Correction Done")
# objfilterlist, objexptimelist, flatfilterlist, darkexptimelist, obstime = calib.correcthdr_routine(path_data, hdrtbl)
ic1 = ImageFileCollection(path_data, keywords='*')
ic1.summary.write('{}/hdr.cor.dat'.format(path_data), format='ascii.tab', overwrite=True)
# Count science frames; the IMAGETYP keyword may be upper- or lower-case.
try:
    nobj = len(ic1.filter(imagetyp='OBJECT').summary)
except:
    try:
        nobj = len(ic1.filter(imagetyp='object').summary)
    except:
        nobj = 0
# %% [markdown]
# ### Marking the `GECKO` data
# %%
# testobj = 'S190425z'
project = "7DT"
obsmode = "MONITORING" # Default
# Object names starting with 'MS' or 'S2' match the GECKO event naming
# scheme used below, so such nights are re-tagged as GECKO data.
if 'OBJECT' in ic1.summary.keys():
    for obj in ic1.filter(imagetyp='object').summary['object']:
        if 'MS' in obj[:2]: # MS230425 (test event)
            print(obj)
            project = "GECKO"
            obsmode = "TEST"
        elif 'S2' in obj[:2]: # S230425 (super event)
            print(obj)
            project = "GECKO"
            obsmode = "FOLLOWUP" # Follow-up
        else:
            pass
else:
    pass
print(f"[{project}] {obsmode}")
# %% [markdown]
# - Slack notification
# %%
# Announce the start of processing on the #pipeline Slack channel.
OAuth_Token = keytbl['key'][keytbl['name']=='slack'].item()
channel = '#pipeline'
text = f'[`gpPy`/{project}-{obsmode}] Start Processing {obs} {os.path.basename(path)} Data ({nobj} objects) with {ncores} cores'
param_slack = dict(
    token = OAuth_Token,
    channel = channel,
    text = text,
)
tool.slack_bot(**param_slack)
# %% [markdown]
# ### Master Frame
# %% [markdown]
# - Bias
# %%
st = time.time()
#------------------------------------------------------------
# BIAS
#------------------------------------------------------------
# Count bias frames; the filter raises when the image type is absent.
try:
    biasnumb = len(ic1.filter(imagetyp='Bias').summary)
except:
    biasnumb = 0
# if len(ic1.filter(imagetyp='Bias').summary) != 0:
if biasnumb != 0:
    # Build tonight's master bias and archive a dated copy.
    mzero = calib.master_zero(ic1, fig=False)
    # print(zeroim)
    date = fits.getheader(f'{path_data}/zero.fits')['date-obs'][:10].replace('-', '')
    zeroim = f'{path_mframe}/{obs}/zero/{date}-zero.fits'
    if not os.path.exists(os.path.dirname(zeroim)):
        os.makedirs(os.path.dirname(zeroim))
    cpcom = f'cp {path_data}/zero.fits {zeroim}'
    print(cpcom)
    os.system(cpcom)
    plt.close('all')
else:
    # IF THERE ARE NO BIAS FRAMES, BORROW FROM CLOSEST OTHER DATE
    print('\nNO BIAS FRAMES\n')
    pastzero = np.array(glob.glob(f'{path_mframe}/{obs}/zero/*zero.fits'))
    # CALCULATE CLOSEST ONE FROM TIME DIFFERENCE
    deltime = []
    for date in pastzero:
        zeromjd = calib.isot_to_mjd((os.path.basename(date)).split('-')[0])
        deltime.append(np.abs(ic1.summary['mjd'][0]-zeromjd))
    indx_closet = np.where(deltime == np.min(deltime))
    # tmpzero = path_data+'/'+os.path.basename(np.asscalar(pastzero[indx_closet]))
    tmpzero = f"{path_data}/{os.path.basename(pastzero[indx_closet][0])}"
    # cpcom = 'cp {} {}'.format(np.asscalar(pastzero[indx_closet]), tmpzero)
    cpcom = f'cp {pastzero[indx_closet][0]} {tmpzero}'
    print(cpcom)
    os.system(cpcom)
    mzero = CCDData.read(tmpzero, hdu=0)#, unit='adu')
    mzero.meta['FILENAME'] = os.path.basename(tmpzero)
delt_bias = time.time() - st
print(f"{delt_bias:.3f} sec")
# %% [markdown]
# - Dark
# %%
#------------------------------------------------------------
# DARK (ITERATION FOR EACH EXPOSURE TIMES)
#------------------------------------------------------------
st = time.time()
# Collect the distinct dark exposure times; the filter raises when no
# dark frames are present, in which case darknumb stays 0.
try:
    darkexptimelist = sorted(list(set(ic1.filter(imagetyp='dark').summary['exptime'])))
    darknumb = len(darkexptimelist)
except:
    darknumb = 0
darkdict = dict()
# if len(darkexptimelist) != 0:
if darknumb != 0:
    dark_process = True
    # Build one master dark per exposure time and archive a dated copy.
    for i, exptime in enumerate(darkexptimelist):
        print('PRE PROCESS FOR DARK ({} sec)\t[{}/{}]'.format(exptime, i+1, len(darkexptimelist)))
        mdark = calib.master_dark(ic1, mzero=mzero, exptime=exptime, fig=False)
        darkdict['{}'.format(int(exptime))] = mdark
        date = fits.getheader(f'{path_data}/dark-{int(exptime)}.fits')['date-obs'][:10].replace('-', '')
        if obs == 'RASA36':
            # NOTE(review): 'mode' is not defined anywhere in this script;
            # this RASA36 branch would raise NameError -- confirm upstream.
            darkim = f'{path_mframe}/{obs}/dark/{int(exptime)}-{date}-dark_{mode}.fits'
        else:
            darkim = f'{path_mframe}/{obs}/dark/{int(exptime)}-{date}-dark.fits'
        # print(zeroim)
        cpcom = 'cp {}/dark-{}.fits {}'.format(path_data, int(exptime), darkim)
        print(cpcom)
        os.system(cpcom)
        plt.close('all')
else:
    # Borrow
    print('\nNO DARK FRAMES\n')
    objexptimelist = sorted(list(set(ic1.filter(imagetyp='object').summary['exptime'])))
    exptime = objexptimelist[-1]
    # pastdark = np.array(glob.glob('{}/{}/dark/{}*dark*.fits'.format(path_mframe, obs, int(exptime))))
    if obs == 'RASA36':
        pastdark = np.array(glob.glob(f'{path_mframe}/{obs}/dark/{int(exptime)}*dark_{mode}.fits'))
    else:
        pastdark = np.array(glob.glob(f'{path_mframe}/{obs}/dark/{int(exptime)}*dark.fits'))
    if len(pastdark) == 0:
        # No archived dark for this exposure time: fall back to any dark.
        pastdark = np.array(glob.glob('{}/{}/dark/*dark*.fits'.format(path_mframe, obs)))
    else:
        pass
    # CALCULATE CLOSEST ONE FROM TIME DIFFERENCE
    deltime = []
    delexptime = []
    darkexptimes = []
    for date in pastdark:
        # darkmjd = calib.isot_to_mjd((os.path.basename(date)).split('-')[0])
        darkmjd = calib.isot_to_mjd((os.path.basename(date)).split('-')[1])
        darkexptime = int( os.path.basename(date).split('-')[0] )
        # darkexptime = delexptime.append(int( os.path.basename(date).split('-')[1] ))
        darkexptimes.append(darkexptime)
        deltime.append(np.abs(ic1.summary['mjd'][0]-darkmjd))
    if 'KCT' in obs:
        # KCT: pick the dark whose exposure time is closest to the science one.
        indx_closet = np.where(
            (np.abs(np.array(darkexptimes)-exptime) == np.min(np.abs(np.array(darkexptimes)-exptime)))
        )
    else:
        # Otherwise: closest in time AND longest available exposure.
        indx_closet = np.where(
            (deltime == np.min(deltime)) &
            (darkexptimes == np.max(darkexptimes))
        )
    if len(indx_closet[0]) == 0:
        # Relax the exposure-time condition if nothing matched both criteria.
        indx_closet = np.where(
            (deltime == np.min(deltime))
        )
    else:
        pass
    # tmpdark = path_data+'/'+os.path.basename(pastdark[indx_closet].item())
    # tmpdark = '{}/{}'.format(path_data, os.path.basename(pastdark[indx_closet[0]].item()))
    # tmpdark = pastdark[indx_closet[0]].item()
    tmpdark = pastdark[indx_closet][-1]
    exptime = int(fits.getheader(tmpdark)['exptime'])
    cpcom = 'cp {} {}/dark-{}.fits'.format(tmpdark, path_data, int(exptime))
    # cpcom = 'cp {} {}'.format(tmpdark, path_data, int(exptime))
    print(cpcom)
    os.system(cpcom)
    mdark = CCDData.read(tmpdark, hdu=0)#, unit='adu')
    mdark.meta['FILENAME'] = os.path.basename(tmpdark)
    mdark.meta['EXPTIME'] = exptime
    darkdict['{}'.format(int(exptime))] = mdark
delt_dark = time.time() - st
print(f"{delt_dark:.3f} sec")
# %% [markdown]
# - Flat
# %%
flatdict = dict()
# Build one master flat per filter; any failure (e.g. no flat frames at
# all) falls through to the except branch and leaves flatdict empty.
try:
    flatfilterlist = list(set(ic1.filter(imagetyp='flat').summary['filter']))
    for i, filte in enumerate(flatfilterlist):
        # print(i, filte)
        print('MAKING MASTER FLAT IN {}-BAND'.format(filte))
        mflat = calib.master_flat(ic1, mzero, filte, mdark=mdark, fig=True)
        flatdict[filte] = mflat
        # NOTE(review): the date is read from the dark-{exptime}.fits header,
        # not from a flat product -- looks copy-pasted from the dark section;
        # confirm this is intentional.
        date = fits.getheader(f'{path_data}/dark-{int(exptime)}.fits')['date-obs'][:10].replace('-', '')
        flatim = f'{path_mframe}/{obs}/flat/{date}-n{filte}.fits'
        cpcom = f'cp {path_data}/n{filte}.fits {flatim}'
        print(cpcom)
        os.system(cpcom)
        plt.close('all')
except:
    print('No flat calibration image.')
    # flatdict['None'] = None
    pass
# tdict['masterframe'] = time.time() - st
# Record status/elapsed time for the whole master-frame step.
protbl['status'][protbl['process']=='master_frame'] = True
protbl['time'][protbl['process']=='master_frame'] = int(time.time() - st)
#------------------------------------------------------------
# OBJECT CALIBRATION (ZERO, DARK, FLAT)
#------------------------------------------------------------
# Apply bias, dark, and flat corrections to every science frame, per
# filter and exposure time. If a filter has no master flat from tonight,
# borrow the archived flat closest in time.
st_ = time.time()
comment = '='*60+'\n' \
    + 'OBJECT CALIBRATION\n' \
    + '='*60+'\n'
print(comment)
objfilterlist = sorted(list(set(ic1.filter(imagetyp='object').summary['filter'])))
objexptimelist = sorted(list(set(ic1.filter(imagetyp='object').summary['exptime'])))
for i, filte in enumerate(objfilterlist):
    print('PRE PROCESS FOR {} FILTER OBJECT\t[{}/{}]'.format(filte, i+1, len(objfilterlist)))
    if filte in flatdict.keys():
        mflat = flatdict[filte]
    else:
        print('\nNO {} FLAT FRAMES\n'.format(filte))
        # CALCULATE CLOSEST ONE FROM TIME DIFFERENCE
        deltime = []
        pastflat = np.array(glob.glob('{}/{}/flat/*n{}*.fits'.format(path_mframe, obs, filte)))
        for date in pastflat:
            flatmjd = calib.isot_to_mjd((os.path.basename(date)).split('-')[0])
            deltime.append(np.abs(ic1.summary['mjd'][0]-flatmjd))
        indx_closet = np.where(deltime == np.min(deltime))
        tmpflat = '{}/{}'.format(path_data, os.path.basename(pastflat[indx_closet][0].item()))
        # tmpflat = pastflat[indx_closet][0].item()
        cpcom = 'cp {} {}'.format(pastflat[indx_closet][0].item(), tmpflat)
        print(cpcom)
        os.system(cpcom)
        # Some instruments store flats without a BUNIT, so the unit must be
        # supplied explicitly when reading.
        if ('KCT' not in obs) & (obs != 'LSGT'):
            mflat = CCDData.read(tmpflat, hdu=0)#, unit='adu')
        elif obs == 'LSGT':
            mflat = CCDData.read(tmpflat, hdu=0, unit='adu')
        else:
            # KCT Exception
            mflat = CCDData.read(tmpflat, hdu=0, unit='adu')
        # Fix: the original assigned FILENAME twice in a row; once is enough.
        mflat.meta['FILENAME'] = os.path.basename(tmpflat)
        flatdict[filte] = mflat
    for expt in objexptimelist:
        # Use the master dark matching this exposure time, or the last one
        # available as a fallback.
        if str(int(expt)) in darkdict.keys():
            mdark = darkdict[str(int(expt))]
        else:
            mdark = darkdict[list(darkdict.keys())[-1]]
        calib.calibration(ic1, mzero, mflat, filte, mdark=mdark)
# tdict['objectcorrection'] = time.time() - st - tdict[list(tdict.keys())[-1]]
protbl['status'][protbl['process']=='pre_process'] = True
protbl['time'][protbl['process']=='pre_process'] = int(time.time() - st_)
# Corrected image list: calibrated frames are prefixed with "fz".
fzimlist = []
for ims in ('{}/fz*.fits'.format(path_data), '{}/fz*.fit'.format(path_data), '{}/fz*.fts'.format(path_data)):
    fzimlist.extend(sorted(glob.glob(ims)))
# %% [markdown]
# ### WCS Calculation
# %%
#------------------------------------------------------------
#	ASTROMETRY
#------------------------------------------------------------
st_ = time.time()
print('ASTROMETRY START')
print('='*60)
astrometryfailist = []
# fzimlist = sorted(glob.glob(path_data+'/fz*.fits'))
# Split frames into known IMSNG targets (solved with an RA/Dec hint)
# and everything else (solved blind).
astimlist = []
astotlist = []
astralist = []
astdelist = []
for inim in fzimlist:
    obj = (fits.getheader(inim)['object']).upper()
    if (obj in alltbl['obj']):
        indx_target = np.where(obj == alltbl['obj'])[0][0]
        ra, dec = alltbl['ra'][indx_target].item(), alltbl['dec'][indx_target].item()
        astimlist.append(inim)
        astralist.append(ra)
        astdelist.append(dec)
    else:
        astotlist.append(inim)
# Astrometry (IMSNG field)
if __name__ == '__main__':
    with multiprocessing.Pool(processes=ncores) as pool:
        results = pool.starmap(calib.astrometry, zip(astimlist, repeat(obsinfo['pixscale']), astralist, astdelist, repeat(obsinfo['fov']/60.), repeat(15)))
# Astrometry (non IMSNG field)
if __name__ == '__main__':
    with multiprocessing.Pool(processes=ncores) as pool:
        results = pool.starmap(calib.astrometry, zip(astotlist, repeat(obsinfo['pixscale']), repeat(None), repeat(None), repeat(None), repeat(60)))
# Astrometry (failed IMSNG field): retry targeted solves blind with a longer timeout.
astfailist = []
for inim in astimlist:
    if (os.path.exists('{}/a{}'.format(path_data, os.path.basename(inim))) == False):
        astfailist.append(inim)
if __name__ == '__main__':
    with multiprocessing.Pool(processes=ncores) as pool:
        results = pool.starmap(calib.astrometry, zip(astfailist, repeat(obsinfo['pixscale']), repeat(None), repeat(None), repeat(None), repeat(60)))
# Collect images that never produced an 'a'-prefixed (solved) output.
for inim in astfailist:
    if (os.path.exists('{}/a{}'.format(path_data, os.path.basename(inim))) == False):
        astrometryfailist.append('{}/a{}'.format(path_data, os.path.basename(inim)))
# Clean up astrometry.net intermediate products.
os.system('rm '+path_data+'/*.axy '+path_data+'/*.corr '+path_data+'/*.xyls '+path_data+'/*.match '+path_data+'/*.rdls '+path_data+'/*.solved '+path_data+'/*.wcs ')
print('ASTROMETRY COMPLETE\n'+'='*60)
# tdict['astronomy'] = time.time() - st - tdict[list(tdict.keys())[-1]]
protbl['status'][protbl['process']=='astrometry'] = True
protbl['time'][protbl['process']=='astrometry'] = int(time.time() - st_)
# %% [markdown]
# ### Cosmic-ray Removal
# %%
st_ = time.time()
print('Quick seeing measurement with SE & Cosmic ray removal')
print('='*60)
gain = ccdinfo['gain'].value
rdnoise = ccdinfo['rdnoise']
# afzimlist = sorted(glob.glob(path_data+'/afz*.fits'))
afzimlist = []
for ims in ('{}/a*.fits'.format(path_data), '{}/a*.fit'.format(path_data), '{}/a*.fts'.format(path_data)):
    afzimlist.extend(sorted(glob.glob(ims)))
outimlist = []
for i, inim in enumerate(afzimlist):
    # CR-cleaned output gets a 'cr' prefix.
    outim = '{}/cr{}'.format(os.path.dirname(inim), os.path.basename(inim))
    outimlist.append(outim)
# Some instruments are excluded from seeing/CR steps entirely.
if ('KCT' not in obs) & ('RASA36' not in obs) & ('LOAO_FLI' not in obs) & ('LSGT_ASI1600MM' != obs) & ('DNSM' != obs) & ('7DT01' != obs):
    # Seeing measurement
    if __name__ == '__main__':
        with multiprocessing.Pool(processes=ncores) as pool:
            results = pool.starmap(tool.SE_seeing, zip(afzimlist, repeat(obs), repeat(path_obs), repeat(path_config), repeat(3*u.arcsecond), repeat(0.95), repeat(True)))
    # Remove cosmic-ray
    if __name__ == '__main__':
        with multiprocessing.Pool(processes=ncores) as pool:
            results = pool.starmap(calib.cr_removal, zip(afzimlist, outimlist, repeat(gain), repeat(rdnoise)))
else:
    print('Skip Seeing measurement & CR remove processes for {}'.format(obs))
    # Just copy inputs to the 'cr' names so downstream globs still work.
    for inim, outim in zip(afzimlist, outimlist):
        cpcom = 'cp {} {}'.format(inim, outim)
        print(cpcom)
        os.system(cpcom)
protbl['status'][protbl['process']=='cr_removal'] = True
protbl['time'][protbl['process']=='cr_removal'] = int(time.time() - st_)
# %% [markdown]
# ### Rename to IMSNG/GECKO Convention
# %%
fov = obsinfo['fov']*u.arcmin
crafzimlist = []
for ims in ('{}/cra*.fits'.format(path_data), '{}/cra*.fit'.format(path_data), '{}/cra*.fts'.format(path_data)):
    crafzimlist.extend(sorted(glob.glob(ims)))
# for inim in sorted(glob.glob('{}/crafz*.fits'.format(path_data))):
for inim in crafzimlist:
    obj = fits.getheader(inim)['object']
    # Modify incorrect object header
    # (only for frames whose targeted astrometry failed but whose OBJECT is a known target)
    if (inim.replace('crafz', 'afz') in astrometryfailist) & (obj in alltbl['obj']):
        robj, sep = tool_tbd.imsng_name_correction(inim, alltbl, radius=fov)
    else:
        pass
    calib.fnamechange(inim, obs)
caliblist = sorted(glob.glob(path_data+'/Calib*.fits'))
ic_cal = ImageFileCollection(path_data, glob_include='Calib*0.fits', keywords='*')
os.system('chmod 777 {}'.format(path_data))
os.system('chmod 777 {}/*'.format(path_data))
# Calib-*.fits TO SAVE PATH
# Append a plain-text inventory of calibrated frames.
f = open(path_data+'/object.txt', 'a')
f.write('obs obj dateobs filter exptime\n')
for inim in caliblist:
    img = os.path.basename(inim)
    part = img.split('-')
    line = '{} {} {} {} {}\n'.format(part[1], part[2], part[3]+'T'+part[4], part[5], part[6])
    print(line)
    f.write(line)
f.close()
# DATA FOLDER TO SAVE PATH
# os.system('rm {}/afz*.fits {}/fz*.fits'.format(path_data, path_data))
os.system(f'rm {path_data}/*fz*.f*')
os.system(f'rm -rf {path_save}/{os.path.basename(path_data)}')
plt.close('all')
# %%
# Stamp field centre and footprint-polygon vertices into each Calib header.
for calibim in caliblist:
    center, vertices = tool.get_wcs_coordinates(calibim)
    fits.setval(calibim, "RACENT", value=round(center[0].item(), 3), comment="RA CENTER [deg]")
    fits.setval(calibim, "DECCENT", value=round(center[1].item(), 3), comment="DEC CENTER [deg]")
    for ii, (_ra, _dec) in enumerate(vertices):
        # print(ii, _ra, _dec)
        fits.setval(calibim, f"RAPOLY{ii}", value=round(_ra, 3), comment=f"RA POLYGON {ii} [deg]")
        fits.setval(calibim, f"DEPOLY{ii}", value=round(_dec, 3), comment=f"DEC POLYGON {ii} [deg]")
# %% [markdown]
# ### Defringe
# - Only for LOAO z, I, Y-bands
# %%
st_ = time.time()
# Fringe correction applies only to LOAO I-band frames here.
if (obs == 'LOAO') & ('I' in ic_cal.filter(imagetyp='object').summary['filter']):
    dfim = '/home/paek/qsopy/fringe/LOAO/fringe_i_ori.fits'
    dfdat = '/home/paek/qsopy/fringe/LOAO/fringe_i.dat'
    dfimlist = []
    for inim in ic_cal.filter(imagetyp='object', filter='I').summary['file']:
        # dfimlist.append(calib.defringe(str(inim), dfim, dfdat))
        # Defringe in place: the corrected frame replaces the original.
        dfedim = calib.defringe(str(inim), dfim, dfdat)
        mvcom = 'mv {} {}'.format(dfedim, inim)
        print(mvcom)
        os.system(mvcom)
    # tdict['defringe'] = time.time() - st - tdict[list(tdict.keys())[-1]]
else:
    print('No images to defringe')
    pass
protbl['status'][protbl['process']=='defringe'] = True
protbl['time'][protbl['process']=='defringe'] = int(time.time() - st_)
#------------------------------------------------------------
print('='*60)
print('Calibration IS DONE.\t('+str(int(time.time() - starttime))+' sec)')
print('='*60)
# %% [markdown]
# ## Photometry
# %%
st_ = time.time()
print('#\tPhotometry')
path_infile = f'{path_data}/{os.path.basename(path_default_gphot)}'
path_new_gphot = f'{os.path.dirname(path_infile)}/gphot.config'
# Copy default photometry configuration
cpcom = f'cp {path_default_gphot} {path_new_gphot}'
print(cpcom)
os.system(cpcom)
# Read default photometry configuration
f = open(path_default_gphot, 'r')
lines = f.read().splitlines()
f.close()
# Write photometry configuration
# (rewrite the 'imkey' line to point at this night's calibrated frames)
g = open(path_new_gphot, 'w')
for line in lines:
    if 'imkey' in line:
        line = f'imkey\t{path_data}/C*0.fits'
    else:
        pass
    g.write(line+'\n')
g.close()
# DOAO runs the single-process photometry script; everyone else the multiprocess one.
if obs == 'DOAO':
    path_phot = path_phot_sg
else:
    path_phot = path_phot_mp
# Execute
com = f'python {path_phot} {path_data} {ncores}'
# com = f'python {path_phot} {path_data} 1'
print(com)
os.system(com)
protbl['status'][protbl['process']=='photometry'] = True
protbl['time'][protbl['process']=='photometry'] = int(time.time() - st_)
# %% [markdown]
# ## Image registering & combine
# %%
st_ = time.time()
print('IMAGE REGISTERING & COMBINE')
combined_images = []
step = (1/24/60)*60 # 1 hour
ic_cal_phot = ImageFileCollection(path_data, glob_include='Calib*0.fits', keywords='*')
calist = sorted(glob.glob('{}/Calib*.fits'.format(path_data)))
objlist = sorted(list(set(ic_cal_phot.summary['object'])))
filterlist = sorted(list(set(ic_cal_phot.summary['filter'])))
# obj = 'NGC3147'
# filte = 'R'
for obj in objlist:
    for filte in filterlist:
        imlist_tmp = sorted(glob.glob('{}/Calib*-{}-*-{}-*.fits'.format(path_data, obj, filte)))
        if len(imlist_tmp) == 0:
            pass
        elif len(imlist_tmp) == 1:
            # Single frame: just copy it to the '.com' name.
            inim = imlist_tmp[0]
            comim = inim.replace('.fits', '.com.fits')
            cpcom = f'cp {inim} {comim}'
            print(cpcom)
            os.system(cpcom)
        else:
            print(obj, filte)
            # ic_part = sorted(glob.glob('{}/Calib*{}*{}*.fits'.format(path_data, obj, filte)))
            # Group frames by observation time: frames within `step` (JD) of the
            # previous one fall into the same stack.
            jds = np.array([fits.getheader(inim)['jd'] for inim in imlist_tmp])
            delts = jds - np.min(jds)
            grouplist = []
            grouplists = []
            i = 0
            for i in range(len(delts)):
                # Initial setting
                if i == 0:
                    t0 = delts[i]
                # Add last group to grouplists
                elif i == len(delts)-1:
                    grouplists.append(grouplist)
                t1 = delts[i]
                # print(t0, t1)
                dif = np.abs(t0-t1)
                if dif < step:
                    grouplist.append(imlist_tmp[i])
                # Generate new group
                else:
                    grouplists.append(grouplist)
                    grouplist = [imlist_tmp[i]]
                t0 = t1
            for group in grouplists:
                print('-'*60)
                if len(group) > 1:
                    # First frame of the group is the registration reference.
                    ref_image = group[0]
                    images_to_align = group[1:]
                    for inim in group:
                        print(inim)
                    # _data, _hdr = fits.getdata(ref_image, header=True)
                    # ximage, yimage = _data.shape
                    # racent, decent = _hdr['RACENT'], _hdr['DECCENT']
                    try:
                        # outim = tool.swarpcomb(
                        # 	images_to_align,
                        # 	gain=obsinfo['gain'].value,
                        # 	pixscale=obsinfo['pixscale'],
                        # 	racent=racent,
                        # 	decent=decent,
                        # 	ximage=ximage,
                        # 	yimage=yimage,
                        # 	listname='obj.list',
                        # 	path_save='.',
                        # 	keys_to_get=[
                        # 		'OBJECT',
                        # 		'FILTER',
                        # 		'RACENT',
                        # 		'DECCENT',
                        # 		'RAPOLY0',
                        # 		'DEPOLY0',
                        # 		'RAPOLY1',
                        # 		'DEPOLY1',
                        # 		'RAPOLY2',
                        # 		'DEPOLY2',
                        # 		'RAPOLY3',
                        # 		'DEPOLY3',
                        # 	]
                        # )
                        outim = tool.imcombine_routine(images_to_align, ref_image)
                        combined_images.append(outim)
                    except:
                        print('Fail to image align & combine routine.')
                        print(images_to_align)
                        pass
                else:
                    print('There is only one image.')
                    combined_images.append(group[0])
protbl['status'][protbl['process']=='image_stack'] = True
protbl['time'][protbl['process']=='image_stack'] = int(time.time() - st_)
# %%
# images_to_align = group
# ref_image = images_to_align[0]
# outim = tool.imcombine_routine(images_to_align, ref_image)
# %% [markdown]
# ## Photometry for combined images
# %%
st_ = time.time()
# Write photometry configuration
# (retarget 'imkey' at the stacked '*com.fits' frames)
h = open(path_new_gphot, 'w')
for line in lines:
    if 'imkey' in line:
        line = '{}\t{}/C*com.fits'.format('imkey', path_data)
    else:
        pass
    h.write(line+'\n')
h.close()
# Execute
path_phot = path_phot_mp
com = 'python {} {} {}'.format(path_phot, path_data, ncores)
print(com)
os.system(com)
# tdict['photometry_com'] = time.time() - st - tdict[list(tdict.keys())[-1]]
protbl['status'][protbl['process']=='photometry_com'] = True
protbl['time'][protbl['process']=='photometry_com'] = int(time.time() - st_)
ic_com_phot = ImageFileCollection(path_data, glob_include='Calib*com.fits', keywords='*')
# Summary
print('Draw observation summary plots')
# for filte in list(set(ic_cal_phot.summary['filter'])):
for filte in filterlist:
    try:
        tool.obs_summary(filte, ic_cal_phot, ic_com_phot, path_save=path_data)
    except:
        print('Fail to make summary plots.')
        pass
plt.close('all')
# %% [markdown]
# # Image subtraction
#
# %%
# NOTE(review): st_ is not reset here, so the 'subtraction' timing below also
# includes the combined-photometry step — confirm whether that is intended.
print('IMAGE SUBTRACTION')
subtracted_images = []
ds9comlist = []
for inim in combined_images:
    hdr = fits.getheader(inim)
    # obs = os.path.basename(inim).split('-')[1]
    # obs = 'LOAO'
    obj = hdr['object']
    filte = hdr['filter']
    path_refim = '/data3/paek/factory/ref_frames/{}'.format(obs)
    refimlist = glob.glob('{}/Ref*{}*{}*.fits'.format(path_refim, obj, filte))
    if len(refimlist) > 0:
        refim = refimlist[0]
        # subim, ds9com = tool.subtraction_routine3(inim, refim)
        # if False:
        if obs not in ['LSGT', 'DOAO', 'RASA36', 'SAO_C361K',]:
            subim, ds9com = tool.subtraction_routine(inim, refim)
        else:
            subim, ds9com = tool.subtraction_routine2(inim, refim)
            if os.path.getsize(subim) != 0:
                # Retry with the primary routine when routine2's output is suspect.
                rmcom = f"rm {subim}"
                print(rmcom)
                os.system(rmcom)
                subim, ds9com = tool.subtraction_routine(inim, refim)
            else:
                pass
        if subim != None:
            subtracted_images.append(subim)
            ds9comlist.append(ds9com)
    else:
        print('There is no reference image for {}'.format(os.path.basename(inim)))
        pass
# Remove the registered reference frames left behind by the subtraction step.
rmcom = 'rm {}/*Ref*gregister.fits'.format(path_data)
print(rmcom)
os.system(rmcom)
# tdict['subtraction'] = time.time() - st - tdict[list(tdict.keys())[-1]]
protbl['status'][protbl['process']=='subtraction'] = True
protbl['time'][protbl['process']=='subtraction'] = int(time.time() - st_)
# %% [markdown]
# ## Photometry for subtracted images
#
# %%
st_ = time.time()
# Write photometry configuration
# (retarget at difference images and loosen/adjust detection parameters)
s = open(path_new_gphot, 'w')
for line in lines:
    if 'imkey' in line:
        # line = '{}\t{}/hd*com.fits'.format('imkey', path_data)
        line = '{}\t{}/hd*.fits'.format('imkey', path_data)
    else:
        pass
    if 'photfraction' in line:
        line = '{}\t{}'.format('photfraction', 1.0)
    else:
        pass
    if 'DETECT_MINAREA' in line:
        line = '{}\t{}'.format('DETECT_MINAREA', 10)
    else:
        pass
    if 'DETECT_THRESH' in line:
        line = '{}\t{}'.format('DETECT_THRESH', 1.25)
    else:
        pass
    s.write(line+'\n')
s.close()
# Execute
hdimlist = sorted(glob.glob('{}/hd*.fits'.format(path_data)))
if len(hdimlist) > 0:
    com = 'python {} {}'.format(path_phot_sub, path_data)
    print(com)
    os.system(com)
    # tdict['photometry_sub'] = time.time() - st - tdict[list(tdict.keys())[-1]]
else:
    print('No subtracted image.')
    pass
protbl['status'][protbl['process']=='photometry_sub'] = True
protbl['time'][protbl['process']=='photometry_sub'] = int(time.time() - st_)
# %% [markdown]
# ## Transient Search
#
# %%
st_ = time.time()
fovval = fov.value
# Input table for transient search
# One row per difference image, listing all related products.
tstbl = Table()
# hdimlist = sorted(glob.glob(f'{path_data}/hd*com.fits'))
hdimlist = sorted(glob.glob(f'{path_data}/hd*.fits'))
if len(hdimlist) != 0:
    tstbl['hdim'] = hdimlist
    tskeys = ['hdcat', 'hcim', 'inim', 'scicat', 'refim']
    for key in tskeys:
        # Pre-allocate fixed-width string columns (filled per row below).
        tstbl[key] = ' '*300
    tstbl['fovval'] = fovval
    for i, hdim in enumerate(hdimlist):
        hdcat = hdim.replace('.fits','.phot_sub.cat')
        hcim = hdim.replace('hdCalib', 'hcCalib')
        inim = hdim.replace('hdCalib', 'Calib')
        scicat = inim.replace('.fits', '.phot.cat')
        hdr = fits.getheader(hdim)
        obj = hdr['object']
        filte = hdr['filter']
        path_refim = f'/data3/paek/factory/ref_frames/{obs}'
        refimlist = glob.glob(f'{path_refim}/Ref*{obj}*{filte}*.fits')
        refim = refimlist[0]
        for key, im in zip(tskeys, [hdcat, hcim, inim, scicat, refim]):
            tstbl[key][i] = im
    out_tstbl = f'{path_data}/transient_search.txt'
    tstbl.write(out_tstbl, format='ascii.tab', overwrite=True)
    com = f'python {path_find} {out_tstbl} {ncores}'
    print(com)
    subprocess.call(com, shell=True)
protbl['status'][protbl['process']=='transient_search'] = True
protbl['time'][protbl['process']=='transient_search'] = int(time.time() - st_)
# %% [markdown]
# # Summary file
# %%
#------------------------------------------------------------
#------------------------------------------------------------
protbl['status'][protbl['process']=='total'] = True
protbl['time'][protbl['process']=='total'] = int(time.time() - st)
protbl.write('{}/obs.summary.log'.format(path_data), format='ascii.tab', overwrite=True)
print(protbl)
# Write data summary
f = open(path_data+'/obs.summary.log', 'a')
end_localtime = time.strftime('%Y-%m-%d %H:%M:%S (%Z)', time.localtime())
# NOTE(review): "Pipelne" typo below is program output, intentionally left unchanged here.
f.write('Pipelne start\t: {}\n'.format(start_localtime))
f.write('Pipelne end\t: {}\n'.format(end_localtime))
# Best-effort per-object/per-filter inventory; failures are non-fatal.
try:
    f.write('='*60+'\n')
    f.write('PATH :{}\n'.format(path))
    f.write('OBJECT NUMBER # :{}\n'.format(len(ic_cal.summary)))
    objkind = sorted(set(ic_cal.summary['object']))
    f.write('OBJECTS # : {}\n'.format(objkind))
    for obj in objkind:
        f.write('-'*60+'\n')
        for filte in list(set(ic_cal.summary['filter'])):
            indx_tmp = ic_cal.files_filtered(filter=filte, object=obj)
            if len(indx_tmp) > 0:
                f.write('{}\t{}\n'.format(obj, filte))
except:
    pass
f.close()
# %% [markdown]
# ## File Transfer
# %%
# NOTE(review): format() gets two args for one placeholder — the second is ignored.
rmcom = 'rm {}/inv*.*'.format(path_data, path_data)
print(rmcom)
os.system(rmcom)
# Product suffixes; '' (last) matches the plain calibrated products.
tails = ['.transients.', '.new.', '.ref.', '.sub.', '']
for obj in objlist:
    for filte in filterlist:
        for tail in tails:
            # tail = 'transients'
            # obj = 'NGC3147'
            # filte = 'B'
            pathto = f'{path_gal}/{obj}/{obs}/{filte}'
            files = f'{path_data}/*Calib*-{obj}-*-{filte}-*{tail}*'
            nfiles = len(glob.glob(files))
            # print(files, nfiles)
            # if nfiles >0:
            # 	print(obj, filte, pathto, files, glob.glob(files)[-1])
            if nfiles !=0:
                # Save path
                if tail == '':
                    pathto = f'{path_gal}/{obj}/{obs}/{filte}'
                else:
                    pathto = f'{path_gal}/{obj}/{obs}/{filte}/transients'
                # Make path
                if (not os.path.exists(pathto)):
                    os.makedirs(pathto)
                mvcom = f'mv {files} {pathto}'
                print(mvcom)
                os.system(mvcom)
# Image transfer
mvcom = f'mv {path_data} {path_save}'
os.system(mvcom)
# WRITE LOG
# Record this night's raw path so it is not reprocessed next run.
f = open(path_log, 'a')
# f.write(path_raw+'/'+os.path.basename(path_data)+'\n')
# f.write('{}/{}\n'.format(path_raw, os.path.basename(path_data)))
f.write(f'{path_raw}/{os.path.basename(path_data)}\n')
f.close()
# %% [markdown]
# ## Slack message
# %%
# Post a completion notice (total wall time in minutes) to the pipeline channel.
total_time = round(protbl['time'][protbl['process']=='total'].item()/60., 1)
channel = '#pipeline'
text = f'[`gpPy`/{project}-{obsmode}] Processing Complete {obs} {os.path.basename(path)} Data ({nobj} objects) with {ncores} cores taking {total_time} mins'
param_slack = dict(
    token = OAuth_Token,
    channel = channel,
    text = text,
)
tool.slack_bot(**param_slack)
|
SilverRonREPO_NAMEgppyPATH_START.@gppy_extracted@gppy-main@7DT_Routine.py@.PATH_END.py
|
{
"filename": "timeline.py",
"repo_name": "subisarkar/JexoSim",
"repo_path": "JexoSim_extracted/JexoSim-master/jexosim/modules/timeline.py",
"type": "Python"
}
|
"""
JexoSim
2.0
Timeline module
v1.0
"""
import numpy as np
from astropy import units as u
from jexosim.lib.jexosim_lib import jexosim_msg
def run(opt):
    """Build the observation timeline (NDR/exposure sequence) for a JexoSim run.

    Reads the planet, detector-readout and timeline settings from ``opt``,
    computes the per-exposure frame pattern and the number of exposures, and
    attaches the resulting time/frame sequences back onto ``opt``.

    Parameters
    ----------
    opt : object
        JexoSim options object. Must already carry ``planet``, ``t_f``,
        ``effective_multiaccum``, ``exposure_time``, ``observation``,
        ``channel.detector_readout`` and ``timeline`` (set by earlier modules).

    Returns
    -------
    opt : object
        The same object, with timeline attributes (``n_exp``, ``n_ndr``,
        ``ndr_end_time``, ``frames_per_ndr``, ...) populated.
    """
    planet = opt.planet
    opt.T14 = planet.T14
    opt.time_at_transit = opt.T14*(0.5+ opt.observation.obs_frac_t14_pre_transit.val )
    opt.frame_time = opt.t_f
    opt.T14 = (opt.T14).to(u.s)
    opt.multiaccum = opt.effective_multiaccum # Number of NDRs per exposure
    # Time consumed by the fixed overhead frames (ground, zeroth NDR, reset).
    opt.allocated_time = (opt.channel.detector_readout.nGND()+
                          opt.channel.detector_readout.nNDR0()+
                          opt.channel.detector_readout.nRST()) * opt.frame_time
    jexosim_msg ('exposure_time %s'%(opt.exposure_time), opt.diagnostics)
    jexosim_msg ('allocated_time %s'%(opt.allocated_time) , opt.diagnostics)
    jexosim_msg ('effective multiaccum %s'%(opt.effective_multiaccum), opt.diagnostics)
    NDR_time = (opt.exposure_time-opt.allocated_time)/(opt.multiaccum-1)
    jexosim_msg ("initial NDR time %s"%(NDR_time ), opt.diagnostics)
    jexosim_msg ("overheads %s %s %s"%(opt.channel.detector_readout.nGND()*opt.frame_time, opt.channel.detector_readout.nNDR0()*opt.frame_time, opt.channel.detector_readout.nRST()*opt.frame_time), opt.diagnostics)
    # find number of frames in each non-zeroth NDR
    # BUGFIX: np.int was removed in NumPy 1.24 — use the builtin int dtype.
    nNDR = np.round(NDR_time/opt.frame_time).astype(int).take(0)
    base = [opt.channel.detector_readout.nGND.val, opt.channel.detector_readout.nNDR0.val]
    for x in range(opt.multiaccum-1): base.append(nNDR)
    base.append(opt.channel.detector_readout.nRST.val)
    # Calculate exposure time (=total cycle time) and estimate how many exposures are needed
    opt.exposure_time = sum(base)*opt.frame_time
    opt.frames_per_exposure = sum(base)
    if opt.timeline.apply_lc.val ==1:
        jexosim_msg ("Since light curve is implemented, observing time is set to 2x T14", opt.diagnostics)
        opt.timeline.use_T14.val = 1
    if opt.timeline.use_T14.val ==1:
        total_observing_time = opt.T14*(1.0+opt.observation.obs_frac_t14_pre_transit.val+opt.observation.obs_frac_t14_post_transit.val)
        # BUGFIX: np.int -> int (alias removed in NumPy >= 1.24).
        number_of_exposures = np.ceil((total_observing_time.to(u.s)/opt.exposure_time.to(u.s))).astype(int)
        number_of_exposures = number_of_exposures.value
    elif opt.timeline.use_T14.val ==0:
        number_of_exposures = int(opt.timeline.n_exp.val)
        if opt.timeline.obs_time.val >0:
            number_of_exposures = int(opt.timeline.obs_time.val.to(u.s) / opt.exposure_time)
    opt.n_exp = number_of_exposures
    opt.total_observing_time = opt.exposure_time*opt.n_exp
    jexosim_msg ("number of integrations %s"%(number_of_exposures), opt.diagnostics)
    jexosim_msg ("number of NDRs %s"%(number_of_exposures*opt.multiaccum), opt.diagnostics)
    jexosim_msg ("total observing time (hrs) %s"%((number_of_exposures*opt.exposure_time/3600).value), opt.diagnostics)
    jexosim_msg ("T14 %s"%(opt.T14), opt.diagnostics)
    # Repeat the per-exposure frame pattern for every exposure.
    opt.frame_sequence=np.tile(base, number_of_exposures)
    opt.time_sequence = opt.frame_time * opt.frame_sequence.cumsum()
    # End time of each NDR
    opt.ndr_end_time = np.dstack([opt.time_sequence[1+i::len(base)] \
        for i in range(opt.multiaccum)]).flatten()
    jexosim_msg ("actual NDR time %s"%(opt.ndr_end_time[1]-opt.ndr_end_time[0] ), opt.diagnostics)
    jexosim_msg ("final exposure time %s"%(opt.exposure_time ) , opt.diagnostics)
    # Number of frames contributing to each NDR
    opt.frames_per_ndr = np.dstack([opt.frame_sequence[1+i::len(base)] \
        for i in range(opt.multiaccum)]).flatten()
    opt.ndr_end_frame_number = np.round(opt.ndr_end_time/opt.frame_time).astype(int).value
    opt.duration_per_ndr = opt.frames_per_ndr*opt.frame_time
    opt.n_ndr = number_of_exposures*opt.multiaccum
    opt.ndr_list = np.arange(0,opt.n_ndr,1)
    return opt
|
subisarkarREPO_NAMEJexoSimPATH_START.@JexoSim_extracted@JexoSim-master@jexosim@modules@timeline.py@.PATH_END.py
|
{
"filename": "add_linsys_par.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/scripts/mock_tools/add_linsys_par.py",
"type": "Python"
}
|
#standard python
import sys
import os
import shutil
import unittest
from datetime import datetime
import json
import numpy as np
import fitsio
import glob
import argparse
from astropy.table import Table,join,unique,vstack
from matplotlib import pyplot as plt
import LSS.main.cattools as ct
import LSS.common_tools as common
from LSS.globals import main
# Map the NERSC host name to its scratch environment-variable name; abort
# elsewhere since the hard-coded paths below only exist on NERSC.
if os.environ['NERSC_HOST'] == 'cori':
    scratch = 'CSCRATCH'
elif os.environ['NERSC_HOST'] == 'perlmutter':
    scratch = 'PSCRATCH'
else:
    print('NERSC_HOST is not cori or permutter but is '+os.environ['NERSC_HOST'])
    sys.exit('NERSC_HOST not known (code only works on NERSC), not proceeding')
# Command-line interface and per-tracer configuration.
parser = argparse.ArgumentParser()
parser.add_argument("--tracer", help="tracer type to be selected")
parser.add_argument("--min_real", help="minimum number for mock realization",default=0,type=int)
parser.add_argument("--max_real", help="maximum (+1) for mock realization",default=25,type=int)
parser.add_argument("--base_dir", help="base directory for input/output",default='/global/cfs/cdirs/desi/survey/catalogs/Y1/mocks/SecondGenMocks/AbacusSummit/')
parser.add_argument("--survey", help="e.g., main (for all), DA02, any future DA",default='Y1')
parser.add_argument("--verspec",help="version for redshifts",default='iron')
parser.add_argument("--data_version",help="version for redshifts",default='v0.6')
parser.add_argument("--mockcatver", default=None, help = "if not None, gets added to the output path")
parser.add_argument("--minr", help="minimum number for random files",default=0)
parser.add_argument("--maxr", help="maximum for random files, 18 are available (use parallel script for all)",default=18)
parser.add_argument("--imsys_zbin",help="if yes, do imaging systematic regressions in z bins",default='y')
parser.add_argument("--add_imsys_ran",help="add sysnet weights to randoms",default='n')
parser.add_argument("--par",help="whether to run in parallel",default='n')
parser.add_argument("--imsys_nside",help="healpix nside used for imaging systematic regressions",default=256,type=int)
parser.add_argument("--imsys_colname",help="column name for fiducial imaging systematics weight, if there is one (array of ones by default)",default=None)
args = parser.parse_args()
print(args)
tp = args.tracer
rm = int(args.minr)
rx = int(args.maxr)
datadir = '/global/cfs/cdirs/desi/survey/catalogs/'+args.survey+'/LSS/'+args.verspec+'/LSScats/'+args.data_version+'/'
# Program (BRIGHT/DARK) follows the tracer type.
if tp[:3] == 'BGS' or tp == 'bright' or tp == 'MWS_ANY':
    prog = 'BRIGHT'
else:
    prog = 'DARK'
progl = prog.lower()
mainp = main(args.tracer,args.verspec,survey=args.survey)
zmin = mainp.zmin
zmax = mainp.zmax
fit_maps = mainp.fit_maps
tpstr = tp
if tp == 'BGS_BRIGHT-21.5':
    tpstr = 'BGS_BRIGHT'
nside = 256
# Redshift bins used for the imaging-systematics regressions, per tracer.
if tp[:3] == 'ELG':
    if args.imsys_zbin == 'y':
        zrl = [(0.8,1.1),(1.1,1.6)]
    else:
        zrl = [(0.8,1.6)]
if tp[:3] == 'QSO':
    if args.imsys_zbin == 'y':
        zrl = [(0.8,1.3),(1.3,2.1)]#,(2.1,3.5)]
    else:
        zrl = [(0.8,3.5)]
if tp[:3] == 'LRG':
    if args.imsys_zbin == 'y':
        zrl = [(0.4,0.6),(0.6,0.8),(0.8,1.1)]
    else:
        zrl = [(0.4,1.1)]
if tp == 'BGS_BRIGHT-21.5':
    zrl = [(0.1,0.4)]
elif tp[:3] == 'BGS':
    zrl = [(0.01,0.5)]
    zmin = 0.01
    zmax = 0.5
def splitGC_wo(flroot,datran='.dat',rann=0):
    """Split a clustering catalog into Galactic hemispheres and write both halves.

    Reads ``<flroot>clustering<datran>.fits`` (for randoms, prefixed with the
    realization number ``rann``), classifies rows as NGC/SGC via
    ``common.splitGC``, and writes ``<flroot>NGC_...`` / ``<flroot>SGC_...``.
    """
    import LSS.common_tools as common
    from astropy.coordinates import SkyCoord
    import astropy.units as u
    suffix = 'clustering' + datran + '.fits'
    if datran == '.ran':
        # Random files carry the realization number in front.
        suffix = str(rann) + '_' + suffix
    catalog = fitsio.read(flroot + suffix)
    in_ngc = common.splitGC(catalog)
    common.write_LSS(catalog[in_ngc], flroot + 'NGC_' + suffix)
    common.write_LSS(catalog[~in_ngc], flroot + 'SGC_' + suffix)
# EBV-difference maps and the per-healpix systematics-map directory,
# shared by every mock realization processed below.
debv = common.get_debv()
lssmapdirout = datadir+'/hpmaps/'
def get_imlin(realization):
    """Compute linear imaging-systematics weights for one mock realization.

    Fits per-region (N/S), per-z-bin linear regressions of the data density
    against imaging templates, writes WEIGHT_IMLIN into the clustering
    catalog, re-splits it into NGC/SGC, and (optionally) propagates the
    weights onto the random catalogs.
    """
    mockdir = args.base_dir+'mock'+str(realization)+'/'
    if args.mockcatver is not None:
        mockdir += args.mockcatver+'/'
    dirout = mockdir
    from LSS.imaging import densvar
    use_maps = fit_maps
    dat = Table(fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_clustering.dat.fits')))
    ranl = []
    # Only the first random realization is used for the regression.
    for i in range(0,1):
        #rann = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_NGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP'])
        #rans = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_SGC'+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP'])
        #ran = np.concatenate((rann,rans))
        #ran = common.addNS(Table(ran))
        ran = fitsio.read(os.path.join(dirout.replace('global','dvs_ro') , tp+'_'+str(i)+'_clustering.ran.fits'), columns=['RA', 'DEC','WEIGHT','WEIGHT_FKP','PHOTSYS'])
        ranl.append(ran)
    rands = np.concatenate(ranl)
    regl = ['N','S']
    syscol = 'WEIGHT_IMLIN'
    dat[syscol] = np.ones(len(dat))
    for reg in regl:
        # Imaging-template healpix map for this photometric region.
        pwf = lssmapdirout+'QSO_mapprops_healpix_nested_nside'+str(nside)+'_'+reg+'.fits'
        sys_tab = Table.read(pwf)
        cols = list(sys_tab.dtype.names)
        for col in cols:
            if 'DEPTH' in col:
                # De-extinct the depth maps using the per-band coefficient.
                bnd = col.split('_')[-1]
                sys_tab[col] *= 10**(-0.4*common.ext_coeff[bnd]*sys_tab['EBV'])
        for ec in ['GR','RZ']:
            if 'EBV_DIFF_'+ec in fit_maps:
                sys_tab['EBV_DIFF_'+ec] = debv['EBV_DIFF_'+ec]
        #seld = dat['PHOTSYS'] == reg
        selr = rands['PHOTSYS'] == reg
        print(zrl)
        for zr in zrl:
            zmin = zr[0]
            zmax = zr[1]
            print('getting weights for region '+reg+' and '+str(zmin)+'<z<'+str(zmax))
            wsysl = densvar.get_imweight(dat,rands[selr],zmin,zmax,reg,fit_maps,use_maps,sys_tab=sys_tab,zcol='Z',wtmd='wt_comp',figname=dirout+args.tracer+'_'+reg+'_'+str(zmin)+str(zmax)+'_linimsysfit.png')
            # get_imweight returns 1 for rows outside the fit selection;
            # only overwrite rows that actually got a fitted weight.
            sel = wsysl != 1
            dat[syscol][sel] = wsysl[sel]
    fname = os.path.join(dirout, tp+'_clustering.dat.fits')
    common.write_LSS(dat,fname)
    flroot = os.path.join(dirout, tp+'_')
    splitGC_wo(flroot)
    if args.add_imsys_ran == 'y':
        # Carry the data weights over to the randoms by TARGETID_DATA join.
        regl = ['NGC','SGC']
        wtcol = 'WEIGHT_IMLIN'
        fb = dirout+tp
        fcdn = fitsio.read(fb.replace('global','dvs_ro')+'_NGC_clustering.dat.fits',columns=['TARGETID',wtcol])
        fcds = fitsio.read(fb.replace('global','dvs_ro')+'_SGC_clustering.dat.fits',columns=['TARGETID',wtcol])
        indata = Table(np.concatenate((fcdn,fcds)))
        indata.rename_column('TARGETID', 'TARGETID_DATA')
        def addrancol(rn):
            # Attach the data weight column to random file number `rn`.
            for reg in regl:
                fname = dirout+tp+'_'+reg+'_'+str(rn)+'_clustering.ran.fits'
                cd = Table(fitsio.read(fname.replace('global','dvs_ro')))
                # Drop stale weight columns (including join suffixes) before re-joining.
                cols2rem = [wtcol,wtcol+'_1',wtcol+'_2']
                for col in cols2rem:
                    if col in list(cd.dtype.names):
                        cd.remove_column(col)
                cd = join(cd,indata,keys=['TARGETID_DATA'],join_type='left')
                common.write_LSS(cd,fname)
        if args.par == 'n':
            for rn in range(rm,rx):
                addrancol(rn)
        if args.par == 'y':
            nproc = 9
            nran = rx-rm
            # NOTE(review): inds runs 0..nran-1 regardless of --minr; when
            # rm > 0 this processes different files than the serial branch — confirm.
            inds = np.arange(nran)
            from multiprocessing import Pool
            with Pool(processes=nproc) as pool:
                res = pool.map(addrancol, inds)
# Entry point: fan the realizations out over a sharedmem map-reduce pool.
if __name__ == '__main__':
    from multiprocessing import Pool
    from desitarget.internal import sharedmem
    import sys
    inds = []
    for i in range(args.min_real,args.max_real):
        inds.append(i)
    pool = sharedmem.MapReduce()
    with pool:
        pool.map(get_imlin,inds)#,reduce=reduce)
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@scripts@mock_tools@add_linsys_par.py@.PATH_END.py
|
{
"filename": "_variant.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/shape/label/font/_variant.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.shape.label.font.variant``."""

    def __init__(
        self, plotly_name="variant", parent_name="layout.shape.label.font", **kwargs
    ):
        # Accepted CSS font-variant keywords for this property.
        allowed_variants = [
            "normal",
            "small-caps",
            "all-small-caps",
            "all-petite-caps",
            "petite-caps",
            "unicase",
        ]
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc+arraydraw"),
            values=kwargs.pop("values", allowed_variants),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@shape@label@font@_variant.py@.PATH_END.py
|
{
"filename": "lightcones.py",
"repo_name": "21cmfast/21cmFAST",
"repo_path": "21cmFAST_extracted/21cmFAST-master/src/py21cmfast/lightcones.py",
"type": "Python"
}
|
"""A module for classes that create lightcone slices from Coeval objects."""
from __future__ import annotations
import attr
import numpy as np
from abc import ABC, abstractmethod
from astropy.cosmology import FLRW, z_at_value
from astropy.units import MHz, Mpc, Quantity, pixel, pixel_scale
from cosmotile import (
make_lightcone_slice_interpolator,
make_lightcone_slice_vector_field,
)
from functools import cached_property, partial
from scipy.spatial.transform import Rotation
from typing import Sequence
from .inputs import Planck18 # Not *quite* the same as astropy's Planck18
from .inputs import FlagOptions, UserParams
from .outputs import Coeval
# Registry of available lightconer classes (populated elsewhere in the file).
_LIGHTCONERS = {}
# Shorthand for the astropy physical-type string used in Quantity annotations.
_LENGTH = "length"


@attr.define(kw_only=True, slots=False)
class Lightconer(ABC):
    """A class that creates lightcone slices from Coeval objects.

    Parameters
    ----------
    lc_distances
        The comoving distances to the lightcone slices, in Mpc. Either this or
        the ``lc_redshifts`` must be provided.
    lc_redshifts
        The redshifts of the lightcone slices. Either this or the ``lc_distances``
        must be provided.
    cosmo
        The cosmology to use. Defaults to Planck18.
    quantities
        An iterable of quantities to include in the lightcone slices. These should
        be attributes of the :class:~`outputs.Coeval` class that are arrays of
        shape ``HII_DIM^3``. A *special* value here is ``velocity_los``, which is
        presumably handled as a line-of-sight vector field rather than a plain
        Coeval attribute (TODO confirm). Defaults to ``("brightness_temp",)``.
    """

    # Cosmology used to convert between redshift and comoving distance.
    cosmo: FLRW = attr.field(
        default=Planck18, validator=attr.validators.instance_of(FLRW)
    )
    # Optional explicit slice redshifts; excluded from equality comparisons.
    _lc_redshifts: np.ndarray = attr.field(default=None, eq=False)
    # Slice comoving distances; compared with a tolerance rather than exactly.
    lc_distances: Quantity[_LENGTH] = attr.field(
        eq=attr.cmp_using(eq=partial(np.allclose, rtol=1e-5, atol=0))
    )
    # Names of Coeval fields to interpolate onto the lightcone.
    quantities: tuple[str] = attr.field(
        default=("brightness_temp",),
        converter=tuple,
        validator=attr.validators.deep_iterable(attr.validators.instance_of(str)),
    )
    # Whether to also compute the line-of-sight velocity field.
    get_los_velocity: bool = attr.field(default=False, converter=bool)
    # Per-quantity interpolation kind overrides (see _interp_kinds_def).
    interp_kinds: dict[str, str] = attr.field()
@lc_distances.default
def _lcd_default(self):
if self._lc_redshifts is None:
raise ValueError("Either lc_distances or lc_redshifts must be provided")
return self.cosmo.comoving_distance(self._lc_redshifts)
@lc_distances.validator
def _lcd_vld(self, attribute, value):
if np.any(value < 0):
raise ValueError("lc_distances must be non-negative")
@_lc_redshifts.validator
def _lcz_vld(self, attribute, value):
if value is None:
return
if np.any(value < 0):
raise ValueError("lc_redshifts must be non-negative")
@cached_property
def lc_redshifts(self) -> np.ndarray:
"""The redshifts of the lightcone slices."""
if self._lc_redshifts is not None:
return self._lc_redshifts
return np.array(
[z_at_value(self.cosmo.comoving_distance, d) for d in self.lc_distances]
)
def get_lc_distances_in_pixels(self, resolution: Quantity[_LENGTH]):
"""Get the lightcone distances in pixels, given a resolution."""
return self.lc_distances.to(pixel, pixel_scale(resolution / pixel))
@interp_kinds.default
def _interp_kinds_def(self):
return {"z_re_box": "mean_max"}
def get_shape(self, user_params: UserParams) -> tuple[int, int, int]:
"""The shape of the lightcone slices."""
raise NotImplementedError
@classmethod
def with_equal_cdist_slices(
cls,
min_redshift: float,
max_redshift: float,
resolution: Quantity[_LENGTH],
cosmo=Planck18,
**kw,
):
"""Construct a Lightconer with equally spaced slices in comoving distance."""
d_at_redshift = cosmo.comoving_distance(min_redshift).to_value(Mpc)
dmax = cosmo.comoving_distance(max_redshift).to_value(Mpc)
res = resolution.to_value(Mpc)
lc_distances = np.arange(d_at_redshift, dmax + res, res)
# if np.isclose(lc_distances.max() + res, dmax):
# lc_distances = np.append(lc_distances, dmax)
return cls(lc_distances=lc_distances * Mpc, cosmo=cosmo, **kw)
@classmethod
def with_equal_redshift_slices(
cls,
min_redshift: float,
max_redshift: float,
dz: float | None = None,
resolution: Quantity[_LENGTH] | None = None,
cosmo=Planck18,
**kw,
):
"""Construct a Lightconer with equally spaced slices in redshift."""
if dz is None and resolution is None:
raise ValueError("Either dz or resolution must be provided")
if dz is None:
dc = cosmo.comoving_distance(min_redshift) + resolution
zdash = z_at_value(cosmo.comoving_distance, dc)
dz = zdash - min_redshift
zs = np.arange(min_redshift, max_redshift + dz, dz)
return cls(lc_redshifts=zs, cosmo=cosmo, **kw)
@classmethod
def from_frequencies(cls, freqs: Quantity[MHz], cosmo=Planck18, **kw):
"""Construct a Lightconer with slices corresponding to given frequencies."""
zs = (1420.4 * MHz / freqs - 1).to_value("")
return cls(lc_redshifts=zs, cosmo=cosmo, **kw)
def make_lightcone_slices(
self, c1: Coeval, c2: Coeval
) -> tuple[dict[str, np.ndarray], np.ndarray]:
"""
Make lightcone slices out of two coeval objects.
Parameters
----------
c1, c2 : Coeval
The coeval boxes to interpolate.
Returns
-------
quantity
The field names of the quantities required by the lightcone.
lcidx
The indices of the lightcone to which these slices belong.
scalar_field_slices
The scalar fields evaluated on the "lightcone" slices that exist within
the redshift range spanned by ``c1`` and ``c2``.
"""
if c1.user_params != c2.user_params:
raise ValueError("c1 and c2 must have the same user parameters")
if c1.cosmo_params != c2.cosmo_params:
raise ValueError("c1 and c2 must have the same cosmological parameters")
cosmo = c1.cosmo_params.cosmo
pixeleq = pixel_scale(c1.user_params.cell_size / pixel)
dc1 = cosmo.comoving_distance(c1.redshift).to(pixel, equivalencies=pixeleq)
dc2 = cosmo.comoving_distance(c2.redshift).to(pixel, equivalencies=pixeleq)
dcmin = min(dc1, dc2)
dcmax = max(dc1, dc2)
pixlcdist = self.get_lc_distances_in_pixels(c1.user_params.cell_size)
# At the lower redshift, we include some tolerance. This is because the very
# last slice (lowest redshift) may correspond *exactly* to the lowest coeval
# box, and due to rounding error in the `z_at_value` call, they might be
# slightly off.
lcidx = np.nonzero((pixlcdist >= dcmin * 0.9999) & (pixlcdist < dcmax))[0]
# Return early if no lightcone indices are between the coeval distances.
if len(lcidx) == 0:
yield None, lcidx, None
lc_distances = pixlcdist[lcidx]
for idx, lcd in zip(lcidx, lc_distances):
for q in self.quantities:
box1 = self.coeval_subselect(
lcd, getattr(c1, q), c1.user_params.cell_size
)
box2 = self.coeval_subselect(
lcd, getattr(c2, q), c2.user_params.cell_size
)
box = self.redshift_interpolation(
lcd, box1, box2, dc1, dc2, kind=self.interp_kinds.get(q, "mean")
)
yield q, idx, self.construct_lightcone(lcd, box)
if self.get_los_velocity and q == self.quantities[0]:
# While doing the first quantity, also add in the los velocity, if desired.
# Doing it now means we can keep whatever cached interpolation setup
# is used to do construct_lightcone().
boxes1 = [
self.coeval_subselect(
lcd, getattr(c1, f"velocity_{q}"), c1.user_params.cell_size
)
for q in "xyz"
]
boxes2 = [
self.coeval_subselect(
lcd, getattr(c2, f"velocity_{q}"), c2.user_params.cell_size
)
for q in "xyz"
]
interpolated_boxes = [
self.redshift_interpolation(
lcd,
box1,
box2,
dc1,
dc2,
kind=self.interp_kinds.get("velocity", "mean"),
)
for (box1, box2) in zip(boxes1, boxes2)
]
yield (
"los_velocity",
idx,
self.construct_los_velocity_lightcone(
lcd,
interpolated_boxes,
),
)
def coeval_subselect(
self, lcd: float, coeval: np.ndarray, coeval_res: Quantity[_LENGTH]
) -> np.ndarray:
"""Sub-Select the coeval box required for interpolation at one slice."""
return coeval
def redshift_interpolation(
self,
dc: float,
coeval_a: np.ndarray,
coeval_b: np.ndarray,
dc_a: float,
dc_b: float,
kind: str = "mean",
) -> np.ndarray:
"""Perform redshift interpolation to a new box given two bracketing coevals."""
if coeval_a.shape != coeval_b.shape:
raise ValueError("coeval_a and coeval_b must have the same shape")
out = (np.abs(dc_b - dc) * coeval_a + np.abs(dc_a - dc) * coeval_b) / np.abs(
dc_a - dc_b
)
if kind == "mean_max":
flag = coeval_a * coeval_b < 0
out[flag] = np.maximum(coeval_a, coeval_b)[flag]
elif kind != "mean":
raise ValueError("kind must be 'mean' or 'mean_max'")
return out
@abstractmethod
def construct_lightcone(
self,
lc_distances: np.ndarray,
boxes: Sequence[np.ndarray],
) -> np.ndarray:
"""Abstract method for constructing the lightcone slices."""
pass
@abstractmethod
def construct_los_velocity_lightcone(
self,
lc_distances: np.ndarray,
velocities: tuple[np.ndarray, np.ndarray, np.ndarray],
) -> np.ndarray:
"""Abstract method for constructing the LoS velocity lightcone slices."""
pass
def validate_options(self, user_params: UserParams, flag_options: FlagOptions):
"""Validate 21cmFAST options."""
pass
def __init_subclass__(cls) -> None:
"""Enabe plugin-style behaviour."""
_LIGHTCONERS[cls.__name__] = cls
return super().__init_subclass__()
@attr.define(kw_only=True, slots=False)
class RectilinearLightconer(Lightconer):
    """A lightconer that produces rectilinear (box-aligned) lightcone slices.

    Slices are taken directly from coeval planes perpendicular to the z-axis
    of the box, so each output slice is an (HII_DIM, HII_DIM) grid.
    """
    # Offset (in slices) applied when picking the coeval plane; see
    # coeval_subselect. Defaults to the number of lightcone slices.
    index_offset: int = attr.field()
    line_of_sight_axis: int = attr.field(
        default=-1, converter=int, validator=attr.validators.in_([-1, 0, 1, 2])
    )
    @index_offset.default
    def _index_offset_default(self) -> int:
        # While it probably makes more sense to use zero as the default offset,
        # we use n_lightcone to maintain default backwards compatibility.
        return len(self.lc_distances)
    def coeval_subselect(
        self, lcd: Quantity[pixel], coeval: np.ndarray, coeval_res: Quantity[_LENGTH]
    ):
        """Sub-select the coeval slice corresponding to this coeval distance."""
        # This makes the back of the lightcone exactly line up with the back of the
        # coeval box at that redshift, modulo the index_offset.
        lcpix = self.get_lc_distances_in_pixels(coeval_res)
        # Index counted back from the farthest slice; mode="wrap" applies the
        # box's periodic boundary when the index runs off the end.
        lcidx = int((lcpix.max() - lcd + 1 * pixel).to_value(pixel))
        return coeval.take(-lcidx + self.index_offset, axis=2, mode="wrap")
    def construct_lightcone(
        self,
        lcd: np.ndarray,
        box: np.ndarray,
    ) -> tuple[np.ndarray, np.ndarray]:
        """Construct slices of the lightcone between two coevals.

        The plane was already selected in coeval_subselect, so this is a no-op.
        """
        return box
    def construct_los_velocity_lightcone(
        self,
        lcd: np.ndarray,
        velocities: np.ndarray,
    ) -> np.ndarray:
        """Construct the LoS velocity slice: the velocity component along the LoS axis."""
        return velocities[self.line_of_sight_axis]
    def get_shape(self, user_params: UserParams) -> tuple[int, int, int]:
        """Get the shape of the lightcone."""
        return (user_params.HII_DIM, user_params.HII_DIM, len(self.lc_distances))
def _rotation_eq(x, y):
"""Compare two rotations."""
if x is None and y is None:
return True
return np.allclose(x.as_matrix(), y.as_matrix())
@attr.define(kw_only=True, slots=False)
class AngularLightconer(Lightconer):
    """Angular lightcone slices constructed from rectilinear coevals.

    Each slice is evaluated on a set of (latitude, longitude) sky coordinates
    at a given comoving-distance shell, via the ``cosmotile`` interpolators.
    """
    # Sky coordinates of the output pixels (1-D arrays of equal length).
    latitude: np.ndarray = attr.field(eq=attr.cmp_using(eq=np.allclose))
    # Longitude in radians, validated to lie in [0, 2*pi].
    longitude: np.ndarray = attr.field(eq=attr.cmp_using(eq=np.allclose))
    # Spline order passed to the cosmotile interpolator.
    interpolation_order: int = attr.field(
        default=1, converter=int, validator=attr.validators.in_([0, 1, 3, 5])
    )
    # Observer position, in pixel units, relative to the coeval box.
    origin: Quantity[pixel, (3,), float] = attr.field(eq=attr.cmp_using(eq=np.allclose))
    # Optional rotation applied to the lines of sight; None means no rotation.
    rotation: Rotation = attr.field(
        default=None,
        eq=attr.cmp_using(eq=_rotation_eq),
        validator=attr.validators.optional(attr.validators.instance_of(Rotation)),
    )
    def __attrs_post_init__(self) -> None:
        """Initialize the per-shell interpolator cache (see _refresh_cache)."""
        self._cache = {
            "lcd": None,
            "interpolator": None,
        }
    @longitude.validator
    def _longitude_validator(self, attribute, value):
        if value.ndim != 1:
            raise ValueError("longitude must be 1-dimensional")
        if np.any(value < 0) or np.any(value > 2 * np.pi):
            raise ValueError("longitude must be in the range [0, 2pi]")
        if value.shape != self.latitude.shape:
            raise ValueError("longitude and latitude must have the same shape")
    @origin.default
    def _origin_default(self):
        # Default observer at the box origin.
        return np.zeros(3) * pixel
    @classmethod
    def like_rectilinear(
        cls, user_params: UserParams, match_at_z: float, cosmo: FLRW = Planck18, **kw
    ):
        """Create an angular lightconer with the same pixel size as a rectilinear one.

        This is useful for comparing the two lightconer types.

        Parameters
        ----------
        user_params
            The user parameters.
        match_at_z
            The redshift at which the angular lightconer should match the rectilinear
            one.
        cosmo
            The cosmology to use.

        Other Parameters
        ----------------
        All other parameters passed through to the constructor.

        Returns
        -------
        AngularLightconer
            The angular lightconer.
        """
        # Small-angle approximation: angular size of the box at match_at_z.
        box_size_radians = (
            user_params.BOX_LEN / cosmo.comoving_distance(match_at_z).value
        )
        lon = np.linspace(0, box_size_radians, user_params.HII_DIM)
        # This makes the X-values increasing from 0.
        lat = np.linspace(0, box_size_radians, user_params.HII_DIM)[::-1]
        LON, LAT = np.meshgrid(lon, lat)
        LON = LON.flatten()
        LAT = LAT.flatten()
        # Place the observer so that the shell at match_at_z coincides with the box.
        origin_offset = -cosmo.comoving_distance(match_at_z).to(
            pixel, pixel_scale(user_params.cell_size / pixel)
        )
        origin = np.array([0, 0, origin_offset.value]) * origin_offset.unit
        # NOTE(review): the -pi/2 rotation about Y presumably aligns the line of
        # sight with the box's z-axis — confirm against cosmotile's conventions.
        rot = Rotation.from_euler("Y", -np.pi / 2)
        return cls.with_equal_cdist_slices(
            min_redshift=match_at_z,
            resolution=user_params.cell_size,
            latitude=LAT,
            longitude=LON,
            origin=origin,
            rotation=rot,
            **kw,
        )
    def construct_lightcone(
        self,
        lcd: Quantity[pixel],
        box: np.ndarray,
    ) -> tuple[np.ndarray, np.ndarray]:
        """Construct the lightcone slices from bracketing coevals.

        Reuses the cached interpolator when the shell distance is unchanged
        (several quantities are evaluated per shell).
        """
        if self._cache["lcd"] == lcd:
            interpolator = self._cache["interpolator"]
        else:
            interpolator = self._refresh_cache(lcd)
        return interpolator(box)
    def construct_los_velocity_lightcone(
        self,
        lcd: Quantity[pixel],
        velocities: Sequence[np.ndarray],
    ):
        """Construct the LoS velocity lightcone from 3D velocities."""
        if self._cache["lcd"] == lcd:
            interpolator = self._cache["interpolator"]
        else:
            interpolator = self._refresh_cache(lcd)
        # cosmotile expects an iterable of vector fields and yields one slice
        # per field; we pass a single field and take its single result.
        return next(make_lightcone_slice_vector_field([velocities], interpolator))
    def _refresh_cache(self, lcd):
        # Build (and cache) the interpolator for this comoving-distance shell.
        result = make_lightcone_slice_interpolator(
            latitude=self.latitude,
            longitude=self.longitude,
            distance_to_shell=lcd,
            interpolation_order=self.interpolation_order,
            origin=self.origin,
            rotation=self.rotation,
        )
        self._cache["lcd"] = lcd
        self._cache["interpolator"] = result
        return result
    def get_shape(self, user_params: UserParams) -> tuple[int, int]:
        """The shape of the lightcone slices: (n_sky_pixels, n_slices)."""
        return (len(self.longitude), len(self.lc_redshifts))
    def validate_options(self, user_params: UserParams, flag_options: FlagOptions):
        """Validate 21cmFAST options.

        Raises
        ------
        ValueError
            If APPLY_RSDs is True, or if the LoS velocity is requested without
            3D velocities being kept.
        """
        if flag_options.APPLY_RSDS:
            raise ValueError(
                "APPLY_RSDs must be False for angular lightcones, as the RSDs are "
                "applied in the lightcone construction."
            )
        if self.get_los_velocity and not user_params.KEEP_3D_VELOCITIES:
            raise ValueError(
                "To get the LoS velocity, you need to set "
                "user_params.KEEP_3D_VELOCITIES=True"
            )
|
21cmfastREPO_NAME21cmFASTPATH_START.@21cmFAST_extracted@21cmFAST-master@src@py21cmfast@lightcones.py@.PATH_END.py
|
{
"filename": "T04CompareModels.py",
"repo_name": "nu-radio/NuRadioMC",
"repo_path": "NuRadioMC_extracted/NuRadioMC-master/NuRadioMC/SignalGen/tests/T04CompareModels.py",
"type": "Python"
}
|
import NuRadioMC.SignalGen.askaryan as ask
from NuRadioReco.utilities import units
import numpy as np
import matplotlib.pyplot as plt
from radiotools import plthelpers as php
import logging
logging.basicConfig(level=logging.INFO)
interp_factor = 20

# Shower / observation parameters shared by every comparison below.
E = 1 * units.EeV
n_index = 1.78
theta_C = np.arccos(1. / n_index)  # Cherenkov angle for this index of refraction
N = 512 * 2          # number of samples in the time trace
dt = 0.1 * units.ns  # sampling interval
R = 1 * units.km     # observation distance
tt = np.arange(0, dt * N, dt)
# Viewing angles: on the Cherenkov cone and up to 5 degrees off it.
thetas = theta_C + np.array([0, 1, 2, 3, 4, 5]) * units.deg


def _compare_models(energy, shower_type, xlim, title, filename):
    """Plot ARZ2019 vs. Alvarez2000 time traces for each viewing angle.

    Creates a 2x3 grid of subplots (one per viewing angle) and saves it.

    Parameters
    ----------
    energy: shower energy (in NuRadioReco internal units)
    shower_type: "HAD" or "EM"
    xlim: (xmin, xmax) time range shown, in ns
    title: figure suptitle
    filename: output path for the saved figure
    """
    fig, ax = plt.subplots(2, 3, sharex=True)
    ax = ax.flatten()
    for iTheta, theta in enumerate(thetas):
        label = r"$\Delta \Theta$ = {:.1f}".format((theta - theta_C) / units.deg)
        trace = ask.get_time_trace(energy, theta, N, dt, shower_type, n_index, R,
                                   "ARZ2019", interp_factor=interp_factor)
        # ARZ2019 returns a multi-component trace; index 1 is plotted
        # (presumably the theta component — confirm against SignalGen docs).
        ax[iTheta].plot(tt, trace[1], '-C0', label=label)
        trace = ask.get_time_trace(energy, theta, N, dt, shower_type, n_index, R,
                                   "Alvarez2000")
        # Shift by 1 ns so both models peak at roughly the same time.
        trace = np.roll(trace, int(-1 * units.ns / dt))
        ax[iTheta].plot(tt, trace, '--C1', label=label)
        ax[iTheta].set_title(label)
        ax[iTheta].set_xlim(*xlim)
    # ax.legend()
    fig.tight_layout()
    fig.suptitle(title)
    fig.subplots_adjust(top=0.9)
    fig.savefig(filename)


# HAD shower at 1 EeV for different viewing angles.
_compare_models(E, "HAD", (45, 60),
                "HAD, Esh = {:.1f}EeV".format(E / units.EeV), "plots/04_1EeV_HAD.png")
# EM shower at 1 EeV.
_compare_models(E, "EM", (45, 70),
                "EM, Esh = {:.1f}EeV".format(E / units.EeV), "plots/04_1EeV_EM.png")
# EM shower at 10 PeV.
E = 0.01 * units.EeV
_compare_models(E, "EM", (45, 70),
                "EM, Esh = {:.1f}PeV".format(E / units.PeV), "plots/04_10PeV_EM.png")
plt.show()
|
nu-radioREPO_NAMENuRadioMCPATH_START.@NuRadioMC_extracted@NuRadioMC-master@NuRadioMC@SignalGen@tests@T04CompareModels.py@.PATH_END.py
|
{
"filename": "io_names_client.py",
"repo_name": "triton-inference-server/server",
"repo_path": "server_extracted/server-main/qa/L0_libtorch_io_names/io_names_client.py",
"type": "Python"
}
|
#!/usr/bin/python
# Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
sys.path.append("../common")
import unittest
from builtins import range
import numpy as np
import test_util as tu
import tritonclient.http as httpclient
class IONamingConvention(tu.TestResultCollector):
    """Checks that the libtorch backend handles every supported I/O tensor
    naming convention (index-based ``INPUT__0``, argument-named ``INPUT0``,
    arbitrary names, and mixtures of these)."""
    def _infer_helper(self, model_name, io_names, reversed_order=False):
        """Run one add/sub inference against ``model_name`` over HTTP and
        verify the two outputs.

        Parameters
        ----------
        model_name : str
            Name of the deployed model to query.
        io_names : list of 4 str
            [input0, input1, output0, output1] tensor names for this model.
        reversed_order : bool
            If True, swap which name gets which input data (and which output
            is read as which result) — used for models whose declared I/O
            order is reversed.
        """
        triton_client = httpclient.InferenceServerClient(
            "localhost:8000", verbose=False
        )
        # Create the data for the two inputs. Initialize the first to unique
        # integers and the second to all ones.
        input0_data = np.arange(start=0, stop=16, dtype=np.float32)
        input0_data = np.expand_dims(input0_data, axis=0)
        # NOTE(review): despite the comment above, the second input is filled
        # with -1, not ones.
        input1_data = np.full(shape=(1, 16), fill_value=-1, dtype=np.float32)
        inputs = []
        output_req = []
        inputs.append(
            httpclient.InferInput(
                io_names[0] if not reversed_order else io_names[1], [1, 16], "FP32"
            )
        )
        inputs[-1].set_data_from_numpy(input0_data)
        inputs.append(
            httpclient.InferInput(
                io_names[1] if not reversed_order else io_names[0], [1, 16], "FP32"
            )
        )
        inputs[-1].set_data_from_numpy(input1_data)
        output_req.append(
            httpclient.InferRequestedOutput(io_names[2], binary_data=True)
        )
        output_req.append(
            httpclient.InferRequestedOutput(io_names[3], binary_data=True)
        )
        results = triton_client.infer(model_name, inputs, outputs=output_req)
        output0_data = results.as_numpy(
            io_names[2] if not reversed_order else io_names[3]
        )
        output1_data = results.as_numpy(
            io_names[3] if not reversed_order else io_names[2]
        )
        # The model computes elementwise difference and sum of the two inputs.
        for i in range(16):
            self.assertEqual(input0_data[0][i] - input1_data[0][i], output0_data[0][i])
            self.assertEqual(input0_data[0][i] + input1_data[0][i], output1_data[0][i])
    def test_io_index(self):
        # Pure index-based naming.
        io_names = ["INPUT__0", "INPUT__1", "OUTPUT__0", "OUTPUT__1"]
        self._infer_helper("libtorch_io_index", io_names)
    def test_output_index(self):
        # Argument-named inputs, index-named outputs.
        io_names = ["INPUT0", "INPUT1", "OUTPUT__0", "OUTPUT__1"]
        self._infer_helper("libtorch_output_index", io_names)
    def test_no_output_index(self):
        # Argument-named inputs and outputs, no index suffix.
        io_names = ["INPUT0", "INPUT1", "OUTPUT0", "OUTPUT1"]
        self._infer_helper("libtorch_no_output_index", io_names)
    def test_no_arguments_no_output_index(self):
        # Arbitrary names unrelated to arguments or indices.
        io_names = ["INPUTA", "INPUTB", "OUTPUTA", "OUTPUTB"]
        self._infer_helper("libtorch_no_arguments_output_index", io_names)
    def test_mix_index(self):
        # Mixture of arbitrary and index-based names.
        io_names = ["INPUTA", "INPUT__1", "OUTPUTA", "OUTPUT__1"]
        self._infer_helper("libtorch_mix_index", io_names)
    def test_mix_arguments(self):
        # Mixture of argument-based and arbitrary/index-based names.
        io_names = ["INPUT0", "INPUTB", "OUTPUTA", "OUTPUT__1"]
        self._infer_helper("libtorch_mix_arguments", io_names)
    def test_mix_arguments_index(self):
        # Mixture of argument-based and index-based names.
        io_names = ["INPUT0", "INPUT__1", "OUTPUT0", "OUTPUT__1"]
        self._infer_helper("libtorch_mix_arguments_index", io_names)
    def test_unordered_index(self):
        # Model declares its I/O in reversed order.
        io_names = ["INPUT1", "INPUT0", "OUT__1", "OUT__0"]
        self._infer_helper("libtorch_unordered_index", io_names, reversed_order=True)
# Standard unittest entry point.
if __name__ == "__main__":
    unittest.main()
|
triton-inference-serverREPO_NAMEserverPATH_START.@server_extracted@server-main@qa@L0_libtorch_io_names@io_names_client.py@.PATH_END.py
|
{
"filename": "smooth_cal_inspect_2458086.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/smooth_cal_inspect/smooth_cal_inspect_2458086.ipynb",
"type": "Jupyter Notebook"
}
|
# Stage 2 Calibration Smoothing Nightly Notebook
**Josh Dillon**, Last Revised 12/4/20
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from hera_cal import io, redcal, apply_cal, abscal, utils
from hera_cal.smooth_cal import build_time_blacklist
from hera_qm.metrics_io import load_metric_file
import pyuvdata
import glob
import os
from copy import deepcopy
import inspect
import h5py
import matplotlib.cm as cm
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# lst_blacklist_string = '0-1.3 2.5-4.3 5.0-5.7 6.5-9.1 10.6-11.5 11.9-14.3 16.3-1.3'
# abscal_model_glob = '/lustre/aoc/projects/hera/zmartino/hera_calib_model/H3C/abscal_files_unique_baselines/zen.2458894.?????.uvh5'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
# os.environ["LST_BLACKLIST_STRING"] = lst_blacklist_string
# os.environ["ABSCAL_MODEL_GLOB"] = abscal_model_glob
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
lst_blacklist_string = os.environ['LST_BLACKLIST_STRING']
abscal_model_glob = os.environ['ABSCAL_MODEL_GLOB']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
print(f'lst_blacklist_string = "{lst_blacklist_string}"')
print(f'abscal_model_glob = "{abscal_model_glob}"')
```
JD = "2458086"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458086"
lst_blacklist_string = ""
abscal_model_glob = "/lustre/aoc/projects/hera/H1C_IDR3/abscal_model/zen.245804*.HH.uvRXLS.uvh5"
```python
print('Looking for data in', data_path, 'on JD', JD)
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.sum.uvh5')))
if len(data_list) == 0:
data_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.?????.uvh5')))
print('...found {} data files.'.format(len(data_list)))
abscal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.abs.calfits')))
print('...found {} abscal files.'.format(len(abscal_list)))
smooth_cal_list = sorted(glob.glob(os.path.join(data_path, f'zen.{JD}.*.sum.smooth_abs.calfits')))
print('...found {} smooth_cal files.'.format(len(smooth_cal_list)))
```
Looking for data in /lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458086 on JD 2458086
...found 73 data files.
...found 73 abscal files.
...found 73 smooth_cal files.
```python
# get all JDs and LSTs
_, _, file_lst_arrays, file_time_arrays = io.get_file_times(data_list)
# parse lst_blacklist_string
lst_blacklists = []
if len(lst_blacklist_string) > 0:
lst_blacklists = [tuple([float(arg) for arg in arg_pair.split('-', maxsplit=1)])
for arg_pair in lst_blacklist_string.split(' ')]
# get times that are blacklisted and reshape them like file_time_arrays
time_blacklisted_flat = build_time_blacklist(np.hstack(file_time_arrays), lst_blacklists=lst_blacklists)
time_blacklisted = [fta.astype(bool) for fta in file_time_arrays]
n = 0
for i in range(len(file_time_arrays)):
time_blacklisted[i] = np.zeros_like(time_blacklisted[i], dtype=bool)
for j in range(len(file_time_arrays[i])):
time_blacklisted[i][j] = time_blacklisted_flat[n]
n += 1
# pick the central time from among the not-LST blacklisted files, if possible
good_indices = [i for i, tb in enumerate(time_blacklisted) if not np.any(tb)]
if len(good_indices) > 0:
file_index = good_indices[len(good_indices)//2]
else:
file_index = len(data_list)//2
file_JD = '.'.join([s for s in data_list[file_index].split('.') if s.isdigit()])
```
/lustre/aoc/projects/hera/heramgr/anaconda2/envs/h1c_idr3/lib/python3.7/site-packages/numpy/core/_asarray.py:83: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
return array(a, dtype, copy=False, order=order)
```python
# Load abscal gains
hca = io.HERACal(abscal_list[file_index])
ga, gaf, _, _ = hca.read()
# Get min_bl_cut, we only want to compare baselines actually used in absolute calibration
try:
min_bl_cut = float(hca.history.replace('\n','').split('--min_bl_cut')[-1].split('--')[0].strip())
except:
print('Could not find min_bl_cut, setting to 1 m.')
min_bl_cut = 1.0
# Load the most common redundant baseline longer than min_bl_cut
hd = io.HERAData(data_list[file_index])
bls_to_plot = []
for pol in ['ee', 'nn']:
reds = redcal.get_reds(hd.antpos, pols=[pol])
reds = sorted(reds, key=len, reverse=True)
bl_lens = np.array([np.linalg.norm(hd.antpos[red[0][1]] - hd.antpos[red[0][0]]) for red in reds])
try:
bl_group_to_plot = (np.array(reds)[bl_lens >= min_bl_cut])[0]
except:
bl_group_to_plot = reds[0]
bls_to_plot.extend(bl_group_to_plot)
# Load smooth_cal gains and determine ex_ants
hc = io.HERACal(smooth_cal_list[file_index])
gains, gain_flags, _, _ = hc.read()
ex_ants = [ant for ant in gain_flags if np.all(gain_flags[ant])]
# Load data and calibrate
data, flags, nsamples = hd.read(bls=bls_to_plot)
sc_data, sc_flags = deepcopy(data), deepcopy(flags)
ac_data, ac_flags = deepcopy(data), deepcopy(flags)
apply_cal.calibrate_in_place(sc_data, gains, data_flags=sc_flags, cal_flags=gain_flags)
apply_cal.calibrate_in_place(ac_data, ga, data_flags=ac_flags, cal_flags=gaf)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
plt.figure(figsize=(8,8))
plt.scatter(np.array(list(hd.antpos.values()))[:,0],
np.array(list(hd.antpos.values()))[:,1], c='w', s=0)
for ant,pos in hd.antpos.items():
bad = ant in [ant[0] for ant in ex_ants]
plt.gca().add_artist(plt.Circle(tuple(pos[0:2]), radius=7,
fill=(~bad), color=['grey','r'][bad]))
plt.text(pos[0],pos[1],str(ant), va='center', ha='center', color='w')
plt.xlabel("Antenna East-West Position (meters)")
plt.ylabel("Antenna North-South Position (meters)")
plt.title('Antenna Positions on {} (Red = Flagged)'.format(file_JD));
plt.axis('equal')
plt.tight_layout()
plt.show()
```

### Figure 1: Array and Flagged Antennas
#### OBSERVER CHECKLIST:
* Check that the array configuration looks reasonable.
* Check that all antennas expected to be flagged are actually flagged, but also that not everything is getting flagged.
```python
# check whether the model is redundant by looking at the history
model_is_redundant = ('--model_is_redundant' in "".join(hc.history.split()))
# Find files that overlap with this file
abscal_matched_files = list(abscal.match_times(data_list[file_index],
sorted(glob.glob(abscal_model_glob)),
filetype='uvh5', atol=1e-5))
hdm = io.HERAData(abscal_matched_files)
# Get model baselines to load
model_bls = hdm.bls
model_antpos = hdm.antpos
if isinstance(model_bls, dict):
model_bls = list(model_bls.values())[0]
model_antpos = {ant: pos for antpos in hdm.antpos.values() for ant, pos in antpos.items()}
_, model_bl_to_load, data_to_model_bl_map = abscal.match_baselines(bls_to_plot, model_bls,
hd.antpos, model_antpos=model_antpos,
model_is_redundant=model_is_redundant)
model, model_flags, _ = hdm.read(bls=model_bl_to_load)
# Rephase model at index of best match to mean LST in the data
model_index = np.argmin(np.abs(model.lsts - np.mean(data.lsts)))
model_blvecs = {bl: model.antpos[bl[0]] - model.antpos[bl[1]] for bl in model.keys()}
utils.lst_rephase(model, model_blvecs, model.freqs, np.mean(data.lsts) - model.lsts[model_index],
lat=hdm.telescope_location_lat_lon_alt_degrees[0], inplace=True)
if not model_is_redundant:
model, _, _ = utils.red_average(model, flags=model_flags)
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
import warnings
with warnings.catch_warnings():
warnings.filterwarnings('ignore', r'All-NaN (slice|axis) encountered')
for pol in ['ee', 'nn']:
for func, plot, ylabel in zip([np.abs, np.angle], [plt.semilogy, plt.plot], ['Amplitude (Jy)', 'Phase (Radians)']):
plt.figure(figsize=(16,4))
for d, f, l, m in zip([ac_data, sc_data],
[ac_flags, sc_flags],
['Abs Calibrated Data', 'Smooth Calibrated Data'],
['r-', 'b.']):
to_avg = []
for bl in [k for k in bls_to_plot if k[2] == pol]:
blvec = hd.antpos[bl[0]] - hd.antpos[bl[1]]
to_avg.append(deepcopy(d[bl]))
to_avg[-1][f[bl]] = np.nan + 1.0j * np.nan
to_plot = np.nanmedian(np.real(to_avg), axis=(0,1)) + 1.0j * np.nanmedian(np.imag(to_avg), axis=(0,1))
plot(hd.freqs/1e6, func(to_plot), m, label=l)
for bl in [k for k in model if k[2] == pol]:
plot(hd.freqs/1e6, func(model[bl][model_index]), 'k-', label='Abscal Model')
plt.xlabel('Frequency (MHz)')
plt.ylabel(ylabel)
plt.legend(loc='lower right')
plt.title('{}-Polarized, {:f} m East, {:f} m North Visibility on {}'.format(pol, blvec[0], blvec[1], file_JD))
```




### Figure 2: Example redundant baseline average, both absolute calibrated and smoothed, compared to the Abscal Model
#### OBSERVER CHECKLIST:
* Check that the abscaled data and the smoothcaled data are reasonably consistent
* Check that both match the abscal model fairly well.
# Load a whole day
```python
# Load relative difference and flagging info from smooth_cal gains
ant_flags_dict = {}
avg_rel_diff_ee_dict = {}
avg_rel_diff_nn_dict = {}
rel_diff_med_dict = {}
ants = set([])
for cal in smooth_cal_list:
hc = io.HERACal(cal)
_, flags, rel_diff, avg_rel_diff = hc.read()
ants |= set(flags.keys())
ant_flags_dict[cal] = {ant: np.all(flags[ant]) for ant in flags}
avg_rel_diff_ee_dict[cal] = avg_rel_diff['Jee']
avg_rel_diff_nn_dict[cal] = avg_rel_diff['Jnn']
rel_diff_med_dict[cal] = {ant: np.nanmedian(rel_diff[ant], axis=1) for ant in rel_diff}
all_flagged_dict = {ant: np.all([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
avg_rel_diff_ee = np.vstack(np.array(list(avg_rel_diff_ee_dict.values())))
avg_rel_diff_nn = np.vstack(np.array(list(avg_rel_diff_nn_dict.values())))
```
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
```python
# save middle-numbered ants with a minimal number of flags
ants_to_save = {}
ant_to_nflags_dict = {ant: np.sum([af[ant] for af in ant_flags_dict.values()]) for ant in ants}
for pol in ['Jee', 'Jnn']:
min_flags = np.min([ant_to_nflags_dict[ant] for ant in ants if ant[1] == pol])
ant_candidates = sorted([ant for ant in ants if ant_to_nflags_dict[ant] == min_flags and ant[1] == pol])
Nac = len(ant_candidates)
ants_to_save[pol] = ant_candidates[(Nac // 2 - 1):(Nac // 2 + 1)]
```
```python
# Load smooth_cal gains/flags
times_dict = {}
sc_gain_dict = {}
sc_flag_dict = {}
for cal in smooth_cal_list:
hc = io.HERACal(cal)
gains, flags, _, _ = hc.read()
times_dict[cal] = hc.times
sc_gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
sc_flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
# Load abscal gains/flags
ac_gain_dict = {}
ac_flag_dict = {}
for cal in abscal_list:
hc = io.HERACal(cal)
gains, flags, _, _ = hc.read()
ac_gain_dict[cal] = {ant: gains[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
ac_flag_dict[cal] = {ant: flags[ant] for pol in ants_to_save for ant in ants_to_save[pol]}
# Organize gains/flags into grids
times = np.hstack(list(times_dict.values()))
lsts = 12 / np.pi * pyuvdata.utils.get_lst_for_time(times, *hd.telescope_location_lat_lon_alt_degrees)
sc_gains = {ant: np.vstack([sc_gain_dict[cal][ant] for cal in sc_gain_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
sc_flags = {ant: np.vstack([sc_flag_dict[cal][ant] for cal in sc_flag_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
flag_mask = np.all([f for f in sc_flags.values()], axis=0)
ac_gains = {ant: np.vstack([ac_gain_dict[cal][ant] for cal in ac_gain_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
ac_flags = {ant: np.vstack([ac_flag_dict[cal][ant] for cal in ac_flag_dict])
for pol in ants_to_save for ant in ants_to_save[pol]}
```
# Inspect a whole day
```python
# for overplotting blacklisted LSTs
my_cmap = cm.binary
my_cmap.set_under('k', alpha=0)
blacklist = np.ones_like(avg_rel_diff_ee) * np.hstack(time_blacklisted)[:, np.newaxis]
```
You are modifying the state of a globally registered colormap. In future versions, you will not be able to modify a registered colormap in-place. To remove this warning, you can make a copy of the colormap first. cmap = copy.copy(mpl.cm.get_cmap("binary"))
```python
# Pick vmax to not saturate 90% of the abscal gains
vmax = np.max([np.percentile(np.abs(sc_gains[ants_to_save[pol][1]][~flag_mask]), 99) for pol in ['Jee', 'Jnn']])
# Plot abscal gain amplitude waterfalls for a single antenna
fig, axes = plt.subplots(4, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25, 1]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.abs(sc_gains[ant]) / ~sc_flags[ant], aspect='auto', cmap='inferno',
interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Smoothcal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized' )
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
# Now plot median gain spectra
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
# plot abscal
to_med = deepcopy(np.abs(ac_gains[ant]))
to_med[sc_flags[ant]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
ax.plot(hd.freqs / 1e6, np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=0), 'r.', label='Abscal')
# plot smooth_cal
to_med = deepcopy(np.abs(sc_gains[ant]))
to_med[sc_flags[ant]] = np.nan
if not np.all(np.hstack(time_blacklisted)):
ax.plot(hd.freqs / 1e6, np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=0), 'k.', ms=2, label='Smoothcal')
ax.set_ylim([0, vmax])
ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('|g| (unitless)')
ax.set_title(f'Median Non-Blacklisted or Flagged Gain Amplitude Spectrum of Antenna {ant[0]}: {pol[1:]}-polarized')
ax.legend()
# Now plot median gain time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
to_med = deepcopy(np.abs(ac_gains[ant]))
to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan
# plot abscal
if not np.all(np.hstack(time_blacklisted)):
ax.plot(lsts[~np.hstack(time_blacklisted)],
np.nanmedian(to_med[~np.hstack(time_blacklisted), :], axis=1),
'b.', label='Abscal: Not Blacklisted LSTs')
if np.any(np.hstack(time_blacklisted)):
ax.plot(lsts[np.hstack(time_blacklisted)],
np.nanmedian(to_med[np.hstack(time_blacklisted), :], axis=1),
'r.', label='Abscal: Blacklisted LSTs')
# plot smooth_cal
to_med = deepcopy(np.abs(sc_gains[ant]))
to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan
ax.plot(lsts, np.nanmedian(to_med, axis=1),'k.', ms=2, label='Smoothcal')
ax.set_ylim([0, vmax])
ax.set_xlabel('LST (hours)')
ax.set_ylabel('|g| (unitless)')
ax.set_title(f'Median Over Unflagged Channels Gain Amplitude Time-Series of Antenna {ant[0]}: {pol[1:]}-polarized')
ax.legend()
# Now flagged plot abscal waterfall
for ax, pol in zip(axes[3], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.abs(ac_gains[ant]) / ~sc_flags[ant], aspect='auto', cmap='inferno',
interpolation='nearest', vmin=0, vmax=vmax, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Flagged Abscal Gain Amplitude of Antenna {ant[0]}: {pol[1:]}-polarized' )
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
plt.tight_layout()
```
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 3 Example Smoothing of Gain Amplitudes
Smoothcal (top row) and Abscal (bottom row) gain amplitudes for an example antenna. In the waterfalls, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot median non-blacklisted amplitudes as a function of frequency (second row) and the median amplitude as a function of time (third row) for both abscal and smoothcal.
#### OBSERVER CHECKLIST:
* Check that the smoothcal solution matches the abscal solution reasonably well in the non-blacklisted regions.
* Check to see that the overall bandpass looks reasonable
```python
# Plot abscal gain amplitude waterfalls for a single antenna
fig, axes = plt.subplots(4, 2, figsize=(16,16), gridspec_kw={'height_ratios': [1, .25, .25, 1]})
for ax, pol in zip(axes[0], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.angle(sc_gains[ant0] / sc_gains[ant1]) / ~sc_flags[ant0], aspect='auto', cmap='inferno',
interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Smoothcal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
# Now plot median gain spectra
for ax, pol in zip(axes[1], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
# plot abscal
to_med = deepcopy(ac_gains[ant0] / ac_gains[ant1])
to_med[sc_flags[ant0]] = np.nan + 1.0j * np.nan
if not np.all(np.hstack(time_blacklisted)):
med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=0)
med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=0)
ax.plot(hd.freqs / 1e6, np.angle(med), 'r.', label='Abscal')
# plot smooth_cal
to_med = deepcopy(sc_gains[ant0] / sc_gains[ant1])
to_med[sc_flags[ant0]] = np.nan + 1.0j * np.nan
if not np.all(np.hstack(time_blacklisted)):
med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=0)
med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=0)
ax.plot(hd.freqs / 1e6, np.angle(med), 'k.', ms=2, label='Smoothcal')
ax.set_ylim([-np.pi, np.pi])
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel(f'Phase of g$_{ant0[0]}$ / g$_{ant1[0]}$')
ax.set_title(f'Median Non-Blacklisted or Flagged Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
ax.legend()
# Now plot median gain time series
for ax, pol in zip(axes[2], ['Jee', 'Jnn']):
ant = ants_to_save[pol][1]
to_med = deepcopy(np.abs(ac_gains[ant]))
to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan
# plot abscal
to_med = deepcopy(ac_gains[ant0] / ac_gains[ant1])
to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan + 1.0j * np.nan
if not np.all(np.hstack(time_blacklisted)):
med = 1.0j * np.nanmedian(to_med[~np.hstack(time_blacklisted), :].imag, axis=1)
med += np.nanmedian(to_med[~np.hstack(time_blacklisted), :].real, axis=1)
ax.plot(lsts[~np.hstack(time_blacklisted)], np.angle(med), 'b.', label='Abscal: Not Blacklisted LSTs')
if np.any(np.hstack(time_blacklisted)):
med = 1.0j * np.nanmedian(to_med[np.hstack(time_blacklisted), :].imag, axis=1)
med += np.nanmedian(to_med[np.hstack(time_blacklisted), :].real, axis=1)
ax.plot(lsts[np.hstack(time_blacklisted)], np.angle(med), 'r.', label='Abscal: Blacklisted LSTs')
# plot smooth_cal
to_med = deepcopy(sc_gains[ant0] / sc_gains[ant1])
to_med[:, np.all(sc_flags[ant], axis=0)] = np.nan + 1.0j * np.nan
med = 1.0j * np.nanmedian(to_med.imag, axis=1) + np.nanmedian(to_med.real, axis=1)
ax.plot(lsts, np.angle(med), 'k.', ms=2, label='Smoothcal')
ax.set_ylim([-np.pi, np.pi])
ax.set_xlabel('LST (hours)')
ax.set_ylabel(f'Phase of g$_{ant0[0]}$ / g$_{ant1[0]}$')
ax.set_title(f'Median Non-Blacklisted or Flagged Gain Phase Spectrum of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
ax.legend()
# Now flagged plot abscal waterfall
for ax, pol in zip(axes[3], ['Jee', 'Jnn']):
ant0, ant1 = ants_to_save[pol]
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(np.angle(ac_gains[ant0] / ac_gains[ant1]) / ~sc_flags[ant], aspect='auto', cmap='inferno',
interpolation='nearest', vmin=-np.pi, vmax=np.pi, extent=extent)
ax.imshow(blacklist, aspect='auto', cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title(f'Flagged Abscal Gain Phase of Ant {ant0[0]} / Ant {ant1[0]}: {pol[1:]}-polarized')
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_xlim([hd.freqs[0]/1e6, hd.freqs[-1]/1e6])
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, orientation='horizontal', pad=.1)
plt.tight_layout()
```
divide by zero encountered in true_divide
FixedFormatter should only be used together with FixedLocator
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
All-NaN slice encountered
divide by zero encountered in true_divide
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 4 Example Smoothing of Gain Phases
Smoothcal (top row) and Abscal (bottom row) gain phases for an example antenna. In the waterfalls, grayed out regions are "blacklisted," meaning they are not flagged but they are given zero weight when performing calibration smoothing. We also plot median non-blacklisted phases as a function of frequency (second row) and the median phases as a function of time (third row) for both abscal and smoothcal.
#### OBSERVER CHECKLIST:
* Check that the smoothcal solution matches the abscal solution reasonably well in the non-blacklisted regions.
* Check to see that the final gain solution is reasonably approximated by a single time-independent delay (linear phase ramp in row 2).
```python
fig, axes = plt.subplots(1, 2, figsize=(20,12))
for ax, rd, t in zip(axes, [avg_rel_diff_ee, avg_rel_diff_nn], ['ee-polarized', 'nn-polarized']):
extent=[hd.freqs[0]/1e6, hd.freqs[-1]/1e6, times[-1], times[0]]
im = ax.imshow(rd / ~sc_flags[ant0], aspect='auto', vmin=0, cmap='inferno', vmax=.2, interpolation='nearest', extent=extent)
ax.imshow(blacklist, aspect='auto',
cmap=my_cmap, interpolation=None, clim=[0.9, 1], alpha=.25, extent=extent)
ax.set_title('Relative Difference Between Smoothcal and Abscal: ' + t)
ax.set_xlabel('Frequency (MHz)')
ax.set_ylabel('LST (Hours)')
ax.set_yticklabels(np.around(lsts[[min(max(np.searchsorted(times, t), 0), len(times) - 1) for t in ax.get_yticks()]], 2))
plt.colorbar(im, ax=ax, label='$|g_{smooth} - g_{abs}| / |g_{abs}|$ (unitless)')
```
invalid value encountered in true_divide
FixedFormatter should only be used together with FixedLocator

### Figure 5: Relative difference between Abscal and Smoothcal
Where omnical calfits files store $\chi^2$ per antenna, smooth_cal calfits files store the relative difference between Abscal and Smoothcal gains. This difference is done before taking the absolute value, so this metric is sensitive both to phase errors and amplitude errors.
#### OBSERVER CHECKLIST:
* Look for regions of high relative difference that are not blacklisted. This would indicate a problem with smoothing.
# Metadata
```python
print(redcal.version.history_string())
```
------------
This file was produced by the function <module>() in <ipython-input-1-c6de44361328> using:
git_branch: master
git_description: v3.0-733-gd2dd8ccf
git_hash: d2dd8ccf3fe43d5e5eb6a4c28ceaf4a6e3d1fcb7
git_origin: git@github.com:HERA-Team/hera_cal.git
version: 3.0
------------
```python
```
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@smooth_cal_inspect@smooth_cal_inspect_2458086.ipynb@.PATH_END.py
|
{
"filename": "test_rc.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/plot/tests/test_rc.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Cardiff University (2018-2021)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Tests for `gwpy.plot.rc`
"""
import pytest
from matplotlib import rcParams
from .. import rc as plot_rc
# Matplotlib's default subplot margins, collected in (left, right, bottom, top)
# order to match the `lrbt` parameter of the test below.
# NOTE(review): the name says "LRTB" but the actual order is L/R/B/T — confirm
# whether the name should be DEFAULT_LRBT.
DEFAULT_LRTB = [
    rcParams[f"figure.subplot.{x}"]
    for x in ('left', 'right', 'bottom', 'top')
]
@pytest.mark.parametrize('figsize, lrbt', [
    ((6.4, 4.8), (.1875, .87, .16, .88)),
    ((0, 0), DEFAULT_LRTB),
])
def test_get_subplot_params(figsize, lrbt):
    """Check that `get_subplot_params` returns the expected margins.

    A zero-area figure should fall back to matplotlib's rcParams defaults;
    the standard figure size gets GWpy's tuned margins.
    """
    params = plot_rc.get_subplot_params(figsize)
    expected = dict(zip(('left', 'right', 'bottom', 'top'), lrbt))
    for side, value in expected.items():
        assert getattr(params, side) == value
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@plot@tests@test_rc.py@.PATH_END.py
|
{
"filename": "lumfn.py",
"repo_name": "desihub/LSS",
"repo_path": "LSS_extracted/LSS-main/py/LSS/DESI_ke/lumfn.py",
"type": "Python"
}
|
import fitsio
import subprocess
import astropy.io.fits as fits
import numpy as np
from astropy.table import Table
from cosmo import volcom
def multifield_lumfn(lumfn_list, ext=None, weight=None):
    """Combine per-field luminosity-function tables into a single table.

    Parameters
    ----------
    lumfn_list : list of str
        Paths to per-field tables readable by `astropy.table.Table.read`.
    ext : str, optional
        FITS extension to read; None and 'LUMFN' select the luminosity-function
        columns, 'REFERENCE' the reference-Schechter columns.
    weight : str, optional
        Name of a header keyword holding each field's relative weight; used
        only by the mean rule.

    Returns
    -------
    astropy.table.Table
        Table with counts summed, means (optionally weighted) averaged, and
        errors combined in quadrature across fields.
    """
    # Load every field's table, optionally from a named extension.
    tables = [Table.read(path) if ext is None else Table.read(path, ext)
              for path in lumfn_list]

    weights = None
    if weight is not None:
        weights = np.array([tab.meta[weight] for tab in tables]).astype(float)
        print('Retrieved relative weights: {} for {} weight.'.format(weights, weight))

    def _column_matrix(col):
        # Shape (nrow, nfield): one column per input table.
        return np.c_[[tab[col].data for tab in tables]].T

    def _combine_sum(col):
        # Straight sum across fields (e.g. raw galaxy counts).
        return np.sum(_column_matrix(col), axis=1)

    def _combine_mean(col):
        # (Optionally weighted) mean across fields.
        if weights is not None:
            print('Mean rule: applying relative weights.')
        return np.average(_column_matrix(col), axis=1, weights=weights)

    def _combine_quadsum(col):
        # Quadrature sum across fields (for error columns).
        # TODO
        if weights is not None:
            print('WARNING: weights is unsupported for lumfn quadsum rule.')
        return np.sqrt(np.sum(_column_matrix(col)**2., axis=1))

    if ext in [None, 'LUMFN']:
        sum_cols = ['N']
        mean_cols = ['MEDIAN_M', 'PHI_N', 'PHI_IVMAX', 'V_ON_VMAX', 'REF_SCHECHTER', 'REF_RATIO']
        qsum_cols = ['PHI_N_ERROR', 'PHI_IVMAX_ERROR']
    elif ext == 'REFERENCE':
        sum_cols = []
        mean_cols = ['MS', 'REFSCHECHTER']
        qsum_cols = []
    else:
        raise RuntimeError(f'MultifieldLumfn: Extension {ext} is not supported.')

    # Preserve column-insertion order: means, then sums, then quadrature sums.
    result = Table()
    for col in mean_cols:
        result[col] = _combine_mean(col)
    for col in sum_cols:
        result[col] = _combine_sum(col)
    for col in qsum_cols:
        result[col] = _combine_quadsum(col)
    return result
def lumfn(dat, Ms=np.arange(-25.5, -15.5, 0.4), Mcol='MCOLOR_0P0', bitmask='IN_D8LUMFN', jackknife=None, opath=None):
    """Compute a binned luminosity function (number counts and 1/Vmax estimator).

    Parameters
    ----------
    dat : astropy.table.Table or compatible
        Galaxy catalogue; must provide the columns named by `Mcol`, `bitmask`
        and 'VMAX', plus 'VOLUME' (and jackknife keys when used) in `.meta`.
    Ms : ndarray
        Uniformly spaced absolute-magnitude bin edges.
    Mcol : str
        Column holding the absolute magnitude to bin on.
    bitmask : str
        Column whose zero entries select galaxies to include.
    jackknife : None, int or ndarray, optional
        None for the full sample; an int to drop one jackknife region; an
        array of indices to run every listed jackknife in turn.
    opath : str, optional
        Path of an existing FITS file to append jackknife results to
        (required when `jackknife` is not None).

    Returns
    -------
    astropy.table.Table or int
        The luminosity-function table, or 0 after jackknife results have been
        appended to `opath`.
    """
    # Array of jackknife indices: recurse once per index, writing each
    # realisation to `opath`, then stop.
    if type(jackknife) == np.ndarray:
        for jk in jackknife:
            lumfn(dat, Ms=Ms, Mcol=Mcol, bitmask=bitmask, jackknife=int(jk), opath=opath)
        return 0
    elif type(jackknife) == int:
        pass
    elif jackknife is None:
        pass
    else:
        raise ValueError('Unsupported jackknife of type {}'.format(type(jackknife)))
    # Work on a copy so the caller's table is never mutated, then keep only
    # galaxies passing the bitmask cut.
    dat = Table(dat, copy=True)
    dat = dat[dat[bitmask] == 0]
    dvmax = dat['VMAX'].data
    vol = dat.meta['VOLUME']
    # default: bins[i-1] <= x < bins[i]
    if jackknife is not None:
        print('Solving for jack knife {}'.format(jackknife))
        # Shrink the survey volume (and each galaxy's Vmax) by the fraction
        # retained after removing this jackknife region.
        jk_volfrac = dat.meta['JK_VOLFRAC']
        vol *= jk_volfrac
        dat = dat[dat['JK'] != f'JK{jackknife}']
        dvmax = jk_volfrac * dat['VMAX'].data
    # Bin assignment per galaxy; bins must be uniform for the dM below.
    idxs = np.digitize(dat[Mcol], bins=Ms)
    result = []
    ds = np.diff(Ms)
    dM = ds[0]
    assert np.all(ds == dM)
    for idx in np.arange(len(Ms) - 1):
        sample = dat[idxs == idx]
        nsample = len(sample)
        if nsample > 0:
            median = np.median(sample[Mcol])
        else:
            # Empty bin: fall back to the bin centre.
            median = 0.5 * (Ms[idx] + Ms[idx+1])
        vmax = dvmax[idxs == idx]
        ivmax = 1. / vmax
        ivmax2 = 1. / vmax**2.
        if len(vmax) == 0:
            median_vmax = 0
        else:
            median_vmax = np.median(vmax) / vol
        # Per-bin row: median M, phi (counts and 1/Vmax) with Poisson-style
        # errors, raw count, and median Vmax/V.
        result.append([median,\
                       nsample / dM / vol,\
                       np.sqrt(nsample) / dM / vol,\
                       np.sum(ivmax) / dM,\
                       np.sqrt(np.sum(ivmax2)) / dM,\
                       nsample,
                       median_vmax])
    names = ['MEDIAN_M', 'PHI_N', 'PHI_N_ERROR', 'PHI_IVMAX', 'PHI_IVMAX_ERROR', 'N', 'V_ON_VMAX']
    result = Table(np.array(result), names=names)
    # Propagate the catalogue header, then record the binning and volume used.
    result.meta.update(dat.meta)
    result.pprint()
    result.meta['MS'] = str(['{:.4f}'.format(x) for x in Ms.tolist()])
    result.meta['VOLUME'] = vol
    result.meta['ABSMAG_DEF'] = Mcol
    if jackknife is not None:
        # Jackknife runs are appended as a new extension of an existing file.
        result.meta['EXTNAME'] = 'LUMFN_JK{}'.format(jackknife)
        result.meta['RENORM'] = 'FALSE'
        result.meta['JK_VOLFRAC'] = dat.meta['JK_VOLFRAC']
        result.meta['NJACK'] = dat.meta['NJACK']
        result = fits.convenience.table_to_hdu(result)
        with fits.open(opath, mode='update') as hdulist:
            hdulist.append(result)
            hdulist.flush()
            hdulist.close()
        # Fix group/permissions for shared access on the DESI filesystem.
        cmds = []
        cmds.append(f'chgrp desi {opath}')
        cmds.append(f'chmod 700 {opath}')
        for cmd in cmds:
            output = subprocess.check_output(cmd, shell=True)
            print(cmd, output)
        return 0
    else:
        return result
|
desihubREPO_NAMELSSPATH_START.@LSS_extracted@LSS-main@py@LSS@DESI_ke@lumfn.py@.PATH_END.py
|
{
"filename": "hdf5_test.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/tests/hdf5_test.py",
"type": "Python"
}
|
from pathlib import Path
import fsspec.implementations.memory
from dask.utils import tmpdir
import vaex
from vaex.dataframe import DataFrameLocal
DATA_PATH = Path(__file__).parent / 'data'
def test_hdf5_with_alias(tmpdir):
    """Open an HDF5 file whose column names ('X-1', '#') require aliasing."""
    # NOTE(review): this dataframe is immediately overwritten by vaex.open
    # below, and the `tmpdir` fixture is unused — the from_dict call looks
    # like leftover scaffolding; confirm whether it can be removed.
    df = vaex.from_dict({'X-1': [1], '#': [2]})
    path = DATA_PATH / 'with_alias.hdf5'
    df = vaex.open(str(path))
    # Names with '-' or '#' are not valid identifiers, so item access must work.
    assert df['X-1'].tolist() == [1]
    assert df['#'].tolist() == [2]
def test_categorical(tmpdir, df_factory):
    """Round-trip an ordinal-encoded (categorical) string column through HDF5.

    Exercises three stages: the in-memory encoded column, a plain file
    round-trip, and a re-open through an fsspec (in-memory) filesystem.
    """
    path = tmpdir / "with_cats.hdf5"
    s = ["aap", "noot", "mies", "mies", "aap", None]
    df: DataFrameLocal = df_factory(s=s)
    df = df.ordinal_encode("s")
    df = df._future()
    # fix: tolist was called with a spurious empty-tuple argument `tolist(())`
    assert df.s.tolist() == s
    df.export(path)
    df_result = vaex.open(path)
    assert df_result.s.tolist() == s
    # make sure we also support cloud storage etc
    fs = fsspec.implementations.memory.MemoryFileSystem()
    fs.put(str(path), "with_cats.hdf5")
    df = vaex.open('with_cats.hdf5', fs=fs)
    # fix: previously asserted on `df_result` (the local open) again, leaving
    # the fs-backed dataframe unverified
    assert df.s.tolist() == s
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@tests@hdf5_test.py@.PATH_END.py
|
{
"filename": "_ohlc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/template/data/_ohlc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OhlcValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Compound-array validator for ``ohlc`` entries of ``layout.template.data``."""

    def __init__(
        self, plotly_name="ohlc", parent_name="layout.template.data", **kwargs
    ):
        # Pop the compound-class configuration out of **kwargs first so that
        # callers may still override either value.
        data_class_str = kwargs.pop("data_class_str", "Ohlc")
        data_docs = kwargs.pop(
            "data_docs",
            """
            """,
        )
        super(OhlcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@template@data@_ohlc.py@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choropleth/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source validator for the ``bgcolorsrc`` property of ``choropleth.hoverlabel``."""

    def __init__(
        self, plotly_name="bgcolorsrc", parent_name="choropleth.hoverlabel", **kwargs
    ):
        # ``edit_type`` defaults to "none" but remains caller-overridable.
        edit_type = kwargs.pop("edit_type", "none")
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choropleth@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "conservative_viscosity_update.md",
"repo_name": "rometsch/fargocpt",
"repo_path": "fargocpt_extracted/fargocpt-master/docs_source/source/Numerics/conservative_viscosity_update.md",
"type": "Markdown"
}
|
# Conservative form
Baruteau 2008 writes the viscosity updates as:
## Radial velocity
$\frac{\partial \Sigma v_r}{\partial \mathrm{d}t} = \frac{1}{r}[\frac{\partial (r \tau_{rr})}{\partial r} + \frac{\partial (\tau_{r\varphi})}{\partial \varphi} - \tau_{\varphi \varphi}]$
Looking at the $r$ part, the non-conservative form is discretized as
$\frac{\partial \Sigma v_r}{dt} = \frac{1}{r}\frac{\partial (r \tau_{rr})}{\partial r} = \frac{1}{R_a^i}\frac{R_b^i \tau_{rr}^i - R_b^{i-1} \tau_{rr}^{i-1}}{R_b^i - R_b^{i-1}}$
The conservative form is derived as follows:
$\int \frac{\partial \Sigma v_r}{\partial t} \mathrm{d}V = \int \frac{1}{r}\frac{\partial r \tau_{rr}}{\partial r} \mathrm{d}V$
with $\mathrm{d}V = r \mathrm{d}r \mathrm{d} \varphi$, where we
integrate $\varphi$ from $\varphi^{j-1/2}$ to $\varphi^{j+1/2}$
and $r$ from $R_b^{i-1}$ to $R_b^i$, so that the center of the
integration region is at $R_a^i$, $\varphi^j$, where $v_r$ is located.
$\int \frac{\partial \Sigma v_r}{\partial t} r \mathrm{d}r \mathrm{d} \varphi = \int \frac{1}{r}\frac{\partial r \tau_{rr}}{\partial r} r \mathrm{d}r \mathrm{d} \varphi$
$\Sigma \frac{1}{2}((R_b^i)^2 - (R_b^{i-1})^2) \Delta \varphi \frac{\partial v_r}{\partial t} = \int^{R_b^i}_{R_b^{i-1}} \partial (r \tau_{rr}) \Delta\varphi$
$\Sigma \frac{1}{2}((R_b^i)^2 - (R_b^{i-1})^2) \Delta \varphi \frac{\partial v_r}{\partial t} = (R_b^i \tau_{rr}^i - R_b^{i-1} \tau_{rr}^{i-1}) \Delta \varphi$
$\frac{\partial v_r}{dt} = \frac{2}{\Sigma}\frac{R_b^i \tau_{rr}^i - R_b^{i-1} \tau_{rr}^{i-1}}{(R_b^i)^2 - (R_b^{i-1})^2}$
$\frac{\partial v_r}{dt} = \frac{1}{\Sigma}\frac{2}{R_b^i + R_b^{i-1}} \frac{R_b^i \tau_{rr}^i - R_b^{i-1} \tau_{rr}^{i-1}}{R_b^i - R_b^{i-1}}$
$\int \frac{\partial \Sigma v_r}{\partial t} \mathrm{d}V = \int \frac{1}{r}\frac{\partial \tau_{r\varphi}}{\partial \varphi} \mathrm{d}V$
$\Sigma \frac{1}{2}((R_b^i)^2 - (R_b^{i-1})^2) \Delta \varphi \frac{\partial v_r}{\partial t} = \int \frac{1}{r}\frac{\partial \tau_{r\varphi}}{\partial \varphi} r \mathrm{d}r \mathrm{d} \varphi$
$\Sigma \frac{1}{2}((R_b^i)^2 - (R_b^{i-1})^2) \Delta \varphi \frac{\partial v_r}{\partial t} = \int \partial^{\varphi} \tau_{r\varphi} \mathrm{d}r$
$\Sigma \frac{1}{2}((R_b^i)^2 - (R_b^{i-1})^2) \Delta \varphi \frac{\partial v_r}{\partial t} = (R_b^{i} - R_b^{i-1})(\tau_{r\varphi}^{j+1} - \tau_{r\varphi}^j)$
$\Sigma \Delta \varphi \frac{\partial v_r}{\partial t} = \frac{2(R_b^{i} - R_b^{i-1})}{((R_b^i)^2 - (R_b^{i-1})^2)}(\tau_{r\varphi}^{j+1} - \tau_{r\varphi}^j)$
$\frac{\partial v_r}{\partial t} = \frac{1}{\Sigma} \frac{2}{R_b^i + R_b^{i-1}} \frac{\tau_{r\varphi}^{j+1} - \tau_{r\varphi}^j} {\Delta \varphi}$
$\int \frac{\partial \Sigma v_r}{\partial t} \mathrm{d}V = \int \frac{1}{r} \tau_{\varphi\varphi} \mathrm{d}V$
$\Sigma \frac{1}{2}((R_b^i)^2 - (R_b^{i-1})^2) \Delta \varphi \frac{\partial v_r}{\partial t} = \int \frac{1}{r}\tau_{\varphi \varphi} r \mathrm{d}r \mathrm{d} \varphi$
$\Sigma \frac{1}{2}((R_b^i)^2 - (R_b^{i-1})^2) \Delta \varphi \frac{\partial v_r}{\partial t} = \tau_{\varphi \varphi} (R_b^i - R_b^{i-1}) \Delta \varphi$
$\frac{\partial v_r}{\partial t} = \frac{1}{\Sigma} \frac{2(R_b^i - R_b^{i-1})}{(R_b^i)^2 - (R_b^{i-1})^2} \tau_{\varphi \varphi}$
$\frac{\partial v_r}{\partial t} = \frac{1}{\Sigma} \frac{2}{R_b^i + R_b^{i-1}} \tau_{\varphi \varphi}$
The total $v_r$ update is just the sum of the 3 parts we just covered:
$\frac{\partial v_r}{\partial t} = \frac{2}{\Sigma^i + \Sigma^{i-1}} \frac{2}{R_b^i + R_b^{i-1}} (\frac{R_b^i \tau_{rr}^i - R_b^{i-1} \tau_{rr}^{i-1}}{R_b^i - R_b^{i-1}} + \tau_{\varphi \varphi} + \frac{\tau_{r\varphi}^{j+1} - \tau_{r\varphi}^j} {\Delta \varphi})$
## Azimuthal velocity
For the azimuthal velocity, it is important to do the update for the angular
momentum such that it is conserved (see D'Angelo et al. 2002 Eq. 4).
The term $\frac{\partial (r \tau_{r\varphi})}{\partial r} + \tau_{r \varphi}$ can be rewritten as $\frac{1}{r}\frac{\partial (r^2 \tau_{r\varphi})}{\partial r}$.
Considering this, the velocity update in Masset 2002:
$\frac{\partial \Sigma v_\varphi}{\partial t} = \frac{1}{r}[\frac{\partial (r \tau_{r\varphi})}{\partial r} + \frac{\partial (\tau_{\varphi \varphi})}{\partial \varphi} + \tau_{r \varphi}]$
can be rewritten to the angular momentum update seen in D'Angelo et al. 2002
$\frac{\partial \Sigma l}{\partial t} = \frac{1}{r}\frac{\partial (r^2 \tau_{r\varphi})}{\partial r} + \frac{\partial (\tau_{\varphi \varphi})}{\partial \varphi}$
Again we integrate over $\mathrm{d}V = r \mathrm{d}r \mathrm{d} \varphi$,
with $\varphi$ ranging from $\varphi^{j-1}$ to $\varphi^{j}$ and
$r$ from $R_a^{i}$ to $R_a^{i+1}$, so that the center of the
integration region is at $R_b^i$, $\varphi^{j-1/2}$, where $v_\varphi$ is located.
$\int \frac{\partial \Sigma l}{\partial t} \mathrm{d}V = \int \frac{1}{r}\frac{\partial (r^2 \tau_{r\varphi})}{\partial r} \mathrm{d}V$
$\int \frac{\partial \Sigma l}{\partial t} r \mathrm{d}r \mathrm{d} \varphi = \int \partial (r^2 \tau_{r\varphi})\mathrm{d} \varphi$
$\frac{1}{2}((R_a^{i+1})^2 - (R_a^i)^2) \Delta \varphi \frac{\partial \Sigma l}{\partial t} = ((R_a^{i+1})^2 \tau_{r\varphi}^{i+1} - (R_a^{i})^2 \tau_{r\varphi}^{i}) \Delta \varphi$
$\frac{\partial \Sigma l}{\partial t} = \frac{2}{((R_a^{i+1})^2 - (R_a^i)^2)} ((R_a^{i+1})^2 \tau_{r\varphi}^{i+1} - (R_a^{i})^2 \tau_{r\varphi}^{i})$
$\Sigma R_b^i \frac{\partial v_\varphi}{\partial t} = \frac{2}{((R_a^{i+1})^2 - (R_a^i)^2)} ((R_a^{i+1})^2 \tau_{r\varphi}^{i+1} - (R_a^{i})^2 \tau_{r\varphi}^{i})$
$\frac{\partial v_\varphi}{\partial t} = \frac{1}{\Sigma R_b^i} \frac{2}{((R_a^{i+1})^2 - (R_a^i)^2)} ((R_a^{i+1})^2 \tau_{r\varphi}^{i+1} - (R_a^{i})^2 \tau_{r\varphi}^{i})$
$\int \frac{\partial \Sigma l}{\partial t} \mathrm{d}V = \int \frac{\partial (\tau_{\varphi\varphi})} {\partial \varphi} \mathrm{d}V$
$\frac{1}{2}((R_a^{i+1})^2 - (R_a^i)^2) \Delta \varphi \frac{\partial \Sigma l}{\partial t} = \int \frac{\partial (\tau_{\varphi\varphi})} {\partial \varphi} r \mathrm{d}r \mathrm{d}\varphi$
$\frac{1}{2}((R_a^{i+1})^2 - (R_a^i)^2) \Delta \varphi \frac{\partial \Sigma l}{\partial t} = \frac{1}{2}((R_a^{i+1})^2 - (R_a^i)^2) (\tau_{\varphi\varphi}^{j} - \tau_{\varphi\varphi}^{j-1})$
$\frac{\partial \Sigma l}{\partial t} = \frac{(\tau_{\varphi\varphi}^{j} - \tau_{\varphi\varphi}^{j-1})}{\Delta \varphi}$
$\frac{\partial v_\varphi}{\partial t} = \frac{1}{\Sigma R_b^i} \frac{(\tau_{\varphi\varphi}^{j} - \tau_{\varphi\varphi}^{j-1})}{\Delta \varphi}$
The total $v_\varphi$ update is then just the sum of the 2 components:
$\frac{\partial v_\varphi}{\partial t} = \frac{2}{\Sigma^{j} + \Sigma^{j-1}}\frac{1}{R_b^i} (\frac{2}{((R_a^{i+1})^2 - (R_a^i)^2)} ((R_a^{i+1})^2 \tau_{r\varphi}^{i+1} - (R_a^{i})^2 \tau_{r\varphi}^{i}) + \frac{(\tau_{\varphi\varphi}^{j} - \tau_{\varphi\varphi}^{j-1})}{\Delta \varphi})$
|
rometschREPO_NAMEfargocptPATH_START.@fargocpt_extracted@fargocpt-master@docs_source@source@Numerics@conservative_viscosity_update.md@.PATH_END.py
|
{
"filename": "_svds_doc.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/sparse/linalg/_eigen/_svds_doc.py",
"type": "Python"
}
|
def _svds_arpack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                     maxiter=None, return_singular_vectors=True,
                     solver='arpack', rng=None):
    """
    Partial singular value decomposition of a sparse matrix using ARPACK.
    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.
    In the descriptions below, let ``M, N = A.shape``.
    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, optional
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N) - 1``.
        Default is 6.
    ncv : int, optional
        The number of Lanczos vectors generated.
        The default is ``min(n, max(2*k + 1, 20))``.
        If specified, must satisfy ``k + 1 < ncv < min(M, N)``; ``ncv > 2*k``
        is recommended.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        The starting vector for iteration:
        an (approximate) left singular vector if ``N > M`` and a right singular
        vector otherwise. Must be of length ``min(M, N)``.
        Default: random
    maxiter : int, optional
        Maximum number of Arnoldi update iterations allowed;
        default is ``min(M, N) * 10``.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.
        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.
    solver : {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='arpack'``.
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>` and
        :ref:`'propack' <sparse.linalg.svds-propack>`
        are also supported.
    rng : `numpy.random.Generator`, optional
        Pseudorandom number generator state. When `rng` is None, a new
        `numpy.random.Generator` is created using entropy from the
        operating system. Types other than `numpy.random.Generator` are
        passed to `numpy.random.default_rng` to instantiate a ``Generator``.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.
    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.
    Notes
    -----
    This is a naive implementation using ARPACK as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.
    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.
    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_array, diags_array
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_array(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags_array(s) @ vT
    With only three singular values/vectors, the SVD approximates the original
    matrix.
    >>> u2, s2, vT2 = svds(A, k=3, solver='arpack')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True
    With all five singular values/vectors, we can reproduce the original
    matrix.
    >>> u3, s3, vT3 = svds(A, k=5, solver='arpack')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True
    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.
    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True
    The singular vectors are also orthogonal.
    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    # Documentation-only stub: the body is intentionally `pass`. The docstring
    # documents svds(..., solver='arpack'); the actual computation is performed
    # elsewhere (presumably by `svds` itself — this function is never executed
    # for its result).
    pass
def _svds_lobpcg_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                     maxiter=None, return_singular_vectors=True,
                     solver='lobpcg', rng=None):
    """
    Partial singular value decomposition of a sparse matrix using LOBPCG.
    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.
    In the descriptions below, let ``M, N = A.shape``.
    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N) - 1``.
    ncv : int, optional
        Ignored.
    tol : float, optional
        Tolerance for singular values. Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values.
    v0 : ndarray, optional
        If `k` is 1, the starting vector for iteration:
        an (approximate) left singular vector if ``N > M`` and a right singular
        vector otherwise. Must be of length ``min(M, N)``.
        Ignored otherwise.
        Default: random
    maxiter : int, default: 20
        Maximum number of iterations.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.
        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: if ``M <= N``, compute only the left singular vectors and
          return ``None`` for the right singular vectors. Otherwise, compute
          all singular vectors.
        - ``"vh"``: if ``M > N``, compute only the right singular vectors and
          return ``None`` for the left singular vectors. Otherwise, compute
          all singular vectors.
    solver : {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='lobpcg'``.
        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
        :ref:`'propack' <sparse.linalg.svds-propack>`
        are also supported.
    rng : `numpy.random.Generator`, optional
        Pseudorandom number generator state. When `rng` is None, a new
        `numpy.random.Generator` is created using entropy from the
        operating system. Types other than `numpy.random.Generator` are
        passed to `numpy.random.default_rng` to instantiate a ``Generator``.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.
    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.
    Notes
    -----
    This is a naive implementation using LOBPCG as an eigensolver
    on ``A.conj().T @ A`` or ``A @ A.conj().T``, depending on which one is more
    efficient.
    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.
    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_array, diags_array
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_array(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags_array(s) @ vT
    With only three singular values/vectors, the SVD approximates the original
    matrix.
    >>> u2, s2, vT2 = svds(A, k=3, solver='lobpcg')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.toarray(), atol=1e-3)
    True
    With all five singular values/vectors, we can reproduce the original
    matrix.
    >>> u3, s3, vT3 = svds(A, k=5, solver='lobpcg')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.toarray())
    True
    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.
    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.todense())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.todense())))
    True
    The singular vectors are also orthogonal.
    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    # Documentation-only stub: the body is intentionally `pass`. The docstring
    # documents svds(..., solver='lobpcg'); this function is never executed
    # for its result.
    pass
def _svds_propack_doc(A, k=6, ncv=None, tol=0, which='LM', v0=None,
                      maxiter=None, return_singular_vectors=True,
                      solver='propack', rng=None):
    """
    Partial singular value decomposition of a sparse matrix using PROPACK.
    Compute the largest or smallest `k` singular values and corresponding
    singular vectors of a sparse matrix `A`. The order in which the singular
    values are returned is not guaranteed.
    In the descriptions below, let ``M, N = A.shape``.
    Parameters
    ----------
    A : sparse matrix or LinearOperator
        Matrix to decompose. If `A` is a ``LinearOperator``
        object, it must define both ``matvec`` and ``rmatvec`` methods.
    k : int, default: 6
        Number of singular values and singular vectors to compute.
        Must satisfy ``1 <= k <= min(M, N)``.
    ncv : int, optional
        Ignored.
    tol : float, optional
        The desired relative accuracy for computed singular values.
        Zero (default) means machine precision.
    which : {'LM', 'SM'}
        Which `k` singular values to find: either the largest magnitude ('LM')
        or smallest magnitude ('SM') singular values. Note that choosing
        ``which='SM'`` will force the ``irl`` option to be set ``True``.
    v0 : ndarray, optional
        Starting vector for iterations: must be of length ``A.shape[0]``.
        If not specified, PROPACK will generate a starting vector.
    maxiter : int, optional
        Maximum number of iterations / maximal dimension of the Krylov
        subspace. Default is ``10 * k``.
    return_singular_vectors : {True, False, "u", "vh"}
        Singular values are always computed and returned; this parameter
        controls the computation and return of singular vectors.
        - ``True``: return singular vectors.
        - ``False``: do not return singular vectors.
        - ``"u"``: compute only the left singular vectors; return ``None`` for
          the right singular vectors.
        - ``"vh"``: compute only the right singular vectors; return ``None``
          for the left singular vectors.
    solver : {'arpack', 'propack', 'lobpcg'}, optional
        This is the solver-specific documentation for ``solver='propack'``.
        :ref:`'arpack' <sparse.linalg.svds-arpack>` and
        :ref:`'lobpcg' <sparse.linalg.svds-lobpcg>`
        are also supported.
    rng : `numpy.random.Generator`, optional
        Pseudorandom number generator state. When `rng` is None, a new
        `numpy.random.Generator` is created using entropy from the
        operating system. Types other than `numpy.random.Generator` are
        passed to `numpy.random.default_rng` to instantiate a ``Generator``.
    options : dict, optional
        A dictionary of solver-specific options. No solver-specific options
        are currently supported; this parameter is reserved for future use.
    Returns
    -------
    u : ndarray, shape=(M, k)
        Unitary matrix having left singular vectors as columns.
    s : ndarray, shape=(k,)
        The singular values.
    vh : ndarray, shape=(k, N)
        Unitary matrix having right singular vectors as rows.
    Notes
    -----
    This is an interface to the Fortran library PROPACK [1]_.
    The current default is to run with IRL mode disabled unless seeking the
    smallest singular values/vectors (``which='SM'``).
    References
    ----------
    .. [1] Larsen, Rasmus Munk. "PROPACK-Software for large and sparse SVD
           calculations." Available online. URL
           http://sun.stanford.edu/~rmunk/PROPACK (2004): 2008-2009.
    Examples
    --------
    Construct a matrix ``A`` from singular values and vectors.
    >>> import numpy as np
    >>> from scipy.stats import ortho_group
    >>> from scipy.sparse import csc_array, diags_array
    >>> from scipy.sparse.linalg import svds
    >>> rng = np.random.default_rng()
    >>> orthogonal = csc_array(ortho_group.rvs(10, random_state=rng))
    >>> s = [0.0001, 0.001, 3, 4, 5]  # singular values
    >>> u = orthogonal[:, :5]         # left singular vectors
    >>> vT = orthogonal[:, 5:].T      # right singular vectors
    >>> A = u @ diags_array(s) @ vT
    With only three singular values/vectors, the SVD approximates the original
    matrix.
    >>> u2, s2, vT2 = svds(A, k=3, solver='propack')
    >>> A2 = u2 @ np.diag(s2) @ vT2
    >>> np.allclose(A2, A.todense(), atol=1e-3)
    True
    With all five singular values/vectors, we can reproduce the original
    matrix.
    >>> u3, s3, vT3 = svds(A, k=5, solver='propack')
    >>> A3 = u3 @ np.diag(s3) @ vT3
    >>> np.allclose(A3, A.todense())
    True
    The singular values match the expected singular values, and the singular
    vectors are as expected up to a difference in sign.
    >>> (np.allclose(s3, s) and
    ...  np.allclose(np.abs(u3), np.abs(u.toarray())) and
    ...  np.allclose(np.abs(vT3), np.abs(vT.toarray())))
    True
    The singular vectors are also orthogonal.
    >>> (np.allclose(u3.T @ u3, np.eye(5)) and
    ...  np.allclose(vT3 @ vT3.T, np.eye(5)))
    True
    """
    # Documentation-only stub: the body is intentionally `pass`. The docstring
    # documents svds(..., solver='propack'); this function is never executed
    # for its result.
    pass
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@sparse@linalg@_eigen@_svds_doc.py@.PATH_END.py
|
{
"filename": "reconstruction_bump.ipynb",
"repo_name": "Magritte-code/pomme",
"repo_path": "pomme_extracted/pomme-main/docs/src/examples/spherical/reconstruction_bump.ipynb",
"type": "Jupyter Notebook"
}
|
# Reconstruction - spherical model
---
```python
import matplotlib.pyplot as plt
import numpy as np
import torch
import copy
from astropy import units, constants
from pomme.model import TensorModel, SphericalModel
from pomme.loss import Loss, diff_loss
from pomme.plot import plot_cube_2D
from pomme.utils import planck, T_CMB
from spherical import lines, velocities, frequencies, smodel, r_out, get_turbulence, get_boundary_condition
from spherical import smodel as smodel_truth, r_out, get_velocity, get_temperature, get_abundance
```
/STER/frederikd/pomme/docs/src/examples/spherical/spherical.py:45: RuntimeWarning: divide by zero encountered in true_divide
rho = Mdot / (4.0 * np.pi * rs**2 * v)
/home/frederikd/.local/lib/python3.9/site-packages/astroquery/lamda/core.py:145: UserWarning: The first time a LAMDA function is called, it must assemble a list of valid molecules and URLs. This list will be cached so future operations will be faster.
warnings.warn("The first time a LAMDA function is called, it must "
You have selected line:
CO(J=1-0)
Please check the properties that were inferred:
Frequency 1.152712018e+11 Hz
Einstein A coeff 7.203000000e-08 1/s
Molar mass 28.0101 g/mol
You have selected line:
CO(J=2-1)
Please check the properties that were inferred:
Frequency 2.305380000e+11 Hz
Einstein A coeff 6.910000000e-07 1/s
Molar mass 28.0101 g/mol
You have selected line:
CO(J=3-2)
Please check the properties that were inferred:
Frequency 3.457959899e+11 Hz
Einstein A coeff 2.497000000e-06 1/s
Molar mass 28.0101 g/mol
You have selected line:
CO(J=5-4)
Please check the properties that were inferred:
Frequency 5.762679305e+11 Hz
Einstein A coeff 1.221000000e-05 1/s
Molar mass 28.0101 g/mol
```python
obss = torch.load('obss.pt')
```
```python
# def get_velocity(model):
# """
# Get the velocity from the TensorModel.
# """
# return torch.exp(model['log_velocity'])
# def get_temperature(model):
# """
# Get the temperature from the TensorModel.
# """
# return torch.exp(model['log_temperature'])
# def get_abundance(model):
# """
# Get the abundance from the TensorModel.
# """
# return torch.exp(model['log_CO'])
```
```python
r = torch.exp(smodel_truth.model_1D['log_r'])
v_in = torch.exp(smodel_truth.model_1D['log_v_in'])
v_inf = torch.exp(smodel_truth.model_1D['log_v_inf'])
beta = 0.7
R_star = torch.exp(smodel_truth.model_1D['log_R_star'])
# Compute velocity
v = np.empty_like(r)
v[r <= R_star] = 0.0
v[r > R_star] = v_in + (v_inf - v_in) * (1.0 - R_star / r[r > R_star])**beta
T_in = 1.5e+3
epsilon = 0.5
# Compute temperature
T = np.empty_like(r)
T[r <= R_star] = T_in
T[r > R_star] = T_in * (R_star / r[r > R_star])**epsilon
Mdot = (5.0e-7 * units.M_sun / units.yr).si.value
rho = Mdot / (4.0 * np.pi * r**2 * v)
n_CO = (3.0e-4 * constants.N_A.si.value / 2.02e-3) * rho
n_CO[r <= R_star] = n_CO[n_CO<np.inf].max()
```
```python
plt.plot(smodel_truth.get_velocity(smodel_truth.model_1D).data)
plt.plot(v)
```
[<matplotlib.lines.Line2D at 0x7fc73c311bb0>]

```python
plt.plot(smodel_truth.get_temperature(smodel_truth.model_1D).data)
plt.plot(T)
plt.yscale('log')
```

```python
plt.plot(smodel_truth.get_abundance(smodel_truth.model_1D).data)
plt.plot(n_CO)
plt.yscale('log')
```

```python
smodel = SphericalModel(
rs = smodel_truth.rs,
model_1D = TensorModel.load('model_truth.h5'),
r_star = smodel_truth.r_star,
)
smodel.get_abundance = get_abundance
smodel.get_velocity = get_velocity
smodel.get_temperature = get_temperature
smodel.get_turbulence = get_turbulence
smodel.get_boundary_condition = get_boundary_condition
log_n_CO_init = np.log(5.0e+14 * (smodel.rs.min()/smodel.rs)**2)
smodel.model_1D['log_CO' ] = log_n_CO_init.copy()
# smodel.model_1D['log_velocity' ] = np.log(v).copy()
# smodel.model_1D['log_temperature'] = np.log(T).copy()
smodel.model_1D.free(['log_CO', 'log_v_in', 'log_v_inf', 'log_beta', 'log_T_in', 'log_epsilon'])
# smodel.model_1D.free(['log_CO', 'log_velocity', 'log_temperature'])
losses = Loss(['avg', 'rel', 'reg', 'cnt'])
```
```python
smodel.model_1D.info()
```
Variable key: Free/Fixed: Field: Min: Mean: Max:
log_CO Free True +1.082e+01 +2.233e+01 +3.385e+01
log_R_star Fixed False +2.573e+01 +2.573e+01 +2.573e+01
log_T_in Free False +7.824e+00 +7.824e+00 +7.824e+00
log_T_star Fixed False +7.824e+00 +7.824e+00 +7.824e+00
log_beta Free False -6.931e-01 -6.931e-01 -6.931e-01
log_epsilon Free False -5.108e-01 -5.108e-01 -5.108e-01
log_r Fixed True +2.343e+01 +2.919e+01 +3.494e+01
log_turbulence Fixed True +7.313e+00 +7.313e+00 +7.313e+00
log_v_in Free False +4.605e+00 +4.605e+00 +4.605e+00
log_v_inf Free False +9.903e+00 +9.903e+00 +9.903e+00
sizes: [1.49597871e+15]
shape: (1024,)
```python
imgs = smodel.image(lines, frequencies, r_max=r_out)
```
```python
plt.figure(dpi=150)
for obs, img in zip(obss, imgs):
plt.plot(velocities, obs.data)
plt.plot(velocities, img.data, marker='.')
```

```python
def steady_state_cont_loss(smodel):
"""
Loss assuming steady state hydrodynamics, i.e. vanishing time derivatives.
"""
# Get the model variables
rho = smodel.get_abundance(smodel.model_1D)
v_r = smodel.get_velocity (smodel.model_1D)
r = torch.from_numpy(smodel.rs)
# Continuity equation (steady state): div(ρ v) = 0
loss_cont = smodel.model_1D.diff_x(r**2 * rho * v_r)
# Compute the mean squared losses
loss = torch.mean((loss_cont/((r**2)*rho))**2)
# Return losses
return loss
```
```python
from torch.optim import Adam
from tqdm import tqdm
obss_avg = obss.mean(axis=1)
obss_rel = torch.einsum("ij, i -> ij", obss, 1.0 / obss.mean(axis=1))
# Get a mask for the elements outside the star
outside_star = torch.from_numpy(smodel.rs) > torch.exp(smodel.model_1D['log_R_star'])
def fit(losses, smodel, lines, frequencies, obss, N_epochs=10, lr=1.0e-1, w_avg=1.0, w_rel=1.0, w_reg=1.0, w_cnt=1.0):
params = [
smodel.model_1D['log_CO'],
# smodel.model_1D['log_v_in'],
# smodel.model_1D['log_v_inf'],
# smodel.model_1D['log_beta'],
# smodel.model_1D['log_T_in'],
# smodel.model_1D['log_epsilon'],
]
abundance_evol = [smodel.get_abundance(smodel.model_1D).detach().clone()]
optimizer = Adam(params, lr=lr)
for _ in tqdm(range(N_epochs)):
# Forward model
imgs = smodel.image(lines, frequencies, r_max=r_out)
imgs_avg= imgs.mean(axis=1)
imgs_rel= torch.einsum("ij, i -> ij", imgs, 1.0 / imgs.mean(axis=1))
# Compute the reproduction loss
losses['avg'] = w_avg * torch.nn.functional.mse_loss(imgs_avg, obss_avg)
losses['rel'] = w_rel * torch.nn.functional.mse_loss(imgs_rel, obss_rel)
# Compute the regularisation loss
losses['reg'] = w_reg * diff_loss(smodel.model_1D['log_CO'][outside_star])
# Compute the hydrodynamic loss
losses['cnt'] = w_cnt * steady_state_cont_loss(smodel)
# Set gradients to zero
optimizer.zero_grad()
# Backpropagate gradients
losses.tot().backward()
# Update parameters
optimizer.step()
abundance_evol.append(smodel.get_abundance(smodel.model_1D).detach().clone())
return imgs, losses, abundance_evol
```
```python
imgs, losses, a_evol = fit(losses, smodel, lines, frequencies, obss, N_epochs=3, lr=1.0e-1, w_avg=1.0, w_rel=1.0e+0, w_reg=1.0e-0, w_cnt=1.0e+0)
losses.renormalise_all()
losses.reset()
```
0%| | 0/3 [00:00<?, ?it/s]/home/frederikd/.local/lib/python3.9/site-packages/torch/autograd/__init__.py:200: UserWarning: CUDA initialization: The NVIDIA driver on your system is too old (found version 9010). Please update your GPU driver by downloading and installing a new version from the URL: http://www.nvidia.com/Download/index.aspx Alternatively, go to: https://pytorch.org to install a PyTorch version that has been compiled with your version of the CUDA driver. (Triggered internally at ../c10/cuda/CUDAFunctions.cpp:109.)
Variable._execution_engine.run_backward( # Calls into the C++ engine to run the backward pass
100%|██████████| 3/3 [01:00<00:00, 20.09s/it]
```python
imgs, loss, a_evol = fit(losses, smodel, lines, frequencies, obss, N_epochs=10, lr=1.0e-1, w_avg=1.0e+2, w_rel=1.0e+2, w_reg=1.0e-0, w_cnt=1.0e+0)
```
0%| | 0/10 [00:00<?, ?it/s]
100%|██████████| 10/10 [03:54<00:00, 23.43s/it]
```python
losses.plot()
```

```python
imgs, loss, a_evol = fit(losses, smodel, lines, frequencies, obss, N_epochs=50, lr=1.0e-1, w_avg=1.0e+3, w_rel=1.0e+3, w_reg=1.0e-0, w_cnt=1.0e+0)
```
100%|██████████| 50/50 [22:32<00:00, 27.06s/it]
```python
loss.plot()
```

```python
imgs, loss, a_evol = fit(losses, smodel, lines, frequencies, obss, N_epochs=50, lr=1.0e-1, w_avg=1.0e+6, w_rel=1.0e+4, w_reg=1.0e-0, w_cnt=1.0e+0)
```
100%|██████████| 50/50 [24:04<00:00, 28.90s/it]
```python
loss.plot()
```

```python
plt.figure(dpi=150)
for CO in a_evol[:]:
plt.plot(CO)
plt.plot(torch.exp(smodel_truth.model_1D['log_CO']).data)
plt.plot(np.exp(log_n_CO_init))
plt.yscale('log')
```

```python
plt.plot(smodel_truth.get_velocity(smodel_truth.model_1D).data)
plt.plot(smodel .get_velocity(smodel .model_1D).data)
```
[<matplotlib.lines.Line2D at 0x7fc734713e50>]

```python
plt.plot(smodel_truth.get_temperature(smodel_truth.model_1D).data)
plt.plot(smodel .get_temperature(smodel .model_1D).data)
```
[<matplotlib.lines.Line2D at 0x7fc7346764f0>]

```python
plt.plot(smodel_truth.get_abundance(smodel_truth.model_1D).data)
plt.plot(smodel .get_abundance(smodel .model_1D).data)
plt.yscale('log')
```

```python
plt.figure(dpi=150)
for obs, img in zip(obss, imgs):
plt.plot(velocities, obs.data)
plt.plot(velocities, img.data, marker='.')
```

```python
```
|
Magritte-codeREPO_NAMEpommePATH_START.@pomme_extracted@pomme-main@docs@src@examples@spherical@reconstruction_bump.ipynb@.PATH_END.py
|
{
"filename": "io.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/frontends/athena/io.py",
"type": "Python"
}
|
import numpy as np
from yt.funcs import mylog
from yt.utilities.io_handler import BaseIOHandler
from .data_structures import chk23
float_size = {"float": np.dtype(">f4").itemsize, "double": np.dtype(">f8").itemsize}
axis_list = ["_x", "_y", "_z"]
class IOHandlerAthena(BaseIOHandler):
    """I/O handler for the Athena frontend.

    Reads raw big-endian field data for each grid directly from the
    Athena output files, locating the data table via the
    CELL_DATA/POINT_DATA header (see ``get_read_table_offset``).
    """

    _dataset_type = "athena"
    _offset_string = "data:offsets=0"
    _data_string = "data:datatype=0"
    _read_table_offset = None
    def _field_dict(self, fhandle):
        # Build a mapping of field names.
        # NOTE(review): both `keys` and `val` are taken from the same
        # ``field_types`` keys view, so this returns an identity map
        # (name -> name) — presumably intentional; confirm against callers.
        keys = fhandle["field_types"].keys()
        val = fhandle["field_types"].keys()
        return dict(zip(keys, val))
    def _read_field_names(self, grid):
        # Not implemented for this frontend; field discovery happens elsewhere.
        pass
    def _read_chunk_data(self, chunk, fields):
        """Read ``fields`` for every grid in ``chunk`` straight from disk.

        Returns a dict mapping grid id -> {field: float64 ndarray}; arrays
        are transposed when ``grid.ds.field_ordering == 1``.
        """
        data = {}
        if len(chunk.objs) == 0:
            # Nothing to read for an empty chunk.
            return data
        for grid in chunk.objs:
            if grid.filename is None:
                continue
            f = open(grid.filename, "rb")
            data[grid.id] = {}
            grid_dims = grid.ActiveDimensions
            read_dims = grid.read_dims.astype("int64")
            grid_ncells = np.prod(read_dims)
            # Cell count of grid 0; field-map offsets appear to be expressed
            # relative to grid 0's layout and are rescaled below.
            grid0_ncells = np.prod(grid.index.grids[0].read_dims)
            # Byte offset of the start of the data table in this file.
            read_table_offset = get_read_table_offset(f)
            for field in fields:
                ftype, offsetr, dtype = grid.index._field_map[field]
                if grid_ncells != grid0_ncells:
                    # Rescale the recorded offset for grids whose cell count
                    # differs from grid 0.
                    offset = offsetr + (
                        (grid_ncells - grid0_ncells) * (offsetr // grid0_ncells)
                    )
                if grid_ncells == grid0_ncells:
                    offset = offsetr
                offset = int(offset) # Casting to be certain.
                # Additional byte offset to skip whole z-planes below this
                # grid's starting z index.
                file_offset = (
                    grid.file_offset[2]
                    * read_dims[0]
                    * read_dims[1]
                    * float_size[dtype]
                )
                # In-plane slices selecting this grid's x/y extent.
                xread = slice(grid.file_offset[0], grid.file_offset[0] + grid_dims[0])
                yread = slice(grid.file_offset[1], grid.file_offset[1] + grid_dims[1])
                f.seek(read_table_offset + offset + file_offset)
                if dtype == "float":
                    dt = ">f4"
                elif dtype == "double":
                    dt = ">f8"
                if ftype == "scalar":
                    # Re-seek (redundant with the seek above, but harmless)
                    # and read one value per cell, Fortran-ordered.
                    f.seek(read_table_offset + offset + file_offset)
                    v = np.fromfile(f, dtype=dt, count=grid_ncells).reshape(
                        read_dims, order="F"
                    )
                if ftype == "vector":
                    # Vector fields store 3 interleaved components; pick the
                    # component from the field-name suffix (_x/_y/_z).
                    vec_offset = axis_list.index(field[-1][-2:])
                    f.seek(read_table_offset + offset + 3 * file_offset)
                    v = np.fromfile(f, dtype=dt, count=3 * grid_ncells)
                    v = v[vec_offset::3].reshape(read_dims, order="F")
                if grid.ds.field_ordering == 1:
                    data[grid.id][field] = v[xread, yread, :].T.astype("float64")
                else:
                    data[grid.id][field] = v[xread, yread, :].astype("float64")
            f.close()
        return data
    def _read_data_slice(self, grid, field, axis, coord):
        """Return a single slice of ``field`` at index ``coord`` along ``axis``."""
        sl = [slice(None), slice(None), slice(None)]
        sl[axis] = slice(coord, coord + 1)
        if grid.ds.field_ordering == 1:
            # Fortran ordering: axes are reversed relative to the stored array.
            sl.reverse()
        return self._read_data_set(grid, field)[tuple(sl)]
    def _read_fluid_selection(self, chunks, selector, fields, size):
        """Read ``fields`` over ``chunks``, applying ``selector`` to fill
        flat float64 arrays of length ``size`` (one per field)."""
        chunks = list(chunks)
        if any((ftype != "athena" for ftype, fname in fields)):
            raise NotImplementedError
        rv = {}
        for field in fields:
            rv[field] = np.empty(size, dtype="float64")
        ng = sum(len(c.objs) for c in chunks)
        mylog.debug(
            "Reading %s cells of %s fields in %s grids",
            size,
            [f2 for f1, f2 in fields],
            ng,
        )
        ind = 0
        for chunk in chunks:
            data = self._read_chunk_data(chunk, fields)
            for g in chunk.objs:
                for field in fields:
                    ftype, fname = field
                    # `select` copies the selected cells into rv[field]
                    # starting at index `ind` and returns how many it wrote.
                    ds = data[g.id].pop(field)
                    nd = g.select(selector, ds, rv[field], ind) # caches
                    ind += nd
                # Drop this grid's buffers as soon as they are consumed.
                data.pop(g.id)
        return rv
def get_read_table_offset(f):
    """Locate the data table in an Athena output file.

    Scans forward for a line containing the ``CELL_DATA`` or ``POINT_DATA``
    marker, skips the line that follows it, and returns the resulting byte
    offset (``f.tell()``), which is where field data begins.

    Parameters
    ----------
    f : file object
        Open binary file handle; its position is advanced past the header.

    Returns
    -------
    int
        Byte offset just after the line following the marker line.

    Raises
    ------
    OSError
        If end-of-file is reached before either marker is found.  (The
        previous implementation spun forever here, since ``readline()``
        returns an empty string at EOF and the loop never broke.)
    """
    # Hoist the loop-invariant marker conversions out of the scan loop.
    chkc = chk23("CELL_DATA")
    chkp = chk23("POINT_DATA")
    line = f.readline()
    while line:  # empty string/bytes signals EOF
        splitup = line.strip().split()
        if chkc in splitup or chkp in splitup:
            # Skip the header line that follows the marker; the data
            # table starts immediately after it.
            f.readline()
            return f.tell()
        line = f.readline()
    raise OSError("CELL_DATA/POINT_DATA marker not found in Athena file")
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@frontends@athena@io.py@.PATH_END.py
|
{
"filename": "microlensing_compare.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/rubin_sim/maf/run_comparison/microlensing_compare.py",
"type": "Python"
}
|
import glob
import os
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import rubin_sim.maf.db as db
import rubin_sim.maf.metric_bundles as metricBundles
def microlensing_fom(
    save_folder,
    result_db_path,
    metric_data_path,
    figsize=None,
    figure_name="microlensing_fom",
):
    """
    Processes a folder, puts together results for
    discovery/detect metric, Npts metric, and Fisher metric,
    and plots them in the four tE bins of 1 - 10 days,
    10 - 30 days, 30 - 100 days, and 100 - 1000 days.
    Parameters
    ----------
    save_folder : `str`
        Folder in which the output figures are saved.
    result_db_path : `str`
        Path to the directory storing the result databases
        generated by MAF.
    metric_data_path : `str`
        Path to the directory storing the npz files
        generated by MAF.
    figsize : (`int`, `int`), optional
        Figure size in inches, passed through to `plot_fom`.
        Default None (plot_fom then uses (25, 30)).
    figure_name : `str`, optional
        Base name of the saved figure (without extension).
    """
    # get a dictionary of resultDb from given directory
    result_dbs = get_results_dbs(result_db_path)
    # the following line will be useful if you did not run MAF on all opsims
    run_names = list(result_dbs.keys())
    # retrieve metricBundles for each opsim run and store them in a dictionary
    bundle_dicts = {}
    for run_name in result_dbs:
        bundle_dicts[run_name] = bundle_dict_from_disk(result_dbs[run_name], run_name, metric_data_path)
    # generates results and metric info from default name of file
    results = np.zeros(len(list(bundle_dicts.keys())))
    results_compare = []
    # run_names is rebuilt below from the parsed npz file names.
    run_names = []
    metric_types = []
    min_t_es = np.zeros(len(list(bundle_dicts.keys())))
    max_t_es = np.zeros(len(list(bundle_dicts.keys())))
    for run in range(len(list(bundle_dicts.keys()))):
        # NOTE(review): the npz file is loaded from result_db_path, while the
        # docstring says the npz files live in metric_data_path — confirm
        # which directory actually holds them.
        npz = np.load(
            result_db_path + "/" + list(bundle_dicts.keys())[run] + ".npz",
            allow_pickle=True,
        )
        relevant_columns = ["metric_values", "mask"]
        df = pd.DataFrame.from_dict({item: npz[item] for item in relevant_columns})
        # Parse run name, metric type and tE range out of the file name.
        run_name, metric_type, min_t_e, max_t_e = parse_t_e_run_types(list(bundle_dicts.keys())[run])
        run_names.append(run_name)
        metric_types.append(metric_type)
        min_t_es[run] = min_t_e
        max_t_es[run] = max_t_e
        results[run] = get_results(df, metric_type)
        if metric_type == "Npts":
            # Replace the >= 1e11 sentinel values with NaN before comparison.
            nan_to_be = np.where(df["metric_values"] >= 10e10)[0]
            df["metric_values"][nan_to_be] = np.nan
        results_compare.append(df["metric_values"])
    run_names = np.array(run_names)
    metric_types = np.array(metric_types)
    results_compare = np.array(results_compare)
    plot_fom(
        results,
        run_names,
        metric_types,
        min_t_es,
        max_t_es,
        save_folder,
        figure_name,
        figsize=figsize,
    )
    plot_compare(results_compare, run_names, metric_types, min_t_es, max_t_es, save_folder)
    return
def parse_t_e_run_types(name):
    """
    Parse a MicrolensingMetric file name into its components.

    The name is expected to look like
    ``<run_name>_MicrolensingMetric<...>_<metric_type>_<...>_<min_tE>_<max_tE>``.

    Parameters
    ----------
    name : `str`
        A MicrolensingMetric file name.

    Returns
    -------
    run_name : `str`
        OpSim run name (the part before ``_MicrolensingMetric``).
    metric_type : `str`
        Metric type token ('detect', 'Npts', or 'Fisher').
    min_t_e : `str`
        Minimum einstein crossing time token.
    max_t_e : `str`
        Maximum einstein crossing time token.
    """
    parts = name.split("MicrolensingMetric")
    # Drop the trailing underscore that separated the run name from the marker.
    run_name = parts[0][:-1]
    tokens = parts[1].split("_")
    return run_name, tokens[1], tokens[3], tokens[4]
def get_results(df, run_type, fisher_sigmat_e_t_e_cutoff=0.1):
    """
    Reduce a MicrolensingMetric result table to a single summary number.

    Parameters
    ----------
    df : `pandas.Dataframe`
        Pandas dataframe of the results npz file; must contain a
        'metric_values' column.
    run_type : `str`
        Microlensing metric type: either 'detect', 'Npts', or 'Fisher'
        as parsed by the file name.
    fisher_sigmat_e_t_e_cutoff : `float`
        Maximum normalized uncertainty in tE (sigmatE/tE) as determined by
        3sigma values of pubished planet microlensing candidates.

    Returns
    -------
    result : `float`
        Fraction detected ('detect'), mean number of points per light curve
        ('Npts'), or fraction characterized ('Fisher').

    Raises
    ------
    ValueError
        If `run_type` is not one of the three recognized types.  (The
        previous implementation fell through and raised an
        UnboundLocalError on `result` instead.)
    """
    total = len(df)
    values = df["metric_values"]
    if run_type == "detect":
        # Fraction of discovered/detected events
        result = len(np.where(values == 1)[0]) / total
    elif run_type == "Npts":
        # Average number of points per lightcurve.  A single combined mask
        # replaces the fragile chained boolean indexing of the original:
        # drop NaNs, negative sentinels, and the >= 1e11 "no data" values.
        valid = (~np.isnan(values)) & (values >= 0) & (values <= 10e10)
        result = values[valid].sum() / total
    elif run_type == "Fisher":
        # Fraction of events with sigmatE/tE below the cutoff of 0.1
        result = len(np.where(values < fisher_sigmat_e_t_e_cutoff)[0]) / total
    else:
        raise ValueError(
            f"unknown run_type {run_type!r}; expected 'detect', 'Npts', or 'Fisher'"
        )
    return result
def plot_fom(results, run_names, run_types, min_t_e, max_t_e, save_folder, figure_name, figsize):
    """
    Plots the results from the discovery/detect metric, Npts metric,
    and Fisher metric in three sub plots
    Parameters
    ----------
    results : `np.ndarray`, (N,)
        Results from the MicrolensingMetric from get_results() from the
        respective microlensing metric type
    run_names : `np.ndarray`, (N,)
        Array of names of the OpSim run that was used in the metric
    run_types : `np.ndarray`, (N,)
        Array of strings describing microlensing metric type:
        either 'detect', 'Npts', or 'Fisher' as parsed by the file name
    min_t_e : `np.ndarray`, (N,)
        Array of values describing the minium einstein crossing time (tE)
        as parsed by the file name
    max_t_e : `np.ndarray`, (N,)
        Array of values describing the maximum einstein crossing time (tE)
        as parsed by the file name
    save_folder : `str`
        String of folder name to save figure
    figure_name : `str`
        String of figure name
    figsize : (`int`, `int`)
        Tuple of figure size in inches.
        Default is None, which sets figsize = (25, 30)
    """
    if figsize is None:
        figsize = (25, 30)
    # Three side-by-side panels sharing the run-name y axis.
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, sharey=True, figsize=figsize)
    plt.tight_layout()
    plt.subplots_adjust(wspace=0, hspace=0)
    subfig_list = [ax1, ax2, ax3]
    plt.rcdefaults()
    font = {"weight": "heavy", "size": 30}
    plt.rc("font", **font)
    # Group run indices by (min_tE, max_tE) bin and build legend labels.
    t_e_range_list = []
    time_run_names = []
    for i, j in zip(np.unique(min_t_e), np.unique(max_t_e)):
        idx_in_range = np.where((min_t_e >= i) & (max_t_e <= j))
        t_e_range_list.append(idx_in_range)
        time_run_names.append("tE {}-{} days".format(int(i), int(j)))
    # One index set per metric type; each maps to one subplot.
    detect_runs_idx = np.where(run_types == "detect")
    npts_runs_idx = np.where(run_types == "Npts")
    fisher_runs_idx = np.where(run_types == "Fisher")
    run_type_list = [detect_runs_idx, npts_runs_idx, fisher_runs_idx]
    for t_e_range in range(len(t_e_range_list)):
        for run_type in range(len(run_type_list)):
            # sorted alphabetically according to name of run
            idx_list = list(
                zip(
                    np.intersect1d(t_e_range_list[t_e_range], run_type_list[run_type]),
                    run_names[np.intersect1d(t_e_range_list[t_e_range], run_type_list[run_type])],
                )
            )
            idx_list.sort(key=lambda x: x[1])
            sorted_idxs = np.array([x[0] for x in idx_list])
            subfig_list[run_type].plot(
                results[sorted_idxs],
                run_names[sorted_idxs],
                label=time_run_names[t_e_range],
                marker=".",
                markersize=15,
                linewidth=2.5,
            )
    ax3.legend(bbox_to_anchor=(1, 1), fontsize=20)
    plt.tight_layout()
    plt.subplots_adjust(bottom=0.05)
    ax1.set_xlabel("Discovery Efficiency")
    ax2.set_xlabel("Avg Number of Points")
    ax2.set_xscale("log")
    # NOTE(review): "Characaterization" looks like a typo in this
    # user-facing axis label — confirm before changing rendered output.
    ax3.set_xlabel("Characaterization Efficiency \n ($\\sigma_{t_E}/t_E$ < 0.1)")
    plt.savefig(save_folder + "/" + figure_name + ".png", bbox_inches="tight")
    return
def plot_compare(results, run_names, run_types, min_t_e, max_t_e, save_folder, npts_required=10):
    """
    Plot confusion-matrix style figures comparing the microlensing metrics.

    For every tE range and every OpSim run, compare the events that are
    detected, characterized (via Fisher), and that have at least
    ``npts_required`` points within 2 tE, saving one figure per pairing.

    Parameters
    ----------
    results : `np.ndarray`, (N,)
        Results from the MicrolensingMetric from get_results() from the
        respective microlensing metric type
    run_names : `np.ndarray`, (N,)
        Array of names of the OpSim run that was used in the metric
    run_types : `np.ndarray`, (N,)
        Array of strings describing microlensing metric type:
        either 'detect', 'Npts', or 'Fisher' as parsed by the file name
    min_t_e : `np.ndarray`, (N,)
        Array of values describing the minimum einstein crossing time (tE)
        as parsed by the file name
    max_t_e : `np.ndarray`, (N,)
        Array of values describing the maximum einstein crossing time (tE)
        as parsed by the file name
    save_folder : `str`
        String of folder name to save figures
    npts_required : `int`
        Number of points within 2tE required for the number of points fraction.
    """
    plt.rcdefaults()
    plt.rc("font", **{"weight": "heavy", "size": 20})
    # Group event indices by each (min tE, max tE) range and label each range.
    t_e_groups = []
    t_e_labels = []
    for t_e_lo, t_e_hi in zip(np.unique(min_t_e), np.unique(max_t_e)):
        t_e_groups.append(np.where((min_t_e >= t_e_lo) & (max_t_e <= t_e_hi)))
        t_e_labels.append("tE {}-{} days".format(int(t_e_lo), int(t_e_hi)))
    # Indices of each metric type.
    detect_idx = np.where(run_types == "detect")
    npts_idx = np.where(run_types == "Npts")
    fisher_idx = np.where(run_types == "Fisher")
    for t_e_group, t_e_label in zip(t_e_groups, t_e_labels):
        for opsim_run in np.unique(run_names):
            run_idx = np.where(run_names == opsim_run)
            in_range_run = np.intersect1d(t_e_group, run_idx)
            detect_vals = results[np.intersect1d(in_range_run, detect_idx)]
            npts_vals = results[np.intersect1d(in_range_run, npts_idx)]
            fisher_vals = results[np.intersect1d(in_range_run, fisher_idx)]
            # One confusion-matrix figure per pair of criteria.
            confusion_matrix_plot(
                detected_fisher_comparison(fisher_vals, detect_vals),
                "Discovered",
                "Characterized",
                opsim_run,
                t_e_label,
                save_folder,
            )
            confusion_matrix_plot(
                fisher_npts_comparison(fisher_vals, npts_vals),
                "More than {} Points".format(npts_required),
                "Characterized",
                opsim_run,
                t_e_label,
                save_folder,
            )
            confusion_matrix_plot(
                detected_npts_comparison(detect_vals, npts_vals),
                "More than {} Points".format(npts_required),
                "Detected",
                opsim_run,
                t_e_label,
                save_folder,
            )
    return
def confusion_matrix_plot(comparison_matrix, xlabel, ylabel, run_name, t_e_range, save_folder):
    """
    Plots a confusion matrix type plot comparing two metric types.
    Parameters
    ----------
    comparison_matrix : `np.ndarray`, (2, 2)
        Array comparing two metric types (A and B) with the following shape:
        [[(Yes A and Yes B), (Yes A and No B)], [(No A and Yes B),
        (No A and No B)]]
        where Yes A and Yes B are the number of events that pass both
        the A and B criteria.
    xlabel : `str`
        String of xlabel (also used in file name of figure)
    ylabel : `str`
        String of ylabel (also used in file name of figure)
    run_name : `str`
        Name of the OpSim run that was used in the metric
        (used in labels and file name)
    t_e_range : `str`
        String of the range of the tE (used in labels and file name)
    save_folder : `str`
        String of folder name to save figures
    """
    fig, ax = plt.subplots(figsize=(5, 5))
    ax.matshow(comparison_matrix, cmap=plt.cm.Blues, alpha=0.3)
    # Annotate every cell with its count; iterate over the actual matrix
    # shape instead of assuming a square matrix.
    n_rows, n_cols = np.shape(comparison_matrix)
    for i in range(n_rows):
        for j in range(n_cols):
            ax.text(
                x=j,
                y=i,
                s="{}".format(comparison_matrix[i, j]),
                va="center",
                ha="center",
                size="medium",
            )
    ax.set(ylabel=ylabel, xlabel=xlabel, title=run_name + "\n" + t_e_range + "\n")
    # Pin ticks to the cell centers before labelling them. Relying on
    # matshow's default ticks and padding the labels with a leading NaN is
    # fragile and raises a warning/error on recent matplotlib versions.
    ax.set_xticks(range(n_cols))
    ax.set_yticks(range(n_rows))
    ax.set_xticklabels(["Yes", "No"][:n_cols])
    ax.set_yticklabels(["Yes", "No"][:n_rows])
    plt.tight_layout()
    plt.savefig(save_folder + "/{}_{}_{}_{}.png".format(run_name, t_e_range, ylabel, xlabel))
    plt.show()
    plt.close()
    return
def detected_fisher_comparison(fisher_results, detect_results, fisher_sigmat_e_t_e_cutoff=0.1):
    """
    Returns an array of the following form where
    A = fisher criteria and B = detection criteria:
    [[(Yes A and Yes B), (Yes A and No B)], [(No A and Yes B),
    (No A and No B)]]
    where Yes A and Yes B are the number of events that pass both the
    A and B criteria.
    Parameters
    ----------
    fisher_results : `np.ndarray`, (N,)
        Array of results from running the Fisher metric of the
        microlensing metric
    detect_results : `np.ndarray`, (N,)
        Array of results from running the detect metric of the
        microlensing metric (1 for detected, 0 otherwise)
    fisher_sigmat_e_t_e_cutoff : `float`
        Maximum normalized uncertainty in tE (sigmatE/tE) as determined by
        3sigma values of published planet microlensing candidates
    Returns
    -------
    `np.ndarray`, (2, 2)
        Counts of events in each of the four criteria combinations.
    """
    # "Characterized" means sigmatE/tE strictly below the cutoff. Use >= for
    # the complement so events exactly at the cutoff are still counted; the
    # previous strict > comparison silently dropped them from the matrix, so
    # the four cells did not sum to N.
    char = fisher_results < fisher_sigmat_e_t_e_cutoff
    nchar = fisher_results >= fisher_sigmat_e_t_e_cutoff
    detect = detect_results == 1
    ndetect = detect_results == 0
    return np.array(
        [
            [np.sum(char & detect), np.sum(char & ndetect)],
            [np.sum(nchar & detect), np.sum(nchar & ndetect)],
        ]
    )
def fisher_npts_comparison(fisher_results, npts_results, npts_required=10, fisher_sigmat_e_t_e_cutoff=0.1):
    """
    Returns an array of the following form where
    A = fisher criteria and B = npts criteria:
    [[(Yes A and Yes B), (Yes A and No B)], [(No A and Yes B),
    (No A and No B)]]
    where Yes A and Yes B are the number of events that pass both the
    A and B criteria.
    Parameters
    ----------
    fisher_results : `np.ndarray`, (N,)
        Array of results from running the Fisher metric of the
        microlensing metric
    npts_results : `np.ndarray`, (N,)
        Array of results from running the Npts metric of the
        microlensing metric
    npts_required : `int`
        Number of points within 2tE required for the number of points fraction.
    fisher_sigmat_e_t_e_cutoff : `float`
        Maximum normalized uncertainty in tE (sigmatE/tE) as determined by
        3sigma values of published planet microlensing candidates
    Returns
    -------
    `np.ndarray`, (2, 2)
        Counts of events in each of the four criteria combinations.
    """
    # Use exact complements (>= and <=) so events exactly at the cutoff or
    # with exactly npts_required points land in a cell; the previous strict
    # comparisons on both sides silently dropped them from the matrix.
    char = fisher_results < fisher_sigmat_e_t_e_cutoff
    nchar = fisher_results >= fisher_sigmat_e_t_e_cutoff
    enough_npts = npts_results > npts_required
    too_few_npts = npts_results <= npts_required
    return np.array(
        [
            [np.sum(char & enough_npts), np.sum(char & too_few_npts)],
            [np.sum(nchar & enough_npts), np.sum(nchar & too_few_npts)],
        ]
    )
def detected_npts_comparison(detect_results, npts_results, npts_required=10):
    """
    Returns an array of the following form where
    A = detect criteria and B = npts criteria:
    [[(Yes A and Yes B), (Yes A and No B)], [(No A and Yes B),
    (No A and No B)]]
    where Yes A and Yes B are the number of events that pass both the
    A and B criteria.
    Parameters
    ----------
    detect_results : `np.ndarray`, (N,)
        Array of results from running the detect metric of the
        microlensing metric (1 for detected, 0 otherwise)
    npts_results : `np.ndarray`, (N,)
        Array of results from running the Npts metric of the
        microlensing metric
    npts_required : `int`
        Number of points within 2tE required for the number of points fraction.
    Returns
    -------
    `np.ndarray`, (2, 2)
        Counts of events in each of the four criteria combinations.
    """
    # Use <= for the "too few points" side so events with exactly
    # npts_required points are still counted; the previous strict <
    # comparison silently dropped them from the matrix.
    detect = detect_results == 1
    ndetect = detect_results == 0
    enough_npts = npts_results > npts_required
    too_few_npts = npts_results <= npts_required
    return np.array(
        [
            [np.sum(detect & enough_npts), np.sum(detect & too_few_npts)],
            [np.sum(ndetect & enough_npts), np.sum(ndetect & too_few_npts)],
        ]
    )
def get_results_dbs(result_db_path):
    """
    Create a dictionary of ResultsDb objects from result-db files
    via PCW Hackathon 2020 resources
    Parameters
    ----------
    result_db_path : `str`
        Path to the directory storing the result databases
        generated by MAF.
    Returns
    -------
    result_dbs : `dict`
        A dictionary containing the ResultsDb objects
        reconstructed from result databases in the provided directory.
    """
    loaded_dbs = {}
    for db_file in glob.glob(os.path.join(result_db_path, "*_result.db")):
        # Run name is the file name with the trailing "_result.db" stripped.
        opsim_name = os.path.basename(db_file).rsplit("_", 1)[0]
        results_db = db.ResultsDb(database=db_file)
        # Skip results.db files that contain no metrics.
        if len(results_db.getAllMetricIds()) > 0:
            loaded_dbs[opsim_name] = results_db
    return loaded_dbs
def bundle_dict_from_disk(result_db, run_name, metric_data_path):
    """
    Load metric data from disk and import them into metricBundles.
    via PCW Hackathon 2020 resources
    Parameters
    ----------
    result_db : `ResultsDb`
        A ResultsDb object
    run_name : `str`
        The name of the opsim database for the metrics in result_db
    metric_data_path : `str`
        The path to the directory where the metric datafiles are stored.
    Returns
    -------
    bundle_dict : `dict`
        A dictionary of metricBundles reconstructed from the data
        stored on disk, keyed by (metric id, metric name).
    """
    bundles = {}
    for row in result_db.getMetricDisplayInfo():
        # Rebuild each bundle from its on-disk data file and tag it with
        # the opsim run it came from.
        bundle = metricBundles.create_empty_metric_bundle()
        bundle.read(os.path.join(metric_data_path, row["metricDataFile"]))
        bundle.set_run_name(run_name)
        bundles[row["metric_id"], row["metric_name"]] = bundle
    return bundles
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@rubin_sim@maf@run_comparison@microlensing_compare.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "lsst-ts/ts_wep",
"repo_path": "ts_wep_extracted/ts_wep-main/doc/conf.py",
"type": "Python"
}
|
"""Sphinx configuration file for an LSST stack package.
This configuration only affects single-package Sphinx documentation builds.
"""
import lsst.ts.wep
from documenteer.conf.pipelinespkg import *
project = "ts_wep"
html_theme_options["logotext"] = project
html_title = project
html_short_title = project
doxylink = {}
# Support the sphinx extension of mermaid
extensions = [
"sphinxcontrib.mermaid",
"sphinx_automodapi.automodapi",
]
|
lsst-tsREPO_NAMEts_wepPATH_START.@ts_wep_extracted@ts_wep-main@doc@conf.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "bek0s/gbkfit",
"repo_path": "gbkfit_extracted/gbkfit-master/docs/source/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# Make the gbkfit package source importable by autodoc without installing it.
sys.path.insert(0, os.path.abspath('../../src/gbkfit/'))
# -- Project information -----------------------------------------------------
project = 'gbkfit'
copyright = '2020, Georgios Bekiaris'
author = 'Georgios Bekiaris'
# The full version, including alpha/beta/rc tags
release = '0.0.1'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.autosummary',
    'sphinx.ext.intersphinx',
    'sphinx.ext.mathjax',
    'sphinx.ext.autosectionlabel',
    'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# Root document; set explicitly for compatibility with Sphinx < 2.0, whose
# default root was 'contents' rather than 'index'.
master_doc = 'index'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
|
bek0sREPO_NAMEgbkfitPATH_START.@gbkfit_extracted@gbkfit-master@docs@source@conf.py@.PATH_END.py
|
{
"filename": "realtransforms.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/fftpack/realtransforms.py",
"type": "Python"
}
|
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.fftpack` namespace for importing the functions
# included below.
from scipy._lib.deprecation import _sub_module_deprecation
# Public names re-exported (via the deprecation shim below) so existing
# `from scipy.fftpack.realtransforms import dct`-style imports keep working.
__all__ = [  # noqa: F822
    'dct', 'idct', 'dst', 'idst', 'dctn', 'idctn', 'dstn', 'idstn'
]
def __dir__():
    """Limit ``dir()`` on this module to the deprecated public names."""
    return __all__
def __getattr__(name):
    """Resolve ``name`` through the shared deprecation helper, which points
    callers at the `scipy.fftpack` namespace instead of this private path."""
    return _sub_module_deprecation(sub_package="fftpack", module="realtransforms",
                                   private_modules=["_realtransforms"], all=__all__,
                                   attribute=name)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@fftpack@realtransforms.py@.PATH_END.py
|
{
"filename": "_dy.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/box/_dy.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class DyValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``dy`` property of ``box`` traces."""

    def __init__(self, plotly_name="dy", parent_name="box", **kwargs):
        # Python 3 zero-argument super() replaces the legacy
        # super(DyValidator, self) form; behavior is unchanged.
        # edit_type defaults to "calc" unless the caller overrides it.
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@box@_dy.py@.PATH_END.py
|
{
"filename": "computeOccurrence_totalReliability-checkpoint.ipynb",
"repo_name": "stevepur/DR25-occurrence-public",
"repo_path": "DR25-occurrence-public_extracted/DR25-occurrence-public-main/GKbaselineSC0p9/.ipynb_checkpoints/computeOccurrence_totalReliability-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import os
import requests
import pandas as pd
from astropy.io import fits
from cStringIO import StringIO
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import gamma
from scipy.optimize import minimize
from scipy.interpolate import RectBivariateSpline
import emcee
import corner
import scipy.io as sio
from ipywidgets import FloatProgress
from IPython.display import display
import time
```
```python
stellarCatalog = "../stellarCatalogs/dr25_stellar_supp_gaia_clean_GK.txt"
pcCatalog = "koiCatalogs/dr25_GK_PCs.csv"
period_rng = (50, 400)
n_period = 57
rp_rng = (0.75, 2.5)
n_rp = 61
# for quick tests
nWalkers = 6
nBurnin = 200
nMcmc = 1000
# for production runs
# nWalkers = 16
# nBurnin = 1000
# nMcmc = 5000
model = "dualPowerLaw"
```
```python
def rateModel(x, y, xRange, yRange, theta, model):
if model == "dualPowerLaw":
f0, alpha, beta = theta
ap1 = alpha+1;
bp1 = beta+1;
r = f0*(ap1*(x**alpha)/(xRange[1]**ap1-xRange[0]**ap1))*(bp1*(y**beta)/(yRange[1]**bp1-yRange[0]**bp1))
else:
raise ValueError('Bad model name');
return r
def getModelLabels(model):
    """Return corner-plot axis labels matching the parameter order of theta.

    For the "dualPowerLaw" model, initRateModel builds
    theta = [f0, alpha, beta], so the labels must be F, alpha, beta in
    that order; the previous ordering swapped alpha and beta, mislabeling
    the corner-plot axes.
    """
    if model == "dualPowerLaw":
        return [r"$F$", r"$\alpha$", r"$\beta$"]
    else:
        raise ValueError('Bad model name')
def initRateModel(model):
if model == "dualPowerLaw":
f0 = 0.75
alpha = -0.53218
beta = -0.5
theta = [f0, alpha, beta]
else:
raise ValueError('Bad model name');
return theta
def lnPoisprior(theta, model):
if model == "dualPowerLaw":
if 0.0 <= theta[0] <= 1 \
and -5.0 <= theta[1] <= 5.0 \
and -5.0 <= theta[2] <= 5.0:
return 1.0
else:
raise ValueError('Bad model name');
# print(theta)
return -np.inf
```
```python
from scipy.integrate import romb
def integrate2DGrid(g, dx, dy):
if g.shape[0]%2 == 0 or g.shape[1]%2 == 0:
raise ValueError('integrate2DGrid requires a grid with odd number of points on a side');
return romb(romb(g, dx), dy)
def integrateRateModel(periodRange, rpRange, theta, model):
nPts = 2**5+1 # must be 2**n + 1
pGrid, rGrid = np.meshgrid(np.linspace(periodRange[0], periodRange[1], nPts),
np.linspace(rpRange[0], rpRange[1], nPts),
indexing="ij")
dp = (pGrid[1,0]-pGrid[0,0])
dr = (rGrid[0,1]-rGrid[0,0])
if theta.ndim == 1:
y = rateModel(pGrid, rGrid, periodRange, rpRange, theta, model)
return integrate2DGrid(y, dp, dr)
else: # assume first dimension is array of thetas
ret = np.zeros(theta.shape[0])
if len(ret) > 100:
f = FloatProgress(min=0, max=len(ret))
display(f)
for i in range(len(ret)):
y = rateModel(pGrid, rGrid, periodRange, rpRange, theta[i,:], model)
ret[i] = integrate2DGrid(y, dp, dr)
if len(ret) > 100:
f.value += 1
return ret
def integratePopTimesComp(periodRange, rpRange, theta, model, compGrid):
nP = compGrid.shape[0]
nR = compGrid.shape[1]
pGrid, rGrid = np.meshgrid(np.linspace(periodRange[0], periodRange[1], nP),
np.linspace(rpRange[0], rpRange[1], nR),
indexing="ij")
dp = (pGrid[1,0]-pGrid[0,0])
dr = (rGrid[0,1]-rGrid[0,0])
y = rateModel(pGrid, rGrid, periodRange, rpRange, theta, model)*compGrid
return integrate2DGrid(y, dp, dr)
```
```python
# population inference functions
def lnlike(theta):
pop = rateModel(period_grid, rp_grid, period_rng, rp_rng, theta, model) * summedCompleteness
pop = 0.5 * (pop[:-1, :-1] + pop[1:, 1:])
norm = np.sum(pop * vol)
ll = np.sum(np.log(rateModel(koi_periods, koi_rps, period_rng, rp_rng, theta, model))) - norm
return ll if np.isfinite(ll) else -np.inf
# The ln-probability function is just propotional to the ln-likelihood
# since we're assuming uniform priors.
def lnprob(theta):
lp = lnPoisprior(theta, model)
if not np.isfinite(lp):
return -np.inf
return lnlike(theta)
# The negative ln-likelihood is useful for optimization.
# Optimizers want to *minimize* your function.
def nll(theta):
ll = lnlike(theta)
return -ll if np.isfinite(ll) else 1e15
```
```python
# population analysis functions
# We'll reuse these functions to plot all of our results.
def make_plot(pop_comp, x0, x, y, ax):
# print("in make_plot, pop_comp:")
# print(pop_comp.shape)
pop = 0.5 * (pop_comp[:, 1:] + pop_comp[:, :-1])
# print("pop:")
# print(pop.shape)
pop = np.sum(pop * np.diff(y)[None, :, None], axis=1)
a, b, c, d, e = np.percentile(pop * np.diff(x)[0], [2.5, 16, 50, 84, 97.5], axis=0)
ax.fill_between(x0, a, e, color="k", alpha=0.1, edgecolor="none")
ax.fill_between(x0, b, d, color="k", alpha=0.3, edgecolor="none")
ax.plot(x0, c, "k", lw=1)
def plot_results(samples):
# Loop through the samples and compute the list of population models.
samples = np.atleast_2d(samples)
pop = np.empty((len(samples), period_grid.shape[0], period_grid.shape[1]))
gamma_earth = np.empty((len(samples)))
for i, p in enumerate(samples):
pop[i] = rateModel(period_grid, rp_grid, period_rng, rp_rng, p, model)
gamma_earth[i] = rateModel(365.25, 1.0, period_rng, rp_rng, p, model) * 365.
fig, axes = plt.subplots(2, 2, figsize=(10, 8))
fig.subplots_adjust(wspace=0.4, hspace=0.4)
# Integrate over period.
dx = 0.25
x = np.arange(rp_rng[0], rp_rng[1] + dx, dx)
n, _ = np.histogram(koi_rps, x)
# Plot the observed radius distribution.
ax = axes[0, 0]
make_plot(pop * summedCompleteness[None, :, :], rp, x, period, ax)
ax.errorbar(0.5*(x[:-1]+x[1:]), n, yerr=np.sqrt(n), fmt=".k",
capsize=0)
ax.set_xlim(rp_rng[0], rp_rng[1])
ax.set_xlabel("$R_p\,[R_\oplus]$")
ax.set_ylabel("\# of detected planets")
# Plot the true radius distribution.
ax = axes[0, 1]
make_plot(pop, rp, x, period, ax)
ax.set_xlim(rp_rng[0], rp_rng[1])
ax.set_ylim(0, 0.37)
ax.set_xlabel("$R_p\,[R_\oplus]$")
ax.set_ylabel("$\mathrm{d}N / \mathrm{d}R$; $\Delta R = 0.25\,R_\oplus$")
# Integrate over period.
dx = 31.25
x = np.arange(period_rng[0], period_rng[1] + dx, dx)
n, _ = np.histogram(koi_periods, x)
# Plot the observed period distribution.
ax = axes[1, 0]
make_plot(np.swapaxes(pop * summedCompleteness[None, :, :], 1, 2), period, x, rp, ax)
ax.errorbar(0.5*(x[:-1]+x[1:]), n, yerr=np.sqrt(n), fmt=".k",
capsize=0)
ax.set_xlim(period_rng[0], period_rng[1])
ax.set_ylim(0, 79)
ax.set_xlabel("$P\,[\mathrm{days}]$")
ax.set_ylabel("\# of detected planets")
# Plot the true period distribution.
ax = axes[1, 1]
make_plot(np.swapaxes(pop, 1, 2), period, x, rp, ax)
ax.set_xlim(period_rng[0], period_rng[1])
ax.set_ylim(0, 0.27)
ax.set_xlabel("$P\,[\mathrm{days}]$")
ax.set_ylabel("$\mathrm{d}N / \mathrm{d}P$; $\Delta P = 31.25\,\mathrm{days}$")
return gamma_earth, fig
```
```python
stellarTargets = pd.read_csv(stellarCatalog)
base_kois = pd.read_csv(pcCatalog)
m = (period_rng[0] <= base_kois.koi_period) & (base_kois.koi_period <= period_rng[1])
m &= np.isfinite(base_kois.corrected_prad) & (rp_rng[0] <= base_kois.corrected_prad) & (base_kois.corrected_prad <= rp_rng[1])
kois = pd.DataFrame(base_kois[m])
allKois = kois
```
```python
fig, ax = plt.subplots(figsize=(15,10));
ax.errorbar(kois.koi_period, kois.koi_prad,
yerr = [-kois.koi_prad_err2, kois.koi_prad_err1],
fmt="k.", alpha = 0.5);
ax.errorbar(kois.koi_period, kois.corrected_prad,
yerr = [-kois.corrected_prad_err2, kois.corrected_prad_err1],
fmt="r.", alpha = 0.5);
plt.xlabel("period");
plt.ylabel("planet radius");
plt.title("KOI Radius Change");
plt.ylim([0, 2.5])
plt.xlim([50, 400])
```
(50, 400)

```python
period = np.linspace(period_rng[0], period_rng[1], n_period)
rp = np.linspace(rp_rng[0], rp_rng[1], n_rp)
period_grid, rp_grid = np.meshgrid(period, rp, indexing="ij")
periodShape = period_grid.shape
```
```python
inputgrid = "../completenessContours/out_sc0_GK_baseline.fits.gz"
hdulist = fits.open(inputgrid)
cumulative_array = hdulist[0].data
kiclist = np.asarray(hdulist[1].data, dtype=np.int32)
probdet = np.transpose(cumulative_array[0])
probtot = np.transpose(cumulative_array[1])
prihdr = hdulist[0].header
min_comp_period = prihdr["MINPER"]
max_comp_period = prihdr["MAXPER"]
n_comp_period = prihdr["NPER"]
min_comp_rp = prihdr["MINRP"]
max_comp_rp = prihdr["MAXRP"]
n_comp_rp = prihdr["NRP"]
# print "KIC list length" + '{:6d}'.format(kiclist.size)
period_want = np.linspace(min_comp_period, max_comp_period, n_comp_period)
rp_want = np.linspace(min_comp_rp, max_comp_rp, n_comp_rp)
period_want2d, rp_want2d = np.meshgrid(period_want, rp_want)
# interpolate the numerical grids onto the period_grid, rp_grid space
#print("size probtot = " + str(np.shape(probtot)))
#print("size period_want = " + str(np.shape(period_want)))
#print("size rp_want = " + str(np.shape(rp_want)))
numCompVeInterp = RectBivariateSpline(period_want, rp_want, probtot)
```
```python
```
```python
summedCompleteness = numCompVeInterp(period, rp)
```
```python
```
```python
contourLevels = np.arange(1e-3, 1e-2, 1e-3)
plt.pcolor(period_grid, rp_grid, summedCompleteness, cmap="BuGn")
c = plt.contour(period_grid, rp_grid, summedCompleteness / kiclist.size, contourLevels,
colors="k", alpha=0.8)
#c = plt.contour(period_grid, rp_grid, numCompVe / kiclist.size,
# colors="k", alpha=0.8)
plt.ylim(0.5, 2.5)
plt.xlim(50, 400)
plt.clabel(c, fontsize=12, inline=1, fmt="%.3f")
plt.title("mean numerical pipeline detection*vetting efficiency")
plt.xlabel("period [days]")
plt.ylabel("$R_p \, [R_\oplus]$");
```

```python
```
Compute a basic occurrence rate without reliability
```python
kois = allKois
bounds = [(-5, 5), (-5, 5), (-5, 5)]
# The ln-likelihood function given at the top of this post.
koi_periods = np.array(kois.koi_period)
koi_rps = np.array(kois.corrected_prad)
vol = np.diff(period_grid, axis=0)[:, :-1] * np.diff(rp_grid, axis=1)[:-1, :]
theta_0 = initRateModel(model)
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
print(r.x)
ge, fig = plot_results(r.x);
```
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: invalid value encountered in log
import sys
[ 0.5111504 -0.62150153 0.37910062]

```python
rateModel(365.25, 1.0, period_rng, rp_rng, theta_0, model)*365
```
0.3777440266919595
```python
##################################################################
ndim, nwalkers = len(r.x), nWalkers
pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=8)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, nBurnin)
sampler.reset()
# Production.
start_time = time.time()
pos, _, _ = sampler.run_mcmc(pos, nMcmc)
print("--- %s seconds ---" % (time.time() - start_time))
kois.to_csv("occurenceRatePosteriors/selectedPcs_noreliability.csv")
samples = sampler.flatchain
np.save("occurenceRatePosteriors/occurenceRatePosteriors_noreliability.npy", samples)
```
--- 3.10405993462 seconds ---
```python
##################################################################
##################################################################
corner.corner(sampler.flatchain, labels=getModelLabels(model));
##################################################################
gamma_earth_no_reliability, fig = plot_results(sampler.flatchain)
print(np.mean(gamma_earth_no_reliability))
##################################################################
```
0.2073174969892992


```python
plt.hist(np.log10(gamma_earth_no_reliability), 50, histtype="step", color="k", density=True)
plt.gca().set_yticklabels([])
plt.title("the rate of Earth analogs: " + str(10**np.mean(np.log10(gamma_earth_no_reliability))))
plt.xlabel(r"$\log_{10}\Gamma_\oplus = \left. \log_{10}\mathrm{d}N / \mathrm{d}\ln P \, \mathrm{d}\ln R_p \right |_\oplus$");
print("Mean Gamma_Earth = {0}".format(10**np.mean(np.log10(gamma_earth_no_reliability))))
```
Mean Gamma_Earth = 0.186114599371

Compute an occurrence rate with reliability
```python
```
```python
bounds = [(-5, 5), (-5, 5), (-5, 5)]
nTrials = 100
f = FloatProgress(min=0, max=nTrials)
display(f)
allKois = kois
for mCount in range(nTrials):
# randomly select kois
koiSelect = (np.random.rand(len(allKois)) < allKois.totalReliability)
kois = allKois[koiSelect]
kois.to_csv("occurenceRatePosteriors/selectedPcs" + str (mCount) + ".csv")
# print(str(mCount) + " of " + str(nTrials) + ", selected " + str(len(kois))
# + " kois out of " + str(len(allKois)) + " after reliability cut")
koi_periods = np.array(kois.koi_period)
koi_rps = np.array(kois.corrected_prad)
theta_0 = initRateModel(model)
r = minimize(nll, theta_0, method="L-BFGS-B", bounds=bounds)
##################################################################
ndim, nwalkers = len(r.x), nWalkers
pos = [r.x + 1e-5 * np.random.randn(ndim) for i in range(nwalkers)]
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Burn in.
pos, _, _ = sampler.run_mcmc(pos, 200)
sampler.reset()
# Production.
pos, _, _ = sampler.run_mcmc(pos, 1000)
samples = sampler.flatchain
np.save("occurenceRatePosteriors/occurenceRatePosteriors_" + str(mCount) + ".npy", samples)
f.value += 1
```
FloatProgress(value=0.0)
/Users/steve/anaconda3/envs/py2/lib/python2.7/site-packages/ipykernel_launcher.py:7: RuntimeWarning: invalid value encountered in log
import sys
```python
import gc # for memory management
for mCount in range(nTrials):
samples = np.load("occurenceRatePosteriors/occurenceRatePosteriors_" + str(mCount) + ".npy");
subsampleFactor = int(np.round(nTrials/10))
if mCount == 0:
allSamples = samples[0:-1:subsampleFactor,:]
else:
allSamples = np.concatenate((allSamples, samples[0:-1:subsampleFactor,:]))
gc.collect() # force garbage collection before loading another one
corner.corner(allSamples, labels=getModelLabels(model));
##################################################################
gamma_earth, fig = plot_results(allSamples)
print(np.mean(gamma_earth))
##################################################################
print("Mean Gamma_Earth = {0}".format(10**np.mean(np.log10(gamma_earth))))
```
0.10650831284593473
Mean Gamma_Earth = 0.090440384808


```python
plt.hist(np.log10(gamma_earth), 50, histtype="step", color="k", density=True)
plt.hist(np.log10(gamma_earth_no_reliability), 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.title("the rate of Earth analogs: " + str(round(10**np.mean(np.log10(gamma_earth)), 3))
+ "/" + str(round(10**np.mean(np.log10(gamma_earth_no_reliability)), 3)))
plt.xlabel(r"$\log_{10}\Gamma_\oplus = \left. \log_{10}\mathrm{d}N / \mathrm{d}\ln P \, \mathrm{d}\ln R_p \right |_\oplus$");
```

```python
plt.hist(gamma_earth, 50, histtype="step", color="k", density=True)
plt.hist(gamma_earth_no_reliability, 50, histtype="step", color="b", density=True)
plt.gca().set_yticklabels([])
plt.title("the rate of Earth analogs: " + str(round(np.median(gamma_earth), 3))
+ "/" + str(round(np.median(gamma_earth_no_reliability), 3)))
plt.xlabel(r"$\Gamma_\oplus$");
```

```python
print("zeta-Earth = " + str(integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], np.median(allSamples, 0), model)))
```
zeta-Earth = 0.402898375514369
```python
zetaDist = integrateRateModel([.8*365.25,1.2*365.25],
[0.8,1.2], allSamples, model)
plt.hist(zetaDist, 50, histtype="step", color="k", density=True);
```
FloatProgress(value=0.0, max=60000.0)

```python
```
```python
```
|
stevepurREPO_NAMEDR25-occurrence-publicPATH_START.@DR25-occurrence-public_extracted@DR25-occurrence-public-main@GKbaselineSC0p9@.ipynb_checkpoints@computeOccurrence_totalReliability-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "test_pipelines.py",
"repo_name": "lsst/cp_verify",
"repo_path": "cp_verify_extracted/cp_verify-main/tests/test_pipelines.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# LSST Data Management System
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#
"""Test cases for cp_verify pipelines."""
import glob
import os
import unittest
# We need this import here to allow the proj.db to be cleaned up
# properly.
import pyproj # noqa: F401
from lsst.pipe.base import Pipeline, PipelineGraph
import lsst.utils
try:
import lsst.obs.lsst
has_obs_lsst = True
except ImportError:
has_obs_lsst = False
try:
import lsst.obs.subaru
has_obs_subaru = True
except ImportError:
has_obs_subaru = False
try:
import lsst.obs.decam
has_obs_decam = True
except ImportError:
has_obs_decam = False
class VerifyPipelinesTestCase(lsst.utils.tests.TestCase):
"""Test case for building the pipelines."""
def setUp(self):
self.pipeline_path = os.path.join(lsst.utils.getPackageDir("cp_verify"), "pipelines")
def _get_pipelines(self, exclude=[]):
pipelines = {
"verifyBfk.yaml",
"verifyBias.yaml",
"verifyCrosstalk.yaml",
"verifyDark.yaml",
"verifyDefectsIndividual.yaml",
"verifyDefects.yaml",
"verifyFlat.yaml",
# Old pipeline name.
"verifyGain.yaml",
# New pipeline name.
"verifyGainFromFlatPairs.yaml",
"verifyLinearizer.yaml",
"verifyPtc.yaml",
}
for ex in exclude:
pipelines.remove(ex)
return pipelines
def _check_pipeline(self, pipeline_file):
# Confirm that the file is there.
self.assertTrue(os.path.isfile(pipeline_file), msg=f"Could not find {pipeline_file}")
# The following loads the pipeline and confirms that it can parse all
# the configs.
try:
pipeline = Pipeline.fromFile(pipeline_file)
graph = pipeline.to_graph()
except Exception as e:
raise RuntimeError(f"Could not process {pipeline_file}") from e
self.assertIsInstance(graph, PipelineGraph)
def test_ingredients(self):
"""Check that all pipelines in pipelines/_ingredients are tested."""
glob_str = os.path.join(self.pipeline_path, "_ingredients", "*.yaml")
# The *LSST.yaml pipelines are imported by LATISS/LSSTComCam/LSSTCam
# and are not tested on their own.
ingredients = set(
[os.path.basename(pipeline) for pipeline in glob.glob(glob_str) if "LSST.yaml" not in pipeline]
)
# The _ingredients/verifyGainFromFlatPairs.yaml becomes
# verifyGain.yaml in older pipelines for compatibility.
expected = self._get_pipelines()
# The _ingredients/verifyGainFromFlatPairs.yaml becomes
# verifyGain.yaml in older pipelines for compatibility.
expected.remove("verifyGain.yaml")
self.assertEqual(ingredients, expected)
def test_cameras(self):
"""Check that all the cameras in pipelines are tested."""
glob_str = os.path.join(self.pipeline_path, "*")
paths = set(
[os.path.basename(path) for path in glob.glob(glob_str)]
)
expected = {
"DECam",
"HSC",
"_ingredients",
"LATISS",
"LSSTCam",
"LSSTCam-imSim",
"LSSTComCam",
"LSSTComCamSim",
"README.md",
}
self.assertEqual(paths, expected)
@unittest.skipIf(not has_obs_lsst, reason="Cannot test LATISS pipelines without obs_lsst")
def test_latiss_pipelines(self):
for pipeline in self._get_pipelines(exclude=[
# The old pipeline name should be excluded.
"verifyGain.yaml",
# The following tasks are not part of the new pipelines.
"verifyDefectsIndividual.yaml",
# The following tasks will be added in the future.
"verifyCrosstalk.yaml",
"verifyBfk.yaml",
]):
self._check_pipeline(os.path.join(self.pipeline_path, "LATISS", pipeline))
@unittest.skipIf(not has_obs_lsst, reason="Cannot test LSSTCam pipelines without obs_lsst")
def test_lsstcam_pipelines(self):
for pipeline in self._get_pipelines(
exclude=[
# These are renamed/not used in the new pipelines.
"verifyGain.yaml",
"verifyDefectsIndividual.yaml",
# These are not used yet.
"verifyCrosstalk.yaml",
]):
self._check_pipeline(os.path.join(self.pipeline_path, "LSSTCam", pipeline))
@unittest.skipIf(not has_obs_lsst, reason="Cannot test LSSTCam-imSim pipelines without obs_lsst")
def test_lsstcam_imsim_pipelines(self):
for pipeline in self._get_pipelines(exclude=["verifyGainFromFlatPairs.yaml"]):
self._check_pipeline(os.path.join(self.pipeline_path, "LSSTCam-imSim", pipeline))
@unittest.skipIf(not has_obs_lsst, reason="Cannot test LSSTComCam pipelines without obs_lsst")
def test_lsstcomcam_pipelines(self):
for pipeline in self._get_pipelines(
exclude=[
# These are renamed/not used in the new pipelines.
"verifyGain.yaml",
"verifyDefectsIndividual.yaml",
# These are not used yet.
"verifyCrosstalk.yaml",
]
):
self._check_pipeline(os.path.join(self.pipeline_path, "LSSTComCam", pipeline))
@unittest.skipIf(not has_obs_lsst, reason="Cannot test LSSTComCamSim pipelines without obs_lsst")
def test_lsstcomcamsim_pipelines(self):
for pipeline in self._get_pipelines(
exclude=[
# These are renamed/not used in the new pipelines.
"verifyGain.yaml",
"verifyDefectsIndividual.yaml",
# These are not valid for LSSTComCamSim.
"verifyCrosstalk.yaml",
"verifyLinearizer.yaml",
]
):
self._check_pipeline(os.path.join(self.pipeline_path, "LSSTComCamSim", pipeline))
@unittest.skipIf(not has_obs_decam, reason="Cannot test DECam pipelines without obs_decam")
def test_decam_pipelines(self):
for pipeline in self._get_pipelines(exclude=["verifyGainFromFlatPairs.yaml"]):
self._check_pipeline(os.path.join(self.pipeline_path, "DECam", pipeline))
@unittest.skipIf(not has_obs_subaru, reason="Cannot test HSC pipelines without obs_subaru")
def test_hsc_pipelines(self):
for pipeline in self._get_pipelines(exclude=["verifyGainFromFlatPairs.yaml"]):
self._check_pipeline(os.path.join(self.pipeline_path, "HSC", pipeline))
class TestMemory(lsst.utils.tests.MemoryTestCase):
    """Run the standard lsst.utils resource/leak checks for this module."""
    pass
def setup_module(module):
    # Pytest hook: initialize the LSST test framework before tests run.
    lsst.utils.tests.init()
if __name__ == "__main__":
lsst.utils.tests.init()
unittest.main()
|
lsstREPO_NAMEcp_verifyPATH_START.@cp_verify_extracted@cp_verify-main@tests@test_pipelines.py@.PATH_END.py
|
{
"filename": "run_pipelines.py",
"repo_name": "ucberkeleyseti/turbo_seti",
"repo_path": "turbo_seti_extracted/turbo_seti-master/turbo_seti/find_event/run_pipelines.py",
"type": "Python"
}
|
"""
Main program module for executable plotSETI.
Facilitates the automation of 2 large functions:
find_event_pipline()
plot_event_pipline()
"""
import sys
import os
import glob
from argparse import ArgumentParser, RawDescriptionHelpFormatter
import matplotlib
from blimpy import __version__ as BLIMPY_VERSION
from turbo_seti.find_event.find_event_pipeline import find_event_pipeline
from turbo_seti.find_event.plot_event_pipeline import plot_event_pipeline
from turbo_seti.find_doppler.turbo_seti_version import TURBO_SETI_VERSION
# This file is in the find_event directory.
# The version file is next door in the find_doppler directory (sibling).
CURDIR = os.path.abspath(os.path.join(__file__, os.pardir))
UPDIR = os.path.abspath(os.path.join(CURDIR, os.pardir))
# Make the sibling find_doppler package importable.
sys.path.append(UPDIR + "/find_doppler")
# 3 standard intermediate files:
NAME_CSVF = "found_event_table.csv"
NAME_H5_LIST = "list_h5_files.txt"
NAME_DAT_LIST = "list_dat_files.txt"
# Process exit/return codes (0 = success, 1 = error).
RETURN_NORMAL = 0
RETURN_ERROR = 1
# Help text appended after the argument descriptions by `plotSETI -h`.
# Fixes vs. the previous text: the --h5dat_lists example's second path now
# matches the sentence below it (/dir_b/list_dat_files.txt), and the complex
# cadence paragraph no longer calls 2 the default threshold (the parser
# default is 3).
HELP_EPILOGUE = \
"""
Optional Filtering Parameters
--------------------------------------
The following parameters can be used to prune hits from the dat files,
regardless of the filter threshold value:
* min_drift_rate (Hz/s)
* max_drift_rate (Hz/s)
* min_snr
Filter Threshold (ON-OFF tables)
--------------------------------------
1 : Select all top hits from the DAT files.
2 : Select only those top hits that are in at least one ON file AND not in any OFF files.
3 : Select only those top hits that are in all ON files AND not in any OFF files.
Default: 3.
Complex Cadences (--cadence=complex)
------------------------------------
All input .h5/.dat file pairs where the file header source_name fails to match
the --source_name parameter value are bypassed. In this way, source_name matches are
similar to ON files and non-matches are similar to OFF files.
Specifying a --filter_threshold value of 2 means that a top hit must be in
at least one of the matched files to qualify as an event.
The default --filter_threshold value of 3 indicates that a top hit must be in all
matched files to be an event.
Lists of h5 and dat files used internally
-----------------------------------------
Internally, plotSETI uses one text-file-resident list of h5 files
and another for the corresponding dat files.
Normally, these 2 lists are generated internally.
However, if parameter --h5dat_lists is set to 2 file paths separated by spaces
(one text file for h5s, one text file for dats), then those 2 list files:
* Will be checked for existence and consistency.
* Will be used for internal list processing.
E.g. plotSETI --h5dat_lists /dir_a/list_h5_files.txt /dir_b/list_dat_files.txt --out_dir .....
tells plotSETI that there exists a list of h5 files in /dir_a/list_h5_files.txt
and a list of dat files in /dir_b/list_dat_files.txt
If --h5dat_lists is absent (default), plotSETI will internally generate the 2 list files.
"""
def clean_event_stuff(path_out_dir):
    """
    Remove stale artifacts from a previous run in the output directory.

    Deletes all *.png plot files, all list*.txt list files, and the
    found-event CSV (NAME_CSVF), if present.

    Parameters
    ----------
    path_out_dir : str
        Output path of directory holding old artifacts.

    Returns
    -------
    None.
    """
    stale_files = glob.glob(f"{path_out_dir}/*.png") + glob.glob(f"{path_out_dir}/list*.txt")
    for stale in stale_files:
        os.remove(stale)
    path_csv = f"{path_out_dir}/{NAME_CSVF}"
    if os.path.exists(path_csv):
        os.remove(path_csv)
def count_text_lines(path_list_file):
    """
    Count the number of text lines in a file.

    Parameters
    ----------
    path_list_file : str
        Path of file containing a list of text lines.

    Returns
    -------
    int
        Count of text lines.
    """
    # Use a context manager so the file handle is closed promptly (the
    # previous implementation leaked it), and count lazily instead of
    # materializing the whole file in memory with readlines().
    with open(path_list_file, "r", encoding="utf-8") as fh:
        return sum(1 for _ in fh)
def make_lists(path_h5_dir, path_h5_list, path_dat_dir, path_dat_list):
    """
    Create a list of .h5 files and a list of .dat files.

    Both lists are written sorted, one path per line, so that the Nth h5
    file corresponds to the Nth dat file.

    Parameters
    ----------
    path_h5_dir : str
        Directory where the h5 files reside.
    path_h5_list : str
        Path of output list of h5 files.
    path_dat_dir : str
        Directory where the dat files reside.
    path_dat_list : str
        Path of output list of dat files.

    Returns
    -------
    int
        Number in cadence : Success.
        0 : Failure (no h5 files, no dat files, or mismatched counts).
    """
    N_dat = N_h5 = 0
    print(f"plotSETI: Directory of h5 files: {path_h5_dir}")
    print(f"plotSETI: Directory of dat files: {path_dat_dir}")
    # Make a list of the h5 files.  (f-strings used throughout for
    # consistency; the old code mixed f-strings and str.format.)
    with open(path_h5_list, "w", encoding="utf-8") as fh_h5:
        for path_h5 in sorted(glob.glob(f"{path_h5_dir}/*.h5")):
            N_h5 += 1
            fh_h5.write(f"{path_h5}\n")
    if N_h5 < 1:
        print("\n*** plotSETI: No h5 files found!")
        return 0
    print(f"plotSETI: Found {N_h5} h5 files.")
    # Make a list of the dat files.
    with open(path_dat_list, "w", encoding="utf-8") as fh_dat:
        for path_dat in sorted(glob.glob(f"{path_dat_dir}/*.dat")):
            N_dat += 1
            fh_dat.write(f"{path_dat}\n")
    if N_dat < 1:
        print("\n*** plotSETI: No dat files found!")
        return 0
    print(f"plotSETI: Found {N_dat} dat files.")
    # Make sure that the lists are of the same size.
    if N_h5 != N_dat:
        print("\n*** plotSETI: Count of dat files must = count of h5 files!")
        return 0
    return N_h5
def main(args=None):
    """
    This is the entry point to the plotSETI executable.

    Parameters
    ----------
    args : list of str, optional
        Command-line argument strings.  If None (the default), the
        arguments are taken from sys.argv.

    Returns
    -------
    int
        RETURN_NORMAL (0) on success, RETURN_ERROR (1) on failure.
    """
    # Create an option parser to get command-line input/arguments
    parser = ArgumentParser(description="plotSETI - post-search event-plot utility, version {}."
                            .format(TURBO_SETI_VERSION),
                            formatter_class=RawDescriptionHelpFormatter,
                            epilog=HELP_EPILOGUE)
    parser.add_argument("h5_dir", type=str, default="", nargs="?",
                        help="Path to the directory holding the set of .h5 files")
    parser.add_argument("-d", "--dat_dir", dest="dat_dir", type=str, default=None,
                        help="Path to the directory holding the set of .dat files. Default: h5_path. ")
    parser.add_argument("-o", "--out_dir", dest="out_dir", type=str, default="./",
                        help="Path to the output directory. Default: current directory (.).")
    parser.add_argument("--h5dat_lists", type=str, nargs="+", required=False,
                        help="User-supplied paths to lists of h5 files and dat files. Default: None supplied; will be internally-generated.")
    parser.add_argument("-f", "--filter_threshold", dest="filter_threshold", type=int,
                        choices=[1, 2, 3], default=3,
                        help="Specification for how strict the top hit filtering will be.")
    parser.add_argument("-z", "--plot_offset", dest="plot_offset", default=False, action="store_true",
                        help="Plot offset lines from the signal? Default: False.")
    parser.add_argument("-s", "--snr_threshold", dest="snr_threshold", default=None,
                        help="The SNR below which signals will be discarded.")
    parser.add_argument("-m", "--min_drift_rate", dest="min_drift_rate", default=None,
                        help="The minimum drift rate below which signals will be discarded.")
    parser.add_argument("-M", "--max_drift_rate", dest="max_drift_rate", default=None,
                        help="The maximum drift rate above which signals will be discarded.")
    parser.add_argument("-c", "--cadence", dest="cadence", type=str,
                        choices=["on", "off", "complex"], default="on",
                        help="Input file cadence FIRST file: on source, off source, complex cadence. Default: on.")
    parser.add_argument("-n", "--source_name", dest="source_name", type=str, default="",
                        help="Complex cadence source name. Don't set this for on or off cadences.")
    # BUGFIX: the default was previously True, which contradicted the help
    # text below and made the flag a no-op (store_true flags must default
    # to False to be meaningful).
    parser.add_argument("-e", "--erase_old_files", dest="erase_old", default=False, action="store_true",
                        help="Erase pre-existing *.png, *.csv, and list*.txt files in the output directory. Default: False.")
    parser.add_argument("-v", "--version", dest="show_version", default=False, action="store_true",
                        help="Show the turbo_seti and blimpy versions and exit.")
    parser.add_argument("--debug", default=False, action="store_true",
                        help="Turn on debug tracing (developer).")
    if args is None:
        args = parser.parse_args()
    else:
        args = parser.parse_args(args)
    if args.show_version:
        print("turbo_seti: {}".format(TURBO_SETI_VERSION))
        print("blimpy: {}".format(BLIMPY_VERSION))
        return RETURN_NORMAL
    if args.h5_dir == "":
        print("\nThe .h5 directory must be specified!\n")
        os.system("plotSETI -h")
        return RETURN_NORMAL
    if not os.path.exists(args.h5_dir):
        print("\nThe .h5 directory {} does not exist!\n".format(args.h5_dir))
        return RETURN_ERROR
    # The dat directory defaults to the h5 directory.
    if args.dat_dir is None:
        args.dat_dir = args.h5_dir
    return execute_pipelines(args)
def execute_pipelines(args):
    """
    Interface to the pipeline functions, called by main().

    Runs find_event_pipeline() over the dat/h5 file lists and then
    plot_event_pipeline() over the resulting event CSV.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed command-line arguments produced by main().

    Returns
    -------
    int
        RETURN_NORMAL (0) on success, RETURN_ERROR (1) on failure.
        Note: a complex cadence without a source_name calls sys.exit()
        instead of returning.
    """
    # Setup some parameter values for find_event_pipeline().
    if args.cadence == "complex":
        complex_cadence = True
        if len(args.source_name) < 1:
            print("\n*** plotSETI: Complex cadence requires a source_name.")
            sys.exit(RETURN_ERROR)
    else:
        # Simple cadence: record whether the FIRST file is ON or OFF source.
        # first_file is only defined (and only used) on this branch.
        complex_cadence = False
        if args.cadence == "on":
            first_file = "ON"
        else:
            first_file = "OFF"
    # Normalize the directories to absolute paths with a trailing slash.
    h5_dir = os.path.abspath(args.h5_dir) + "/"
    dat_dir = os.path.abspath(args.dat_dir) + "/"
    out_dir = os.path.abspath(args.out_dir) + "/"
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # Offset passed to plot_event_pipeline: "auto" draws offset lines,
    # 0 disables them.
    if args.plot_offset:
        offset="auto"
    else:
        offset=0
    # Establish output pathnames,
    path_csvf = out_dir + NAME_CSVF
    # Remove artifacts (plots, lists, CSV) left over from a previous run.
    clean_event_stuff(out_dir)
    # Make the h5 and dat lists.
    # Default to auto-generation?
    if args.h5dat_lists is None:
        SZ_user_list = 0
    else:
        SZ_user_list = len(args.h5dat_lists)
    if args.debug:
        print(f"DEBUG h5dats_list: #{SZ_user_list} {args.h5dat_lists}")
    if SZ_user_list == 0: # Default to auto-generation.
        path_h5_list = out_dir + NAME_H5_LIST
        path_dat_list = out_dir + NAME_DAT_LIST
        number_in_cadence = make_lists(h5_dir, path_h5_list, dat_dir, path_dat_list)
        if number_in_cadence == 0:
            return RETURN_ERROR
    else: # User-specified lists
        # Exactly 2 list-file paths are required: [h5 list, dat list].
        if SZ_user_list != 2:
            print(f"\n*** plotSETI: h5dat_lists had {SZ_user_list} elements; must be 2 (one for h5 and one for dat)!")
            return RETURN_ERROR
        if args.h5dat_lists[0] is None or args.h5dat_lists[1] is None:
            print(f"\n*** plotSETI: h5dat_lists had {SZ_user_list} elements; must be 2 (one for h5 and one for dat)!")
            return RETURN_ERROR
        # Check the list of h5 files.
        path_h5_list = args.h5dat_lists[0]
        if not os.path.exists(path_h5_list):
            print(f"\n*** plotSETI: File {path_h5_list} does not exist!")
            return RETURN_ERROR
        N_h5 = count_text_lines(path_h5_list)
        print(f"plotSETI: Found {N_h5} h5 files.")
        # Check the list of dat files.
        path_dat_list = args.h5dat_lists[1]
        if not os.path.exists(path_dat_list):
            print(f"\n*** plotSETI: File {path_dat_list} does not exist!")
            return RETURN_ERROR
        N_dat = count_text_lines(path_dat_list)
        print(f"plotSETI: Found {N_dat} dat files.")
        # Make sure that the lists are of the same size.
        if N_h5 != N_dat:
            print("\n*** plotSETI: Count of dat files must = count of h5 files!")
            return RETURN_ERROR
        number_in_cadence = N_h5
    # Run find_event_pipeline()
    if complex_cadence:
        df_check = find_event_pipeline(path_dat_list,
                            path_h5_list,
                            filter_threshold = args.filter_threshold,
                            number_in_cadence = number_in_cadence,
                            on_source_complex_cadence=args.source_name,
                            sortby_tstart=True,
                            check_zero_drift=False,
                            SNR_cut=args.snr_threshold,
                            min_drift_rate=args.min_drift_rate,
                            max_drift_rate=args.max_drift_rate,
                            user_validation=False,
                            csv_name=path_csvf,
                            saving=True)
    else: # not a complex cadence
        df_check = find_event_pipeline(path_dat_list,
                            path_h5_list,
                            filter_threshold = args.filter_threshold,
                            number_in_cadence = number_in_cadence,
                            on_source_complex_cadence=False,
                            on_off_first=first_file,
                            sortby_tstart=True,
                            check_zero_drift=False,
                            SNR_cut=args.snr_threshold,
                            min_drift_rate=args.min_drift_rate,
                            max_drift_rate=args.max_drift_rate,
                            user_validation=False,
                            csv_name=path_csvf,
                            saving=True)
    if df_check is None:
        print("\n*** plotSETI: No events produced in find_event_pipeline()!")
        return RETURN_ERROR
    # Make the plots for all of the HDF5/DAT file pairs in batch mode.
    # "agg" is a non-interactive backend, required for headless plotting.
    matplotlib.use("agg", force=True)
    plot_event_pipeline(path_csvf,
                        path_h5_list,
                        plot_dir=out_dir,
                        filter_spec=args.filter_threshold,
                        offset=offset,
                        user_validation=False)
    print(f"\nplotSETI: Plots are stored in directory {out_dir}.")
    return RETURN_NORMAL
if __name__ == "__main__":
# Start the show!
main()
|
ucberkeleysetiREPO_NAMEturbo_setiPATH_START.@turbo_seti_extracted@turbo_seti-master@turbo_seti@find_event@run_pipelines.py@.PATH_END.py
|
{
"filename": "installation.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/installation.md",
"type": "Markdown"
}
|
# Installation
Component-specific information:
- [{{ python-package }} installation](python-installation.md)
- [{{ catboost-spark }} installation](spark-installation.md)
- [{{ r-package }} installation](r-installation.md)
- [Command-line version binary](cli-installation.md)
[Build from source](build-from-source.md)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@installation.md@.PATH_END.py
|
{
"filename": "index.md",
"repo_name": "Schwarzam/splusdata",
"repo_path": "splusdata_extracted/splusdata-master/docs/index.md",
"type": "Markdown"
}
|
This site contains the project documentation for the
`splusdata` package, a Python package for retrieving S-PLUS data.
More info at [splus.cloud](https://splus.cloud).
## Table Of Contents
1. [Examples](examples.md)
2. [Main Usage](splusdata.md)
Quickly find what you're looking for depending on
your use case by looking at the different pages.
## Acknowledgements
This package is open to all who want to contribute — feel free to leave your mark!
|
SchwarzamREPO_NAMEsplusdataPATH_START.@splusdata_extracted@splusdata-master@docs@index.md@.PATH_END.py
|
{
"filename": "_ypad.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/treemap/marker/colorbar/_ypad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``ypad`` property of ``treemap.marker.colorbar``.

    Auto-generated plotly validator: accepts a number with a minimum of 0,
    applied with the "colorbars" edit type unless overridden via kwargs.
    """

    def __init__(
        self, plotly_name="ypad", parent_name="treemap.marker.colorbar", **kwargs
    ):
        super(YpadValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # kwargs.pop lets callers override the generated defaults.
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 0),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@treemap@marker@colorbar@_ypad.py@.PATH_END.py
|
{
"filename": "_k.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/mesh3d/_k.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class KValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``k`` property of ``mesh3d``.

    Auto-generated plotly validator: accepts a data array, applied with
    the "calc" edit type unless overridden via kwargs.
    """

    def __init__(self, plotly_name="k", parent_name="mesh3d", **kwargs):
        super(KValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # kwargs.pop lets callers override the generated default.
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@mesh3d@_k.py@.PATH_END.py
|
{
"filename": "catboost.py",
"repo_name": "vaexio/vaex",
"repo_path": "vaex_extracted/vaex-master/packages/vaex-ml/vaex/ml/catboost.py",
"type": "Python"
}
|
import base64
import tempfile
import traitlets
import vaex
import vaex.serialize
from . import state
from . import generate
import numpy as np
import catboost
@vaex.serialize.register
@generate.register
class CatBoostModel(state.HasState):
    '''The CatBoost algorithm.

    This class provides an interface to the CatBoost algorithm.
    CatBoost is a fast, scalable, high performance Gradient Boosting on
    Decision Trees library, used for ranking, classification, regression and
    other machine learning tasks. For more information please visit
    https://github.com/catboost/catboost

    Example:

    >>> import vaex
    >>> import vaex.ml.catboost
    >>> df = vaex.datasets.iris()
    >>> features = ['sepal_width', 'petal_length', 'sepal_length', 'petal_width']
    >>> df_train, df_test = df.ml.train_test_split()
    >>> params = {
        'leaf_estimation_method': 'Gradient',
        'learning_rate': 0.1,
        'max_depth': 3,
        'bootstrap_type': 'Bernoulli',
        'objective': 'MultiClass',
        'eval_metric': 'MultiClass',
        'subsample': 0.8,
        'random_state': 42,
        'verbose': 0}
    >>> booster = vaex.ml.catboost.CatBoostModel(features=features, target='class_', num_boost_round=100, params=params)
    >>> booster.fit(df_train)
    >>> df_train = booster.transform(df_train)
    >>> df_train.head(3)
    #    sepal_length    sepal_width    petal_length    petal_width    class_  catboost_prediction
    0             5.4            3             4.5            1.5         1    [0.00615039 0.98024259 0.01360702]
    1             4.8            3.4           1.6            0.2         0    [0.99034267 0.00526382 0.0043935 ]
    2             6.9            3.1           4.9            1.5         1    [0.00688241 0.95190908 0.04120851]
    >>> df_test = booster.transform(df_test)
    >>> df_test.head(3)
    #    sepal_length    sepal_width    petal_length    petal_width    class_  catboost_prediction
    0             5.9            3             4.2            1.5         1    [0.00464228 0.98883351 0.00652421]
    1             6.1            3             4.6            1.4         1    [0.00350424 0.9882139  0.00828186]
    2             6.6            2.9           4.6            1.3         1    [0.00325705 0.98891631 0.00782664]
    '''
    snake_name = "catboost_model"
    features = traitlets.List(traitlets.Unicode(), help='List of features to use when fitting the CatBoostModel.')
    target = traitlets.Unicode(allow_none=False, help='The name of the target column.')
    num_boost_round = traitlets.CInt(default_value=None, allow_none=True, help='Number of boosting iterations.')
    params = traitlets.Dict(help='A dictionary of parameters to be passed on to the CatBoostModel model.')
    pool_params = traitlets.Dict(default_value={}, help='A dictionary of parameters to be passed to the Pool data object construction')
    prediction_name = traitlets.Unicode(default_value='catboost_prediction', help='The name of the virtual column housing the predictions.')
    prediction_type = traitlets.Enum(values=['Probability', 'Class', 'RawFormulaVal'], default_value='Probability',
                                     help='The form of the predictions. Can be "RawFormulaVal", "Probability" or "Class".')
    batch_size = traitlets.CInt(default_value=None, allow_none=True, help='If provided, will train in batches of this size.')
    batch_weights = traitlets.List(traitlets.Float(), default_value=[], allow_none=True, help='Weights to sum models at the end of training in batches.')
    evals_result_ = traitlets.List(traitlets.Dict(), default_value=[], help="Evaluation results")
    ctr_merge_policy = traitlets.Enum(values=['FailIfCtrsIntersects', 'LeaveMostDiversifiedTable', 'IntersectingCountersAverage'],
                                      default_value='IntersectingCountersAverage', help="Strategy for summing up models. Only used when training in batches. See the CatBoost documentation for more info.")

    def __call__(self, *args):
        """Evaluate the trained booster on per-feature 1-D arrays.

        Registered as a vaex function by ``transform``; each positional
        argument is one feature column.
        """
        # Stack the feature columns into a 2-D float64 matrix (rows = samples).
        data2d = np.stack([np.asarray(arg, np.float64) for arg in args], axis=1)
        dmatrix = catboost.Pool(data2d, **self.pool_params)
        return self.booster.predict(dmatrix, prediction_type=self.prediction_type)

    def transform(self, df):
        '''Transform a DataFrame such that it contains the predictions of the CatBoostModel in form of a virtual column.

        :param df: A vaex DataFrame. It should have the same columns as the DataFrame used to train the model.

        :return copy: A shallow copy of the DataFrame that includes the CatBoostModel prediction as a virtual column.
        :rtype: DataFrame
        '''
        copy = df.copy()
        lazy_function = copy.add_function('catboost_prediction_function', self, unique=True)
        expression = lazy_function(*self.features)
        copy.add_virtual_column(self.prediction_name, expression, unique=False)
        return copy

    def fit(self, df, evals=None, early_stopping_rounds=None, verbose_eval=None, plot=False, progress=None, **kwargs):
        '''Fit the CatBoostModel model given a DataFrame.

        This method accepts all key word arguments for the catboost.train method.

        :param df: A vaex DataFrame containing the features and target on which to train the model.
        :param evals: A list of DataFrames to be evaluated during training.
            This allows user to watch performance on the validation sets.
            NOTE: the list is modified in place - each DataFrame is replaced
            by a catboost.Pool built from it.
        :param int early_stopping_rounds: Activates early stopping.
        :param bool verbose_eval: Requires at least one item in *evals*.
            If *verbose_eval* is True then the evaluation metric on the validation set is printed at each boosting stage.
        :param bool plot: if True, display an interactive widget in the Jupyter
            notebook of how the train and validation sets score on each boosting iteration.
        :param progress: If True display a progressbar when the training is done in batches.
        '''
        # Record the feature names so Pools built here and at predict time agree.
        self.pool_params['feature_names'] = self.features
        if evals is not None:
            for i, item in enumerate(evals):
                data = item[self.features].values
                target_data = item[self.target].to_numpy()
                evals[i] = catboost.Pool(data=data, label=target_data, **self.pool_params)
        # This does the actual training/fitting of the catboost model
        if self.batch_size is None:
            # Single-shot training on the full (materialized) dataset.
            data = df[self.features].values
            target_data = df[self.target].to_numpy()
            dtrain = catboost.Pool(data=data, label=target_data, **self.pool_params)
            model = catboost.train(params=self.params,
                                   dtrain=dtrain,
                                   num_boost_round=self.num_boost_round,
                                   evals=evals,
                                   early_stopping_rounds=early_stopping_rounds,
                                   verbose_eval=verbose_eval,
                                   plot=plot,
                                   **kwargs)
            self.booster = model
            self.evals_result_ = [model.evals_result_]
            self.feature_importances_ = list(model.feature_importances_)
        else:
            # Batched training: fit one model per chunk, then sum the models.
            models = []
            # Set up progressbar
            n_samples = len(df)
            progressbar = vaex.utils.progressbars(progress, title="fit(catboost)")
            column_names = self.features + [self.target]
            iterator = df[column_names].to_pandas_df(chunk_size=self.batch_size)
            for i1, i2, chunk in iterator:
                progressbar(i1 / n_samples)
                data = chunk[self.features].values
                target_data = chunk[self.target].values
                dtrain = catboost.Pool(data=data, label=target_data, **self.pool_params)
                model = catboost.train(params=self.params,
                                       dtrain=dtrain,
                                       num_boost_round=self.num_boost_round,
                                       evals=evals,
                                       early_stopping_rounds=early_stopping_rounds,
                                       verbose_eval=verbose_eval,
                                       plot=plot,
                                       **kwargs)
                self.evals_result_.append(model.evals_result_)
                models.append(model)
            progressbar(1.0)
            # Weights are key when summing models
            if len(self.batch_weights) == 0:
                # Default: equal weight for every per-batch model.
                batch_weights = [1/len(models)] * len(models)
            elif self.batch_weights is not None and len(self.batch_weights) != len(models):
                raise ValueError("'batch_weights' must be te same length as the number of models.")
            else:
                batch_weights = self.batch_weights
            # Sum the models
            self.booster = catboost.sum_models(models, weights=batch_weights, ctr_merge_policy=self.ctr_merge_policy)

    def predict(self, df, **kwargs):
        '''Provided a vaex DataFrame, get an in-memory numpy array with the predictions from the CatBoostModel model.
        This method accepts the key word arguments of the predict method from catboost.

        :param df: a vaex DataFrame

        :returns: A in-memory numpy array containing the CatBoostModel predictions.
        :rtype: numpy.array
        '''
        data = df[self.features].values
        dmatrix = catboost.Pool(data, **self.pool_params)
        return self.booster.predict(dmatrix, prediction_type=self.prediction_type, **kwargs)

    def state_get(self):
        """Serialize this model (including the trained booster) to a dict."""
        import os
        # CatBoost can only (de)serialize via a file on disk.  Use
        # tempfile.mkstemp instead of the deprecated, race-prone
        # tempfile.mktemp, and remove the temp file when done (the old
        # code leaked one temp file per call).
        fd, filename = tempfile.mkstemp()
        os.close(fd)
        try:
            self.booster.save_model(filename)
            with open(filename, 'rb') as f:
                data = f.read()
        finally:
            os.remove(filename)
        return dict(tree_state=base64.encodebytes(data).decode('ascii'),
                    substate=super(CatBoostModel, self).state_get())

    def state_set(self, state, trusted=True):
        """Restore this model from a dict produced by ``state_get``."""
        import os
        super(CatBoostModel, self).state_set(state['substate'])
        data = base64.decodebytes(state['tree_state'].encode('ascii'))
        # Round-trip through a secure temporary file (see state_get).
        fd, filename = tempfile.mkstemp()
        try:
            with os.fdopen(fd, 'wb') as f:
                f.write(data)
            self.booster = catboost.CatBoost().load_model(fname=filename)
        finally:
            os.remove(filename)
|
vaexioREPO_NAMEvaexPATH_START.@vaex_extracted@vaex-master@packages@vaex-ml@vaex@ml@catboost.py@.PATH_END.py
|
{
"filename": "Components.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/GalfitModule/Classes/Components.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os
import sys
from os.path import join as pj
from os.path import exists
import subprocess
from copy import deepcopy
from IPython import get_ipython
from astropy.io import fits
import warnings
import pandas as pd
import numpy as np
# In[2]:
# For debugging purposes
from IPython import get_ipython
def in_notebook():
    """Return True when running under IPython/Jupyter, False otherwise.

    ``get_ipython()`` returns the active shell object inside an IPython
    session and None in a plain Python interpreter, so its truthiness is
    the answer directly (no if/else needed).
    """
    return bool(get_ipython())
# In[3]:
_HOME_DIR = os.path.expanduser("~")
if in_notebook():
_SPARCFIRE_DIR = pj(_HOME_DIR, "sparcfire_matt")
_MODULE_DIR = pj(_SPARCFIRE_DIR, "GalfitModule")
else:
try:
_SPARCFIRE_DIR = os.environ["SPARCFIRE_HOME"]
_MODULE_DIR = pj(_SPARCFIRE_DIR, "GalfitModule")
except KeyError:
if __name__ == "__main__":
print("SPARCFIRE_HOME is not set. Please run 'setup.bash' inside SpArcFiRe directory if not done so already.")
print("Checking the current directory for GalfitModule, otherwise quitting.")
_MODULE_DIR = pj(os.getcwd(), "GalfitModule")
if not exists(_MODULE_DIR):
raise Exception("Could not find GalfitModule!")
sys.path.append(_MODULE_DIR)
from Functions.helper_functions import *
from Classes.Parameters import *
# In[4]:
# TODO: MAKE ITERABLE via generator
# def reverse(data):
# for index in range(len(data)-1, -1, -1):
# yield data[index]
class GalfitComponent:
def __init__(self,
#parameters = {}, #load_default_parameters(),
component_type,
component_name = "",
component_number = 0,
param_prefix = " ",
**kwargs
):
self.component_type = component_type
# If you decide to name your component *besides and/or including* the
# variable itself
self.component_name = component_name
self.component_number = component_number
self.param_prefix = param_prefix
default = load_default_parameters().get(self.component_type, {})
assert default, f"Component type {self.component_type} improperly specified or not in defaults."
# Assume if an argument is given called 'parameters' they mean to pass
# all the parameters for their component in at once. kwargs however take precedence
# since we sometimes use parameters for the default.
parameters = kwargs.pop("parameters", default)
for k, v in kwargs.items():
# Only overwrite if the arguments are given correctly
# Look in sub dictionary
if k in default.keys():
#try:
parameters.get(k, default[k]).value = v
# except KeyError:
# print(f"{self.component_type} instantiation not properly specified. Continuing...")
self._parameters = parameters
# Generically handles the parameters fed in
for name, parameter in self._parameters.items():
setattr(self, name, parameter)
getattr(self, name, parameter)
# For reading from file
self._start_dict = f"COMP_{self.component_number}"
self._end_dict = f"COMP_{self.component_number + 1}"
#self._start_dict_value = self.component_type
# This should work but clarity is probably best here
# self._start_text = str(ComponentType(
# self.component_type,
# parameter_number = f"{self.param_prefix}0",
# component_number = component_number
# ))
self._start_text = f"# Component number: {self.component_number}"
self._end_text = f"{self.param_prefix.strip()}10"
self._section_sep = "="*80
# ==========================================================================================================
def check_parameters_types(self, input_dict):
if not input_dict:
input_dict = self.parameters
for k, parameter in input_dict.items():
assert isinstance(parameter, GalfitParameter), f"The parameter fed into the GalfitComponent, {k}, is not a valid type."
# ==========================================================================================================
@property
def parameters(self):
    """Dict of this component's parameter objects, keyed by name.

    NOTE(review): this getter has a side effect -- it re-mirrors every
    parameter onto ``self`` as an attribute on every access (the
    ``getattr`` immediately after looks redundant; presumably it exists
    to trigger generated get/set machinery -- confirm).
    """
    # Generically handles the parameters fed in
    for name, parameter in self._parameters.items():
        setattr(self, name, parameter)
        getattr(self, name, parameter)
    return self._parameters
@parameters.setter
def parameters(self, new_dict):
    """Replace the parameter dict (deep-copied) after type validation."""
    self.check_parameters_types(new_dict)
    self._parameters = deepcopy(new_dict)
    # Generically handles the parameters fed in
    for name, parameter in self._parameters.items():
        setattr(self, name, parameter)
        getattr(self, name, parameter)
# ==========================================================================================================
# @staticmethod
# def component_get_set(component_dict = None, component_type = ""):
# exec_dict = {}
# # TODO: Set up logging
# if not component_dict:
# exec_dict = load_default_parameters()
# warnings.warn(f"Component type: {component_type} not properly specified " \
# f"upon initializing {GalfitComponent.__name__}.\n" \
# f"Will initalize all component types currently set in 'load_default_parameters()' " \
# f"from 'Parameters' module." \
# "\n".join(exec_dict.keys())
# )
# else:
# # Give it an empty key so that the following loop works
# exec_dict[""] = component_dict
# full_str = ""
# return "\n".join(
# [
# generate_get_set(
# # inner dict looks like this:
# # "position" : parameters["position"]
# {k : f"parameters[\"{k}\"]"
# for k in e_dict.keys()
# }
# )
# for e_dict in exec_dict.values()
# ]
# )
# ==========================================================================================================
# def update_values(self, **kwargs):
# for key, value in kwargs.items():
# setattr(self, key, value)
# Function must return a dict otherwise I can't use
# my fancy decorator
def update_parameter_dict_with_dict(func):
    """Decorator: the wrapped *func* must return a {name: value} dict,
    whose entries are then written into ``self.parameters``.

    Names not present in ``self.parameters`` are silently ignored.
    NOTE(review): defined in the class body and applied at
    class-definition time; it is not itself a bound method.
    """
    def func_wrapper(self, input_dict): #*args, **kwargs):
        input_dict = func(self, input_dict)
        assert isinstance(input_dict, dict), \
            f"update_parameter_dict_with_dict decorator from GalfitComponent " \
            f"improperly used for a {type(self).__name__} component. " \
            f"No dictionary was given."
        for pname, pval in input_dict.items():
            if pname in self.parameters:
                self.parameters[pname].value = pval
    return func_wrapper
# This is by far the more dangerous way
def update_parameter_dict_with_list(func):
    """Decorator: the wrapped *func* returns a positional list of values,
    which is zipped against ``self.parameters`` in insertion order.

    Before zipping, the list is massaged to line up with the parameter
    dict: a leading component-type entry is inserted, a multivalued
    ``position`` is re-packed into an (x, y) tuple, and a dummy ``skip``
    value is appended when the component has one.
    """
    def func_wrapper(self, input_list): #*args, **kwargs):
        input_list = func(self, input_list)
        assert isinstance(input_list, list), \
            f"update_parameter_dict_with_list decorator from GalfitComponent " \
            f"improperly used for a {type(self).__name__} component. " \
            f"No list was given."
        # Must check for position to see if we need to add or subtract a list element
        # since position is multivalued in the log line
        parameter_names = self.parameters.keys()
        if "position" in parameter_names:
            input_list[1] = (input_list[0], input_list[1])
            input_list[0] = self.component_type
        else:
            input_list = [self.component_type] + input_list
        if "skip" in parameter_names:
            input_list.append("0")
        assert len(input_list) == len(self.parameters), \
            f"update_parameter_dict_with_list decorator from GalfitComponent " \
            f"improperly used for a {type(self).__name__} component. " \
            f"List is not the same length as the dictionary of parameters."
        for pname, pval in zip(parameter_names, input_list):
            self.parameters[pname].value = pval
    return func_wrapper
# ==========================================================================================================
# TODO: Add comparison function via subtract(?)
def __sub__(self, other):
    """Placeholder for component comparison via subtraction.

    BUG FIX: the original signature was ``__sub__(self)``; Python always
    invokes ``__sub__(self, other)`` with two arguments, so the old form
    could never be called successfully.  Returning ``NotImplemented``
    preserves the standard TypeError behavior for unsupported operands
    until a real implementation lands.
    """
    return NotImplemented
# ==========================================================================================================
def __str__(self):
    """Render the component as GALFIT input text, header line first."""
    rendered = [f"# Component number: {self.component_number}"]
    rendered.extend(str(param) for param in self.parameters.values())
    return "\n".join(rendered) + "\n"
def __repr__(self):
    """Like __str__ but with each parameter's repr form."""
    rendered = [f"# Component number: {self.component_number}"]
    rendered.extend(repr(param) for param in self.parameters.values())
    return "\n".join(rendered) + "\n"
# ==========================================================================================================
@update_parameter_dict_with_list
def update_from_log(self, in_line:str):
    """Parse one fit.log line into numeric-ish tokens.

    The decorator consumes the returned list and writes the values into
    ``self.parameters`` positionally.
    """
    # Used to update from stdout i.e. what we see in fit.log
    # Requires outside function to loop through log file
    # NOTE: These necessarily round to two digits because that's
    # all Galfit outputs to stdout
    #print("Did this get properly overwritten from base in GalfitComponent?")
    # Extra empty slot to account for component name in parameter dictionary
    # Keep only whitespace-separated tokens that contain a digit, stripped
    # of GALFIT's bracket/asterisk/comma/paren decorations.
    return [
        i.strip("[*,]() ") for i in in_line.split()
        if any(map(str.isdigit, i))
    ]
# ==========================================================================================================
# TODO: Update to work with series
def from_pandas(self, input_df):
    """Update parameter values from a single-row DataFrame.

    Columns are expected to be named '<param>_<component_type>...' as
    produced by ``to_pandas()``; values are rounded to 4 decimals.
    Multivalued parameters (position, Fourier modes) are re-packed into
    tuples before assignment.
    """
    param_names = [n.split(f"_{self.component_type}")[0] for n in input_df.columns]
    param_values = input_df.iloc[0].values.astype(float).round(4)
    new_param_dict = dict(zip(param_names, param_values))
    pos = "position"
    if pos in self.parameters.keys():
        new_param_dict[pos] = (new_param_dict[f"{pos}_x"], new_param_dict[f"{pos}_y"])
        new_param_dict.pop(f"{pos}_x")
        new_param_dict.pop(f"{pos}_y")
    # No graceful way to do this...
    # TODO: Can this be used for bending modes as well?
    # BUG FIX: the original `in ("fourier")` compared against the plain
    # string "fourier" (parentheses alone don't make a tuple), i.e. a
    # substring test; the trailing comma restores exact membership.
    if self.component_type in ("fourier",):
        f_modes = {pn.split("_")[0] for pn in param_names}
        a = "amplitude"
        pha = "phase_angle"
        for mode in f_modes:
            new_param_dict[mode] = (new_param_dict[f"{mode}_{a}"], new_param_dict[f"{mode}_{pha}"])
            new_param_dict.pop(f"{mode}_{a}")
            new_param_dict.pop(f"{mode}_{pha}")
    for k in self.parameters.keys():
        if k.startswith("_"):
            continue
        self.parameters[k].value = new_param_dict[k]
# ==========================================================================================================
def to_pandas(self):
    """Serialize the component into a single-row float32 DataFrame.

    Columns are named '<param>_<component_type>_<component_number>';
    multivalued parameters are split into one column per sub-value.
    """
    name = f"{self.component_type}_{self.component_number}"
    parameter_dict = deepcopy(self.parameters)
    for pname, pval in self.parameters.items():
        if pname.startswith("_"):
            parameter_dict.pop(pname)
            continue
        # Split multivalued parameters like position
        # briefly convert to NumParameter to coincide with others
        if isinstance(pval, MultiParameter):
            old_keys = pval.value._asdict()
            parameter_dict.pop(pname)
            parameter_dict.update({f"{pname}_{k}" : NumParameter(v) for k, v in old_keys.items()})
    parameter_dict = {f"{k}_{name}" : v.value for k, v in parameter_dict.items()}
    all_data = pd.DataFrame(
        parameter_dict,
        index = [name],
        dtype = np.float32
    )
    # Move skip to end for reasons
    # NOTE: argument order matters here -- the insert position is computed
    # from the column count *before* pop() removes skip_col (call arguments
    # evaluate left to right), which lands the column at the very end.
    skip_col = f"skip_{name}"
    if skip_col in all_data.columns:
        all_data.insert(len(all_data.columns) - 1, skip_col, all_data.pop(skip_col))
    return all_data
# ==========================================================================================================
def from_file_helper(*args, **kwargs):
    """Guard placeholder that always raises.

    Concrete parsing lives in from_file_helper_list / from_file_helper_dict;
    landing here indicates a dispatch bug, which this makes loud and easy
    to debug.
    """
    message = "Wrong file helper function called! It's either from_file_helper[_list | _dict]."
    raise Exception(message)
# ==========================================================================================================
def update_parameters_file_helper(self, file_dict):
    """Write (value(s), fix flag(s)) parsed from a GALFIT file into parameters.

    *file_dict* maps parameter name -> sequence: for MultiParameter
    entries the first two items are the value pair and the remainder are
    fix flags; for scalar parameters it is simply (value, fix).
    """
    for k, v in file_dict.items():
        if k.startswith("_"):
            continue
        if issubclass(type(self.parameters[k]), MultiParameter):
            value = v[:2]
            fix_value = v[2:]
        else:
            value = v[0]
            fix_value = v[1]
        self.parameters[k].value = value
        self.parameters[k].fix = fix_value
# ==========================================================================================================
# File dict is only for fits
def from_file_helper_dict(self, file_in):
    """Parse a FITS-header chunk into (value, fix) pairs for this component.

    In GALFIT output headers, bracketed values ([x]) mark *fixed*
    parameters -- hence the 0/1 flag logic below.
    """
    # Feed in just the chunk from the main 'from_file' caller
    # This requires determining param_begin/end in that caller
    # This can handle component type but it is unnecessary
    assert self.component_type, f"Component type must be specified to read from file."
    # to be abundantly safe
    file_in = deepcopy(file_in)
    p_names = [
        k for k in load_default_parameters()[self.component_type].keys()
        if not k.startswith("_") and k != "position"
    ]
    # File dict is only for fits
    # Drop the COMP_n entry whose value is the component type itself
    file_in = {k : v for k,v in file_in.items() if v != self.component_type}
    list_vals = list(file_in.values())
    #file_in = {k:v for k,v in file_in.items() if not k.endswith("_YC") and not k.endswith("_XC")}
    file_dict = {}
    count = 0
    for k, v in file_in.items():
        # Position coordinates (_XC/_YC) are handled separately below
        if not k.endswith("_YC") and not k.endswith("_XC"):
            # (value, fix) where fix = 0 when the header value is bracketed
            file_dict[p_names[count]] = v.split()[0].strip("[]*"), 0 if ("[" in v) and ("]" in v) else 1
            count += 1
    if "position" in load_default_parameters()[self.component_type].keys():
        # Assume position is always the first and second index after the component
        file_dict["position"] = [i.strip("[]*") for i in list_vals[:2]] + \
            [0 if ("[" in i) and ("]" in i) else 1 for i in list_vals[:2]]
    self.update_parameters_file_helper(file_dict)
# ==========================================================================================================
# These used to be unified but Fourier threw a wrench in all that
# ==========================================================================================================
def from_file_helper_list(self, file_in):
    """Parse the text lines of a GALFIT block for this component.

    Each parameter line looks like ' 3) 12.95  1  # comment'; the number
    before ')' is mapped back to a parameter name via the defaults table,
    then values/fix flags are stored.
    """
    # Feed in just the chunk from the main 'from_file' caller
    # This requires determining param_begin/end in that caller
    # This can handle component type but it is unnecessary
    assert self.component_type, f"Component type must be specified to read from file."
    # to be abundantly safe
    file_in = deepcopy(file_in)
    # Excludes 0
    #p_numbers = list(self.param_numbers.keys())[1:]
    # Invert keys and values for the dict comp a few lines down
    p_numbers = {
        str(v.parameter_number) : k
        for k, v in load_default_parameters()[self.component_type].items()
        if not k.startswith("_")
    }
    file_list = file_in
    # parameter number -> raw text between ')' and '#'
    # NOTE: `not in ("#")` is a 1-char membership test against the plain
    # string "#", which here behaves the same as a tuple test.
    file_dict = {
        line[:line.index(")")].strip(f" {self.param_prefix}") : line[line.index(")") + 1 : line.index("#")].strip()
        for line in file_list
        if line.strip()[0] not in ("#") #, "6", "7", "8")
    } #, "Z")}
    # join split split gets rid of all extra spacing except for one between everything
    file_dict = {
        p_numbers[num] : " ".join(v.split()).split()
        for num, v in file_dict.items()
        # Sometimes GALFIT outputs empty lines so make sure the line is valid
        if str(num) in p_numbers
    }
    # This is for setting the fix value later
    if "skip" in file_dict:
        file_dict["skip"].append(None)
    self.update_parameters_file_helper(file_dict)
# ==========================================================================================================
def from_file(self, filename):
    """Populate this component's parameters from a GALFIT file.

    Dispatches on extension: '.fits' output files are read via their
    image header (dict path), anything else is treated as GALFIT text
    (list path).
    """
    # This function handles grabbing and storing the values from galfit files (input and output???)
    # It's written to generally handle both and stores everything in the respective component objects
    # Local helper: pull this component's key/value chunk out of a FITS
    # HDU header, delimited by self._start_dict/_end_dict.
    def from_fits(self, filename = "", image_num = 2):
        try:
            # Grabbing the filename
            #input_filename = glob_name(galaxy_path, '', filename)
            input_file = fits.open(filename)
        except FileNotFoundError:
            # NOTE(review): f-prefix with no placeholder -- "(unknown)"
            # looks like a lost {filename} interpolation; confirm upstream.
            print(f"Can't open to read the file, (unknown). Check name/permissions/directory.")
            return None
        except OSError as ose:
            print(f"Something went wrong! {ose}")
            return None
        input_in = dict(input_file[image_num].header)
        keys = list(input_in.keys())
        # Power and Fourier now require component number (for the component they modify) at instantiation
        # Should not affect output so here try/except is for anything else
        try:
            feed_in = {key : value for idx, (key, value) in enumerate(input_in.items())
                       if keys.index(self._start_dict) < idx <= keys.index(self._end_dict)}
        except ValueError as ve:
            # Trying to recover...
            # End will *always* (header excluded) be #_param
            component_end = [k for k in keys if k.endswith(self._end_dict[2:])][0]
            if component_end[0].isnumeric():
                component_start = f"{component_end[0]}_{self._start_dict[2:]}"
            else:
                print(f"Can't find start/end of {self.component_type} segment.")
                print(f"Check the filename or start/end_dict variables.")
                print(f"Filename: (unknown)")
                print(f"Start/End: {self._start_dict}/{self._end_dict}")
                raise ValueError(ve)
            # Mix check with value (component type) and end key because that's usually known
            # Comp type should be handled now that we include the beginning
            feed_in = {key : value for idx, (key, value) in enumerate(input_in.items())
                       if keys.index(component_start) <= idx <= keys.index(component_end)}
        input_file.close()
        return feed_in
    # Local helper: collect this component's lines from a GALFIT text
    # file, delimited by self._start_text/_end_text.
    def from_text(self, filename = ""):
        try:
            # Grabbing the filename
            #input_filename = glob_name(galaxy_path, '', filename)
            input_file = open(filename,'r')
        except FileNotFoundError:
            print(f"Can't open to read the file, (unknown). Check name/permissions/directory.")
            return None
        except OSError as ose:
            print(f"Something went wrong! {ose}")
            return None
        store = False
        feed_in = []
        for line in input_file:
            if line.strip().startswith(self._start_text):
                store = True
            if store:
                feed_in.append(line)
            if line.strip().startswith(self._end_text):
                store = False
        input_file.close()
        return feed_in
    ext = os.path.splitext(filename)[1]
    if ext == ".fits":
        feed_in = from_fits(self, filename)
        self.from_file_helper_dict(feed_in)
    else:
        feed_in = from_text(self, filename)
        self.from_file_helper_list(feed_in)
    #raise Exception("Something went wrong importing from text!")
    #update_parameters_file_helper(self, file_dict)
# ==========================================================================================================
def to_file(self, filename, *args):
    """Write this component (plus any extra components in *args*) to
    *filename* as GALFIT input text, ending with a '=' separator line.

    The Fourier component is written flush against the component that
    precedes it (no blank line), matching GALFIT's expected layout.
    """
    # For skipped power and fourier
    # if self.parameters.get("skip", 0) == 1 and self.component_type in ("power", "fourier"):
    #     return None
    try:
        with open(filename, "w") as f:
            f.write("\n")
            f.write(str(self))
            f.write("\n")
            # *args for writing in additional classes at the same time (save I/O)
            comp_names = [c.component_type for c in args]
            with_fourier = "fourier" in comp_names
            # Arbitrary # -- sentinel larger than any real index
            fourier_index = 1000
            if with_fourier:
                fourier_index = comp_names.index("fourier")
            for i, component in enumerate(args):
                # Skip means GALFIT will still optimize on these but not display them in the final fit
                # so commenting the below out.
                # For skipped power and fourier
                # if component.parameters.get("skip",0) == 1 and component.component_type in ("power", "fourier"):
                #     continue
                f.write(str(component))
                # Suppress the blank line right before the Fourier block
                if i != fourier_index - 1:
                    f.write("\n")
            f.write("="*80 + "\n")
    except FileNotFoundError:
        # NOTE(review): "(unknown)" looks like a lost {filename} placeholder
        print(f"Can't open to write the file, (unknown). Check permissions/directory.")
    except OSError as ose:
        print(f"Something went wrong! {ose}")
# In[5]:
class Sersic(GalfitComponent):
    """GALFIT 'sersic' profile component."""

    def __init__(self, component_number, **kwargs):
        # SersicParameters
        super().__init__(
            component_type = "sersic",
            component_number = component_number,
            parameters = load_default_sersic_parameters(component_number = component_number),
            **kwargs
        )
        # For reading from file
        self._start_dict = f"COMP_{self.component_number}"
        self._end_dict = f"{self.component_number}_PA"
        # Text markers kept at the base-class defaults:
        #   _start_text = f"# Component number: {self.component_number}"
        #   _end_text   = f"{self.param_prefix.strip()}10"
        # Maybe it's silly to do it this way but in the future, it should be easier
        # to implement new components and it should be safer
        #exec(GalfitComponent.component_get_set(load_default_sersic_parameters()))
# In[6]:
class Power(GalfitComponent):
    """GALFIT 'power' PA rotation function (spiral-structure modifier)."""

    def __init__(self, component_number, **kwargs):
        super().__init__(
            component_type = "power",
            component_number = component_number,
            param_prefix = "R",
            parameters = load_default_power_parameters(component_number = component_number),
            **kwargs
        )
        # For reading from file
        # 2_ may not always be the case but that's why I have a try except in there ;)
        self._start_dict = f"{self.component_number}_ROTF"
        self._end_dict = f"{self.component_number}_SPA"
        self._start_text = f"{self.param_prefix}0) power"
        # _end_text kept at the base-class default

    # This modifier carries no component number of its own, so drop the
    # "# Component number" header line from the base-class rendering.
    def __str__(self):
        return GalfitComponent.__str__(self).split("\n", 1)[1]

    def __repr__(self):
        return GalfitComponent.__repr__(self).split("\n", 1)[1]
# In[7]:
class Fourier(GalfitComponent):
    """GALFIT azimuthal Fourier-mode modifier (F1, F3, ...).

    Parameters are FourierMode objects keyed by mode name (e.g. "F1"),
    each holding an (amplitude, phase_angle) pair, so several base-class
    behaviors are overridden here.
    """

    # kwargs is a placeholder
    def __init__(self, component_number, n = None, **kwargs):
        """
        component_number : number of the component these modes modify.
        n : dict of {mode number : (amplitude, phase_angle)};
            defaults to {1 : (0.05, 45), 3 : (0.05, 25)}.
        """
        # Avoid a mutable default argument; None stands in for the usual
        # default modes (behavior unchanged for all existing callers).
        if n is None:
            n = {1 : (0.05, 45), 3 : (0.05, 25)}
        parameters = load_default_fourier_parameters(component_number = component_number)
        if n:
            for fnum, (amplitude, phase_angle) in n.items():
                parameters[f"F{fnum}"] = FourierMode(
                    mode = str(fnum),
                    amplitude = amplitude,
                    phase_angle = phase_angle,
                    component_number = component_number
                )
        GalfitComponent.__init__(self,
            component_type = "fourier",
            param_prefix = "F",
            component_number = component_number,
            parameters = parameters,
            **kwargs
        )
        self.sort_parameters()
        # normal rules don't apply here
        # Still use inheritance for the other functions
        # TODO: FIND SOME WAY TO UPDATE THIS WHEN OBJECT IS UPDATED
        # preferably without copying and pasting things
        # TODO: These do not update via update_param_values...
        self._amplitudes = [mode.amplitude for pname, mode in self.parameters.items() if pname != "skip"]
        self._phase_angles = [mode.phase_angle for pname, mode in self.parameters.items() if pname != "skip"]
        p_numbers = list(self.parameters.keys())
        # For reading from file
        self._start_dict = f"{self.component_number}_F{p_numbers[0]}"
        self._end_dict = f"{self.component_number}_F{p_numbers[-1]}PA"
        self._start_text = f"F{p_numbers[0]}"
        self._end_text = f"{self.param_prefix}{p_numbers[-1]}"
        # Dynamically generate amplitude/phase-angle accessors
        exec(
            generate_get_set(
                {
                    "amplitudes" : "_amplitudes",
                    "phase_angles" : "_phase_angles"
                }
            )
        )

    # ======================================================================
    # No "# Component number" header applies to Fourier modes, so drop the
    # first line of the base-class rendering.
    def __str__(self):
        return "\n".join(GalfitComponent.__str__(self).split("\n")[1:])

    def __repr__(self):
        return "\n".join(GalfitComponent.__repr__(self).split("\n")[1:])

    # ======================================================================
    # To keep things in proper order
    def sort_parameters(self):
        """Sort the parameter dict by key so output order is stable."""
        self.parameters = dict(sorted(self.parameters.items()))

    # ======================================================================
    def include_fn(self, n:dict):
        """Add/replace modes from {mode number : (amplitude, phase_angle)}."""
        for fnum, values in n.items():
            self.parameters[f"{self.param_prefix}{str(fnum)}"] = FourierMode(
                mode = str(fnum),
                amplitude = values[0],
                phase_angle = values[1],
                component_number = self.component_number
            )
        self.sort_parameters()

    # ======================================================================
    def from_file_helper_dict(self, file_in):
        """Parse FITS-header entries like '2_F1': '0.14 +/- 0.01' into modes.

        Bracketed header values denote fixed parameters; a second pass
        sets the corresponding fix flags.
        """
        # to be abundantly safe
        file_in = deepcopy(file_in)

        def mode_number(key):
            # ex: '1_F1' -> 1 and '1_F3PA' -> 3
            # NOTE: the single-character slice only supports modes 0-9.
            return int(key.split("_")[1][1])

        n_dict = {}
        for k, v in file_in.items():
            if "+/-" in v:
                v = v.split("+/-")[0]
            n_dict.setdefault(mode_number(k), []).append(float(v.strip("[* ] ")))
        self.include_fn(n_dict)
        # Second pass: fix flags ("0" = fixed, i.e. bracketed in the header).
        # BUG FIX: the original reused the stale k_num left over from the
        # first loop, so every flag landed on the *last* parsed mode.
        for k, v in file_in.items():
            k_num = mode_number(k)
            fixed = "0" if ("[" in v and "]" in v) else "1"
            if "PA" in k:
                self.parameters[f"{self.param_prefix}{k_num}"].fix_phase_angle = fixed
            else:
                self.parameters[f"{self.param_prefix}{k_num}"].fix_amplitude = fixed

    # ======================================================================
    def from_pandas(self, input_df):
        """Update mode values from a single-row DataFrame whose columns are
        named like 'F1_amplitude_fourier_2'."""
        param_names = [n.split(f"_{self.component_type}")[0] for n in input_df.columns]
        param_values = input_df.iloc[0].values.astype(float)
        new_param_dict = dict(zip(param_names, param_values))
        # Re-pack the per-mode amplitude/phase-angle columns into tuples
        f_modes = {pn.split("_")[0] for pn in param_names}
        a = "amplitude"
        pha = "phase_angle"
        for mode in f_modes:
            if mode == "skip":
                continue
            new_param_dict[mode] = (new_param_dict.pop(f"{mode}_{a}"), new_param_dict.pop(f"{mode}_{pha}"))
        for k in self.parameters.keys():
            if k.startswith("_"):
                continue
            self.parameters[k].value = new_param_dict[k]

    # ======================================================================
    def update_from_log(self, in_line):
        """Update mode values from a fit.log line, e.g.
        fourier : (1: 0.06, -6.67) (3: 0.05, 0.18)
        """
        # rstrip avoids a hanging ) later
        # NOTE: lstrip strips a *character set*; it happens to work for the
        # literal prefix "fourier : ".
        params = in_line.lstrip("fourier : ").replace(" ", "").rstrip(")").split(")(")
        for i, pname in enumerate(self.parameters.keys()):
            if pname != "skip":
                # Parse "1:0.06,-6.67" -> (0.06, -6.67) explicitly instead
                # of the original eval() on externally-sourced text.
                amp, pa = params[i].split(":")[1].replace("*", "").split(",")
                self.parameters[pname].value = (float(amp), float(pa))
# In[8]:
class Sky(GalfitComponent):
    """GALFIT 'sky' background component."""

    def __init__(self, component_number, **kwargs):
        super().__init__(
            component_type = "sky",
            component_number = component_number,
            parameters = load_default_sky_parameters(component_number = component_number),
            **kwargs
        )
        # For reading from file
        self._start_dict = f"COMP_{self.component_number}"
        self._end_dict = f"{self.component_number}_DSDY"
        self._start_text = f"# Component number: {self.component_number}"
        self._end_text = f"{self.param_prefix.strip()}3"

    # ======================================================================
    @GalfitComponent.update_parameter_dict_with_list
    def update_from_log(self, in_line):
        """Pull sky values from a fit.log line, e.g.
        sky : [ 63.00, 63.00] 1130.51 -4.92e-02 1.00e-02
        The [2:] slice drops the (fixed) position pair.
        """
        numeric_tokens = [
            tok.strip("[*,]")
            for tok in in_line.split()
            if any(ch.isdigit() for ch in tok)
        ]
        return numeric_tokens[2:]
# In[9]:
class GalfitHeader(GalfitComponent):
    """The GALFIT control-parameter header (lines A-P of an input file).

    Rendered output wraps the base-class text with the menu-file line and
    a canned parameter guide (``post_header``).
    """

    def __init__(self, parameters = {}, galaxy_name = "", **kwargs):
        # normal rules don't apply here
        # Still use inheritance for the other functions
        # If not fully specified, will use galaxy_name as default so it's good to use
        # it as an argument even if I am specifying each individually
        # NOTE(review): the `parameters` argument is never used -- defaults
        # from load_default_header_parameters are passed instead; confirm intent.
        GalfitComponent.__init__(self,
            component_type = "header",
            param_prefix = "",
            parameters = load_default_header_parameters(galaxy_name = galaxy_name),
            **kwargs
        )
        # For reading from file
        self._start_dict = "INITFILE"
        self._end_dict = "MAGZPT"
        self._start_text = f"A" # {self.input_image}"
        self._end_text = f"P" #{self.optimize}"
        # No newlines added so the strings can be added to directly
        self.input_menu_file = f"# Input menu file: {kwargs.get('input_menu_file', '')}.in"
        self.extra_header_info = f"# {kwargs.get('extra_header_info', '')}"
        # Don't mess with this tabbing
        self.post_header = """
# INITIAL FITTING PARAMETERS
#
# For component type, the allowed functions are:
# sersic, expdisk, edgedisk, devauc, king, nuker, psf,
# gaussian, moffat, ferrer, and sky.
#
# Hidden parameters will only appear when they're specified:
# Bn (n=integer, Bending Modes).
# C0 (diskyness/boxyness),
# Fn (n=integer, Azimuthal Fourier Modes).
# R0-R10 (coordinate rotation, for creating spiral structures).
# To, Ti, T0-T10 (truncation function).
#
# ------------------------------------------------------------------------------
# par) par value(s) fit toggle(s) # parameter description
# ------------------------------------------------------------------------------
"""
        # dict for get set looks like this:
        # "position" : parameters["position"]
        #exec(GalfitComponent.component_get_set(load_default_header_parameters()))

    # ======================================================================
    def __str__(self):
        # Prepend the menu-file/extra-info lines and drop the base class's
        # "# Component number" header, which doesn't apply here.
        return self.input_menu_file + "\n\n" + \
            self.extra_header_info + "\n\n" + \
            self._section_sep + "\n" + \
            "# IMAGE and GALFIT CONTROL PARAMETERS\n" + \
            "\n".join(GalfitComponent.__str__(self).split("\n")[1:]) + \
            self.post_header
    # def __repr__(self):
    #     return self.input_menu_file + "\n\n" + \
    #         self.extra_header_info + "\n\n" + \
    #         self._section_sep + "\n" + \
    #         "# IMAGE and GALFIT CONTROL PARAMETERS\n" + \
    #         "\n".join(GalfitComponent.__repr__(self).split("\n")[1:]) + \
    #         self.post_header
    # ======================================================================
    # def to_pandas(self):
    #     name = f"{self.component_type}_{self.component_number}"
    #     parameter_dict = deepcopy(self.parameters)
    #     for pname, pval in self.parameters.items():
    #         if pname.startswith("_"):
    #             parameter_dict.pop(pname)
    #             continue
    #         # Split multivalued parameters like position
    #         # briefly convert to NumParameter to coincide with others
    #         if isinstance(pval, MultiParameter):
    #             old_keys = pval.value._asdict()
    #             parameter_dict.pop(pname)
    #             parameter_dict.update({f"{pname}_{k}" : NumParameter(v) for k, v in old_keys.items()})
    #     parameter_dict = {f"{k}_{name}" : v.value for k, v in parameter_dict.items()}
    #     all_data = pd.DataFrame(
    #         parameter_dict,
    #         index = [name]
    #     )
    #     # Move skip to end for reasons
    #     skip_col = f"skip_{name}"
    #     if skip_col in all_data.columns:
    #         all_data.insert(len(all_data.columns) - 1, skip_col, all_data.pop(skip_col))
    #     return all_data
    # ======================================================================
    def update_parameters_file_helper(self, file_dict):
        # Header parameters carry no fix flags, so only values are written.
        for k,v in file_dict.items():
            if k.startswith("_"):
                continue
            value = v
            if not issubclass(type(self.parameters[k]), MultiParameter):
                value = v[0]
            self.parameters[k].value = value
    # ======================================================================
    # This one is unlike the others so does not call
    # self.update_parameters_file_helper(file_dict)
    @GalfitComponent.update_parameter_dict_with_dict
    def from_file_helper_dict(self, file_in, **kwargs):
        """Map FITS-header keys (SIGMA, PSF, ...) onto header parameters."""
        # Feed in just the chunk from the main 'from_file' caller
        # This requires determining param_begin/end in that caller
        # to be abundantly safe
        file_in = deepcopy(file_in)
        file_dict = kwargs
        # What can actually be gleaned from file
        file_dict["sigma_image"] = file_in["SIGMA"]
        file_dict["psf"] = file_in["PSF"]
        file_dict["constraint_file"] = file_in["CONSTRNT"]
        # WARNING: eval() on file contents -- only safe for trusted GALFIT
        # output; consider ast.literal_eval for hardening.
        file_dict["region_to_fit"] = tuple([int(v) for v in eval(file_in["FITSECT"].replace(":", ","))])
        file_dict["convolution_box"] = tuple([int(v) for v in eval(f"({file_in['CONVBOX']})")])
        file_dict["mag_photo_zeropoint"] = float(file_in["MAGZPT"])
        return file_dict
# In[10]:
def load_all_components(with_header = True):
    """Instantiate one of every supported component type.

    Returns a dict keyed by component-type name; the header is included
    unless *with_header* is False.
    """
    components = {}
    if with_header:
        components["header"] = GalfitHeader()
    components["sersic"] = Sersic(1)
    components["power"] = Power(1)
    components["fourier"] = Fourier(1)
    components["sky"] = Sky(2)
    return components
# In[11]:
# Regression-test harness: the sections below only run when this notebook
# export is executed as a script.
if __name__ == "__main__":
    from RegTest.RegTest import *
    end_str = "--\n"
# In[12]:
if __name__ == "__main__":
    def unit_tests(component, parameter, bogus_list, bogus_dict, log_line, pvalue = 555555, end_str = "--\n", base_out = ""):
        """Exercise a component's full round-trip API: direct value
        modification, file-helper parsing (list and dict forms), file
        output, pandas round-trip, and fit.log parsing."""
        component_copy = deepcopy(component)
        print(f"Defaults{end_str}", component.parameters)
        print()
        component.parameters[parameter].value = pvalue
        print(f"Modifying {parameter} directly{end_str}", component)
        component.from_file_helper_list(bogus_list)
        print(f"From file helper, list{end_str}", component)
        print(f"Sending to file{end_str}", component)
        component.to_file(f"{base_out}_{component.component_type.capitalize()}.txt")
        # Restore defaults before exercising the dict path
        component = deepcopy(component_copy)
        component.from_file_helper_dict(bogus_dict)
        print(f"From file helper, dict{end_str}", component)
        component_df = component.to_pandas()
        print(f"To pandas{end_str}", component_df)
        print()
        component_df.iloc[0,0] = 111
        component_df.iloc[0,1] = 112
        component_df.iloc[0,2] = 113
        component.from_pandas(component_df)
        print(f"From pandas, modified parameters{end_str}", component)
        component.update_from_log(log_line)
        print(f"From log line{end_str}", component)
# In[13]:
# Unit Test for GalfitComponent
if __name__ == "__main__":
    # Dump every attribute the base class sets at construction time.
    component = GalfitComponent("header")
    print(f"Testing default values of base class GalfitComponent{end_str}")
    for k,v in component.__dict__.items():
        print(k,v)
# In[14]:
if __name__ == "__main__":
    # GalfitHeader round-trip: parse from text lines, write to file, then
    # parse from a FITS-style header dict.
    bogus_list = """A) /home/portmanm/run6_1000_galfit_two_fit/sparcfire-in/1237667783385612464.fits # Input data image (FITS file)
B) /home/portmanm/run6_1000_galfit_two_fit/sparcfire-tmp/galfits/1237667783385612464_out.fits # Output data image block
C) none # Sigma image name (made from data if blank or "none")
D) none # Input PSF image and (optional) diffusion kernel
E) 1 # PSF fine sampling factor relative to data
F) /home/portmanm/run6_1000_galfit_two_fit/sparcfire-tmp/galfit_masks/1237667783385612464_star-rm.fits # Bad pixel mask (FITS image or ASCII coord list)
G) none # File with parameter constraints (ASCII file)
H) 43 111 43 111 # Image region to fit (xmin xmax ymin ymax)
I) 50 50 # Size of the convolution box (x y)
J) 24.800 # Magnitude photometric zeropoint
K) 0.396 0.396 # Plate scale (dx dy) [arcsec per pixel]
O) regular # Display type (regular, curses, both)
P) 0 # Choose: 0=optimize, 1=model, 2=imgblock, 3=subcomps""".split("\n")
    bogus_dict = eval("""{'INITFILE': '/home/portmanm/testing_python_control/sparcfire-out/1237667429560025',
'DATAIN': '/home/portmanm/testing_python_control/sparcfire-in/12376674295600251',
'SIGMA': 'none',
'PSF': 'none',
'CONSTRNT': 'none',
'MASK': '/home/portmanm/testing_python_control/sparcfire-tmp/galfit_masks/123',
'FITSECT': '[36:98,37:99]',
'CONVBOX': '51, 50',
'MAGZPT': 24.8}""")
    print(f"Initializing header{end_str}")
    header = GalfitHeader()
    print(header)
    print()
    print(f"Reading header from file via list{end_str}")
    header.from_file_helper_list(bogus_list)
    #header.update_param_values()
    print(header)
    print()
    print(f"Printing header to file {base_out}_header.txt{end_str}")
    header.to_file(f"{base_out}_header.txt")
    print()
    print(f"Reading header from file via dict {end_str}")
    header.from_file_helper_dict(bogus_dict)
    #header.update_param_values()
    print(header)
    print()
# In[15]:
if __name__ == "__main__":
    # Demonstrate kwarg overrides at construction time.
    bulge = Sersic(
        component_number = 1,
        position = (25,25),
        magnitude = 15,
        # Sometimes sparcfire messes this up
        effective_radius = 10.010101001,
        # According to other paper GALFIT usually doesn't have a problem with the index
        sersic_index = 4.52,
        axis_ratio = 0.6,
        position_angle = 90.01
    )
    print(f"Updating Sersic (as an example GalfitComponent) from kwargs{end_str}", bulge)
# In[16]:
if __name__ == "__main__":
    # Sersic component round-trip tests.
    bogus_list = """ 0) sersic # Component type
1) 76.7000 76.5000 0 0 # Position x, y
3) 12.9567 1 # Integrated magnitude
4) 18.5147 1 # R_e (effective radius) [pix]
5) 0.6121 1 # Sersic index n (de Vaucouleurs n=4)
6) 0.0000 0 # -----
7) 0.0000 0 # -----
8) 0.0000 0 # -----
9) 0.3943 1 # Axis ratio (b/a)
10) -48.3372 1 # Position angle (PA) [deg: Up=0, Left=90]
Z) 0 # Skip this model in output image? (yes=1, no=0)""".split("\n")
    bogus_dict = eval("""{'1_XC': '[67.3796]',
'1_YC': '[67.7662]',
'1_MAG': '13.1936 +/- 0.0257',
'1_RE': '15.5266 +/- 0.1029',
'1_N': '0.3433 +/- 0.0064',
'1_AR': '0.6214 +/- 0.0039',
'1_PA': '-19.1534 +/- 0.5867'}""")
    log_line = "sersic : ( [62.90], [62.90]) 14.11* 13.75 0.30 0.63 60.82"
    bulge = Sersic(1)
    unit_tests(bulge, "magnitude", bogus_list, bogus_dict, log_line, base_out = base_out)
# In[17]:
if __name__ == "__main__":
    # Power rotation-function round-trip tests.
    bogus_list = """ R0) power # PA rotation func. (power, log, none)
R1) 0.0000 0 # Spiral inner radius [pixels]
R2) 42.0200 0 # Spiral outer radius [pixels]
R3) 595.0912 1 # Cumul. rotation out to outer radius [degrees]
R4) -0.1961 1 # Asymptotic spiral powerlaw
R9) 49.1328 1 # Inclination to L.o.S. [degrees]
R10) 72.0972 1 # Sky position angle""".split("\n")
    bogus_dict = eval("""{'2_ROTF': 'power',
'2_RIN': '[0.0000]',
'2_ROUT': '[22.0110]',
'2_RANG': '79.0069 +/- 11.7225',
'2_ALPHA': '-2.3697 +/- 0.0691',
'2_INCL': '40.8043 +/- 2.7380',
'2_SPA': '24.3010 +/- 4.5444'}""")
    log_line = "power : [0.00] 23.51 219.64 -0.16* --- -44.95 -15.65"
    arms = Power(2)
    unit_tests(arms, "powerlaw_index", bogus_list, bogus_dict, log_line, base_out = base_out)
# In[18]:
if __name__ == "__main__":
    # Fourier-mode round-trip tests (note the tuple pvalue).
    bogus_list = """ F1) 0.2721 -56.9126 1 1 # Azim. Fourier mode 1, amplitude, & phase angle
F3) -0.0690 -31.8175 1 1 # Azim. Fourier mode 3, amplitude, & phase angle""".split("\n")
    bogus_dict = eval("""{'2_F1': '0.1449 +/- 0.0123',
'2_F1PA': '44.3015 +/- 7.1154',
'2_F3': '0.0979 +/- 0.0104',
'2_F3PA': '-35.1366 +/- 4.4060'}""")
    log_line = "fourier : (1: 0.06, -6.67) (3: 0.05, 0.18)"
    fourier = Fourier(2)
    unit_tests(fourier, "F1", bogus_list, bogus_dict, log_line, pvalue = (555, 555), base_out = base_out)
# In[19]:
if __name__ == "__main__":
    # Sky component round-trip tests.
    bogus_list = """ 0) sky # Component type
1) 1112.1005 1 # Sky background at center of fitting region [ADUs]
2) 1.264e-02 1 # dsky/dx (sky gradient in x) [ADUs/pix]
3) 1.813e-02 1 # dsky/dy (sky gradient in y) [ADUs/pix]
Z) 0 # Skip this model in output image? (yes=1, no=0)""".split("\n")
    bogus_dict = eval("""{'COMP_3': 'sky',
'3_XC': '[67.0000]',
'3_YC': '[68.0000]',
'3_SKY': '1133.4166 +/- 0.1595',
'3_DSDX': '0.0119 +/- 0.0048',
'3_DSDY': '-0.0131 +/- 0.0047'}""")
    log_line = "sky : [ 63.00, 63.00] 1130.51 -4.92e-02 1.00e-02"
    sky = Sky(3)
    unit_tests(sky, "sky_background", bogus_list, bogus_dict, log_line, base_out = base_out)
# In[20]:
if __name__ == "__main__":
    # Smoke-test one instance of every component type.
    print(f"Checking load_all_components() function{end_str}")
    for component_name, component in load_all_components().items():
        print(component_name)
        print(component)
# In[21]:
if __name__ == "__main__":
    # Regenerate the importable module .py from this notebook export.
    export_to_py("Components", pj(_MODULE_DIR, "Classes", "Components"))
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@GalfitModule@Classes@Components.py@.PATH_END.py
|
{
"filename": "settings_tutorial.py",
"repo_name": "ageller/firefly",
"repo_path": "firefly_extracted/firefly-main/src/firefly/ntbks/py_conversions/settings_tutorial.py",
"type": "Python"
}
|
#!/usr/bin/env python
# coding: utf-8
# `firefly/ntbks/settings_tutorial.ipynb`
# In[1]:
# Notebook-export preamble: enable module autoreload inside Jupyter.
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
from IPython.display import YouTubeVideo
# A recording of this jupyter notebook in action is available at:
# In[2]:
# Embed the walkthrough recordings (these only render inside Jupyter).
YouTubeVideo("Xt1vce_2DYo")
# In[3]:
YouTubeVideo("ft0Y3XNJhl4")
# In[4]:
import sys
import os
import numpy as np
from firefly.data_reader import ParticleGroup,Settings,ArrayReader
# # Tutorial notebook: Managing Custom Settings
# One of the core features of Firefly is the ability to customize the user interface (UI) and the startup behavior to make bespoke iterations of Firefly using ones own data. We have organized the different options that one can customize into different settings groups that fall into two categories: those that affect the app as a whole and those that are particular to an individual group of particles.
#
# **App Settings** | |**Particle Group Settings**
# :-----:|:--:|:------:
# Startup| |Startup
# UI| |UI
# Window| |Filter
# Camera| |Colormap
#
#
#
# Specific information for each key can be found in <a href="https://alexbgurvi.ch/Firefly/docs/build/html/data_reader/settings.html">this documentation</a>.
#
# To create the necessary JSON files one should use the `firefly.data_reader.Settings` class to create a `Settings` object. Once you have a `Settings` object you can manipulate the settings as you see fit and then either
# 1. manually save it to a file using the `outputToJSON()` method or
# 2. connect it to a `firefly.data_reader.Reader` object in order to link it to a specific visualization (see the <a href="https://alexbgurvi.ch/Firefly/docs/build/html/data_reader/reader.html">reader documentation</a> for details on how to use a `Reader` object).
# In[5]:
## let's create an settings object with the default keys
settings = Settings()
## we'll print the current settings to the console, organized into groups
## (but we'll use the values=False keyword argument because we only want to see the keys for now)
settings.printKeys(values=False)
# ## Settings can be changed the same way you would change a key in a dictionary
# There is key validation (so you can't attempt to set a setting that doesn't exist) but there is no value validation, so be careful that you use appropriate values or your app might not work. See the <a href="https://alexbgurvi.ch/Firefly/docs/build/html/data_reader/settings.html">settings documentation</a> for details on what values each setting can take.
# In[6]:
## let's change the title that shows up in the browser's tab list
print("before:")
## print only the settings that have to do with the window
settings.printKeys(pattern='window')
## update the title using dictionary syntax
settings['title']='---> My Favorite Data <--- '
print("after:")
## print only the settings that have to do with the window to confirm it changed
settings.printKeys(pattern='window')
# ## Settings are most useful when connected to a `firefly.data_reader.Reader` object
# Doing so allows many of the necessary settings to be automatically generated as additional particle groups are added.
# In[7]:
## let's create some sample data, a grid of points in a 3d cube
my_coords = np.linspace(-10,10,20)
xs,ys,zs = np.meshgrid(my_coords,my_coords,my_coords)
xs,ys,zs = xs.flatten(),ys.flatten(),zs.flatten()
coords = np.array([xs,ys,zs]).T
## we'll pick some random field values to demonstrate filtering/colormapping
fields = np.random.random(size=xs.size)
# Before we've attached the `Settings` object the particle settings are all empty.
# In[8]:
settings.printKeys(pattern='particle')
# We'll use a `firefly.data_reader.ArrayReader`, a workhorse `firefly.data_reader.Reader` sub-class with many convenient functions. See the <a href="https://alexbgurvi.ch/Firefly/docs/build/html/data_reader/reader.html">reader documentation</a> for details that are outside the scope of this tutorial.
# In[9]:
## initialize an ArrayReader
reader = ArrayReader(
coordinates=[coords[:-1],coords], ## pass in two particle groups as a demonstration (just copies of our sample data)
write_to_disk=False,
settings=settings, ## the settings object to link
fields=[[],[fields,fields]]) ## field data for each particle group, 0 fields for 1 and 2 repeated fields for the other.
# The original `Settings` object is stored in `reader.settings`.
# In[10]:
## demonstrate that reader.settings is the original settings object
print('(reader.settings is settings) =',reader.settings is settings)
print()
reader.settings.printKeys(pattern='particle')
# Notice that the dictionaries are filled with keys corresponding to each of the particle groups we passed in and sensible default values for each. The values of nested dictionaries should be changed by accessing each in turn, e.g.
# ```python
# settings['colormapLims']['PGroup_1']['field0'] = [0,1]
# ```
# for the purposes of this tutorial, we'll just go ahead and output the `Settings` object we have manually
# In[11]:
## output the example settings file to a .json in this directory
settings.outputToJSON('.','example')
# ## Settings can also be imported from `.json` files
# Only settings defined in the file will be overwritten, so you can also mix-and-match settings files.
# In[12]:
## initialize a new settings object
new_settings = Settings()
## import the settings from what we just saved above; prints the settings that are updated
new_settings.loadFromJSON("./exampleSettings.json")
# ## Attaching a ParticleGroup to a Settings
# One other thing you may want to do (perhaps in the course of building your own custom `Reader` sub-class) is link a `firefly.data_reader.ParticleGroup` object to a `Settings` object so that the different particle settings can be imported.
# `ParticleGroup` settings can be changed in `settings_default` attribute (which is just a normal python dictionary).
# In[13]:
## create a test particle group
particleGroup = ParticleGroup('test',coords)
## update the color of this particle group *before* attaching it to a settings object
particleGroup.settings_default['color'] = [0,0,1,1]
# In[14]:
## attach the particle group to the settings object
## you can find the settings in the "particleGroup.attached_settings attribute"
new_settings.attachSettings(particleGroup)
print('(particleGroup.attached_settings is new_settings) =',particleGroup.attached_settings is new_settings)
print()
particleGroup.attached_settings.printKeys(pattern='particle')
# Notice that the `'test'` particle group now appears in the particle settings dictionaries (and in particular, note that `settings['color']['test'] = [0,0,1,1]`.
|
agellerREPO_NAMEfireflyPATH_START.@firefly_extracted@firefly-main@src@firefly@ntbks@py_conversions@settings_tutorial.py@.PATH_END.py
|
{
"filename": "_legendgrouptitle.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/scatter/_legendgrouptitle.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Legendgrouptitle(_BaseTraceHierarchyType):
    """Title displayed for a legend group of a scatter trace.

    Auto-generated plotly hierarchy type exposing two settable
    properties: ``font`` (the title font) and ``text`` (the title
    string). Values are validated/stored via the base class's
    ``__setitem__`` machinery.
    """
    # class properties
    # --------------------
    _parent_path_str = "scatter"
    _path_str = "scatter.legendgrouptitle"
    _valid_props = {"font", "text"}
    # font
    # ----
    @property
    def font(self):
        """
        Sets this legend group's title font.
        The 'font' property is an instance of Font
        that may be specified as:
          - An instance of :class:`plotly.graph_objs.scatter.legendgrouptitle.Font`
          - A dict of string/value properties that will be passed
            to the Font constructor. Supported dict properties include
            color, family (CSS font-family list; servers support a
            fixed set such as "Arial", "Courier New", "Open Sans",
            "Times New Roman", ...), lineposition ("under"/"over"/
            "through" combinations), shadow (CSS text-shadow; "auto"
            gives minimal shadow with contrast color), size, style
            (normal/italic), textcase (capitalization), variant and
            weight (boldness).
        Returns
        -------
        plotly.graph_objs.scatter.legendgrouptitle.Font
        """
        return self["font"]
    @font.setter
    def font(self, val):
        self["font"] = val
    # text
    # ----
    @property
    def text(self):
        """
        Sets the title of the legend group.
        The 'text' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string
        Returns
        -------
        str
        """
        return self["text"]
    @text.setter
    def text(self, val):
        self["text"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # NB: this is a runtime string consumed by the help system, not a
        # docstring; do not reformat.
        return """\
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.
        """
    def __init__(self, arg=None, font=None, text=None, **kwargs):
        """
        Construct a new Legendgrouptitle object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.scatter.Legendgrouptitle`
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.

        Returns
        -------
        Legendgrouptitle
        """
        super(Legendgrouptitle, self).__init__("legendgrouptitle")
        # Internal fast path: adopt an existing parent without validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return
        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            # shallow copy so the pops below don't mutate the caller's dict
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scatter.Legendgrouptitle
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.Legendgrouptitle`"""
            )
        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)
        # Populate data dict with properties
        # ----------------------------------
        # explicit keyword arguments take precedence over values in `arg`
        _v = arg.pop("font", None)
        _v = font if font is not None else _v
        if _v is not None:
            self["font"] = _v
        _v = arg.pop("text", None)
        _v = text if text is not None else _v
        if _v is not None:
            self["text"] = _v
        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))
        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@scatter@_legendgrouptitle.py@.PATH_END.py
|
{
"filename": "fgmc_functions.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/pycbc/population/fgmc_functions.py",
"type": "Python"
}
|
# Copyright (C) 2021 Thomas Dent
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
"""
A set of helper functions for evaluating event rates, densities etc.
See https://dcc.ligo.org/LIGO-T2100060/public for technical explanations
"""
from os.path import basename
import bisect
from itertools import chain as it_chain, combinations as it_comb
import numpy as np
from pycbc import conversions as conv
from pycbc import events
from pycbc.events.coinc import mean_if_greater_than_zero as coinc_meanigz
from pycbc.events import triggers
from pycbc.io.hdf import HFile
def filter_bin_lo_hi(values, lo, hi):
    """Return a boolean mask of values lying strictly inside (lo, hi).

    Raises RuntimeError if any value falls exactly on a bin edge, since
    such a value cannot be unambiguously assigned to a bin.
    """
    # sign is +1 strictly inside, -1 strictly outside, 0 exactly on an edge
    edge_sign = np.sign((values - lo) * (hi - values))
    on_edge = edge_sign == 0
    if np.any(on_edge):
        raise RuntimeError('Edge case! Bin edges', lo, hi,
                           'value(s)', values[on_edge])
    return edge_sign == 1
def filter_tmplt_mchirp(bankf, lo_mchirp, hi_mchirp):
    """Boolean mask, in template id order, of bank templates whose chirp
    mass lies strictly between lo_mchirp and hi_mchirp."""
    with HFile(bankf) as bank:
        mc = conv.mchirp_from_mass1_mass2(bank['mass1'][:], bank['mass2'][:])
    return filter_bin_lo_hi(mc, lo_mchirp, hi_mchirp)
def read_full_data(fullf, rhomin, tmplt_filter=None):
    """Read the zero- and time-lagged triggers identified by a specific
    set of templates.

    Parameters
    ----------
    fullf:
        File that stores zerolag and slide triggers
    rhomin: float
        Ranking statistic threshold
    tmplt_filter: array of Booleans
        Filter over the array of templates stored in the bank file

    Returns
    -------
    dictionary
        containing foreground triggers and background information
    """
    with HFile(fullf, 'r') as f:
        # Booleans over bg/fg events selecting those in the template bin
        keep_bg = tmplt_filter[f['background_exc/template_id'][:]]
        keep_fg = tmplt_filter[f['foreground/template_id'][:]]
        fg_stat = f['foreground/stat'][:][keep_fg]
        fg_ifar = f['foreground/ifar'][:][keep_fg]
        # arbitrarily choose time from one of the ifos
        fg_time = f['foreground/time1'][:][keep_fg]
        bg_stat = f['background_exc/stat'][:][keep_bg]
        bg_dec = f['background_exc/decimation_factor'][:][keep_bg]
    # apply the ranking statistic threshold
    loud_fg = fg_stat > rhomin
    loud_bg = bg_stat > rhomin
    return {'zerolagstat': fg_stat[loud_fg],
            'zerolagifar': fg_ifar[loud_fg],
            'zerolagtime': fg_time[loud_fg],
            'dec_factors': bg_dec[loud_bg],
            'cstat_back_exc': bg_stat[loud_bg],
            'file_name': fullf}
def read_full_data_mchirp(fullf, bankf, rhomin, mc_lo, mc_hi):
    """Read triggers from fullf restricted to bank templates with chirp
    mass between mc_lo and mc_hi, above statistic threshold rhomin."""
    mask = filter_tmplt_mchirp(bankf, mc_lo, mc_hi)
    return read_full_data(fullf, rhomin, mask)
def log_rho_bg(trigs, counts, bins):
    """
    Evaluate the log background PDF at the given statistic values.

    trigs: zerolag event statistic values
    counts: background histogram bin heights (left unmodified; a local
        copy is used internally)
    bins: bin edges of the background histogram

    Returns:
    log of background PDF at the zerolag statistic values,
    fractional uncertainty due to Poisson count (set to 100% for empty bins)
    """
    trigs = np.atleast_1d(trigs)
    if len(trigs) == 0:  # corner case
        return np.array([]), np.array([])
    assert np.all(trigs >= np.min(bins)), "can't have triggers below bin lower limit"
    # Work on a copy: the fictitious counts inserted below previously
    # leaked back into the caller's stored histogram, corrupting it for
    # subsequent calls.
    counts = np.asarray(counts).copy()
    N = sum(counts)
    log_rhos = []
    fracerr = []
    # If any zerolag triggers are louder than the max bin, add one
    # fictitious count in a bin extending from the limit of the slide
    # triggers out to the loudest trigger.
    if np.any(trigs >= np.max(bins)):
        N = N + 1
    for t in trigs:
        if t >= np.max(bins):
            # For a trigger louder than the max bin, use the fictitious
            # count spread over [bins[-1], max(trigs)].
            # Fractional error is 100%
            log_rhos.append(-np.log(N) - np.log(np.max(trigs) - bins[-1]))
            fracerr.append(1.)
        else:
            i = bisect.bisect(bins, t) - 1
            # If there are no counts for a foreground trigger put a fictitious
            # count in the (local copy of the) background bin
            if counts[i] == 0:
                counts[i] = 1
            log_rhos.append(np.log(counts[i]) - np.log(bins[i+1] - bins[i])
                            - np.log(N))
            fracerr.append(counts[i] ** -0.5)
    return np.array(log_rhos), np.array(fracerr)
def log_rho_fg_analytic(trigs, rhomin):
    """Log PDF of an analytic rho^-4 distribution defined above the
    threshold rhomin: p(rho) = 3 rhomin^3 rho^-4."""
    lognorm = np.log(3.) + 3. * np.log(rhomin)
    return lognorm - 4 * np.log(trigs)
def log_rho_fg(trigs, injstats, bins):
    """
    Evaluate the log signal PDF, estimated from an injection histogram,
    at the given statistic values.

    trigs: zerolag event statistic values
    injstats: injection event statistic values
    bins: histogram bins

    Returns:
    log of signal PDF at the zerolag statistic values,
    fractional uncertainty from Poisson count
    """
    trigs = np.atleast_1d(trigs)
    if len(trigs) == 0:  # corner case
        # Return a pair of empty arrays, matching the normal two-value
        # return (and log_rho_bg); previously a single array was
        # returned here, breaking callers that unpack two values.
        return np.array([]), np.array([])
    assert np.min(trigs) >= np.min(bins)
    # allow 'very loud' triggers by clipping them just below the top bin
    bmax = np.max(bins)
    if np.max(trigs) >= bmax:
        print('Replacing stat values lying above highest bin')
        print(str(bmax))
        trigs = np.where(trigs >= bmax, bmax - 1e-6, trigs)
    assert np.max(trigs) < np.max(bins)  # check it worked
    counts, bins = np.histogram(injstats, bins)
    N = sum(counts)
    dens = counts / np.diff(bins) / float(N)
    # NOTE: empty bins give a zero density (log -> -inf) and an infinite
    # fractional error here
    fracerr = counts ** -0.5
    tinds = np.searchsorted(bins, trigs) - 1
    return np.log(dens[tinds]), fracerr[tinds]
def get_start_dur(path):
    """Extract (start, duration) strings from a file name of the form
    IFOS-DESCRIPTION-START-DURATION.type (directory part ignored)."""
    stem = basename(path).split('.')[0]
    fields = stem.split('-')
    return fields[2], fields[3]
def in_coinc_time_incl(f, cstring, test_times):
    """Return a 1/0 float array flagging which test_times fall within a
    segment where coincs of the type given by cstring exist."""
    seg_starts = np.array(f['segments/%s/start' % cstring][:])
    seg_ends = np.array(f['segments/%s/end' % cstring][:])
    flags = np.zeros(len(test_times))
    inside = events.indices_within_times(test_times, seg_starts, seg_ends)
    flags[inside] = np.ones_like(inside)
    return flags
# Default detector network; what to change for more/fewer ifos
_ifoset = ('H1', 'L1', 'V1')
def alltimes(ifos, mincount=1):
    """Iterate over all combinations of ifos with at least mincount
    members, each yielded as a tuple preserving the order of `ifos`."""
    assert mincount <= len(ifos)
    assert len(set(ifos)) == len(ifos)  # can't work with duplicates
    sizes = np.arange(mincount, len(ifos) + 1)
    return it_chain.from_iterable(it_comb(ifos, r) for r in sizes)
# All ifo combinations of the default network (as tuples and as joined
# strings), plus the coincident (2+ ifo) combinations only.
_alltimes = frozenset(alltimes(_ifoset, mincount=1))
_alltimestring = frozenset([''.join(t) for t in _alltimes])
_allctimes = frozenset(alltimes(_ifoset, mincount=2))
def ifos_from_combo(ct):
    """Split a coinc time string into its two-character ifo names and
    return them in alphabetical order."""
    pairs = [ct[i:i + 2] for i in range(0, len(ct), 2)]
    return sorted(pairs)
def type_in_time(ct, cty):
    """True if the given coinc type cty can exist within coinc time ct,
    i.e. every character of cty also appears in ct."""
    return set(cty) <= set(ct)
class EventRate(object):
    """Base class managing template filtering, coinc time/type
    bookkeeping and livetimes for event rate estimation in one
    template parameter bin."""
    def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp',
                 bin_lo=None, bin_hi=None):
        """
        args: namespace with at least stat_threshold and verbose;
              optionally min_ifos, network
        coinc_times: iterable of strings indicating combinations of ifos operating
        coinc_types: list of strings indicating coinc event types to be considered
        bin_param: template parameter used to bin events
        bin_lo, bin_hi: edges of the parameter bin
        """
        # allow for single-ifo time although not supported in pipeline yet
        if hasattr(args, 'min_ifos'):
            self.mincount = args.min_ifos
        else:
            self.mincount = 2
        if hasattr(args, 'network') and sorted(args.network) != list(_ifoset):
            self.ifos = sorted(args.network)
        else:
            self.ifos = _ifoset
        self.allctimes = frozenset(alltimes(self.ifos, mincount=self.mincount))
        self.allctimestring = frozenset([''.join(t) for t in self.allctimes])
        for ct in coinc_times:
            assert ct in list(self.allctimestring)
        self.ctimes = coinc_times
        if coinc_types is None:
            # all types possible during given times
            self.coinc_types = self.allctimestring
        else:
            # any coinc type must also be a time (?)
            for ty in coinc_types:
                assert ty in list(self.allctimes)
            self.coinc_types = frozenset([''.join(t) for t in coinc_types])
        if args.verbose:
            print('Using', self.coinc_types, 'coincs in',
                  self.allctimestring, 'times')
        self.args = args
        self.thr = self.args.stat_threshold
        self.bin_param = bin_param
        self.lo = bin_lo
        self.hi = bin_hi
        # filled in later by add_bank / filter_templates / get_livetimes
        self.bank = None
        self.massspins = None
        self.tpars = None
        self.tmplt_filter = None
        self.in_bin = None
        self.incl_livetimes = {}
        self.livetimes = {}
    def add_bank(self, bank_file):
        """Read template mass/spin parameters from a bank file."""
        self.bank = bank_file
        with HFile(self.bank, 'r') as b:
            tids = np.arange(len(b['mass1'][:]))
            # tuples of m1, m2, s1z, s2z in template id order
            self.massspins = triggers.get_mass_spin(b, tids)
    def filter_templates(self):
        """
        calculate array of Booleans in template id order to filter events
        """
        assert self.massspins is not None
        assert self.lo is not None
        assert self.hi is not None
        if self.args.verbose:
            print('Cutting on %s between %f - %f' %
                  (self.bin_param, self.lo, self.hi))
        self.tpars = triggers.get_param(self.bin_param, None, *self.massspins)
        self.in_bin = filter_bin_lo_hi(self.tpars, self.lo, self.hi)
    def make_bins(self, maxval, choice='bg'):
        """Construct linear or log-spaced stat bins from the threshold
        up to maxval, depending on which bin width option is set."""
        # allow options to be strings describing bin formulae as well as floats?
        # NOTE(review): if args has neither '<choice>_bin_width' attribute,
        # the first getattr raises AttributeError and 'linbw' is then
        # undefined below (NameError rather than the RuntimeError) -- confirm
        # intended arg handling.
        try:
            linbw = getattr(self.args, choice + '_bin_width')
            logbw = getattr(self.args, choice + '_log_bin_width')
        except AttributeError:
            pass
        if linbw is not None:
            n_bins = int((maxval - self.thr) / float(linbw))
            bins = np.linspace(self.thr - 0.0001, maxval, n_bins + 1)
        elif logbw is not None:
            n_bins = int(np.log(maxval / self.thr) / float(logbw))
            bins = np.logspace(np.log10(self.thr) - 0.0001, np.log10(maxval),
                               n_bins + 1)
        else:
            raise RuntimeError("Can't make bins without a %s bin width option!"
                               % choice)
        if self.args.verbose:
            print(str(n_bins) + ' ' + choice + ' stat bins')
        return bins
    def get_ctypes(self, tdict):
        """Return an array of coinc type strings, one per event."""
        # tdict is a ifo -> trigger time dictionary
        ifotimes = zip(*[tdict[i] for i in self.ifos])
        cty = []
        for ts in ifotimes:
            # if an ifo doesn't participate, time is sentinel value -1
            cty.append(''.join([i for i, t in zip(self.ifos, ts) if t > 0]))
        # return is array of coinc types strings
        return np.array(cty)
    def moreifotimes(self, ctstring):
        """List coinc time strings involving more ifos than ctstring but
        including all of ctstring's ifos."""
        # get list of coinc times with more ifos than ctstring
        allctime_moreifos = [ct for ct in self.allctimestring if
                             len(ct) > len(ctstring)]
        # only return those when at least the same ifos are operating
        ret = []
        ifos = ifos_from_combo(ctstring)
        for act in allctime_moreifos:
            if all(i in act for i in ifos):
                ret.append(act)
        return ret
    def in_coinc_time_excl(self, f, cstring, test_times):
        """ filter to all times where exactly the ifos in cstring are observing
        """
        if len(cstring) == max(len(s) for s in self.allctimestring):
            # ctime string already uniquely specifies time
            return in_coinc_time_incl(f, cstring, test_times)
        in_time = in_coinc_time_incl(f, cstring, test_times)
        # if 'more-ifo' coinc times exist, exclude them
        for combo in self.moreifotimes(cstring):
            in_moreifo_time = in_coinc_time_incl(f, combo, test_times)
            # subtract one if in more-ifo time
            in_time -= in_moreifo_time
        # if subtraction yields anything other than 1, set to 0
        np.putmask(in_time, in_time != 1, 0)
        return in_time
    def get_livetimes(self, fi):
        """Record inclusive and exclusive foreground livetimes (in years)
        for each requested coinc time, keyed on (chunk start, ctime)."""
        with HFile(fi, 'r') as f:
            for ct in self.ctimes:
                # 'inclusive' time when at least the ifos specified by ct are on
                fgt = conv.sec_to_year(f[ct].attrs['foreground_time'])
                # index dict on chunk start time / coinc type
                self.incl_livetimes[(get_start_dur(fi)[0], ct)] = fgt
                # subtract times during which 1 more ifo was on,
                # ie subtract H1L1* time from H1L1; subtract H1* time from H1; etc
                for combo in self.moreifotimes(ct):
                    if len(combo) == len(ct) + 2:
                        fgt -= conv.sec_to_year(f[combo].attrs['foreground_time'])
                # index dict on chunk start time / coinc time
                self.livetimes[(get_start_dur(fi)[0], ct)] = fgt
class ForegroundEvents(EventRate):
    """Container for zerolag (candidate) events, with methods to attach
    background and signal PDF values from the corresponding rate
    objects."""
    def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp',
                 bin_lo=None, bin_hi=None):
        EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types,
                           bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi)
        self.thr = self.args.stat_threshold
        # set of arrays in parallel containing zerolag event properties
        self.starttimes = []
        self.gpst = np.array([])
        self.stat = np.array([])
        self.ifar = np.array([])
        self.masspars = np.array([])
        self.start = np.array([])
        self.ctime = np.array([], dtype=object)  # allow unequal length strings
        self.ctype = np.array([], dtype=object)
        self.bg_pdf = np.array([])
        self.sg_pdf = np.array([])
    def add_zerolag(self, full_file):
        """Read zerolag events from one chunk file, filter on statistic,
        template bin and coinc type, and append to the stored arrays."""
        start = get_start_dur(full_file)[0]
        self.starttimes.append(start)
        with HFile(full_file, 'r') as f:
            # get stat values & threshold
            _stats = f['foreground/stat'][:]
            _keepstat = _stats > self.thr
            # get templates & apply filter
            _tids = f['foreground/template_id'][:]
            # we need the template filter to have already been made
            assert self.in_bin is not None
            _keep = np.logical_and(_keepstat, self.in_bin[_tids])
            massp = self.tpars[_tids][_keep]  # filtered template params
            # assign times and coinc types
            _times = {}
            for i in self.ifos:
                _times[i] = f['foreground/' + i + '/time'][:][_keep]
            # if an ifo doesn't participate, time is sentinel value -1
            # event time is mean of remaining positive GPS times
            meantimes = np.array([coinc_meanigz(ts)[0]
                                  for ts in zip(*_times.values())])
            _ctype = self.get_ctypes(_times)
            if len(_ctype) == 0:
                if self.args.verbose:
                    print('No events in ' + start)
                return
            # filter events
            in_ctypes = np.array([cty in self.coinc_types for cty in _ctype])
            meantimes = meantimes[in_ctypes]
            # get coinc time as strings
            # (strings may have different lengths)
            _ctime = np.repeat(np.array([''], dtype=object), len(meantimes))
            for ct in self.allctimestring:
                intime = self.in_coinc_time_excl(f, ct, meantimes)
                _ctime[intime == 1] = ct
                if self.args.verbose:
                    print('Got %i events in %s time' % (len(_ctime[intime == 1]), ct))
            # store
            self.stat = np.append(self.stat, _stats[_keep][in_ctypes])
            try:  # injection analyses only have 'ifar_exc', not 'ifar'
                self.ifar = np.append(self.ifar,
                                      f['foreground/ifar'][:][_keep][in_ctypes])
            except KeyError:
                self.ifar = np.append(self.ifar,
                                      f['foreground/ifar_exc'][:][_keep][in_ctypes])
            self.gpst = np.append(self.gpst, meantimes)
            # NOTE(review): massp is filtered by _keep but not by in_ctypes,
            # unlike stat/ifar/gpst -- masspars may get longer than its
            # sibling arrays if events of unrequested coinc types exist;
            # confirm intended.
            self.masspars = np.append(self.masspars, massp)
            self.start = np.append(self.start, int(start) *
                                   np.ones_like(meantimes))
            self.ctime = np.append(self.ctime, _ctime)
            self.ctype = np.append(self.ctype, _ctype[in_ctypes])
    def get_bg_pdf(self, bg_rate):
        """Evaluate and store the background PDF for each stored event,
        chunk by chunk, coinc time by coinc type."""
        assert isinstance(bg_rate, BackgroundEventRate)
        self.bg_pdf = np.zeros_like(self.stat)  # initialize
        # do the calculation by chunk / coinc time / coinc type
        for st in self.starttimes:
            for ct in self.allctimestring:
                for cty in self.coinc_types:
                    if not type_in_time(ct, cty):
                        continue
                    _idx = np.logical_and((self.ctime == ct), (self.ctype == cty))
                    _idx = np.logical_and(_idx, (self.start == int(st)))
                    _vals = self.stat[_idx]
                    if len(_vals) == 0:
                        continue
                    # evaluate bg pdf for specific chunk, coinc time & type
                    _pdf = bg_rate.eval_pdf(st, ct, cty, _vals)
                    # store
                    self.bg_pdf[_idx] = _pdf
                    if self.args.verbose:
                        print('Found bg PDFs for ' + cty + ' coincs from ' + st)
    def get_sg_pdf(self, sg_rate):
        """Evaluate and store the signal PDF for each stored event,
        chunk by chunk, coinc time by coinc type."""
        assert isinstance(sg_rate, SignalEventRate)
        self.sg_pdf = np.zeros_like(self.stat)
        for st in self.starttimes:
            for ct in self.allctimestring:
                for cty in self.coinc_types:
                    if not type_in_time(ct, cty):
                        continue
                    _idx = np.logical_and((self.ctime == ct), (self.ctype == cty))
                    _idx = np.logical_and(_idx, (self.start == int(st)))
                    _vals = self.stat[_idx]
                    if len(_vals) == 0:
                        continue
                    # norm of PDF is chunk-dependent so need the chunk start time
                    _pdf = sg_rate.eval_pdf(st, ct, cty, _vals)
                    # store
                    self.sg_pdf[_idx] = _pdf
                    if self.args.verbose:
                        print('Found sg PDFs for %s coincs in %s time from %s' %
                              (cty, ct, st))
class BackgroundEventRate(EventRate):
    """Accumulates time-slide (background) triggers per chunk and coinc
    type, builds background statistic histograms, and evaluates the
    normalized background PDF."""
    def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp',
                 bin_lo=None, bin_hi=None):
        EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types,
                           bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi)
        self.thr = self.args.stat_threshold
        # BG values in dict indexed on tuple (chunk start, coinc type)
        self.bg_vals = {}
        self.bg_dec = {}
        # BG livetimes
        self.bg_livetimes = {}
        # BG hist stored as bin heights / edges
        self.bg_hist = {}
        # Expected counts of BG events
        self.exp_bg = {}
        # Total expected BG count
        self.norm = 0
    def add_background(self, full_file):
        """Read background triggers from one chunk file, filter on stat
        and template bin, and build per-coinc-type histograms and
        expected counts."""
        start = get_start_dur(full_file)[0]
        self.get_livetimes(full_file)
        with HFile(full_file, 'r') as ff:
            # get stat values and threshold
            _bgstat = ff['background_exc/stat'][:]
            _keepstat = _bgstat > self.thr
            # get template ids and filter
            _bgtid = ff['background_exc/template_id'][:]
            # need the template filter to have already been made
            assert self.in_bin is not None
            _keep = np.logical_and(_keepstat, self.in_bin[_bgtid])
            _bgstat = _bgstat[_keep]
            _bgdec = ff['background_exc/decimation_factor'][:][_keep]
            # assign coinc types
            _times = {}
            for i in self.ifos:
                # NB times are time-shifted between ifos
                _times[i] = ff['background_exc/' + i + '/time'][:][_keep]
            _ctype = self.get_ctypes(_times)
            for cty in self.coinc_types:
                self.bg_vals[(start, cty)] = _bgstat[_ctype == cty]
                self.bg_dec[(start, cty)] = _bgdec[_ctype == cty]
                # get bg livetime for noise rate estimate
                # - convert to years
                self.bg_livetimes[(start, cty)] = conv.sec_to_year(
                    ff[cty].attrs['background_time_exc'])
                # make histogram
                bins = self.make_bins(np.max(_bgstat[_ctype == cty]), 'bg')
                # hack to make larger bins for H1L1V1
                if cty == 'H1L1V1':
                    if self.args.verbose:
                        print('Halving bg bins for triple bg hist')
                    bins = bins[::2].copy()  # take every 2nd bin edge
                self.bg_hist[(start, cty)] = \
                    np.histogram(_bgstat[_ctype == cty],
                                 weights=_bgdec[_ctype == cty], bins=bins)
                # get expected number of bg events for this chunk and coinc type
                self.exp_bg[(start, cty)] = _bgdec[_ctype == cty].sum() * \
                    self.incl_livetimes[(start, cty)] / \
                    self.bg_livetimes[(start, cty)]
    def plot_bg(self):
        """Plot each stored background histogram as a PDF with Poisson
        error bars; one output png per (chunk, coinc type)."""
        from matplotlib import pyplot as plt
        for chunk_type, hist in self.bg_hist.items():
            print('Plotting', chunk_type, 'background PDF ...')
            xplot = np.linspace(self.thr, self.args.plot_max_stat, 500)
            heights, bins = hist[0], hist[1]
            logpdf, _ = log_rho_bg(xplot, heights, bins)
            plt.plot(xplot, np.exp(logpdf))
            # plot error bars at bin centres
            lpdf, fracerr = log_rho_bg(0.5 * (bins[:-1] + bins[1:]), heights, bins)
            plt.errorbar(0.5 * (bins[:-1] + bins[1:]), np.exp(lpdf),
                         yerr=np.exp(lpdf) * fracerr, fmt='none')
            plt.semilogy()
            plt.grid(True)
            plt.xlim(xmax=self.args.plot_max_stat + 0.5)
            plt.ylim(ymin=0.7 * np.exp(logpdf.min()))
            plt.xlabel('Ranking statistic')
            plt.ylabel('Background PDF')
            plt.savefig(self.args.plot_dir + '%s-bg_pdf-%s' %
                        (chunk_type[1], chunk_type[0]) + '.png')
            plt.close()
    def get_norms(self):
        """Sum expected background counts over all chunks/types to get
        the overall normalization."""
        for count in self.exp_bg.values():
            self.norm += count
    def eval_pdf(self, chunk, ctime, ctype, statvals):
        # given statistic values all in the same data chunk and coinc type,
        # evaluate the background pdf normalized over all chunks & types
        assert self.norm > 0
        chunk_type = (chunk, ctype)
        # fraction of expected noise events in given chunk & coinc type
        frac_chunk_type = self.exp_bg[chunk_type] / self.norm
        # fraction of inj in specified chunk, coinc type *and* time
        frac_in_time = self.livetimes[(chunk, ctime)] /\
            self.incl_livetimes[chunk_type]
        # unpack heights / bins from bg hist object
        local_pdfs, _ = log_rho_bg(statvals, *self.bg_hist[chunk_type])
        return local_pdfs + np.log(frac_chunk_type * frac_in_time)
class SignalEventRate(EventRate):
def __init__(self, args, coinc_times, coinc_types=None, bin_param='mchirp',
bin_lo=None, bin_hi=None):
EventRate.__init__(self, args, coinc_times, coinc_types=coinc_types,
bin_param=bin_param, bin_lo=bin_lo, bin_hi=bin_hi)
self.thr = self.args.stat_threshold
self.starts = [] # bookkeeping
# for the moment roll all inj chunks together
# but sort both by coinc time and coinc type
self.inj_vals = {} # dict indexed on tuple (coinc time, coinc type)
self.fg_bins = {}
self.norm = 0
def add_injections(self, inj_file, fg_file):
# fg_file only needed for coinc time info :/
self.starts.append(get_start_dur(inj_file)[0])
self.get_livetimes(inj_file)
with HFile(inj_file, 'r') as jf:
# get stat values and threshold
_injstat = jf['found_after_vetoes/stat'][:]
_keepstat = _injstat > self.thr
# get template ids and filter
_injtid = jf['found_after_vetoes/template_id'][:]
assert self.in_bin is not None
_keep = np.logical_and(_keepstat, self.in_bin[_injtid])
_injstat = _injstat[_keep]
# assign coinc types
_times = {}
for i in self.ifos:
_times[i] = jf['found_after_vetoes/' + i + '/time'][:][_keep]
meantimes = np.array([coinc_meanigz(ts)[0]
for ts in zip(*_times.values())])
_ctype = self.get_ctypes(_times)
# get coinc time as strings
# (strings may have different lengths)
_ctime = np.repeat(np.array([''], dtype=object), len(meantimes))
for ct in self.allctimestring:
# get coinc time info from segments in fg file
intime = self.in_coinc_time_excl(
HFile(fg_file, 'r'), ct, meantimes)
_ctime[intime == 1] = ct # do we need this?
if self.args.verbose:
print('Got %i ' % (intime == 1).sum() + 'inj in %s time' % ct)
# filter by coinc type and add to array
for cty in self.coinc_types:
if not type_in_time(ct, cty):
continue
my_vals = _injstat[np.logical_and(_ctype == cty, intime == 1)]
if self.args.verbose:
print('%d ' % len(my_vals) + 'are %s coincs' % cty)
if (ct, cty) not in self.inj_vals: # initialize
self.inj_vals[(ct, cty)] = np.array([])
if len(my_vals) > 0:
self.inj_vals[(ct, cty)] = \
np.append(self.inj_vals[(ct, cty)], my_vals)
del intime, my_vals
def make_all_bins(self):
    """Build histogram bin edges for every occurring
    (coinc time, coinc type) combination, out to just above the
    largest recovered-injection statistic."""
    for ct in self.allctimestring:
        for cty in self.coinc_types:
            if not type_in_time(ct, cty):
                continue
            vals = self.inj_vals[(ct, cty)]
            # get norm of fg histogram by taking bins out to max injection stat
            # NOTE(review): assumes at least one injection survived for this
            # (time, type) pair — vals.max() raises on an empty array; confirm
            # add_injections always populates every combination.
            binmax = vals.max() * 1.01
            self.fg_bins[(ct, cty)] = self.make_bins(binmax, 'inj')
def plot_inj(self):
    """Plot the estimated signal PDF versus ranking statistic for each
    (coinc time, coinc type) combination and save one PNG per
    combination into ``self.args.plot_dir``."""
    from matplotlib import pyplot as plt
    for ct in self.allctimestring:
        for cty in self.coinc_types:
            if not type_in_time(ct, cty):
                continue
            print('Plotting ' + cty + ' signal PDF in ' + ct + ' time ...')
            samples = self.inj_vals[(ct, cty)]
            bins = self.fg_bins[(ct, cty)]
            # evaluate the PDF on a log-spaced grid from threshold to
            # the largest injection statistic
            xplot = np.logspace(np.log10(self.thr),
                                np.log10(samples.max()), 500)
            logpdf, _ = log_rho_fg(xplot, samples, bins)
            plt.plot(xplot, np.exp(logpdf))
            # plot error bars at bin centres (fractional error scaled
            # back to the PDF value)
            lpdf, fracerr = log_rho_fg(0.5 * (bins[:-1] + bins[1:]),
                                       samples, bins)
            plt.errorbar(0.5 * (bins[:-1] + bins[1:]), np.exp(lpdf),
                         yerr=np.exp(lpdf) * fracerr, fmt='none')
            plt.semilogy()
            plt.grid(True)
            # zoom in on the 'interesting' range
            plt.xlim(xmin=self.thr, xmax=2. * self.args.plot_max_stat)
            plt.ylim(ymin=0.7 * np.exp(logpdf.min()))
            plt.title(r'%i injs plotted, \# of bins %i' %
                      (len(samples), len(bins) - 1))
            plt.xlabel('Ranking statistic')
            plt.ylabel('Signal PDF')
            plt.savefig(self.args.plot_dir + '%s-fg_pdf-%s' % (ct, cty)
                        + '.png')
            plt.close()
def get_norms(self):
    """Accumulate the total injection count over all (time, type)
    combinations into ``self.norm``.

    Injections carry no weights or decimation factors, so each one
    contributes unit weight and the norm is simply a count.
    """
    total = sum(len(vals) for vals in self.inj_vals.values())
    self.norm += float(total)
def eval_pdf(self, chunk, ctime, ctype, statvals):
    """Evaluate the log signal PDF at the given statistic values for
    events in a specific chunk, coincident time and coincidence type,
    normalized over all chunks, times and types.

    Returns an array of log-PDF values, one per entry of ``statvals``.
    Requires ``get_norms()`` to have been called first.
    """
    # given statistic values in the same chunk, coinc time and coinc type,
    # evaluate the signal pdf normalized over all chunks, times and types
    assert self.norm > 0
    time_type = (ctime, ctype)
    # fraction of inj in specified coinc time and type
    frac_time_type = float(len(self.inj_vals[time_type])) / self.norm
    # total livetime for specified coinc time, summed over all chunks
    total_coinc_time = sum([self.livetimes[(ch, ctime)] for ch in self.starts])
    # fraction of inj in specified chunk *and* coinc time/type,
    # apportioning by this chunk's share of the livetime
    this_norm = frac_time_type * self.livetimes[(chunk, ctime)] / \
        total_coinc_time
    local_pdfs, _ = log_rho_fg(statvals, self.inj_vals[time_type],
                               self.fg_bins[time_type])
    # shift the log-PDF by the log of the normalization factor
    return local_pdfs + np.log(this_norm)
# Public API of this module: names exported via ``from <module> import *``.
__all__ = ['filter_bin_lo_hi', 'filter_tmplt_mchirp', 'read_full_data',
           'read_full_data_mchirp', 'log_rho_bg', 'log_rho_fg_analytic',
           'log_rho_fg', 'get_start_dur', 'in_coinc_time_incl', 'alltimes',
           'ifos_from_combo', 'type_in_time', 'EventRate', 'ForegroundEvents',
           'BackgroundEventRate', 'SignalEventRate']
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@pycbc@population@fgmc_functions.py@.PATH_END.py
|
{
"filename": "ephemeris.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/twins/ephemeris.py",
"type": "Python"
}
|
from .load import load
# This routine was moved out of __init__.py. Please see that file for previous revision history.
def ephemeris(trange=None,
              probe='1',
              datatype='or',
              prefix='',
              suffix='',
              get_support_data=False,
              varformat=None,
              varnames=None,
              downloadonly=False,
              force_download=False,
              notplot=False,
              no_update=False,
              time_clip=False):
    """
    This function loads TWINS ephemeris data

    Parameters
    ----------
    trange : list of str
        time range of interest [starttime, endtime] with the format
        ['YYYY-MM-DD','YYYY-MM-DD'] or to specify more or less than a day
        ['YYYY-MM-DD/hh:mm:ss','YYYY-MM-DD/hh:mm:ss']
        Default: ['2018-11-5', '2018-11-6']

    probe: str or list of str
        Probe to load. Valid options: '1', '2'
        Default: '1'

    datatype: str
        Data type; Valid options:
        Default: 'or'

    prefix: str
        The tplot variable names will be given this prefix.
        Default: ''

    suffix: str
        The tplot variable names will be given this suffix.
        Default: ''

    get_support_data: bool
        If True, data with an attribute "VAR_TYPE" with a value of "support_data"
        will be loaded into tplot.
        Default: False

    varformat: str
        The file variable formats to load into tplot. Wildcard character
        "*" is accepted.
        Default: None (all variables will be loaded)

    varnames: list of str
        List of variable names to load
        Default: None (all variables will be loaded)

    downloadonly: bool
        Set this flag to download the CDF files, but not load them into
        tplot variables
        Default: False

    force_download: bool
        Set this flag to download the CDF files, even if the local copy is newer.
        Default: False

    notplot: bool
        Return the data in hash tables instead of creating tplot variables
        Default: False

    no_update: bool
        If set, only load data from your local cache
        Default: False

    time_clip: bool
        Time clip the variables to exactly the range specified in the trange keyword
        Default: False

    Returns
    -------
    list of str
        List of tplot variables created.

    Examples
    --------
    >>> import pyspedas
    >>> from pytplot import tplot
    >>> # Note: variables have the same names for both probes, so only load one at a time
    >>> ephem_vars = pyspedas.projects.twins.ephemeris(probe=['1'],trange=['2008-04-01','2008-04-02'])
    >>> tplot('FEQUATORIALGSM')
    """
    # Use None sentinels to avoid the shared-mutable-default pitfall:
    # the former list defaults were created once at import time and
    # shared across every call.
    if trange is None:
        trange = ['2018-11-5', '2018-11-6']
    if varnames is None:
        varnames = []
    return load(instrument='ephemeris', trange=trange, probe=probe,
                datatype=datatype, prefix=prefix, suffix=suffix,
                get_support_data=get_support_data, varformat=varformat,
                varnames=varnames, downloadonly=downloadonly,
                force_download=force_download, notplot=notplot,
                time_clip=time_clip, no_update=no_update)
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@twins@ephemeris.py@.PATH_END.py
|
{
"filename": "_internals.py",
"repo_name": "peppedilillo/mescal",
"repo_path": "mescal_extracted/mescal-main/source/cli/beaupy/_internals.py",
"type": "Python"
}
|
"""
Copyright (c) 2022 Peter Vyboch
Copyright (c) 2018 Hans Schülein
Code from user petereon@github's Beaupy (thanks).
https://github.com/petereon/beaupy
A copy of the license file has been attached in the `licenses' folder.
Introduced small changes to allow passing of console object and redistribution.
~Peppe
----
A Python library of interactive CLI elements you have been looking for
"""
from contextlib import contextmanager
from typing import Iterator, List, Union
from rich._emoji_replace import _emoji_replace
from rich.console import Console
from rich.console import ConsoleRenderable
from rich.live import Live
from rich.text import Text
class ValidationError(Exception):
    """Raised when user input fails a supplied validator."""

    pass
class ConversionError(Exception):
    """Raised when user input cannot be converted to the target type."""

    pass
def _replace_emojis(text: str) -> str:
    """Return *text* with every emoji shortcode replaced by a single
    space, so on-screen width calculations stay accurate."""
    replaced = _emoji_replace(text, " ")
    return str(replaced)
def _format_option_select(
i: int, cursor_index: int, option: str, cursor_style: str, cursor: str
) -> str:
return "{}{}".format(
(
f"[{cursor_style}]{cursor}[/{cursor_style}] "
if i == cursor_index
else " " * (len(_replace_emojis(cursor)) + 1)
),
option,
)
def _render_option_select_multiple(
    option: str,
    ticked: bool,
    tick_character: str,
    tick_style: str,
    selected: bool,
    cursor_style: str,
) -> str:
    """Render one line of a multi-select list.

    Shows a bracketed tick mark when the option is ticked, an
    equal-width empty bracket otherwise, and highlights the option text
    when the cursor is on it.  The leading ``\\[`` escapes the bracket
    so rich does not parse it as markup.
    """
    if ticked:
        prefix = f"\[[{tick_style}]{tick_character}[/{tick_style}]]"  # noqa: W605
    else:
        pad = " " * len(_replace_emojis(tick_character))
        prefix = "\[{}]".format(pad)  # noqa: W605
    if selected:
        option = f"[{cursor_style}]{option}[/{cursor_style}]"
    return f"{prefix} {option}"
def _update_rendered(live: Live, renderable: Union[ConsoleRenderable, str]) -> None:
    """Replace the contents of the Live display and repaint immediately."""
    live.update(renderable=renderable)
    # refresh() forces the repaint now rather than waiting for the
    # Live object's auto-refresh cycle.
    live.refresh()
def _render_prompt(
    secure: bool, typed_values: List[str], prompt: str, cursor_position: int, error: str
) -> str:
    """Build the renderable for a text-input prompt.

    Masks the typed characters when *secure* is set, inverts the colours
    of the character under the cursor, appends a confirmation hint, and
    tacks a red error line on the end when *error* is non-empty.
    """
    if secure:
        shown = "*" * len(typed_values)
    else:
        shown = "".join(typed_values)
    # Trailing space gives the cursor somewhere to sit at end-of-input.
    body = Text(shown + " ")
    body.stylize("black on white", cursor_position, cursor_position + 1)
    hint = Text("\n\n(Confirm with enter, exit with esc)")
    hint.stylize("bold", 16, 21)  # bolds the word "enter"
    rendered = Text.from_markup(prompt + "\n") + body + hint
    if error:
        rendered = f"{rendered}\n[red]Error:[/red] {error}"
    return rendered
@contextmanager
def _cursor_hidden(console: Console) -> Iterator:
console.show_cursor(False)
yield
console.show_cursor(True)
|
peppedililloREPO_NAMEmescalPATH_START.@mescal_extracted@mescal-main@source@cli@beaupy@_internals.py@.PATH_END.py
|
{
"filename": "test_util.py",
"repo_name": "tomasstolker/species",
"repo_path": "species_extracted/species-main/species/util/test_util.py",
"type": "Python"
}
|
"""
Utility functions for running the unit tests.
"""
import os
def create_config(test_path):
    """
    Write a ``species_config.ini`` configuration file into the test folder.

    Parameters
    ----------
    test_path : str
        Folder where the unit tests are located.

    Returns
    -------
    NoneType
        None
    """
    config_file = os.path.join(test_path, "species_config.ini")
    # Assemble the INI content in one shot; the final line carries no
    # trailing newline, matching the historical file layout.
    content = "\n".join(
        [
            "[species]",
            f"database = {os.path.join(test_path, 'species_database.hdf5')}",
            f"data_folder = {os.path.join(test_path, 'data/')}",
            "vega_mag = 0.03",
        ]
    )
    with open(config_file, "w", encoding="utf-8") as config:
        config.write(content)
|
tomasstolkerREPO_NAMEspeciesPATH_START.@species_extracted@species-main@species@util@test_util.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/README.md",
"type": "Markdown"
}
|
# The yt Project
[](https://pypi.org/project/yt)
[](https://pypi.org/project/yt/)
[](http://yt-project.org/docs/dev/)
[](https://mail.python.org/archives/list/yt-users@python.org//)
[](https://mail.python.org/archives/list/yt-dev@python.org//)
[](https://hub.yt/)
[](http://numfocus.org)
[](https://numfocus.org/donate-to-yt)
<!--- Tests and style --->
[](https://github.com/yt-project/yt/actions/workflows/build-test.yaml)
[](https://github.com/yt-project/yt/actions/workflows/bleeding-edge.yaml)
[](https://results.pre-commit.ci/latest/github/yt-project/yt/main)
[](https://github.com/charliermarsh/ruff)
<!--- [](https://codecov.io/gh/yt-project/yt) --->
<a href="http://yt-project.org"><img src="https://raw.githubusercontent.com/yt-project/yt/main/doc/source/_static/yt_logo.png" width="300"></a>
yt is an open-source, permissively-licensed Python library for analyzing and
visualizing volumetric data.
yt supports structured, variable-resolution meshes, unstructured meshes, and
discrete or sampled data such as particles. Focused on driving
physically-meaningful inquiry, yt has been applied in domains such as
astrophysics, seismology, nuclear engineering, molecular dynamics, and
oceanography. Composed of a friendly community of users and developers, we want
to make it easy to use and develop - we'd love it if you got involved!
We've written a [method
paper](https://ui.adsabs.harvard.edu/abs/2011ApJS..192....9T) you may be interested
in; if you use yt in the preparation of a publication, please consider citing
it.
## Code of Conduct
yt abides by a code of conduct partially modified from the PSF code of conduct,
and is found [in our contributing
guide](http://yt-project.org/docs/dev/developing/developing.html#yt-community-code-of-conduct).
## Installation
You can install the most recent stable version of yt either with conda from
[conda-forge](https://conda-forge.org/):
```shell
conda install -c conda-forge yt
```
or with pip:
```shell
python -m pip install yt
```
More information on the various ways to install yt, and in particular to install from source,
can be found on [the project's website](https://yt-project.org/docs/dev/installing.html).
## Getting Started
yt is designed to provide meaningful analysis of data. We have some Quickstart
example notebooks in the repository:
* [Introduction](https://github.com/yt-project/yt/tree/main/doc/source/quickstart/1\)_Introduction.ipynb)
* [Data Inspection](https://github.com/yt-project/yt/tree/main/doc/source/quickstart/2\)_Data_Inspection.ipynb)
* [Simple Visualization](https://github.com/yt-project/yt/tree/main/doc/source/quickstart/3\)_Simple_Visualization.ipynb)
* [Data Objects and Time Series](https://github.com/yt-project/yt/tree/main/doc/source/quickstart/4\)_Data_Objects_and_Time_Series.ipynb)
* [Derived Fields and Profiles](https://github.com/yt-project/yt/tree/main/doc/source/quickstart/5\)_Derived_Fields_and_Profiles.ipynb)
* [Volume Rendering](https://github.com/yt-project/yt/tree/main/doc/source/quickstart/6\)_Volume_Rendering.ipynb)
If you'd like to try these online, you can visit our [yt Hub](https://hub.yt/)
and run a notebook next to some of our example data.
## Contributing
We love contributions! yt is open source, built on open source, and we'd love
to have you hang out in our community.
We have developed some [guidelines](CONTRIBUTING.rst) for contributing to yt.
**Imposter syndrome disclaimer**: We want your help. No, really.
There may be a little voice inside your head that is telling you that you're not
ready to be an open source contributor; that your skills aren't nearly good
enough to contribute. What could you possibly offer a project like this one?
We assure you - the little voice in your head is wrong. If you can write code at
all, you can contribute code to open source. Contributing to open source
projects is a fantastic way to advance one's coding skills. Writing perfect code
isn't the measure of a good developer (that would disqualify all of us!); it's
trying to create something, making mistakes, and learning from those
mistakes. That's how we all improve, and we are happy to help others learn.
Being an open source contributor doesn't just mean writing code, either. You can
help out by writing documentation, tests, or even giving feedback about the
project (and yes - that includes giving feedback about the contribution
process). Some of these contributions may be the most valuable to the project as
a whole, because you're coming to the project with fresh eyes, so you can see
the errors and assumptions that seasoned contributors have glossed over.
(This disclaimer was originally written by
[Adrienne Lowe](https://github.com/adriennefriend) for a
[PyCon talk](https://www.youtube.com/watch?v=6Uj746j9Heo), and was adapted by yt
based on its use in the README file for the
[MetPy project](https://github.com/Unidata/MetPy))
## Resources
We have some community and documentation resources available.
* Our latest documentation is always at http://yt-project.org/docs/dev/ and it
includes recipes, tutorials, and API documentation
* The [discussion mailing
list](https://mail.python.org/archives/list/yt-users@python.org//)
should be your first stop for general questions
* The [development mailing
list](https://mail.python.org/archives/list/yt-dev@python.org//) is
better suited for more development issues
* You can also join us on Slack at yt-project.slack.com ([request an
invite](https://yt-project.org/slack.html))
Is your code compatible with yt? Great! Please consider giving us a shoutout as a shiny badge in your README
- markdown
```markdown
[](https://yt-project.org)
```
- rst
```reStructuredText
|yt-project|
.. |yt-project| image:: https://img.shields.io/static/v1?label="works%20with"&message="yt"&color="blueviolet"
:target: https://yt-project.org
```
## Powered by NumFOCUS
yt is a fiscally sponsored project of [NumFOCUS](https://numfocus.org/).
If you're interested in
supporting the active maintenance and development of this project, consider
[donating to the project](https://numfocus.salsalabs.org/donate-to-yt/index.html).
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@README.md@.PATH_END.py
|
{
"filename": "_ticklabelposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/colorbar/_ticklabelposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabelpositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``ticklabelposition`` attribute of
    ``surface.colorbar`` (auto-generated plotly validator)."""

    def __init__(
        self, plotly_name="ticklabelposition", parent_name="surface.colorbar", **kwargs
    ):
        # Valid placements: bare inside/outside, plus each side with
        # "outside" listed before "inside" — same order as the schema.
        base = ["outside", "inside"]
        allowed = base + [
            f"{pos} {side}"
            for side in ("top", "left", "right", "bottom")
            for pos in base
        ]
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            values=kwargs.pop("values", allowed),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@colorbar@_ticklabelposition.py@.PATH_END.py
|
{
"filename": "tools.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/contrib/anisotropic_eos/tools.py",
"type": "Python"
}
|
import numpy as np
from tabulate import tabulate
def print_table_for_mineral_constants(mineral, indices):
    """
    Print a LaTeX table of the anisotropic constants c_{ij,mn} of a
    mineral for the requested index pairs.

    Parameters
    ----------
    mineral : mineral object
        Must expose ``anisotropic_params["c"]`` as a 4-D array indexed
        as [i-1, j-1, m, n].
    indices : iterable of (int, int)
        1-based index pairs (i, j) selecting which constants to tabulate.
    """
    # Gather the (m, n) matrix for each requested (i, j) pair
    # (indices are 1-based, the array is 0-based).
    constants = []
    for i, j in indices:
        constants.append(mineral.anisotropic_params["c"][i - 1, j - 1, :, :])
    constants = np.array(constants)

    # Keep only the (m, n) columns with at least one nonzero value over
    # all requested pairs; n varies in the outer loop, so the columns
    # are ordered by n first, then m.
    mn_pairs = []
    for n in range(constants.shape[2]):
        for m in range(constants.shape[1]):
            if not np.all(constants[:, m, n] == 0):
                mn_pairs.append((m, n))

    # Header row of c_{pq mn} labels, then one row per (i, j) pair.
    # NOTE(review): the header row has one fewer cell than the data rows
    # (no label over the c_{ij} column) — confirm the intended layout.
    rows = [[f"$c_{{pq{m}{n}}}$" for (m, n) in mn_pairs]]
    for ci, (i, j) in enumerate(indices):
        row = [f"$c_{{{i}{j}}}$"]
        row.extend([f"{constants[ci, m, n]:.4e}" for (m, n) in mn_pairs])
        rows.append(row)

    print(tabulate(rows, headers="firstrow", tablefmt="latex_raw"))
def print_table_for_mineral_constants_2(mineral, param_list, indices):
    """
    Print a LaTeX table of selected scalar anisotropic parameters for
    each requested index pair.

    Parameters
    ----------
    mineral : mineral object
        Must expose ``anisotropic_params[param]`` as 2-D arrays for each
        name in *param_list*.
    param_list : list of str
        Names of the parameters (keys of ``anisotropic_params``).
    indices : iterable of (int, int)
        1-based index pairs (i, j).
    """
    # Collect the requested parameter values for each (i, j) pair.
    constants = []
    for i, j in indices:
        cs = []
        for param in param_list:
            cs.append(mineral.anisotropic_params[param][i - 1, j - 1])
        constants.append(cs)
    constants = np.array(constants)

    # Append "_" so names without an underscore still split into a base
    # symbol and a (possibly empty) subscript below.
    param_list = [p + "_" for p in param_list]
    rows = [["$p$", "$q$"]]
    rows[0].extend(
        [f'${param.split("_")[0]}_{{{param.split("_")[1]}pq}}$' for param in param_list]
    )
    for ci, (i, j) in enumerate(indices):
        row = [f"{i}", f"{j}"]
        # Values of exactly 0 or 1 are treated as placeholders and
        # rendered as "-".  The comprehension's `i` shadows the index
        # pair's `i` only inside the comprehension scope (Python 3).
        row.extend(
            [
                (
                    f"{constants[ci, i]:.4e}"
                    if constants[ci, i] != 0 and constants[ci, i] != 1
                    else "-"
                )
                for i in range(len(param_list))
            ]
        )
        rows.append(row)

    print(tabulate(rows, headers="firstrow", tablefmt="latex_raw"))
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@contrib@anisotropic_eos@tools.py@.PATH_END.py
|
{
"filename": "_spikecolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/yaxis/_spikecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SpikecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``layout.yaxis.spikecolor`` (auto-generated
    plotly validator)."""

    def __init__(self, plotly_name="spikecolor", parent_name="layout.yaxis", **kwargs):
        # Callers may override edit_type; default matches the schema.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@yaxis@_spikecolor.py@.PATH_END.py
|
{
"filename": "rfs.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/projects/psp/rfs.py",
"type": "Python"
}
|
import cdflib
def rfs_variables_to_load(files):
    """
    This function finds a list of variables to load
    from the RFS files (essentially the same behavior
    as the IDL code).

    Parameters
    ----------
    files : list of str
        Paths to RFS CDF files.  Only the first file is inspected,
        since the variable set is assumed identical across files.

    Returns
    -------
    list of str
        Names of the non-empty ``psp_fld*`` variables found, without
        duplicates, in file order.
    """
    if not files:
        return []

    # the variables should be the same across all files
    file = files[0]

    # cdflib changed cdf_info() from returning a dict to returning an
    # object with attributes after version 0.4.9.  Compare release
    # numbers numerically: the former plain string comparison mis-orders
    # versions such as "0.4.10" < "0.4.9".
    def _version_tuple(version):
        parts = []
        for token in version.split("."):
            digits = ""
            for ch in token:
                if ch.isdigit():
                    digits += ch
                else:
                    break  # stop at pre-release/dev suffixes, e.g. "9rc1"
            parts.append(int(digits) if digits else 0)
        return tuple(parts)

    new_cdflib = _version_tuple(cdflib.__version__) > (0, 4, 9)

    cdf_file = cdflib.CDF(file)
    cdf_info = cdf_file.cdf_info()

    if new_cdflib:
        variables = cdf_info.rVariables + cdf_info.zVariables
    else:
        variables = cdf_info["rVariables"] + cdf_info["zVariables"]

    out = []
    for variable in variables:
        # Only PSP FIELDS variables are of interest.
        if variable[0:7] != 'psp_fld':
            continue
        try:
            elements = cdf_file.varget(variable)
        except ValueError:
            continue
        # Skip empty variables and duplicates.
        if elements is None or variable in out:
            continue
        out.append(variable)
    return out
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@projects@psp@rfs.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.