metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "diverging.py",
"repo_name": "jiffyclub/palettable",
"repo_path": "palettable_extracted/palettable-master/palettable/cmocean/diverging.py",
"type": "Python"
}
|
"""
Diverging color maps from the cmocean package:
https://github.com/matplotlib/cmocean.
"""
from __future__ import absolute_import
import itertools
from . import cmoceanpalette
from . import colormaps
from .. import utils
_PALETTE_TYPE = 'diverging'
_NAMES_TO_DATA = {
'Balance': colormaps._BALANCE,
'Curl': colormaps._CURL,
'Delta': colormaps._DELTA,
}
_NAME_MAP = utils.make_name_map(_NAMES_TO_DATA.keys())
_NAMES_AND_LENGTHS = utils.make_names_and_lengths(
sorted(_NAMES_TO_DATA.keys()))
print_maps = utils.print_maps_factory(
'diverging cmocean', _NAMES_AND_LENGTHS, _PALETTE_TYPE)
get_map = utils.get_map_factory(
'divergine cmocean', __name__, _NAMES_TO_DATA, _PALETTE_TYPE,
cmoceanpalette.CmoceanMap)
globals().update(utils.load_all_palettes(_NAMES_AND_LENGTHS, get_map))
|
jiffyclubREPO_NAMEpalettablePATH_START.@palettable_extracted@palettable-master@palettable@cmocean@diverging.py@.PATH_END.py
|
{
"filename": "lens_distribution.py",
"repo_name": "sibirrer/hierArc",
"repo_path": "hierArc_extracted/hierArc-main/hierarc/Sampling/Distributions/lens_distribution.py",
"type": "Python"
}
|
import numpy as np
class LensDistribution(object):
    """Class to draw lens parameters of individual lens from distributions."""

    def __init__(
        self,
        lambda_mst_distribution="NONE",
        gamma_in_sampling=False,
        gamma_in_distribution="NONE",
        log_m2l_sampling=False,
        log_m2l_distribution="NONE",
        alpha_lambda_sampling=False,
        beta_lambda_sampling=False,
        alpha_gamma_in_sampling=False,
        alpha_log_m2l_sampling=False,
        log_scatter=False,
        mst_ifu=False,
        lambda_scaling_property=0,
        lambda_scaling_property_beta=0,
        kwargs_min=None,
        kwargs_max=None,
        gamma_pl_index=None,
        gamma_pl_global_sampling=False,
        gamma_pl_global_dist="NONE",
    ):
        """
        :param lambda_mst_distribution: string, distribution function of the MST transform.
            Sampling of lambda_mst is enabled if and only if this is "GAUSSIAN".
        :param gamma_in_sampling: bool, if True samples the inner slope of the GNFW profile
        :param gamma_in_distribution: string, distribution function of the inner
            slope of the GNFW profile
        :param log_m2l_sampling: bool, if True samples the mass to light ratio of
            the stars in logarithmic scale
        :param log_m2l_distribution: string, distribution function of the logarithm of mass to
            light ratio of the lens
        :param alpha_lambda_sampling: bool, if True samples a parameter alpha_lambda, which scales lambda_mst linearly
            according to a predefined quantity of the lens
        :param beta_lambda_sampling: bool, if True samples a parameter beta_lambda, which scales lambda_mst linearly
            according to a predefined quantity of the lens
        :param alpha_gamma_in_sampling: bool, if True samples a parameter alpha_gamma_in, which scales gamma_in linearly
        :param alpha_log_m2l_sampling: bool, if True samples a parameter alpha_log_m2l, which scales log_m2l linearly
        :param log_scatter: boolean, if True, samples the Gaussian scatter amplitude in log space
            (and thus flat prior in log)
        :param mst_ifu: bool, if True replaces the lambda_mst parameter by the lambda_ifu parameter (and distribution)
            in sampling this lens.
        :param lambda_scaling_property: float (optional), scaling of
            lambda_mst = lambda_mst_global + alpha * lambda_scaling_property
        :param lambda_scaling_property_beta: float (optional), scaling of
            lambda_mst = lambda_mst_global + beta * lambda_scaling_property_beta
        :param kwargs_min: minimum arguments of parameters supported by each lens
        :type kwargs_min: dict or None
        :param kwargs_max: maximum arguments of parameters supported by each lens
        :type kwargs_max: dict or None
        :param gamma_pl_index: index of gamma_pl parameter associated with this lens
        :type gamma_pl_index: int or None
        :param gamma_pl_global_sampling: if sampling a global power-law density slope distribution
        :type gamma_pl_global_sampling: bool
        :param gamma_pl_global_dist: distribution of global gamma_pl distribution ("GAUSSIAN" or "NONE")
        """
        self._lambda_mst_distribution = lambda_mst_distribution
        self._gamma_in_sampling = gamma_in_sampling
        self._gamma_in_distribution = gamma_in_distribution
        self._log_m2l_sampling = log_m2l_sampling
        self._log_m2l_distribution = log_m2l_distribution
        self._alpha_lambda_sampling = alpha_lambda_sampling
        self._beta_lambda_sampling = beta_lambda_sampling
        self._alpha_gamma_in_sampling = alpha_gamma_in_sampling
        self._alpha_log_m2l_sampling = alpha_log_m2l_sampling
        self._mst_ifu = mst_ifu
        self._lambda_scaling_property = lambda_scaling_property
        self._lambda_scaling_property_beta = lambda_scaling_property_beta
        self._gamma_pl_global_sampling = gamma_pl_global_sampling
        self._gamma_pl_global_dist = gamma_pl_global_dist
        self._log_scatter = log_scatter
        # Bounds for gamma_in and log_m2l fall back to +/- infinity when the
        # corresponding keys are absent from kwargs_min / kwargs_max.
        if kwargs_max is None:
            kwargs_max = {}
        if kwargs_min is None:
            kwargs_min = {}
        self._gamma_in_min, self._gamma_in_max = kwargs_min.get(
            "gamma_in", -np.inf
        ), kwargs_max.get("gamma_in", np.inf)
        self._log_m2l_min, self._log_m2l_max = kwargs_min.get(
            "log_m2l", -np.inf
        ), kwargs_max.get("log_m2l", np.inf)
        # A per-lens power-law slope model is active when this lens is assigned
        # an index into the externally sampled gamma_pl_list (see draw_lens).
        if gamma_pl_index is not None:
            self._gamma_pl_model = True
            self.gamma_pl_index = gamma_pl_index
        else:
            self._gamma_pl_model = False
            self.gamma_pl_index = None
        # lambda_mst is sampled only for a Gaussian distribution; any other
        # value of lambda_mst_distribution leaves lambda_mst deterministic.
        if self._lambda_mst_distribution in ["GAUSSIAN"]:
            self._lambda_mst_sampling = True
        else:
            self._lambda_mst_sampling = False

    def draw_lens(
        self,
        lambda_mst=1,
        lambda_mst_sigma=0,
        gamma_ppn=1,
        lambda_ifu=1,
        lambda_ifu_sigma=0,
        alpha_lambda=0,
        beta_lambda=0,
        gamma_in=1,
        gamma_in_sigma=0,
        alpha_gamma_in=0,
        log_m2l=1,
        log_m2l_sigma=0,
        alpha_log_m2l=0,
        gamma_pl_list=None,
        gamma_pl_mean=2,
        gamma_pl_sigma=0,
    ):
        """Draws a realization of a specific model from the hyperparameter distribution.

        Gaussian draws of gamma_in and log_m2l that fall outside the configured
        bounds are rejected by recursively re-drawing the whole parameter set
        (rejection sampling).

        :param lambda_mst: MST transform
        :param lambda_mst_sigma: spread in the distribution
        :param gamma_ppn: Post-Newtonian parameter
        :param lambda_ifu: secondary lambda_mst parameter for subset of lenses specified
            for
        :param lambda_ifu_sigma: secondary lambda_mst_sigma parameter for subset of
            lenses specified for
        :param alpha_lambda: float, linear slope of the lambda_int scaling relation with
            lens quantity self._lambda_scaling_property
        :param beta_lambda: float, a second linear slope of the lambda_int scaling
            relation with lens quantity self._lambda_scaling_property_beta
        :param gamma_in: inner slope of the GNFW profile
        :param gamma_in_sigma: spread in the distribution
        :param alpha_gamma_in: float, linear slope of the gamma_in scaling relation with
            lens quantity self._lambda_scaling_property
        :param log_m2l: log(mass-to-light ratio)
        :param log_m2l_sigma: spread in the distribution
        :param alpha_log_m2l: float, linear slope of the log(m2l) scaling relation with
            lens quantity self._lambda_scaling_property
        :param gamma_pl_list: power-law density slopes as lists (for multiple lenses)
        :type gamma_pl_list: list or None
        :param gamma_pl_mean: mean of gamma_pl of the global distribution
        :param gamma_pl_sigma: sigma of the gamma_pl global distribution
        :return: draw from the distributions
        """
        kwargs_return = {}
        # IFU lenses use the secondary (lambda_ifu) mean and scatter in place of
        # the global lambda_mst values.
        if self._mst_ifu is True:
            lambda_mst_mean_lens = lambda_ifu
            lambda_mst_sigma_ = lambda_ifu_sigma
        else:
            lambda_mst_mean_lens = lambda_mst
            lambda_mst_sigma_ = lambda_mst_sigma
        # Linear scaling of the MST mean with the lens-specific properties.
        lambda_lens = (
            lambda_mst_mean_lens
            + alpha_lambda * self._lambda_scaling_property
            + beta_lambda * self._lambda_scaling_property_beta
        )
        lambda_mst_draw = lambda_lens
        if self._lambda_mst_sampling:
            if self._lambda_mst_distribution in ["GAUSSIAN"]:
                lambda_mst_draw = np.random.normal(lambda_lens, lambda_mst_sigma_)
        kwargs_return["lambda_mst"] = lambda_mst_draw
        kwargs_return["gamma_ppn"] = gamma_ppn
        if self._gamma_in_sampling:
            # A hyper-parameter mean outside the supported range is a hard error.
            if gamma_in < self._gamma_in_min or gamma_in > self._gamma_in_max:
                raise ValueError(
                    "gamma_in parameter is out of bounds of the interpolated range!"
                )
            # NOTE(review): the alpha_gamma_in scaling is applied only when the
            # distribution is GAUSSIAN -- confirm this asymmetry is intended.
            if self._gamma_in_distribution in ["GAUSSIAN"]:
                gamma_in_lens = (
                    gamma_in + alpha_gamma_in * self._lambda_scaling_property
                )
            else:
                gamma_in_lens = gamma_in
            gamma_in_draw = np.random.normal(gamma_in_lens, gamma_in_sigma)
            # Rejection sampling: out-of-range draws trigger a full recursive
            # re-draw with identical hyper-parameters.
            if gamma_in_draw < self._gamma_in_min or gamma_in_draw > self._gamma_in_max:
                return self.draw_lens(
                    lambda_mst=lambda_mst,
                    lambda_mst_sigma=lambda_mst_sigma,
                    gamma_ppn=gamma_ppn,
                    lambda_ifu=lambda_ifu,
                    lambda_ifu_sigma=lambda_ifu_sigma,
                    alpha_lambda=alpha_lambda,
                    beta_lambda=beta_lambda,
                    gamma_in=gamma_in,
                    gamma_in_sigma=gamma_in_sigma,
                    alpha_gamma_in=alpha_gamma_in,
                    log_m2l=log_m2l,
                    log_m2l_sigma=log_m2l_sigma,
                    alpha_log_m2l=alpha_log_m2l,
                    gamma_pl_list=gamma_pl_list,
                    gamma_pl_mean=gamma_pl_mean,
                    gamma_pl_sigma=gamma_pl_sigma,
                )
            kwargs_return["gamma_in"] = gamma_in_draw
        if self._log_m2l_sampling:
            # Same pattern as gamma_in: hard error on a bad mean, rejection
            # sampling on an out-of-range draw.
            if log_m2l < self._log_m2l_min or log_m2l > self._log_m2l_max:
                raise ValueError(
                    "m2l parameter is out of bounds of the interpolated range!"
                )
            log_m2l_lens = log_m2l + alpha_log_m2l * self._lambda_scaling_property
            log_m2l_draw = np.random.normal(log_m2l_lens, log_m2l_sigma)
            if log_m2l_draw < self._log_m2l_min or log_m2l_draw > self._log_m2l_max:
                return self.draw_lens(
                    lambda_mst=lambda_mst,
                    lambda_mst_sigma=lambda_mst_sigma,
                    gamma_ppn=gamma_ppn,
                    lambda_ifu=lambda_ifu,
                    lambda_ifu_sigma=lambda_ifu_sigma,
                    alpha_lambda=alpha_lambda,
                    beta_lambda=beta_lambda,
                    gamma_in=gamma_in,
                    gamma_in_sigma=gamma_in_sigma,
                    alpha_gamma_in=alpha_gamma_in,
                    log_m2l=log_m2l,
                    log_m2l_sigma=log_m2l_sigma,
                    alpha_log_m2l=alpha_log_m2l,
                    gamma_pl_list=gamma_pl_list,
                    gamma_pl_mean=gamma_pl_mean,
                    gamma_pl_sigma=gamma_pl_sigma,
                )
            kwargs_return["log_m2l"] = log_m2l_draw
        # A per-lens gamma_pl (indexed from gamma_pl_list) takes precedence
        # over the global gamma_pl distribution.
        if self._gamma_pl_model is True:
            kwargs_return["gamma_pl"] = gamma_pl_list[self.gamma_pl_index]
        elif self._gamma_pl_global_sampling is True:
            if self._gamma_pl_global_dist in ["GAUSSIAN"]:
                gamma_pl_draw = np.random.normal(gamma_pl_mean, gamma_pl_sigma)
            else:
                gamma_pl_draw = gamma_pl_mean
            kwargs_return["gamma_pl"] = gamma_pl_draw
        return kwargs_return
|
sibirrerREPO_NAMEhierArcPATH_START.@hierArc_extracted@hierArc-main@hierarc@Sampling@Distributions@lens_distribution.py@.PATH_END.py
|
{
"filename": "test_pub_sub_client.py",
"repo_name": "mwvgroup/Pitt-Google-Broker",
"repo_path": "Pitt-Google-Broker_extracted/Pitt-Google-Broker-master/tests/test_pub_sub_client.py",
"type": "Python"
}
|
"""This file provides tests for the ``broker.pub_sub_client`` module."""
import os
from pathlib import Path
import unittest
from deepdiff import DeepDiff
from broker import pub_sub_client as psc
from broker.alert_ingestion.gen_valid_schema import _load_Avro
# GCP project ID read from the environment.
PROJECT_ID = os.getenv('GOOGLE_CLOUD_PROJECT')
# Sample ZTF alert shipped with the test suite.
test_alerts_dir = Path(__file__).parent / 'test_alerts'
test_alert_path = test_alerts_dir / 'ztf_3.3_1154308030015010004.avro'
# Pub/Sub topic and subscription dedicated to these tests.
topic_name = 'test_alerts_PS_publish'
subscription_name = 'test_alerts_PS_subscribe'
class TestPubSub(unittest.TestCase):
    """Test the functions in ``message_service`` for correct output,
    given an input.
    """

    def test_publish_pubsub(self):
        """Publish a test alert's bytes through the generic publish_pubsub()
        wrapper and check that a message ID string comes back.
        """
        alert_bytes = test_alert_path.read_bytes()
        message_id = psc.message_service.publish_pubsub(topic_name, alert_bytes)
        # The resolved future should be the message ID as a string; if the
        # job fails, future.results() raises an exception instead.
        # https://googleapis.dev/python/pubsub/latest/publisher/api/futures.html
        self.assertIs(type(message_id), str)

    @unittest.skip("subscribe_alerts() failing. Not currently used. Skipping.")
    def test_input_match_output(self):
        """Round-trip an alert: publish with ``publish_pubsub``, pull it back
        with ``subscribe_alerts``, and check input matches the decoded output.
        """
        alert_bytes = test_alert_path.read_bytes()
        psc.message_service.publish_pubsub(topic_name, alert_bytes)
        messages = psc.message_service.subscribe_alerts(subscription_name, max_alerts=1)
        # Known failure inside subscribe_alerts() at `message = pickle.loads(encoded)`
        # with: _pickle.UnpicklingError: invalid load key, 'O'.
        self.assertEqual(DeepDiff(alert_bytes[0], messages[0]), {})
|
mwvgroupREPO_NAMEPitt-Google-BrokerPATH_START.@Pitt-Google-Broker_extracted@Pitt-Google-Broker-master@tests@test_pub_sub_client.py@.PATH_END.py
|
{
"filename": "histograms_plot.ipynb",
"repo_name": "snad-space/zwad",
"repo_path": "zwad_extracted/zwad-master/notebooks/histograms_plot.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sys
import warnings
# Silence all warnings to keep the notebook output clean.
warnings.filterwarnings("ignore")
from zwad.utils import latex_feature_names
```
# Pretty $\LaTeX$ feature names, colors and field labels
```python
# Mapping from raw feature name to its LaTeX-ready display name.
latex_names = latex_feature_names('../data/latex_feature_names.csv')
# One fixed color per ZTF field, used consistently across all plots.
colors = {
    'm31': '#1b9e77',
    'deep': '#fc8d62',
    'disk': '#8da0cb',
}
# Small-caps LaTeX labels for plot legends.
field_names = {
    'm31': r'\textsc{M\,31}',
    'deep': r'\textsc{Deep}',
    'disk': r'\textsc{Disk}',
}
```
## Run `make_anomalies_tables.ipynb` first to create `../data/*_expert_anomalies.csv`
```python
# Load per-field data: object IDs (uint64) and a structured float32 memmap of
# features (one named column per feature, one row per object).
# M31
m31_oid = np.memmap('../data/oid_m31.dat', mode='r', dtype=np.uint64)
m31_names = open('../data/feature_m31.name').read().split()
#x = np.memmap('feature_m31.dat', mode='r', dtype=np.float32, shape=(oid.size, len(names)))
# OR
m31_dtype = [(name, np.float32) for name in m31_names]
m31_x = np.memmap('../data/feature_m31.dat', mode='r', dtype=m31_dtype, shape=m31_oid.shape)
# Disk
disk_oid = np.memmap('../data/oid_disk.dat', mode='r', dtype=np.uint64)
disk_names = open('../data/feature_disk.name').read().split()
#x = np.memmap('feature_m31.dat', mode='r', dtype=np.float32, shape=(oid.size, len(names)))
# OR
disk_dtype = [(name, np.float32) for name in disk_names]
disk_x = np.memmap('../data/feature_disk.dat', mode='r', dtype=disk_dtype, shape=disk_oid.shape)
# Deep
deep_oid = np.memmap('../data/oid_deep.dat', mode='r', dtype=np.uint64)
deep_names = open('../data/feature_deep.name').read().split()
#x = np.memmap('feature_m31.dat', mode='r', dtype=np.float32, shape=(oid.size, len(names)))
# OR
deep_dtype = [(name, np.float32) for name in deep_names]
deep_x = np.memmap('../data/feature_deep.dat', mode='r', dtype=deep_dtype, shape=deep_oid.shape)
```
```python
# #updated from ANOMALIES_PAD 11/23/2020
# Expert anomaly candidate tables (artefacts excluded), indexed by object ID.
m31_anom_tab = pd.read_csv('../data/m31_expert_anomalies.csv', index_col='oid')
disk_anom_tab = pd.read_csv('../data/disk_expert_anomalies.csv', index_col='oid')
deep_anom_tab = pd.read_csv('../data/deep_expert_anomalies.csv', index_col='oid')
```
```python
# Display the M31 expert anomaly table (7 rows x 42 feature columns).
m31_anom_tab
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
<tr>
<th>oid</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>695211200058391</th>
<td>1.080000</td>
<td>0.330097</td>
<td>0.038835</td>
<td>0.395421</td>
<td>0.336107</td>
<td>96941336.0</td>
<td>0.565500</td>
<td>1.034199</td>
<td>0.377343</td>
<td>-0.017752</td>
<td>...</td>
<td>0.134401</td>
<td>0.022435</td>
<td>0.874175</td>
<td>1.656223</td>
<td>34.118484</td>
<td>3.478692</td>
<td>0.811280</td>
<td>0.415326</td>
<td>0.798510</td>
<td>20.626154</td>
</tr>
<tr>
<th>695211200035023</th>
<td>0.919500</td>
<td>0.187500</td>
<td>0.086538</td>
<td>0.372327</td>
<td>0.135560</td>
<td>220374260.0</td>
<td>0.467499</td>
<td>1.099400</td>
<td>1.052788</td>
<td>-0.002589</td>
<td>...</td>
<td>0.111273</td>
<td>0.020920</td>
<td>0.829133</td>
<td>1.990572</td>
<td>78.201820</td>
<td>8.496761</td>
<td>1.292874</td>
<td>0.393981</td>
<td>0.870150</td>
<td>19.604841</td>
</tr>
<tr>
<th>695211400009049</th>
<td>0.763500</td>
<td>0.428571</td>
<td>0.000000</td>
<td>0.438763</td>
<td>0.194513</td>
<td>72289330.0</td>
<td>0.808002</td>
<td>1.165999</td>
<td>-1.336752</td>
<td>0.011980</td>
<td>...</td>
<td>0.093476</td>
<td>0.017952</td>
<td>1.078872</td>
<td>3.831684</td>
<td>49.701527</td>
<td>5.552580</td>
<td>-0.282411</td>
<td>0.444718</td>
<td>0.910447</td>
<td>20.274681</td>
</tr>
<tr>
<th>695211400046832</th>
<td>0.979000</td>
<td>0.145631</td>
<td>0.029126</td>
<td>0.224395</td>
<td>0.786437</td>
<td>295042620.0</td>
<td>0.212999</td>
<td>0.440001</td>
<td>18.727877</td>
<td>0.002106</td>
<td>...</td>
<td>0.082955</td>
<td>0.139150</td>
<td>0.902825</td>
<td>0.786272</td>
<td>9.366056</td>
<td>0.839313</td>
<td>3.378764</td>
<td>0.245349</td>
<td>0.721399</td>
<td>20.518684</td>
</tr>
<tr>
<th>695211400025927</th>
<td>0.701500</td>
<td>0.297030</td>
<td>0.039604</td>
<td>0.333839</td>
<td>0.780629</td>
<td>337253700.0</td>
<td>0.323002</td>
<td>0.626799</td>
<td>0.552218</td>
<td>0.014561</td>
<td>...</td>
<td>0.140121</td>
<td>0.027622</td>
<td>0.623367</td>
<td>1.225854</td>
<td>27.272932</td>
<td>1.195005</td>
<td>0.324640</td>
<td>0.242561</td>
<td>0.791279</td>
<td>20.601425</td>
</tr>
<tr>
<th>695211200057718</th>
<td>0.928000</td>
<td>0.326389</td>
<td>0.027778</td>
<td>0.416115</td>
<td>0.243381</td>
<td>404207460.0</td>
<td>0.675501</td>
<td>1.319599</td>
<td>-0.692586</td>
<td>-0.001005</td>
<td>...</td>
<td>0.094246</td>
<td>0.017760</td>
<td>1.049509</td>
<td>3.760212</td>
<td>55.255604</td>
<td>6.077719</td>
<td>0.755290</td>
<td>0.480539</td>
<td>0.802381</td>
<td>20.208523</td>
</tr>
<tr>
<th>695211100015190</th>
<td>0.981999</td>
<td>0.138211</td>
<td>0.081301</td>
<td>0.353806</td>
<td>0.233171</td>
<td>137418660.0</td>
<td>0.464251</td>
<td>0.811600</td>
<td>2.162920</td>
<td>0.005433</td>
<td>...</td>
<td>0.169431</td>
<td>0.049908</td>
<td>0.822073</td>
<td>1.333406</td>
<td>35.070377</td>
<td>4.851675</td>
<td>1.575419</td>
<td>0.429781</td>
<td>0.804553</td>
<td>19.868694</td>
</tr>
</tbody>
</table>
<p>7 rows × 42 columns</p>
</div>
```python
fig = plt.figure(figsize=(14, 20))
plt.rcParams.update({'text.usetex': True})
for panel, feature in enumerate(m31_names):
    ax = fig.add_subplot(9, 5, panel + 1)  # grid sized for all 42 features
    # Dashed step histograms: the full feature distribution of each field.
    for field, frame in (('m31', m31_x), ('deep', deep_x), ('disk', disk_x)):
        plt.hist(frame[feature], histtype='step', color=colors[field],
                 linestyle='dashed', linewidth=1, label=field_names[field])
    # Solid step histograms: only the uncategorised anomaly candidates.
    for field, frame in (('m31', m31_anom_tab), ('deep', deep_anom_tab), ('disk', disk_anom_tab)):
        plt.hist(frame[feature], histtype='step', color=colors[field],
                 alpha=1, linewidth=1,
                 label=field_names[field] + ' uncategorised anomaly candidates')
    ax.set_xlabel(latex_names[feature])
    ax.set_yscale('log')
plt.tight_layout()
plt.legend(loc='center right', bbox_to_anchor=(3.0, 0.5), ncol=2, fancybox=True, shadow=True)
fig.savefig('../figs/histograms/all_features_histogram_uncat_anomalies.pdf')
```

# Plot the three most important features (the most important feature from each of the first three principal components)
```python
# Most important feature from each of the first three principal components.
three_most_important_features = ['standard_deviation', 'periodogram_amplitude', 'chi2']
```
```python
plt.clf()
# fig, axes = plt.subplots(1, 3, gridspec_kw = {'wspace':0, 'hspace':0}, sharey=True, figsize=(20, 5))
fig, axes = plt.subplots(1, 3, figsize=(12, 2.8))
# One log-log panel per selected feature; all three fields overlaid.
for ax, f in zip(axes, three_most_important_features):
    # Log-spaced bins per field, spanning that field's own feature range.
    m31_bins = np.logspace(np.log10(m31_x[f].min()), np.log10(m31_x[f].max()), 10)
    deep_bins = np.logspace(np.log10(deep_x[f].min()), np.log10(deep_x[f].max()), 10)
    disk_bins = np.logspace(np.log10(disk_x[f].min()), np.log10(disk_x[f].max()), 10)
    # Dashed: full field distributions; solid: anomaly candidates only.
    ax.hist(m31_x[f], bins=m31_bins, histtype='step', color=colors['m31'],
            linestyle='dashed', linewidth=1, label=field_names['m31'])
    ax.hist(deep_x[f], bins=deep_bins, histtype='step', color=colors['deep'],
            linestyle='dashed', linewidth=1, label=field_names['deep'])
    ax.hist(disk_x[f], bins=disk_bins, histtype='step', color=colors['disk'],
            linestyle='dashed', linewidth=1, label=field_names['disk'])
    ax.hist(m31_anom_tab[f], bins=m31_bins, histtype='step', color=colors['m31'],
            alpha=1, linewidth=1, label=field_names['m31'] + ' uncategorised anomaly candidates')
    ax.hist(deep_anom_tab[f], bins=deep_bins, histtype='step', color=colors['deep'],
            alpha=1, linewidth=1, label=field_names['deep'] + ' uncategorised anomaly candidates')
    ax.hist(disk_anom_tab[f], bins=disk_bins, histtype='step', color=colors['disk'],
            alpha=1, linewidth=1, label=field_names['disk'] + ' uncategorised anomaly candidates')
    ax.set_xlabel(latex_names[f])
    ax.set_yscale('log')
    ax.set_xscale('log')
axes[1].set_ylabel('Counts')
plt.legend(loc='center left', bbox_to_anchor=(1.05, 0.5), ncol=2, fancybox=True, shadow=True)
fig.tight_layout()
fig.savefig('../figs/histograms/three_most_important_features_uncat_anomalies.pdf', bbox_inches='tight')
```
<Figure size 432x288 with 0 Axes>

# Plot artefacts and interesting objects of outliers lists only
```python
# Per-object tag tables ("maria" lists: tag, tag_detailed, Comments), indexed by oid.
m31_maria_list = pd.read_csv('../data/m31_maria.csv')
m31_maria_list= m31_maria_list.set_index('oid')
disk_maria_list = pd.read_csv('../data/disk_maria.csv')
disk_maria_list= disk_maria_list.set_index('oid')
# latin-1 encoding: this file presumably contains non-UTF-8 characters -- TODO confirm.
deep_maria_list = pd.read_csv('../data/deep_maria.csv', encoding='latin-1')
deep_maria_list= deep_maria_list.set_index('oid')
## Below are the full outliers tables (artefacts included), unlike the expert
## anomaly tables above (artefacts excluded).
m31_outlier_tab = pd.read_csv('../data/m31_outliers_table.csv', index_col='oid')
disk_outlier_tab = pd.read_csv('../data/disk_outliers_table.csv', index_col='oid')
deep_outlier_tab = pd.read_csv('../data/deep_outliers_table.csv', index_col='oid')
```
```python
# Join features with tags column-wise on the shared oid index.
m31_full_tab = pd.concat([m31_outlier_tab, m31_maria_list], axis=1)
disk_full_tab = pd.concat([disk_outlier_tab, disk_maria_list], axis=1)
deep_full_tab = pd.concat([deep_outlier_tab, deep_maria_list], axis=1)
m31_full_tab.head()
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
<th>-1</th>
<th>tag</th>
<th>tag_detailed</th>
<th>Comments</th>
<th>Unnamed: 5</th>
</tr>
<tr>
<th>oid</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>695211400034403</th>
<td>2.3800</td>
<td>0.293706</td>
<td>0.048951</td>
<td>0.210137</td>
<td>1.039928</td>
<td>1.205708e+09</td>
<td>1.702999</td>
<td>2.813601</td>
<td>-0.436671</td>
<td>-0.000443</td>
<td>...</td>
<td>1568.27230</td>
<td>0.410859</td>
<td>1.097088</td>
<td>0.875042</td>
<td>16.457632</td>
<td>0</td>
<td>artefact</td>
<td>bright star</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>695211400124577</th>
<td>2.0445</td>
<td>0.358025</td>
<td>0.006173</td>
<td>0.203197</td>
<td>1.588497</td>
<td>2.624290e+09</td>
<td>1.459000</td>
<td>3.120001</td>
<td>-0.784893</td>
<td>-0.003234</td>
<td>...</td>
<td>1209.48120</td>
<td>0.397880</td>
<td>1.082259</td>
<td>0.903058</td>
<td>16.626255</td>
<td>1</td>
<td>artefact</td>
<td>bright star</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>695211400102351</th>
<td>2.3620</td>
<td>0.396694</td>
<td>0.024793</td>
<td>0.155299</td>
<td>1.549895</td>
<td>2.136628e+09</td>
<td>1.404751</td>
<td>3.065599</td>
<td>-0.579718</td>
<td>0.001458</td>
<td>...</td>
<td>747.95715</td>
<td>0.321721</td>
<td>1.132575</td>
<td>0.900564</td>
<td>17.092886</td>
<td>2</td>
<td>artefact</td>
<td>bright star</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>695211400053697</th>
<td>2.2125</td>
<td>0.310000</td>
<td>0.080000</td>
<td>0.229030</td>
<td>1.349776</td>
<td>4.898475e+08</td>
<td>1.032000</td>
<td>2.337502</td>
<td>0.153038</td>
<td>0.000263</td>
<td>...</td>
<td>1371.10170</td>
<td>0.280188</td>
<td>0.917519</td>
<td>0.851317</td>
<td>16.688284</td>
<td>3</td>
<td>artefact</td>
<td>bright star</td>
<td>NaN</td>
<td>NaN</td>
</tr>
<tr>
<th>695211400000352</th>
<td>2.4000</td>
<td>0.288288</td>
<td>0.045045</td>
<td>0.160094</td>
<td>1.602353</td>
<td>9.396195e+08</td>
<td>1.299751</td>
<td>2.475200</td>
<td>0.269096</td>
<td>-0.001185</td>
<td>...</td>
<td>1488.70950</td>
<td>0.583173</td>
<td>0.983108</td>
<td>0.864887</td>
<td>16.493820</td>
<td>4</td>
<td>artefact</td>
<td>bright star</td>
<td>NaN</td>
<td>NaN</td>
</tr>
</tbody>
</table>
<p>5 rows × 47 columns</p>
</div>
```python
def _split_by_tag(full_tab):
    """Separate bogus detections (tag == 'artefact') from genuine candidates."""
    bogus = full_tab.tag == 'artefact'
    return full_tab[bogus], full_tab[~bogus]

m31_artefacts, m31_interesting = _split_by_tag(m31_full_tab)
disk_artefacts, disk_interesting = _split_by_tag(disk_full_tab)
deep_artefacts, deep_interesting = _split_by_tag(deep_full_tab)
```
```python
fig = plt.figure(figsize=(14, 20))
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=None)
# plt.rcParams.update({'font.size': 26})
# Compare M31 bogus detections (artefacts) against genuine anomaly candidates,
# one histogram panel per feature; index by raw name, label with LaTeX name.
for i, f in enumerate(m31_names):
    ax = fig.add_subplot(9, 5, i+1) #42 features
    plt.hist(m31_artefacts[f], histtype='step', color='purple', linewidth=1,
             ls='--', label=field_names['m31'] + ' bogus')
    plt.hist(m31_interesting[f], histtype='step', color='red',
             alpha=1, linewidth=1, label=field_names['m31'] + ' anomaly candidates')
    #ax.set_title(r'{}'.format(f), fontsize=14)
    #ax.set_ylabel('Counts', fontsize=14)
    ax.set_xlabel(latex_names[f])
    #ax.set_yscale('log')
    #ax.tick_params(labelsize=20)
    #ax.legend(loc='best')
fig.tight_layout()
plt.legend(loc='center right', bbox_to_anchor=(3.0, 0.5), ncol=2, fancybox=True, shadow=True)
fig.savefig('../figs/histograms/m31_artefacts_vs_interesting_objs.pdf', bbox_inches='tight')
```

```python
fig = plt.figure(figsize=(14, 20))
# Compare Disk bogus detections (artefacts) against genuine anomaly candidates,
# one histogram panel per feature.
# Bug fix: this loop previously iterated over the LaTeX display names
# (map(latex_names.get, m31_names)) and used them to index the data frames,
# which raises KeyError because the frame columns are the raw feature names
# (see the rendered tables above). Iterate the raw names -- as the working
# M31 cell does -- and use the LaTeX name only for the axis label.
for i, f in enumerate(m31_names):
    ax = fig.add_subplot(9, 5, i+1) #42 features
    plt.hist(disk_artefacts[f], histtype='step', color='cyan',
             linewidth=1, label=field_names['disk'] + ' bogus')
    plt.hist(disk_interesting[f], histtype='step', ls='--', color='green',
             alpha=1, linewidth=1, label=field_names['disk'] + ' anomaly candidates')
    ax.set_xlabel(latex_names[f])
fig.tight_layout()
plt.legend(loc='center right', bbox_to_anchor=(3.0, 0.5), ncol=2, fancybox=True, shadow=True)
fig.savefig('../figs/histograms/disk_artefacts_vs_interesting_objs.pdf', bbox_inches='tight')
```

```python
fig = plt.figure(figsize=(14, 20))
# Compare Deep bogus detections (artefacts) against genuine anomaly candidates,
# one histogram panel per feature.
# Bug fix: as in the Disk cell, the loop previously iterated over LaTeX display
# names (map(latex_names.get, m31_names)) and indexed the data frames with
# them, raising KeyError since the columns are raw feature names. Iterate the
# raw names and use the LaTeX name only for the axis label.
for i, f in enumerate(m31_names):
    ax = fig.add_subplot(9, 5, i+1) #42 features
    plt.hist(deep_artefacts[f], ls='--', histtype='step', color='magenta', linewidth=1,
             label=field_names['deep'] + ' bogus')
    plt.hist(deep_interesting[f], histtype='step', color='blue',
             alpha=1, linewidth=1, label=field_names['deep'] + ' anomaly candidates')
    ax.set_xlabel(latex_names[f])
fig.tight_layout()
plt.legend(loc='center right', bbox_to_anchor=(3.0, 0.5), ncol=2, fancybox=True, shadow=True)
fig.savefig('../figs/histograms/deep_artefacts_vs_interesting_objs.pdf', bbox_inches='tight')
```

```python
```
# Repeat the histogram comparison, this time for the outlier samples
```python
# #updated from ANOMALIES_PAD 11/23/2020
m31_outliers_tab = pd.read_csv('../data/m31_outliers_table.csv', index_col='oid')
disk_outliers_tab = pd.read_csv('../data/disk_outliers_table.csv', index_col='oid')
deep_outliers_tab = pd.read_csv('../data/deep_outliers_table.csv', index_col='oid')
```
```python
m31_outliers_tab
```
<div>
<style scoped>
.dataframe tbody tr th:only-of-type {
vertical-align: middle;
}
.dataframe tbody tr th {
vertical-align: top;
}
.dataframe thead th {
text-align: right;
}
</style>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>amplitude</th>
<th>beyond_1_std</th>
<th>beyond_2_std</th>
<th>cusum</th>
<th>eta</th>
<th>eta_e</th>
<th>inter_percentile_range_25</th>
<th>inter_percentile_range_10</th>
<th>kurtosis</th>
<th>linear_fit_slope</th>
<th>...</th>
<th>periodogram_cusum</th>
<th>periodogram_eta</th>
<th>periodogram_inter_percentile_range_25</th>
<th>periodogram_standard_deviation</th>
<th>periodogram_percent_amplitude</th>
<th>chi2</th>
<th>skew</th>
<th>standard_deviation</th>
<th>stetson_K</th>
<th>weighted_mean</th>
</tr>
<tr>
<th>oid</th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<th>695211400034403</th>
<td>2.380000</td>
<td>0.293706</td>
<td>0.048951</td>
<td>0.210137</td>
<td>1.039928</td>
<td>1.205708e+09</td>
<td>1.702999</td>
<td>2.813601</td>
<td>-0.436671</td>
<td>-0.000443</td>
<td>...</td>
<td>0.118531</td>
<td>0.011458</td>
<td>0.861847</td>
<td>1.873147</td>
<td>28.882809</td>
<td>1568.272300</td>
<td>0.410859</td>
<td>1.097088</td>
<td>0.875042</td>
<td>16.457632</td>
</tr>
<tr>
<th>695211400124577</th>
<td>2.044500</td>
<td>0.358025</td>
<td>0.006173</td>
<td>0.203197</td>
<td>1.588497</td>
<td>2.624290e+09</td>
<td>1.459000</td>
<td>3.120001</td>
<td>-0.784893</td>
<td>-0.003234</td>
<td>...</td>
<td>0.112881</td>
<td>0.019039</td>
<td>0.824700</td>
<td>0.893768</td>
<td>9.279490</td>
<td>1209.481200</td>
<td>0.397880</td>
<td>1.082259</td>
<td>0.903058</td>
<td>16.626255</td>
</tr>
<tr>
<th>695211400102351</th>
<td>2.362000</td>
<td>0.396694</td>
<td>0.024793</td>
<td>0.155299</td>
<td>1.549895</td>
<td>2.136628e+09</td>
<td>1.404751</td>
<td>3.065599</td>
<td>-0.579718</td>
<td>0.001458</td>
<td>...</td>
<td>0.144106</td>
<td>0.022466</td>
<td>0.992880</td>
<td>0.968951</td>
<td>8.050076</td>
<td>747.957150</td>
<td>0.321721</td>
<td>1.132575</td>
<td>0.900564</td>
<td>17.092886</td>
</tr>
<tr>
<th>695211400053697</th>
<td>2.212500</td>
<td>0.310000</td>
<td>0.080000</td>
<td>0.229030</td>
<td>1.349776</td>
<td>4.898475e+08</td>
<td>1.032000</td>
<td>2.337502</td>
<td>0.153038</td>
<td>0.000263</td>
<td>...</td>
<td>0.182627</td>
<td>0.026980</td>
<td>1.074533</td>
<td>0.876942</td>
<td>4.537641</td>
<td>1371.101700</td>
<td>0.280188</td>
<td>0.917519</td>
<td>0.851317</td>
<td>16.688284</td>
</tr>
<tr>
<th>695211400000352</th>
<td>2.400000</td>
<td>0.288288</td>
<td>0.045045</td>
<td>0.160094</td>
<td>1.602353</td>
<td>9.396195e+08</td>
<td>1.299751</td>
<td>2.475200</td>
<td>0.269096</td>
<td>-0.001185</td>
<td>...</td>
<td>0.143338</td>
<td>0.017688</td>
<td>0.736078</td>
<td>1.019456</td>
<td>9.457602</td>
<td>1488.709500</td>
<td>0.583173</td>
<td>0.983108</td>
<td>0.864887</td>
<td>16.493820</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>695211100015190</th>
<td>0.981999</td>
<td>0.138211</td>
<td>0.081301</td>
<td>0.353806</td>
<td>0.233171</td>
<td>1.374187e+08</td>
<td>0.464251</td>
<td>0.811600</td>
<td>2.162920</td>
<td>0.005433</td>
<td>...</td>
<td>0.169431</td>
<td>0.049908</td>
<td>0.822073</td>
<td>1.333406</td>
<td>35.070377</td>
<td>4.851675</td>
<td>1.575419</td>
<td>0.429781</td>
<td>0.804553</td>
<td>19.868694</td>
</tr>
<tr>
<th>695211200075348</th>
<td>1.108000</td>
<td>0.458333</td>
<td>0.013889</td>
<td>0.400897</td>
<td>0.117357</td>
<td>2.367448e+07</td>
<td>1.024000</td>
<td>1.484999</td>
<td>-1.029066</td>
<td>0.017021</td>
<td>...</td>
<td>0.101488</td>
<td>0.048847</td>
<td>0.894277</td>
<td>2.820131</td>
<td>63.978363</td>
<td>12.322401</td>
<td>0.163349</td>
<td>0.557800</td>
<td>0.902715</td>
<td>20.001472</td>
</tr>
<tr>
<th>695211400121607</th>
<td>1.044000</td>
<td>0.361111</td>
<td>0.000000</td>
<td>0.435610</td>
<td>0.365767</td>
<td>7.381361e+08</td>
<td>0.948000</td>
<td>1.390501</td>
<td>-1.225617</td>
<td>0.010255</td>
<td>...</td>
<td>0.136348</td>
<td>0.009870</td>
<td>0.953997</td>
<td>3.646029</td>
<td>42.746563</td>
<td>8.932074</td>
<td>0.155026</td>
<td>0.562205</td>
<td>0.877161</td>
<td>20.012375</td>
</tr>
<tr>
<th>695211400070144</th>
<td>0.763000</td>
<td>0.406250</td>
<td>0.005208</td>
<td>0.401009</td>
<td>0.054155</td>
<td>1.422277e+08</td>
<td>0.557999</td>
<td>1.121000</td>
<td>-0.859215</td>
<td>-0.008022</td>
<td>...</td>
<td>0.103577</td>
<td>0.036872</td>
<td>1.068409</td>
<td>2.863027</td>
<td>81.517940</td>
<td>38.527668</td>
<td>-0.230873</td>
<td>0.383597</td>
<td>0.870191</td>
<td>18.670090</td>
</tr>
<tr>
<th>695211200019653</th>
<td>0.136500</td>
<td>0.358268</td>
<td>0.003937</td>
<td>0.118390</td>
<td>0.522857</td>
<td>1.983431e+08</td>
<td>0.124001</td>
<td>0.183499</td>
<td>-1.141636</td>
<td>-0.000115</td>
<td>...</td>
<td>0.104361</td>
<td>0.015273</td>
<td>0.764947</td>
<td>2.052644</td>
<td>105.831245</td>
<td>38.085136</td>
<td>0.187029</td>
<td>0.070679</td>
<td>0.884578</td>
<td>15.309025</td>
</tr>
</tbody>
</table>
<p>101 rows × 42 columns</p>
</div>
```python
fig = plt.figure(figsize=(14, 20))
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=None, hspace=0.3)
# plt.rcParams.update({'font.size': 32})
for i, f in enumerate(m31_names):
ax = fig.add_subplot(9, 5, i+1) #42 features
plt.hist(m31_x['{}'.format(f)], histtype='step', color=colors['m31'],
linestyle='dashed', linewidth=1, label=field_names['m31'])
plt.hist(deep_x['{}'.format(f)], histtype='step', color=colors['deep'],
linestyle='dashed', linewidth=1, label=field_names['deep'])
plt.hist(disk_x['{}'.format(f)], histtype='step', color=colors['disk'],
linestyle='dashed', linewidth=1, label=field_names['disk'])
plt.hist(m31_anom_tab['{}'.format(f)], histtype='step', color=colors['m31'],
alpha=1, linewidth=1, label=field_names['m31'] + ' outliers')
plt.hist(deep_anom_tab['{}'.format(f)], histtype='step', color=colors['deep'],
alpha=1, linewidth=1, label=field_names['deep'] + ' outliers')
plt.hist(disk_anom_tab['{}'.format(f)], histtype='step', color=colors['disk'],
alpha=1, linewidth=1, label=field_names['disk'] + ' outliers')
ax.set_xlabel(latex_names[f])
ax.set_yscale('log')
fig.tight_layout()
plt.legend(loc='center right', bbox_to_anchor=(3.0, 0.5), ncol=2, fancybox=True, shadow=True)
fig.savefig('../figs/histograms/all_features_histogram_outliers.pdf', bbox_inches='tight')
```

```python
```
|
snad-spaceREPO_NAMEzwadPATH_START.@zwad_extracted@zwad-master@notebooks@histograms_plot.ipynb@.PATH_END.py
|
{
"filename": "config.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/shell/config.py",
"type": "Python"
}
|
###############################################################################
#
# Crossbar.io Shell
# Copyright (c) typedef int GmbH. Licensed under EUPLv1.2.
#
###############################################################################
import os
from six.moves import configparser
# pair a node from a node public key from a local file:
#
# cbf pair node --realm "myrealm" --node "mynode" /var/local/crossbar/.crossbar/key.pub
# pair a node from a node public key served from a HTTP URL:
#
# cbf pair node --realm "myrealm" --node "mynode" http://localhost:9140/key.pub
from txaio import make_logger
class Profile(object):
    """A single connection profile read from the local cbf configuration file.

    A profile bundles everything needed to connect to a management realm:
    router URL, realm/role, key material and TLS options. Instances are
    normally created via :meth:`Profile.parse` from one config-file section.
    """

    log = make_logger()

    def __init__(self,
                 name=None,
                 url=None,
                 reconnect=None,
                 debug=None,
                 realm=None,
                 role=None,
                 pubkey=None,
                 privkey=None,
                 tls_hostname=None,
                 tls_certificates=None):
        # all attributes are plain values; see parse() for how they are
        # derived from the raw config-file strings
        self.name = name
        self.url = url
        self.reconnect = reconnect
        self.debug = debug
        self.realm = realm
        self.role = role
        self.pubkey = pubkey
        self.privkey = privkey
        self.tls_hostname = tls_hostname
        self.tls_certificates = tls_certificates

    def __str__(self):
        return u'Profile(name={}, url={}, reconnect={}, debug={}, realm={}, role={}, pubkey={}, privkey={}, tls_hostname={}, tls_certificates={})'.format(
            self.name, self.url, self.reconnect, self.debug, self.realm, self.role, self.pubkey, self.privkey,
            self.tls_hostname, self.tls_certificates)

    @staticmethod
    def parse(name, items):
        """Build a :class:`Profile` from one parsed config-file section.

        :param name: the section (profile) name.
        :param items: iterable of ``(key, value)`` string pairs as returned
            by ``configparser``'s ``items()`` for the section.
        :returns: a new :class:`Profile` instance; unknown keys are logged
            and skipped.
        """
        url = None
        reconnect = None
        debug = None
        realm = None
        role = None
        pubkey = None
        privkey = None
        tls_hostname = None
        tls_certificates = None
        for k, v in items:
            if k == 'url':
                url = str(v)
            elif k == 'reconnect':
                reconnect = int(v)
            elif k == 'debug':
                # BUGFIX: config values are strings, so bool(v) is True for ANY
                # non-empty value (including "false" and "0"). Parse the same
                # truthy spellings configparser.getboolean() accepts instead.
                debug = str(v).strip().lower() in ('1', 'yes', 'true', 'on')
            elif k == 'realm':
                realm = str(v)
            elif k == 'role':
                role = str(v)
            elif k == 'pubkey':
                pubkey = str(v)
            elif k == 'privkey':
                privkey = str(v)
            elif k == 'tls_hostname':
                tls_hostname = str(v)
            elif k == 'tls_certificates':
                # comma-separated list of certificate file paths
                tls_certificates = [x.strip() for x in str(v).split(',')]
            else:
                # skip unknown attribute
                Profile.log.warn('unprocessed config attribute "{}"'.format(k))

        return Profile(name, url, reconnect, debug, realm, role, pubkey, privkey, tls_hostname, tls_certificates)
class UserConfig(object):
    """In-memory view of the user's local cbf configuration file.

    The INI-style file at ``config_path`` is parsed once at construction
    time; every section becomes one :class:`Profile`, exposed through the
    ``profiles`` mapping (section name -> Profile).
    """

    log = make_logger()

    def __init__(self, config_path):
        # remember where the config came from (absolute, for diagnostics)
        self._config_path = os.path.abspath(config_path)

        parser = configparser.ConfigParser()
        parser.read(config_path)
        self.config = parser

        # one profile per config-file section
        self.profiles = {}
        for section in parser.sections():
            parsed = Profile.parse(section, parser.items(section))
            self.profiles[section] = parsed
            self.log.info('Profile "{profile_name}" parsed: {profile}', profile_name=section, profile=parsed)

        self.log.info('Profiles loaded for: {profiles}', profiles=sorted(self.profiles.keys()))
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@shell@config.py@.PATH_END.py
|
{
"filename": "bench_kmeans.py",
"repo_name": "facebookresearch/faiss",
"repo_path": "faiss_extracted/faiss-main/benchs/bench_all_ivf/bench_kmeans.py",
"type": "Python"
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
import faiss
import argparse
import datasets
from datasets import sanitize
######################################################
# Command-line parsing
######################################################

parser = argparse.ArgumentParser()


def aa(*args, **kwargs):
    """Shorthand: forward all arguments to whatever argument group is
    currently bound to the module-level ``group`` variable."""
    group.add_argument(*args, **kwargs)
group = parser.add_argument_group('dataset options')
aa('--db', default='deep1M', help='dataset')
aa('--nt', default=65536, type=int)        # nb of training vectors used (see slicing below)
aa('--nb', default=100000, type=int)       # nb of database vectors used for error evaluation
aa('--nt_sample', default=0, type=int)     # pool size to subsample training vectors from (0 = take a prefix)

group = parser.add_argument_group('kmeans options')
aa('--k', default=256, type=int)           # number of centroids
aa('--seed', default=12345, type=int)
aa('--pcadim', default=-1, type=int, help='PCA to this dimension')  # -1 disables PCA
aa('--niter', default=25, type=int)        # total number of k-means iterations
aa('--eval_freq', default=100, type=int)   # evaluate quantization error every this many iterations

args = parser.parse_args()

print("args:", args)

# log CPU info for benchmark reproducibility (Linux-only: reads /proc/cpuinfo)
os.system('echo -n "nb processors "; '
          'cat /proc/cpuinfo | grep ^processor | wc -l; '
          'cat /proc/cpuinfo | grep ^"model name" | tail -1')

ngpu = faiss.get_num_gpus()
print("nb GPUs:", ngpu)
######################################################
# Load dataset
######################################################

# xt: train vectors, xb: database vectors, xq: queries, gt: ground truth
# (xq and gt are unused in this benchmark)
xt, xb, xq, gt = datasets.load_data(dataset=args.db)

if args.nt_sample == 0:
    # take the PCA training sample from right after the k-means training prefix
    xt_pca = xt[args.nt:args.nt + 10000]
    xt = xt[:args.nt]
else:
    xt_pca = xt[args.nt_sample:args.nt_sample + 10000]
    # subsample nt training vectors (without replacement) from a larger pool
    rs = np.random.RandomState(args.seed)
    idx = rs.choice(args.nt_sample, size=args.nt, replace=False)
    xt = xt[idx]

xb = xb[:args.nb]
d = xb.shape[1]

if args.pcadim != -1:
    # reduce dimensionality before clustering; the PCA is trained on a
    # held-out sample so it does not overfit the k-means training set
    print("training PCA: %d -> %d" % (d, args.pcadim))
    pca = faiss.PCAMatrix(d, args.pcadim)
    pca.train(sanitize(xt_pca))
    xt = pca.apply_py(sanitize(xt))
    xb = pca.apply_py(sanitize(xb))
    d = xb.shape[1]
######################################################
# Run clustering
######################################################

index = faiss.IndexFlatL2(d)

if ngpu > 0:
    print("moving index to GPU")
    index = faiss.index_cpu_to_all_gpus(index)

clustering = faiss.Clustering(d, args.k)

clustering.verbose = True
clustering.seed = args.seed
# effectively disable faiss' training-set subsampling / small-cluster heuristics
clustering.max_points_per_centroid = 10**6
clustering.min_points_per_centroid = 1

centroids = None
# run k-means in chunks of eval_freq iterations so the quantization error
# can be measured periodically on the held-out database vectors
for iter0 in range(0, args.niter, args.eval_freq):
    iter1 = min(args.niter, iter0 + args.eval_freq)
    clustering.niter = iter1 - iter0

    if iter0 > 0:
        # warm-start from the centroids produced by the previous chunk
        faiss.copy_array_to_vector(centroids.ravel(), clustering.centroids)

    clustering.train(sanitize(xt), index)
    index.reset()
    centroids = faiss.vector_to_array(clustering.centroids).reshape(args.k, d)

    # sum of squared distances from each database vector to its nearest centroid
    index.add(centroids)
    _, I = index.search(sanitize(xb), 1)
    error = ((xb - centroids[I.ravel()]) ** 2).sum()

    print("iter1=%d quantization error on test: %.4f" % (iter1, error))
|
facebookresearchREPO_NAMEfaissPATH_START.@faiss_extracted@faiss-main@benchs@bench_all_ivf@bench_kmeans.py@.PATH_END.py
|
{
"filename": "monkeypatch.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pytest/py2/_pytest/monkeypatch.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
""" monkeypatching and mocking functionality. """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
import sys
import warnings
from contextlib import contextmanager
import six
import pytest
from _pytest.fixtures import fixture
from _pytest.pathlib import Path
# extracts the module name from a py2-style "No module named X" ImportError
# message (presumably used elsewhere in this module; not referenced in this chunk)
RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$")


@fixture
def monkeypatch():
    """The returned ``monkeypatch`` fixture provides these
    helper methods to modify objects, dictionaries or os.environ::

        monkeypatch.setattr(obj, name, value, raising=True)
        monkeypatch.delattr(obj, name, raising=True)
        monkeypatch.setitem(mapping, name, value)
        monkeypatch.delitem(obj, name, raising=True)
        monkeypatch.setenv(name, value, prepend=False)
        monkeypatch.delenv(name, raising=True)
        monkeypatch.syspath_prepend(path)
        monkeypatch.chdir(path)

    All modifications will be undone after the requesting
    test function or fixture has finished. The ``raising``
    parameter determines if a KeyError or AttributeError
    will be raised if the set/deletion operation has no target.
    """
    mpatch = MonkeyPatch()
    yield mpatch
    # teardown: revert every change recorded during the test
    mpatch.undo()
def resolve(name):
    """Resolve a dotted name (``"pkg.mod.attr"``) to the object it denotes.

    Walks the path segment by segment via ``getattr``; when a segment is
    missing as an attribute, falls back to importing the dotted prefix as a
    module (simplified from zope.dottedname).
    """
    chain = name.split(".")
    dotted = chain.pop(0)
    obj = __import__(dotted)
    for segment in chain:
        dotted = dotted + "." + segment
        try:
            obj = getattr(obj, segment)
            continue
        except AttributeError:
            pass
        # attribute lookup failed: the segment may be a submodule that has
        # not been imported yet. Un-nested on purpose to avoid chained
        # exceptions on python 3.
        try:
            __import__(dotted)
        except ImportError as exc:
            # str is used for py2 vs py3
            expected = str(exc).split()[-1]
            if expected == dotted:
                raise
            raise ImportError("import error in %s: %s" % (dotted, exc))
        obj = annotated_getattr(obj, segment, dotted)
    return obj
def annotated_getattr(obj, name, ann):
    """Return ``getattr(obj, name)``; on failure raise an ``AttributeError``
    whose message includes *ann*, the dotted path resolved so far."""
    try:
        attribute = getattr(obj, name)
    except AttributeError:
        message = "%r object at %s has no attribute %r" % (
            type(obj).__name__,
            ann,
            name,
        )
        raise AttributeError(message)
    return attribute
def derive_importpath(import_path, raising):
    """Split ``"pkg.mod.attr"`` into ``(attr, resolved_target)``.

    The target is the object resolved from everything before the final dot.
    When *raising* is true, the attribute's existence on the target is
    verified eagerly so callers fail with an annotated error up front.
    """
    is_dotted_string = (
        isinstance(import_path, six.string_types) and "." in import_path
    )
    if not is_dotted_string:
        raise TypeError("must be absolute import path string, not %r" % (import_path,))
    module, attr = import_path.rsplit(".", 1)
    target = resolve(module)
    if raising:
        annotated_getattr(target, attr, ann=module)
    return attr, target
class Notset(object):
    """Sentinel type: its single module-level instance marks "no value
    supplied", so that ``None`` remains a legal explicit argument."""

    def __repr__(self):
        return "<notset>"


# the one-and-only sentinel instance used as a default parameter value
notset = Notset()
class MonkeyPatch(object):
    """ Object returned by the ``monkeypatch`` fixture keeping a record of setattr/item/env/syspath changes.
    """

    def __init__(self):
        # undo stacks: (target, name, old-value) triples for attributes and
        # (dict, key, old-value) triples for items; ``notset`` marks
        # "did not exist before"
        self._setattr = []
        self._setitem = []
        # saved cwd / sys.path snapshots, restored by undo()
        self._cwd = None
        self._savesyspath = None

    @contextmanager
    def context(self):
        """
        Context manager that returns a new :class:`MonkeyPatch` object which
        undoes any patching done inside the ``with`` block upon exit:

        .. code-block:: python

            import functools
            def test_partial(monkeypatch):
                with monkeypatch.context() as m:
                    m.setattr(functools, "partial", 3)

        Useful in situations where it is desired to undo some patches before the test ends,
        such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
        of this see `#3290 <https://github.com/pytest-dev/pytest/issues/3290>`_.
        """
        m = MonkeyPatch()
        try:
            yield m
        finally:
            m.undo()

    def setattr(self, target, name, value=notset, raising=True):
        """ Set attribute value on target, memorizing the old value.
        By default raise AttributeError if the attribute did not exist.

        For convenience you can specify a string as ``target`` which
        will be interpreted as a dotted import path, with the last part
        being the attribute name. Example:
        ``monkeypatch.setattr("os.getcwd", lambda: "/")``
        would set the ``getcwd`` function of the ``os`` module.

        The ``raising`` value determines if the setattr should fail
        if the attribute is not already present (defaults to True
        which means it will raise).
        """
        __tracebackhide__ = True
        import inspect

        if value is notset:
            # two-argument form: target is a dotted import path string and
            # ``name`` actually carries the replacement value
            if not isinstance(target, six.string_types):
                raise TypeError(
                    "use setattr(target, name, value) or "
                    "setattr(target, value) with target being a dotted "
                    "import string"
                )
            value = name
            name, target = derive_importpath(target, raising)

        oldval = getattr(target, name, notset)
        if raising and oldval is notset:
            raise AttributeError("%r has no attribute %r" % (target, name))

        # avoid class descriptors like staticmethod/classmethod
        if inspect.isclass(target):
            oldval = target.__dict__.get(name, notset)
        self._setattr.append((target, name, oldval))
        setattr(target, name, value)

    def delattr(self, target, name=notset, raising=True):
        """ Delete attribute ``name`` from ``target``, by default raise
        AttributeError if the attribute did not previously exist.

        If no ``name`` is specified and ``target`` is a string
        it will be interpreted as a dotted import path with the
        last part being the attribute name.

        If ``raising`` is set to False, no exception will be raised if the
        attribute is missing.
        """
        __tracebackhide__ = True
        import inspect

        if name is notset:
            # one-argument form: target is a dotted import path string
            if not isinstance(target, six.string_types):
                raise TypeError(
                    "use delattr(target, name) or "
                    "delattr(target) with target being a dotted "
                    "import string"
                )
            name, target = derive_importpath(target, raising)

        if not hasattr(target, name):
            if raising:
                raise AttributeError(name)
        else:
            oldval = getattr(target, name, notset)
            # Avoid class descriptors like staticmethod/classmethod.
            if inspect.isclass(target):
                oldval = target.__dict__.get(name, notset)
            self._setattr.append((target, name, oldval))
            delattr(target, name)

    def setitem(self, dic, name, value):
        """ Set dictionary entry ``name`` to value. """
        self._setitem.append((dic, name, dic.get(name, notset)))
        dic[name] = value

    def delitem(self, dic, name, raising=True):
        """ Delete ``name`` from dict. Raise KeyError if it doesn't exist.

        If ``raising`` is set to False, no exception will be raised if the
        key is missing.
        """
        if name not in dic:
            if raising:
                raise KeyError(name)
        else:
            self._setitem.append((dic, name, dic.get(name, notset)))
            del dic[name]

    def _warn_if_env_name_is_not_str(self, name):
        """On Python 2, warn if the given environment variable name is not a native str (#4056)"""
        if six.PY2 and not isinstance(name, str):
            warnings.warn(
                pytest.PytestWarning(
                    "Environment variable name {!r} should be str".format(name)
                )
            )

    def setenv(self, name, value, prepend=None):
        """ Set environment variable ``name`` to ``value``. If ``prepend``
        is a character, read the current environment variable value
        and prepend the ``value`` adjoined with the ``prepend`` character."""
        if not isinstance(value, str):
            # non-str values are tolerated for backward compatibility, but
            # deprecated with a warning and coerced to str
            warnings.warn(
                pytest.PytestWarning(
                    "Value of environment variable {name} type should be str, but got "
                    "{value!r} (type: {type}); converted to str implicitly".format(
                        name=name, value=value, type=type(value).__name__
                    )
                ),
                stacklevel=2,
            )
            value = str(value)
        if prepend and name in os.environ:
            value = value + prepend + os.environ[name]
        self._warn_if_env_name_is_not_str(name)
        # delegate to setitem so the change is recorded for undo()
        self.setitem(os.environ, name, value)

    def delenv(self, name, raising=True):
        """ Delete ``name`` from the environment. Raise KeyError if it does
        not exist.

        If ``raising`` is set to False, no exception will be raised if the
        environment variable is missing.
        """
        self._warn_if_env_name_is_not_str(name)
        self.delitem(os.environ, name, raising=raising)

    def syspath_prepend(self, path):
        """ Prepend ``path`` to ``sys.path`` list of import locations. """
        from pkg_resources import fixup_namespace_packages

        if self._savesyspath is None:
            # snapshot sys.path only once; undo() restores this copy
            self._savesyspath = sys.path[:]
        sys.path.insert(0, str(path))

        # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
        fixup_namespace_packages(str(path))

        # A call to syspathinsert() usually means that the caller wants to
        # import some dynamically created files, thus with python3 we
        # invalidate its import caches.
        # This is especially important when any namespace package is in used,
        # since then the mtime based FileFinder cache (that gets created in
        # this case already) gets not invalidated when writing the new files
        # quickly afterwards.
        if sys.version_info >= (3, 3):
            from importlib import invalidate_caches

            invalidate_caches()

    def chdir(self, path):
        """ Change the current working directory to the specified path.
        Path can be a string or a py.path.local object.
        """
        if self._cwd is None:
            # remember the original cwd only on the first chdir
            self._cwd = os.getcwd()
        if hasattr(path, "chdir"):
            # py.path.local-style objects
            path.chdir()
        elif isinstance(path, Path):
            # modern python uses the fspath protocol here LEGACY
            os.chdir(str(path))
        else:
            os.chdir(path)

    def undo(self):
        """ Undo previous changes. This call consumes the
        undo stack. Calling it a second time has no effect unless
        you do more monkeypatching after the undo call.

        There is generally no need to call `undo()`, since it is
        called automatically during tear-down.

        Note that the same `monkeypatch` fixture is used across a
        single test function invocation. If `monkeypatch` is used both by
        the test function itself and one of the test fixtures,
        calling `undo()` will undo all of the changes made in
        both functions.
        """
        # replay the attribute changes in reverse order of application
        for obj, name, value in reversed(self._setattr):
            if value is not notset:
                setattr(obj, name, value)
            else:
                # attribute did not exist before patching: remove it again
                delattr(obj, name)
        self._setattr[:] = []
        for dictionary, name, value in reversed(self._setitem):
            if value is notset:
                try:
                    del dictionary[name]
                except KeyError:
                    pass  # was already deleted, so we have the desired state
            else:
                dictionary[name] = value
        self._setitem[:] = []
        if self._savesyspath is not None:
            sys.path[:] = self._savesyspath
            self._savesyspath = None

        if self._cwd is not None:
            os.chdir(self._cwd)
            self._cwd = None
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pytest@py2@_pytest@monkeypatch.py@.PATH_END.py
|
{
"filename": "AUTHORS.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/libs/openjpeg/AUTHORS.md",
"type": "Markdown"
}
|
# Authors of OpenJPEG
See also [THANKS](https://github.com/uclouvain/openjpeg/blob/master/THANKS.md)
David Janssens designed and implemented the first version of OpenJPEG.
Kaori Hagihara designed and implemented the first version of OpenJPIP.
Jerome Fimes implemented the alpha version of OpenJPEG 2.0.
Giuseppe Baruffa added the JPWL functionalities.
Mickaël Savinaud implemented the final OpenJPEG 2.0 version based on a big merge between 1.5 version and alpha version of 2.0.
Mathieu Malaterre participated to the OpenJPEG 2.0 version and improved the libraries and utilities.
Yannick Verschueren,
Herve Drolon,
Francois-Olivier Devaux,
Antonin Descampe
improved the libraries and utilities.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@libs@openjpeg@AUTHORS.md@.PATH_END.py
|
{
"filename": "test_gpu_examples.py",
"repo_name": "huggingface/peft",
"repo_path": "peft_extracted/peft-main/tests/test_gpu_examples.py",
"type": "Python"
}
|
# Copyright 2023-present the HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import importlib
import itertools
import os
import re
import subprocess
import sys
import tempfile
import unittest
from collections import Counter, defaultdict
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, List, Union
import pytest
import torch
from accelerate import infer_auto_device_map
from accelerate.test_utils.testing import run_command
from accelerate.utils import patch_environment
from datasets import Audio, Dataset, DatasetDict, load_dataset
from packaging import version
from parameterized import parameterized
from torch.distributed import init_process_group
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.utils.data import DataLoader
from transformers import (
AutoModelForCausalLM,
AutoModelForSeq2SeqLM,
AutoTokenizer,
BitsAndBytesConfig,
DataCollatorForLanguageModeling,
Seq2SeqTrainer,
Seq2SeqTrainingArguments,
Trainer,
TrainingArguments,
WhisperFeatureExtractor,
WhisperForConditionalGeneration,
WhisperProcessor,
WhisperTokenizer,
)
from transformers.pytorch_utils import Conv1D
from peft import (
AdaLoraConfig,
EvaConfig,
LoftQConfig,
LoraConfig,
PeftModel,
PrefixTuningConfig,
PromptEncoderConfig,
TaskType,
VeraConfig,
get_peft_model,
get_peft_model_state_dict,
initialize_lora_eva_weights,
inject_adapter_in_model,
prepare_model_for_kbit_training,
replace_lora_weights_loftq,
set_peft_model_state_dict,
)
from peft.tuners import boft
from peft.utils import SAFETENSORS_WEIGHTS_NAME, infer_device
from peft.utils.loftq_utils import NFQuantizer
from peft.utils.other import fsdp_auto_wrap_policy
from .testing_utils import (
require_aqlm,
require_auto_awq,
require_auto_gptq,
require_bitsandbytes,
require_eetq,
require_hqq,
require_non_cpu,
require_non_xpu,
require_optimum,
require_torch_gpu,
require_torch_multi_gpu,
require_torchao,
)
# A full testing suite that tests all the necessary features on GPU. The tests should
# rely on the example scripts to test the features.
@dataclass
class DataCollatorSpeechSeq2SeqWithPadding:
    r"""
    Collate speech seq2seq features into padded batches for Whisper training.

    Directly copied from:
    https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb
    """

    # assumes a WhisperProcessor-like object exposing .feature_extractor and
    # .tokenizer (see usage below) — constructed by the calling test
    processor: Any

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need different padding methods
        # first treat the audio inputs by simply returning torch tensors
        input_features = [{"input_features": feature["input_features"]} for feature in features]
        batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt")

        # get the tokenized label sequences
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        # pad the labels to max length
        labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt")

        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)

        # if bos token is appended in previous tokenization step,
        # cut bos token here as it's append later anyways
        if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item():
            labels = labels[:, 1:]

        batch["labels"] = labels
        return batch
@require_torch_gpu
@require_bitsandbytes
class PeftBnbGPUExampleTests(unittest.TestCase):
r"""
A single GPU int8 + fp4 test suite, this will test if training fits correctly on a single GPU device (1x NVIDIA T4
16GB) using bitsandbytes.
The tests are the following:
- Seq2Seq model training based on:
https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb
- Causal LM model training based on:
https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb
- Audio model training based on:
https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb
"""
def setUp(self):
self.seq2seq_model_id = "google/flan-t5-base"
self.causal_lm_model_id = "facebook/opt-6.7b"
self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
self.audio_model_id = "openai/whisper-large"
def tearDown(self):
r"""
Efficient mechanism to free GPU memory after each test. Based on
https://github.com/huggingface/transformers/issues/21094
"""
gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
gc.collect()
def _check_inference_finite(self, model, batch):
# try inference without Trainer class
training = model.training
model.eval()
output = model(**batch.to(model.device))
assert torch.isfinite(output.logits).all()
model.train(training)
@pytest.mark.single_gpu_tests
def test_causal_lm_training(self):
r"""
Test the CausalLM training on a single GPU device. This test is a converted version of
https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train
`opt-6.7b` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set
correctly.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
model = prepare_model_for_kbit_training(model)
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_4bit(self):
        r"""
        Test the CausalLM training on a single GPU device. This test is a converted version of
        https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train
        `opt-6.7b` on `english_quotes` dataset in few steps using 4bit base model. The test would simply fail if the
        adapters are not set correctly.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Same flow as test_causal_lm_training, but with a 4bit quantized base model.
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                quantization_config=BitsAndBytesConfig(load_in_4bit=True),
                device_map="auto",
            )
            tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test: a handful of steps only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            # Adapter files must exist after save_pretrained.
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
def test_causal_lm_training_multi_gpu_4bit(self):
r"""
Test the CausalLM training on a multi-GPU device with 4bit base model. The test would simply fail if the
adapters are not set correctly.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
model = prepare_model_for_kbit_training(model)
setattr(model, "model_parallel", True)
setattr(model, "is_parallelizable", True)
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    @require_torch_gpu
    def test_4bit_adalora_causalLM(self):
        r"""
        Tests the 4bit training with adalora
        """
        model_id = "facebook/opt-350m"
        # for >3 GPUs, might need: device_map={"": "cuda:0"}
        model = AutoModelForCausalLM.from_pretrained(
            model_id, quantization_config=BitsAndBytesConfig(load_in_4bit=True)
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model.gradient_checkpointing_enable()
        model = prepare_model_for_kbit_training(model)
        peft_config = AdaLoraConfig(
            init_r=6,
            target_r=4,
            tinit=50,
            tfinal=100,
            deltaT=5,
            beta1=0.3,
            beta2=0.3,
            orth_reg_weight=0.2,
            lora_alpha=32,
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM",
        )
        model = get_peft_model(model, peft_config)
        data = load_dataset("ybelkada/english_quotes_copy")
        data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
        # First check that a plain forward pass yields finite logits.
        batch = tokenizer(data["train"][:3]["quote"], return_tensors="pt", padding=True)
        self._check_inference_finite(model, batch)
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    @require_torch_gpu
    def test_8bit_adalora_causalLM(self):
        r"""
        Tests the 8bit training with adalora
        """
        model_id = "facebook/opt-350m"
        model = AutoModelForCausalLM.from_pretrained(
            model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True)
        )
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model.gradient_checkpointing_enable()
        model = prepare_model_for_kbit_training(model)
        peft_config = AdaLoraConfig(
            init_r=6,
            target_r=4,
            tinit=50,
            tfinal=100,
            deltaT=5,
            beta1=0.3,
            beta2=0.3,
            orth_reg_weight=0.2,
            lora_alpha=32,
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM",
        )
        model = get_peft_model(model, peft_config)
        data = load_dataset("ybelkada/english_quotes_copy")
        data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
        # Sanity-check inference before any training step.
        batch = tokenizer(data["train"][:3]["quote"], return_tensors="pt", padding=True)
        self._check_inference_finite(model, batch)
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.multi_gpu_tests
    @require_torch_multi_gpu
    def test_causal_lm_training_multi_gpu(self):
        r"""
        Test the CausalLM training on a multi-GPU device. This test is a converted version of
        https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train
        `opt-6.7b` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set
        correctly.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                quantization_config=BitsAndBytesConfig(load_in_8bit=True),
                device_map="auto",
            )
            # The model must have been sharded across all available GPUs.
            assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
            tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
            model = prepare_model_for_kbit_training(model)
            # Tell the Trainer that the model is already model-parallel.
            setattr(model, "model_parallel", True)
            setattr(model, "is_parallelizable", True)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("Abirate/english_quotes")
            data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    def test_seq2seq_lm_training_single_gpu(self):
        r"""
        Test the Seq2SeqLM training on a single GPU device. This test is a converted version of
        https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train
        `flan-large` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set
        correctly.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForSeq2SeqLM.from_pretrained(
                self.seq2seq_model_id,
                quantization_config=BitsAndBytesConfig(load_in_8bit=True),
                device_map={"": 0},  # pin the whole model to GPU 0
            )
            assert set(model.hf_device_map.values()) == {0}
            tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id)
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q", "v"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",  # NOTE(review): looks copy-pasted — SEQ_2_SEQ_LM would match the model; confirm before changing
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
@require_torch_multi_gpu
def test_seq2seq_lm_training_multi_gpu(self):
r"""
Test the Seq2SeqLM training on a multi-GPU device. This test is a converted version of
https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train
`flan-large` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set
correctly.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForSeq2SeqLM.from_pretrained(
self.seq2seq_model_id,
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
device_map="balanced",
)
assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id)
model = prepare_model_for_kbit_training(model)
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q", "v"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir="outputs",
),
data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    def test_audio_model_training(self):
        r"""
        Test the audio model training on a single GPU device. This test is a converted version of
        https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            dataset_name = "ybelkada/common_voice_mr_11_0_copy"
            task = "transcribe"
            language = "Marathi"
            common_voice = DatasetDict()
            common_voice["train"] = load_dataset(dataset_name, split="train+validation")
            # Drop all metadata columns; only audio + sentence are needed.
            common_voice = common_voice.remove_columns(
                ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"]
            )
            feature_extractor = WhisperFeatureExtractor.from_pretrained(self.audio_model_id)
            tokenizer = WhisperTokenizer.from_pretrained(self.audio_model_id, language=language, task=task)
            processor = WhisperProcessor.from_pretrained(self.audio_model_id, language=language, task=task)
            common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000))
            def prepare_dataset(batch):
                # load and resample audio data from 48 to 16kHz
                audio = batch["audio"]
                # compute log-Mel input features from input audio array
                batch["input_features"] = feature_extractor(
                    audio["array"], sampling_rate=audio["sampling_rate"]
                ).input_features[0]
                # encode target text to label ids
                batch["labels"] = tokenizer(batch["sentence"]).input_ids
                return batch
            common_voice = common_voice.map(
                prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2
            )
            data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor)
            model = WhisperForConditionalGeneration.from_pretrained(
                self.audio_model_id, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto"
            )
            # Disable generation-time constraints that would interfere with training.
            model.config.forced_decoder_ids = None
            model.config.suppress_tokens = []
            model = prepare_model_for_kbit_training(model)
            # as Whisper model uses Conv layer in encoder, checkpointing disables grad computation
            # to avoid this, make the inputs trainable
            def make_inputs_require_grad(module, input, output):
                output.requires_grad_(True)
            model.model.encoder.conv1.register_forward_hook(make_inputs_require_grad)
            config = LoraConfig(
                r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none"
            )
            model = get_peft_model(model, config)
            model.print_trainable_parameters()
            training_args = Seq2SeqTrainingArguments(
                output_dir=tmp_dir,  # change to a repo name of your choice
                per_device_train_batch_size=8,
                gradient_accumulation_steps=1,  # increase by 2x for every 2x decrease in batch size
                learning_rate=1e-3,
                warmup_steps=2,
                max_steps=3,
                fp16=True,
                per_device_eval_batch_size=8,
                generation_max_length=128,
                logging_steps=25,
                remove_unused_columns=False,  # required as the PeftModel forward doesn't have the signature of the wrapped model's forward
                label_names=["labels"],  # same reason as above
            )
            trainer = Seq2SeqTrainer(
                args=training_args,
                model=model,
                train_dataset=common_voice["train"],
                data_collator=data_collator,
                tokenizer=processor.feature_extractor,
            )
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.single_gpu_tests
def test_4bit_non_default_adapter_name(self):
# See PR 1294
config = LoraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
bias="none",
task_type="CAUSAL_LM",
)
# default adapter name
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, config)
n_trainable_default, n_total_default = model.get_nb_trainable_parameters()
# other adapter name
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, config, adapter_name="other")
n_trainable_other, n_total_other = model.get_nb_trainable_parameters()
assert n_trainable_other > 0
# sanity check
assert n_trainable_default == n_trainable_other
assert n_total_default == n_total_other
@pytest.mark.single_gpu_tests
def test_8bit_non_default_adapter_name(self):
# See PR 1294
config = LoraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
bias="none",
task_type="CAUSAL_LM",
)
# default adapter name
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, config)
n_trainable_default, n_total_default = model.get_nb_trainable_parameters()
# other adapter name
model = AutoModelForCausalLM.from_pretrained(
"facebook/opt-125m",
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, config, adapter_name="other")
n_trainable_other, n_total_other = model.get_nb_trainable_parameters()
assert n_trainable_other > 0
# sanity check
assert n_trainable_default == n_trainable_other
assert n_total_default == n_total_other
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_4bit_dora(self):
        r"""
        Same as test_causal_lm_training_4bit but with DoRA
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                quantization_config=BitsAndBytesConfig(load_in_4bit=True),
                device_map="auto",
            )
            tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
                use_dora=True,  # only difference from the plain 4bit LoRA test
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
def test_causal_lm_training_multi_gpu_4bit_dora(self):
r"""
Same as test_causal_lm_training_multi_gpu_4bit but with DoRA
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
model = prepare_model_for_kbit_training(model)
setattr(model, "model_parallel", True)
setattr(model, "is_parallelizable", True)
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
use_dora=True,
)
model = get_peft_model(model, config)
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_8bit_dora(self):
        r"""
        Same as test_causal_lm_training_4bit_dora but with 8bit
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                quantization_config=BitsAndBytesConfig(load_in_8bit=True),
                device_map="auto",
            )
            tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
                use_dora=True,  # DoRA variant of the 8bit LoRA test
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
def test_causal_lm_training_multi_gpu_8bit_dora(self):
r"""
Same as test_causal_lm_training_multi_gpu_4bit_dora but with 8bit
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
model = prepare_model_for_kbit_training(model)
setattr(model, "model_parallel", True)
setattr(model, "is_parallelizable", True)
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
use_dora=True,
)
model = get_peft_model(model, config)
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_gpt2_dora(self):
        r"""
        Same as test_causal_lm_training_4bit but with DoRA
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Unquantized gpt2 base model (no bnb config here).
            model = AutoModelForCausalLM.from_pretrained("gpt2", device_map="auto")
            # NOTE(review): tokenizer comes from self.causal_lm_model_id (an OPT checkpoint),
            # not from gpt2 — looks like a copy-paste; confirm whether this is intentional.
            tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
                use_dora=True,
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@parameterized.expand(["4bit", "8bit"])
def test_initialize_dora_with_bnb_on_cpu(self, kbit):
# 1674
# The issue is that to initialize DoRA, we need to dequantize the weights. That only works on GPU for bnb.
# Therefore, intializing DoRA with bnb on CPU used to fail.
model_id = "facebook/opt-125m"
if kbit == "4bit":
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
elif kbit == "8bit":
bnb_config = BitsAndBytesConfig(load_in_8bit=True)
else:
raise ValueError("Only 4bit and 8bit bnb allowed")
model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
model = model.cpu() # ensure that we're on CPU
# sanity check that all weights are on CPU
weights_not_cpu = [name for name, p in model.named_parameters() if p.device != torch.device("cpu")]
assert not weights_not_cpu
lora_config = LoraConfig(use_dora=True)
# should not raise
peft_model = get_peft_model(model, lora_config)
# check that the weights are still on CPU
weights_not_cpu = [name for name, p in peft_model.named_parameters() if p.device != torch.device("cpu")]
assert not weights_not_cpu
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_vera(self):
        r"""
        Same as test_causal_lm_training but with VeRA
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                quantization_config=BitsAndBytesConfig(load_in_8bit=True),
                device_map="auto",
            )
            tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
            model = prepare_model_for_kbit_training(model)
            # VeRA adapter instead of LoRA; note vera_dropout (not lora_dropout).
            config = VeraConfig(
                r=16,
                target_modules=["q_proj", "v_proj"],
                vera_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_4bit_vera(self):
        r"""
        Same as test_causal_lm_training_4bit but with VeRA
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                quantization_config=BitsAndBytesConfig(load_in_4bit=True),
                device_map="auto",
            )
            tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
            model = prepare_model_for_kbit_training(model)
            # VeRA adapter instead of LoRA; note vera_dropout (not lora_dropout).
            config = VeraConfig(
                r=16,
                target_modules=["q_proj", "v_proj"],
                vera_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
def test_causal_lm_training_multi_gpu_vera(self):
r"""
Same as test_causal_lm_training_multi_gpu but with VeRA
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_8bit=True),
)
assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
model = prepare_model_for_kbit_training(model)
setattr(model, "model_parallel", True)
setattr(model, "is_parallelizable", True)
config = VeraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
vera_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
def test_causal_lm_training_multi_gpu_4bit_vera(self):
r"""
Same as test_causal_lm_training_multi_gpu_4bit but with VeRA
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
device_map="auto",
quantization_config=BitsAndBytesConfig(load_in_4bit=True),
)
assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
model = prepare_model_for_kbit_training(model)
setattr(model, "model_parallel", True)
setattr(model, "is_parallelizable", True)
config = VeraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
vera_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
@require_torch_gpu
@require_auto_gptq
@require_optimum
class PeftGPTQGPUTests(unittest.TestCase):
r"""
GPTQ + peft tests
"""
    def setUp(self):
        """Load the GPTQ-quantized checkpoint id, its quantization config, and tokenizer."""
        from transformers import GPTQConfig
        self.causal_lm_model_id = "marcsun13/opt-350m-gptq-4bit"
        # TODO : check if it works for Exllamav2 kernels
        self.quantization_config = GPTQConfig(bits=4, use_exllama=False)
        self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test. Based on
        https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        # No cuda-availability guard needed: the class is gated by @require_torch_gpu.
        torch.cuda.empty_cache()
def _check_inference_finite(self, model, batch):
# try inference without Trainer class
training = model.training
model.eval()
output = model(**batch.to(model.device))
assert torch.isfinite(output.logits).all()
model.train(training)
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training(self):
        r"""
        Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set
        correctly.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            # GPTQ-quantized base model in fp16 (quantization_config from setUp).
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                torch_dtype=torch.float16,
                device_map="auto",
                quantization_config=self.quantization_config,
            )
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,  # smoke test only
                    learning_rate=2e-4,
                    fp16=True,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            model.config.use_cache = False
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.single_gpu_tests
def test_adalora_causalLM(self):
r"""
Tests the gptq training with adalora
"""
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
model = prepare_model_for_kbit_training(model)
peft_config = AdaLoraConfig(
init_r=6,
target_r=4,
tinit=50,
tfinal=100,
deltaT=5,
beta1=0.3,
beta2=0.3,
orth_reg_weight=0.2,
lora_alpha=32,
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, peft_config)
data = load_dataset("ybelkada/english_quotes_copy")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
batch = tokenizer(data["train"][:3]["quote"], return_tensors="pt", padding=True)
self._check_inference_finite(model, batch)
with tempfile.TemporaryDirectory() as tmp_dir:
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
@require_torch_multi_gpu
def test_causal_lm_training_multi_gpu(self):
r"""
Test the CausalLM training on a multi-GPU device. The test would simply fail if the adapters are not set
correctly.
"""
with tempfile.TemporaryDirectory() as tmp_dir:
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
model = prepare_model_for_kbit_training(model)
setattr(model, "model_parallel", True)
setattr(model, "is_parallelizable", True)
config = LoraConfig(
r=16,
lora_alpha=32,
target_modules=["q_proj", "v_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM",
)
model = get_peft_model(model, config)
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
trainer = Trainer(
model=model,
train_dataset=data["train"],
args=TrainingArguments(
per_device_train_batch_size=4,
gradient_accumulation_steps=4,
warmup_steps=2,
max_steps=3,
learning_rate=2e-4,
fp16=True,
logging_steps=1,
output_dir=tmp_dir,
),
data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
)
model.config.use_cache = False
trainer.train()
model.cpu().save_pretrained(tmp_dir)
assert "adapter_config.json" in os.listdir(tmp_dir)
assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
# assert loss is not None
assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.single_gpu_tests
def test_non_default_adapter_name(self):
# See issue 1346
config = LoraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
task_type="CAUSAL_LM",
)
# default adapter name
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, config)
n_trainable_default, n_total_default = model.get_nb_trainable_parameters()
# other adapter name
model = AutoModelForCausalLM.from_pretrained(
self.causal_lm_model_id,
torch_dtype=torch.float16,
device_map="auto",
quantization_config=self.quantization_config,
)
model = prepare_model_for_kbit_training(model)
model = get_peft_model(model, config, adapter_name="other")
n_trainable_other, n_total_other = model.get_nb_trainable_parameters()
assert n_trainable_other > 0
# sanity check
assert n_trainable_default == n_trainable_other
assert n_total_default == n_total_other
@require_non_cpu
class OffloadSaveTests(unittest.TestCase):
    """Save/load/merge LoRA adapters on a base model whose weights are offloaded to CPU and disk."""

    def setUp(self):
        # small model so the tiny max_memory limits below force offloading
        self.causal_lm_model_id = "gpt2"

    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test. Based on
        https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        torch.cuda.empty_cache()

    def test_offload_load(self):
        r"""
        Test the loading of a LoRA model with CPU- and disk-offloaded modules
        """
        torch.manual_seed(0)
        model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
        tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
        memory_limits = {"cpu": "0.4GIB"}  # no "disk" for PeftModel.from_pretrained() compatibility
        # offload around half of all transformer modules to the disk
        device_map = infer_auto_device_map(model, max_memory=memory_limits)
        assert "cpu" in device_map.values()
        assert "disk" in device_map.values()

        # random (init_lora_weights=False) adapter on GPT-2's fused attention projection
        config = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False, target_modules=["c_attn"])

        model = get_peft_model(model, config)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)

            # reference output: the same adapter loaded on a fully on-CPU model
            model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map="cpu")
            lora_model = PeftModel.from_pretrained(model, tmp_dir).eval()
            input_tokens = tokenizer.encode("Four score and seven years ago", return_tensors="pt")
            output = lora_model(input_tokens)[0]

            # load the model with device_map
            offloaded_model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map=device_map)
            assert len({p.device for p in offloaded_model.parameters()}) == 2  # 'cpu' and 'meta'
            offloaded_lora_model = PeftModel.from_pretrained(offloaded_model, tmp_dir, max_memory=memory_limits).eval()
            offloaded_output = offloaded_lora_model(input_tokens)[0]

            # offloading must not change the forward result
            assert torch.allclose(output, offloaded_output, atol=1e-5)

    @pytest.mark.single_gpu_tests
    def test_offload_merge(self):
        r"""
        Test merging, unmerging, and unloading of a model with CPU- and disk- offloaded modules.
        """
        torch.manual_seed(0)
        model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id)
        tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
        memory_limits = {0: "0.2GIB", "cpu": "0.2GIB"}  # no "disk" for PeftModel.from_pretrained() compatibility
        # offloads around half of all transformer modules
        device_map = infer_auto_device_map(model, max_memory=memory_limits)
        assert 0 in device_map.values()
        assert "cpu" in device_map.values()
        assert "disk" in device_map.values()

        config = LoraConfig(task_type="CAUSAL_LM", init_lora_weights=False, target_modules=["c_attn"])

        model = get_peft_model(model, config)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            # load the model with device_map
            model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map=device_map).eval()
            assert len({p.device for p in model.parameters()}) == 2
            model = PeftModel.from_pretrained(model, tmp_dir, max_memory=memory_limits)

        input_tokens = tokenizer.encode("Four score and seven years ago", return_tensors="pt")
        model.eval()

        # test peft model adapter merge: merged output must equal the unmerged output
        pre_merge_olayer = model(input_tokens)[0]
        model.merge_adapter()
        post_merge_olayer = model(input_tokens)[0]
        assert torch.allclose(post_merge_olayer, pre_merge_olayer)

        # test peft model adapter unmerge
        model.unmerge_adapter()
        post_unmerge_olayer = model(input_tokens)[0]
        assert torch.allclose(post_unmerge_olayer, pre_merge_olayer)

        # test LoRA merge and unload
        model = model.merge_and_unload()
        post_unload_merge_olayer = model(input_tokens)[0]
        assert torch.allclose(post_unload_merge_olayer, pre_merge_olayer)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU")
@pytest.mark.single_gpu_tests
class TestPiSSA:
    r"""
    Tests for PiSSA to ensure that it reduces the quantization error compared to normal LoRA quantization.
    """

    # The error factor indicates by how much the quantization error should be decreased when using PiSSA compared to
    # quantization without PiSSA. Thus 1.03 means that the error should be decreased by 3% at least. This is a very
    # conservative value to prevent flakiness, in practice most gains are > 1.5
    error_factor = 1.03

    def quantize_model(self, model, num_bits=4, device="cuda"):
        # Quantize the `weight.data` of the linear layer in the model to `num_bits` and store it with full precision.
        # Conv1D is included so GPT-2-style models (which use Conv1D projections) are covered too.
        quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64)
        for name, module in model.named_modules():
            if isinstance(module, (torch.nn.Linear, Conv1D)) and "lm_head" not in name:
                quantized_weight, max_abs, shape = quantizer.quantize_block(module.weight.data.to(device))
                module.weight.data = quantizer.dequantize_block(quantized_weight, max_abs, shape)
        return model

    def nuclear_norm(self, base_model, quantized_model):
        # Calculate the nuclear norm (sum of singular values) of the error matrices between the `quantized_model` and the `base_model`.
        error_list = []
        for name, module in base_model.named_modules():
            if isinstance(module, (torch.nn.Linear, Conv1D)) and "lm_head" not in name:
                quant_module = quantized_model.get_submodule(name)
                error_list.append(torch.linalg.svdvals(module.weight.data - quant_module.weight.data).sum())
        return torch.Tensor(error_list).sum()

    def get_errors(
        self,
        tmp_path,
        bits=4,
        device="cuda",
        model_id="hf-internal-testing/tiny-random-BloomForCausalLM",
    ):
        # Comparing the quantized LoRA model to the base model, vs the PiSSA quantized model to the base model.
        # We expect the PiSSA quantized model to have less error than the normal LoRA quantized model.
        cls = AutoModelForSeq2SeqLM if "t5" in str(model_id) else AutoModelForCausalLM
        base_model = cls.from_pretrained(model_id).eval().to(device)
        task_type = TaskType.SEQ_2_SEQ_LM if base_model.config.is_encoder_decoder else TaskType.CAUSAL_LM

        # logits from the normal quantized LoRA model
        target_modules = "all-linear" if task_type != TaskType.SEQ_2_SEQ_LM else ["o", "k", "wi", "q", "v"]
        lora_config = LoraConfig(task_type=task_type, target_modules=target_modules)
        qlora_model = self.quantize_model(cls.from_pretrained(model_id).eval().to(device), bits, device)
        qlora_model = get_peft_model(
            qlora_model,
            lora_config,
        )
        qlora_model = qlora_model.merge_and_unload()
        qlora_error = self.nuclear_norm(base_model, qlora_model)
        del qlora_model
        gc.collect()
        torch.cuda.empty_cache()

        # logits from quantized LoRA model using PiSSA
        lora_config = LoraConfig(
            task_type=task_type,
            init_lora_weights="pissa",
            target_modules=target_modules,
        )
        pissa_model = cls.from_pretrained(model_id).eval().to(device)
        pissa_model = get_peft_model(pissa_model, lora_config)

        # save LoRA weights, they should be initialized such that they minimize the quantization error
        pissa_model.base_model.peft_config["default"].init_lora_weights = True
        pissa_model.save_pretrained(tmp_path / "pissa_model")

        pissa_model = pissa_model.unload()
        pissa_model.save_pretrained(tmp_path / "residual_model")

        del pissa_model
        gc.collect()
        torch.cuda.empty_cache()

        # now load quantized model and apply PiSSA-initialized weights on top
        qpissa_model = self.quantize_model(
            cls.from_pretrained(tmp_path / "residual_model").eval().to(device), bits, device
        )
        qpissa_model = PeftModel.from_pretrained(qpissa_model, tmp_path / "pissa_model")
        qpissa_model = qpissa_model.merge_and_unload()
        qpissa_error = self.nuclear_norm(base_model, qpissa_model)
        del qpissa_model
        gc.collect()
        torch.cuda.empty_cache()

        assert qlora_error > 0.0
        assert qpissa_error > 0.0

        # next, check that PiSSA quantization errors are smaller than LoRA errors by a certain margin
        assert qpissa_error < (qlora_error / self.error_factor)

    @pytest.mark.parametrize("device", ["cuda", "cpu"])
    def test_bloomz_pissa_4bit(self, device, tmp_path):
        # In this test, we compare the logits of the base model, the quantized LoRA model, and the quantized model
        # using PiSSA. When quantizing, we expect a certain level of error. However, we expect the PiSSA quantized
        # model to have less error than the normal LoRA quantized model. Note that when using normal LoRA, the
        # quantization error is simply the error from quantization without LoRA, as LoRA is a no-op before training.
        # We still apply LoRA for the test for consistency.
        self.get_errors(bits=4, device=device, tmp_path=tmp_path)

    @pytest.mark.parametrize("device", ["cuda", "cpu"])
    def test_bloomz_pissa_8bit(self, device, tmp_path):
        # Same test as test_bloomz_pissa_4bit but with 8 bits.
        self.get_errors(bits=8, device=device, tmp_path=tmp_path)

    @pytest.mark.parametrize("device", ["cuda", "cpu"])
    def test_t5_pissa_4bit(self, device, tmp_path):
        self.get_errors(bits=4, device=device, model_id="t5-small", tmp_path=tmp_path)

    @pytest.mark.parametrize("device", ["cuda", "cpu"])
    def test_t5_pissa_8bit(self, device, tmp_path):
        self.get_errors(bits=8, device=device, model_id="t5-small", tmp_path=tmp_path)

    @pytest.mark.parametrize("device", ["cuda", "cpu"])
    def test_gpt2_pissa_4bit(self, device, tmp_path):
        # see 2104
        self.get_errors(bits=4, device=device, model_id="gpt2", tmp_path=tmp_path)

    @pytest.mark.parametrize("device", ["cuda", "cpu"])
    def test_gpt2_pissa_8bit(self, device, tmp_path):
        # see 2104
        self.get_errors(bits=8, device=device, model_id="gpt2", tmp_path=tmp_path)

    @require_bitsandbytes
    def test_lora_pissa_conversion_same_output_after_loading_with_quantization(self, tmp_path):
        # A copy of the test `test_lora_pissa_conversion_same_output_after_loading` in peft/tests/test_initialization.py,
        # that would fail if bitsandbytes quantization is used because Quant(W_res) + AB !=Quant(W) + \Delta(AB).
        import bitsandbytes as bnb

        torch.manual_seed(0)
        data = torch.rand(10, 1000).to("cuda")

        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                # choose a large weight so that averages are close to expected values
                self.linear = torch.nn.Linear(1000, 1000)
                self.embed = torch.nn.Embedding(1000, 1000)
                self.conv2d = torch.nn.Conv2d(100, 100, 3)

            def forward(self, x):
                x_int = (100 * x).int()
                x_4d = x.flatten().reshape(1, 100, 10, 10)
                return self.linear(x), self.embed(x_int), self.conv2d(x_4d)

        model = MyModule().to("cuda")
        output_base = model(data)[0]

        config = LoraConfig(init_lora_weights="pissa", target_modules=["linear"], r=8)
        peft_model = get_peft_model(deepcopy(model), config)
        # save the initial model
        peft_model.peft_config["default"].init_lora_weights = True
        peft_model.save_pretrained(tmp_path / "init-model")
        peft_model = peft_model.unload()
        torch.save(peft_model.state_dict(), tmp_path / "residual-model")
        del peft_model

        # create 4bit base model
        base_model = deepcopy(model)
        # NOTE(review): torch.load without weights_only — fine for a file this test just wrote itself.
        base_model.load_state_dict(torch.load(tmp_path / "residual-model"))
        # sanity check: the base model weights were indeed changed
        tol = 1e-06
        assert not torch.allclose(model.linear.weight, base_model.linear.weight, atol=tol, rtol=tol)
        # quantize the linear layer
        linear4bit = bnb.nn.Linear4bit(base_model.linear.in_features, base_model.linear.out_features)
        linear4bit.load_state_dict(base_model.linear.state_dict())
        linear4bit.to(0)
        base_model.linear = linear4bit

        peft_model = PeftModel.from_pretrained(deepcopy(base_model), tmp_path / "init-model")
        output_quantized_pissa = peft_model(data)[0]
        # sanity check
        tol = 1e-06
        assert not torch.allclose(output_base, output_quantized_pissa, atol=tol, rtol=tol)

        # modify the weights, or else the adapter performs an identity transformation
        peft_model.base_model.linear.lora_B["default"].weight.data *= 2.0
        output_finetuned_pissa = peft_model(data)[0]
        # sanity check
        tol = 1e-06
        assert not torch.allclose(output_quantized_pissa, output_finetuned_pissa, atol=tol, rtol=tol)

        # save the model normally
        peft_model.save_pretrained(tmp_path / "pissa-model")
        model_loaded = PeftModel.from_pretrained(deepcopy(base_model), tmp_path / "pissa-model")
        output_loaded = model_loaded(data)[0]

        assert torch.allclose(output_finetuned_pissa, output_loaded, atol=tol, rtol=tol)
        # sanity check: ranks should still be 8 as initially
        assert model_loaded.peft_config["default"].r == 8
        assert model_loaded.base_model.model.linear.lora_A["default"].weight.shape[0] == 8

        # save the model with conversion
        peft_model.save_pretrained(
            tmp_path / "pissa-model-converted", path_initial_model_for_weight_conversion=tmp_path / "init-model"
        )
        model_converted = PeftModel.from_pretrained(deepcopy(model), tmp_path / "pissa-model-converted")
        output_converted = model_converted(data)[0]

        # rank should be double of what it was initially
        assert model_converted.peft_config["default"].r == 16
        assert model_converted.base_model.model.linear.lora_A["default"].weight.shape[0] == 16
        # base model weights should be the same as the initial model
        assert torch.allclose(
            model.linear.weight, model_converted.base_model.model.linear.base_layer.weight, atol=tol, rtol=tol
        )
        # This check is expected to fail when using bnb
        assert not torch.allclose(output_finetuned_pissa, output_converted, atol=tol, rtol=tol)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU")
@pytest.mark.single_gpu_tests
class TestOLoRA:
    r"""
    Tests for OLoRA to ensure that it reduces the quantization error compared to normal LoRA quantization.
    """

    # The error factor indicates by how much the quantization error should be decreased when using OLoRA compared to
    # quantization without OLoRA. Thus 1.03 means that the error should be decreased by 3% at least. This is a very
    # conservative value to prevent flakiness, in practice most gains are > 1.5
    error_factor = 1.2

    def quantize_model(self, model, num_bits=4, device="cuda"):
        # Quantize the `weight.data` of the linear layer in the model to `num_bits` and store it with full precision.
        quantizer = NFQuantizer(num_bits=num_bits, device=device, method="normal", block_size=64)
        for name, module in model.named_modules():
            if isinstance(module, torch.nn.Linear) and "lm_head" not in name:
                quantized_weight, max_abs, shape = quantizer.quantize_block(module.weight.data.to(device))
                module.weight.data = quantizer.dequantize_block(quantized_weight, max_abs, shape)
        return model

    def nuclear_norm(self, base_model, quantized_model):
        # Calculate the nuclear norm (sum of singular values) of the error matrices between the `quantized_model` and the `base_model`.
        error_list = []
        for name, module in base_model.named_modules():
            if isinstance(module, torch.nn.Linear) and "lm_head" not in name:
                quant_module = quantized_model.get_submodule(name)
                error_list.append(torch.linalg.svdvals(module.weight.data - quant_module.weight.data).sum())
        return torch.Tensor(error_list).sum()

    def get_errors(
        self,
        tmp_path,
        bits=4,
        device="cuda",
        model_id="hf-internal-testing/tiny-random-BloomForCausalLM",
    ):
        # Comparing the quantized LoRA model to the base model, vs the OLoRA quantized model to the base model.
        # We expect the OLoRA quantized model to have less error than the normal LoRA quantized model.
        cls = AutoModelForSeq2SeqLM if "t5" in str(model_id) else AutoModelForCausalLM
        base_model = cls.from_pretrained(model_id).eval().to(device)
        task_type = TaskType.SEQ_2_SEQ_LM if base_model.config.is_encoder_decoder else TaskType.CAUSAL_LM

        # logits from the normal quantized LoRA model
        target_modules = "all-linear" if task_type != TaskType.SEQ_2_SEQ_LM else ["o", "k", "wi", "q", "v"]
        lora_config = LoraConfig(task_type=task_type, target_modules=target_modules)
        qlora_model = self.quantize_model(cls.from_pretrained(model_id).eval().to(device), bits, device)
        qlora_model = get_peft_model(
            qlora_model,
            lora_config,
        )
        qlora_model = qlora_model.merge_and_unload()
        qlora_error = self.nuclear_norm(base_model, qlora_model)
        del qlora_model
        gc.collect()
        torch.cuda.empty_cache()

        # logits from quantized LoRA model using OLoRA
        lora_config = LoraConfig(
            task_type=task_type,
            init_lora_weights="olora",
            target_modules=target_modules,
        )
        olora_model = cls.from_pretrained(model_id).eval().to(device)
        olora_model = get_peft_model(olora_model, lora_config)

        # save LoRA weights, they should be initialized such that they minimize the quantization error
        olora_model.base_model.peft_config["default"].init_lora_weights = True
        olora_model.save_pretrained(tmp_path / "olora_model")

        olora_model = olora_model.unload()
        olora_model.save_pretrained(tmp_path / "residual_model")

        del olora_model
        gc.collect()
        torch.cuda.empty_cache()

        # now load quantized model and apply OLoRA-initialized weights on top
        qolora_model = self.quantize_model(
            cls.from_pretrained(tmp_path / "residual_model").eval().to(device), bits, device
        )
        qolora_model = PeftModel.from_pretrained(qolora_model, tmp_path / "olora_model")
        qolora_model = qolora_model.merge_and_unload()
        qolora_error = self.nuclear_norm(base_model, qolora_model)
        del qolora_model
        gc.collect()
        torch.cuda.empty_cache()

        assert qlora_error > 0.0
        assert qolora_error > 0.0

        # next, check that OLoRA quantization errors are smaller than LoRA errors by a certain margin
        assert qolora_error < (qlora_error / self.error_factor)

    @pytest.mark.parametrize("device", ["cuda", "cpu"])
    def test_bloomz_olora_4bit(self, device, tmp_path):
        # In this test, we compare the logits of the base model, the quantized LoRA model, and the quantized model
        # using OLoRA. When quantizing, we expect a certain level of error. However, we expect the OLoRA quantized
        # model to have less error than the normal LoRA quantized model. Note that when using normal LoRA, the
        # quantization error is simply the error from quantization without LoRA, as LoRA is a no-op before training.
        # We still apply LoRA for the test for consistency.
        self.get_errors(bits=4, device=device, tmp_path=tmp_path)

    @pytest.mark.parametrize("device", ["cuda", "cpu"])
    def test_bloomz_olora_8bit(self, device, tmp_path):
        # Same test as test_bloomz_olora_4bit but with 8 bits.
        self.get_errors(bits=8, device=device, tmp_path=tmp_path)

    @pytest.mark.parametrize("bits", [4, 8])
    def test_olora_with_quantized_model(self, bits):
        import bitsandbytes as bnb

        # issue 1999: OLoRA init must work directly on a bnb-quantized model
        model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
        if bits == 4:
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_quant_type="nf4",
                bnb_4bit_compute_dtype=torch.float16,
                bnb_4bit_quant_storage=torch.float16,
                bnb_4bit_use_double_quant=True,
            )
        elif bits == 8:
            bnb_config = BitsAndBytesConfig(load_in_8bit=True)
        else:
            raise ValueError("bits must be 4 or 8")

        model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
        model = prepare_model_for_kbit_training(model)
        config = LoraConfig(init_lora_weights="olora")
        model = get_peft_model(model, config)

        # check that the correct type is used for the weights
        base_layer = model.base_model.model.model.decoder.layers[0].self_attn.v_proj.base_layer.weight
        if bits == 4:
            assert isinstance(base_layer, bnb.nn.modules.Params4bit)
        else:
            assert isinstance(base_layer, bnb.nn.modules.Int8Params)

        inputs = torch.arange(10).unsqueeze(0).to(model.device)
        logits = model(inputs).logits  # does not raise
        assert torch.isfinite(logits).all()
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU")
class TestLoftQ:
    r"""
    Tests for LoftQ to ensure that it reduces the quantization error compared to normal LoRA quantization.
    """

    # The error factor indicates by how much the quantization error should be decreased when using LoftQ compared to
    # quantization without LoftQ. Thus 1.03 means that the error should be decreased by 3% at least. This is a very
    # conservative value to prevent flakiness, in practice most gains are > 1.5
    # Shared threshold used by every comparison test in this class.
    error_factor = 1.03
def get_input(self, model_id, device):
tokenizer = AutoTokenizer.from_pretrained(model_id)
inputs = tokenizer("All I want is", padding=True, return_tensors="pt")
if device == "cuda":
inputs = inputs.to("cuda")
return inputs
def get_base_model(self, model_id, device, **kwargs):
cls = AutoModelForSeq2SeqLM if "t5" in str(model_id) else AutoModelForCausalLM
model = cls.from_pretrained(model_id, **kwargs).eval()
if device == "cuda":
model = model.to("cuda")
return model
def get_logits(self, model, inputs):
if model.config.is_encoder_decoder:
input_ids = inputs["input_ids"]
return model(input_ids=input_ids, decoder_input_ids=input_ids).logits
return model(**inputs).logits
    def get_errors(
        self,
        tmp_path,
        bits=4,
        loftq_iter=1,
        device="cuda",
        model_id="hf-internal-testing/tiny-random-BloomForCausalLM",
        use_dora=False,
    ):
        # Helper function that returns the quantization errors (MAE and MSE) when comparing the quantized LoRA model
        # to the base model, vs the LoftQ quantized model to the base model. We expect the LoftQ quantized model to
        # have less error than the normal LoRA quantized model. Since we compare logits, the observed error is
        # already somewhat dampened because of the softmax.
        # Returns: (mae_quantized, mse_quantized, mae_loftq, mse_loftq)
        torch.manual_seed(0)
        model = self.get_base_model(model_id, device)
        task_type = TaskType.SEQ_2_SEQ_LM if model.config.is_encoder_decoder else TaskType.CAUSAL_LM
        inputs = self.get_input(model_id, device)
        # the base logits are the reference, we try to match those as closely as possible
        logits_base = self.get_logits(model, inputs)
        # clean up
        del model
        gc.collect()
        torch.cuda.empty_cache()

        # logits from the normal quantized LoRA model
        target_modules = "all-linear" if task_type != TaskType.SEQ_2_SEQ_LM else ["o", "k", "wi", "q", "v"]
        lora_config = LoraConfig(task_type=task_type, use_dora=use_dora, target_modules=target_modules)
        kwargs = {}
        if bits == 4:
            kwargs["quantization_config"] = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
        elif bits == 8:
            kwargs["quantization_config"] = BitsAndBytesConfig(load_in_8bit=True)
        else:
            raise ValueError("bits must be 4 or 8")
        quantized_model = get_peft_model(
            self.get_base_model(model_id, device=None, **kwargs),
            lora_config,
        )
        torch.manual_seed(0)
        logits_quantized = self.get_logits(quantized_model, inputs)
        del quantized_model
        gc.collect()
        torch.cuda.empty_cache()

        # logits from quantized LoRA model using LoftQ
        loftq_config = LoftQConfig(loftq_bits=bits, loftq_iter=loftq_iter)
        lora_config = LoraConfig(
            task_type=task_type,
            init_lora_weights="loftq",
            loftq_config=loftq_config,
            use_dora=use_dora,
            target_modules=target_modules,
        )
        model = self.get_base_model(model_id, device)
        if device == "cuda":
            model = model.to("cuda")
        loftq_model = get_peft_model(model, lora_config)
        if device == "cuda":
            loftq_model = loftq_model.to("cuda")

        # save LoRA weights, they should be initialized such that they minimize the quantization error
        loftq_model.base_model.peft_config["default"].init_lora_weights = True
        loftq_model.save_pretrained(tmp_path / "loftq_model")

        loftq_model = loftq_model.unload()
        loftq_model.save_pretrained(tmp_path / "base_model")

        del loftq_model
        gc.collect()
        torch.cuda.empty_cache()

        # now load quantized model and apply LoftQ-initialized weights on top
        base_model = self.get_base_model(tmp_path / "base_model", device=None, **kwargs, torch_dtype=torch.float32)
        loftq_model = PeftModel.from_pretrained(base_model, tmp_path / "loftq_model", is_trainable=True)

        # TODO sanity check: model is quantized
        torch.manual_seed(0)
        logits_loftq = self.get_logits(loftq_model, inputs)
        del loftq_model
        gc.collect()
        torch.cuda.empty_cache()

        # error of plain quantization vs error after LoftQ initialization
        mae_quantized = torch.abs(logits_base - logits_quantized).mean()
        mse_quantized = torch.pow(logits_base - logits_quantized, 2).mean()
        mae_loftq = torch.abs(logits_base - logits_loftq).mean()
        mse_loftq = torch.pow(logits_base - logits_loftq, 2).mean()
        return mae_quantized, mse_quantized, mae_loftq, mse_loftq
@pytest.mark.parametrize("device", ["cuda", "cpu"])
def test_bloomz_loftq_4bit(self, device, tmp_path):
# In this test, we compare the logits of the base model, the quantized LoRA model, and the quantized model
# using LoftQ. When quantizing, we expect a certain level of error. However, we expect the LoftQ quantized
# model to have less error than the normal LoRA quantized model. Note that when using normal LoRA, the
# quantization error is simply the error from quantization without LoRA, as LoRA is a no-op before training.
# We still apply LoRA for the test for consistency.
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(bits=4, device=device, tmp_path=tmp_path)
# first, sanity check that all errors are > 0.0
assert mae_quantized > 0.0
assert mse_quantized > 0.0
assert mae_loftq > 0.0
assert mse_loftq > 0.0
# next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin
assert mse_loftq < (mse_quantized / self.error_factor)
assert mae_loftq < (mae_quantized / self.error_factor)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
def test_bloomz_loftq_4bit_iter_5(self, device, tmp_path):
# Same test as the previous one but with 5 iterations. We should expect the error to be even smaller with more
# iterations, but in practice the difference is not that large, at least not for this small base model.
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(
bits=4, loftq_iter=5, device=device, tmp_path=tmp_path
)
# first, sanity check that all errors are > 0.0
assert mae_quantized > 0.0
assert mse_quantized > 0.0
assert mae_loftq > 0.0
assert mse_loftq > 0.0
# next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin
assert mse_loftq < (mse_quantized / self.error_factor)
assert mae_loftq < (mae_quantized / self.error_factor)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
def test_bloomz_loftq_8bit(self, device, tmp_path):
# Same test as test_bloomz_loftq_4bit but with 8 bits.
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(bits=8, device=device, tmp_path=tmp_path)
# first, sanity check that all errors are > 0.0
assert mae_quantized > 0.0
assert mse_quantized > 0.0
assert mae_loftq > 0.0
assert mse_loftq > 0.0
# next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin
assert mse_loftq < (mse_quantized / self.error_factor)
assert mae_loftq < (mae_quantized / self.error_factor)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
def test_bloomz_loftq_8bit_iter_5(self, device, tmp_path):
# Same test as test_bloomz_loftq_4bit_iter_5 but with 8 bits.
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(
bits=8, loftq_iter=5, device=device, tmp_path=tmp_path
)
# first, sanity check that all errors are > 0.0
assert mae_quantized > 0.0
assert mse_quantized > 0.0
assert mae_loftq > 0.0
assert mse_loftq > 0.0
# next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin
assert mse_loftq < (mse_quantized / self.error_factor)
assert mae_loftq < (mae_quantized / self.error_factor)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
def test_t5_loftq_4bit(self, device, tmp_path):
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(
bits=4, device=device, model_id="t5-small", tmp_path=tmp_path
)
# first, sanity check that all errors are > 0.0
assert mae_quantized > 0.0
assert mse_quantized > 0.0
assert mae_loftq > 0.0
assert mse_loftq > 0.0
# next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin
assert mse_loftq < (mse_quantized / self.error_factor)
assert mae_loftq < (mae_quantized / self.error_factor)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
def test_t5_loftq_8bit(self, device, tmp_path):
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(
bits=8, device=device, model_id="t5-small", tmp_path=tmp_path
)
# first, sanity check that all errors are > 0.0
assert mae_quantized > 0.0
assert mse_quantized > 0.0
assert mae_loftq > 0.0
assert mse_loftq > 0.0
# next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin
assert mse_loftq < (mse_quantized / self.error_factor)
assert mae_loftq < (mae_quantized / self.error_factor)
@pytest.mark.xfail # failing for now, but having DoRA pass is only a nice-to-have, not a must, so we're good
@pytest.mark.parametrize("device", ["cuda", "cpu"])
def test_bloomz_loftq_4bit_dora(self, device, tmp_path):
# same as test_bloomz_loftq_4bit but with DoRA
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(
bits=4, device=device, use_dora=True, tmp_path=tmp_path
)
# first, sanity check that all errors are > 0.0
assert mae_quantized > 0.0
assert mse_quantized > 0.0
assert mae_loftq > 0.0
assert mse_loftq > 0.0
# next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin
factor = 3
assert mae_loftq < (mae_quantized / factor)
assert mse_loftq < (mse_quantized / factor)
@pytest.mark.parametrize("device", ["cuda", "cpu"])
def test_bloomz_loftq_8bit_dora(self, device, tmp_path):
# same as test_bloomz_loftq_8bit but with DoRA
mae_quantized, mse_quantized, mae_loftq, mse_loftq = self.get_errors(
bits=8, device=device, use_dora=True, tmp_path=tmp_path
)
# first, sanity check that all errors are > 0.0
assert mae_quantized > 0.0
assert mse_quantized > 0.0
assert mae_loftq > 0.0
assert mse_loftq > 0.0
# next, check that LoftQ quantization errors are smaller than LoRA errors by a certain margin
assert mae_loftq < (mae_quantized / self.error_factor)
assert mse_loftq < (mse_quantized / self.error_factor)
    def test_replace_lora_weights_with_loftq_using_callable(self):
        """
        Test replacing LoRa weights with LoFTQ using a callable.
        Using the replace_lora_weights_loftq function, we replace the LoRa weights of a bnb-quantized model with LoRA
        weights initialized by LoftQ on the fly. We use a callable to decide whether to replace the weights or not.
        This callable checks, for each weight, if replacing it would actually result in logits that are closer to the
        original logits of the non-quantized model.
        """
        torch.manual_seed(0)
        model_id = "bigscience/bloomz-560m"
        device = "cuda"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        inputs = tokenizer("The dog was", padding=True, return_tensors="pt").to(device)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(model_id).to(device)
            # reference logits of the full-precision model; all errors below are measured against these
            logits_base = model(**inputs).logits
            # replace_lora_weights_loftq needs the unquantized weights on disk
            model.save_pretrained(tmp_dir)
            # load in 4bit
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=True,
            )
            model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=bnb_config)
            model = get_peft_model(model, LoraConfig(task_type="CAUSAL_LM", target_modules="all-linear"))
            logits_lora = model(**inputs).logits
            # best MSE seen so far; updated greedily by the callback below
            current_mse = float("inf")
            logs = []
            def my_callback(model, module_name):
                """Callable to replace weights with LoFTQ if the mse is lower than the current best one."""
                nonlocal current_mse
                logits = model(**inputs).logits
                mse = ((logits_base - logits) ** 2).mean()
                if mse < current_mse:
                    current_mse = mse
                    logs.append(True)
                    return True  # keep this LoftQ replacement
                logs.append(False)
                return False  # roll back this replacement
            replace_lora_weights_loftq(model, model_path=tmp_dir, callback=my_callback)
            logits_loftq = model(**inputs).logits
            mae_lora = (logits_base - logits_lora).abs().mean()
            mae_loftq = (logits_base - logits_loftq).abs().mean()
            mse_lora = ((logits_base - logits_lora) ** 2).mean()
            mse_loftq = ((logits_base - logits_loftq) ** 2).mean()
            # check that the error was reduced by a certain margin
            assert mae_loftq * 1.5 < mae_lora
            assert mse_loftq * 2.5 < mse_lora
            # check that the callback has returned some True and some False values
            assert any(logs)
            assert not all(logs)
        del model
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()
    def test_replace_lora_weights_with_local_model(self):
        """Regression test: replace_lora_weights_loftq must work with a locally saved base model."""
        # see issue 2020
        torch.manual_seed(0)
        model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
        device = "cuda"
        with tempfile.TemporaryDirectory() as tmp_dir:
            # save base model locally
            model = AutoModelForCausalLM.from_pretrained(model_id).to(device)
            model.save_pretrained(tmp_dir)
            del model
            # load in 4bit
            bnb_config = BitsAndBytesConfig(
                load_in_4bit=True,
                bnb_4bit_use_double_quant=True,
            )
            # load the base model from local directory
            model = AutoModelForCausalLM.from_pretrained(tmp_dir, quantization_config=bnb_config)
            model = get_peft_model(model, LoraConfig())
            # passing the local path directly works
            replace_lora_weights_loftq(model, model_path=tmp_dir)
            del model
            # load the base model from local directory
            model = AutoModelForCausalLM.from_pretrained(tmp_dir, quantization_config=bnb_config)
            model = get_peft_model(model, LoraConfig())
            # when not passing, ensure that users are made aware of the `model_path` argument
            with pytest.raises(ValueError, match="model_path"):
                replace_lora_weights_loftq(model)
        del model
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()
def test_config_no_loftq_init(self):
with pytest.warns(
UserWarning,
match="`loftq_config` specified but will be ignored when `init_lora_weights` is not 'loftq'.",
):
LoraConfig(loftq_config=LoftQConfig())
def test_config_no_loftq_config(self):
with pytest.raises(ValueError, match="`loftq_config` must be specified when `init_lora_weights` is 'loftq'."):
LoraConfig(init_lora_weights="loftq")
@require_bitsandbytes
@require_torch_gpu
class MultiprocessTester(unittest.TestCase):
    def test_notebook_launcher(self):
        """Run the multiprocessing notebook-launcher script in a subprocess and check it exits cleanly."""
        launcher_script = os.path.join("scripts", "launch_notebook_mp.py")
        with patch_environment(omp_num_threads=1):
            run_command(["python", launcher_script], env=os.environ.copy())
@require_non_cpu
class MixedPrecisionTests(unittest.TestCase):
    # Tests covering the interaction between float16 base weights, adapter dtype autocasting
    # (autocast_adapter_dtype), and AMP (fp16=True) training with the HF Trainer.
    def setUp(self):
        self.causal_lm_model_id = "facebook/opt-125m"
        self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
        self.config = LoraConfig(
            r=16,
            lora_alpha=32,
            task_type="CAUSAL_LM",
        )
        data = load_dataset("ybelkada/english_quotes_copy")
        self.data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test. Based on
        https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
        gc.collect()
    @pytest.mark.single_gpu_tests
    def test_model_using_float16_with_amp_raises(self):
        # This test shows the issue with using a model in fp16 and then trying to use it with mixed precision training,
        # which should not use fp16.
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            torch_dtype=torch.float16,
        )
        # autocast_adapter_dtype=False leaves the trainable adapter weights in float16
        model = get_peft_model(model, self.config, autocast_adapter_dtype=False)
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = Trainer(
                model=model,
                train_dataset=self.data["train"],
                args=TrainingArguments(
                    fp16=True,  # <= this is required for the error to be raised
                    output_dir=tmp_dir,
                    max_steps=3,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."):
                trainer.train()
    @pytest.mark.single_gpu_tests
    def test_model_using_float16_autocast_dtype(self):
        # Here we use autocast_adapter_dtype=True (the default) to automatically promote the adapter weights to float32.
        # No exception should be raised.
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            torch_dtype=torch.float16,
        )
        model = get_peft_model(model, self.config, autocast_adapter_dtype=True)
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = Trainer(
                model=model,
                train_dataset=self.data["train"],
                args=TrainingArguments(
                    fp16=True,  # <= same AMP setting as the raising test; no error expected, adapter is float32
                    output_dir=tmp_dir,
                    max_steps=3,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            trainer.train()  # does not raise
    @pytest.mark.single_gpu_tests
    def test_model_using_float16_explicit_cast(self):
        # Same test as above but containing the fix to make it work
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            torch_dtype=torch.float16,
        )
        model = get_peft_model(model, self.config, autocast_adapter_dtype=False)
        # here we manually promote the adapter weights to float32
        for param in model.parameters():
            if param.requires_grad:
                param.data = param.data.float()
        dtype_counts_before = Counter(p.dtype for p in model.parameters())
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            torch_dtype=torch.float16,
        )
        # automatic autocasting should yield exactly the same parameter-dtype distribution as the manual cast above
        model = get_peft_model(model, self.config, autocast_adapter_dtype=True)
        dtype_counts_after = Counter(p.dtype for p in model.parameters())
        assert dtype_counts_before == dtype_counts_after
        with tempfile.TemporaryDirectory() as tmp_dir:
            trainer = Trainer(
                model=model,
                train_dataset=self.data["train"],
                args=TrainingArguments(
                    fp16=True,  # <= same AMP setting as the raising test; no error expected here
                    max_steps=3,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            trainer.train()  # does not raise
    @pytest.mark.single_gpu_tests
    def test_load_model_using_float16_with_amp_raises(self):
        # Same as previous tests, but loading the adapter with PeftModel.from_pretrained instead
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            torch_dtype=torch.float16,
        )
        model = get_peft_model(model, self.config, autocast_adapter_dtype=False)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, torch_dtype=torch.float16)
            # autocast_adapter_dtype=False on load keeps the adapter in float16, triggering the AMP error below
            model = PeftModel.from_pretrained(model, tmp_dir, autocast_adapter_dtype=False, is_trainable=True)
            trainer = Trainer(
                model=model,
                train_dataset=self.data["train"],
                args=TrainingArguments(
                    fp16=True,  # <= this is required for the error to be raised
                    output_dir=tmp_dir,
                    max_steps=3,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."):
                trainer.train()
    @pytest.mark.single_gpu_tests
    def test_load_model_using_float16_autocast_dtype(self):
        # Same as previous tests, but loading the adapter with PeftModel.from_pretrained instead
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            torch_dtype=torch.float16,
        )
        # Below, we purposefully set autocast_adapter_dtype=False so that the saved adapter uses float16. We still want
        # the loaded adapter to use float32 when we load it with autocast_adapter_dtype=True.
        model = get_peft_model(model, self.config, autocast_adapter_dtype=False)
        # sanity check: this should have float16 adapter weights:
        assert (
            model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["default"].weight.dtype
            == torch.float16
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, torch_dtype=torch.float16)
            model = PeftModel.from_pretrained(model, tmp_dir, autocast_adapter_dtype=True, is_trainable=True)
            # sanity check: this should NOT have float16 adapter weights:
            assert (
                model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["default"].weight.dtype
                == torch.float32
            )
            trainer = Trainer(
                model=model,
                train_dataset=self.data["train"],
                args=TrainingArguments(
                    fp16=True,  # <= same AMP setting as the raising test; no error expected, adapter is float32
                    output_dir=tmp_dir,
                    max_steps=3,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            trainer.train()  # does not raise
    @pytest.mark.single_gpu_tests
    def test_load_adapter_using_float16_autocast_dtype(self):
        # Here we test the load_adapter method with autocast_adapter_dtype. We show that autocasting is prevented when
        # calling load_model(..., autocast_adapter_dtype=False) and that it is enabled when calling
        # load_model(..., autocast_adapter_dtype=True) (the default).
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            torch_dtype=torch.float16,
        )
        # Below, we purposefully set autocast_adapter_dtype=False so that the saved adapter uses float16. We still want
        # the loaded adapter to use float32 when we load it with autocast_adapter_dtype=True.
        model = get_peft_model(model, self.config, autocast_adapter_dtype=False)
        # sanity check: this should have float16 adapter weights:
        assert (
            model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["default"].weight.dtype
            == torch.float16
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, torch_dtype=torch.float16)
            # the default adapter is now in float16
            model = get_peft_model(model, self.config, autocast_adapter_dtype=False)
            # sanity check: this new default adapter also has float16 adapter weights (no autocasting here):
            assert (
                model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["default"].weight.dtype
                == torch.float16
            )
            # now load the first adapter in float16 using the adapter name "loaded16"
            model.load_adapter(tmp_dir, "loaded16", autocast_adapter_dtype=False)
            assert (
                model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["loaded16"].weight.dtype
                == torch.float16
            )
            # now load the first adapter in float32 using the adapter name "loaded32"
            model.load_adapter(tmp_dir, "loaded32", autocast_adapter_dtype=True)
            assert (
                model.base_model.model.model.decoder.layers[0].self_attn.v_proj.lora_A["loaded32"].weight.dtype
                == torch.float32
            )
            # training with the default adapter, which is in float16, should raise
            model.set_adapter("default")
            trainer = Trainer(
                model=model,
                train_dataset=self.data["train"],
                args=TrainingArguments(
                    fp16=True,  # <= this is required for the error to be raised
                    output_dir=tmp_dir,
                    max_steps=3,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."):
                trainer.train()
            # training the model with the adapter "loaded16", which is in float16, should also raise
            model.set_adapter("loaded16")
            trainer = Trainer(
                model=model,
                train_dataset=self.data["train"],
                args=TrainingArguments(
                    fp16=True,  # <= this is required for the error to be raised
                    output_dir=tmp_dir,
                    max_steps=3,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            with pytest.raises(ValueError, match="Attempting to unscale FP16 gradients."):
                trainer.train()
            # training the model with the adapter "loaded32", which is in float32, should not raise
            model.set_adapter("loaded32")
            trainer = Trainer(
                model=model,
                train_dataset=self.data["train"],
                args=TrainingArguments(
                    fp16=True,  # <= same AMP setting; no error expected since "loaded32" is float32
                    output_dir=tmp_dir,
                    max_steps=3,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            trainer.train()  # does not raise
@require_non_xpu
@require_torch_gpu
@require_aqlm
@unittest.skipUnless(
    version.parse(importlib.metadata.version("transformers")) >= version.parse("4.38.0"),
    "test requires `transformers>=4.38.0`",
)
class PeftAqlmGPUTests(unittest.TestCase):
    r"""
    AQLM + peft tests
    """
    def setUp(self):
        # Base model is pre-quantized with AQLM (2-bit, 1x16 codebooks)
        self.causal_lm_model_id = "BlackSamorez/TinyLlama-1_1B-Chat-v1_0-AQLM-2Bit-1x16-hf"
        self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test. Based on
        https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        torch.cuda.empty_cache()
    def _check_inference_finite(self, model, batch):
        # try inference without Trainer class
        training = model.training  # remember mode so it can be restored afterwards
        model.eval()
        output = model(**batch.to(model.device))
        assert torch.isfinite(output.logits).all()
        model.train(training)
    @pytest.mark.single_gpu_tests
    # see https://github.com/Vahe1994/AQLM/pull/139
    @pytest.mark.xfail(reason="AQLM does not work with PyTorch 2.5 (yet)", strict=True, raises=AttributeError)
    def test_causal_lm_training_aqlm(self):
        r"""
        Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set
        correctly.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                device_map="cuda",
                torch_dtype="auto",
            )
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,
                    learning_rate=2e-4,
                    logging_steps=1,
                    output_dir=tmp_dir,
                    fp16=True,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            model.config.use_cache = False  # disable the generation cache during training
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            # the adapter config and weights must have been written out
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@require_non_xpu
@require_torch_gpu
@require_hqq
@unittest.skipUnless(
    version.parse(importlib.metadata.version("transformers")) >= version.parse("4.36.1"),
    "test requires `transformers>=4.36.1`",
)
class PeftHqqGPUTests(unittest.TestCase):
    r"""
    HQQ + peft tests
    """
    def setUp(self):
        self.causal_lm_model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"
        self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test. Based on
        https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        torch.cuda.empty_cache()
    @pytest.mark.single_gpu_tests
    @parameterized.expand([False, True])
    def test_causal_lm_training_hqq(self, use_dora):
        r"""
        Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set
        correctly.
        """
        from transformers import HqqConfig
        with tempfile.TemporaryDirectory() as tmp_dir:
            device = "cuda"
            compute_dtype = torch.float16
            quant_config = HqqConfig(nbits=4, group_size=64)
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                device_map=device,
                torch_dtype=compute_dtype,
                quantization_config=quant_config,
            )
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
                use_dora=use_dora,
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,
                    learning_rate=2e-4,
                    logging_steps=1,
                    output_dir=tmp_dir,
                    fp16=True,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            model.config.use_cache = False  # disable the generation cache during training
            trainer.train()
            model.save_pretrained(tmp_dir)
            # the adapter config and weights must have been written out
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    def test_hqq_lora_model_outputs(self):
        # check that the outputs generated by HQQ with LoRA are similar to those without HQQ
        from transformers import HqqConfig
        device = "cuda"
        compute_dtype = torch.float16
        # first load the model without HQQ
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            device_map=device,
            torch_dtype=compute_dtype,
        )
        config = LoraConfig(
            target_modules=["q_proj", "v_proj"],
            task_type="CAUSAL_LM",
            init_lora_weights=False,
        )
        torch.manual_seed(0)
        model = get_peft_model(model, config).eval()
        inputs = self.tokenizer("The meaning of unit tests is", return_tensors="pt").to(model.device)
        with torch.inference_mode():
            output_normal = model(**inputs).logits
        assert torch.isfinite(output_normal).all()
        del model
        gc.collect()
        torch.cuda.empty_cache()
        # now load with HQQ
        quant_config = HqqConfig(nbits=4, group_size=64)
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            device_map=device,
            torch_dtype=compute_dtype,
            quantization_config=quant_config,
        )
        torch.manual_seed(0)  # same seed so the random LoRA init matches the non-HQQ run
        model = get_peft_model(model, config).eval()
        with torch.inference_mode():
            output_hqq = model(**inputs).logits
        # check that outputs of HQQ are highly correlated; there are outliers, so don't check for equality
        cc_matrix = torch.corrcoef(torch.stack((output_normal.float().flatten(), output_hqq.float().flatten())))
        assert cc_matrix.min() > 0.97
        # check that outputs are the same after merging
        # Fix: previously the adapter was never actually merged here, so this check silently re-tested the unmerged
        # outputs; now merge first and compare the merged outputs.
        model.merge_adapter()
        with torch.inference_mode():
            output_merged = model(**inputs).logits
        cc_matrix = torch.corrcoef(torch.stack((output_normal.float().flatten(), output_merged.float().flatten())))
        assert cc_matrix.min() > 0.97
        # check outputs are the same after unmerging
        model.unmerge_adapter()
        with torch.inference_mode():
            output_unmerged = model(**inputs).logits
        cc_matrix = torch.corrcoef(torch.stack((output_normal.float().flatten(), output_unmerged.float().flatten())))
        assert cc_matrix.min() > 0.97
        # check that the results are the same after saving and loading
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            del model
            gc.collect()
            torch.cuda.empty_cache()
            quant_config = HqqConfig(nbits=4, group_size=64)
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                device_map=device,
                torch_dtype=compute_dtype,
                quantization_config=quant_config,
            )
            model = PeftModel.from_pretrained(model, tmp_dir)
            with torch.inference_mode():
                output_loaded = model(**inputs).logits
            # for loading, we expect high precision, so check for equality and not just correlation
            atol, rtol = 1e-6, 1e-6
            assert torch.allclose(output_hqq, output_loaded, atol=atol, rtol=rtol)
        # check that outputs are the same after merge_and_unload
        model = model.merge_and_unload()
        with torch.inference_mode():
            output_merged_unloaded = model(**inputs).logits
        cc_matrix = torch.corrcoef(
            torch.stack((output_normal.float().flatten(), output_merged_unloaded.float().flatten()))
        )
        assert cc_matrix.min() > 0.97
@require_torch_gpu
@require_auto_awq
class PeftAwqGPUTests(unittest.TestCase):
    r"""
    Awq + peft tests
    """
    def setUp(self):
        # Base model is pre-quantized with AWQ
        self.causal_lm_model_id = "peft-internal-testing/opt-125m-awq"
        self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test. Based on
        https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        torch.cuda.empty_cache()
    def _check_inference_finite(self, model, batch):
        # try inference without Trainer class
        training = model.training  # remember mode so it can be restored afterwards
        model.eval()
        output = model(**batch.to(model.device))
        assert torch.isfinite(output.logits).all()
        model.train(training)
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_awq(self):
        r"""
        Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set
        correctly.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                device_map="auto",
            )
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            # TODO: deal correctly with this case in transformers
            model._is_quantized_training_enabled = True
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,
                    learning_rate=2e-4,
                    logging_steps=1,
                    output_dir=tmp_dir,
                    fp16=True,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            model.config.use_cache = False  # disable the generation cache during training
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            # the adapter config and weights must have been written out
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.multi_gpu_tests
    @require_torch_multi_gpu
    def test_causal_lm_training_multi_gpu(self):
        r"""
        Test the CausalLM training on a multi-GPU device. The test would simply fail if the adapters are not set
        correctly.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                device_map="auto",
            )
            # the model must actually have been sharded across all available GPUs
            assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
            model = prepare_model_for_kbit_training(model)
            setattr(model, "model_parallel", True)
            setattr(model, "is_parallelizable", True)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("Abirate/english_quotes")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,
                    learning_rate=2e-4,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            model.config.use_cache = False  # disable the generation cache during training
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            # the adapter config and weights must have been written out
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@require_non_xpu
@require_torch_gpu
@require_eetq
class PeftEetqGPUTests(unittest.TestCase):
    r"""
    EETQ + peft tests
    """
    def setUp(self):
        self.causal_lm_model_id = "facebook/opt-125m"
        self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test. Based on
        https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        torch.cuda.empty_cache()
    def _check_inference_finite(self, model, batch):
        # try inference without Trainer class
        training = model.training  # remember mode so it can be restored afterwards
        model.eval()
        output = model(**batch.to(model.device))
        assert torch.isfinite(output.logits).all()
        model.train(training)
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_eetq(self):
        r"""
        Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set
        correctly.
        """
        from transformers import EetqConfig
        with tempfile.TemporaryDirectory() as tmp_dir:
            quantization_config = EetqConfig("int8")
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id, device_map="auto", quantization_config=quantization_config
            )
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,
                    learning_rate=2e-4,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            model.config.use_cache = False  # disable the generation cache during training
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            # the adapter config and weights must have been written out
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.multi_gpu_tests
    @require_torch_multi_gpu
    def test_causal_lm_training_multi_gpu_eetq(self):
        r"""
        Test the CausalLM training on a multi-GPU device. The test would simply fail if the adapters are not set
        correctly.
        """
        from transformers import EetqConfig
        with tempfile.TemporaryDirectory() as tmp_dir:
            quantization_config = EetqConfig("int8")
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id,
                device_map="auto",
                quantization_config=quantization_config,
            )
            # the model must actually have been sharded across all available GPUs
            assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))
            model = prepare_model_for_kbit_training(model)
            setattr(model, "model_parallel", True)
            setattr(model, "is_parallelizable", True)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("Abirate/english_quotes")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,
                    learning_rate=2e-4,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            model.config.use_cache = False  # disable the generation cache during training
            trainer.train()
            model.cpu().save_pretrained(tmp_dir)
            # the adapter config and weights must have been written out
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@require_non_xpu
@require_torch_gpu
@require_torchao
class PeftTorchaoGPUTests(unittest.TestCase):
r"""
torchao + peft tests
"""
supported_quant_types = [
"int8_weight_only",
"int8_dynamic_activation_int8_weight",
# int4_weight_only raises an error:
# RuntimeError: derivative for aten::_weight_int4pack_mm is not implemented
# "int4_weight_only",
]
    def setUp(self):
        """Prepare the shared model id/tokenizer and clear state leaked by earlier fp16 tests."""
        self.causal_lm_model_id = "facebook/opt-125m"
        self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id)
        # torchao breaks with fp16 and if a previous test uses fp16, transformers will set this env var, which affects
        # subsequent tests, therefore the env var needs to be cleared explicitly
        #
        # TODO: remove this once https://github.com/huggingface/transformers/pull/34886 is merged
        os.environ.pop("ACCELERATE_MIXED_PRECISION", None)
    def tearDown(self):
        r"""
        Efficient mechanism to free GPU memory after each test. Based on
        https://github.com/huggingface/transformers/issues/21094
        """
        gc.collect()
        torch.cuda.empty_cache()
    @parameterized.expand(supported_quant_types)
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_single_gpu_torchao(self, quant_type):
        """Smoke-test LoRA training on a single GPU for each supported torchao quantization type."""
        from transformers import TorchAoConfig
        device = 0
        with tempfile.TemporaryDirectory() as tmp_dir:
            quantization_config = TorchAoConfig(quant_type=quant_type)
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id, device_map=device, quantization_config=quantization_config
            )
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,
                    learning_rate=2e-4,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            trainer.model.config.use_cache = False  # disable the generation cache during training
            trainer.train()
            model.save_pretrained(tmp_dir)
            # the adapter config and weights must have been written out
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
    @pytest.mark.single_gpu_tests
    def test_causal_lm_training_single_gpu_torchao_dora_int8_weight_only(self):
        """Same smoke test as above, but with DoRA enabled and fixed int8_weight_only quantization."""
        from transformers import TorchAoConfig
        device = 0
        with tempfile.TemporaryDirectory() as tmp_dir:
            quantization_config = TorchAoConfig(quant_type="int8_weight_only")
            model = AutoModelForCausalLM.from_pretrained(
                self.causal_lm_model_id, device_map=device, quantization_config=quantization_config
            )
            model = prepare_model_for_kbit_training(model)
            config = LoraConfig(
                r=16,
                lora_alpha=32,
                target_modules=["q_proj", "v_proj"],
                lora_dropout=0.05,
                bias="none",
                task_type="CAUSAL_LM",
                use_dora=True,
            )
            model = get_peft_model(model, config)
            data = load_dataset("ybelkada/english_quotes_copy")
            data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
            trainer = Trainer(
                model=model,
                train_dataset=data["train"],
                args=TrainingArguments(
                    per_device_train_batch_size=4,
                    gradient_accumulation_steps=4,
                    warmup_steps=2,
                    max_steps=3,
                    learning_rate=2e-4,
                    logging_steps=1,
                    output_dir=tmp_dir,
                ),
                data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
            )
            trainer.model.config.use_cache = False  # disable the generation cache during training
            trainer.train()
            model.save_pretrained(tmp_dir)
            # the adapter config and weights must have been written out
            assert "adapter_config.json" in os.listdir(tmp_dir)
            assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)
            # assert loss is not None
            assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.single_gpu_tests
def test_causal_lm_training_single_gpu_torchao_dora_int8_dynamic_activation_int8_weight_raises(self):
    """DoRA on an int8_dynamic_activation_int8_weight torchao model must be rejected."""
    from transformers import TorchAoConfig

    gpu_index = 0
    quant_cfg = TorchAoConfig(quant_type="int8_dynamic_activation_int8_weight")
    base_model = AutoModelForCausalLM.from_pretrained(
        self.causal_lm_model_id, device_map=gpu_index, quantization_config=quant_cfg
    )
    base_model = prepare_model_for_kbit_training(base_model)

    lora_cfg = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=["q_proj", "v_proj"],
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
        use_dora=True,
    )

    # Wrapping must fail: DoRA is not implemented for this quantization scheme.
    with pytest.raises(NotImplementedError):
        get_peft_model(base_model, lora_cfg)
@pytest.mark.single_gpu_tests
def test_causal_lm_training_single_gpu_torchao_int4_raises(self):
    """int4_weight_only is unsupported and must raise a clear ValueError on wrap."""
    # int4_weight_only raises an error:
    # RuntimeError: derivative for aten::_weight_int4pack_mm is not implemented
    # TODO: Once proper torchao support for int4 is added, remove this test and add int4 to supported_quant_types
    from transformers import TorchAoConfig

    gpu_index = 0
    quant_cfg = TorchAoConfig(quant_type="int4_weight_only")
    base_model = AutoModelForCausalLM.from_pretrained(
        self.causal_lm_model_id, device_map=gpu_index, quantization_config=quant_cfg
    )
    base_model = prepare_model_for_kbit_training(base_model)

    lora_cfg = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=["q_proj", "v_proj"],
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
    )

    expected = re.escape("TorchaoLoraLinear only supports int8 weights for now")
    with pytest.raises(ValueError, match=expected):
        get_peft_model(base_model, lora_cfg)
@parameterized.expand(supported_quant_types)
@pytest.mark.multi_gpu_tests
@require_torch_multi_gpu
def test_causal_lm_training_multi_gpu_torchao(self, quant_type):
    """Smoke test: LoRA training on a torchao-quantized model sharded over all
    available GPUs (naive model parallelism via device_map="auto")."""
    from transformers import TorchAoConfig

    with tempfile.TemporaryDirectory() as tmp_dir:
        quantization_config = TorchAoConfig(quant_type=quant_type)
        model = AutoModelForCausalLM.from_pretrained(
            self.causal_lm_model_id,
            device_map="auto",
            quantization_config=quantization_config,
            torch_dtype=torch.bfloat16,
        )
        # Sanity check that the model was actually spread over every GPU.
        assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))

        model = prepare_model_for_kbit_training(model)

        # Tell Trainer this is a model-parallel setup so it does not wrap in DP.
        model.model_parallel = True
        model.is_parallelizable = True

        config = LoraConfig(
            r=16,
            lora_alpha=32,
            target_modules=["q_proj", "v_proj"],
            lora_dropout=0.05,
            bias="none",
            task_type="CAUSAL_LM",
        )
        model = get_peft_model(model, config)

        data = load_dataset("ybelkada/english_quotes_copy")
        data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True)
        # 3 optimizer steps only: verifies the run completes, not convergence.
        trainer = Trainer(
            model=model,
            train_dataset=data["train"],
            args=TrainingArguments(
                per_device_train_batch_size=4,
                gradient_accumulation_steps=4,
                warmup_steps=2,
                max_steps=3,
                learning_rate=2e-4,
                logging_steps=1,
                output_dir=tmp_dir,
            ),
            data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False),
        )
        model.config.use_cache = False
        trainer.train()

        model.save_pretrained(tmp_dir)
        assert "adapter_config.json" in os.listdir(tmp_dir)
        assert SAFETENSORS_WEIGHTS_NAME in os.listdir(tmp_dir)

        # assert loss is not None
        assert trainer.state.log_history[-1]["train_loss"] is not None
@pytest.mark.multi_gpu_tests
@require_torch_multi_gpu
def test_causal_lm_training_multi_gpu_torchao_int4_raises(self):
    """Multi-GPU variant: wrapping an int4_weight_only torchao model must raise."""
    # int4_weight_only raises an error:
    # RuntimeError: derivative for aten::_weight_int4pack_mm is not implemented
    # TODO: Once proper torchao support for int4 is added, remove this test and add int4 to supported_quant_types
    from transformers import TorchAoConfig

    quantization_config = TorchAoConfig(quant_type="int4_weight_only")
    model = AutoModelForCausalLM.from_pretrained(
        self.causal_lm_model_id,
        device_map="auto",
        quantization_config=quantization_config,
        torch_dtype=torch.bfloat16,
    )
    # Sanity check: the model is sharded over every available GPU.
    assert set(model.hf_device_map.values()) == set(range(torch.cuda.device_count()))

    model = prepare_model_for_kbit_training(model)

    model.model_parallel = True
    model.is_parallelizable = True

    config = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=["q_proj", "v_proj"],
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
    )
    msg = re.escape("TorchaoLoraLinear only supports int8 weights for now")
    with pytest.raises(ValueError, match=msg):
        get_peft_model(model, config)
@pytest.mark.single_gpu_tests
def test_torchao_merge_layers_int8_weight_only(self):
    """Merging/unmerging LoRA into int8_weight_only torchao weights keeps logits
    (approximately) unchanged and the base weights quantized."""
    from torchao.dtypes import AffineQuantizedTensor
    from transformers import TorchAoConfig

    quant_type = "int8_weight_only"
    torch.manual_seed(0)
    device = 0
    dummy_input = torch.arange(10).view(-1, 1).to(device)

    quantization_config = TorchAoConfig(quant_type=quant_type)
    model = AutoModelForCausalLM.from_pretrained(
        self.causal_lm_model_id, device_map=device, quantization_config=quantization_config
    ).eval()
    logits_base = model(dummy_input)[0]

    config = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=["q_proj", "v_proj"],
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
        init_lora_weights=False,  # random init so LoRA actually changes the output
    )
    model = get_peft_model(model, config)
    model.eval()
    logits = model(dummy_input)[0]

    # sanity check: outputs changed
    # precision is quite low, so we need to use high atol and rtol
    atol, rtol = 1e-1, 1e-1
    assert not torch.allclose(logits, logits_base, atol=atol, rtol=rtol)

    model.merge_adapter()
    logits_merged = model(dummy_input)[0]
    # After merging, the base weights must still be torchao-quantized tensors.
    for name, module in model.named_modules():
        if "base_layer" in name:
            assert isinstance(module.weight, AffineQuantizedTensor)

    model.unmerge_adapter()
    logits_unmerged = model(dummy_input)[0]
    for name, module in model.named_modules():
        if "base_layer" in name:
            assert isinstance(module.weight, AffineQuantizedTensor)

    model = model.merge_and_unload()
    logits_merged_unloaded = model(dummy_input)[0]

    # merge/unmerge/unload must all reproduce the adapted logits (within quant noise).
    assert torch.allclose(logits, logits_merged, atol=atol, rtol=rtol)
    assert torch.allclose(logits, logits_unmerged, atol=atol, rtol=rtol)
    assert torch.allclose(logits, logits_merged_unloaded, atol=atol, rtol=rtol)
@pytest.mark.single_gpu_tests
def test_torchao_merge_layers_int8_dynamic_activation_int8_weight_raises(self):
    """Merging must raise for quant schemes whose weights cannot be dequantized."""
    # int8_dynamic_activation_int8_weight does not support dequantize, thus merging does not work
    from transformers import TorchAoConfig

    quant_type = "int8_dynamic_activation_int8_weight"
    torch.manual_seed(0)
    device = 0

    quantization_config = TorchAoConfig(quant_type=quant_type)
    model = AutoModelForCausalLM.from_pretrained(
        self.causal_lm_model_id, device_map=device, quantization_config=quantization_config
    ).eval()

    config = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=["q_proj", "v_proj"],
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
        init_lora_weights=False,
    )
    model = get_peft_model(model, config)

    msg = re.escape(
        "Weights of type LinearActivationQuantizedTensor do not support dequantization (yet), which is needed to "
        "support merging."
    )
    with pytest.raises(NotImplementedError, match=msg):
        model.merge_adapter()
# Precisions exercised by the autocast tests below (each entry is one dtype).
PRECISIONS = [(torch.float32), (torch.float16), (torch.bfloat16)]

# Shared LoRA hyperparameters for the small autocast models.
LORA_PARAMS = {
    "r": 8,
    "lora_alpha": 16,
    "lora_dropout": 0.05,
}
class SimpleModel(torch.nn.Module):
    """Tiny Embedding -> LayerNorm -> Linear model used for autocast dtype checks."""

    def __init__(self):
        super().__init__()
        self.embedding_layer = torch.nn.Embedding(1000, 768)
        self.layer_norm = torch.nn.LayerNorm(768)
        self.linear_transform = torch.nn.Linear(768, 256)

    def forward(self, input_ids):
        # Chain the three stages directly rather than naming every intermediate.
        hidden = self.layer_norm(self.embedding_layer(input_ids))
        return self.linear_transform(hidden)
class SimpleConv2DModel(torch.nn.Module):
    """Tiny Embedding -> LayerNorm -> Conv2d model used for autocast dtype checks."""

    def __init__(self):
        super().__init__()
        self.embedding_layer = torch.nn.Embedding(1000, 768)
        self.layer_norm = torch.nn.LayerNorm(768)
        self.conv2d_transform = torch.nn.Conv2d(1, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))

    def forward(self, input_ids):
        normed = self.layer_norm(self.embedding_layer(input_ids))
        # Conv2d needs a channel axis: feed (batch, 1, seq, hidden), then squeeze
        # dim 1 back out (a no-op when the conv emits more than one channel).
        return self.conv2d_transform(normed.unsqueeze(1)).squeeze(1)
@require_non_cpu
class TestAutoCast(unittest.TestCase):
    """Check that LoRA output dtypes agree with torch.autocast's inferred dtype."""

    device = infer_device()

    # This test makes sure that LoRA dtypes are consistent with the types
    # inferred by torch.autocast under the tested PRECISIONS.
    @parameterized.expand(PRECISIONS)
    def test_simple_model(self, *args, **kwargs):
        self._test_model(SimpleModel(), *args, **kwargs)

    @parameterized.expand(PRECISIONS)
    def test_simple_lora_linear_model(self, *args, **kwargs):
        simple_model = SimpleModel()
        config = LoraConfig(
            **LORA_PARAMS,
            target_modules=["linear_transform"],
        )
        lora_model = get_peft_model(simple_model, config)
        self._test_model(lora_model, *args, **kwargs)

    @parameterized.expand(PRECISIONS)
    def test_simple_lora_embedding_model(self, *args, **kwargs):
        simple_model = SimpleModel()
        config = LoraConfig(
            **LORA_PARAMS,
            target_modules=["embedding_layer"],
        )
        lora_model = get_peft_model(simple_model, config)
        self._test_model(lora_model, *args, **kwargs)

    @parameterized.expand(PRECISIONS)
    def test_simple_conv2d_model(self, *args, **kwargs):
        self._test_model(SimpleConv2DModel(), *args, **kwargs)

    @parameterized.expand(PRECISIONS)
    def test_simple_lora_conv2d_model(self, *args, **kwargs):
        simple_model = SimpleConv2DModel()
        config = LoraConfig(
            **LORA_PARAMS,
            target_modules=["conv2d_transform"],
        )
        lora_model = get_peft_model(simple_model, config)
        self._test_model(lora_model, *args, **kwargs)

    def _test_model(self, model, precision):
        """Run a forward under autocast(precision) and assert the output dtype matches."""
        # Move model to GPU
        model = model.to(self.device)

        # Prepare dummy inputs
        input_ids = torch.randint(0, 1000, (2, 10)).to(self.device)
        if precision == torch.bfloat16:
            # bf16 is only available on XPU or on CUDA devices that report support.
            is_xpu = self.device == "xpu"
            is_cuda_bf16 = self.device == "cuda" and torch.cuda.is_bf16_supported()
            if not (is_xpu or is_cuda_bf16):
                self.skipTest("Bfloat16 not supported on this device")

        # Forward pass with test precision
        with torch.autocast(enabled=True, dtype=precision, device_type=self.device):
            outputs = model(input_ids)
            assert outputs.dtype == precision
class TestFSDPWrap:
    """
    Test that we can successfully initialize an FSDP instance of the module.

    This is a very simple test, as it does not perform actual FSDP training. Here we just ensure that the FSDP instance
    can be created. This can fail for several reasons, e.g. int dtype from BNB or inconsistent requires_grad settings
    due to the auto wrap policy.
    """

    @pytest.mark.single_gpu_tests
    @require_bitsandbytes
    def test_bnb_4bit_wrap_fsdp(self):
        """A BNB 4-bit DoRA model must be wrappable in FSDP without raising."""
        quant_config = BitsAndBytesConfig(
            load_in_4bit=True,
            # float32 must be used, or else FSDP will complain about mixed int and float dtypes
            bnb_4bit_compute_dtype=torch.float32,
            bnb_4bit_quant_storage=torch.float32,
            bnb_4bit_use_double_quant=True,
        )
        model = AutoModelForCausalLM.from_pretrained(
            "facebook/opt-125m",
            quantization_config=quant_config,
            torch_dtype=torch.float32,
        )
        # model = prepare_model_for_kbit_training(model)
        config = LoraConfig(
            target_modules=["q_proj", "v_proj"],
            task_type="CAUSAL_LM",
            use_dora=True,
        )
        model = get_peft_model(model, config)
        # Minimal single-process process group so FSDP can be constructed.
        os.environ["MASTER_ADDR"] = "localhost"
        os.environ["MASTER_PORT"] = "29501"
        init_process_group(world_size=1, rank=0)
        # check that this does not raise:
        FSDP(model, auto_wrap_policy=fsdp_auto_wrap_policy(model), use_orig_params=False, sync_module_states=True)

    def test_fsdp_auto_wrap_policy_does_not_raise_on_custom_model(self):
        # See #2167
        # Avoid raising on custom models since Trainer uses fsdp_auto_wrap_policy automatically for PEFT + FSDP
        fsdp_auto_wrap_policy(SimpleModel())  # does not raise
class TestBOFT:
    """
    Test that we can correctly use half-precision models with BOFT.
    """

    @require_torch_gpu
    @pytest.mark.single_gpu_tests
    def test_boft_half_linear(self):
        # Check that we can use BoFT with model loaded in half precision
        layer = torch.nn.Linear(160, 160).cuda()
        layer = boft.layer.Linear(layer, "layer", boft_n_butterfly_factor=2).to(dtype=torch.bfloat16)
        x = torch.randn(160, 160, device="cuda", dtype=torch.bfloat16)
        layer(x)  # does not raise

    @require_torch_gpu
    @pytest.mark.single_gpu_tests
    def test_boft_half_conv(self):
        # Same check for the Conv2d BOFT layer in bf16.
        conv = torch.nn.Conv2d(1, 1, 4).cuda()
        conv = boft.layer.Conv2d(conv, "conv", boft_n_butterfly_factor=2).to(dtype=torch.bfloat16)
        x = torch.randn(1, 160, 160, device="cuda", dtype=torch.bfloat16)
        conv(x)  # does not raise
@require_torch_gpu
class TestPTuningReproducibility:
    device = infer_device()

    def test_p_tuning_exactly_reproducible_after_loading(self, tmp_path):
        # See: https://github.com/huggingface/peft/issues/2043#issuecomment-2321522577
        # Ensure that after loading a p-tuning checkpoint, results are exactly reproducible (before the patch, they were
        # only _almost_ identical).

        # The model must be sufficiently large for the effect to be measurable, which is why this test requires is not
        # run on CPU.
        model_id = "facebook/opt-125m"
        inputs = torch.arange(10).view(-1, 1).to(self.device)

        torch.manual_seed(0)
        model = AutoModelForCausalLM.from_pretrained(model_id).to(self.device)
        peft_config = PromptEncoderConfig(task_type="CAUSAL_LM", num_virtual_tokens=20, encoder_hidden_size=128)
        model = get_peft_model(model, peft_config).eval()

        # Reference logits/generation from the freshly created model.
        with torch.inference_mode():
            output_peft = model(inputs).logits
            gen_peft = model.generate(inputs, min_new_tokens=10, max_new_tokens=10)

        model.save_pretrained(tmp_path)
        del model
        # Free GPU memory before reloading to keep the test self-contained.
        torch.cuda.empty_cache()
        gc.collect()

        model = AutoModelForCausalLM.from_pretrained(model_id).to(self.device)
        model = PeftModel.from_pretrained(model, tmp_path)

        with torch.inference_mode():
            output_loaded = model(inputs).logits
            gen_loaded = model.generate(inputs, min_new_tokens=10, max_new_tokens=10)

        # Strict (default-tolerance) comparison: loading must be *exactly* reproducible.
        torch.testing.assert_close(output_loaded, output_peft)
        torch.testing.assert_close(gen_loaded, gen_peft)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU")
@pytest.mark.single_gpu_tests
class TestLowCpuMemUsageDifferentDevices:
    """Test for the low CPU memory usage option for loading PEFT models.

    There are already tests for this in test_initialization.py but here we want to specifically test diverging devices
    for the model and state_dict.
    """

    model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"

    @pytest.mark.parametrize("device_model, device_sd", [("cpu", "cuda"), ("cuda", "cpu")])
    def test_low_cpu_mem_usage_model_model_on_gpu_state_dict_on_cpu_works(self, device_model, device_sd):
        inputs = {"input_ids": torch.randint(0, 100, (1, 10)), "attention_mask": torch.ones(1, 10)}
        inputs = {k: v.to(device_model) for k, v in inputs.items()}

        model = AutoModelForCausalLM.from_pretrained(self.model_id).to(device_model)
        lora_config = LoraConfig(init_lora_weights=False, target_modules="all-linear")
        model = get_peft_model(model, lora_config)
        model.eval()
        # Reference logits from the normally-loaded model.
        logits_not_low_cpu_mem = model(**inputs).logits

        state_dict = get_peft_model_state_dict(model)
        peft_model_state_dict = {}
        # remap the state dict so that it can be correctly loaded, and move weights to the other device
        prefix = "base_model.model."
        for k, v in state_dict.items():
            k = k[len(prefix) :]
            peft_model_state_dict[k] = v.to(device_sd)

        del model

        model = AutoModelForCausalLM.from_pretrained(self.model_id).to(device_model)
        model.eval()
        # low_cpu_mem_usage: adapter weights are created on the meta device and
        # materialized from the state dict, which lives on the *other* device.
        inject_adapter_in_model(lora_config, model, low_cpu_mem_usage=True)
        load_result = set_peft_model_state_dict(model, peft_model_state_dict, low_cpu_mem_usage=True)

        # sanity check: all lora keys are matched
        assert not any("lora" in k for k in load_result.missing_keys)
        assert not any("lora" in k for k in load_result.unexpected_keys)

        logits_low_cpu_mem = model(**inputs).logits

        assert torch.allclose(logits_low_cpu_mem, logits_not_low_cpu_mem)
        # All parameters must have ended up on the model's device, not the state dict's.
        assert {p.device.type for p in model.parameters()} == {device_model}
class TestEvaInitializationGPU:
    """GPU tests for the Eva initialization method."""

    # Constants for test configuration
    COSINE_SIMILARITY_THRESHOLD = 0.75
    NUM_SEEDS = 3
    BATCH_SIZE = 4
    MAX_LENGTH = 256
    LORA_DIM = 8
    LORA_ALPHA = 1
    DEVICE = "cuda"

    @pytest.fixture
    def tokenizer(self):
        tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
        tokenizer.pad_token = tokenizer.eos_token  # gpt2 has no pad token by default
        return tokenizer

    @pytest.fixture
    def dataset(self, tokenizer):
        dataset = load_dataset("ybelkada/english_quotes_copy", split="train")
        # concatenate examples until each reaches at least MAX_LENGTH characters
        examples = []
        example = ""
        for data in dataset:
            if len(example) >= self.MAX_LENGTH:
                examples.append(example)
                example = ""
            example = example + " " + data["quote"]
        dataset = Dataset.from_dict({"text": examples})
        # tokenize
        dataset = dataset.map(
            lambda x: tokenizer(x["text"], padding="max_length", truncation=True, max_length=self.MAX_LENGTH),
            batched=True,
            remove_columns=dataset.column_names,
        )
        dataset.set_format(type="torch")
        return dataset

    @pytest.fixture
    def model(self):
        model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
        model.transformer.h = model.transformer.h[:2]  # truncate to 2 layers
        return model.to(self.DEVICE)

    @pytest.fixture
    def model_bnb(self):
        # 4-bit quantized variant of the same truncated gpt2.
        bnb_config = BitsAndBytesConfig(load_in_4bit=True)
        model = AutoModelForCausalLM.from_pretrained(
            "openai-community/gpt2",
            quantization_config=bnb_config,
            attn_implementation="eager",  # gpt2 doesnt support flash attention
        )
        model.transformer.h = model.transformer.h[:2]  # truncate to 2 layers
        model = prepare_model_for_kbit_training(model)
        return model

    @pytest.fixture
    def model_fixture(self, request):
        # Indirect fixture: resolves to either `model` or `model_bnb` by name.
        return request.getfixturevalue(request.param)

    @pytest.fixture
    def peft_config(self):
        return LoraConfig(
            r=self.LORA_DIM,
            lora_alpha=self.LORA_ALPHA,
            target_modules=["c_attn"],
            init_lora_weights="eva",
            eva_config=EvaConfig(rho=2),
        )

    def is_bnb_model(self, model):
        # quantization_config is only present on quantized (BNB) models.
        return hasattr(model.config, "quantization_config")

    @staticmethod
    def collate_fn(examples):
        # Stack each tokenized field across the batch dimension.
        return {k: torch.stack([v[k] for v in examples], dim=0) for k in examples[0].keys()}

    @pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU")
    @pytest.mark.single_gpu_tests
    @pytest.mark.parametrize("model_fixture", ["model", "model_bnb"], indirect=True)
    def test_eva_initialization_consistency(self, model_fixture, dataset, peft_config):
        """Test that the state dict returned by get_eva_state_dict loaded correctly and is consistent across different seeds based
        on the cosine similarity of the svd components."""
        state_dicts = []
        for seed in range(self.NUM_SEEDS):
            shuffled_dataset = dataset.shuffle(seed=seed)
            dataloader = DataLoader(
                shuffled_dataset,
                batch_size=self.BATCH_SIZE,
                collate_fn=lambda examples: {
                    k: torch.stack([v[k] for v in examples], dim=0) for k in examples[0].keys()
                },
                shuffle=False,
            )
            peft_model = get_peft_model(deepcopy(model_fixture), peft_config)
            initialize_lora_eva_weights(peft_model, dataloader)
            # Only the Eva-initialized lora_A weights are compared.
            state_dicts.append(
                {k: v.cpu() for k, v in peft_model.state_dict().items() if "lora_A.default.weight" in k}
            )

        # Pairwise |cosine similarity| of lora_A rows across every seed pair.
        cos_sims = defaultdict(list)
        for i, j in itertools.combinations(range(self.NUM_SEEDS), 2):
            for k, v1 in state_dicts[i].items():
                v2 = state_dicts[j][k]
                # Eva may assign different ranks per layer; compare the common rows.
                min_size = min(v1.size(0), v2.size(0))
                cos_sims[k].extend(torch.cosine_similarity(v1[:min_size], v2[:min_size], dim=1).abs().tolist())

        mean_cosine_similarities = {k: torch.tensor(v).mean() for k, v in cos_sims.items()}
        for layer_name, mean_cosine_similarity in mean_cosine_similarities.items():
            assert mean_cosine_similarity > self.COSINE_SIMILARITY_THRESHOLD, (
                f"Mean absolute cosine similarity {mean_cosine_similarity:.4f} "
                f"is not greater than {self.COSINE_SIMILARITY_THRESHOLD}"
            )
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU")
@pytest.mark.multi_gpu_tests
class TestPrefixTuning:
    """Prefix tuning must work when the base model is split across multiple devices."""

    def test_prefix_tuning_multiple_devices_decoder_model(self):
        # See issue 2134
        model_id = "hf-internal-testing/tiny-random-MistralForCausalLM"
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
        inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")

        # Explicit device map that forces the model onto two devices.
        device_map = {
            "model.embed_tokens": 0,
            "model.layers.0": 0,
            "model.layers.1": 1,
            "model.norm": 1,
            "lm_head": 1,
        }
        model = AutoModelForCausalLM.from_pretrained(model_id, device_map=device_map)
        # sanity check, as the test passes trivially for a single device
        assert len({p.device for p in model.parameters()}) > 1
        # sanity check: this should work without peft
        model.generate(**inputs)  # does not raise

        peft_config = PrefixTuningConfig(num_virtual_tokens=10, task_type="CAUSAL_LM")
        model = get_peft_model(model, peft_config)
        model.generate(**inputs)  # does not raise

    def test_prefix_tuning_multiple_devices_encoder_decoder_model(self):
        # See issue 2134
        model_id = "hf-internal-testing/tiny-random-T5Model"
        tokenizer = AutoTokenizer.from_pretrained(model_id, padding="left")
        inputs = tokenizer(["A list of colors: red, blue"], return_tensors="pt").to("cuda")

        # Explicit device map splitting encoder and decoder blocks over two devices.
        device_map = {
            "shared": 0,
            "encoder.embed_tokens": 0,
            "encoder.block.0": 0,
            "encoder.block.1": 0,
            "encoder.block.2": 1,
            "encoder.block.3": 1,
            "encoder.block.4": 1,
            "encoder.final_layer_norm": 1,
            "decoder.embed_tokens": 0,
            "decoder.block.0": 0,
            "decoder.block.1": 0,
            "decoder.block.2": 1,
            "decoder.block.3": 1,
            "decoder.block.4": 1,
            "decoder.final_layer_norm": 1,
            "lm_head": 0,
        }
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id, device_map=device_map)
        # sanity check, as the test passes trivially for a single device
        assert len({p.device for p in model.parameters()}) > 1
        # sanity check: this should work without peft
        model.generate(**inputs)  # does not raise

        peft_config = PrefixTuningConfig(num_virtual_tokens=10, task_type="SEQ_2_SEQ_LM")
        model = get_peft_model(model, peft_config)
        model.generate(**inputs)  # does not raise
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires a GPU")
@pytest.mark.single_gpu_tests
class TestHotSwapping:
    """Hot-swapping LoRA weights into a torch.compile'd model must not recompile."""

    def test_hotswapping_compiled_model_does_not_trigger_recompilation(self):
        # TORCH_LOGS makes dynamo print guard/recompile events to stderr, which
        # the subprocess output is then scanned for.
        env = os.environ.copy()
        env["TORCH_LOGS"] = "guards,recompiles"
        here = os.path.dirname(__file__)
        file_name = os.path.join(here, "run_compiled_model_hotswap.py")

        # argv[1] == "1": run the script *with* hotswapping enabled.
        process = subprocess.Popen(
            [sys.executable, file_name, "1"], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )

        # Communicate will read the output and error streams, preventing deadlock
        stdout, stderr = process.communicate()
        exit_code = process.returncode

        # sanity check:
        assert exit_code == 0

        # check that the recompilation message is not present
        assert "__recompiles" not in stderr.decode()

        # contingency check: without hotswapping, we *do* get recompilation
        process = subprocess.Popen(
            [sys.executable, file_name, "0"], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )

        # Communicate will read the output and error streams, preventing deadlock
        stdout, stderr = process.communicate()
        exit_code = process.returncode

        # sanity check:
        assert exit_code == 0

        # check that the recompilation message IS present this time
        assert "__recompiles" in stderr.decode()

    @pytest.mark.xfail(strict=True, reason="Requires hotswap to be implemented in diffusers")
    def test_hotswapping_compiled_diffusion_model_does_not_trigger_recompilation(self):
        # Same check as above, but driving a compiled diffusion model.
        env = os.environ.copy()
        env["TORCH_LOGS"] = "guards,recompiles"
        here = os.path.dirname(__file__)
        file_name = os.path.join(here, "run_compiled_diffusion_model_hotswap.py")

        process = subprocess.Popen(
            [sys.executable, file_name, "1"], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )

        # Communicate will read the output and error streams, preventing deadlock
        stdout, stderr = process.communicate()
        exit_code = process.returncode

        # sanity check:
        assert exit_code == 0

        # check that the recompilation message is not present
        assert "__recompiles" not in stderr.decode()

        # contingency check: without hotswapping, we *do* get recompilation
        process = subprocess.Popen(
            [sys.executable, file_name, "0"], env=env, stdout=subprocess.PIPE, stderr=subprocess.PIPE
        )

        # Communicate will read the output and error streams, preventing deadlock
        stdout, stderr = process.communicate()
        exit_code = process.returncode

        # sanity check:
        assert exit_code == 0

        # check that the recompilation message IS present this time
        assert "__recompiles" in stderr.decode()
|
huggingfaceREPO_NAMEpeftPATH_START.@peft_extracted@peft-main@tests@test_gpu_examples.py@.PATH_END.py
|
{
"filename": "_version.py",
"repo_name": "NickSwainston/pulsar_spectra",
"repo_path": "pulsar_spectra_extracted/pulsar_spectra-main/pulsar_spectra/_version.py",
"type": "Python"
}
|
__version__ = '2.0.4'
|
NickSwainstonREPO_NAMEpulsar_spectraPATH_START.@pulsar_spectra_extracted@pulsar_spectra-main@pulsar_spectra@_version.py@.PATH_END.py
|
{
"filename": "_stream.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/surface/_stream.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
# NOTE: auto-generated plotly graph-object class; keep edits to comments only.
class Stream(_BaseTraceHierarchyType):

    # class properties
    # --------------------
    _parent_path_str = "surface"
    _path_str = "surface.stream"
    _valid_props = {"maxpoints", "token"}

    # maxpoints
    # ---------
    @property
    def maxpoints(self):
        """
        Sets the maximum number of points to keep on the plots from an
        incoming stream. If `maxpoints` is set to 50, only the newest
        50 points will be displayed on the plot.

        The 'maxpoints' property is a number and may be specified as:
          - An int or float in the interval [0, 10000]

        Returns
        -------
        int|float
        """
        return self["maxpoints"]

    @maxpoints.setter
    def maxpoints(self, val):
        self["maxpoints"] = val

    # token
    # -----
    @property
    def token(self):
        """
        The stream id number links a data trace on a plot with a
        stream. See https://chart-studio.plotly.com/settings for more
        details.

        The 'token' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["token"]

    @token.setter
    def token(self, val):
        self["token"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.
        """

    def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
        """
        Construct a new Stream object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.surface.Stream`
        maxpoints
            Sets the maximum number of points to keep on the plots
            from an incoming stream. If `maxpoints` is set to 50,
            only the newest 50 points will be displayed on the
            plot.
        token
            The stream id number links a data trace on a plot with
            a stream. See https://chart-studio.plotly.com/settings
            for more details.

        Returns
        -------
        Stream
        """
        super(Stream, self).__init__("stream")

        # Internal construction path: parent supplies state directly.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.surface.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.surface.Stream`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # Explicit keyword args take precedence over entries in `arg`.
        _v = arg.pop("maxpoints", None)
        _v = maxpoints if maxpoints is not None else _v
        if _v is not None:
            self["maxpoints"] = _v
        _v = arg.pop("token", None)
        _v = token if token is not None else _v
        if _v is not None:
            self["token"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@surface@_stream.py@.PATH_END.py
|
{
"filename": "_sizesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/marker/_sizesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


# Auto-generated validator for the `scatter.marker.sizesrc` property
# (a data-source reference string for `size`).
class SizesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    def __init__(self, plotly_name="sizesrc", parent_name="scatter.marker", **kwargs):
        super(SizesrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "none": changing a src reference does not trigger a plot edit
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@marker@_sizesrc.py@.PATH_END.py
|
{
"filename": "Results_file_save.py",
"repo_name": "astrom-tom/SPARTAN",
"repo_path": "SPARTAN_extracted/SPARTAN-master/spartan/Results_file_save.py",
"type": "Python"
}
|
'''
The SPARTAN Project
-------------------
This module save results in the disk
@author: R. THOMAS
@year: 2016
@place: UV/LAM/UCBJ
@License: GPL v3.0 - see LICENCE.txt
'''
##Python LIB
import os
import sys
##############
###Third party
import numpy
import h5py
###############
def save_to_file_fail(resfile, galaxy, CONF):
    '''
    This method saves the status of the fit if Fitted = 'No' in the main of the
    fit: any existing entry for the object is removed and replaced by a group
    whose 'Fitted' status is 'FAIL'.

    Parameter
    ---------
    resfile     str, path/and/name of the result file
    galaxy      obj, galaxy object (only .ID is used here)
    CONF        obj, configuration from the user

    Return
    ------
    NONE
    '''
    # 'a' = read/write, create if missing; recent h5py requires an explicit mode
    with h5py.File(resfile, 'a') as Res:
        ## we look for the fitting status
        Fitted = str(numpy.array(Res['%s/General/Fitted'%galaxy.ID]))[2:-1]
        ## We update ToFit accordingly to the status reported
        ## in the result file
        if Fitted == 'Fitted' and CONF.FIT['OverFit'].lower() == 'yes':
            ##if we already fitted it, drop the old entry
            del Res['%s'%galaxy.ID]

    ##open the result file and save the status as 'FAIL'
    with h5py.File(resfile, 'a') as Res:
        # the entry may already be gone (deleted above) -> only swallow KeyError
        try:
            del Res['%s'%galaxy.ID]
        except KeyError:
            pass
        Res.create_group(galaxy.ID)
        gen = Res.create_group('%s/General'%galaxy.ID)
        gen.create_dataset('Fitted', data=numpy.string_('FAIL'))
def save_phot(Results, galaxy, CONF):
    '''
    This function saves the results of the studied objet (photometric fit)
    into the result file

    Parameter
    ---------
    Results     str, path to the HDF5 result file
    galaxy      objct, galaxy object with results
    CONF        dict, configuration of the user

    Return
    ------
    NONE
    '''
    ###first we have to check if some data where already in the result file
    deleted = 0
    with h5py.File(Results, 'a') as Res:
        ## we look for the fitting status
        # NOTE(review): `Fitted` is read but never used below; the decision is
        # made on galaxy.status instead — confirm whether this read is needed.
        Fitted = str(numpy.array(Res['%s/General/Fitted'%galaxy.ID]))[2:-1]
        ## We update the result file accordingly to the status reported
        ## in the result file and the overfit choice.
        if galaxy.status == 'Fitted' and CONF.FIT['OverFit'].lower() == 'yes':
            del Res['%s'%galaxy.ID]
            deleted = 1

    ##open the result file and save the results
    # NOTE(review): opened without an explicit mode; recent h5py versions
    # require one ('a' matches the historical default) — confirm h5py version.
    with h5py.File(Results) as Res:
        if deleted == 0:
            # entry was not removed above; remove it now before re-creating
            del Res['%s'%galaxy.ID]

        #-1-# general information
        obj = Res.create_group(galaxy.ID)
        gen = Res.create_group('%s/General'%galaxy.ID)
        gen.create_dataset('Fitted', data=numpy.string_(galaxy.status))
        #[print(numpy.array(gen[i])) for i in list(gen.keys())]

        #-2-# Observable directory (measured photometry of the object)
        Obs = Res.create_group('%s/Observable'%galaxy.ID)
        Obs.create_dataset('Redshift', data = numpy.array(galaxy.Redshift))
        Obs.create_dataset('Npoints', data = numpy.array(galaxy.Nband))
        Obs.create_dataset('waveband', data = numpy.array(galaxy.waveband))
        Obs.create_dataset('obsmag', data = numpy.array(galaxy.obsmag))
        Obs.create_dataset('obserr', data = numpy.array(galaxy.obserr))
        Obs.create_dataset('obsflux', data = numpy.array(galaxy.obsflux))
        Obs.create_dataset('obsfluxerr', data = numpy.array(galaxy.obsfluxerr))
        # strings must be stored as numpy byte strings for HDF5
        Obs.create_dataset('Names_mag', data = [numpy.string_(i) for i in galaxy.Names])
        Obs.create_dataset('Upper_limits', data = [numpy.string_(i) for i in galaxy.uppers])
        #[print(numpy.array(Obs[i])) for i in list(Obs.keys())]

        #-2-# Template directory (best-fit template and its photometry)
        Temp = Res.create_group('%s/Template'%galaxy.ID)
        Temp.create_dataset('Bestchi2', data = galaxy.bestchi2red)
        Temp.create_dataset('Best_template_full', data = galaxy.besttemplate)
        Temp.create_dataset('Bestfit_mag', data = galaxy.bestfit_mag[0])
        Temp.create_dataset('Bestfit_flux', data = galaxy.bestfit_flux)
        Temp.create_dataset('Best_template_wave', data = galaxy.besttemplate_wave)
        #[print(numpy.array(Temp[i])) for i in list(Temp.keys())]

        #-3-# BF Parameter directory (best-fit parameter values)
        ParametersBF = Res.create_group('%s/Parameters_BF'%galaxy.ID)
        for i in list(galaxy.BFparam.keys()):
            ParametersBF.create_dataset(i, data = numpy.array(galaxy.BFparam[i]))

        #-4-# PDF Parameter directory (per-parameter median and +/- 1 sigma,
        #    plus the full PDF/CDF sampled on the parameter grid)
        ParametersPDF = Res.create_group('%s/Parameters_PDF'%galaxy.ID)
        PDFCDF = Res.create_group('%s/PDF_CDF'%galaxy.ID)
        for i in list(galaxy.chi2p.keys()):
            # chi2p[i] layout: (median, +err, -err, grid, pdf, cdf)
            m = galaxy.chi2p[i][0]
            m1 = galaxy.chi2p[i][2]
            p1 = galaxy.chi2p[i][1]
            #print(m, m1, p1)
            grid = galaxy.chi2p[i][3]
            pdf = galaxy.chi2p[i][4]
            cdf = galaxy.chi2p[i][5]
            ParametersPDF.create_dataset(i, data = numpy.array([m, m1, p1]))
            PDFCDF.create_dataset(i, data = numpy.array([pdf, cdf, grid]))

        #-5-# Absolute Magnitude (one dataset per filter name)
        Magabs = Res.create_group('%s/Mag_abs'%galaxy.ID)
        for i in range(len(galaxy.MagAbs['Name'])):
            M = galaxy.MagAbs['Meas'][i]
            Magabs.create_dataset(galaxy.MagAbs['Name'][i], data = numpy.array(M))

    # drop references to help free memory between objects
    del galaxy
    del Magabs, ParametersPDF, PDFCDF, ParametersBF, Temp, gen
def save_spec(Results, galaxy, CONF):
    '''
    This function saves the results of the studied object
    (spectroscopy fit) into the result file.

    Parameter
    ---------
    Results     str, path to the hdf5 result file
    galaxy      obj, galaxy object with results
    CONF        dict, configuration of the user

    Return
    ------
    None
    '''
    ### first we have to check if some data were already in the result file
    deleted = 0
    with h5py.File(Results, 'a') as Res:
        ## we look for the fitting status previously recorded for this
        ## object (raises KeyError if the object was never saved before)
        Fitted = str(numpy.array(Res['%s/General/Fitted'%galaxy.ID]))[2:-1]
        ## We update the result file according to the status reported
        ## in the result file and the overfit choice.
        if galaxy.status == 'Fitted' and CONF.FIT['OverFit'].lower() == 'yes':
            del Res['%s'%galaxy.ID]
            deleted = 1

    ## re-open the result file in append mode and save the results
    ## (bug fix: the mode must be 'a'; recent h5py versions open files
    ##  read-only by default, which would make every write below fail)
    with h5py.File(Results, 'a') as Res:
        if deleted == 0:
            ## old entry was not removed above, remove it now before rewriting
            del Res['%s'%galaxy.ID]

        #-1-# general information
        Res.create_group(galaxy.ID)
        gen = Res.create_group('%s/General'%galaxy.ID)
        gen.create_dataset('Fitted', data=numpy.string_(galaxy.status))

        #-2-# Observable directory: redshift plus one entry per spectrum
        Obs = Res.create_group('%s/Observable'%galaxy.ID)
        Obs.create_dataset('Redshift', data = numpy.array(galaxy.Redshift))
        Obs.create_dataset('Nspec', data = numpy.array(int(CONF.CONF['NSpec'])))
        specs = ['specwave', 'specflux', 'specerr', 'mags', 'mags_flux', 'mags_Leff', 'mags_Tran']
        for i in galaxy.SPECS.keys():
            for j in specs:
                if j == 'mags':
                    ## magnitude names are strings and must be byte-encoded
                    Obs.create_dataset('%s_%s'%(j,i),\
                            data = [numpy.string_(k) for k \
                            in numpy.array(galaxy.__dict__['%s_%s'%(j,i)])])
                else:
                    Obs.create_dataset('%s_%s'%(j,i), \
                            data = numpy.array(galaxy.__dict__['%s_%s'%(j,i)]))
            ### number of points of this spectrum
            Obs.create_dataset('Npoints_%s'%i, \
                    data = numpy.array(len(numpy.array(galaxy.__dict__['specwave_%s'%(i)]))))

        #-3-# BF Parameter directory (best-fit parameter values)
        ParametersBF = Res.create_group('%s/Parameters_BF'%galaxy.ID)
        for i in list(galaxy.BFparam.keys()):
            ParametersBF.create_dataset(i, data = numpy.array(galaxy.BFparam[i]))

        #-4-# Template directory (best-fit template and regridded version)
        Temp = Res.create_group('%s/Template'%galaxy.ID)
        Temp.create_dataset('Bestchi2', data = galaxy.bestchi2red)
        Temp.create_dataset('Best_template_full', data = galaxy.besttemplate)
        Temp.create_dataset('Best_template_wave', data = galaxy.besttemplate_wave)
        Temp.create_dataset('Bestfit_newgrid', data = galaxy.regrid_template)
        Temp.create_dataset('Bestfit_newgrid_wave', data = galaxy.regrid_wave)

        #-5-# PDF Parameter directory
        ParametersPDF = Res.create_group('%s/Parameters_PDF'%galaxy.ID)
        PDFCDF = Res.create_group('%s/PDF_CDF'%galaxy.ID)
        for i in list(galaxy.chi2p.keys()):
            ## chi2p entries: [0]=m, [1]=p1, [2]=m1, [3]=grid, [4]=pdf, [5]=cdf
            ## (presumably value and +/- bounds of the marginalised PDF —
            ##  TODO confirm against the fitting code)
            m = galaxy.chi2p[i][0]
            m1 = galaxy.chi2p[i][2]
            p1 = galaxy.chi2p[i][1]
            grid = galaxy.chi2p[i][3]
            pdf = galaxy.chi2p[i][4]
            cdf = galaxy.chi2p[i][5]
            ParametersPDF.create_dataset(i, data = numpy.array([m, m1, p1]))
            PDFCDF.create_dataset(i, data = numpy.array([pdf, cdf, grid]))
def save_comb(Results, galaxy, CONF):
    '''
    This function saves the results of the studied object
    (combined spectroscopy + photometry fit) into the result file.

    Parameter
    ---------
    Results     str, path to the hdf5 result file
    galaxy      obj, galaxy object with results
    CONF        dict, configuration of the user

    Return
    ------
    None
    '''
    ### first we have to check if some data were already in the result file
    deleted = 0
    with h5py.File(Results, 'a') as Res:
        ## we look for the fitting status previously recorded for this
        ## object (raises KeyError if the object was never saved before)
        Fitted = str(numpy.array(Res['%s/General/Fitted'%galaxy.ID]))[2:-1]
        ## We update the result file according to the status reported
        ## in the result file and the overfit choice.
        if galaxy.status == 'Fitted' and CONF.FIT['OverFit'].lower() == 'yes':
            del Res['%s'%galaxy.ID]
            deleted = 1

    ## re-open the result file in append mode and save the results
    ## (bug fix: the mode must be 'a'; recent h5py versions open files
    ##  read-only by default, which would make every write below fail)
    with h5py.File(Results, 'a') as Res:
        if deleted == 0:
            ## old entry was not removed above, remove it now before rewriting
            del Res['%s'%galaxy.ID]

        #-1-# general information
        Res.create_group(galaxy.ID)
        gen = Res.create_group('%s/General'%galaxy.ID)
        gen.create_dataset('Fitted', data=numpy.string_(galaxy.status))

        #-2-# Observable directory: spectra and photometry
        Obs = Res.create_group('%s/Observable'%galaxy.ID)
        Obs.create_dataset('Redshift', data = numpy.array(galaxy.Redshift))
        Obs.create_dataset('Nspec', data = numpy.array(int(CONF.CONF['NSpec'])))
        Obs.create_dataset('Kept_phot', data = numpy.array(galaxy.kept_phot))
        specs = ['specwave', 'specflux', 'specerr', 'mags', 'mags_flux', 'mags_Leff', 'mags_Tran']
        Npoints_spec = 0
        for i in galaxy.SPECS.keys():
            for j in specs:
                if j == 'mags':
                    ## magnitude names are strings and must be byte-encoded
                    Obs.create_dataset('%s_%s'%(j,i),\
                            data = [numpy.string_(k) for k \
                            in numpy.array(galaxy.__dict__['%s_%s'%(j,i)])])
                else:
                    Obs.create_dataset('%s_%s'%(j,i), \
                            data = numpy.array(galaxy.__dict__['%s_%s'%(j,i)]))
            ### accumulate the total number of spectral points over all spectra
            Npoints_spec += len(numpy.array(galaxy.__dict__['specwave_%s'%(i)]))
        Obs.create_dataset('Npoints_spec', data = numpy.array(Npoints_spec))
        ## photometric observables
        Obs.create_dataset('Npoints_mags', data = numpy.array(galaxy.Nband))
        Obs.create_dataset('waveband', data = numpy.array(galaxy.waveband))
        Obs.create_dataset('obsmag', data = numpy.array(galaxy.obsmag))
        Obs.create_dataset('obserr', data = numpy.array(galaxy.obserr))
        Obs.create_dataset('obsflux', data = numpy.array(galaxy.obsflux))
        Obs.create_dataset('obsfluxerr', data = numpy.array(galaxy.obsfluxerr))
        Obs.create_dataset('Names_mag', data = [numpy.string_(i) for i in galaxy.Names])
        Obs.create_dataset('Upper_limits', data = [numpy.string_(i) for i in galaxy.uppers])

        #-3-# Template directory (best-fit template, regridded and magnitudes)
        Temp = Res.create_group('%s/Template'%galaxy.ID)
        Temp.create_dataset('Bestchi2', data = galaxy.bestchi2red)
        Temp.create_dataset('Best_template_full', data = galaxy.besttemplate)
        Temp.create_dataset('Best_template_wave', data = galaxy.besttemplate_wave)
        Temp.create_dataset('Bestfit_newgrid', data = galaxy.regrid_template)
        Temp.create_dataset('Bestfit_newgrid_wave', data = galaxy.regrid_wave)
        Temp.create_dataset('Bestfit_mag', data = galaxy.bestfit_mag[0])
        Temp.create_dataset('Bestfit_flux', data = galaxy.bestfit_flux)

        #-4-# BF Parameter directory (best-fit parameter values)
        ParametersBF = Res.create_group('%s/Parameters_BF'%galaxy.ID)
        for i in list(galaxy.BFparam.keys()):
            ParametersBF.create_dataset(i, data = numpy.array(galaxy.BFparam[i]))

        #-5-# PDF Parameter directory
        ParametersPDF = Res.create_group('%s/Parameters_PDF'%galaxy.ID)
        PDFCDF = Res.create_group('%s/PDF_CDF'%galaxy.ID)
        for i in list(galaxy.chi2p.keys()):
            ## chi2p entries: [0]=m, [1]=p1, [2]=m1, [3]=grid, [4]=pdf, [5]=cdf
            ## (presumably value and +/- bounds of the marginalised PDF —
            ##  TODO confirm against the fitting code)
            m = galaxy.chi2p[i][0]
            m1 = galaxy.chi2p[i][2]
            p1 = galaxy.chi2p[i][1]
            grid = galaxy.chi2p[i][3]
            pdf = galaxy.chi2p[i][4]
            cdf = galaxy.chi2p[i][5]
            ParametersPDF.create_dataset(i, data = numpy.array([m, m1, p1]))
            PDFCDF.create_dataset(i, data = numpy.array([pdf, cdf, grid]))
|
astrom-tomREPO_NAMESPARTANPATH_START.@SPARTAN_extracted@SPARTAN-master@spartan@Results_file_save.py@.PATH_END.py
|
{
"filename": "_ticksuffix.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/marker/colorbar/_ticksuffix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicksuffixValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``bar.marker.colorbar.ticksuffix`` property."""

    def __init__(
        self, plotly_name="ticksuffix", parent_name="bar.marker.colorbar", **kwargs
    ):
        # Callers may override the edit type; otherwise fall back to
        # the schema default for this property.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@marker@colorbar@_ticksuffix.py@.PATH_END.py
|
{
"filename": "YT_volrender_tutorial2.ipynb",
"repo_name": "geodynamics/Rayleigh",
"repo_path": "Rayleigh_extracted/Rayleigh-main/post_processing/YT_volrender_tutorial2.ipynb",
"type": "Jupyter Notebook"
}
|
# Tutorial 2: Converting Spherical Quantities to Cartesian Quantities before Volume Rendering
This notebook provides an example of how to take Rayleigh Spherical_3D output quantities (e.g. V_r, V_theta, V_phi or B_r, B_theta, B_phi) and convert them into Cartesian quantities (e.g. V_x, V_y, V_z or B_x, B_y, B_z). This works on Spherical_3D data already interpolated onto a uniform Cartesian grid of NxNxN. This is useful particularly for looking at z quantities or using Cartesian quantities in visualization renderers to map streamlines.
Then we show a few simple examples of how to create volume renderings, modify the transfer function, and manipulate the camera angle.
## About yt and installing yt
*This section is under development*
Developed in Python, yt is an analysis and visualization toolkit for volumetric data. To install yt on your machine and activate it, visit this website: [https://yt-project.org/doc/installing.html](https://yt-project.org/doc/installing.html)
## Preparing your Rayleigh Spherical_3D output for volume rendering with yt
*This section is under development*
Rayleigh's Spherical_3D quantities are output in a spherical grid. Many visualization software packages do not fully support spherical grids. One solution is to interpolate the quantities onto a uniform Cartesian grid with dimensions XxYxZ. Quantity values outside the spherical volume are given values of zero and can easily be made transparent using an appropriately chosen transfer function when rendering the visualization.
To prepare your Spherical_3D output: Interpolating Spherical_3D quantities onto a uniform Cartesian grid
1. Copy the directory **/Rayleigh/post_processing/kyle_interp** onto your local system
2. Compile the executable. We use the gcc compiler: **make -f Makefile.gfortran**
3. Create a directory called **vis_test** (or something equally suitable) in which to run the interpolator. Copy the executable **interpolator.app** and the **input** file from **/Rayleigh/post_processing/kyle_interp** there.
4. Create a directory in **vis_test** called **Spherical_3D**. In directory **Spherical_3D** copy the 3D spherical output files you wish to interpolate, including the **_grid** file.
5. In the input file, modify the uniform radial grid resolution **nr** appropriate for your files, choose the resolution **ncube** of the resulting Cartesian cube, and modify the iteration range. When interpolating a single file, **initial_iteration** and **final_iteration** should be the same.
6. Run the interpolation code: **./interpolator.app XXXX** (Where **XXXX** is the four digit quantity code of the quantity you wish to interpolate. For example, to interpolate a radial velocity spherical 3D output of quantity code **0001**, type: **./interpolator.app 0001**)
7. If the code executed correctly, you should have a new file in your **/vis_test/Spherical_3D** directory called **_cube**. These files are what you will import into this notebook.
8. Copy your new **_cube** files into the directory where you will run this Jupyter Notebook.
## Reading in your prepared data and creating simple yt visualizations
*This section is under development. There are known issues with YT that need to be resolved.*
After (1) your yt installation has been activated and (2) you have interpolated your Rayleigh Spherical_3D output onto a uniform Cartesian grid, you are ready to read the data into Python and begin visualizations with yt.
First we will start by defining a class to convert Spherical_3D quantities to Cartesian quantities.
```python
# This class converts from Spherical quantities to Cartesian quantities
import numpy
class cart_field:
def __init__(self,nx, rfile,tfile,pfile):
self.nx = nx
self.nz = nx
self.ny = nx
ny = nx
nz = nx
self.vx = numpy.zeros( (nx,ny,nz), dtype = 'float64' )
self.vy = numpy.zeros( (nx,ny,nz), dtype = 'float64' )
self.vz = numpy.zeros( (nx,ny,nz), dtype = 'float64' )
arr = numpy.fromfile(rfile,dtype='float64')
vr = numpy.reshape(arr,(nx,nx,nx))
arr = numpy.fromfile(tfile,dtype='float64')
vtheta = numpy.reshape(arr,(nx,nx,nx))
arr = numpy.fromfile(pfile,dtype='float64')
vphi = numpy.reshape(arr,(nx,nx,nx))
x = numpy.linspace(-1,1,nx)
y = numpy.linspace(-1,1,nx)
z = numpy.linspace(-1,1,nx)
x2d = numpy.zeros((ny,nx),dtype='float64')
y2d = numpy.zeros((ny,nx),dtype='float64')
# We will generate vx,vy,vz one z-slice at a time.
# Need some 2D support arrays that are functions of x and y
for i in range(ny):
x2d[i,:] = x[:]
for i in range(nx):
y2d[:,i] = y[:]
rcyl = numpy.sqrt(x2d**2 + y2d**2) # cylindrical radius based on x-y
cosphi = x2d/rcyl # cosine of angle from x-axis in x-y plane
sinphi = y2d/rcyl # sine of same angle
for i in range(nz):
zval = z[i]
r2d = numpy.sqrt(zval**2+ x2d**2+y2d**2) # spherical radius (at this z)
costheta = zval/r2d # costheta
sintheta = numpy.sqrt(1.0-costheta**2) #sintheta
# vz = vr*costheta - vtheta*sintheta
self.vz[i,:,:]=vr[i,:,:]*costheta - vtheta[i,:,:]*sintheta
# v_r_cylindrical = vr*sintheta + vtheta*costheta
vrcyl = vr[i,:,:]*sintheta+vtheta[i,:,:]*costheta
#vx = v_r_cylindrical*costheta - vphi*sinphi
self.vx[i,:,:] = vrcyl*cosphi -vphi[i,:,:]*sinphi
# vy = vrcyl*sinphi + vphi*cosphi
self.vy[i,:,:] = vrcyl*sinphi + vphi[i,:,:]*cosphi
```
Read in the interpolated data files and perform the Cartesian conversion.
```python
#This reads in the cubed data files
#All cube data has been interpolated onto a uniform cartesian grid with dimensions 128x128x128
rfile = '32600001_0001_cube' #V_r
tfile = '32600001_0002_cube' #V_theta
pfile = '32600001_0003_cube' #V_phi
velocity = cart_field(128,rfile,tfile,pfile) #Although labeled as 'velocity', B fields can be used here instead
```
Load the data into YT for volume rendering and create a YT scene.
```python
import yt
import numpy as np
cube = velocity.vz #Choose the quantity you want to volume render, here vz
nx = 128
data = dict(velocity = (cube, "cm/s"))
bbox = numpy.array([[-1.5, 1.5], [-1.5, 1.5], [-1.5, 1.5]]) #Define a bounding box
ds = yt.load_uniform_grid(data, cube.shape, bbox=bbox, nprocs=nx) #Load a cube into YT
sc = yt.create_scene(ds, field=('velocity')) #Set up a basic Scene in YT
maxup = np.amax(cube)
minup = np.amin(cube)
print (maxup, minup) #Print min and max values in cube
```
Show the volume rendering and plot the default transfer function.
```python
sc.show() #Show the scene
source = sc[0]
source.tfh.plot(profile_field='velocity') #Plot the default transfer function with the field quantity histogram.
```
Let's create a custom transfer function and plot it. The method outlined below is closer to that of an isosurface render.
```python
#Let's try to render the scene with a new tranfer function that we define ourselves
source = sc[0]
source.set_field('velocity')
source.set_log(False) #Enforce a linear space - appears to work for the transfer function, but not for volume render
#Set the quantity bounds over which to apply the colormap
bounds = (0., 300.)
#Set the bounds of the transfer function
tf = yt.ColorTransferFunction(bounds)
#This is how to add new opacity points to the colormap with a gaussian shape
tf.sample_colormap(50., w=50., colormap='arbre', alpha = 0.25) #Gaussian centered at 50, width 50, and alpha of 0.25
tf.sample_colormap(100., w=50., colormap='arbre')
tf.sample_colormap(200., w=50., colormap='arbre', alpha = 1.5)
tf.sample_colormap(300., w=50., colormap='arbre')
source.tfh.tf = tf
source.tfh.bounds = bounds
source.tfh.plot(profile_field='velocity') #Plot the new transfer function
```
Now render the scene with the new transfer function applied.
```python
# Grab the first render source and set it to use the new transfer function
render_source = sc.get_source()
render_source.transfer_function = tf
sc.render()
sc.show(sigma_clip=2.0) #Attempt a better contrast with sigma clipping
```
Render the same scene but looking down from the North pole. To accomplish this, we must move the camera.
```python
# Try to change view to see something more interesting
# pitch the camera by pi / 4 radians:
sc.camera.pitch(np.pi/4.0) #observe from pole - move camera.
#Note that every time this is run, the camera will move again by pi/4.
sc.render()
sc.show(sigma_clip=2.0)
```
```python
```
|
geodynamicsREPO_NAMERayleighPATH_START.@Rayleigh_extracted@Rayleigh-main@post_processing@YT_volrender_tutorial2.ipynb@.PATH_END.py
|
{
"filename": "_duration.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/slider/transition/_duration.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DurationValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="duration", parent_name="layout.slider.transition", **kwargs
):
super(DurationValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "arraydraw"),
min=kwargs.pop("min", 0),
**kwargs,
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@slider@transition@_duration.py@.PATH_END.py
|
{
"filename": "analyse_Z.ipynb",
"repo_name": "astrockragh/Mangrove",
"repo_path": "Mangrove_extracted/Mangrove-main/analysis/analyse_Z.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import pickle, os, torch
import os.path as osp
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn.metrics import r2_score
from mpl_toolkits.axes_grid1 import make_axes_locatable
```
```python
from matplotlib.ticker import NullFormatter
from scipy import stats
from matplotlib.ticker import NullFormatter
from scipy import stats
def multi_plot(yss, preds):
fig, ax = plt.subplots(2, 3, figsize=(22, 12))
ax = ax.flatten()
# target=r"log($SFR/M_{\odot}/yr$)"
targets=[r"$log(v_{disk}/km/s)$", r"log($M_{cold}/M_{\odot}$)", r"log($SFR/M_{\odot}/yr$)"]
targets=[r"$log(M_{*}/M_{\odot})$", r"log($M_{cold}/M_{\odot}$)", r"$log(Z_{gas})$", r"log($SFR/M_{\odot}/yr$)",
r"log($SFR_{100}/M_{\odot}/yr$)", r"log($M_{BH}/M_{\odot}$)"]
# target=r"$v_{disk}$"
for i in range(len(targets)):
target=targets[i]
ys = yss[:,i]
pred = preds[:,i]
nullfmt = NullFormatter() # no labels
# definitions for the axes
left, width = 0.15, 0.67
bottom, height = 0.0, 0.75
bottom_h = bottom +height
left_h = left + width
# rect_Cbar = [0, bottom, 0.1, height]
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.15]
rect_histy = [left_h, bottom, 0.15+0.07, height]
# rect_cbar = [left_h+0.15+0.06, bottom, 0.05, height]
# start with a rectangular Figure
ax[i].set_axis_off()
# axCbar = plt.axes(rect_Cbar)
axHist = ax[i].inset_axes(rect_scatter)
axHistx = ax[i].inset_axes(rect_histx)
axHisty = ax[i].inset_axes(rect_histy)
# axCbar = ax[i][j].inset_axes(rect_cbar)
# no labels
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
axHistx.yaxis.set_major_formatter(nullfmt)
axHisty.xaxis.set_major_formatter(nullfmt)
bins=50
l=0.0
n_contour=4
# the scatter plot:
vals, x, y, hist =axHist.hist2d( ys, pred,bins=bins, range=[np.percentile(np.hstack([ys,pred]), [0+l,100-l]),
np.percentile(np.hstack([ys,pred]), [0+l,100-l])], norm=mpl.colors.LogNorm(), cmap=mpl.cm.viridis)
X, Y = np.meshgrid((x[1:]+x[:-1])/2, (y[1:]+y[:-1])/2)
axHist.contour(X,Y, np.log(vals.T+1), levels=n_contour, colors='white')
axHist.plot([min(ys),max(ys)],[min(ys),max(ys)], 'k--', label='Perfect correspondance')
axHist.set(xlabel=f'SAM {target}',ylabel=f'GNN {target}')
axHist.xaxis.label.set_fontsize(13)
axHist.yaxis.label.set_fontsize(13)
if i==0:
axHist.legend(fontsize=12, loc='upper left')
X, Y = X[0], Y.T[0]
axHist.set_xlim((min(X), max(X)))
axHist.set_ylim((min(Y), max(Y)))
yhat=r'$\hat{y}$'
#calculate metrics
pct=np.sum(np.abs(ys-pred)<0.2)/len(ys-pred)
r2=r2_score(ys,pred)
rho = np.corrcoef(ys,pred)[0,1]
print('bias', np.mean(ys-pred))
print('std', np.std(ys-pred))
print('rho', rho)
print('r2', r2)
print('pct',pct)
xt=0.45
yt=0.2
dy=0.07
font = {'weight': 'normal',
'size': 15}
axHist.text(xt,yt, f'Bias : {np.mean(ys-pred):.2f} dex', fontdict=font, transform=axHist.transAxes)
axHist.text(xt,yt-dy, r'$\sigma$ : '+f'{np.std(ys-pred):.3f} dex', fontdict=font, transform=axHist.transAxes)
axHist.text(xt,yt-2*dy, r'Pearson: '+f'{rho:.2f}', fontdict=font, transform=axHist.transAxes)
# axHist.text(xt,yt-3*dy, r'$R^2$: '+f'{r2:.3f}', fontdict=font, transform=axHist.transAxes)
# axHist.text(xt,yt-4*dy, '% < 0.2 dex: '+f'{pct*100:.1f}', fontdict=font, transform=axHist.transAxes)
# axHistx.hist(ys[:,n], bins=bins, histtype='step', density=1)
# axy=axHisty.hist(pred[:,n], bins=bins, histtype='step', density=1, orientation='horizontal')
ys_kde = stats.gaussian_kde(ys, 0.1)
pred_kde = stats.gaussian_kde(pred, 0.1)
axHistx.plot(X, ys_kde(X), 'k--', label=f'SAM')
axHisty.plot(pred_kde(Y), Y, "k-.", label=f'GNN')
axHistx.legend(fontsize=12)
axHisty.legend(loc='upper left', bbox_to_anchor=(0.,1.15), fontsize=12)
font = {'family' : 'Serif',
'weight' : 'normal',
'size' : 14}
matplotlib.rc('font', **font)
axHistx.set(title=f'SAM-GNN {target}', )
axHistx.set_xlim(axHist.get_xlim())
axHisty.set_ylim(axHist.get_ylim())
divider = make_axes_locatable(axHisty)
cax = divider.append_axes("right", size="15%", pad=0.18)
divider = make_axes_locatable(axHist)
# cax = divider.append_axes("left", size="15%", pad=0.18)
# Plot vertical colorbar
plt.colorbar(hist, cax=cax)
# plt.show()
# plt.colorbar(hist, ax=axCbar)
# plt.show()
# fig.tight_layout()
# plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.1, hspace=None)
return fig
```
```python
# this is the full tree run
folder = 'results_final_Z_270322'
run = 'Sage_vlarge_all_allt_z0.0_quantile_raw_rm_msdgfh_1_2'
# folder = 'results_final_Z_300322'
# run = 'Sage_vlarge_all_allt_z0.0_quantile_raw_rm_mllteo'
pointer=osp.expanduser(f'~/../../scratch/gpfs/cj1223/GraphResults/{folder}/{run}')
results=pickle.load(open(osp.join(pointer,'result_dict.pkl'), 'rb'))
config=pickle.load(open(osp.join(pointer,'construct_dict.pkl'), 'rb'))
ys, pred = results['low_ys'], results['low_pred']
mstar_ys, mstar_pred = ys[:,0]+9, pred[:,0]+9
# mcold_ys, mcold_pred = ys[:,2]+9, pred[:,2]+9
Zy, Zpred = ys[:,2]+9, pred[:,2]+9
Coldy, Coldpred = ys[:,1]+9, pred[:,1]+9
sfr_ys, sfr_pred = ys[:,3], pred[:,3]
sfr100_ys, sfr100_pred = ys[:,4], pred[:,4]
Mbh_ys, Mbh_pred = ys[:,5]+9, pred[:,5]+9
```
```python
# # this is the final halo run
# folder = 'results_finalhalo_Z_280322'
# run = 'Sage_vlarge_all_t_quantile_raw_rm_final_zvwpvm_1_2'
# pointer=osp.expanduser(f'~/../../scratch/gpfs/cj1223/GraphResults/{folder}/{run}')
# results=pickle.load(open(osp.join(pointer,'result_dict.pkl'), 'rb'))
# config=pickle.load(open(osp.join(pointer,'construct_dict.pkl'), 'rb'))
# ys, pred = results['low_ys'], results['low_pred']
# mstar_ys, mstar_pred = ys[:,0]+9, pred[:,0]+9
# # mcold_ys, mcold_pred = ys[:,2]+9, pred[:,2]+9
# Zy, Zpred = ys[:,2]+9, pred[:,2]+9
# Coldy, Coldpred = ys[:,1]+9, pred[:,1]+9
# sfr_ys, sfr_pred = ys[:,3], pred[:,3]
# sfr100_ys, sfr100_pred = ys[:,4], pred[:,4]
# Mbh_ys, Mbh_pred = ys[:,5]+9, pred[:,5]+9
```
```python
np.corrcoef(Zpred, Coldpred), np.corrcoef(Zpred-Zy, Coldpred-Coldy)
```
(array([[1. , 0.85291465],
[0.85291465, 1. ]]),
array([[1. , 0.69464096],
[0.69464096, 1. ]]))
```python
np.corrcoef(Coldy, Coldpred), np.corrcoef(Zy,Zpred)
```
(array([[1. , 0.95331536],
[0.95331536, 1. ]]),
array([[1. , 0.98463976],
[0.98463976, 1. ]]))
```python
## this may not be the way to go
# Zy=np.log10((10**Zy)/(10**Coldy))
# Zpred=np.log10((10**Zpred)/(10**Coldpred))
# Zpred/=Coldpred
# Zy/=Coldy
```
```python
Zy-=Coldy
Zpred-=Coldpred
```
```python
np.corrcoef(Zpred, Coldpred), np.corrcoef(Zpred-Zy, Coldpred-Coldy)
```
(array([[1. , 0.4673269],
[0.4673269, 1. ]]),
array([[ 1. , -0.41737178],
[-0.41737178, 1. ]]))
```python
np.corrcoef(Coldy, Coldpred), np.corrcoef(Zy,Zpred)
```
(array([[1. , 0.95331536],
[0.95331536, 1. ]]),
array([[1. , 0.97264417],
[0.97264417, 1. ]]))
```python
# # this is the correlated run
# folder = 'results_final_Gauss4d_310122'
# run='Sage_vlarge_all_4t_z0.0_quantile_raw_seqqhl_5_6'
# pointer=osp.expanduser(f'~/../../scratch/gpfs/cj1223/GraphResults/{folder}/{run}')
# results=pickle.load(open(osp.join(pointer,'result_dict.pkl'), 'rb'))
# config=pickle.load(open(osp.join(pointer,'construct_dict.pkl'), 'rb'))
# ys, pred = results['low_ys'], results['low_pred']
# # mstar_ys, mstar_pred = ys[:,0], pred[:,0]
# vdisk_ys, vdisk_pred = ys[:,1], pred[:,1]
# sfr_ys, sfr_pred = ys[:,3], pred[:,3]
```
```python
ys = np.vstack([ mstar_ys, Coldy, Zy, sfr_ys, sfr100_ys, Mbh_ys]).T
pred = np.vstack([mstar_pred, Coldpred, Zpred, sfr_pred, sfr100_pred, Mbh_pred]).T
```
```python
import matplotlib
font = {'family' : 'Serif',
'weight' : 'normal',
'size' : 16}
matplotlib.rc('font', **font)
fig=multi_plot(ys, pred)
#0.16064388
```
bias 0.00021275837
std 0.07269396
rho 0.9969849269779993
r2 0.9939787761927356
pct 0.9807186678352322
bias -0.006112256
std 0.1622766
rho 0.9533153563918364
r2 0.9086030758276337
pct 0.8308501314636284
bias 0.0041905446
std 0.12532122
rho 0.9726441695327039
r2 0.9459046639577691
pct 0.9610683149591771
bias 0.0017956522
std 0.35535666
rho 0.935086789217517
r2 0.874378784247249
pct 0.5688915540384704
bias 0.022473007
std 0.34818822
rho 0.9375466063130855
r2 0.8784238542710784
pct 0.5669080677153006
bias -0.0007372683
std 0.12856358
rho 0.9740467195038096
r2 0.9487591542349237
pct 0.9083444808339868

```python
# Zy, Zpred = ys[:,2], pred[:,2]
# Coldy, Coldpred = ys[:,1]+9, pred[:,1]+9
# Zy/=Coldy
# Zpred/=Coldpred
# # plt.hist2d()
```
```python
# plt.hist2d((Zy*Coldy)**10/Coldy**, (Zpred*Coldpred)**10, bins=100);
```
```python
# fig.savefig('../paper_figures/performance_others.png', bbox_inches='tight')
```
```python
plt.plot((ys[:,1]-pred[:,1])/np.std((ys[:,1]-pred[:,1])),(ys[:,2]-pred[:,2])/np.std((ys[:,2]-pred[:,2])), 'ro')
#corre
# plt.ylim(-0.3,0.2)
```
[<matplotlib.lines.Line2D at 0x2ae739f02160>]

```python
np.corrcoef(np.transpose(ys-pred))
```
array([[ 1. , 0.05145701, 0.2875132 , 0.19346708, 0.19697201,
0.099901 ],
[ 0.05145701, 1. , -0.41737178, 0.73757938, 0.75231507,
-0.06637543],
[ 0.2875132 , -0.41737178, 1. , -0.03423662, -0.04519383,
-0.01208083],
[ 0.19346708, 0.73757938, -0.03423662, 1. , 0.93034466,
-0.06529561],
[ 0.19697201, 0.75231507, -0.04519383, 0.93034466, 1. ,
-0.06592073],
[ 0.099901 , -0.06637543, -0.01208083, -0.06529561, -0.06592073,
1. ]])
```python
ssfr_ys = sfr_ys-mstar_ys
ssfr_pred= sfr_pred-mstar_pred
```
```python
plt.hist2d(ssfr_ys, ssfr_pred, bins=100, norm=mpl.colors.LogNorm(), cmap=mpl.cm.viridis);
```

```python
nbins=20
edges=np.linspace(8, max(mstar_ys),nbins)
centers=(edges[:-1]+edges[1:])/2
frac_ys=[]
for i in range(len(edges)-1):
mask=np.logical_and(edges[i]<=mstar_ys,edges[i+1]>=mstar_ys)
f = sum(ssfr_ys[mask]<-11)/sum(mask)
frac_ys.append(f)
frac_ys=np.array(frac_ys)
```
```python
plt.plot(centers, frac_ys)
```
[<matplotlib.lines.Line2D at 0x2ae739faf460>]

```python
nbins=20
edges=np.linspace(8, max(mstar_pred),nbins)
centers=(edges[:-1]+edges[1:])/2
frac_pred=[]
for i in range(len(edges)-1):
mask=np.logical_and(edges[i]<=mstar_pred,edges[i+1]>=mstar_pred)
f = sum(ssfr_pred[mask]<-11)/sum(mask)
frac_pred.append(f)
frac_pred=np.array(frac_pred)
```
```python
plt.plot(centers, frac_pred)
```
[<matplotlib.lines.Line2D at 0x2ae739f781c0>]

```python
```
|
astrockraghREPO_NAMEMangrovePATH_START.@Mangrove_extracted@Mangrove-main@analysis@analyse_Z.ipynb@.PATH_END.py
|
{
"filename": "plot_routines.py",
"repo_name": "tcassanelli/pyoof",
"repo_path": "pyoof_extracted/pyoof-master/pyoof/plot_routines.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Tomas Cassanelli
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy import interpolate
from astropy.table import Table
from astropy import units as apu
import warnings
import os
import yaml
from .aperture import radiation_pattern, phase
from .aux_functions import uv_ratio
from .math_functions import norm
__all__ = [
'plot_beam', 'plot_beam_data', 'plot_phase', 'plot_phase_data',
'plot_variance', 'plot_fit_path'
]
# TODO: Generalize this functions for multiple d_z
def plot_beam(
    I_coeff, K_coeff, d_z, wavel, illum_func, telgeo, resolution, box_factor,
    plim, angle, title
):
    """
    Beam maps, :math:`P_\\mathrm{norm}(u, v)`, figure given fixed
    ``I_coeff`` coefficients and ``K_coeff`` set of coefficients. It is the
    straightforward result from a least squares minimization
    (`~pyoof.fit_zpoly`). In the classical configuration there are three
    maps, for three radial offsets, :math:`d_z^-`, :math:`0` and
    :math:`d_z^+` (in meters); any number of offsets is supported, with
    the in-focus map assumed to be the middle element of ``d_z``.

    Parameters
    ----------
    I_coeff : `list`
        List which contains 4 parameters, the illumination amplitude,
        :math:`A_{E_\\mathrm{a}}`, the illumination taper,
        :math:`c_\\mathrm{dB}` and the two coordinate offset, :math:`(x_0,
        y_0)`. The illumination coefficients must be listed as follows,
        ``I_coeff = [i_amp, c_dB, x0, y0]``.
    K_coeff : `~numpy.ndarray`
        Constants coefficients, :math:`K_{n\\ell}`, for each of them there is
        only one Zernike circle polynomial, :math:`U^\\ell_n(\\varrho,
        \\varphi)`.
    d_z : `~astropy.units.quantity.Quantity`
        Radial offset :math:`d_z`, added to the sub-reflector in length units.
        This characteristic measurement adds the classical interference
        pattern to the beam maps, normalized squared (field) radiation
        pattern, which is an out-of-focus property. The radial offset list
        must be as follows, ``d_z = [d_z-, 0., d_z+]`` all of them in length
        units.
    wavel : `~astropy.units.quantity.Quantity`
        Wavelength, :math:`\\lambda`, of the observation in length units.
    illum_func : `function`
        Illumination function, :math:`E_\\mathrm{a}(x, y)`, to be evaluated
        with the key ``I_coeff``. The illumination functions available are
        `~pyoof.aperture.illum_pedestal` and `~pyoof.aperture.illum_gauss`.
    telgeo : `list`
        List that contains the blockage distribution, optical path difference
        (OPD) function, and the primary radius (`float`) in meters. The list
        must have the following order, ``telego = [block_dist, opd_func, pr]``.
    resolution : `int`
        Fast Fourier Transform resolution for a rectangular grid. The input
        value has to be greater or equal to the telescope resolution and with
        power of 2 for faster FFT processing. It is recommended a value higher
        than ``resolution = 2 ** 8``.
    box_factor : `int`
        Related to the FFT resolution (**resolution** key), defines the image
        pixel size level. It depends on the primary radius, ``pr``, of the
        telescope, e.g. a ``box_factor = 5`` returns ``x = np.linspace(-5 *
        pr, 5 * pr, resolution)``, an array to be used in the FFT2
        (`~numpy.fft.fft2`).
    plim : `~astropy.units.quantity.Quantity`
        Contains the maximum values for the :math:`u` and :math:`v`
        wave-vectors in angle units. The `~astropy.units.quantity.Quantity`
        must be in the following order, ``plim = [umin, umax, vmin, vmax]``.
        If `None`, limits are computed automatically around the peak of the
        in-focus map.
    angle : `~astropy.units.quantity.Quantity` or `str`
        Angle unit. Power pattern axes.
    title : `str`
        Figure title.

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        The beam maps plotted from the input parameters. Each map with a
        different offset :math:`d_z` value. From left to right, :math:`d_z^-`,
        :math:`0` and :math:`d_z^+`.
    """
    # Number of radial offsets (classically three) and index of the
    # in-focus map (d_z = 0, assumed to sit at the middle of d_z).
    n_dz = d_z.size
    mid = n_dz // 2

    power_norm = np.zeros((n_dz, resolution, resolution), dtype=np.float64)
    u = np.zeros((n_dz, resolution), dtype=np.float64) << apu.rad
    v = np.zeros((n_dz, resolution), dtype=np.float64) << apu.rad

    # One radiation pattern (normalized power) per radial offset.
    for k, _d_z in enumerate(d_z):
        u[k, :], v[k, :], F = radiation_pattern(
            K_coeff=K_coeff,
            I_coeff=I_coeff,
            d_z=_d_z,
            wavel=wavel,
            illum_func=illum_func,
            telgeo=telgeo,
            resolution=resolution,
            box_factor=box_factor
            )

        power_norm[k, ...] = norm(np.abs(F) ** 2)

    # Limits, they need to be transformed to degrees
    if plim is None:

        pr = telgeo[2]  # primary reflector radius
        bw = 1.22 * apu.rad * wavel / (2 * pr)  # Beamwidth radians
        s_bw = bw * 8  # size-beamwidth ratio radians

        # Finding central point for shifted maps (peak of in-focus map)
        uu, vv = np.meshgrid(u[mid, :], v[mid, :])
        u_offset = uu[power_norm[mid, ...] == power_norm[mid, ...].max()][0]
        v_offset = vv[power_norm[mid, ...] == power_norm[mid, ...].max()][0]

        plim = [
            (-s_bw + u_offset).to_value(apu.rad),
            (s_bw + u_offset).to_value(apu.rad),
            (-s_bw + v_offset).to_value(apu.rad),
            (s_bw + v_offset).to_value(apu.rad)
            ] * apu.rad

    plim = plim.to_value(angle)
    plim_u, plim_v = plim[:2], plim[2:]

    subtitle = [
        '$P_{\\textrm{\\scriptsize{norm}}}(u,v)$ $d_z=' +
        str(round(d_z[i].to_value(apu.cm), 3)) + '$ cm' for i in range(n_dz)
        ]

    fig = plt.figure(figsize=uv_ratio(plim_u, plim_v), constrained_layout=True)
    # Top row: one beam map per offset; bottom row: matching colorbars.
    gs = GridSpec(
        nrows=2,
        ncols=n_dz,
        figure=fig,
        width_ratios=[1] * n_dz,
        height_ratios=[1, 0.03],
        wspace=0.03
        )
    ax = [plt.subplot(gs[i]) for i in range(2 * n_dz)]
    # Only the left-most map keeps its y tick labels / axis label.
    for i in range(1, n_dz):
        ax[i].set_yticklabels([])
    ax[0].set_ylabel(f'$v$ {angle}')

    cax = [ax[i + n_dz] for i in range(n_dz)]

    for i in range(n_dz):

        vmin, vmax = power_norm[i, ...].min(), power_norm[i, ...].max()
        extent = [
            u[i, :].to_value(angle).min(), u[i, :].to_value(angle).max(),
            v[i, :].to_value(angle).min(), v[i, :].to_value(angle).max()
            ]
        levels = np.linspace(vmin, vmax, 10)

        im = ax[i].imshow(
            X=power_norm[i, ...],
            extent=extent,
            vmin=vmin,
            vmax=vmax
            )

        ax[i].contour(
            u[i, :].to_value(angle),
            v[i, :].to_value(angle),
            power_norm[i, ...],
            levels=levels,
            colors='k',
            linewidths=0.4
            )

        ax[i].set_title(subtitle[i])
        ax[i].set_xlabel(f'$u$ {angle}')

        # limits don't work with astropy units
        ax[i].set_ylim(*plim_v)
        ax[i].set_xlim(*plim_u)
        ax[i].grid(False)

        plt.colorbar(
            im, cax=cax[i], orientation='horizontal', use_gridspec=True
            )

        cax[i].set_xlabel('Amplitude [arb]')
        cax[i].set_yticklabels([])
        cax[i].yaxis.set_ticks_position('none')

    fig.suptitle(title)

    return fig
def plot_beam_data(
    u_data, v_data, beam_data, d_z, resolution, angle, title, res_mode
):
    """
    Real data beam maps, :math:`P^\\mathrm{obs}(x, y)`, figures given
    3 out-of-focus radial offsets, :math:`d_z`.

    Parameters
    ----------
    u_data : `list`
        :math:`x` axis value for the 3 beam maps in radians. The values have
        to be flatten, in one dimension, and stacked in the same order as the
        ``d_z = [d_z-, 0., d_z+]`` values from each beam map.
    v_data : `list`
        :math:`y` axis value for the 3 beam maps in radians. The values have
        to be flatten, one dimensional, and stacked in the same order as the
        ``d_z = [d_z-, 0., d_z+]`` values from each beam map.
    beam_data : `~numpy.ndarray`
        Amplitude value for the beam map in mJy. The values have to be
        flatten, one dimensional, and stacked in the same order as the ``d_z =
        [d_z-, 0., d_z+]`` values from each beam map. If ``res_mode = False``,
        the beam map will be normalized.
    resolution : `int`
        Fast Fourier Transform resolution for a rectangular grid. The input
        value has to be greater or equal to the telescope resolution and with
        power of 2 for faster FFT processing. It is recommended a value higher
        than ``resolution = 2 ** 8``.
    d_z : `~astropy.units.quantity.Quantity`
        Radial offset :math:`d_z`, added to the sub-reflector in meters. This
        characteristic measurement adds the classical interference pattern to
        the beam maps, normalized squared (field) radiation pattern, which is
        an out-of-focus property. The radial offset list must be as follows,
        ``d_z = [d_z-, 0., d_z+]`` all of them in length units.
    angle : `~astropy.units.quantity.Quantity` or `str`
        Angle unit. Power pattern axes.
    title : `str`
        Figure title.
    res_mode : `bool`
        If `True` the beam map will not be normalized. This feature is used
        to compare the residual outputs from the least squares minimization
        (`~pyoof.fit_zpoly`).

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        Figure from the three observed beam maps. Each map with a different
        offset :math:`d_z` value. From left to right, :math:`d_z^-`, :math:`0`
        and :math:`d_z^+`.
    """
    if not res_mode:
        # Power pattern normalization (per-map, i.e. along axis=1).
        beam_data = norm(beam_data, axis=1)

    # One sub-title per radial offset, in the fixed [d_z-, 0, d_z+] order.
    subtitle = [
        '$P_{\\textrm{\\scriptsize{norm}}}(u,v)$ $d_z=' +
        str(round(d_z[i].to_value(apu.cm), 3)) + '$ cm' for i in range(3)
        ]

    fig = plt.figure(
        figsize=uv_ratio(u_data[0], v_data[0]),
        constrained_layout=True
        )
    # 2 rows x 3 cols: top row holds the three maps, thin bottom row the
    # per-map horizontal color bars.
    gs = GridSpec(
        nrows=2,
        ncols=3,
        figure=fig,
        width_ratios=[1] * 3,
        height_ratios=[1, 0.03],
        wspace=0.03
        )
    ax = [plt.subplot(gs[i]) for i in range(6)]
    # Only the leftmost panel keeps its y tick labels / label.
    ax[1].set_yticklabels([])
    ax[2].set_yticklabels([])
    ax[0].set_ylabel(f'$v$ {angle}')
    cax = [ax[i + 3] for i in range(3)]

    for i in range(3):
        # new (regular) grid for beam_data, needed because the observed
        # samples are scattered.
        u_ng = np.linspace(
            u_data[i, :].to(angle).min(),
            u_data[i, :].to(angle).max(),
            resolution
            )
        v_ng = np.linspace(
            v_data[i, :].to(angle).min(),
            v_data[i, :].to(angle).max(),
            resolution
            )

        beam_ng = interpolate.griddata(
            # coordinates of grid points to interpolate from.
            points=(u_data[i, :].to(angle), v_data[i, :].to(angle)),
            values=beam_data[i, :],
            # coordinates of grid points to interpolate to.
            xi=tuple(np.meshgrid(u_ng, v_ng)),
            method='cubic'
            )

        vmin, vmax = beam_ng.min(), beam_ng.max()
        levels = np.linspace(vmin, vmax, 10)
        extent = [
            u_ng.to_value(angle).min(), u_ng.to_value(angle).max(),
            v_ng.to_value(angle).min(), v_ng.to_value(angle).max()
            ]

        im = ax[i].imshow(X=beam_ng, extent=extent, vmin=vmin, vmax=vmax)
        ax[i].contour(
            u_ng.to_value(angle),
            v_ng.to_value(angle),
            beam_ng,
            levels=levels,
            colors='k',
            linewidths=0.4
            )
        ax[i].set_xlabel(f'$u$ {angle}')
        ax[i].set_title(subtitle[i])
        ax[i].grid(False)

        plt.colorbar(
            im, cax=cax[i], orientation='horizontal', use_gridspec=True
            )
        cax[i].set_xlabel('Amplitude [arb]')
        cax[i].set_yticklabels([])
        cax[i].yaxis.set_ticks_position('none')

    fig.suptitle(title)

    return fig
def plot_phase(K_coeff, pr, piston, tilt, title):
    """
    Aperture phase distribution (phase-error), :math:`\\varphi(x, y)`, figure,
    given the Zernike circle polynomial coefficients, ``K_coeff``, solution
    from the least squares minimization.

    Parameters
    ----------
    K_coeff : `~numpy.ndarray`
        Constants coefficients, :math:`K_{n\\ell}`, for each of them there is
        only one Zernike circle polynomial, :math:`U^\\ell_n(\\varrho,
        \\varphi)`.
    pr : `float`
        Primary reflector radius in length units.
    piston : `bool`
        Boolean to include or exclude the piston coefficient in the aperture
        phase distribution. The Zernike circle polynomials are related to
        piston through :math:`U^{0}_0(\\varrho, \\varphi)`.
    tilt : `bool`
        Boolean to include or exclude the tilt coefficients in the aperture
        phase distribution. The Zernike circle polynomials are related to tilt
        through :math:`U^{-1}_1(\\varrho, \\varphi)` and
        :math:`U^1_1(\\varrho, \\varphi)`.
    title : `str`
        Figure title.

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        Aperture phase distribution parametrized in terms of the Zernike
        circle polynomials, and represented for the telescope's primary
        reflector.
    """
    # Color-bar label records which terms (piston/tilt) were excluded.
    if (not tilt) and (not piston):
        cbartitle = ' '.join((
            '$\\varphi_{\\scriptsize{\\textrm{no-piston, no-tilt}}}(x,y)$',
            'amplitude rad'
            ))
    elif (not tilt) and piston:
        cbartitle = (
            '$\\varphi_{\\scriptsize{\\textrm{no-tilt}}}(x,y)$ amplitude rad'
            )
    elif tilt and (not piston):
        cbartitle = (
            '$\\varphi_{\\scriptsize{\\textrm{no-piston}}}(x,y)$ amplitude rad'
            )
    else:
        cbartitle = '$\\varphi(x, y)$ amplitude rad'

    # Square extent spanning the primary reflector, in meters.
    extent = [-pr.to_value(apu.m), pr.to_value(apu.m)] * 2
    levels = np.linspace(-2, 2, 9)  # radians

    # Evaluate the phase map on the aperture from the Zernike coefficients.
    _x, _y, _phase = phase(K_coeff=K_coeff, pr=pr, tilt=tilt, piston=piston)

    fig, ax = plt.subplots(figsize=(6, 5.8))

    im = ax.imshow(X=_phase.to_value(apu.rad), extent=extent)

    # Partial solution for contour Warning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        ax.contour(
            _x.to_value(apu.m),
            _y.to_value(apu.m),
            _phase.to_value(apu.rad),
            levels=levels,
            colors='k',
            alpha=0.3
            )

    # Thin color bar glued to the right edge of the phase map.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="3%", pad=0.03)
    cb = fig.colorbar(im, cax=cax)
    cb.ax.set_ylabel(cbartitle)

    ax.set_title(title)
    ax.set_ylabel('$y$ m')
    ax.set_xlabel('$x$ m')
    ax.grid(False)

    fig.tight_layout()

    return fig
def plot_phase_data(phase_data, pr, title):
    """
    Aperture phase distribution (phase-error), :math:`\\varphi(x, y)`, figure.
    The plot is made by giving the phase_data in radians and the primary
    reflector in length units. Notice that if the tilt term is not required
    this has to be removed manually from the ``phase_data`` array.

    Parameters
    ----------
    phase_data : `astropy.units.quantity.Quantity`
        Aperture phase distribution data in angle or radian units.
    pr : `astropy.units.quantity.Quantity`
        Primary reflector radius in length units.
    title : `str`
        Figure title.

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        Aperture phase distribution represented for the telescope's primary
        reflector.
    """
    # Regular grid covering [-pr, pr]; assumes phase_data is a square array
    # (both axes use shape[0]).
    _x = np.linspace(-pr, pr, phase_data.shape[0])
    _y = np.linspace(-pr, pr, phase_data.shape[0])

    extent = [-pr.to_value(apu.m), pr.to_value(apu.m)] * 2
    levels = np.linspace(-2, 2, 9)  # radians

    fig, ax = plt.subplots(figsize=(6, 5.8))

    im = ax.imshow(X=phase_data.to_value(apu.rad), extent=extent)

    # Partial solution for contour Warning
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        ax.contour(
            _x.to_value(apu.m),
            _y.to_value(apu.m),
            phase_data.to_value(apu.rad),
            levels=levels,
            colors='k',
            alpha=0.3
            )

    # Thin color bar glued to the right edge of the phase map.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="3%", pad=0.03)
    cb = fig.colorbar(im, cax=cax)
    cbartitle = '$\\varphi(x, y)$ amplitude rad'
    cb.ax.set_ylabel(cbartitle)

    ax.set_title(title)
    ax.set_ylabel('$y$ m')
    ax.set_xlabel('$x$ m')
    ax.grid(False)

    fig.tight_layout()

    return fig
def plot_variance(matrix, order, diag, cbtitle, title):
    """
    Variance-Covariance matrix or Correlation matrix figure. It returns
    the triangle figure with a color amplitude value for each element. Used to
    check/compare the correlation between the fitted parameters in a least
    squares minimization.

    Parameters
    ----------
    matrix : `~numpy.ndarray`
        Two dimensional array containing the Variance-Covariance or
        Correlation function. Output from the fit procedure. The first row
        stores the indices of the fitted parameters; the matrix proper starts
        at row 1 (see ``params_used`` / ``_matrix`` below).
    order : `int`
        Order used for the Zernike circle polynomial, :math:`n`.
    diag : `bool`
        If `True` it will plot the matrix diagonal.
    cbtitle : `str`
        Color bar title.
    title : `str`
        Figure title.

    Returns
    -------
    fig : `~matplotlib.figure.Figure`
        Triangle figure representing Variance-Covariance or Correlation matrix.
    """
    n = order
    # Total number of Zernike circle polynomial coefficients up to order n.
    N_K_coeff = (n + 1) * (n + 2) // 2

    # (l, n) index pairs: for each radial order i, the azimuthal index j runs
    # over -i, -i+2, ..., i.
    ln = [(j, i) for i in range(0, n + 1) for j in range(-i, i + 1, 2)]
    L = np.array(ln)[:, 0]
    N = np.array(ln)[:, 1]

    # Illumination-function parameters first, then one label per K_{n l}.
    params_names = [
        '$A_{E_\\mathrm{a}}$', '$c_\\mathrm{dB}$', 'q', '$x_0$', '$y_0$'
        ]
    for i in range(N_K_coeff):
        params_names.append(
            ''.join(('$K_{', f'{N[i]}', '\\,', f'{L[i]}', '}$'))
            )
    params_names = np.array(params_names)

    # First row of ``matrix`` encodes which parameters were actually fitted.
    params_used = [int(i) for i in matrix[:1][0]]
    _matrix = matrix[1:]
    x_ticks, y_ticks = _matrix.shape
    extent = [0, x_ticks, 0, y_ticks]

    if diag:
        # k=-1: mask strictly below the diagonal, keeping the diagonal itself.
        k = -1
        # idx represents the ignored elements
        labels_x = params_names[params_used]
        labels_y = labels_x[::-1]
    else:
        # k=0: the diagonal is masked as well, so drop one label per axis.
        k = 0
        # idx represents the ignored elements
        labels_x = params_names[params_used][:-1]
        labels_y = labels_x[::-1][:-1]

    # selecting half covariance
    mask = np.tri(_matrix.shape[0], k=k)
    matrix_mask = np.ma.array(_matrix, mask=mask).T
    # mask out the lower triangle

    fig, ax = plt.subplots()

    # get rid of the frame
    for spine in plt.gca().spines.values():
        spine.set_visible(False)

    im = ax.imshow(
        X=matrix_mask,
        extent=extent,
        vmax=_matrix.max(),
        vmin=_matrix.min(),
        cmap=plt.cm.Reds,
        interpolation='nearest',
        origin='upper'
        )

    # Color bar with scientific-notation offset shown on the left.
    divider = make_axes_locatable(ax)
    cax = divider.append_axes("right", size="3%", pad=0.03)
    cb = fig.colorbar(im, cax=cax)
    cb.formatter.set_powerlimits((0, 0))
    cb.ax.yaxis.set_offset_position('left')
    cb.update_ticks()
    cb.ax.set_ylabel(cbtitle)

    ax.set_title(title)
    # Center the parameter labels on the matrix cells.
    ax.set_xticks(np.arange(x_ticks) + 0.5)
    ax.set_xticklabels(labels_x, rotation='vertical')
    ax.set_yticks(np.arange(y_ticks) + 0.5)
    ax.set_yticklabels(labels_y)
    ax.grid(False)

    fig.tight_layout()

    return fig
def plot_fit_path(
    path_pyoof_out, order, illum_func, telgeo, angle='deg', plim=None,
    save=False
):
    """
    Plot all important figures after a least squares minimization.
    TODO: Change all information to be read from the pyoof_out directory.

    Parameters
    ----------
    path_pyoof_out : `str`
        Path to the pyoof output, ``'pyoof_out/directory'``.
    order : `int`
        Order used for the Zernike circle polynomial, :math:`n`.
    illum_func : `function`
        Illumination function, :math:`E_\\mathrm{a}(x, y)`, to be evaluated
        with the key ``I_coeff``. The illumination functions available are
        `~pyoof.aperture.illum_pedestal` and `~pyoof.aperture.illum_gauss`.
    telgeo : `list`
        List that contains the blockage distribution, optical path difference
        (OPD) function, and the primary radius (`float`) in meters. The list
        must have the following order, ``telego = [block_dist, opd_func, pr]``.
    angle : `~astropy.units.quantity.Quantity` or `str`
        Angle unit. Power pattern axes.
    plim : `~astropy.units.quantity.Quantity`
        Contains the maximum values for the :math:`u` and :math:`v`
        wave-vectors in angle units. The `~astropy.units.quantity.Quantity`
        must be in the following order, ``plim = [umin, umax, vmin, vmax]``.
    save : `bool`
        If `True`, it stores all plots in the ``'pyoof_out/directory'``
        directory.

    Returns
    -------
    fig_beam : `~matplotlib.figure.Figure`
        The three beam maps plotted from the input parameters. Each map with a
        different offset :math:`d_z` value. From left to right, :math:`d_z^-`,
        :math:`0` and :math:`d_z^+`.
    fig_phase : `~matplotlib.figure.Figure`
        Aperture phase distribution for the Zernike circle polynomials for the
        telescope primary reflector.
    fig_res : `~matplotlib.figure.Figure`
        Figure from the three observed beam maps residual. Each map with a
        different offset :math:`d_z` value. From left to right, :math:`d_z^-`,
        :math:`0` and :math:`d_z^+`.
    fig_data : `~matplotlib.figure.Figure` or `None`
        Figure from the three observed beam maps. Each map with a different
        offset :math:`d_z` value. From left to right, :math:`d_z^-`, :math:`0`
        and :math:`d_z^+`. Only produced for ``order == 1``; `None` otherwise.
    fig_cov : `~matplotlib.figure.Figure`
        Triangle figure representing Variance-Covariance matrix.
    fig_corr : `~matplotlib.figure.Figure`
        Triangle figure representing Correlation matrix.

    Raises
    ------
    FileNotFoundError
        If ``path_pyoof_out`` does not exist on disk.
    """
    # BUGFIX: the previous guard was ``try: path_pyoof_out except NameError``,
    # which can never trigger for a function parameter (it is always bound).
    # Validate the directory on disk instead and fail early with a clear
    # error, rather than crashing later inside Table.read.
    if not os.path.isdir(path_pyoof_out):
        raise FileNotFoundError(
            f'pyoof directory does not exist: {path_pyoof_out}'
        )

    path_plot = os.path.join(path_pyoof_out, 'plots')
    if not os.path.exists(path_plot):
        os.makedirs(path_plot)

    # Reading least squares minimization output
    n = order
    params = Table.read(
        os.path.join(path_pyoof_out, f'fitpar_n{n}.csv'), format='ascii'
        )

    with open(os.path.join(path_pyoof_out, 'pyoof_info.yml'), 'r') as infile:
        pyoof_info = yaml.load(infile, Loader=yaml.Loader)

    obs_object = pyoof_info['obs_object']
    meanel = round(pyoof_info['meanel'], 2)
    resolution = pyoof_info['fft_resolution']
    box_factor = pyoof_info['box_factor']

    # Beam and residual
    beam_data = np.genfromtxt(os.path.join(path_pyoof_out, 'beam_data.csv'))
    res = np.genfromtxt(os.path.join(path_pyoof_out, f'res_n{n}.csv'))

    u_data = np.genfromtxt(
        os.path.join(path_pyoof_out, 'u_data.csv')) * apu.rad
    v_data = np.genfromtxt(
        os.path.join(path_pyoof_out, 'v_data.csv')) * apu.rad

    wavel = pyoof_info['wavel'] * apu.m
    d_z = np.array(pyoof_info['d_z']) * apu.m
    pr = pyoof_info['pr'] * apu.m

    # First 5 entries are the illumination coefficients; the rest are K_{nl}.
    K_coeff = params['parfit'][5:]

    # Covariance and Correlation matrix
    cov = np.genfromtxt(os.path.join(path_pyoof_out, f'cov_n{n}.csv'))
    corr = np.genfromtxt(os.path.join(path_pyoof_out, f'corr_n{n}.csv'))

    # The observed-data figure is identical for every order, so it is only
    # produced once, for the lowest order n = 1.
    fig_data = None
    if n == 1:
        fig_data = plot_beam_data(
            u_data=u_data,
            v_data=v_data,
            beam_data=beam_data,
            d_z=d_z,
            resolution=resolution,
            title='{} observed power pattern $\\alpha={}$ deg'.format(
                obs_object, meanel),
            angle=angle,
            res_mode=False
            )

    fig_beam = plot_beam(
        I_coeff=params['parfit'][:5],
        K_coeff=K_coeff,
        title='{} fit power pattern $n={}$ $\\alpha={}$ degrees'.format(
            obs_object, n, meanel
            ),
        d_z=d_z,
        wavel=wavel,
        illum_func=illum_func,
        telgeo=telgeo,
        plim=plim,
        angle=angle,
        resolution=resolution,
        box_factor=box_factor
        )

    fig_phase = plot_phase(
        K_coeff=K_coeff,
        title=(
            '{} phase-error $d_z=\\pm {}$ cm $n={}$ $\\alpha={}$ deg'
            ).format(obs_object, round(d_z[2].to_value(apu.cm), 3), n, meanel),
        pr=pr,
        piston=False,
        tilt=False
        )

    fig_res = plot_beam_data(
        u_data=u_data,
        v_data=v_data,
        beam_data=res,
        d_z=d_z,
        resolution=resolution,
        title='{} residual $n={}$'.format(obs_object, n),
        angle=angle,
        res_mode=True
        )

    fig_cov = plot_variance(
        matrix=cov,
        order=n,
        title='{} variance-covariance matrix $n={}$'.format(obs_object, n),
        cbtitle='$\\sigma_{ij}^2$',
        diag=True,
        )

    fig_corr = plot_variance(
        matrix=corr,
        order=n,
        title='{} correlation matrix $n={}$'.format(obs_object, n),
        cbtitle='$\\rho_{ij}$',
        diag=True,
        )

    if save:
        fig_beam.savefig(os.path.join(path_plot, f'fitbeam_n{n}.pdf'))
        fig_phase.savefig(os.path.join(path_plot, f'fitphase_n{n}.pdf'))
        fig_res.savefig(os.path.join(path_plot, f'residual_n{n}.pdf'))
        fig_cov.savefig(os.path.join(path_plot, f'cov_n{n}.pdf'))
        fig_corr.savefig(os.path.join(path_plot, f'corr_n{n}.pdf'))

        if n == 1:
            fig_data.savefig(os.path.join(path_plot, 'obsbeam.pdf'))

    # BUGFIX: the docstring always promised these figures but the function
    # returned None. Returning them is backward-compatible (callers that
    # ignored the old None return are unaffected).
    return fig_beam, fig_phase, fig_res, fig_data, fig_cov, fig_corr
|
tcassanelliREPO_NAMEpyoofPATH_START.@pyoof_extracted@pyoof-master@pyoof@plot_routines.py@.PATH_END.py
|
{
"filename": "autocall.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py2/IPython/core/autocall.py",
"type": "Python"
}
|
# encoding: utf-8
"""
Autocall capabilities for IPython.core.
Authors:
* Brian Granger
* Fernando Perez
* Thomas Kluyver
Notes
-----
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class IPyAutocall(object):
    """Base class for objects that IPython always autocalls.

    Instances are invoked regardless of the 'autocall' configuration
    variable; subclass this to implement macro-like mechanisms.
    """

    # Current IPython instance; populated lazily via __init__ or set_ip.
    _ip = None
    # Whether the input line should be rewritten when autocalled.
    rewrite = True

    def __init__(self, ip=None):
        self._ip = ip

    def set_ip(self, ip):
        """Bind ``_ip`` to the current IPython instance before a call.

        Override this method if you don't want this to happen.
        """
        self._ip = ip


class ExitAutocall(IPyAutocall):
    """Autocallable added to the user namespace so that ``exit``, ``exit()``,
    ``quit`` or ``quit()`` are all valid ways to close the shell."""

    # Leave the user's input line untouched when autocalling.
    rewrite = False

    def __call__(self):
        self._ip.ask_exit()


class ZMQExitAutocall(ExitAutocall):
    """Exit IPython. Autocallable, so it needn't be explicitly called.

    Parameters
    ----------
    keep_kernel : bool
        If True, leave the kernel alive. Otherwise, tell the kernel to exit
        too (default).
    """

    def __call__(self, keep_kernel=False):
        self._ip.keepkernel_on_exit = keep_kernel
        self._ip.ask_exit()
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py2@IPython@core@autocall.py@.PATH_END.py
|
{
"filename": "model_base.py",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/alembic/models/rev_f9a742bb2297/base/model_base.py",
"type": "Python"
}
|
from sqlalchemy import MetaData
from sqlalchemy.orm import DeclarativeBase
from .convention import NAMING_CONVENTION
from .repr_ import ReprMixin
# Shared MetaData carrying the project-wide constraint naming convention so
# that autogenerated constraint/index names are deterministic.
metadata = MetaData(naming_convention=dict(NAMING_CONVENTION))


class Model(ReprMixin, DeclarativeBase):
    """Declarative base class for the ORM models of this revision."""

    metadata = metadata
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@alembic@models@rev_f9a742bb2297@base@model_base.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "changhoonhahn/pySpectrum",
"repo_path": "pySpectrum_extracted/pySpectrum-master/pyspectrum/__init__.py",
"type": "Python"
}
|
import os
# Path to data files required for cosmos
_PYSPEC_DAT_DIR=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'dat')
def dat_dir():
return _PYSPEC_DAT_DIR
|
changhoonhahnREPO_NAMEpySpectrumPATH_START.@pySpectrum_extracted@pySpectrum-master@pyspectrum@__init__.py@.PATH_END.py
|
{
"filename": "ftrl_test.py",
"repo_name": "fchollet/keras",
"repo_path": "keras_extracted/keras-master/keras/src/optimizers/ftrl_test.py",
"type": "Python"
}
|
# flake8: noqa
import numpy as np
from keras.src import backend
from keras.src import testing
from keras.src.optimizers.ftrl import Ftrl
class FtrlTest(testing.TestCase):
    """Unit tests for the FTRL optimizer."""

    def test_config(self):
        # Round-trip serialize/deserialize with every hyper-parameter set to
        # a non-default value, so all of them are exercised.
        optimizer = Ftrl(
            learning_rate=0.05,
            learning_rate_power=-0.2,
            initial_accumulator_value=0.4,
            l1_regularization_strength=0.05,
            l2_regularization_strength=0.15,
            l2_shrinkage_regularization_strength=0.01,
            beta=0.3,
        )
        self.run_class_serialization_test(optimizer)

    def test_single_step(self):
        # One update step against precomputed expected variable values.
        optimizer = Ftrl(learning_rate=0.5)
        grads = np.array([1.0, 6.0, 7.0, 2.0])
        vars = backend.Variable([1.0, 2.0, 3.0, 4.0])
        optimizer.apply_gradients(zip([grads], [vars]))
        self.assertAllClose(
            vars, [0.2218, 1.3954, 2.3651, 2.8814], rtol=1e-4, atol=1e-4
        )

    def test_correctness_with_golden(self):
        # Five consecutive steps compared element-wise against golden values.
        # NOTE(review): provenance of the golden table is not recorded here —
        # presumably generated from a reference FTRL implementation.
        optimizer = Ftrl(
            learning_rate=0.05,
            learning_rate_power=-0.2,
            initial_accumulator_value=0.4,
            l1_regularization_strength=0.05,
            l2_regularization_strength=0.15,
            l2_shrinkage_regularization_strength=0.01,
            beta=0.3,
        )

        x = backend.Variable(np.ones([10]))
        grads = np.arange(0.1, 1.1, 0.1)
        first_grads = np.full((10,), 0.01)

        # fmt: off
        golden = np.array(
            [[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
            [-0.0034, -0.0077, -0.0118, -0.0157, -0.0194, -0.023, -0.0263, -0.0294, -0.0325, -0.0354],
            [-0.0078, -0.0162, -0.0242, -0.0317, -0.0387, -0.0454, -0.0516, -0.0575, -0.0631, -0.0685],
            [-0.0121, -0.0246, -0.0363, -0.0472, -0.0573, -0.0668, -0.0757, -0.0842, -0.0922, -0.0999],
            [-0.0164, -0.0328, -0.0481, -0.0623, -0.0753, -0.0875, -0.099, -0.1098, -0.1201, -0.1299]]
        )
        # fmt: on

        optimizer.apply_gradients(zip([first_grads], [x]))
        for i in range(5):
            # Check state *before* applying the i-th gradient update.
            self.assertAllClose(x, golden[i], rtol=5e-4, atol=5e-4)
            optimizer.apply_gradients(zip([grads], [x]))

    def test_clip_norm(self):
        # Gradient is rescaled to unit L2 norm.
        optimizer = Ftrl(clipnorm=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [2**0.5 / 2, 2**0.5 / 2])

    def test_clip_value(self):
        # Each gradient component is clipped element-wise to [-1, 1].
        optimizer = Ftrl(clipvalue=1)
        grad = [np.array([100.0, 100.0])]
        clipped_grad = optimizer._clip_gradients(grad)
        self.assertAllClose(clipped_grad[0], [1.0, 1.0])
|
fcholletREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@optimizers@ftrl_test.py@.PATH_END.py
|
{
"filename": "flows.py",
"repo_name": "pyro-ppl/numpyro",
"repo_path": "numpyro_extracted/numpyro-master/numpyro/distributions/flows.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from jax import lax
import jax.numpy as jnp
from numpyro.distributions.constraints import real_vector
from numpyro.distributions.transforms import Transform
from numpyro.util import fori_loop
def _clamp_preserve_gradients(x, min, max):
return x + lax.stop_gradient(jnp.clip(x, min, max) - x)
# adapted from https://github.com/pyro-ppl/pyro/blob/dev/pyro/distributions/transforms/iaf.py
# adapted from https://github.com/pyro-ppl/pyro/blob/dev/pyro/distributions/transforms/iaf.py
class InverseAutoregressiveTransform(Transform):
    """
    An implementation of Inverse Autoregressive Flow, using Eq (10) from Kingma et al., 2016,

        :math:`\\mathbf{y} = \\mu_t + \\sigma_t\\odot\\mathbf{x}`

    where :math:`\\mathbf{x}` are the inputs, :math:`\\mathbf{y}` are the outputs, :math:`\\mu_t,\\sigma_t`
    are calculated from an autoregressive network on :math:`\\mathbf{x}`, and :math:`\\sigma_t>0`.

    **References**

    1. *Improving Variational Inference with Inverse Autoregressive Flow* [arXiv:1606.04934],
       Diederik P. Kingma, Tim Salimans, Rafal Jozefowicz, Xi Chen, Ilya Sutskever, Max Welling
    """

    domain = real_vector
    codomain = real_vector

    def __init__(
        self, autoregressive_nn, log_scale_min_clip=-5.0, log_scale_max_clip=3.0
    ):
        """
        :param autoregressive_nn: an autoregressive neural network whose forward call returns a real-valued
            mean and log scale as a tuple
        :param log_scale_min_clip: lower clamp applied to the predicted log scale,
            keeping :math:`\\sigma_t` bounded away from 0
        :param log_scale_max_clip: upper clamp applied to the predicted log scale
        """
        self.arn = autoregressive_nn
        self.log_scale_min_clip = log_scale_min_clip
        self.log_scale_max_clip = log_scale_max_clip

    def __call__(self, x):
        """
        :param numpy.ndarray x: the input into the transform
        """
        return self.call_with_intermediates(x)[0]

    def call_with_intermediates(self, x):
        # Forward pass: y = scale * x + mean, with the log scale clamped to a
        # numerically safe range (gradients still flow through the clamp).
        mean, log_scale = self.arn(x)
        log_scale = _clamp_preserve_gradients(
            log_scale, self.log_scale_min_clip, self.log_scale_max_clip
        )
        scale = jnp.exp(log_scale)
        # log_scale is returned as the intermediate so log_abs_det_jacobian
        # can reuse it without re-running the network.
        return scale * x + mean, log_scale

    def _inverse(self, y):
        """
        :param numpy.ndarray y: the output of the transform to be inverted
        """

        # NOTE: Inversion is an expensive operation that scales in the dimension of the input
        def _update_x(i, x):
            # Each pass fixes one more coordinate of x, exploiting the
            # autoregressive structure of the network.
            mean, log_scale = self.arn(x)
            inverse_scale = jnp.exp(
                -_clamp_preserve_gradients(
                    log_scale, min=self.log_scale_min_clip, max=self.log_scale_max_clip
                )
            )
            x = (y - mean) * inverse_scale
            return x

        # One fixed-point iteration per event dimension is sufficient.
        x = fori_loop(0, y.shape[-1], _update_x, jnp.zeros(y.shape))
        return x

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        """
        Calculates the elementwise determinant of the log jacobian.

        :param numpy.ndarray x: the input to the transform
        :param numpy.ndarray y: the output of the transform
        """
        if intermediates is None:
            # Recompute the clamped log scale from scratch.
            log_scale = self.arn(x)[1]
            log_scale = _clamp_preserve_gradients(
                log_scale, self.log_scale_min_clip, self.log_scale_max_clip
            )
            return log_scale.sum(-1)
        else:
            # Reuse the log scale cached by call_with_intermediates.
            log_scale = intermediates
            return log_scale.sum(-1)

    def tree_flatten(self):
        # The network is carried as static auxiliary data (not traced leaves).
        return (self.log_scale_min_clip, self.log_scale_max_clip), (
            ("log_scale_min_clip", "log_scale_max_clip"),
            {"arn": self.arn},
        )

    def __eq__(self, other):
        if not isinstance(other, InverseAutoregressiveTransform):
            return False
        # Networks are compared by identity; clip bounds by value.
        return (
            (self.arn is other.arn)
            & jnp.array_equal(self.log_scale_min_clip, other.log_scale_min_clip)
            & jnp.array_equal(self.log_scale_max_clip, other.log_scale_max_clip)
        )
class BlockNeuralAutoregressiveTransform(Transform):
    """
    An implementation of Block Neural Autoregressive flow.

    **References**

    1. *Block Neural Autoregressive Flow*,
       Nicola De Cao, Ivan Titov, Wilker Aziz
    """

    domain = real_vector
    codomain = real_vector

    def __init__(self, bn_arn):
        self.bn_arn = bn_arn

    def __call__(self, x):
        """
        :param numpy.ndarray x: the input into the transform
        """
        value, _ = self.call_with_intermediates(x)
        return value

    def call_with_intermediates(self, x):
        # The network computes the transformed value and the elementwise
        # log-determinant in a single forward pass.
        return self.bn_arn(x)

    def _inverse(self, y):
        raise NotImplementedError(
            "Block neural autoregressive transform does not have an analytic"
            " inverse implemented."
        )

    def log_abs_det_jacobian(self, x, y, intermediates=None):
        """
        Calculates the elementwise determinant of the log jacobian.

        :param numpy.ndarray x: the input to the transform
        :param numpy.ndarray y: the output of the transform
        """
        # Reuse the cached log-determinant when available; otherwise run the
        # network once more.
        logdet = self.bn_arn(x)[1] if intermediates is None else intermediates
        return logdet.sum(-1)

    def tree_flatten(self):
        # The network is static auxiliary data; there are no traced leaves.
        return (), ((), {"bn_arn": self.bn_arn})

    def __eq__(self, other):
        return (
            isinstance(other, BlockNeuralAutoregressiveTransform)
            and self.bn_arn is other.bn_arn
        )
|
pyro-pplREPO_NAMEnumpyroPATH_START.@numpyro_extracted@numpyro-master@numpyro@distributions@flows.py@.PATH_END.py
|
{
"filename": "pacs.py",
"repo_name": "astroumd/miriad",
"repo_path": "miriad_extracted/miriad-master/src/scripts/python/pacs.py",
"type": "Python"
}
|
#! /usr/bin/env python
#
# use this if you need just to produce offline figures
# import matplotlib
# matplotlib.use('Agg')
import numpy as np
#import matplotlib.pyplot as plt
import matplotlib.pylab as plt
#
from scipy.interpolate import spline
import pylab
__version__ = "PACS data reduction script helper functions: $Id$"
# the buddy pairs in the different campains; note these are 1-based antenna numbers!!!
buddy_b09 = [(2,21),(4,23),(5,20),(6,18),(8,19),(9,22),(13,16),(15,17)]
buddy_a09 = [ ]
def sexa2dec(dms):
    """
    convert an D:M:S string to D, also works for H:M:S

    BUGFIX: a leading minus sign is now handled correctly. The previous
    term-by-term sum added the (positive) minutes/seconds fractions to a
    negative degree value, e.g. "-04:30:00" came out as -3.5 instead of
    -4.5. The sign is now factored out before combining the components.

    Inputs:
       dms      D:M:S string, e.g. "04:05:22.3"; "D:M" and plain "D"
                forms are also accepted, with an optional leading sign
    Returns:
       converted floating point value of D:M:S
    """
    s = dms.strip()
    sign = -1.0 if s.startswith('-') else 1.0
    if s[:1] in ('+', '-'):
        s = s[1:]
    w = s.split(':')
    if len(w) == 3:
        return sign * (float(w[0]) + (float(w[1]) + float(w[2])/60.0)/60.0)
    elif len(w) == 2:
        return sign * (float(w[0]) + float(w[1])/60.0)
    elif len(w) == 1:
        return sign * float(w[0])
    else:
        return 0.0
def avector(a):
    """convert ascii list into float list"""
    return [float(item) for item in a]
def unwrap(a, doit=True):
    """
    align phases - see e.g. MIRIAD::gpplt.GetPhase()

    Mutates ``a`` in place (each element is shifted by a multiple of 360
    to stay near the running reference) and also returns it. With
    ``doit=False`` the list is returned untouched.
    """
    if not doit:
        return a
    ref = a[0]
    for i, value in enumerate(a):
        # Remove whole 360-degree wraps relative to the running reference.
        a[i] = value - 360 * nint((value - ref) / 360.0)
        # Reference follows the data as the average of the last two values.
        ref = 0.5 * (a[i] + ref)
    return a


def nint(x):
    """
    get nearest integer, rounding halves away from zero.
    In python2.6 you can use to_integral_value
    """
    return int(x + 0.5) if x > 0 else int(x - 0.5)
def rgplist(file):
    """
    This reads the ascii output of a listing from gplist options=phase
    which looks roughly as follows:

    GpList: version 18-mar-09
    Found gain entries for 15 antennas.
    The phase gain values listed in the table are:
    Time     Anten 1    2    3    4    5    6    7    8    9   10   11   12   13   14   15
    05:08:17.0   0  111    0 -151   48    4  102 -118   45    0  -16 -135  112  -68   26
    05:32:21.0   0   96    0 -152   53  -13  101 -132   33    0  -30 -138   97  -90    9
    05:56:29.5   0  121    0 -147   66  -15   83 -135   18    0  -55 -146   88  -98   18
    ...

    Inputs:
       file    name of the gplist ascii listing
    Returns:
       (nants, t, r) tuple where nants is the number of antennas, t a 1-D
       array of decimal times, and r a (nants, ntimes) array of phases
       (one row per antenna, transposed from the file layout)
    """
    fp = open(file)
    lines = fp.readlines()
    fp.close()
    inHeader = True
    t=[]
    r=[]
    nants = 0
    for l in lines:
        w = l.split()
        if inHeader:
            # Skip everything until the column-header row starting 'Time'.
            if len(w) > 0 and w[0] == 'Time':
                inHeader = False
        else:
            # Lock the antenna count from the first data row, then require
            # every subsequent row to match it.
            if nants == 0:
                nants = len(w)-1;
            else:
                if nants != len(w)-1:
                    print "Bad line with different antenna count"
                    return 0
            # First column is the timestamp, the rest per-antenna phases.
            t.append( sexa2dec(w[0]) )
            r.append( avector(w[1:]) )
    # Transpose so r[ant] indexes one antenna's time series.
    return (nants,np.array(t), np.array(r).transpose())
def ruvlist(file):
    """
    This reads the ascii output of a listing from uvlist recnum=0
    which looks roughly as follows:

    UVLIST: version  10-dec-08
     ***** UV Listing for vis1 *****
    Options: brief,data
    No. channels:    1, Linetype: channel
    Line Start:    1.000, Width:   90.000, Step:   90.000
    Scale factor:     1.000
    ------------------------------------------------------------
    Data values for 09JAN17

     Vis #    Time      Ant    Pol U(kLam)  V(kLam)    Amp  Phase
         1  09:43:52.5   1-  2  RR   -38.69   258.92   4.071  -146
         2  09:43:52.5   1-  3  RR  -308.83   187.31   1.926  -132
         3  09:43:52.5   1-  4  RR    27.14   496.13   4.477   176
    ...

    The data is assumed in regular order, time and baseline regular: the
    same set of baselines, in the same order, for every timestamp.

    Inputs:
       file    name of the uvlist ascii listing
    Returns:
       (nbl, t, r) tuple where nbl is the number of baselines, t a 1-D
       array of decimal times, and r a (nbl, ntimes) array of phases;
       irregular timeslots are skipped (with a warning), and visibilities
       flagged with a trailing '*' are zeroed
    """
    fp = open(file)
    lines = fp.readlines()
    fp.close()
    inHeader = True
    t=[]
    r=[]
    nrec = 0
    time = ""
    ntime = 0
    nbl = 0
    a1 = []
    a2 = []
    vec = []
    nerr = 0
    for l in lines:
        w = l.split()
        if inHeader:
            # Skip everything until the column-header row starting 'Vis'.
            if len(w) > 0 and w[0] == 'Vis':
                inHeader = False
        else:
            nrec = nrec + 1
            if w[1] != time:
                # New timeslot begins.
                ntime = ntime + 1
                if ntime == 2:
                    # All records seen so far belonged to the first timeslot,
                    # which fixes the baseline count.
                    nbl = nrec - 1
                time = w[1]
                # print 'new Time: %d %s' % (ntime,time)
                if ntime > 1:
                    # Flush the previous timeslot; skip it if it did not
                    # contain exactly nbl baselines.
                    if len(vec) == nbl:
                        t.append( sexa2dec(time) )
                        r.append( avector(vec) )
                    else:
                        nerr = nerr + 1
                    vec = []
            # Antenna columns look like '1-  2'; strip the trailing '-'.
            ant1 = w[2][:-1]
            ant2 = w[3]
            if ntime == 1:
                a1.append(int(ant1))
                a2.append(int(ant2))
            # Phase column; a trailing '*' marks a flagged visibility.
            if w[8][-1] == '*':
                vec.append(0.0)
            else:
                vec.append(float(w[8]))
    # and the final one
    if len(vec) == nbl:
        t.append( sexa2dec(time) )
        r.append( avector(vec) )
    else:
        nerr = nerr + 1
    if nerr > 0:
        print "Not a regular visibility dataset, skipping %d/%d timeslots" % (nerr,ntime)
    if False:
        # Debug summary, disabled by default.
        print "%d records" % nrec
        print "%d timeslots" % ntime
        print "%d baselines" % nbl
        print "ant1: ",a1
        print "ant2: ",a2
    # Transpose so r[bl] indexes one baseline's time series.
    return (nbl,np.array(t),np.array(r).transpose())
def pplot(t,p,s='ro-'):
    """add an unwrapped phase plot of p vs. t, in matplotlib style s

    NOTE: unwrap() mutates p in place.
    """
    plt.plot(t,unwrap(p),s)

def fit1(pair,t1,p1,t2,p2):
    """for given buddy pair (1..8) grab the data for phase corr fit

    Inputs:
       pair     1-based index into buddy_b09
       t1,p1    CARMA times and per-antenna phases (from rgplist)
       t2,p2    SZA times and per-antenna phases (from rgplist)
    Plots the CARMA-vs-SZA phase scatter with a linear regression and
    prints the fit coefficients.
    """
    # get 0 based ant number for carma and sza ant
    c_an = buddy_b09[pair-1][0] - 1
    s_an = buddy_b09[pair-1][1] - 1
    # get the phases for the CARMA and SZA buddy pair
    c_p = unwrap(p1[c_an])
    s_p = unwrap(p2[s_an])
    # reinterpolate the sza data on times of carma
    #c_p1 = interp(t1, t2,s_p)
    s_p2 = spline(t2,s_p, t1)
    # fit a straight line: CARMA phase as a function of SZA phase
    p=pylab.polyfit(s_p2,c_p,1)
    # plot
    plt.title('Antenna pair %d %d' % tuple(buddy_b09[pair-1]))
    plt.plot(s_p2,c_p,'ro',label='a=%.2f b=%.2f' % tuple(p))
    plt.plot(s_p2,pylab.polyval(p,s_p2),'-',label='Linear regression')
    plt.legend(loc='best')
    plt.xlabel('phase SZA %d' % (s_an+1))
    plt.ylabel('phase CARMA %d' % (c_an+1))
    print p

def fit4(pair,t1,p1,t2,p2):
    """for given buddy pair (1..8) grab the data for phase corr fit

    Same as fit1(), but opens a new figure with two panels: the phase
    correlation + regression on top, and both phase time series below.
    """
    # get 0 based ant number for carma and sza ant
    c_an = buddy_b09[pair-1][0] - 1
    s_an = buddy_b09[pair-1][1] - 1
    # get the phases for the CARMA and SZA buddy pair
    c_p = unwrap(p1[c_an])
    s_p = unwrap(p2[s_an])
    # reinterpolate the sza data on times of carma
    #c_p1 = interp(t1, t2,s_p)
    s_p2 = spline(t2,s_p, t1)
    # fit a straight line: CARMA phase as a function of SZA phase
    p=pylab.polyfit(s_p2,c_p,1)
    # plot: top panel is the correlation, bottom panel the time series
    plt.figure()
    plt.subplot(2,1,1)
    plt.title('Antenna pair %d %d' % tuple(buddy_b09[pair-1]))
    plt.plot(s_p2,c_p,'ro',label='a=%.2f b=%.2f' % tuple(p))
    plt.plot(s_p2,pylab.polyval(p,s_p2),'-',label='Linear regression')
    plt.legend(loc='best')
    plt.xlabel('phase SZA %d' % (s_an+1))
    plt.ylabel('phase CARMA %d' % (c_an+1))
    plt.subplot(2,1,2)
    plt.plot(t1,c_p,'ro',label='CARMA')
    plt.plot(t2,s_p,'bo',label='SZA')
    plt.legend(loc='best')
    plt.xlabel('time')
    plt.ylabel('phase')
def figure():
plt.figure()
plt.show()
def example1(file):
(nants,t,r) = rgplist(file)
plt.plot(t,r[4],'ro-')
plt.plot(t,r[5],'bs-')
plt.axis([0,24,-180,180])
plt.savefig('example1.ps')
plt.show()
def example2(file):
(nants,t,r) = rgplist(file)
plt.figure(1)
plt.subplot(221)
plt.plot(t,r[0],'ro-')
plt.title('Ant-1')
plt.subplot(222)
plt.plot(t,unwrap(r[1]),'ro-')
plt.title('Ant-2')
plt.subplot(223)
plt.plot(t,r[2],'ro-')
plt.title('Ant-3')
plt.subplot(224)
plt.plot(t,unwrap(r[3]),'ro-')
plt.title('Ant-4')
#
plt.figure(2)
plt.plot(t,r[4],'ro-')
plt.title('Ant-5')
#
plt.show()
def example3(file,ant,doit=True):
(nants,t,r) = rgplist(file)
plt.plot(t,r[ant-1],'ro-')
plt.show()
def example4(file1,file2,pair=1):
    """feed it a CARMA and SZA dataset"""
    # read both datasets, then fit/plot the phases of the requested buddy pair
    n_carma, t_carma, ph_carma = rgplist(file1)
    n_sza, t_sza, ph_sza = rgplist(file2)
    fit4(pair, t_carma, ph_carma, t_sza, ph_sza)
if __name__ == '__main__':
    # Function-call form works under both Python 2 and 3; the original
    # print *statement* is a SyntaxError on Python 3.
    print("No __main__ yet, if ever....")
|
astroumdREPO_NAMEmiriadPATH_START.@miriad_extracted@miriad-master@src@scripts@python@pacs.py@.PATH_END.py
|
{
"filename": "_tickformatstops.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/icicle/marker/colorbar/_tickformatstops.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickformatstopsValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Validator for the ``tickformatstops`` compound-array property of
    ``icicle.marker.colorbar`` (auto-generated plotly.py validator)."""

    def __init__(
        self,
        plotly_name="tickformatstops",
        parent_name="icicle.marker.colorbar",
        **kwargs,
    ):
        super(TickformatstopsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # each array element is validated as a Tickformatstop instance
            data_class_str=kwargs.pop("data_class_str", "Tickformatstop"),
            # human-readable property docs for the child data class
            data_docs=kwargs.pop(
                "data_docs",
                """
            dtickrange
                range [*min*, *max*], where "min", "max" -
                dtick values which describe some zoom level, it
                is possible to omit "min" or "max" value by
                passing "null"
            enabled
                Determines whether or not this stop is used. If
                `false`, this stop is ignored even within its
                `dtickrange`.
            name
                When used in a template, named items are
                created in the output figure in addition to any
                items the figure already has in this array. You
                can modify these items in the output figure by
                making your own item with `templateitemname`
                matching this `name` alongside your
                modifications (including `visible: false` or
                `enabled: false` to hide it). Has no effect
                outside of a template.
            templateitemname
                Used to refer to a named item in this array in
                the template. Named items from the template
                will be created even without a matching item in
                the input figure, but you can modify one by
                making an item with `templateitemname` matching
                its `name`, alongside your modifications
                (including `visible: false` or `enabled: false`
                to hide it). If there is no template or no
                matching item, this item will be hidden unless
                you explicitly show it with `visible: true`.
            value
                string - dtickformat for described zoom level,
                the same as "tickformat"
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@icicle@marker@colorbar@_tickformatstops.py@.PATH_END.py
|
{
"filename": "representations.py",
"repo_name": "mwalmsley/zoobot",
"repo_path": "zoobot_extracted/zoobot-main/zoobot/pytorch/training/representations.py",
"type": "Python"
}
|
import logging
import pytorch_lightning as pl
from timm import create_model
class ZoobotEncoder(pl.LightningModule):
    """Lightning wrapper around a plain torch encoder module."""

    def __init__(self, encoder):
        super().__init__()
        logging.info('ZoobotEncoder: using provided in-memory encoder')
        # plain pytorch module e.g. Sequential
        self.encoder = encoder

    def forward(self, x):
        # a single-element list is unwrapped and re-dispatched through self
        if isinstance(x, list) and len(x) == 1:
            return self(x[0])
        return self.encoder(x)

    @classmethod
    def load_from_name(cls, name: str):
        """
        e.g. ZoobotEncoder.load_from_name('hf_hub:mwalmsley/zoobot-encoder-convnext_nano')

        Args:
            name (str): huggingface hub name to load

        Returns:
            nn.Module: timm model
        """
        return cls(create_model(name, pretrained=True))
|
mwalmsleyREPO_NAMEzoobotPATH_START.@zoobot_extracted@zoobot-main@zoobot@pytorch@training@representations.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "hongwanliu/DarkHistory",
"repo_path": "DarkHistory_extracted/DarkHistory-master/docs/html/_static/conf.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# DarkHistory documentation build configuration file, created by
# sphinx-quickstart on Thu Sep 7 16:43:07 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# import sphinx_rtd_theme
sys.path.insert(0, os.path.abspath('./../'))
sys.path.insert(0, '/Users/hongwan/Github/numpydoc/')
# import numpydoc
# -- Set up new stuff -----------------------------------------------------
def skip_deprecated(app, what, name, obj, skip, options):
    """``autodoc-skip-member`` hook: skip members explicitly marked deprecated.

    A member is skipped when its function dictionary carries a
    ``__deprecated__`` key; otherwise the decision already made by
    autodoc (``skip``) is kept.

    NOTE(review): ``func_dict`` is the Python-2 spelling of a function's
    ``__dict__``; on Python 3 this test never fires — confirm intent.
    """
    if hasattr(obj, "func_dict") and "__deprecated__" in obj.func_dict:
        print("skipping " + name)
        return True
    # Normalize autodoc's verdict to a plain bool (was: `skip or False`)
    return bool(skip)
def setup(app):
    """Sphinx ``setup`` hook.

    Wires in the deprecation filter and, when the optional autosummary
    machinery imports cleanly, registers the ``autoautosummary`` directive.
    """
    app.connect('autodoc-skip-member', skip_deprecated)
    try:
        from sphinx.ext.autosummary import Autosummary
        from sphinx.ext.autosummary import get_documenter
        from docutils.parsers.rst import directives
        from sphinx.util.inspect import safe_getattr
        import re  # NOTE(review): imported but unused in this function

        class AutoAutoSummary(Autosummary):
            # Autosummary subclass that enumerates a class's methods or
            # attributes automatically instead of requiring explicit lists.

            option_spec = {
                'methods': directives.unchanged,
                'attributes': directives.unchanged
            }

            required_arguments = 1

            @staticmethod
            def get_members(obj, typ, include_public=None):
                # Collect member names of *obj* whose autodoc documenter type
                # matches *typ*; returns (public_names, all_names).
                if not include_public:
                    include_public = []
                items = []
                for name in dir(obj):
                    try:
                        documenter = get_documenter(safe_getattr(obj, name), obj)
                    except AttributeError:
                        # member not retrievable -> ignore it
                        continue
                    if documenter.objtype == typ:
                        items.append(name)
                public = [x for x in items if x in include_public or not x.startswith('_')]
                return public, items

            def run(self):
                # Directive argument is a dotted "module.Class" path.
                clazz = self.arguments[0]
                try:
                    (module_name, class_name) = clazz.rsplit('.', 1)
                    m = __import__(module_name, globals(), locals(), [class_name])
                    c = getattr(m, class_name)
                    if 'methods' in self.options:
                        _, methods = self.get_members(c, 'method', ['__init__'])
                        self.content = ["~%s.%s" % (clazz, method) for method in methods if not method.startswith('_')]
                    if 'attributes' in self.options:
                        _, attribs = self.get_members(c, 'attribute')
                        self.content = ["~%s.%s" % (clazz, attrib) for attrib in attribs if not attrib.startswith('_')]
                finally:
                    # NOTE(review): `return` inside `finally` swallows any
                    # exception raised above — presumably deliberate so the
                    # directive still renders with whatever content was set;
                    # confirm before changing.
                    return super(AutoAutoSummary, self).run()

        app.add_directive('autoautosummary', AutoAutoSummary)
    except BaseException as e:
        raise e
# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# extensions = ['sphinx.ext.autodoc','sphinx.ext.autosummary',
#   'sphinx.ext.doctest','sphinx.ext.napoleon']
# 'numpydoc' here is the locally patched copy added to sys.path above.
extensions = ['sphinx.ext.autodoc','sphinx.ext.autosummary', 'numpydoc', 'sphinx.ext.mathjax']

mathjax_path="https://cdn.mathjax.org/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML"

numpydoc_class_members_toctree = False

# Napoleon settings
# napoleon_include_special_with_doc = True

#Autosummary settings
autosummary_generate = True

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'DarkHistory'
copyright = '2019, Hongwan Liu, Gregory Ridgway and Tracy Slatyer'
author = 'Hongwan Liu, Gregory Ridgway and Tracy Slatyer'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'default'
# 'scipy' is a local theme shipped with the docs (see html_theme_path).
html_theme = 'scipy'
html_theme_path = ['.']
html_favicon = 'scipy/static/img/DarkHistory_favicon.png'

# html_theme = 'sphinx_rtd_theme'
# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
html_theme_options = {
    "edit_link": False,
    "sidebar": "right",
    "scipy_org_logo": "true",
    "rootlinks": [('https://github.com/hongwanliu/DarkHistory/tree/development', 'Repository')]
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
# html_sidebars = {
#     '**': [
#         'about.html',
#         'navigation.html',
#         'relations.html',  # needs 'show_related': True theme option to display
#         'searchbox.html',
#         'donate.html',
#     ]
# }

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'DarkHistorydoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'DarkHistory.tex', 'DarkHistory Documentation',
     'Hongwan Liu', 'manual'),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'darkhistory', 'DarkHistory Documentation',
     [author], 1)
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'DarkHistory', 'DarkHistory Documentation',
     author, 'DarkHistory', 'One line description of project.',
     'Miscellaneous'),
]
|
hongwanliuREPO_NAMEDarkHistoryPATH_START.@DarkHistory_extracted@DarkHistory-master@docs@html@_static@conf.py@.PATH_END.py
|
{
"filename": "astroTimeLegacy.py",
"repo_name": "sczesla/PyAstronomy",
"repo_path": "PyAstronomy_extracted/PyAstronomy-master/src/pyasl/asl/astroTimeLegacy.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import datetime
import numpy
from numpy import sin, cos, tan, sqrt, arcsin
from PyAstronomy.pyaC import pyaErrors as PE
import six.moves as smo
def weekday(date):
    """
    Return weekday by name

    Parameters
    ----------
    date : DATETIME object
        Give the date of a day as DATETIME object.

    Returns
    -------
    name: string
        "Monday", "Tuesday", "Wednesday",
        "Thursday", "Friday", "Saturday", "Sunday"
    """
    if not isinstance(date, datetime.date):
        raise ValueError("ERROR weekday: Given date is not a DATETIME object.")
    names = ("Monday", "Tuesday", "Wednesday",
             "Thursday", "Friday", "Saturday", "Sunday")
    # datetime.weekday(): Monday is 0, Sunday is 6
    return names[date.weekday()]
def daycnv(xjd, mode="idl"):
    """
    Converts Julian dates into Gregorian calendar dates.

    Handles both individual floats as xjd and iterables such as
    lists and arrays. In the latter case, the result is returned
    in the form of a list.

    Parameters
    ----------
    xjd : float, list, array
        The Julian date
    mode : string, {idl, dtlist, dt}, optional
        Determines format of output. If 'idl' is given (default),
        a list holding [year, month, day, (fractional) hours] is
        returned; this mimics the behavior of the IDL astrolib function.
        If 'dtlist' is given, a list holding
        [year, month, day, hours, minutes, seconds, microseconds] is
        returned. Finally, if 'dt' is specified, a Python
        datetime object will be returned. If the input is an iterable,
        the mode determines the format of the individual items in the
        result list.

    Returns
    -------
    Calendar date : list or datetime object
        A list holding [year, month, day, (fractional) hours] (default)
        or [year, month, day, hours, minutes, seconds, microseconds].
        Alternatively, a Python datetime object is returned. The format
        depends on the 'mode' specified. If the input is an iterable of
        Julian dates, the output is a list.

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's Library.

    Uses the algorithm of Fliegel and Van Flandern (1968) as reported in
    the "Explanatory Supplement to the Astronomical Almanac" (1992), p. 604.
    Works for all Gregorian calendar dates with XJD > 0, i.e., dates after
    -4713 November 23. Be sure that the Julian date is specified as double
    precision to maintain accuracy at the fractional hour level.

    Example (from the IDL astrolib): JD 2440000.0 yields
    year 1968, month 5, day 23, hour 12.
    """
    if mode not in ('idl', 'dtlist', 'dt'):
        raise PE.PyAValError("Unknown mode: " + str(mode),
                             where="daycnv",
                             solution="Use any of 'idl', 'dtlist', or 'dt'.")

    # Adjustment needed because Julian day starts at noon, calendar day at midnight
    iterable = hasattr(xjd, "__iter__")
    # Use iterable throughout calculations
    if not iterable:
        xjd = [xjd]

    jd = numpy.array(xjd).astype(int)                     # Truncate to integral day
    frac = numpy.array(xjd).astype(float) - jd + 0.5      # Fractional part of calendar day
    # Roll a fractional part >= 1 over into the next day
    gi = numpy.where(frac >= 1.0)
    frac[gi] -= 1.0
    jd[gi] += 1
    hr = frac * 24.0

    # Fliegel & Van Flandern (1968) integer arithmetic algorithm
    l = jd + 68569
    n = 4 * l // 146097
    l = l - (146097 * n + 3) // 4
    yr = 4000 * (l + 1) // 1461001
    l = l - 1461 * yr // 4 + 31        # 1461 = 365.25 * 4
    mn = 80 * l // 2447
    day = l - 2447 * mn // 80
    l = mn // 11
    mn = mn + 2 - 12 * l
    yr = 100 * (n - 49) + yr + l

    if mode in ('dt', 'dtlist'):
        # [year, month, day, hours, minutes, seconds, microseconds] requested
        hour = numpy.floor(hr).astype(int)
        minute = numpy.floor((hr - numpy.floor(hr)) * 60).astype(int)
        sec = numpy.floor((hr - hour - minute / 60.) * 3600.).astype(int)
        msec = (3600 * 1e6 * (hr - hour - minute / 60. - sec / 3600.)).astype(int)
        if mode == 'dtlist':
            if not iterable:
                return [yr[0], mn[0], day[0], hour[0], minute[0], sec[0], msec[0]]
            return [[yr[i], mn[i], day[i], hour[i], minute[i], sec[i], msec[i]]
                    for i in range(len(yr))]
        # Return datetime object(s)
        dts = [datetime.datetime(yr[i], mn[i], day[i], hour[i],
                                 minute[i], sec[i], msec[i]) for i in range(len(yr))]
        if not iterable:
            return dts[0]
        return dts

    if not iterable:
        return [yr[0], mn[0], day[0], hr[0]]
    return [[yr[i], mn[i], day[i], hr[i]] for i in range(len(yr))]
def bprecess(ra, dec, mu_radec=None,
             parallax=0.0, rad_vel=0.0, epoch=2000.0):
    """
    Precess positions from J2000.0 (FK5) to B1950.0 (FK4).

    Parameters
    ----------
    ra : float
        Right ascension [deg]
    dec : float
        Declination [deg]
    mu_radec : list
        List of two float entries, optional
        Proper motion [arcsec per tropical CENTURY]
    parallax : float
        The parallax of the target
    rad_vel : float
        Radial velocity [km/s]

    Returns
    -------
    Precessed position : list
        [ra_1950, dec_1950, MU_RADEC, PARALLAX, RAD_VEL] referring to 1950

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's Library
              (BPRECESS, written by W. Landsman, October 1992).

    The algorithm is taken from the Explanatory Supplement to the
    Astronomical Almanac 1992, page 186; see also Aoki et al. (1983),
    A&A, 128, 263.

    BPRECESS distinguishes between the following two cases:
    (1) The proper motion is known and non-zero.
    (2) The proper motion is unknown or known to be exactly zero (i.e.
        extragalactic radio sources). In this case, the reverse of the
        algorithm in Appendix 2 of Aoki et al. (1983) is used to ensure
        that the output proper motion is exactly zero. Better precision
        can be achieved in this case by inputting the EPOCH of the
        original observations.

    The error in using a simple precession for converting between B1950
    and J2000 can be up to 12", mainly in right ascension; BPRECESS
    should be used when better accuracy is needed.
    """
    radeg = 180.0 / numpy.pi
    sec_to_radian = 1.0 / radeg / 3600.0

    # 6x6 FK5 -> FK4 transformation matrix (position block and velocity
    # block), Explanatory Supplement 1992, p. 186
    M = numpy.array([[+0.9999256795, -0.0111814828, -0.0048590040,
                      -0.000551, -0.238560, +0.435730],
                     [+0.0111814828, +0.9999374849, -0.0000271557,
                      +0.238509, -0.002667, -0.008541],
                     [+0.0048590039, -0.0000271771, +0.9999881946,
                      -0.435614, +0.012254, +0.002117],
                     [-0.00000242389840, +0.00000002710544, +0.00000001177742,
                      +0.99990432, -0.01118145, -0.00485852],
                     [-0.00000002710544, -0.00000242392702, +0.00000000006585,
                      +0.01118145, +0.99991613, -0.00002716],
                     [-0.00000001177742, +0.00000000006585, -0.00000242404995,
                      +0.00485852, -0.00002717, +0.99996684]])

    # Time derivative of the E-terms of aberration, in arc seconds per century
    A_dot = 1e-3 * numpy.array([1.244, -1.579, -0.660])

    ra_rad = ra / radeg
    dec_rad = dec / radeg
    cosra = cos(ra_rad)
    sinra = sin(ra_rad)
    cosdec = cos(dec_rad)
    sindec = sin(dec_rad)

    dec_1950 = 0.0
    ra_1950 = 0.0

    # E-terms of aberration (radians); following statement moved inside
    # loop in Feb 2000 in the IDL original.
    A = 1e-6 * numpy.array([-1.62557, -0.31919, -0.13843])  # in radians

    # Unit direction vector of the input position
    r0 = numpy.array([cosra * cosdec, sinra * cosdec, sindec])

    if mu_radec is not None:
        mu_a = mu_radec[0]
        mu_d = mu_radec[1]
        # Velocity vector: proper-motion part plus radial-velocity part
        r0_dot = numpy.array([-mu_a * sinra * cosdec - mu_d * cosra * sindec,
                              mu_a * cosra * cosdec - mu_d * sinra * sindec,
                              mu_d * cosdec]) + 21.095 * rad_vel * parallax * r0
    else:
        r0_dot = numpy.zeros(3)

    # Apply the 6x6 transformation to the combined position/velocity vector
    R_0 = numpy.concatenate([r0, r0_dot])
    R_1 = numpy.dot(numpy.transpose(M), R_0)

    # Include the effects of the E-terms of aberration to form r and r_dot.
    r1 = R_1[0:3]
    r1_dot = R_1[3:6]

    if mu_radec is None:
        # No proper motion given: propagate position and E-terms to the
        # observation epoch so the output proper motion is exactly zero.
        r1 = r1 + sec_to_radian * r1_dot * (epoch - 1950.0) / 100.
        A = A + sec_to_radian * A_dot * (epoch - 1950.0) / 100.

    x1 = R_1[0]
    y1 = R_1[1]
    z1 = R_1[2]
    rmag = sqrt(x1**2 + y1**2 + z1**2)

    s1 = r1 / rmag
    s1_dot = r1_dot / rmag

    # Two fixed-point iterations to remove the E-terms from the direction
    s = s1
    for j in smo.range(2):
        r = s1 + A - ((s * A).sum()) * s
        s = r / rmag
    x = r[0]
    y = r[1]
    z = r[2]
    r2 = x**2 + y**2 + z**2
    rmag = sqrt(r2)

    if mu_radec is not None:
        # Convert the corrected velocity back into proper motion components
        # (modifies the caller-supplied mu_radec list in place)
        r_dot = s1_dot + A_dot - ((s * A_dot).sum()) * s
        x_dot = r_dot[0]
        y_dot = r_dot[1]
        z_dot = r_dot[2]
        mu_radec[0] = (x * y_dot - y * x_dot) / (x**2 + y**2)
        mu_radec[1] = (z_dot * (x**2 + y**2) - z *
                       (x * x_dot + y * y_dot)) / (r2 * sqrt(x**2 + y**2))

    dec_1950 = arcsin(z / rmag)
    ra_1950 = numpy.arctan2(y, x)

    if parallax > 0.0:
        # NOTE(review): if parallax > 0 while mu_radec is None, x_dot/y_dot/
        # z_dot are undefined here and this raises NameError — presumably
        # callers always supply proper motion with a parallax; confirm.
        rad_vel = (x * x_dot + y * y_dot + z * z_dot) / \
            (21.095 * parallax * rmag)
        parallax = parallax / rmag

    # Normalize RA into [0, 2*pi) and convert back to degrees
    if ra_1950 < 0.0:
        ra_1950 += 2.0 * numpy.pi

    ra_1950 = ra_1950 * radeg
    dec_1950 = dec_1950 * radeg

    return [ra_1950, dec_1950, mu_radec, parallax, rad_vel]
def premat(equinox1, equinox2, FK4=False):
    """
    Return the precession matrix needed to go from EQUINOX1 to EQUINOX2.

    Parameters
    ----------
    equinox1, equinox2 : float
        EQUINOX1 and EQUINOX2
    FK4 : boolean
        Set this to True to obtain output in FK4 system

    Returns
    -------
    matrix : 3x3 array
        Precession matrix used to precess equatorial rectangular
        coordinates.

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's
              Library (PREMAT, W. Landsman, June 1994).

    FK4 constants from Taff (1983), "Computational Spherical Astronomy",
    p. 24; FK5 constants from the Astronomical Almanac Explanatory
    Supplement 1992, page 104, Table 3.211.1.
    """
    sec_to_rad = (numpy.pi / 180.0) / 3600.0
    t = 0.001 * (equinox2 - equinox1)

    if FK4:
        st = 0.001 * (equinox1 - 1900.0)
        # The three precession angles (FK4, B1950-based constants)
        A = sec_to_rad * t * (23042.53 + st * (139.75 + 0.06 * st)
                              + t * (30.23 - 0.27 * st + 18.0 * t))
        B = sec_to_rad * t * t * (79.27 + 0.66 * st + 0.32 * t) + A
        C = sec_to_rad * t * (20046.85 - st * (85.33 + 0.37 * st)
                              + t * (-42.67 - 0.37 * st - 41.8 * t))
    else:
        st = 0.001 * (equinox1 - 2000.0)
        # The three precession angles (FK5, J2000-based constants)
        A = sec_to_rad * t * (23062.181 + st * (139.656 + 0.0139 * st)
                              + t * (30.188 - 0.344 * st + 17.998 * t))
        B = sec_to_rad * t * t * (79.280 + 0.410 * st + 0.205 * t) + A
        C = sec_to_rad * t * (20043.109 - st * (85.33 + 0.217 * st)
                              + t * (-42.665 - 0.217 * st - 41.833 * t))

    sina, cosa = sin(A), cos(A)
    sinb, cosb = sin(B), cos(B)
    sinc, cosc = sin(C), cos(C)

    # Assemble the 3x3 rotation matrix column by column
    r = numpy.zeros((3, 3))
    r[:, 0] = [cosa * cosb * cosc - sina * sinb,
               sina * cosb + cosa * sinb * cosc,
               cosa * sinc]
    r[:, 1] = [-cosa * sinb - sina * cosb * cosc,
               cosa * cosb - sina * sinb * cosc,
               -sina * sinc]
    r[:, 2] = [-cosb * sinc, -sinb * sinc, cosc]
    return r
def precess(ra, dec, equinox1, equinox2, FK4=False, radian=False):
    """
    Precess coordinates from EQUINOX1 to EQUINOX2.

    Parameters
    ----------
    ra, dec, equinox1, equinox2 : float
        Position and equinox
    FK4 : boolean
        Set to True to obtain output in FK4 system.
    radian : boolean
        If True, `ra` and `dec` must be given in radian (degrees otherwise).

    Returns
    -------
    Position : list of ra and dec
        A list with [ra, dec] precessed from equinox 1 to equinox 2.

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's
              Library (PRECESS, W. Landsman, 1986-2003).

    Accuracy of precession decreases for declination values near 90
    degrees. PRECESS should not be used more than 2.5 centuries from
    2000 on the FK5 system (1950.0 on the FK4 system).

    Algorithm from Computational Spherical Astronomy by Taff (1983),
    p. 24. (FK4). FK5 constants from "Astronomical Almanac Explanatory
    Supplement 1992, page 104 Table 3.211.1. The precession matrix
    itself is computed by :func:`premat`.

    Examples (IDL)
    --------------
    (1) The Pole Star has J2000.0 coordinates (2h, 31m, 46.3s,
        89d 15' 50.6"); its J1985.0 coordinates are
        2h 16m 22.73s, 89d 11' 47.3".
    (2) Precess the B1950 coordinates of Eps Ind
        (RA = 21h 59m 33.053s, DEC = -56d 59' 33.053") to equinox B1975
        with FK4=True.
    """
    deg_to_rad = numpy.pi / 180.0

    if not radian:
        # ra, dec are given in degrees; convert to radians
        ra_rad = ra * deg_to_rad
        dec_rad = dec * deg_to_rad
    else:
        ra_rad = ra
        dec_rad = dec

    a = cos(dec_rad)
    # Input direction as a Cartesian unit vector
    x = [a * cos(ra_rad), a * sin(ra_rad), sin(dec_rad)]

    # Use PREMAT function to get precession matrix from Equinox1 to Equinox2
    r = premat(equinox1, equinox2, FK4=FK4)

    x2 = numpy.dot(r, x)  # rotate to get output direction cosines

    ra_rad = numpy.arctan2(x2[1], x2[0])
    dec_rad = arcsin(x2[2])

    # arctan2 yields RA in (-pi, pi]; map it into [0, 2*pi). The boolean
    # `(ra < 0.0)` multiplies as 0/1 and works element-wise for array
    # input as well (the former `int(ra < 0.0)` failed on arrays).
    # Previously this normalization was also applied unconditionally
    # after the if/else, where it was a no-op for the degree branch.
    if not radian:
        ra = ra_rad / deg_to_rad
        # RA between 0 and 360 degrees
        ra = ra + (ra < 0.0) * 360.0
        dec = dec_rad / deg_to_rad
    else:
        ra = ra_rad + (ra_rad < 0.0) * 2.0 * numpy.pi
        dec = dec_rad

    return [ra, dec]
def precess_xyz(x, y, z, equinox1, equinox2):
    """
    Precess equatorial geocentric rectangular coordinates.

    Parameters
    ----------
    x, y, z, equinox1, equinox2 : float

    Returns
    -------
    Precessed coordinates : list
        A list containing the updated `x`, `y`, and `z` values.

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's
              Library (PRECESS_XYZ, P. Plait/ACC, 1999).

    The rectangular coordinates are converted to RA and Dec, precessed
    in the normal way, then changed back to x, y and z using unit
    vectors.
    """
    # Convert the input vector to spherical coordinates (radians)
    ra = numpy.arctan2(y, x)
    delp = sqrt(x * x + y * y + z * z)  # magnitude of distance to Sun
    dec = arcsin(z / delp)

    # Precess the angular coordinates
    ra, dec = precess(ra, dec, equinox1, equinox2, radian=True)

    # Back to rectangular coordinates, preserving the original magnitude
    xunit = cos(ra) * cos(dec)
    yunit = sin(ra) * cos(dec)
    zunit = sin(dec)
    return [xunit * delp, yunit * delp, zunit * delp]
def xyz(date, velocity=False, equinox=1950.0):
    """
    Calculate geocentric X, Y, and Z and velocity coordinates of the Sun.

    The positive X axis is directed towards the equinox, the Y axis towards
    the point on the equator at right ascension 6h, and the Z axis toward the
    north pole of the equator. Typical position accuracy is <1e-4 AU
    (15000 km).

    Parameters
    ----------
    date : float
        Reduced Julian date (= JD - 2400000), scalar or vector.
    velocity : boolean
        If False, the velocity of the Sun will not be calculated and the last
        three entries of the returned list are None.
    equinox : float or None
        Equinox of the output coordinates (default 1950.0). If None, the
        coordinates are left in the 1950 frame and no precession is applied.

    Returns
    -------
    Sun position and velocity : list
        A list of the form [X, Y, Z, XVEL, YVEL, ZVEL]; positions in AU and
        velocities in AU/day. Note that sqrt(X**2 + Y**2 + Z**2) gives the
        Earth-Sun distance for the given date. The last three values are None
        if `velocity` is False.

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's Library
              (XYZ). Original algorithm from "Almanac for Computers",
              Doggett et al., USNO 1978, adapted from "Astronomical
              Photometry" by A. Henden; extended by P. Plait/ACC 1999.
    """
    picon = numpy.pi / 180.0
    t = (date - 15020.0) / 36525.0  # Relative Julian century from 1900
    # NOTE: longitude arguments below are given in *equinox* of date.
    # Precess these to equinox 1950 to give everything an even footing.
    # Compute argument of precession from equinox of date back to 1950
    pp = (1.396041 + 0.000308 * (t + 0.5)) * (t - 0.499998)
    # Compute mean solar longitude, precessed back to 1950
    el = 279.696678 + 36000.76892 * t + 0.000303 * t * t - pp
    # Compute Mean longitude of the Moon
    c = 270.434164 + 480960. * t + 307.883142 * t - 0.001133 * t * t - pp
    # Compute longitude of Moon's ascending node
    n = 259.183275 - 1800. * t - 134.142008 * t + 0.002078 * t * t - pp
    # Compute mean solar anomaly
    g = 358.475833 + 35999.04975 * t - 0.00015 * t * t
    # Compute the mean jupiter anomaly
    j = 225.444651 + 2880.0 * t + 154.906654 * t * t
    # Compute mean anomaly of Venus
    v = 212.603219 + 58320. * t + 197.803875 * t + 0.001286 * t * t
    # Compute mean anomaly of Mars
    m = 319.529425 + 19080. * t + 59.8585 * t + 0.000181 * t * t
    # Convert degrees to radians for trig functions
    el = el * picon
    g = g * picon
    j = j * picon
    c = c * picon
    v = v * picon
    n = n * picon
    m = m * picon
    # Calculate X,Y,Z using trigonometric series
    X = 0.999860 * cos(el) \
        - 0.025127 * cos(g - el) \
        + 0.008374 * cos(g + el) \
        + 0.000105 * cos(g + g + el) \
        + 0.000063 * t * cos(g - el) \
        + 0.000035 * cos(g + g - el) \
        - 0.000026 * sin(g - el - j) \
        - 0.000021 * t * cos(g + el) \
        + 0.000018 * sin(2. * g + el - 2. * v) \
        + 0.000017 * cos(c) \
        - 0.000014 * cos(c - 2. * el) \
        + 0.000012 * cos(4. * g + el - 8. * m + 3. * j) \
        - 0.000012 * cos(4. * g - el - 8. * m + 3. * j) \
        - 0.000012 * cos(g + el - v) \
        + 0.000011 * cos(2. * g + el - 2. * v) \
        + 0.000011 * cos(2. * g - el - 2. * j)
    Y = 0.917308 * sin(el) \
        + 0.023053 * sin(g - el) \
        + 0.007683 * sin(g + el) \
        + 0.000097 * sin(g + g + el) \
        - 0.000057 * t * sin(g - el) \
        - 0.000032 * sin(g + g - el) \
        - 0.000024 * cos(g - el - j) \
        - 0.000019 * t * sin(g + el) \
        - 0.000017 * cos(2.00 * g + el - 2.00 * v) \
        + 0.000016 * sin(c) \
        + 0.000013 * sin(c - 2.00 * el) \
        + 0.000011 * sin(4.00 * g + el - 8.00 * m + 3.00 * j) \
        + 0.000011 * sin(4.00 * g - el - 8.00 * m + 3.00 * j) \
        - 0.000011 * sin(g + el - v) \
        + 0.000010 * sin(2.00 * g + el - 2.00 * v) \
        - 0.000010 * sin(2.00 * g - el - 2.00 * j)
    Z = 0.397825 * sin(el) \
        + 0.009998 * sin(g - el) \
        + 0.003332 * sin(g + el) \
        + 0.000042 * sin(g + g + el) \
        - 0.000025 * t * sin(g - el) \
        - 0.000014 * sin(g + g - el) \
        - 0.000010 * cos(g - el - j)
    # Precess position to the requested equinox (identity when equinox=1950)
    if equinox is not None:
        X, Y, Z = precess_xyz(X, Y, Z, 1950, equinox)
    if not velocity:
        return [X, Y, Z, None, None, None]
    XVEL = -0.017200 * sin(el) \
        - 0.000288 * sin(g + el) \
        - 0.000005 * sin(2.00 * g + el) \
        - 0.000004 * sin(c) \
        + 0.000003 * sin(c - 2.00 * el) \
        + 0.000001 * t * sin(g + el) \
        - 0.000001 * sin(2.00 * g - el)
    YVEL = 0.015780 * cos(el) \
        + 0.000264 * cos(g + el) \
        + 0.000005 * cos(2.00 * g + el) \
        + 0.000004 * cos(c) \
        + 0.000003 * cos(c - 2.00 * el) \
        - 0.000001 * t * cos(g + el)
    ZVEL = 0.006843 * cos(el) \
        + 0.000115 * cos(g + el) \
        + 0.000002 * cos(2.00 * g + el) \
        + 0.000002 * cos(c) \
        + 0.000001 * cos(c - 2.00 * el)
    # Precess velocities to the requested equinox. The `equinox is not None`
    # check was added so that equinox=None consistently means "leave in the
    # 1950 frame"; previously this branch called precess_xyz with
    # equinox2=None, which crashed.
    if equinox is not None and equinox != 1950.0:
        XVEL, YVEL, ZVEL = precess_xyz(XVEL, YVEL, ZVEL, 1950, equinox)
    return [X, Y, Z, XVEL, YVEL, ZVEL]
def helio_jd(date, ra, dec, B1950=False, TIME_DIFF=False):
    """
    Convert geocentric (reduced) Julian date into heliocentric Julian date.

    Corrects for the extra light travel time between the Earth and the Sun.

    .. note:: The heliocentric julian date (HJD) differs from the barycentric julian date (BJD)
              by +/- 4 s (e.g., Eastman et al. 2010, PASP 122, 935). See also the
              :ref:`timescales`.

    Parameters
    ----------
    date : float
        (Reduced) Julian date (2.4e6 subtracted).
    ra, dec : float
        Right ascension and declination in degrees. Equinox is J2000 unless
        `B1950` is True.
    B1950 : boolean
        If True, input coordinates are assumed to be given in equinox
        1950 coordinates.
    TIME_DIFF : boolean
        If True, this function returns the time difference
        (heliocentric JD - geocentric JD ) in seconds.

    Returns
    -------
    HJD : float
        The heliocentric Julian date (or the time difference in seconds if
        `TIME_DIFF` is True).

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's Library
              (HELIO_JD). Algorithm from the book "Astronomical Photometry"
              by Henden, p. 114. An online calculator for this quantity is
              available at http://www.physics.sfasu.edu/astro/javascript/hjd.html
    """
    # Sanity check: a *reduced* JD must have had 2.4e6 subtracted already.
    if date > 2.4e6:
        PE.warn(PE.PyAValError("The given Julian Date ( " + str(date) + ") is exceedingly large for a reduced JD.",
                               solution="Did you forget to subtract 2.4e6?",
                               where="helio_jd"))
    # Because XYZ uses default B1950 coordinates, we'll convert everything to B1950
    if not B1950:
        ra1, dec1 = bprecess(ra, dec)
    else:
        ra1 = ra
        dec1 = dec
    radeg = 180.0 / numpy.pi
    # Obliquity of the ecliptic at the epoch of observation: 23.433333 deg
    # plus a small polynomial correction (in arcsec) in Julian centuries.
    delta_t = (date - 33282.42345905) / 36525.0
    epsilon_sec = 44.836 - 46.8495 * delta_t - \
        0.00429 * delta_t**2 + 0.00181 * delta_t**3
    epsilon = (23.433333 + epsilon_sec / 3600.0) / radeg
    ra1 = ra1 / radeg
    dec1 = dec1 / radeg
    x, y, z, _, _, _ = xyz(date)
    # Find extra distance light must travel in AU, multiply by 1.49598e13 cm/AU,
    # and divide by the speed of light, and multiply by 86400 second/year.
    # (-499.00522 s is the light travel time for 1 AU, so the result is in seconds.)
    time_diff = -499.00522 * (cos(dec1) * cos(ra1) * x +
                              (tan(epsilon) * sin(dec1) + cos(dec1) * sin(ra1)) * y)
    if TIME_DIFF:
        return time_diff
    return date + time_diff / 86400.0
def jdcnv(dt):
    """
    Convert a Gregorian date to a Julian day number.

    Parameters
    ----------
    dt : DateTime object
        The date. This is interpreted as UTC and the timezone component is
        not considered.

    Returns
    -------
    Julian day : float

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's Library
              (JDCNV). The integer part follows the classic Fliegel &
              Van Flandern style integer-arithmetic formula.
    """
    year, month, day = dt.year, dt.month, dt.day
    # -1 for January and February (counted with the preceding year), 0 otherwise
    leap_adj = int((month - 14.0) / 12.0)
    # Julian day number at noon of the given calendar date
    jd_noon = (day - 32075
               + int(1461 * (year + 4800 + leap_adj) / 4.0)
               + int(367 * (month - 2 - leap_adj * 12) / 12.0)
               - int(int(3 * ((year + 4900 + leap_adj) / 100.0)) / 4.0))
    # Fraction of the day from the time of day; -0.5 because a Julian day
    # begins at noon, not midnight.
    day_frac = (dt.hour / 24.0
                + dt.minute / (24.0 * 60.0)
                + dt.second / 86400.
                + dt.microsecond / (86400. * 1e6)
                - 0.5)
    return jd_noon + day_frac
def get_juldate():
    """
    Return the current Julian Date.

    Returns
    -------
    JD : float
        The Julian date corresponding to the current UTC time.

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's Library
              (GET_JULDATE), where it is equivalent to SYSTIME(/JULIAN,/UTC).
    """
    # datetime.utcnow() is deprecated since Python 3.12; use an aware UTC
    # datetime instead. jdcnv ignores the timezone attribute, so the
    # resulting Julian date is unchanged.
    now = datetime.datetime.now(datetime.timezone.utc)
    return jdcnv(now)
def juldate(date):
    """
    Convert from calendar date to Reduced Julian Date.

    The Julian day number counts days elapsed since Greenwich mean noon on
    1 January 4713 B.C.; the Julian date adds the fraction of the day elapsed
    since the preceding noon. This function returns the *reduced* Julian
    date, obtained by subtracting 2400000 from the Julian date. To convert
    the output into Modified Julian Date (MJD), another 0.5 days have to be
    subtracted.

    Parameters
    ----------
    date : DateTime object
        Calendar date.

    Returns
    -------
    RJD : float
        The **reduced** Julian date.

    Notes
    -----
    .. note:: This function was ported from the IDL Astronomy User's Library
              (JULDATE). The procedure HELIO_JD can be used afterwards if a
              heliocentric Julian date is required.
    """
    # Reduced JD = full JD - 2,400,000
    return jdcnv(date) - 2400000.0
|
sczeslaREPO_NAMEPyAstronomyPATH_START.@PyAstronomy_extracted@PyAstronomy-master@src@pyasl@asl@astroTimeLegacy.py@.PATH_END.py
|
{
"filename": "spectrum.py",
"repo_name": "CASTOR-telescope/ETC",
"repo_path": "ETC_extracted/ETC-master/castor_etc/spectrum.py",
"type": "Python"
}
|
"""
Generate and handle spectral data and normalizations.
Includes:
- redshifting wavelengths
- blackbody radiation, power-law spectrum generation
- user-input spectrum
- Gaussian and Lorentzian emission/absorption lines
- generic spiral and elliptical galaxy spectra
- stellar spectra from the Pickles catalog
- stellar spectra from the 'ATLAS9' and 'BTSettl' catalogs
- normalization functions:
- normalize a blackbody spectrum to a star of given radius and distance
- normalize a spectrum to some average value or AB magnitude, either within a passband
or over the whole spectrum
- normalize a spectrum to a given bolometric luminosity and distance
- calculate the average value of a spectrum (erg/s/cm^2/A or AB mag) either within a
passband or over the whole spectrum
---
GNU General Public License v3 (GNU GPLv3)
(c) 2022. (c) 2022.
Government of Canada Gouvernement du Canada
National Research Council Conseil national de recherches
Ottawa, Canada, K1A 0R6 Ottawa, Canada, K1A 0R6
All rights reserved Tous droits réservés
NRC disclaims any warranties, Le CNRC dénie toute garantie
expressed, implied, or énoncée, implicite ou légale,
statutory, of any kind with de quelque nature que ce
respect to the software, soit, concernant le logiciel,
including without limitation y compris sans restriction
any warranty of merchantability toute garantie de valeur
or fitness for a particular marchande ou de pertinence
purpose. NRC shall not be pour un usage particulier.
liable in any event for any Le CNRC ne pourra en aucun cas
damages, whether direct or être tenu responsable de tout
indirect, special or general, dommage, direct ou indirect,
consequential or incidental, particulier ou général,
arising from the use of the accessoire ou fortuit, résultant
software. Neither the name de l'utilisation du logiciel. Ni
of the National Research le nom du Conseil National de
Council of Canada nor the Recherches du Canada ni les noms
names of its contributors may de ses participants ne peuvent
be used to endorse or promote être utilisés pour approuver ou
products derived from this promouvoir les produits dérivés
software without specific prior de ce logiciel sans autorisation
written permission. préalable et particulière
par écrit.
This file is part of the Ce fichier fait partie du projet
FORECASTOR ETC project. FORECASTOR ETC.
FORECASTOR ETC is free software: FORECASTOR ETC est un logiciel
you can redistribute it and/or libre ; vous pouvez le redistribuer
modify it under the terms of ou le modifier suivant les termes de
the GNU General Public la "GNU General Public
License as published by the License" telle que publiée
Free Software Foundation, par la Free Software Foundation :
either version 3 of the soit la version 3 de cette
License, or (at your option) licence, soit (à votre gré)
any later version. toute version ultérieure.
FORECASTOR ETC is distributed FORECASTOR ETC est distribué
in the hope that it will be dans l'espoir qu'il vous
useful, but WITHOUT ANY WARRANTY; sera utile, mais SANS AUCUNE
without even the implied warranty GARANTIE : sans même la garantie
of MERCHANTABILITY or FITNESS FOR implicite de COMMERCIALISABILITÉ
A PARTICULAR PURPOSE. See the ni d'ADÉQUATION À UN OBJECTIF
GNU General Public License for PARTICULIER. Consultez la Licence
more details. Générale Publique GNU pour plus
de détails.
You should have received Vous devriez avoir reçu une
a copy of the GNU General copie de la Licence Générale
Public License along with Publique GNU avec FORECASTOR ETC ;
FORECASTOR ETC. If not, see si ce n'est pas le cas, consultez :
<http://www.gnu.org/licenses/>. <http://www.gnu.org/licenses/>.
"""
import os
import warnings
from numbers import Number
from os.path import join
import astropy.units as u
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from astropy.constants import R_sun, pc
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy.wcs import WCS
from astroquery.gaia import Gaia
from scipy.integrate import simpson
from scipy.interpolate import interp1d
from . import constants as const
from .conversions import calc_photon_energy, flam_to_AB_mag, fnu_to_flam, mag_to_flux
from .filepaths import DATAPATH
from .telescope import Telescope
def getStarData(
    temperature,
    metallicity,
    logg,
    stellar_model_dir,
    model_grid="ATLAS9",
):
    """
    Read a stellar model spectrum closest to the requested parameters.

    Selects the nearest grid point (prioritizing Teff, then log(g), then
    metallicity) from either the ATLAS9 ('ck04') or the BTSettl CIFIST model
    grid and returns its wavelength and flux arrays.

    Parameters
    ----------
    temperature :: float
        Temperature of the reference star in kelvin. If below 3500 K (the
        minimum Teff in the ATLAS9 grid), the BTSettl grid is used regardless
        of `model_grid`.
    metallicity :: float
        Metallicity of the reference star, in the units used by the model
        grid file names (e.g. -0.25 ... +0.05 for the bundled ATLAS9 grid).
    logg :: float
        log(g) value for the reference star.
    stellar_model_dir :: str
        Path to the stellar models directory. There are three variations
        contingent on the working environment.
        Variation 1: "/arc/projects/CASTOR/stellar_models" --> Default path
        (working on the CANFAR server).
        Variation 2: <path to local stellar models directory>.
        Variation 3: join(DATAPATH, "transit_data/stellar_models") --> use
        when building the docker container locally.
    model_grid :: "ATLAS9" or "BTSettl"
        Which model grid to read from (default "ATLAS9").

    Returns
    -------
    wavelengths :: `astropy.Quantity` array
        Wavelengths of the model spectrum in angstroms.
    flux :: array of floats
        Flux of the model spectrum in erg s^-1 cm^-2 A^-1.

    Raises
    ------
    RuntimeError
        If the expected grid subdirectory is missing from `stellar_model_dir`.
    FileNotFoundError
        If the selected ATLAS9 spectrum file does not exist.
    ValueError
        If `model_grid` is not one of "ATLAS9" or "BTSettl".
    """
    # Default to ATLAS9 model.
    # If Teff < 3500 K (i.e., min Teff in ATLAS9 grid), switch to BTSettl grid.
    if model_grid == "BTSettl" or temperature < 3500:
        if not os.path.exists(os.path.join(stellar_model_dir, "BTSettl_CIFIST/")):
            raise RuntimeError(
                f"The specified directory path `{stellar_model_dir}` does not contain "
                + "the stellar_models directory. Please make sure the path variable "
                + "points to the correct directory."
            )
        grid_dir = os.path.join(stellar_model_dir, "BTSettl_CIFIST/")
        # Scan directory to inventory the model grid. Assumes all files
        # starting with 'lte' are model files. Only solar-metallicity models
        # are stored; may need updating for other metallicities.
        model_flist = []
        grid_param = []
        for fname in os.listdir(grid_dir):
            if not fname.startswith("lte"):
                continue
            # Parse Teff (hundreds of K), log(g), and [M/H] from the file name.
            # NOTE(review): a '+' sign here maps to a *negative* log(g) --
            # confirm against the BTSettl CIFIST file-naming convention.
            _teff = float(fname[3:6]) * 100
            if fname[6] == "+":
                _logg = -1 * float(fname[7:10])
            else:
                _logg = float(fname[7:10])
            _mh = float(fname[11:14])
            model_flist.append(fname)
            grid_param.append([_teff, _logg, _mh])
        grid_param = np.vstack(grid_param)
        model_flist = np.array(model_flist)
        # Identify the closest model, prioritizing Teff, then logg, then
        # metallicity: at each step keep only the rows matching the nearest
        # value along that axis.
        for col, target in ((0, temperature), (1, logg), (2, metallicity)):
            mi = np.argmin(np.abs(grid_param[:, col] - target))
            ti = np.where(grid_param[:, col] == grid_param[mi, col])[0]
            grid_param = grid_param[ti]
            model_flist = model_flist[ti]
        # Read in the selected model (column 0: wavelength [A], column 1: flux)
        d = np.loadtxt(grid_dir + model_flist[0])
        wv = d[:, 0] * u.AA
        flux = d[:, 1]  # [erg/s/cm2/angstrom]
    elif model_grid == "ATLAS9":
        if not os.path.exists(os.path.join(stellar_model_dir, "ATLAS9/ck04models/")):
            raise RuntimeError(
                f"The specified directory path `{stellar_model_dir}` does not contain "
                + "the stellar_model directory. Please modify the path variable. "
            )
        grid_dir = os.path.join(stellar_model_dir, "ATLAS9/ck04models/")
        # ATLAS9 (ck04) grid points:
        # Teff 3500-13000 K in steps of 250 K, then 14000-50000 K in steps of 1000 K
        teff_grid = np.concatenate(
            (np.arange(3500, 13001, 250), np.arange(14000, 50001, 1000))
        )
        mh_grid = np.array([-0.25, -0.20, -0.15, -0.10, -0.05, 0.0, 0.02, 0.05])
        logg_grid = np.array([0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0])
        # Nearest grid point along each axis, encoded into the ck04 file naming
        teff_near = teff_grid[np.argmin(np.abs(teff_grid - temperature))]
        mh_near = mh_grid[np.argmin(np.abs(mh_grid - metallicity))]
        logg_near = logg_grid[np.argmin(np.abs(logg_grid - logg))]
        teff_str = "{:.0f}".format(teff_near)
        if mh_near < 0:
            mh_str = "m{:0>2d}".format(int(np.abs(mh_near) * 100))
        else:
            mh_str = "p{:0>2d}".format(int(np.abs(mh_near) * 100))
        logg_str = "g{:0>2d}".format(int(logg_near * 10))
        specfile = (
            grid_dir + "ck" + mh_str + "/" + "ck" + mh_str + "_" + teff_str + ".fits"
        )
        if not os.path.isfile(specfile):
            # Previously this only printed a message and then crashed with an
            # UnboundLocalError on the return statement; raise a clear error.
            raise FileNotFoundError("Spectrum not found: " + specfile)
        # Close the FITS file deterministically via the context manager
        with fits.open(specfile) as hdul:
            data = hdul[1].data  # the first extension has the model table
            wv = data["WAVELENGTH"] * u.AA
            flux = data[logg_str]  # [erg/s/cm^2/A]
    else:
        # Previously an unknown grid name fell through and triggered an
        # UnboundLocalError on `wv`; fail with an explicit error instead.
        raise ValueError(
            "Unknown model_grid '{}'; expected 'ATLAS9' or 'BTSettl'.".format(model_grid)
        )
    return wv, flux
def interp_EEM_table(Teff=(), Gmag=(), Bpmag=(), Rpmag=()):
    """
    Interpolate the EEM "mean dwarf" table at the given effective temperatures.

    Reads the EEM_dwarf_UBVIJHK_colors_Teff table and linearly interpolates
    its columns (radius, mass, log(g), absolute G magnitude, and various
    colors) at the requested `Teff`. If `Teff` is empty, effective
    temperatures are first estimated from the Gaia Bp-Rp color.

    Parameters
    ----------
    Teff :: array of floats
        Effective temperature of the identified sources. If empty, it is
        derived from `Bpmag - Rpmag` via the table's Bp-Rp column.
    Gmag :: array of floats
        Gaia G magnitude of the identified sources. If given, U, I, Bp, and
        Rp magnitudes are also computed and added to the output.
    Bpmag :: array of floats
        Gaia BP magnitude of the identified sources (used when `Teff` is empty).
    Rpmag :: array of floats
        Gaia RP magnitude of the identified sources (used when `Teff` is empty).

    Return
    ------
    interp_output :: dict
        Interpolated table columns keyed by name (e.g. "Teff", "radius",
        "mass", "logg", "Gmag_abs", colors, and -- if `Gmag` is given --
        "Bpmag", "Rpmag", "Umag", "Imag").
    """
    table_fname = join(DATAPATH, "transit_data", "EEM_dwarf_UBVIJHK_colors_Teff.txt")
    _logg_sun = 4.438  # solar log(g) in cgs units
    # Output key -> column name in the EEM table header
    col_map = [
        ("radius", "R_Rsun"),
        ("mass", "Msun"),
        ("Gmag_abs", "M_G"),
        ("B-V", "B-V"),
        ("G-V", "G-V"),
        ("Bp-Rp", "Bp-Rp"),
        ("G-Rp", "G-Rp"),
        ("U-B", "U-B"),
        ("V-I", "V-Ic"),
    ]
    eem = {
        key: []
        for key in ["Teff", "radius", "mass", "logg", "Gmag_abs", "B-V",
                    "G-V", "Bp-Rp", "G-Rp", "U-B", "V-I"]
    }
    eem_hdr = []
    with open(table_fname, "r") as file:
        data_block = False
        for line in file:
            # The dwarf data are delimited by two lines starting with '#SpT';
            # the first such line is the column header.
            if line.startswith("#SpT"):
                if not data_block:
                    data_block = True
                else:
                    break
            if data_block:
                _line = line.rstrip().split()
                if len(eem_hdr) == 0:
                    eem_hdr = np.array(_line.copy())
                    continue
                ci = np.where(eem_hdr == "Teff")[0][0]
                eem["Teff"].append(float(_line[ci]))
                for key, col in col_map:
                    entry = _line[np.where(eem_hdr == col)[0][0]]
                    if entry.startswith("..."):
                        # missing value in the table
                        eem[key].append(np.nan)
                    elif entry.endswith(":"):
                        # a trailing ':' flags an uncertain value (e.g. M_G)
                        eem[key].append(float(entry[:-1]))
                    else:
                        eem[key].append(float(entry))
    for _lbl in eem.keys():
        eem[_lbl] = np.array(eem[_lbl])
    # Surface gravity: g = G M / R^2 => logg = logg_sun + log10(M) - 2 log10(R).
    # (A previous version subtracted 2*log10(R**2), i.e. 4*log10(R), which
    # double-counted the radius term.)
    eem["logg"] = _logg_sun + np.log10(eem["mass"]) - 2.0 * np.log10(eem["radius"])
    # Sort by Teff
    si = np.argsort(eem["Teff"])
    for _lbl in eem.keys():
        eem[_lbl] = eem[_lbl][si]
    # If Teff not specified, use Bpmag, Rpmag to get Teff
    if len(Teff) == 0:
        si = np.argsort(eem["Bp-Rp"])
        Teff = np.interp(
            np.asarray(Bpmag) - np.asarray(Rpmag), eem["Bp-Rp"][si], eem["Teff"][si]
        )
    # Interpolate the table at the input Teff (in log-space), using only the
    # finite table entries; values outside the grid clamp to the grid edges.
    interp_output = {"Teff": Teff}
    for _lbl in eem.keys():
        if _lbl != "Teff":
            vi = np.isfinite(eem[_lbl])
            interp_output[_lbl] = np.interp(
                np.log10(Teff), np.log10(eem["Teff"][vi]), eem[_lbl][vi]
            )
    # Calculate U, I (and Bp, Rp) and add to table (if Gmag provided)
    if len(Gmag) > 0:
        interp_output["Rpmag"] = Gmag - interp_output["G-Rp"]
        interp_output["Bpmag"] = interp_output["Bp-Rp"] + interp_output["Rpmag"]
        interp_output["Umag"] = (
            interp_output["U-B"] + interp_output["B-V"] - interp_output["G-V"] + Gmag
        )
        interp_output["Imag"] = Gmag - (interp_output["G-V"] + interp_output["V-I"])
    return interp_output
def redshift_wavelengths(wavelengths, redshift):
    """
    Shift rest-frame wavelengths to the observed frame.

    Parameters
    ----------
    wavelengths :: int or float or `astropy.Quantity` or array
        Wavelengths to redshift.

    redshift :: int or float
        Redshift to apply.

    Returns
    -------
    red_wavelengths :: array of floats
        Redshifted wavelengths.
    """
    # lambda_observed = lambda_rest * (1 + z)
    scale = 1 + redshift
    return wavelengths * scale
class SpectrumMixin:
"""
Mixin for generating spectra. To be used with `Source` object. Do not use directly!
"""
def _check_existing_spectrum(self, overwrite, quiet=False):
"""
Check for existing spectrum and if the source is a `CustomSource` instance. If the
object is a `CustomSource` instance, raise an error. If self.wavelengths and/or
self.spectrum is not None, raise an error if overwrite is False; otherwise print a
message notifying user that the method will overwrite the existing spectrum
(unless `quiet` is True).
Parameters
----------
overwrite :: bool
If False and self.wavelengths and/or self.spectrum is not None, raise an
error. If True and self.wavelengths and/or self.spectrum is not None, print a
message informing the user that the existing spectrum will be overwritten
(unless `quiet` is True).
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Returns
-------
None
"""
from .sources import CustomSource # avoid circular import error
if isinstance(self, CustomSource):
raise ValueError("A `CustomSource` object does not support a spectrum.")
if self.wavelengths is not None or self.spectrum is not None:
if not overwrite:
raise ValueError(
"wavelengths/spectrum already exists! "
+ "Use overwrite=True to overwrite wavelengths/spectrum."
)
elif not quiet:
print(
"INFO: Overwriting existing wavelengths/spectrum "
+ "with new wavelengths/spectrum."
)
def spectrum_erg_to_photon(self):
"""
Convert the spectrum that has units of ergs in the numerator to units of photons
in the numerator.
Attributes
----------
spectrum :: array of floats
Spectrum that has units of photons in the numerator.
Returns
-------
None
"""
if self.spectrum is None or self.wavelengths is None:
raise ValueError(
"Please generate a spectrum before converting ergs to photons."
)
self.spectrum /= calc_photon_energy(wavelength=self.wavelengths)[0]
def redshift_wavelengths(self, redshift):
"""
Apply redshift correction to wavelengths. Does not affect the y-axis values of the
spectrum.
Parameters
----------
redshift :: int or float
Redshift to apply.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The redshifted wavelengths of the spectrum, in angstroms.
Returns
-------
None
"""
if not isinstance(redshift, Number):
raise TypeError("redshift must be an int or float")
self.wavelengths *= 1 + redshift
def generate_uniform(
self, wavelengths, value, unit="ABmag", overwrite=False, quiet=False
):
"""
Generate a uniform spectrum equal to a constant value in either flam
(erg/s/cm^2/A), fnu (erg/s/cm^2/Hz), ABmag (AB magnitude), or STmag (ST
magnitude). Note that the computed (and stored) spectrum will always be in units
of flam.
Parameters
----------
wavelengths :: array of scalars or `astropy.Quantity` array
The wavelengths over which to generate the uniform spectrum. If an array of
scalars, it should be in angstrom.
value :: int or float
The value of the uniform spectrum in the specified `unit`.
unit :: "flam" or "fnu" or "ABmag" or "STmag"
The unit of the `value`: either flam (erg/s/cm^2/A), fnu (erg/s/cm^2/Hz),
ABmag (AB magnitude), or STmag (ST magnitude).
overwrite :: bool
If True, overwrite any existing wavelengths/spectrum. If False, raise an error
if wavelengths or spectrum is not None.
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The given wavelengths of the spectrum, converted into units of angstrom.
spectrum :: array of floats
Spectrum, in erg/s/cm^2/A, that is uniform in the specified unit.
Returns
-------
None
"""
#
# Check inputs
#
self._check_existing_spectrum(overwrite, quiet=quiet)
if not isinstance(wavelengths, u.Quantity):
wavelengths = wavelengths * u.AA
else:
wavelengths = wavelengths.to(u.AA)
if not isinstance(value, Number):
raise ValueError("`value` must be an int or float")
if unit not in ["flam", "fnu", "ABmag", "STmag"]:
raise ValueError("`unit` must be one of 'flam', 'fnu', 'ABmag', or 'STmag'")
if unit == "flam" or unit == "fnu":
if value <= 0:
raise ValueError("`value` must be > 0 if `unit` is 'flam' or 'fnu'")
#
# Generate spectrum
#
spectrum = np.full(np.shape(wavelengths), value, dtype=float)
if unit == "fnu":
spectrum = fnu_to_flam(
fnu=spectrum, wavelength=wavelengths, fnu_err=0.0, wavelength_err=0.0
)[0]
elif unit == "ABmag":
# Convert to fnu
spectrum = mag_to_flux(mag=spectrum, mag_err=0.0, zpt=-48.60)[0]
# Convert fnu to flam
spectrum = fnu_to_flam(
fnu=spectrum, wavelength=wavelengths, fnu_err=0.0, wavelength_err=0.0
)[0]
elif unit == "STmag":
# Convert directly to flam
spectrum = mag_to_flux(mag=spectrum, mag_err=0.0, zpt=-21.10)[0]
self.wavelengths = wavelengths
self.spectrum = spectrum
    def generate_bb(
        self,
        T,
        redshift=0.0,
        emissivity=1.0,
        wavelengths=None,
        limits=[0.09, 1.2] << u.um,  # shared default Quantity; copied before use below
        resolution=1 << u.nm,
        radius=1,
        dist=1 << u.kpc,
        overwrite=False,
        quiet=False,
    ):
        """
        Generate a blackbody (BB) spectrum (in erg/s/cm^2/A) using Planck's radiation law.
        The spectral radiance of the BB (erg/s/cm^2/A/sr) is normalized to a star of given
        radius and distance.

        Parameters
        ----------
          T :: int or float or `astropy.Quantity`
            Intrinsic blackbody temperature (i.e., the temperature of the BB at
            redshift=0). If int or float, the unit is assumed to be kelvin.

          redshift :: int or float
            Redshift of the blackbody.

          emissivity :: int or float
            Emissivity of the blackbody. (Technically, emissivity is unity per the
            definition of a BB).

          wavelengths :: array of floats or `astropy.Quantity` array
            The wavelengths over which to calculate the spectrum. If an array of floats,
            the unit is assumed to be in angstroms. If wavelengths is not None, the limits
            and resolution parameters are ignored. Note that the final wavelengths
            attribute will be an `astropy.Quantity` array in units of angstroms regardless
            of this input's units.

          limits :: list of 2 scalars or list of 2 `astropy.Quantity`
            List containing the lower (0th index) and upper (1st index) bounds for the BB
            spectrum's restframe wavelengths, inclusive. Limits should be > 0. If list
            elements are int or float, they are assumed to be in angstroms. This parameter
            is ignored if wavelengths is provided.

          resolution :: int or float or `astropy.Quantity`
            The wavelength resolution of the returned spectrum. If a scalar, it is assumed
            to be in units of angstroms. This parameter is ignored if wavelengths is
            provided.

          radius :: float or `astropy.Quantity`
            The radius of the source. If a scalar, it is assumed to be in units of solar
            radii.

          dist :: float or `astropy.Quantity`
            The distance to the blackbody. If a scalar, it is assumed to be in units of
            kpc.

          overwrite :: bool
            If True, overwrite any existing wavelengths/spectrum. If False, raise an error
            if wavelengths or spectrum is not None.

          quiet :: bool
            If True, do not print a message when overwriting an existing spectrum.

        Attributes
        ----------
          wavelengths :: `astropy.Quantity` array
            The redshifted wavelengths of the spectrum, in angstroms.

          spectrum :: array of floats
            BB spectrum in units of flam (erg/s/cm^2/A).

        Returns
        -------
          None
        """
        #
        # Check inputs
        #
        self._check_existing_spectrum(overwrite, quiet=quiet)
        # Accept temperature as a Quantity in any temperature unit (e.g., Celsius)
        if isinstance(T, u.Quantity):
            T = T.to(u.K, equivalencies=u.temperature()).value
        if wavelengths is None:
            # Build the restframe wavelength grid (in angstrom) from limits & resolution
            limits = list(limits)  # copy so the shared default Quantity is not mutated
            for i, lim in enumerate(limits):
                if isinstance(lim, u.Quantity):
                    limits[i] = lim.to(u.AA).value
            if isinstance(resolution, u.Quantity):
                resolution = resolution.to(u.AA).value
            # + 0.5 * resolution keeps the upper limit inclusive despite float rounding
            wavelengths = np.arange(limits[0], limits[1] + 0.5 * resolution, resolution)
        elif isinstance(wavelengths, u.Quantity):
            wavelengths = wavelengths.to(u.AA).value
        #
        # Generate BB spectrum with redshift
        #
        # Convert wavelengths from angstrom to cm (cgs, to match the constants below)
        wavelengths = wavelengths * 1e-8  # cm
        # Planck's radiation law:
        #   B_lambda(T) = (2 h c^2 / lambda^5) / (exp(h c / (lambda k_B T)) - 1)
        lightspeed = const.LIGHTSPEED.value  # cm/s
        prefactor = (2 * const.PLANCK_H.value * lightspeed * lightspeed) / (
            wavelengths**5
        )
        # expm1 computes exp(x) - 1 accurately when the exponent is small
        # (long wavelengths / high temperatures)
        denom = np.expm1(
            (const.PLANCK_H.value * lightspeed) / (wavelengths * const.K_B.value * T)
        )
        spectrum = prefactor / denom  # erg/s/cm^2/cm/sr
        #
        # Incorporate emissivity and convert per cm to per angstrom
        #
        spectrum *= 1e-8 * emissivity  # erg/s/cm^2/A/sr
        #
        # Factor in redshift and convert wavelengths back to angstroms
        #
        # NOTE(review): this resolves to the module-level `redshift_wavelengths`
        # helper (plain arrays in/out), not the instance method of the same name;
        # presumably it scales the wavelengths by (1 + redshift) — confirm at its
        # definition.
        wavelengths = redshift_wavelengths(wavelengths, redshift) * 1e8  # angstrom
        #
        # Assign to `Source` object attributes. Spectrum is in erg/s/cm^2/A
        #
        self.wavelengths = wavelengths * u.AA
        # Normalize the per-steradian spectral radiance to the flux received from a
        # star of the given radius at the given distance
        self.spectrum = NormMixin.norm_to_star(spectrum, radius=radius, dist=dist)  # flam
def generate_power_law(
self, ref_wavelength, wavelengths, exponent, overwrite=False, quiet=False
):
"""
Generate a spectrum with a shape following a power-law in some arbitrary unit. The
flux is defined so that it is equal to 1 at the reference wavelength.
The spectrum is calculated using the following formula:
```math
spectrum = (wavelengths / ref_wavelength)^exponent
```
where each variable is as defined in the Parameters documentation below.
Parameters
----------
ref_wavelength :: scalar or `astropy.Quantity`
The reference wavelength for the power-law. The spectrum at this wavelength
will have a flux of 1. If a scalar, it should be in angstrom.
wavelengths :: array of scalars or `astropy.Quantity` array
The wavelengths over which to calculate the power-law spectrum. If an array of
scalars, it should be in angstrom.
exponent :: int or float
The exponent for the power-law.
overwrite :: bool
If True, overwrite any existing wavelengths/spectrum. If False, raise an error
if wavelengths or spectrum is not None.
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The wavelengths of the spectrum, in angstroms.
spectrum :: array of floats
Spectrum following the power-law defined above, in arbitary units.
Returns
-------
None
"""
#
# Check inputs
#
self._check_existing_spectrum(overwrite, quiet=quiet)
if np.size(ref_wavelength) != 1:
raise ValueError(
"ref_wavelength must be a single scalar or `astropy.Quantity`."
)
if isinstance(wavelengths, u.Quantity):
wavelengths = wavelengths.to(u.AA).value
if isinstance(ref_wavelength, u.Quantity):
ref_wavelength = ref_wavelength.to(u.AA).value
#
# Power-law
#
spectrum = (wavelengths / ref_wavelength) ** exponent
self.wavelengths = wavelengths * u.AA
self.spectrum = spectrum
@staticmethod
def _generate_gaussian(
wavelengths,
spectrum,
center,
fwhm,
peak=None,
tot_flux=None,
add=True,
abs_peak=True,
):
"""
Add/subtract a Gaussian spectrum to/from an existing spectrum. This is useful for
representing emission lines (i.e., by adding a Gaussian source) or absorption
lines (i.e., by subtracting a Gaussian source). Note that the minimum/maximum
wavelengths of the source spectrum will not change.
The Gaussian spectrum can be represented by the following formulae (from
<https://pysynphot.readthedocs.io/en/latest/spectrum.html#gaussian-emission>):
```math
gaussian = peak / exp[(wavelengths - center)^2 / (2 * sigma^2)]
```
and
```math
sigma = fwhm / [2 * sqrt(2 * ln2)]
```
and
```math
peak = tot_flux / sqrt(2 * pi * sigma^2) <-- see Gaussian integral
```
where:
- gaussian is the Gaussian spectrum's flux in some arbitrary unit
- peak is the flux at the center of the Gaussian (i.e., the central wavelength)
- center is the central wavelength of the Gaussian
- wavelengths is the array of wavelengths over which to calculate the spectrum
- fwhm is the full-width at half-maximum of the Gaussian
- tot_flux is the total flux of the Gaussian under the curve
Parameters
----------
wavelengths :: array of floats or `astropy.Quantity` array
The wavelengths over which to calculate the Gaussian spectrum.
spectrum :: array of floats
The spectrum to/from which to add/subtract the Gaussian spectrum.
center :: scalar or `astropy.Quantity`
The central wavelength of the Gaussian. If a scalar, it is assumed to be in
angstrom.
fwhm :: scalar or `astropy.Quantity`
The full-width at half-maximum of the Gaussian. If a scalar, it is assumed to
be in angstrom.
peak :: int or float
The peak flux of the Gaussian (i.e., the flux at the center wavelength).
Exactly one of peak or tot_flux must be specified.
tot_flux :: int or float
The total flux under the curve. Exactly one of peak or tot_flux must be
specified.
add :: bool
If True, add the Gaussian spectrum to the existing spectrum. If False,
subtract the Gaussian from the existing spectrum.
abs_peak :: bool
If True, ensure that the peak of the emission line or dip of the absorption
line is at the given value. Otherwise, just add/subtract the given Gaussian
peak to/from the continuum.
Returns
-------
sorted_wavelengths :: array of floats or `astropy.Quantity` array
The wavelengths of the new spectrum. The shape of this array will be different
from the input `wavelengths` array.
sorted_spectrum :: array of floats
The spectrum with the Gaussian added/subtracted. The shape of this array will
be different from the input `spectrum` array.
"""
def _gaussian(_peak, _wavelengths, _center, _sigma):
_num_sigma = (_wavelengths - _center) / _sigma
return _peak / np.exp(0.5 * _num_sigma * _num_sigma)
#
# Check inputs
#
if np.size(center) != 1:
raise ValueError("center must be a single scalar or `astropy.Quantity`.")
if np.size(fwhm) != 1:
raise ValueError("fwhm must be a single scalar or `astropy.Quantity`.")
if (peak is None and tot_flux is None) or (
peak is not None and tot_flux is not None
):
raise ValueError("Exactly one of peak or tot_flux must be specified.")
if peak is not None and (
np.size(peak) != 1 or not isinstance(peak, Number) or peak <= 0
):
if add:
raise ValueError("peak must be a single int or float >= 0.")
else:
raise ValueError("dip must be a single int or float >= 0.")
elif tot_flux is not None and (
np.size(tot_flux) != 1 or not isinstance(tot_flux, Number) or tot_flux <= 0
):
raise ValueError("tot_flux must be a single int or float >= 0.")
# Convert lengths to angstrom
if isinstance(wavelengths, u.Quantity):
wavelengths_unit = wavelengths.unit
wavelengths = wavelengths.to(u.AA).value
else:
wavelengths_unit = None
if isinstance(center, u.Quantity):
center = center.to(u.AA).value
if isinstance(fwhm, u.Quantity):
fwhm = fwhm.to(u.AA).value
if fwhm <= 0:
raise ValueError("fwhm must be >= 0.")
#
# Gaussian spectrum
#
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
if peak is None:
peak = tot_flux / (np.sqrt(2 * np.pi) * sigma)
spectrum_interp = interp1d(
wavelengths, spectrum, kind="linear", bounds_error=False, fill_value=np.nan
)
if add:
if abs_peak:
# Ensure final peak is actually at desired value
peak -= spectrum_interp(center)
if peak < 0:
raise ValueError("peak of emission line below continuum.")
else:
if abs_peak:
# Ensure final dip is actually at desired value
center_val = spectrum_interp(center)
if peak > center_val:
raise ValueError("dip of absorption line above continuum.")
peak = center_val - peak
# Ensure Gaussian is well sampled by evaluating at the wavelengths within +/- 5
# sigma of the center. This also prevents overflow errors caused by calculations
# at wavelengths too far from the center.
gauss_wavelengths = center + np.arange(-5, 5.05, 0.1) * sigma
sorted_wavelengths = np.unique(np.concatenate((wavelengths, gauss_wavelengths)))
sorted_wavelengths = sorted_wavelengths[
(sorted_wavelengths >= wavelengths[0])
& (sorted_wavelengths <= wavelengths[-1])
]
sorted_spectrum = spectrum_interp(sorted_wavelengths)
in_range = (sorted_wavelengths >= gauss_wavelengths[0]) & (
sorted_wavelengths <= gauss_wavelengths[-1]
)
if add:
sorted_spectrum[in_range] += _gaussian(
peak, sorted_wavelengths[in_range], center, sigma
)
else:
sorted_spectrum[in_range] -= _gaussian(
peak, sorted_wavelengths[in_range], center, sigma
)
is_good = np.isfinite(sorted_wavelengths) & np.isfinite(sorted_spectrum)
sorted_wavelengths = sorted_wavelengths[is_good]
sorted_spectrum = sorted_spectrum[is_good]
is_negative_spectrum = sorted_spectrum < 0
if np.any(is_negative_spectrum):
sorted_spectrum[is_negative_spectrum] = 0.0
warnings.warn("Setting negative flux in spectrum to zero.", RuntimeWarning)
# print(
# "wavelengths with negative spectrum:",
# sorted_wavelengths[is_negative_spectrum],
# )
if wavelengths_unit is not None:
sorted_wavelengths <<= wavelengths_unit # convert to `astropy.Quantity` array
return sorted_wavelengths, sorted_spectrum
@staticmethod
def _generate_lorentzian(
wavelengths,
spectrum,
center,
fwhm,
peak=None,
tot_flux=None,
add=True,
abs_peak=True,
):
"""
Add/subtract a Lorentzian spectrum to/from an existing spectrum. This is useful
for representing emission lines (i.e., by adding a Lorentzian source) or
absorption lines (i.e., by subtracting a Lorentzian source). Note that the
minimum/maximum wavelengths of the source spectrum will not change.
The Lorentzian spectrum can be represented by the following formulae:
```math
lorentzian = peak / (1 + num_half_widths^2)
```
and
```math
num_half_widths = (wavelengths - center) / probable_error
```
and
```math
peak = tot_flux / (pi * probable_error) <-- see Cauchy distribution
```
and
```math
probable_error = fwhm / 2
```
where:
- lorentzian is the Lorentzian spectrum's flux in some arbitrary unit
- peak is the flux at the center (i.e., central wavelength) of the Lorentzian
- center is the central wavelength of the Lorentzian
- wavelengths is the array of wavelengths over which to calculate the spectrum
- fwhm is the full-width at half-maximum of the Lorentzian
- tot_flux is the total flux of the Lorentzian under the curve
Parameters
----------
wavelengths :: array of floats or `astropy.Quantity` array
The wavelengths over which to calculate the Lorentzian spectrum.
spectrum :: array of floats
The spectrum to/from which to add/subtract the Lorentzian spectrum.
center :: scalar or `astropy.Quantity`
The central wavelength of the Lorentzian. If a scalar, it is assumed to be in
angstrom.
fwhm :: scalar or `astropy.Quantity`
The full-width at half-maximum of the Lorentzian. If a scalar, it is assumed
to be in angstrom.
peak :: int or float
The peak flux of the Lorentzian (i.e., the flux at the center wavelength).
Exactly one of peak or tot_flux must be specified.
tot_flux :: int or float
The total flux under the curve. Exactly one of peak or tot_flux must be
specified.
add :: bool
If True, add the Lorentzian spectrum to the existing spectrum. If False,
subtract the Lorentzian from the existing spectrum.
abs_peak :: bool
If True, ensure that the peak of the emission line or dip of the absorption
line is at the given value. Otherwise, just add/subtract the given Lorentzian
peak to/from the continuum.
Returns
-------
sorted_wavelengths :: array of floats or `astropy.Quantity` array
The wavelengths of the new spectrum. The shape of this array will be different
from the input `wavelengths` array.
sorted_spectrum :: array of floats
The spectrum with the Lorentzian added/subtracted. The shape of this array
will be different from the input `spectrum` array.
"""
def _lorentzian(_peak, _wavelengths, _center, _probable_error):
_num_half_widths = (_wavelengths - _center) / _probable_error
return _peak / (1 + _num_half_widths * _num_half_widths)
#
# Check inputs
#
if np.size(center) != 1:
raise ValueError("center must be a single scalar or `astropy.Quantity`.")
if np.size(fwhm) != 1:
raise ValueError("fwhm must be a single scalar or `astropy.Quantity`.")
if (peak is None and tot_flux is None) or (
peak is not None and tot_flux is not None
):
raise ValueError("Exactly one of peak or tot_flux must be specified.")
if peak is not None and (
np.size(peak) != 1 or not isinstance(peak, Number) or peak <= 0
):
if add:
raise ValueError("peak must be a single int or float >= 0.")
else:
raise ValueError("dip must be a single int or float >= 0.")
elif tot_flux is not None and (
np.size(tot_flux) != 1 or not isinstance(tot_flux, Number) or tot_flux <= 0
):
raise ValueError("tot_flux must be a single int or float >= 0.")
# Convert lengths to angstrom
if isinstance(wavelengths, u.Quantity):
wavelengths_unit = wavelengths.unit
wavelengths = wavelengths.to(u.AA).value
else:
wavelengths_unit = None
if isinstance(center, u.Quantity):
center = center.to(u.AA).value
if isinstance(fwhm, u.Quantity):
fwhm = fwhm.to(u.AA).value
if fwhm <= 0:
raise ValueError("fwhm must be >= 0.")
#
# Lorentzian spectrum
#
probable_error = 0.5 * fwhm
if peak is None:
peak = tot_flux / (np.pi * probable_error)
spectrum_interp = interp1d(
wavelengths, spectrum, kind="linear", bounds_error=False, fill_value=np.nan
)
if add:
if abs_peak:
# Ensure final peak is actually at desired value
peak -= spectrum_interp(center)
if peak < 0:
raise ValueError("peak of emission line below continuum.")
else:
if abs_peak:
# Ensure final dip is actually at desired value
center_val = spectrum_interp(center)
if peak > center_val:
raise ValueError("dip of absorption line above continuum.")
peak = center_val - peak
# Ensure Lorentzian is well sampled by evaluating at the wavelengths within +/- 80
# units of probable error from the center. This also prevents overflow errors
# caused by calculations at wavelengths too far from the center.
lorentz_wavelengths = center + np.arange(-80, 80.25, 0.5) * probable_error
sorted_wavelengths = np.unique(np.concatenate((wavelengths, lorentz_wavelengths)))
sorted_wavelengths = sorted_wavelengths[
(sorted_wavelengths >= wavelengths[0])
& (sorted_wavelengths <= wavelengths[-1])
]
sorted_spectrum = spectrum_interp(sorted_wavelengths)
in_range = (sorted_wavelengths >= lorentz_wavelengths[0]) & (
sorted_wavelengths <= lorentz_wavelengths[-1]
)
if add:
sorted_spectrum[in_range] += _lorentzian(
peak, sorted_wavelengths[in_range], center, probable_error
)
else:
sorted_spectrum[in_range] -= _lorentzian(
peak, sorted_wavelengths[in_range], center, probable_error
)
is_good = np.isfinite(sorted_wavelengths) & np.isfinite(sorted_spectrum)
sorted_wavelengths = sorted_wavelengths[is_good]
sorted_spectrum = sorted_spectrum[is_good]
is_negative_spectrum = sorted_spectrum < 0
if np.any(is_negative_spectrum):
sorted_spectrum[is_negative_spectrum] = 0.0
warnings.warn("Setting negative flux in spectrum to zero.", RuntimeWarning)
# print(
# "wavelengths with negative spectrum:",
# sorted_wavelengths[is_negative_spectrum],
# )
if wavelengths_unit is not None:
sorted_wavelengths <<= wavelengths_unit # convert to `astropy.Quantity` array
return sorted_wavelengths, sorted_spectrum
def add_emission_line(
self, center, fwhm, peak=None, tot_flux=None, shape="gaussian", abs_peak=False
):
"""
Add a well-sampled emission line to the spectrum. Note that the minimum/maximum
wavelengths of the source spectrum will not change.
For generating an emission line spectrum (i.e., not adding/subtracting an emission
line to/from a spectrum), see the `generate_emission_line()` method instead.
N.B. the order in which emission/absorption lines are added will affect the final
spectrum if using the abs_peak/abs_dip flag. For instance, adding an emission line
on top of a continuum then specifying an absorption line with an absolute dip is
not the same as specifying an absorption line with an absolute dip then adding an
emission line on top of the new continuum.
Parameters
----------
center :: scalar or `astropy.Quantity`
The central wavelength of the emission line. If a scalar, it is assumed to be
in angstrom.
fwhm :: scalar or `astropy.Quantity`
The full-width at half-maximum of the emission line. If a scalar, it is
assumed to be in angstrom.
peak :: int or float
The peak flux of the emission line (i.e., the flux at the center wavelength).
Exactly one of peak or tot_flux must be specified.
tot_flux :: int or float
The total flux under the curve. Exactly one of peak or tot_flux must be
specified.
shape :: "gaussian" or "lorentzian"
The emission line profile.
abs_peak :: bool
If True, ensure that the peak of the emission line is at the given value.
Otherwise, just add the given emission line to the spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The wavelengths of the spectrum including the emission line, in angstroms.
This wavelengths array will have a different shape than the previous
wavelengths array.
spectrum :: array of floats
The spectrum with the emission line added. This spectrum array will have a
different shape than the previous spectrum array.
Returns
-------
None
"""
if self.wavelengths is None or self.spectrum is None:
raise ValueError("Please generate or load a spectrum first")
if shape == "gaussian":
spectrum_func = SpectrumMixin._generate_gaussian
elif shape == "lorentzian":
spectrum_func = SpectrumMixin._generate_lorentzian
else:
raise ValueError("Emission line shape must be 'gaussian' or 'lorentzian'")
self.wavelengths, self.spectrum = spectrum_func(
self.wavelengths,
self.spectrum,
center,
fwhm,
peak=peak,
tot_flux=tot_flux,
add=True,
abs_peak=abs_peak,
)
def add_absorption_line(
self, center, fwhm, dip=None, tot_flux=None, shape="gaussian", abs_dip=False
):
"""
Add a well-sampled absorption line to the spectrum. Note that the minimum/maximum
wavelengths of the source spectrum will not change.
N.B. the order in which emission/absorption lines are added will affect the final
spectrum if using the abs_peak/abs_dip flag. For instance, adding an emission line
on top of a continuum then specifying an absorption line with an absolute dip is
not the same as specifying an absorption line with an absolute dip then adding an
emission line on top of the new continuum.
Parameters
----------
center :: scalar or `astropy.Quantity`
The central wavelength of the absorption line. If a scalar, it is assumed to
be in angstrom.
fwhm :: scalar or `astropy.Quantity`
The full-width at half-maximum of the absorption line. If a scalar, it is
assumed to be in angstrom.
dip :: int or float
The minimum flux of the absorption line (i.e., the flux at the center
wavelength). Exactly one of dip or tot_flux must be specified.
tot_flux :: int or float
The total flux under (above) the curve. Exactly one of dip or tot_flux must
be specified.
shape :: "gaussian" or "lorentzian"
The absorption line profile.
abs_dip :: bool
If True, ensure that the dip of the absorption line is at the given value.
Otherwise, just subtract the given absorption line from the spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The wavelengths of the spectrum including the absorption line, in angstroms.
This wavelengths array will have a different shape than the previous
wavelengths array.
spectrum :: array of floats
The spectrum with the absorption line subtracted. This spectrum array will
have a different shape than the previous spectrum array.
Returns
-------
None
"""
if self.wavelengths is None or self.spectrum is None:
raise ValueError("Please generate or load a spectrum first")
if shape == "gaussian":
spectrum_func = SpectrumMixin._generate_gaussian
elif shape == "lorentzian":
spectrum_func = SpectrumMixin._generate_lorentzian
else:
raise ValueError("Absorption line shape must be 'gaussian' or 'lorentzian'")
self.wavelengths, self.spectrum = spectrum_func(
self.wavelengths,
self.spectrum,
center,
fwhm,
peak=dip,
tot_flux=tot_flux,
add=False,
abs_peak=abs_dip,
)
def generate_emission_line(
self,
center,
fwhm,
peak=None,
tot_flux=None,
shape="gaussian",
limits=[100, 1200] << u.nm,
overwrite=False,
quiet=False,
):
"""
Generate a spectrum representing a single emission line. The resolution of the
spectrum is at least 1% of the wavelength range.
To add/subtract a spectral line to/from a spectrum, see the `add_emission_line()`
and `add_absorption_line()` methods instead.
Parameters
----------
center :: scalar or `astropy.Quantity`
The central wavelength of the emission line. If a scalar, it is assumed to be
in angstrom.
fwhm :: scalar or `astropy.Quantity`
The full-width at half-maximum of the emission line. If a scalar, it is
assumed to be in angstrom.
peak :: int or float
The peak flux of the emission line (i.e., the flux at the center wavelength).
Exactly one of peak or tot_flux must be specified.
tot_flux :: int or float
The total flux under the curve. Exactly one of peak or tot_flux must be
specified.
shape :: "gaussian" or "lorentzian"
The emission line profile.
limits :: 2-element 1D array of `astropy.Quantity` or scalars
The [min, max] wavelengths of the spectrum. If the elements are scalars, it is
assumed to be in angstrom. The center of the emission line must be within
these limits.
overwrite :: bool
If True, overwrite any existing wavelengths/spectrum. If False, raise an error
if wavelengths or spectrum is not None.
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The wavelengths of the spectrum, in angstroms.
spectrum :: array of floats
The emission line spectrum in units of flam (erg/s/cm^2/A).
Returns
-------
None
"""
#
# Check inputs
#
self._check_existing_spectrum(overwrite, quiet=quiet)
if isinstance(center, u.Quantity):
center = center.to(u.AA).value
limits_AA = []
for limit in limits:
if isinstance(limit, u.Quantity):
limit = limit.to(u.AA).value
limits_AA.append(limit)
if np.shape(limits_AA) != (2,):
raise ValueError(
"limits must be a 2-element 1D array of `astropy.Quantity` or scalars"
)
if (center < limits_AA[0]) or (center > limits_AA[1]):
raise ValueError("center must be within limits")
if shape == "gaussian":
spectrum_func = SpectrumMixin._generate_gaussian
elif shape == "lorentzian":
spectrum_func = SpectrumMixin._generate_lorentzian
else:
raise ValueError("Emission line shape must be 'gaussian' or 'lorentzian'")
#
# Generate spectrum
#
# Assume flat (zero) spectrum as baseline. Then add emission line on top of this
resolution = (limits_AA[1] - limits_AA[0]) / 100
wavelengths = np.arange(limits_AA[0], limits_AA[1] + 0.5 * resolution, resolution)
self.wavelengths, self.spectrum = spectrum_func(
wavelengths=wavelengths * u.AA,
spectrum=np.zeros(len(wavelengths), dtype=float),
center=center,
fwhm=fwhm,
peak=peak,
tot_flux=tot_flux,
add=True,
abs_peak=False,
)
def set_spectrum(self, wavelengths, spectrum, unit, overwrite=False, quiet=False):
"""
Set the spectrum of the source based on the input arrays. To use a spectrum from a
file, see the `use_custom_spectrum()` method.
The input spectrum should have units of either flam (erg/s/cm^2/A), fnu
(erg/s/cm^2/Hz), ABmag (AB magnitude), or STmag (ST magnitude). Note that the
computed (and stored) spectrum will always be in units of flam.
Parameters
----------
wavelengths :: array of scalars or `astropy.Quantity` array
The wavelengths over which to generate the uniform spectrum. If an array of
scalars, it should be in angstrom. This should be a 1D array with the same
length as the `spectrum` array.
specturm :: array of scalars
The value of the spectrum at the given wavelengths, in units of `unit`. This
should be a 1D array with the same length as the `wavelengths` array.
unit :: "flam" or "fnu" or "ABmag" or "STmag"
The unit of the `spectrum` array: either flam (erg/s/cm^2/A), fnu
(erg/s/cm^2/Hz), ABmag (AB magnitude), or STmag (ST magnitude).
overwrite :: bool
If True, overwrite any existing wavelengths/spectrum. If False, raise an error
if wavelengths or spectrum is not None.
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Attributes
----------
wavelengths :: astropy.Quantity` array
The given wavelengths of the spectrum, converted into units of angstrom.
spectrum :: array of floats
The given spectrum, converted into units of erg/s/cm^2/A.
Returns
-------
None
"""
#
# Check inputs
#
self._check_existing_spectrum(overwrite, quiet=quiet)
if not isinstance(wavelengths, u.Quantity):
wavelengths = wavelengths * u.AA
else:
wavelengths = wavelengths.to(u.AA)
if isinstance(spectrum, u.Quantity):
raise TypeError("`spectrum` must be an array of scalars")
if np.shape(spectrum) != np.shape(wavelengths) or np.ndim(spectrum) != 1:
raise ValueError(
"`wavelengths` and `spectrum` must be 1D arrays of the same shape"
)
if unit not in ["flam", "fnu", "ABmag", "STmag"]:
raise ValueError("`unit` must be one of 'flam', 'fnu', 'ABmag', or 'STmag'")
if unit == "flam" or unit == "fnu":
if np.any(spectrum) <= 0:
raise ValueError(
"All `spectrum` values must be > 0 if `unit` is 'flam' or 'fnu'"
)
#
# Convert spectrum to units of flam (erg/s/cm^2/A)
#
if unit == "fnu":
spectrum = fnu_to_flam(
fnu=spectrum, wavelength=wavelengths, fnu_err=0.0, wavelength_err=0.0
)[0]
elif unit == "ABmag":
# Convert to fnu
spectrum = mag_to_flux(mag=spectrum, mag_err=0.0, zpt=-48.60)[0]
# Convert fnu to flam
spectrum = fnu_to_flam(
fnu=spectrum, wavelength=wavelengths, fnu_err=0.0, wavelength_err=0.0
)[0]
elif unit == "STmag":
# Convert directly to flam
spectrum = mag_to_flux(mag=spectrum, mag_err=0.0, zpt=-21.10)[0]
self.wavelengths = wavelengths
self.spectrum = spectrum
def use_custom_spectrum(
self, filepath, wavelength_unit=u.AA, overwrite=False, quiet=False
):
"""
Use custom spectrum from an ASCII or FITS file. To use a spectrum from an array,
use the `set_spectrum()` method.
Parameters
----------
filepath :: str
The absolute path to the file containing the spectrum.
If the file is in ASCII format, the first column should contain the
wavelengths in `wavelength_units` and the second column containing the
spectrum in flam (erg/s/cm^2/A); the columns should be separated by a constant
number of spaces. Lines starting with a hash (#) will be ignored. The file
extension must not be .fit or .fits.
If the file is in FITS format, the first field (index 0) should contain the
wavelengths in `wavelength_units` and the second field (index 1) should
contain the spectrum in flam (erg/s/cm^2/A). The file extension must be .fit
or .fits.
wavelength_unit :: `astropy.Quantity` length unit
The unit of the wavelengths in the file (e.g., u.AA for angstrom, u.nm for
nanometer, u.um for micrometer, etc.)
overwrite :: bool
If True, overwrite any existing wavelengths/spectrum. If False, raise an error
if wavelengths or spectrum is not None.
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The wavelengths of the spectrum, in angstroms.
spectrum :: array of floats
Source spectrum in units of flam (erg/s/cm^2/A).
Returns
-------
None
"""
#
# Check inputs
#
self._check_existing_spectrum(overwrite, quiet=quiet)
if not isinstance(filepath, str):
raise TypeError("filepath must be a string.")
try:
_ = wavelength_unit.to(u.AA)
except Exception:
raise TypeError("wavelength_units must be an `astropy.Quantity` length unit.")
#
# Load spectrum
#
file_ext = filepath.split(".")[-1].lower()
try:
if file_ext == "fit" or file_ext == "fits":
data = fits.getdata(filepath)
self.wavelengths = (data.field(0)[0] * wavelength_unit).to(u.AA)
# Dhananjhay's comment for `data.field(1)` below - index 0 to access the
# corresponding arrays; data variable contains embedded arrays
self.spectrum = data.field(1)[0]
else:
data = pd.read_csv(
filepath,
sep=r"\s+",
header=None,
comment="#",
engine="python",
) # sep=" +" is Python regex to match a variable number of spaces
self.wavelengths = (data[0].values * wavelength_unit).to(u.AA)
self.spectrum = data[1].values
except Exception:
raise RuntimeError(
"Could not read spectrum from file. File must be in ASCII or FITS format "
+ "and adhere to the guidelines specified in the docstring."
)
def use_galaxy_spectrum(self, gal_type, overwrite=False, quiet=False):
"""
Use one of the predefined galaxy spectra. These non-uniformly sampled spectra are
from Fioc & Rocca-Volmerange (1997)
<https://ui.adsabs.harvard.edu/abs/1997A%26A...326..950F/abstract>. In particular,
the data files were downloaded from the Gemini Observatory Control Software (OCS)
GitHub repository:
<https://github.com/gemini-hlsw/ocs/blob/develop/bundle/edu.gemini.itc/src/main/resources/sed/non_stellar/elliptical-galaxy.nm>
and
<https://github.com/gemini-hlsw/ocs/blob/develop/bundle/edu.gemini.itc/src/main/resources/sed/non_stellar/spiral-galaxy.nm>.
Parameters
----------
gal_type :: "elliptical" or "spiral"
The galaxy morphology. The elliptical galaxy (class T=-5, -4) and spiral
galaxy (type Sc, class T=5) spectra both run from 22-9698 nm.
overwrite :: bool
If True, overwrite any existing wavelengths/spectrum. If False, raise an error
if wavelengths or spectrum is not None.
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The wavelengths of the spectrum, in angstroms.
spectrum :: array of floats
Galaxy spectrum in arbitrary units, normalized such that it is equal to 1 at
550 nm.
Returns
-------
None
"""
#
# Check inputs
#
self._check_existing_spectrum(overwrite, quiet=quiet)
if gal_type == "elliptical" or gal_type == "spiral":
filepath = join(DATAPATH, "galaxy_spectra", f"{gal_type}_galaxy.txt")
else:
raise ValueError("Galaxy type must be 'elliptical' or 'spiral'")
data = pd.read_csv(
filepath,
sep=" +",
header=None,
comment="#",
engine="python",
) # sep=" +" is Python regex to match a variable number of spaces
self.wavelengths = (data[0].values * u.nm).to(u.AA)
self.spectrum = data[1].values
def _calc_xy(self):
"""
Internal function that converts (ra, dec) of the identified Gaia sources to pixel
positions on the CCD.
Attributes
----------
gaia :: dict of arrays
Dictionary containing the interpolated parameters of the queried Gaia sources.
Return
------
None
"""
# Get projected CCD coordinates (centered on target)
wcs_input_dict = {
"CTYPE1": "RA---TAN",
"CUNIT1": "deg",
"CDELT1": self.fov.value / self.ccd_dim[0],
"CRPIX1": int(self.ccd_dim[0] / 2),
"CRVAL1": self.gaia["ra"][0],
"NAXIS1": self.ccd_dim[0],
"CTYPE2": "DEC--TAN",
"CUNIT2": "deg",
"CDELT2": self.fov.value / self.ccd_dim[1],
"CRPIX2": int(self.ccd_dim[1] / 2),
"CRVAL2": self.gaia["dec"][0],
"NAXIS2": self.ccd_dim[1],
"CROTA2": self.fov_pa.value,
}
wcs = WCS(wcs_input_dict)
# Convert Gaia source ra,dec -> x,y
g_coord = SkyCoord(
ra=self.gaia["ra"],
dec=self.gaia["dec"],
unit=(u.degree, u.degree),
frame="icrs",
)
_x, _y = wcs.world_to_pixel(g_coord)
self.gaia["x"], self.gaia["y"] = _x, _y
self.gaia["wcs"] = wcs
def _search_gaia(self):
"""
Internal Function. Search Gaia catalog for target and background stars.
Parameters
----------
TelescopeObj :: `castor_etc.Telescope` instance
The `Telescope` object passed down to the _calc_xy function.
Attributes
----------
gaia :: dict of arrays
Dictionary containing the interpolated parameters of the queried Gaia sources.
Returns
-------
None
"""
try:
srch_str = (
"SELECT *, DISTANCE(POINT({:.6f},{:.6f}), POINT(ra,dec))".format(
self.ra.value, self.dec.value
)
+ "AS ang_sep FROM gaiadr2.gaia_source "
+ "WHERE 1 = CONTAINS( POINT({:.6f},{:.6f}), ".format(
self.ra.value, self.dec.value
)
+ "CIRCLE(ra,dec,{:.2f}))".format(self.srch_rad.value)
+ "AND phot_g_mean_mag <={:.2f}".format(self.srch_Gmax)
+ "AND parallax IS NOT NULL ORDER BY ang_sep ASC"
)
job = Gaia.launch_job(srch_str)
results = job.get_results()
ra, dec, Gmag, Bpmag, Rpmag = (
np.array(results["ra"]),
np.array(results["dec"]),
np.array(results["phot_g_mean_mag"]),
np.array(results["phot_bp_mean_mag"]),
np.array(results["phot_rp_mean_mag"]),
)
print("{:.0f} Gaia source(s) found".format(len(ra)))
# Identify target within Gaia search (brightest target within sep_max)
sep_max = 8.0 / 3600.0 # * u.deg
c_coord = SkyCoord(
ra=self.ra.value,
dec=self.dec.value,
unit=(u.degree, u.degree),
frame="icrs",
)
g_coord = SkyCoord(ra=ra, dec=dec, unit=(u.degree, u.degree), frame="icrs")
sep = g_coord.separation(c_coord).degree
ti = np.where(sep < sep_max)[0]
if len(ti) > 0:
ti = ti[np.argmin(Gmag[ti])]
else:
ti = np.argmin(sep)
print(
f"No Gaia sources found within {sep_max * 3600.0:.2f} arcsec of "
+ "specified RA and DEC"
)
gi = np.delete(np.arange(len(ra)), ti)
# Place target at top of list, sort remaining sources by brightness.
si = np.argsort(Gmag[gi])
scalars = [ra, dec, Gmag, Bpmag, Rpmag, sep]
for scalar in scalars:
scalar = np.hstack([scalar[ti], scalar[gi[si]]])
# Dhananjhay - I believe this way is more efficient than using self.srch_nmax
# multiple time <Need to check>.
nmax = self.srch_nmax
if len(ra) > nmax:
ra = ra[:nmax]
dec = dec[:nmax]
Gmag = Gmag[:nmax]
Bpmag = Bpmag[:nmax]
Rpmag = Rpmag[:nmax]
sep = sep[:nmax]
self.gaia = {
"ra": ra,
"dec": dec,
"x": np.zeros_like(ra),
"y": np.zeros_like(ra),
"Gmag": Gmag,
"Bpmag": Bpmag,
"Rpmag": Rpmag,
"results": None,
"wcs": None,
}
self._calc_xy()
except Exception:
raise RuntimeError(
"Gaia search failed. Please revise the input target parameters."
)
    def _specify_target_parameters(self, run_gaia_search=True):
        """
        Internal function. Used to specify target Gaia parameters and populate the gaia
        attribute of the PointSource. If temperature, Teff, and Gaia G magnitude, Gmag, of
        the target are specified, then Gaia catalog query will not run.

        Parameters
        ----------
        run_gaia_search :: bool
            If Teff and Gmag of the target are specified, then this attribute is set to
            False and the Gaia query is called off.

        Attributes
        ----------
        gaia :: dict of arrays
            Dictionary containing the interpolated parameters of the queried Gaia sources.

        Returns
        -------
        None
        """
        # CCD dimensions; used to place the target at the CCD center when no Gaia
        # query is performed.
        xout = self.ccd_dim[0]
        yout = self.ccd_dim[1]
        # `&` works here because both operands are Python bools; `and` would be the
        # idiomatic (short-circuiting) choice.
        if (self.Gmag is not None) & (self.Teff is not None):
            run_gaia_search = False
        if run_gaia_search:
            # Populates self.gaia with ra/dec/x/y/Gmag/Bpmag/Rpmag (no Teff/logg/
            # radius/metallicity/Gmag_abs keys yet - those are added further below).
            self._search_gaia()
        else:
            # Single-source dict (target only), centered on the CCD; all physical
            # parameters start as NaN and are filled from user input / EEM table.
            self.gaia = {
                "ra": np.array([self.ra.value]),
                "dec": np.array([self.dec.value]),
                "x": np.zeros(1) + xout / 2,
                "y": np.zeros(1) + yout / 2,
                "Gmag": np.array([np.nan]),
                "Bpmag": np.array([np.nan]),
                "Rpmag": np.array([np.nan]),
                "Gmag_abs": np.array([np.nan]),
                "logg": np.array([np.nan]),
                "radius": np.array([np.nan]),
                "metallicity": np.array([np.nan]),
                "Teff": np.array([np.nan]),
                "results": None,  # Full results from Gaia query
                "wcs": None,  # wcs.world_to_pixel object used for projection transformation
            }
            self._calc_xy()
        # Replace target parameters with user-specified values
        # NOTE(review): in the Gaia-search path, self.gaia lacks the "Teff", "logg",
        # "radius", and "metallicity" keys at this point, so a user-specified value
        # for one of those (without also giving both Teff and Gmag) would raise
        # KeyError here - confirm whether those keys should be pre-created instead.
        scalars = [
            self.Teff,
            self.Gmag,
            self.logg,
            self.radius,
            self.metallicity,
            self.Bpmag,
            self.Rpmag,
        ]
        scalars_str = ["Teff", "Gmag", "logg", "radius", "metallicity", "Bpmag", "Rpmag"]
        for scalar, scalar_str in zip(scalars, scalars_str):
            if scalar is not None:
                # Index 0 is always the target.
                self.gaia[scalar_str][0] = scalar
        # Interpolate EEM_table using Teff or (Bpmag, Rpmag)
        # The "Teff" key exists only when the dict was built in the no-search branch
        # above, so this effectively distinguishes the two construction paths.
        if "Teff" in self.gaia.keys():
            interp_eem = interp_EEM_table(Teff=self.gaia["Teff"], Gmag=self.gaia["Gmag"])
        else:
            interp_eem = interp_EEM_table(
                Gmag=self.gaia["Gmag"], Bpmag=self.gaia["Bpmag"], Rpmag=self.gaia["Rpmag"]
            )
        if run_gaia_search:
            # Search path: adopt the interpolated physical parameters wholesale and
            # assume solar metallicity for all sources.
            self.gaia["Teff"] = interp_eem["Teff"]
            self.gaia["logg"] = interp_eem["logg"]
            self.gaia["radius"] = interp_eem["radius"]
            self.gaia["metallicity"] = np.zeros_like(self.gaia["Teff"])
            self.gaia["Gmag_abs"] = interp_eem["Gmag_abs"]
        # If certain values are missing, replace with interpolated EEM_table values
        for _lbl in ["Bpmag", "Rpmag", "logg", "radius", "Gmag_abs"]:
            ti = np.isnan(self.gaia[_lbl])
            self.gaia[_lbl][ti] = interp_eem[_lbl][ti]
        # Replace missing metallicities with solar value
        ti = np.isnan(self.gaia["metallicity"])
        self.gaia["metallicity"][ti] = 0.0
def use_gaia_spectrum(
self,
TelescopeObj,
ra=None,
dec=None,
srch_Gmax=21.0,
srch_nmax=100,
srch_rad=None,
Teff=None,
Gmag=None,
logg=None,
radius=None,
metallicity=None,
Bpmag=None,
Rpmag=None,
stellar_model_grid="ATLAS9",
stellar_model_dir=None,
bkg_sources=True,
fov=None,
fov_pa=0 * u.deg,
overwrite=False,
quiet=False,
):
"""
Use a spectrum from either the ATLAS9 catalog or the Bt-Sett1 catalog containing
flux spectra for numerous stellar model atmospheres.
Parameters
----------
TelescopeObj :: `castor_etc.Telescope` instance
'The `Telescope` object containing the field of view (FoV) value required to
calculate the search radius for querying the Gaia database.
ra :: `astropy.Quantity` int or float
Right ascension of the target in degree
dec :: `astropy.Quantity` int or float
Declination of the target in degree
srch_Gmax :: int or float
Maximum Gaia G magnitude for Gaia catalog query (applies to both the target
and the guide stars)
srch_nmax :: int
Maximum Gaia sources to include.
srch_rad :: `astropy.Quantity`
Search radius, in degree, for Gaia catalog query (applies to both the target
and the guide stars)
Teff :: int or float
Effective temperature of the target in Kelvin
Gmag :: int or float
Gaia G magnitude of the target used to query the Gaia catalog
logg :: int or float
log(g) value of the target
radius :: int or float
radius of the target <ask james about units>
metallicity :: int or float
metallicity of the target
Bpmag :: int or float
Gaia BP magnitude of the target
Rpmag :: int or float
Gaia RP magnitude of the target
stellar_model_grid :: str
Stellar model grid, 'ATLAS9' or 'BTSettl', for selecting spectrum of the
source according to the interpolated stellar atmosphere model.
stellar_model_dir :: str
Path to the stellar models directory used by the `getStarData` function to
calculate spectrum of stars.
bkg_sources :: bool
If True, then background Gaia sources are included during the transit
simulation calculation. If False, nmax is set to 1.
fov :: int or float
Full width FoV in degree
fov_pa :: `astropy.Quantity`
Field of view position angle
overwrite :: bool
If True, overwrite any existing wavelengths/spectrum. If False, raise an error
if wavelengths or spectrum is not None.
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The wavelengths of the spectrum, in angstroms,
spectrum :: array of floats
Stellar spectrum in flam units
ra :: `astropy.Quantity` angle
Right ascension of the target in degree
dec :: `astropy.Quantity` angle
Declination of the target in degree
srch_Gmax :: int or float
Maximum Gaia G magnitude for Gaia catalog query (applies to both the target
and the guide stars)
srch_nmax :: int
Maximum Gaia sources to include.
srch_rad :: `astropy.Quantity`
Search radius, in degree, for Gaia catalog query (applies to both the target
and the guide stars)
Teff :: int or float
Effective temperature of the target in Kelvin
Gmag :: int or float
Gaia G magnitude of the target used to query the Gaia catalog
logg :: int or float
log(g) value of the target
radius :: int or float
radius of the target <ask james about units>
metallicity :: int or float
metallicity of the target
Bpmag :: int or float
Gaia BP magnitude of the target
Rpmag :: int or float
Gaia RP magnitude of the target
stellar_model_grid :: str
Stellar model grid, 'ATLAS9' or 'BTSettl', for selecting spectrum of the
source according to the interpolated stellar atmosphere model.
stellar_model_dir :: str
Path to the stellar models directory used by the `getStarData` function to
calculate spectrum of stars.
bkg_sources :: bool
If True, then background Gaia sources are included during the transit
simulation calculation. If False, nmax is set to 1.
fov :: int or float
Full width FoV in degree
fov_pa :: `astropy.Quantity`
Field of view position angle
ccd_dim :: int list
CCD dimesions adopted from TelescopeObj
Returns
-------
None
"""
if not isinstance(stellar_model_dir, str):
raise TypeError(
"Please specify the path to the stellar models directory"
+ "(e.g., ../../stellar_models)"
)
if not isinstance(stellar_model_grid, str):
raise TypeError(
"stellar_model_grid should be a string" + "(e.g., 'ATLAS9' or 'BTSettl')"
)
if not isinstance(bkg_sources, bool):
raise TypeError("bkg_sources should be either True or False")
if not isinstance(srch_nmax, int):
raise TypeError("srch_nmax must an integer number.")
scalars = [ra, dec, fov, fov_pa, srch_rad]
scalars_str = ["ra", "dec", "fov", "fov_pa", "srch_rad"]
for scalar, scalar_str in zip(scalars, scalars_str):
if scalar is not None:
if not isinstance(scalar, u.Quantity):
raise TypeError(
f"{scalar_str} must `astropy.Quantity` angles"
+ "(e.g., u.arcsec, u.deg)"
)
scalars = [srch_Gmax, Teff, Gmag, logg, radius, metallicity, Bpmag, Rpmag]
scalars_str = [
"srch_Gmax",
"Teff",
"Gmag",
"logg",
"radius",
"metallicity",
"Bpmag",
"Rpmag",
]
for scalar, scalar_str in zip(scalars, scalars_str):
if scalar is not None:
if not isinstance(scalar, Number):
raise TypeError(f"{scalar_str} must be an int or float")
if fov is None:
fov = TelescopeObj.transit_fov.to(u.deg)
if srch_rad is None:
srch_rad = (fov / 2.0) * np.sqrt(2.0)
if bkg_sources is False:
srch_nmax = 1 # Only the target Gaia source.
# Intializing PointSource attributes after checking the inputs
self.ra = ra.to(u.deg)
self.dec = dec.to(u.deg)
self.srch_Gmax = srch_Gmax
self.srch_nmax = srch_nmax
self.srch_rad = srch_rad.to(u.deg)
self.Teff = Teff
self.Gmag = Gmag
self.logg = logg
self.radius = radius
self.metallicity = metallicity
self.Bpmag = Bpmag
self.Rpmag = Rpmag
self.stellar_model_grid = stellar_model_grid
self.bkg_sources = bkg_sources
self.fov = fov.to(u.deg)
self.fov_pa = fov_pa.to(u.deg)
self.ccd_dim = TelescopeObj.transit_ccd_dim
# Performs a gaia query and interpolates potential sources' parameters, i.e., Teff, radius...
self._specify_target_parameters()
self._check_existing_spectrum(overwrite, quiet=quiet)
try:
# Zero index because we are only concerned with the target here.
wavelengths, spectrum = getStarData(
temperature=self.gaia["Teff"][0],
metallicity=self.gaia["metallicity"][0],
logg=self.gaia["logg"][0],
model_grid=stellar_model_grid,
stellar_model_dir=stellar_model_dir,
)
# Estimate distance using Gmag and estimated Gabs
dpc = 10 ** ((self.gaia["Gmag"][0] - self.gaia["Gmag_abs"][0]) / 5.0 + 1)
# Scale surface flux to observed flux using esimated dpc and radius
spectrum *= ((self.gaia["radius"][0] * R_sun) / (dpc * pc)) ** 2
# CASTOR wavelength range.
_range = np.where((wavelengths.to(u.AA).value <= 11000))
self.wavelengths = wavelengths[_range]
self.spectrum = spectrum[_range]
except Exception:
raise RuntimeError(
f"Could not load the {stellar_model_grid} data for some reason. Please contact the developer with a minimal"
+ "working example."
)
def use_pickles_spectrum(self, spectral_class, overwrite=False, quiet=False):
"""
Use a spectrum from the Pickles catalog
(<https://ui.adsabs.harvard.edu/abs/1998PASP..110..863P/abstract>) containing
spectra for numerous stellar spectral classes.
A table containing valid `spectral_class` inputs is at the end of this docstring.
Parameters
----------
spectral_class :: str from the "Table of valid `spectral_class` inputs" below
The spectral type of the star.
overwrite :: bool
If True, overwrite any existing wavelengths/spectrum. If False, raise an error
if wavelengths or spectrum is not None.
quiet :: bool
If True, do not print a message when overwriting an existing spectrum.
Attributes
----------
wavelengths :: `astropy.Quantity` array
The wavelengths of the spectrum, in angstroms.
spectrum :: array of floats
Stellar spectrum in arbitrary units, normalized such that it is equal to 1 at
5556 angstrom.
Returns
-------
None
Table of valid `spectral_class` inputs
-------------------------------------
```text
spectral_class Description (& wavelength range)
-------------- --------------------------------
"a0i" A0 I (1150-10620 A)
"a0iii" A0 III (1150-10620 A)
"a0iv" A0 IV (1150-10620 A)
"a0v" A0 V (1150-10620 A)
"a2i" A2 I (1150-10620 A)
"a2v" A2 V (1150-10620 A)
"a3iii" A3 III (1150-10620 A)
"a3v" A3 V (1150-10620 A)
"a47iv" A4-7 IV (1150-10620 A)
"a5iii" A5 III (1150-10620 A)
"a5v" A5 V (1150-10620 A)
"a7iii" A7 III (1150-10620 A)
"a7v" A7 V (1150-10620 A)
"b0i" B0 I (1150-10620 A)
"b0v" B0 V (1150-10620 A)
"b12iii" B1-2 III (1150-10620 A)
"b1i" B1 I (1150-10620 A)
"b1v" B1 V (1150-10620 A)
"b2ii" B2 II (1150-10620 A)
"b2iv" B2 IV (1150-10620 A)
"b3i" B3 I (1150-10620 A)
"b3iii" B3 III (1150-10620 A)
"b3v" B3 V (1150-10620 A)
"b57v" B5-7 V (1150-10620 A)
"b5i" B5 I (1150-10620 A)
"b5ii" B5 II (1150-10620 A)
"b5iii" B5 III (1150-10620 A)
"b6iv" B6 IV (1150-10620 A)
"b8i" B8 I (1150-10620 A)
"b8v" B8 V (1150-10620 A)
"b9iii" B9 III (1150-10620 A)
"b9v" B9 V (1150-10620 A)
"f02iv" F0-2 IV (1150-10620 A)
"f0i" F0 I (1150-10620 A)
"f0ii" F0 II (1150-10620 A)
"f0iii" F0 III (1150-10620 A)
"f0v" F0 V (1150-10620 A)
"f2ii" F2 II (1150-10620 A)
"f2iii" F2 III (1150-10620 A)
"f2v" F2 V (1150-10620 A)
"f5i" F5 I (1150-10620 A)
"f5iii" F5 III (1150-10620 A)
"f5iv" F5 IV (1150-10620 A)
"f5v" F5 V (1150-10620 A)
"f6v" F6 V (1150-10620 A)
"f8i" F8 I (1150-10620 A)
"f8iv" F8 IV (1150-10620 A)
"f8v" F8 V (1150-10620 A)
"g0i" G0 I (1150-10620 A)
"g0iii" G0 III (1150-10620 A)
"g0iv" G0 IV (1150-10620 A)
"g0v" G0 V (1150-10620 A)
"g2i" G2 I (1150-10620 A)
"g2iv" G2 IV (1150-10620 A)
"g2v" G2 V (1150-10620 A)
"g5i" G5 I (1150-10620 A)
"g5ii" G5 II (1150-10620 A)
"g5iii" G5 III (1150-10620 A)
"g5iv" G5 IV (1150-10620 A)
"g5v" G5 V (1150-10620 A)
"g8i" G8 I (1150-10620 A)
"g8iii" G8 III (1150-10620 A)
"g8iv" G8 IV (1150-10620 A)
"g8v" G8 V (1150-10620 A)
"k01ii" K0-1 II (1150-10620 A)
"k0iii" K0 III (1150-10620 A)
"k0iv" K0 IV (1150-10620 A)
"k0v" K0 V (1150-10620 A)
"k1iii" K1 III (1150-10620 A)
"k1iv" K1 IV (1150-10620 A)
"k2i" K2 I (1150-10620 A)
"k2iii" K2 III (1150-10620 A)
"k2v" K2 V (1150-10620 A)
"k34ii" K3-4 II (1150-10620 A)
"k3i" K3 I (1150-10620 A)
"k3iii" K3 III (1150-10620 A)
"k3iv" K3 IV (1150-10620 A)
"k3v" K3 V (1150-10620 A)
"k4i" K4 I (1150-10620 A)
"k4iii" K4 III (1150-10620 A)
"k4v" K4 V (1150-10620 A)
"k5iii" K5 III (1150-10620 A)
"k5v" K5 V (1150-10620 A)
"k7v" K7 V (1150-10620 A)
"m0iii" M0 III (1150-10620 A)
"m0v" M0 V (1150-10620 A)
"m10iii" M10 III (1150-10620 A)
"m1iii" M1 III (1150-10620 A)
"m1v" M1 V (1150-10620 A)
"m2i" M2 I (1150-10620 A)
"m2iii" M2 III (1150-10620 A)
"m2p5v" M2.5 V (1150-10620 A)
"m2v" M2 V (1150-10620 A)
"m3ii" M3 II (1150-10620 A)
"m3iii" M3 III (1150-10620 A)
"m3v" M3 V (1150-10620 A)
"m4iii" M4 III (1150-10620 A)
"m4v" M4 V (1150-10620 A)
"m5iii" M5 III (1150-10620 A)
"m5v" M5 V (1150-10620 A)
"m6iii" M6 III (1150-10620 A)
"m6v" M6 V (1150-10620 A)
"m7iii" M7 III (1150-10620 A)
"m8iii" M8 III (1150-10620 A)
"m9iii" M9 III (1150-10620 A)
"o5v" O5 V (1150-10620 A)
"o8iii" O8 III (1150-10620 A)
"o9v" O9 V (1150-10620 A)
"rf6v" metal-rich F6 V (1150-10620 A)
"rf8v" metal-rich F8 V (1150-10620 A)
"rg0v" metal-rich G0 V (1150-10620 A)
"rg5iii" metal-rich G5 III (1150-10620 A)
"rg5v" metal-rich G5 V (1150-10620 A)
"rk0iii" metal-rich K0 III (1150-10620 A)
"rk0v" metal-rich K0 V (1150-10620 A)
"rk1iii" metal-rich K1 III (1150-10620 A)
"rk2iii" metal-rich K2 III (1150-10620 A)
"rk3iii" metal-rich K3 III (1150-10620 A)
"rk4iii" metal-rich K4 III (1150-10620 A)
"rk5iii" metal-rich K5 III (1150-10620 A)
"uka0i" A0 I (1150-25000 A)
"uka0iii" A0 III (1150-25000 A)
"uka0iv" A0 IV (1150-25000 A)
"uka0v" A0 V (1150-25000 A)
"uka2i" A2 I (1150-25000 A)
"uka2v" A2 V (1150-25000 A)
"uka3iii" A3 III (1150-25000 A)
"uka3v" A3 V (1150-25000 A)
"uka47iv" A4-7 IV (1150-25000 A)
"uka5iii" A5 III (1150-25000 A)
"uka5v" A5 V (1150-25000 A)
"uka7iii" A7 III (1150-25000 A)
"uka7v" A7 V (1150-25000 A)
"ukb0i" B0 I (1150-25000 A)
"ukb0v" B0 V (1150-25000 A)
"ukb12iii" B1-2 III (1150-25000 A)
"ukb1i" B1 I (1150-25000 A)
"ukb1v" B1 V (1150-25000 A)
"ukb2ii" B2 II (1150-25000 A)
"ukb2iv" B2 IV (1150-25000 A)
"ukb3i" B3 I (1150-25000 A)
"ukb3iii" B3 III (1150-25000 A)
"ukb3v" B3 V (1150-25000 A)
"ukb57v" B5-7 V (1150-25000 A)
"ukb5i" B5 I (1150-25000 A)
"ukb5ii" B5 II (1150-25000 A)
"ukb5iii" B5 III (1150-25000 A)
"ukb6iv" B6 IV (1150-25000 A)
"ukb8i" B8 I (1150-25000 A)
"ukb8v" B8 V (1150-25000 A)
"ukb9iii" B9 III (1150-25000 A)
"ukb9v" B9 V (1150-25000 A)
"ukf02iv" F0-2 IV (1150-25000 A)
"ukf0i" F0 I (1150-25000 A)
"ukf0ii" F0 II (1150-25000 A)
"ukf0iii" F0 III (1150-25000 A)
"ukf0v" F0 V (1150-25000 A)
"ukf2ii" F2 II (1150-25000 A)
"ukf2iii" F2 III (1150-25000 A)
"ukf2v" F2 V (1150-25000 A)
"ukf5i" F5 I (1150-25000 A)
"ukf5iii" F5 III (1150-25000 A)
"ukf5iv" F5 IV (1150-25000 A)
"ukf5v" F5 V (1150-25000 A)
"ukf6v" F6 V (1150-25000 A)
"ukf8i" F8 I (1150-25000 A)
"ukf8iv" F8 IV (1150-25000 A)
"ukf8v" F8 V (1150-25000 A)
"ukg0i" G0 I (1150-25000 A)
"ukg0iii" G0 III (1150-25000 A)
"ukg0iv" G0 IV (1150-25000 A)
"ukg0v" G0 V (1150-25000 A)
"ukg2i" G2 I (1150-25000 A)
"ukg2iv" G2 IV (1150-25000 A)
"ukg2v" G2 V (1150-25000 A)
"ukg5i" G5 I (1150-25000 A)
"ukg5ii" G5 II (1150-25000 A)
"ukg5iii" G5 III (1150-25000 A)
"ukg5iv" G5 IV (1150-25000 A)
"ukg5v" G5 V (1150-25000 A)
"ukg8i" G8 I (1150-25000 A)
"ukg8iii" G8 III (1150-25000 A)
"ukg8iv" G8 IV (1150-25000 A)
"ukg8v" G8 V (1150-25000 A)
"ukk01ii" K0-1 II (1150-25000 A)
"ukk0iii" K0 III (1150-25000 A)
"ukk0iv" K0 IV (1150-25000 A)
"ukk0v" K0 V (1150-25000 A)
"ukk1iii" K1 III (1150-25000 A)
"ukk1iv" K1 IV (1150-25000 A)
"ukk2i" K2 I (1150-25000 A)
"ukk2iii" K2 III (1150-25000 A)
"ukk2v" K2 V (1150-25000 A)
"ukk34ii" K3-4 II (1150-25000 A)
"ukk3i" K3 I (1150-25000 A)
"ukk3iii" K3 III (1150-25000 A)
"ukk3iv" K3 IV (1150-25000 A)
"ukk3v" K3 V (1150-25000 A)
"ukk4i" K4 I (1150-25000 A)
"ukk4iii" K4 III (1150-25000 A)
"ukk4v" K4 V (1150-25000 A)
"ukk5iii" K5 III (1150-25000 A)
"ukk5v" K5 V (1150-25000 A)
"ukk7v" K7 V (1150-25000 A)
"ukm0iii" M0 III (1150-25000 A)
"ukm0v" M0 V (1150-25000 A)
"ukm10iii" M10 III (1150-25000 A)
"ukm1iii" M1 III (1150-25000 A)
"ukm1v" M1 V (1150-25000 A)
"ukm2i" M2 I (1150-25000 A)
"ukm2iii" M2 III (1150-25000 A)
"ukm2p5v" M2.5 V (1150-25000 A)
"ukm2v" M2 V (1150-25000 A)
"ukm3ii" M3 II (1150-25000 A)
"ukm3iii" M3 III (1150-25000 A)
"ukm3v" M3 V (1150-25000 A)
"ukm4iii" M4 III (1150-25000 A)
"ukm4v" M4 V (1150-25000 A)
"ukm5iii" M5 III (1150-25000 A)
"ukm5v" M5 V (1150-25000 A)
"ukm6iii" M6 III (1150-25000 A)
"ukm6v" M6 V (1150-25000 A)
"ukm7iii" M7 III (1150-25000 A)
"ukm8iii" M8 III (1150-25000 A)
"ukm9iii" M9 III (1150-25000 A)
"uko5v" O5 V (1150-25000 A)
"uko8iii" O8 III (1150-25000 A)
"uko9v" O9 V (1150-25000 A)
"ukrf6v" metal-rich F6 V (1150-25000 A)
"ukrf8v" metal-rich F8 V (1150-25000 A)
"ukrg0v" metal-rich G0 V (1150-25000 A)
"ukrg5iii" metal-rich G5 III (1150-25000 A)
"ukrg5v" metal-rich G5 V (1150-25000 A)
"ukrk0iii" metal-rich K0 III (1150-25000 A)
"ukrk0v" metal-rich K0 V (1150-25000 A)
"ukrk1iii" metal-rich K1 III (1150-25000 A)
"ukrk2iii" metal-rich K2 III (1150-25000 A)
"ukrk3iii" metal-rich K3 III (1150-25000 A)
"ukrk4iii" metal-rich K4 III (1150-25000 A)
"ukrk5iii" metal-rich K5 III (1150-25000 A)
"ukwf5v" metal-weak F5 V (1150-25000 A)
"ukwf8v" metal-weak F8 V (1150-25000 A)
"ukwg0v" metal-weak G0 V (1150-25000 A)
"ukwg5iii" metal-weak G5 III (1150-25000 A)
"ukwg5v" metal-weak G5 V (1150-25000 A)
"ukwg8iii" metal-weak G8 III (1150-25000 A)
"ukwk0iii" metal-weak K0 III (1150-25000 A)
"ukwk1iii" metal-weak K1 III (1150-25000 A)
"ukwk2iii" metal-weak K2 III (1150-25000 A)
"ukwk3iii" metal-weak K3 III (1150-25000 A)
"ukwk4iii" metal-weak K4 III (1150-25000 A)
"wf5v" metal-weak F5 V (1150-10620 A)
"wf8v" metal-weak F8 V (1150-10620 A)
"wg0v" metal-weak G0 V (1150-10620 A)
"wg5iii" metal-weak G5 III (1150-10620 A)
"wg5v" metal-weak G5 V (1150-10620 A)
"wg8iii" metal-weak G8 III (1150-10620 A)
"wk0iii" metal-weak K0 III (1150-10620 A)
"wk1iii" metal-weak K1 III (1150-10620 A)
"wk2iii" metal-weak K2 III (1150-10620 A)
"wk3iii" metal-weak K3 III (1150-10620 A)
"wk4iii" metal-weak K4 III (1150-10620 A)
```
"""
#
# Check inputs
#
self._check_existing_spectrum(overwrite, quiet=quiet)
valid_spectral_classes = [
"a0i",
"a0iii",
"a0iv",
"a0v",
"a2i",
"a2v",
"a3iii",
"a3v",
"a47iv",
"a5iii",
"a5v",
"a7iii",
"a7v",
"b0i",
"b0v",
"b12iii",
"b1i",
"b1v",
"b2ii",
"b2iv",
"b3i",
"b3iii",
"b3v",
"b57v",
"b5i",
"b5ii",
"b5iii",
"b6iv",
"b8i",
"b8v",
"b9iii",
"b9v",
"f02iv",
"f0i",
"f0ii",
"f0iii",
"f0v",
"f2ii",
"f2iii",
"f2v",
"f5i",
"f5iii",
"f5iv",
"f5v",
"f6v",
"f8i",
"f8iv",
"f8v",
"g0i",
"g0iii",
"g0iv",
"g0v",
"g2i",
"g2iv",
"g2v",
"g5i",
"g5ii",
"g5iii",
"g5iv",
"g5v",
"g8i",
"g8iii",
"g8iv",
"g8v",
"k01ii",
"k0iii",
"k0iv",
"k0v",
"k1iii",
"k1iv",
"k2i",
"k2iii",
"k2v",
"k34ii",
"k3i",
"k3iii",
"k3iv",
"k3v",
"k4i",
"k4iii",
"k4v",
"k5iii",
"k5v",
"k7v",
"m0iii",
"m0v",
"m10iii",
"m1iii",
"m1v",
"m2i",
"m2iii",
"m2p5v",
"m2v",
"m3ii",
"m3iii",
"m3v",
"m4iii",
"m4v",
"m5iii",
"m5v",
"m6iii",
"m6v",
"m7iii",
"m8iii",
"m9iii",
"o5v",
"o8iii",
"o9v",
"rf6v",
"rf8v",
"rg0v",
"rg5iii",
"rg5v",
"rk0iii",
"rk0v",
"rk1iii",
"rk2iii",
"rk3iii",
"rk4iii",
"rk5iii",
"uka0i",
"uka0iii",
"uka0iv",
"uka0v",
"uka2i",
"uka2v",
"uka3iii",
"uka3v",
"uka47iv",
"uka5iii",
"uka5v",
"uka7iii",
"uka7v",
"ukb0i",
"ukb0v",
"ukb12iii",
"ukb1i",
"ukb1v",
"ukb2ii",
"ukb2iv",
"ukb3i",
"ukb3iii",
"ukb3v",
"ukb57v",
"ukb5i",
"ukb5ii",
"ukb5iii",
"ukb6iv",
"ukb8i",
"ukb8v",
"ukb9iii",
"ukb9v",
"ukf02iv",
"ukf0i",
"ukf0ii",
"ukf0iii",
"ukf0v",
"ukf2ii",
"ukf2iii",
"ukf2v",
"ukf5i",
"ukf5iii",
"ukf5iv",
"ukf5v",
"ukf6v",
"ukf8i",
"ukf8iv",
"ukf8v",
"ukg0i",
"ukg0iii",
"ukg0iv",
"ukg0v",
"ukg2i",
"ukg2iv",
"ukg2v",
"ukg5i",
"ukg5ii",
"ukg5iii",
"ukg5iv",
"ukg5v",
"ukg8i",
"ukg8iii",
"ukg8iv",
"ukg8v",
"ukk01ii",
"ukk0iii",
"ukk0iv",
"ukk0v",
"ukk1iii",
"ukk1iv",
"ukk2i",
"ukk2iii",
"ukk2v",
"ukk34ii",
"ukk3i",
"ukk3iii",
"ukk3iv",
"ukk3v",
"ukk4i",
"ukk4iii",
"ukk4v",
"ukk5iii",
"ukk5v",
"ukk7v",
"ukm0iii",
"ukm0v",
"ukm10iii",
"ukm1iii",
"ukm1v",
"ukm2i",
"ukm2iii",
"ukm2p5v",
"ukm2v",
"ukm3ii",
"ukm3iii",
"ukm3v",
"ukm4iii",
"ukm4v",
"ukm5iii",
"ukm5v",
"ukm6iii",
"ukm6v",
"ukm7iii",
"ukm8iii",
"ukm9iii",
"uko5v",
"uko8iii",
"uko9v",
"ukrf6v",
"ukrf8v",
"ukrg0v",
"ukrg5iii",
"ukrg5v",
"ukrk0iii",
"ukrk0v",
"ukrk1iii",
"ukrk2iii",
"ukrk3iii",
"ukrk4iii",
"ukrk5iii",
"ukwf5v",
"ukwf8v",
"ukwg0v",
"ukwg5iii",
"ukwg5v",
"ukwg8iii",
"ukwk0iii",
"ukwk1iii",
"ukwk2iii",
"ukwk3iii",
"ukwk4iii",
"wf5v",
"wf8v",
"wg0v",
"wg5iii",
"wg5v",
"wg8iii",
"wk0iii",
"wk1iii",
"wk2iii",
"wk3iii",
"wk4iii",
]
if spectral_class not in valid_spectral_classes:
raise ValueError(f"{spectral_class} is not a valid `spectral_class`.")
try:
data = pd.read_fwf(
join(DATAPATH, "pickles_spectra", "dat", f"{spectral_class}.dat"),
colspecs=[(0, 7), (7, 17)],
header=None,
)
self.wavelengths = data[0].values * u.AA
self.spectrum = data[1].values
except Exception:
raise RuntimeError(
"Could not load the Pickles data for some reason (probably a formatting "
+ "quirk in the file). Please contact the developer with a minimal "
+ "working example."
)
def show_spectrum(self, plot=True):
"""
Plot the spectrum (which should be in units of flam).
Parameters
----------
plot :: bool
If True, plot the source weights and return None. If False, return the figure
and axis instance associated with the plot.
Returns
-------
None (if plot is True)
fig, ax (if plot is False) :: `matplotlib.figure.Figure`, `matplotlib.axes.Axes`
The figure and axis instance associated with the plot.
"""
if self.spectrum is None or self.wavelengths is None:
raise ValueError("Please generate a spectrum before plotting.")
fig, ax = plt.subplots()
ax.plot(self.wavelengths.to(u.AA).value, self.spectrum, "k", lw=1)
ax.fill_between(self.wavelengths.to(u.AA).value, self.spectrum, alpha=0.5)
if plt.rcParams["text.usetex"]:
ax.set_xlabel(r"Wavelength [\AA]")
ax.set_ylabel(r"Flux Density [$\rm erg\, s^{-1}\, cm^{-2}\,$\AA$^{-1}$]")
else:
ax.set_xlabel(r"Wavelength [$\rm \AA$]")
ax.set_ylabel(r"Flux Density [$\rm erg\, s^{-1}\, cm^{-2}\,\AA^{-1}$]")
ax.set_ylim(bottom=0)
if plot:
plt.show()
else:
return fig, ax
def calc_redleak_frac(self, TelescopeObj, quiet=False):
"""
Calculate a source's red leak fraction. The red leak fraction is defined to be the
ratio of the electron rate (i.e., electron/s) induced by red leak flux to the
total electron rate induced by the entire spectrum.
Parameters
----------
TelescopeObj :: `castor_etc.Telescope` instance
The `Telescope` object containing the passband response curves to use for the
red leak calculation.
quiet :: bool
If True, suppress warnings from red leak fraction calculations.
Returns
-------
redleak_fracs :: dict of floats
Dictionary containing the red leak fraction in each passband.
"""
from .sources import CustomSource # avoid circular import error
if isinstance(self, CustomSource):
raise AttributeError("Custom sources do not have red leak fractions!")
if self.wavelengths is None or self.spectrum is None:
raise ValueError("Please generate or load a spectrum first")
#
# Calculate red leak fraction (red leak electron/s to total electron/s)
#
redleak_fracs = dict.fromkeys(TelescopeObj.passbands, 0.0)
#
# Make useful source spectrum-derived quantities
#
source_wavelengths_AA = self.wavelengths.to(u.AA).value
source_photon_s_A = ( # photon/s/A
self.spectrum # erg/s/cm^2/A
* TelescopeObj.mirror_area.to(u.cm**2).value # cm^2
/ calc_photon_energy(wavelength=source_wavelengths_AA)[0] # photon/erg
)
source_interp = interp1d(
source_wavelengths_AA,
source_photon_s_A,
kind="linear",
bounds_error=False,
fill_value=np.nan,
) # photon/s/A
#
# Find red leak fraction per band
#
for band in redleak_fracs:
full_response_curve_wavelengths_AA = (
TelescopeObj.full_passband_curves[band]["wavelength"].to(u.AA).value
)
is_redleak = (
full_response_curve_wavelengths_AA
> TelescopeObj.redleak_thresholds[band].to(u.AA).value
)
redleak_wavelengths = full_response_curve_wavelengths_AA[is_redleak]
redleak_per_A = (
source_interp(redleak_wavelengths)
* TelescopeObj.full_passband_curves[band]["response"][is_redleak]
) # electron/s/A
total_erate_per_A = (
source_interp(full_response_curve_wavelengths_AA)
* TelescopeObj.full_passband_curves[band]["response"]
) # electron/s/A
isgood_redleak = np.isfinite(redleak_per_A) # don't include NaNs
isgood_total = np.isfinite(total_erate_per_A) # don't include NaNs
if not quiet and (not np.all(isgood_redleak) or not np.all(isgood_total)):
warnings.warn(
"Could not estimate red leak fraction "
+ f"at 1 or more wavelengths in {band}-band. "
+ "This may just be caused by the source spectrum not being "
+ f"defined at all wavelengths present in the {band}-band definition "
+ "file (which runs from "
+ f"{round(min(full_response_curve_wavelengths_AA), 2)} A "
+ f"to {round(max(full_response_curve_wavelengths_AA), 2)} A)."
+ "and is typically not a reason to worry. "
+ "This warning can be suppressed with `quiet=True`.",
RuntimeWarning,
)
try:
redleak_per_px = simpson( # electron/s (per px)
y=redleak_per_A[isgood_redleak], x=redleak_wavelengths[isgood_redleak]
)
total_erate_per_px = simpson( # electron/s (per px)
y=total_erate_per_A[isgood_total],
x=full_response_curve_wavelengths_AA[isgood_total],
)
redleak_frac = redleak_per_px / total_erate_per_px
except Exception:
raise RuntimeError(
f"Unable to calculate red leak fraction for {band}-band! "
+ "Please ensure there is at least 1 wavelength that is above "
+ "the red leak threshold."
)
if np.isfinite(redleak_frac):
if redleak_frac > 1:
# Catch errors caused by very small electron/s rates
# (e.g., single emission line spectrum)
redleak_fracs[band] = 1.0
else:
redleak_fracs[band] = redleak_frac
elif not quiet:
warnings.warn(
"Source red leak fraction could not be calculated "
+ f"in {band}-band!",
RuntimeWarning,
)
return redleak_fracs
class NormMixin:
    """
    Mixin providing spectrum-normalization helpers.

    Expects the host class to define ``self.spectrum`` (flux density, typically
    erg/s/cm^2/A) and ``self.wavelengths`` (an `astropy.Quantity` convertible to
    angstrom) — TODO confirm against the host `Source` class.

    TODO: Make normalization to a specific value at a given wavelength?
    """

    @staticmethod
    def norm_to_star(spectrum, radius=1, dist=1 << u.kpc):
        """
        Normalize the blackbody spectrum to a star of given radius and distance. By
        default, normalizes the flux to a star of 1 solar radius at 1 kpc. Reference:
        <https://github.com/spacetelescope/pysynphot/blob/925cdbac35a7851cee1bddaa2b47651235c44851/pysynphot/spectrum.py#L40>.

        Note that the spectrum's units should have a unit of steradian (sr) in the
        denominator, which will be multiplied out (e.g., erg/s/cm^2/A/sr).

        Parameters
        ----------
        spectrum :: array of floats
            The spectrum to normalize. It should have a unit of steradian in the
            denominator.

        radius :: float or `astropy.Quantity` length
            The radius of the source. If a scalar, it is assumed to be in units of solar
            radii.

        dist :: float or `astropy.Quantity` length
            The distance to the blackbody. If a scalar, it is assumed to be in units of
            kpc.

        Returns
        ----------
        norm_spectrum :: array of floats
            Spectrum normalized such that the unit of steradian in the denominator
            vanishes.
        """
        #
        # Check inputs
        #
        if not isinstance(radius, u.Quantity):
            radius = radius * const.SUN_RADIUS  # scalar interpreted as solar radii
        if not isinstance(dist, u.Quantity):
            dist = dist * u.kpc  # scalar interpreted as kpc
        # Both quantities are reduced to the same unit so their ratio is a pure
        # (dimensionless) small angle in radians.
        radius = radius.to(u.km).value
        dist = dist.to(u.km).value
        #
        # Normalize spectrum
        #
        radian = radius / dist  # angular radius of the star (small-angle approx.)
        return spectrum * np.pi * radian * radian  # multiply by projected solid angle

    def norm_to_AB_mag(
        self,
        ab_mag,
        passband=None,
        TelescopeObj=None,
    ):
        """
        Normalize a spectrum to a given AB magnitude in a specified passband or to a given
        bolometric AB magnitude. The spectrum should be in units of flam (erg/s/cm^2/A).
        The bolometric magnitude calculation assumes a perfect (unity) passband response.

        WARNING: if the spectrum does not vanish at the edges (e.g., a uniform spectrum),
        then the bolometric magnitude will depend on the length of the spectrum! This is
        because the area under the curve does not converge!

        Parameters
        ----------
        ab_mag :: int or float
            The desired AB magnitude.

        passband :: valid `Telescope` passband string (e.g., "uv", "u", "g") or None
            If not None, normalize the spectrum such that it has the desired AB magnitude
            in this passband; `TelescopeObj` must also be provided. If None, normalize the
            spectrum such that the spectrum's bolometric magnitude equals the given AB
            magnitude; `TelescopeObj` must not be provided.

        TelescopeObj :: `Telescope` object or None
            The `Telescope` object containing the limits and response curves of the
            different passbands. If not None, `passband` must also be provided. If None,
            `passband` must not be provided.

        Attributes
        ----------
        spectrum :: array of floats
            The renormalized spectrum in units of erg/s/cm^2/A (identical to the previous
            spectrum units).

        Returns
        -------
        None
        """
        #
        # Check inputs
        #
        if self.spectrum is None or self.wavelengths is None:
            raise ValueError("Please generate a spectrum before normalizing.")
        # `passband` and `TelescopeObj` must be given together or not at all.
        if (TelescopeObj is None and passband is not None) or (
            TelescopeObj is not None and passband is None
        ):
            raise ValueError(
                "Please either specify both `TelescopeObj` and `passband` "
                + "or neither of them."
            )
        #
        # Normalize
        #
        current_ab_mag = self.get_AB_mag(TelescopeObj=TelescopeObj)
        if passband is not None:
            if passband not in TelescopeObj.passbands:
                raise ValueError(
                    f"Invalid `passband`. Valid passbands are: {TelescopeObj.passbands}."
                )
            # get_AB_mag returned a dict of per-band magnitudes; pick the requested one.
            current_ab_mag = current_ab_mag[passband]
        # Standard magnitude-to-flux scaling: delta_mag = -2.5 * log10(flux ratio).
        norm_factor = 10 ** (-0.4 * (ab_mag - current_ab_mag))
        self.spectrum *= norm_factor

    def norm_luminosity_dist(self, luminosity, dist):
        """
        Normalize the spectrum to a source of given (bolometric) luminosity and distance.
        The `Source` object should have its spectrum in units of flam (erg/s/cm^2/A) and
        wavelengths in angstrom. (Technically it is okay as long as the wavelengths are in
        some unit <U> and the spectrum is in units of erg/s/cm^2/<U>.)

        Parameters
        ----------
        luminosity :: scalar or `astropy.Quantity` unit of power
            The desired bolometric luminosity. If a scalar, this is assumed to be in units
            of solar luminosities. If an `astropy.Quantity`, it must be a unit of power
            (e.g., erg/s).

        dist :: float or `astropy.Quantity` length
            The distance to the source. If a scalar, it is assumed to be in units of kpc.

        Attributes
        ----------
        spectrum :: array
            Normalized spectrum in original flux density units (e.g., erg/s/cm^2/A).

        Returns
        -------
        None
        """
        #
        # Check inputs
        #
        if self.spectrum is None or self.wavelengths is None:
            raise ValueError("Please generate a spectrum before normalizing.")
        if not isinstance(luminosity, u.Quantity):
            luminosity = luminosity * const.SUN_LUMINOSITY  # scalar => solar luminosities
        if not isinstance(dist, u.Quantity):
            dist = dist * u.kpc  # scalar => kpc
        luminosity = luminosity.to(u.erg / u.s).value
        dist = dist.to(u.cm).value
        #
        # Normalize spectrum (originally in erg/s/cm^2/A)
        #
        # Multiply by the area of the sphere at the source distance to convert flux
        # density to luminosity density, then integrate over wavelength.
        erg_s_A = 4 * np.pi * dist * dist * self.spectrum  # erg/s/A
        tot_luminosity = simpson(y=erg_s_A, x=self.wavelengths.to(u.AA).value)  # erg/s
        norm_factor = luminosity / tot_luminosity  # dimensionless
        self.spectrum *= norm_factor  # erg/s/cm^2/A

    def get_AB_mag(self, TelescopeObj=None):
        """
        Calculate the AB magnitude of the source either through the telescope's passbands
        or over the whole spectrum (bolometric magnitude). Note that the source spectrum
        should be in units of erg/s/cm^2/A. The bolometric magnitude calculation assumes a
        perfect (unity) passband response.

        WARNING: if the spectrum does not vanish at the edges (e.g., a uniform
        spectrum), then the bolometric magnitude will depend on the length of the
        spectrum! This is because the area under the curve does not converge!

        Parameters
        ----------
        TelescopeObj :: `Telescope` object or None
            If provided, will calculate the AB magnitude of the source in each of the
            telescope's passbands. Otherwise will calculate the source's bolometric AB
            magnitude.

        Returns
        -------
        ab_mags :: scalar or dict of scalars
            If `TelescopeObj` is None, the result is a scalar equal to the bolometric AB
            magnitude of the source. If TelescopeObj is not None, the result is a dict of
            scalars representing the source's AB magnitude through each of the telescope's
            passbands.
        """
        if self.spectrum is None or self.wavelengths is None:
            raise ValueError(
                "Please generate a spectrum before calculating AB magnitude(s)."
            )
        wavelengths_AA = self.wavelengths.to(u.AA).value
        if TelescopeObj is None:
            #
            # Calculate bolometric AB magnitude
            #
            return flam_to_AB_mag(
                wavelengths_AA,
                self.spectrum,
                np.ones_like(wavelengths_AA, dtype=float),  # perfect "passband" response
            )
        else:
            #
            # Calculate the AB magnitude through each of the telescope's passbands
            #
            if not isinstance(TelescopeObj, Telescope):
                raise TypeError(
                    "`TelescopeObj` must be a `castor_etc.telescope.Telescope` object."
                )
            ab_mags = dict.fromkeys(TelescopeObj.passbands)
            for band in TelescopeObj.passbands:
                # Interpolate passband to spectrum resolution
                passband_wavelengths = (
                    TelescopeObj.full_passband_curves[band]["wavelength"].to(u.AA).value
                )
                passband_interp = interp1d(
                    x=passband_wavelengths,
                    y=TelescopeObj.full_passband_curves[band]["response"],
                    kind="linear",
                    bounds_error=False,
                    fill_value=np.nan,  # NaN outside the passband definition range
                )
                passband_response = passband_interp(wavelengths_AA)
                # Do not integrate NaNs
                isgood_passband = np.isfinite(passband_response)
                isgood_spectrum = np.isfinite(self.spectrum)
                if np.any(~isgood_passband):
                    if np.all(~isgood_passband):
                        raise RuntimeError(
                            f"{band}-band response could not be estimated "
                            + "at any source spectrum wavelength"
                        )
                    elif np.any(
                        ~isgood_passband[
                            (wavelengths_AA >= passband_wavelengths.min())
                            & (wavelengths_AA <= passband_wavelengths.max())
                        ]
                    ):  # only warn if there are NaNs/infs in the passband range
                        warnings.warn(
                            f"{band}-band response could not be estimated "
                            + "at some source spectrum wavelengths",
                            RuntimeWarning,
                        )
                if np.any(~isgood_spectrum):
                    if np.all(~isgood_spectrum):
                        raise RuntimeError("Source spectrum values are all non-finite!")
                    elif np.any(
                        ~isgood_spectrum[
                            (wavelengths_AA >= passband_wavelengths.min())
                            & (wavelengths_AA <= passband_wavelengths.max())
                        ]
                    ):  # only warn if there are NaNs/infs in the passband range
                        warnings.warn(
                            "Source spectrum values are not finite at some wavelengths",
                            RuntimeWarning,
                        )
                # Integrate only over wavelengths where both the spectrum and the
                # interpolated response are finite.
                ab_mags[band] = flam_to_AB_mag(
                    wavelengths_AA[isgood_passband & isgood_spectrum],
                    self.spectrum[isgood_passband & isgood_spectrum],
                    passband_response[isgood_passband & isgood_spectrum],
                )
            return ab_mags
|
CASTOR-telescopeREPO_NAMEETCPATH_START.@ETC_extracted@ETC-master@castor_etc@spectrum.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "lucatelli/morphen",
"repo_path": "morphen_extracted/morphen-main/README.md",
"type": "Markdown"
}
|
```
%&&&&&+ +&&&&&&&*+
#@@@@@+ *#@@@@@# *+
*@@#*@@% *@@@%%@@+ %#&&@#% &##&&#&% +#@#&&##%+ +&& &@@* &###&%%%* +%&+ &@%
+@@@% &@@%@@#+ #@% %@@* @@& +@@% +@@% @@& +@@% &@% +@@% &@#+ +@@@& *@#
%@@# +@@@@# %@@ %@@* @@& &@@&&&#&% %@@%*%@#% &@@##&&@@@ +@@#&&%*+ #@#&@& #@*
+@@@* &@@# #@% *@@% &@#++@@%**#@% @@&**+ &@* %@@% &@@+ &@@ *@#@&
%@& *@& &#& +&##&#&% &#& *@@% %##+ *#% ##& %####&%%%*%@@& *##+
+ + +%@&%*** +&@@&+
++**%%%%%%%***++ +***+ +%@@#%
+*%%&#@@@#&&%%%%%%%%&&#@@###@@@@@@#* *&@@#*
+*&#@@@#&%*+ *@@@@#&&@@@@ +%#@@#%+
+&#@@@&*+ #@@@*+ #@@@&%*++ +++**%&#@@#&*
+&@@@&* &@@@@@@@@@@%%&####@@@@@##&&%*+
%@@@%+ +%&&&##&%+
%@@#+
%&+
```
# `morphen`: Overview, features and limitations
***Readme file under development!***
## What is `morphen`?
`morphen` is a collection of python-based astronomical functionalities for image analysis and
processing. The three main functionalities of `morphen` are:
1. Image Analysis ([image_analysis/README.md](image_analysis/README.md))
2. Multi-Sersic Image Fitting Decomposition ([image_decomposition/README.md](image_decomposition/README.md))
3. Radio Interferometric Self-calibration ([selfcal/README.md](selfcal/README.md))
These tasks are powered by: Astropy, NumPy, SciPy, Photutils, PetroFit, Morfometryka, SEP, LMFIT,
Jax, scikit-image, WSClean, CASA, among others.
You will be able to measure basic image morphology and photometry. `morphen` also comes with a
state-of-the-art python-based image fitting implementation based on the Sersic
function. Particularly to radio astronomy, these tools involve pure python, but also are
integrated with CASA (https://casa.nrao.edu/) in order to work with common `casatasks` as well
`WSClean` -- a code for fast interferometric imaging (https://wsclean.readthedocs.io/en/latest/).
## Getting Started
Some specifics of what you can do with `morphen` includes:
- Perform morphological analysis of astronomical images (general).
- Basic source extraction and photometry (general).
- Perform a multi-component Sersic image decomposition to astronomical images of galaxies (general).
- Perform self-calibration and imaging with `WSClean` and `CASA` (radio astronomy).
- Use information from distinct interferometric arrays to perform a joint separation of distinct
physical mechanisms of the radio emission (radio astronomy).
- Experimental: some functionalities are applicable to general astronomical data, but more
testing is required beyond radio astronomy.
While in development, these modules will be kept in the same place. Stable releases will be
provided for the full module. However, we plan to release these separated functionalities
as standalone repositories in the near future.
Currently, there is no option to install `morphen` (via `pip` or `conda`).
However, its usage is simple. The code can be used as a module, interactively via Jupyter notebooks,
or via the command line interface (see "Important notes" below). For now, we recommend
using it via Jupyter notebooks (see below for examples).
See ([install_instructions.md](install_instructions.md)) for installation instructions.
The modular file `morphen.py` is the on-development module that allows you
to do such tasks, like a normal package installed via `pip`. For that, one need to download the
entire repository. The `libs` directory, specifically
the `libs/libs.py` file, contains the core functionalities in which `morphen.py` is based.
[//]: # (Examples can be found in the following directories: )
[//]: # (- `notebooks/`: contains some more general examples of how to use the code.)
[//]: # (- `image_analysis/`: contains examples of how to use the image analysis functionalities.)
[//]: # (- `image_decomposition/`: contains examples of how to use the Sersic image decomposition )
[//]: # ( functionalities.)
### Important notes
1. The functionalities presented in the examples notebooks are somehow stable. We are in extensive
development and testing, and we are setting milestones for optimizations, bug fixes, better
documentation, and new functionalities for a larger scope of the code and scientific objectives.
2. The command line option is still under development and not all argument options are
available. However, using it via jupyter is somehow stable (check the notebooks for examples).
3. This readme file is under development. I am also currently adding more basic usages to
Jupyter notebook guides. We also recognise that the code is still lacking documentation, and
we are working on it.
4. Installation instructions for all the dependencies are provided in the ([install_instructions.md](install_instructions.md)) file.
## Features
### Image Fitting Decomposition
We introduce a Python-based image fitting implementation using the Sersic function.
This implementation is designed to be robust, fast with GPU acceleration using
JaX (https://jax.readthedocs.io/en/latest/index.html) and easy to use
(semi-automated).
The physical motivation behind this implementation is to provide an interface to easily perform a
multi-component decomposition constrained around prior knowledge from the data itself, without
the need of creating complicated configuration files to set model parameters.
This helps mitigate issues when trying to fit multiple-component models to the data.
Prior photometry is measured from the data using the `PetroFit` code
(https://petrofit.readthedocs.io/en/latest/index.html) and the
`photutils` package (https://photutils.readthedocs.io/en/stable/) and used as initial
constraints for the minimisation.
Examples of how to use it can be found in the Notebook
[```image_decomposition/morphen_sersic.ipynb```](image_decomposition/morphen_sersic.ipynb) (for
radio images) and in [```image_decomposition/morphen_sersic_optical.ipynb```](image_decomposition/morphen_sersic_optical.ipynb)
for optical images.
The decomposition was first designed for radio interferometric images, but can be used with any
other type of images, such as optical images. However, application to optical data is still a work in
progress as we require better PSF modeling, especially for HST and JWST observations.
For now, you already can check some basic examples in the notebook
[```image_decomposition/morphen_sersic_optical.ipynb```](image_decomposition/morphen_sersic_optical.ipynb)
[//]: # (More details can be found in the )
[//]: # ([```image_decomposition/README.md```](image_decomposition/README.md) file.)
[//]: # (Parameter preparation )
[//]: # (for minimisation is fully-automated, but the user has to define the number of model )
[//]: # (components to be fitted to the data.)
[//]: # (It uses the LMFIT package )
[//]: # (with a GPU optmisation layer (Jax). )
[//]: # (It uses the LMFIT package with an )
[//]: # (object-oriented implementation, easy to use and manageable number of n-components. )
### Image Analysis
In the directory [```image_analysis/```](image_analysis/), the notebook
[```morphen.ipynb```](image_analysis/morphen.ipynb) contain sets of examples of how to perform
basic image analysis, such as image statistics, photometry,
shape analysis, etc.
[//]: # (Check also [```image_analysis/README.md```](image_analysis/README.md) file for more details.)
[//]: # (Collective direct results from this code are published here: `<<ADD LINK>>`.)
### Radio Interferometric Related Tasks
[//]: # (#### Interferometric Imaging With `wsclean`)
[//]: # (Directory [```imaging/```](imaging/) contains a python script called )
[//]: # ([```imaging/imaging_with_wsclean_v3.py```](imaging/imaging_with_wsclean_v3.py) which is just a support code )
[//]: # (for easy use to call wsclean on the command line. See the instructions file on )
[//]: # (how to use it: [```imaging/wsclean_imaging.md```](imaging/wsclean_imaging.md))
#### Imaging with `wsclean` and self-calibration of interferometric visibilities
File [```selfcal/imaging_with_wsclean.py```](selfcal/imaging_with_wsclean.py) is a wrapper
to call `wsclean` on the command line, with pre-defined parameters already set. You can
use it to perform imaging with `wsclean` in a simple way and change parameters as
required. Note that not all `WSClean` arguments are available in this wrapper.
Arguments that are not implemented can be simply passed with the argument
`--opt_args` in `imaging_with_wsclean.py`. This script is standalone and can be downloaded
and used separately from the `morphen` package.
In previous versions of this module (not available in this repo), all self-calibration
routines were done with CASA. However, some changes were made and in this repo we
provide for the first time an automated way to perform self-calibration, which uses `WSClean` as
imager and `CASA` to compute the complex gain corrections (phases and amplitudes).
To check how to use it, see the
[```selfcal/README.md```](selfcal/README.md) file and examples in
[```selfcal/selfcalibration.ipynb```](selfcal/selfcalibration.ipynb).
This self-calibration pipeline was tested in multiple datasets with the VLA from 1.4 GHz to 33
GHz and with e-MERLIN at 5 GHz, for a wide range of sources total flux densities.
[//]: # (In about 50% of the cases, the pipeline was able to converge to a good solution, for the other )
[//]: # (cases, after further inspection, good solutions performing a second run of the pipeline. )
The file [```selfcal/auto_selfcal_wsclean.py```](selfcal/auto_selfcal_wsclean.py)
is a script to perform self-calibration with `wsclean` and `CASA`. It is fully automated,
but still in development. Check the
[```selfcal/README.md```](selfcal/README.md) file for more details.
[//]: # (#### Selfcalibration and Imaging with `CASA`.)
[//]: # ((DOC NOT READY))
#### Interferometric Decomposition
Interferometric decomposition is a technique introduced by Lucatelli et al. (2024) to
disentangle the radio emission using combined images from distinct interferometric arrays.
***More details will be provided soon.***
[//]: # (#### CASA Utilities)
[//]: # ((IN DEV))
### Origin of `morphen`
The idea of `morphen` predates back to 2018 alongside the development of
`morfometryka` (https://iopscience.iop.org/article/10.1088/0004-637X/814/1/55/pdf) and
$\kappa$urvature (https://academic.oup.com/mnras/article/489/1/1161/5543965). The aim was to
expand the functionalities of `morfometryka` (such as automated bulge-disk decomposition) and some optimisations.
Development was on pause, but soon after I started working with radio astronomy, it was
clear that we needed a set of automated tools for radio interferometric data processing and analysis,
from basic plotting to more complicated tasks, such as self-calibration and a robust image
decomposition.
Alongside, it was also clear that the reproducibility in radio astronomy is a challenge, and
we were in need of a package towards reproducibility of scientific results. Hence, the ideas of
`morphen` were brought back to be incorporated within radio astronomy.
## How to contribute
This is an open-source project. We are welcoming all kinds of contributions, suggestions and bug
reports. Feel free to open an issue or contact us.
|
lucatelliREPO_NAMEmorphenPATH_START.@morphen_extracted@morphen-main@README.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/camera/center/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

if sys.version_info < (3, 7) or TYPE_CHECKING:
    # Eager imports: required both for static type checkers and for
    # Python < 3.7, which lacks module-level __getattr__ (PEP 562).
    from ._z import ZValidator
    from ._y import YValidator
    from ._x import XValidator
else:
    from _plotly_utils.importers import relative_import

    # Lazily resolve the validator classes on first attribute access
    # (PEP 562 module __getattr__/__dir__) to keep plotly's import time low.
    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._z.ZValidator", "._y.YValidator", "._x.XValidator"]
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@camera@center@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "google/jax",
"repo_path": "jax_extracted/jax-main/cloud_tpu_colabs/README.md",
"type": "Markdown"
}
|
# JAX on Cloud TPU examples
The same JAX code that runs on CPU and GPU can also be run on TPU. Cloud TPUs
have the advantage of quickly giving you access to multiple TPU accelerators,
including in [Colab](https://research.google.com/colaboratory/). All of the
example notebooks here use
[`jax.pmap`](https://jax.readthedocs.io/en/latest/jax.html#jax.pmap) to run JAX
computation across multiple TPU cores from Colab. You can also run the same code
directly on a [Cloud TPU
VM](https://cloud.google.com/tpu/docs/jax-quickstart-tpu-vm).
## Example Cloud TPU notebooks
The following notebooks showcase how to use and what you can do with Cloud TPUs on Colab:
### [Pmap Cookbook](https://colab.research.google.com/github/jax-ml/jax/blob/main/cloud_tpu_colabs/Pmap_Cookbook.ipynb)
A guide to getting started with `pmap`, a transform for easily distributing SPMD
computations across devices.
### [Lorentz ODE Solver](https://colab.research.google.com/github/jax-ml/jax/blob/main/cloud_tpu_colabs/Lorentz_ODE_Solver.ipynb)
Contributed by Alex Alemi (alexalemi@)
Solve and plot parallel ODE solutions with `pmap`.
<img src="https://raw.githubusercontent.com/jax-ml/jax/main/cloud_tpu_colabs/images/lorentz.png" width=65%></image>
### [Wave Equation](https://colab.research.google.com/github/jax-ml/jax/blob/main/cloud_tpu_colabs/Wave_Equation.ipynb)
Contributed by Stephan Hoyer (shoyer@)
Solve the wave equation with `pmap`, and make cool movies! The spatial domain is partitioned across the 8 cores of a Cloud TPU.

### [JAX Demo](https://colab.research.google.com/github/jax-ml/jax/blob/main/cloud_tpu_colabs/JAX_demo.ipynb)
An overview of JAX presented at the [Program Transformations for ML workshop at NeurIPS 2019](https://program-transformations.github.io/) and the [Compilers for ML workshop at CGO 2020](https://www.c4ml.org/). Covers basic numpy usage, `grad`, `jit`, `vmap`, and `pmap`.
## Performance notes
The [guidance on running TensorFlow on TPUs](https://cloud.google.com/tpu/docs/performance-guide) applies to JAX as well, with the exception of TensorFlow-specific details. Here we highlight a few important details that are particularly relevant to using TPUs in JAX.
### Padding
One of the most common culprits for surprisingly slow code on TPUs is inadvertent padding:
- Arrays in the Cloud TPU are tiled. This entails padding one of the dimensions to a multiple of 8, and a different dimension to a multiple of 128.
- The matrix multiplication unit performs best with pairs of large matrices that minimize the need for padding.
### bfloat16 dtype
By default\*, matrix multiplication in JAX on TPUs [uses bfloat16](https://cloud.google.com/blog/products/ai-machine-learning/bfloat16-the-secret-to-high-performance-on-cloud-tpus) with float32 accumulation. This can be controlled with the `precision` keyword argument on relevant `jax.numpy` functions (`matmul`, `dot`, `einsum`, etc). In particular:
- `precision=jax.lax.Precision.DEFAULT`: uses mixed bfloat16 precision (fastest)
- `precision=jax.lax.Precision.HIGH`: uses multiple MXU passes to achieve higher precision
- `precision=jax.lax.Precision.HIGHEST`: uses even more MXU passes to achieve full float32 precision
JAX also adds the `bfloat16` dtype, which you can use to explicitly cast arrays to bfloat16, e.g., `jax.numpy.array(x, dtype=jax.numpy.bfloat16)`.
\* We might change the default precision in the future, since it is arguably surprising. Please comment/vote on [this issue](https://github.com/jax-ml/jax/issues/2161) if it affects you!
## Running JAX on a Cloud TPU VM
Refer to the [Cloud TPU VM
documentation](https://cloud.google.com/tpu/docs/jax-quickstart-tpu-vm).
## Reporting issues and getting help
If you run into Cloud TPU-specific issues (e.g. trouble creating a Cloud TPU
VM), please email <cloud-tpu-support@google.com>, or <trc-support@google.com> if
you are a [TRC](https://sites.research.google/trc/) member. You can also [file a
JAX issue](https://github.com/jax-ml/jax/issues) or [ask a discussion
question](https://github.com/jax-ml/jax/discussions) for any issues with these
notebooks or using JAX in general.
If you have any other questions or comments regarding JAX on Cloud TPUs, please
email <jax-cloud-tpu-team@google.com>. We’d like to hear from you!
|
googleREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@cloud_tpu_colabs@README.md@.PATH_END.py
|
{
"filename": "mlp_lazy.py",
"repo_name": "google/flax",
"repo_path": "flax_extracted/flax-main/examples/linen_design_test/mlp_lazy.py",
"type": "Python"
}
|
# Copyright 2024 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jax
from jax import numpy as jnp
from flax import linen as nn
from flax.linen import Module
from pprint import pprint
from dense import Dense
# Here submodules are explicitly defined during init, but still materialized
# lazily only once a first input is passed through and shapes are known.
class MLP(Module):
  """Two-layer perceptron whose submodules are declared eagerly in `setup`
  but whose parameters are only materialized lazily, once a first input has
  been passed through and shapes are known."""

  def setup(self):
    self.dense1 = Dense(features=2)
    self.dense2 = Dense(features=1)
    # No input has been seen yet, so parameter shapes are unknown and the
    # submodule holds no variables: this prints FrozenDict({}).
    print(self.dense2.variables)

  def __call__(self, x):
    hidden = self.dense1(x)
    activated = nn.relu(hidden)
    return self.dense2(activated)
# Return an initialized instance of MLP by calling `__call__` with an input batch,
# initializing all variables.
#
# Variable shapes depend on the input shape passed in (here a batch of one
# 3-dimensional example, so dense1's kernel is 3x2 and dense2's is 2x1).
rngkey = jax.random.key(10)
mlp_variables = MLP().init(rngkey, jnp.zeros((1, 3)))
pprint(mlp_variables)
# Example output (exact values depend on the RNG key):
# {'params': {'dense1': {'bias': DeviceArray([0., 0.], dtype=float32),
#                        'kernel': DeviceArray([[ 0.18307537, -0.38739476],
#                                               [-0.902451  , -0.5190721 ],
#                                               [ 0.51552075,  1.1169153 ]], dtype=float32)},
#             'dense2': {'bias': DeviceArray([0.], dtype=float32),
#                        'kernel': DeviceArray([[ 0.6704609 ],
#                                               [-0.90477365]], dtype=float32)}}}
|
googleREPO_NAMEflaxPATH_START.@flax_extracted@flax-main@examples@linen_design_test@mlp_lazy.py@.PATH_END.py
|
{
"filename": "test_datetimes.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/reshape/concat/test_datetimes.py",
"type": "Python"
}
|
import datetime as dt
from datetime import datetime
import dateutil
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
to_timedelta,
)
import pandas._testing as tm
class TestDatetimeConcat:
    # Regression tests for `concat` with datetime64 columns/indexes, timezone
    # handling, and NaT propagation. GH issue numbers are cited per test.

    def test_concat_datetime64_block(self):
        rng = date_range("1/1/2000", periods=10)

        df = DataFrame({"time": rng})

        result = concat([df, df])
        # Both halves of the stacked frame keep the original datetime values.
        assert (result.iloc[:10]["time"] == rng).all()
        assert (result.iloc[10:]["time"] == rng).all()

    def test_concat_datetime_datetime64_frame(self):
        # GH#2624
        rows = []
        rows.append([datetime(2010, 1, 1), 1])
        rows.append([datetime(2010, 1, 2), "hi"])

        df2_obj = DataFrame.from_records(rows, columns=["date", "test"])

        ind = date_range(start="2000/1/1", freq="D", periods=10)
        df1 = DataFrame({"date": ind, "test": range(10)})

        # it works! (mixing datetime64 with object-dtype datetimes must not raise)
        concat([df1, df2_obj])

    def test_concat_datetime_timezone(self):
        # GH 18523
        idx1 = date_range("2011-01-01", periods=3, freq="h", tz="Europe/Paris")
        idx2 = date_range(start=idx1[0], end=idx1[-1], freq="h")
        df1 = DataFrame({"a": [1, 2, 3]}, index=idx1)
        df2 = DataFrame({"b": [1, 2, 3]}, index=idx2)
        result = concat([df1, df2], axis=1)

        # Same instants => indexes align, tz of the first frame is kept.
        exp_idx = DatetimeIndex(
            [
                "2011-01-01 00:00:00+01:00",
                "2011-01-01 01:00:00+01:00",
                "2011-01-01 02:00:00+01:00",
            ],
            dtype="M8[ns, Europe/Paris]",
            freq="h",
        )

        expected = DataFrame(
            [[1, 1], [2, 2], [3, 3]], index=exp_idx, columns=["a", "b"]
        )

        tm.assert_frame_equal(result, expected)

        idx3 = date_range("2011-01-01", periods=3, freq="h", tz="Asia/Tokyo")
        df3 = DataFrame({"b": [1, 2, 3]}, index=idx3)
        result = concat([df1, df3], axis=1)

        # Different timezones => union of instants, normalized to UTC.
        exp_idx = DatetimeIndex(
            [
                "2010-12-31 15:00:00+00:00",
                "2010-12-31 16:00:00+00:00",
                "2010-12-31 17:00:00+00:00",
                "2010-12-31 23:00:00+00:00",
                "2011-01-01 00:00:00+00:00",
                "2011-01-01 01:00:00+00:00",
            ]
        ).as_unit("ns")

        expected = DataFrame(
            [
                [np.nan, 1],
                [np.nan, 2],
                [np.nan, 3],
                [1, np.nan],
                [2, np.nan],
                [3, np.nan],
            ],
            index=exp_idx,
            columns=["a", "b"],
        )

        tm.assert_frame_equal(result, expected)

        # GH 13783: Concat after resample
        result = concat([df1.resample("h").mean(), df2.resample("h").mean()], sort=True)
        expected = DataFrame(
            {"a": [1, 2, 3] + [np.nan] * 3, "b": [np.nan] * 3 + [1, 2, 3]},
            index=idx1.append(idx1),
        )
        tm.assert_frame_equal(result, expected)

    def test_concat_datetimeindex_freq(self):
        # GH 3232
        # Monotonic index result: the inferred freq is preserved.
        dr = date_range("01-Jan-2013", periods=100, freq="50ms", tz="UTC")
        data = list(range(100))
        expected = DataFrame(data, index=dr)
        result = concat([expected[:50], expected[50:]])
        tm.assert_frame_equal(result, expected)

        # Non-monotonic index result: freq must be dropped (set to None).
        result = concat([expected[50:], expected[:50]])
        expected = DataFrame(data[50:] + data[:50], index=dr[50:].append(dr[:50]))
        expected.index._data.freq = None
        tm.assert_frame_equal(result, expected)

    def test_concat_multiindex_datetime_object_index(self):
        # https://github.com/pandas-dev/pandas/issues/11058
        idx = Index(
            [dt.date(2013, 1, 1), dt.date(2014, 1, 1), dt.date(2015, 1, 1)],
            dtype="object",
        )

        s = Series(
            ["a", "b"],
            index=MultiIndex.from_arrays(
                [
                    [1, 2],
                    idx[:-1],
                ],
                names=["first", "second"],
            ),
        )
        s2 = Series(
            ["a", "b"],
            index=MultiIndex.from_arrays(
                [[1, 2], idx[::2]],
                names=["first", "second"],
            ),
        )
        mi = MultiIndex.from_arrays(
            [[1, 2, 2], idx],
            names=["first", "second"],
        )
        # The object dtype of the date level must survive the concat union.
        assert mi.levels[1].dtype == object

        expected = DataFrame(
            [["a", "a"], ["b", np.nan], [np.nan, "b"]],
            index=mi,
        )
        result = concat([s, s2], axis=1)
        tm.assert_frame_equal(result, expected)

    def test_concat_NaT_series(self):
        # GH 11693
        # test for merging NaT series with datetime series.
        x = Series(
            date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="US/Eastern")
        )
        y = Series(pd.NaT, index=[0, 1], dtype="datetime64[ns, US/Eastern]")
        expected = Series([x[0], x[1], pd.NaT, pd.NaT])

        result = concat([x, y], ignore_index=True)
        tm.assert_series_equal(result, expected)

        # all NaT with tz
        expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns, US/Eastern]")
        result = concat([y, y], ignore_index=True)
        tm.assert_series_equal(result, expected)

    def test_concat_NaT_series2(self):
        # without tz
        x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h"))
        y = Series(date_range("20151124 10:00", "20151124 11:00", freq="1h"))
        y[:] = pd.NaT
        expected = Series([x[0], x[1], pd.NaT, pd.NaT])
        result = concat([x, y], ignore_index=True)
        tm.assert_series_equal(result, expected)

        # all NaT without tz
        x[:] = pd.NaT
        expected = Series(pd.NaT, index=range(4), dtype="datetime64[ns]")
        result = concat([x, y], ignore_index=True)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("tz", [None, "UTC"])
    def test_concat_NaT_dataframes(self, tz):
        # GH 12396
        # all-NaT frame concatenated with real timestamps keeps the tz dtype
        dti = DatetimeIndex([pd.NaT, pd.NaT], tz=tz)
        first = DataFrame({0: dti})
        second = DataFrame(
            [[Timestamp("2015/01/01", tz=tz)], [Timestamp("2016/01/01", tz=tz)]],
            index=[2, 3],
        )
        expected = DataFrame(
            [
                pd.NaT,
                pd.NaT,
                Timestamp("2015/01/01", tz=tz),
                Timestamp("2016/01/01", tz=tz),
            ]
        )

        result = concat([first, second], axis=0)
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("tz1", [None, "UTC"])
    @pytest.mark.parametrize("tz2", [None, "UTC"])
    @pytest.mark.parametrize("item", [pd.NaT, Timestamp("20150101").as_unit("ns")])
    def test_concat_NaT_dataframes_all_NaT_axis_0(self, tz1, tz2, item):
        # GH 12396

        # tz-naive
        first = DataFrame([[pd.NaT], [pd.NaT]]).apply(lambda x: x.dt.tz_localize(tz1))
        second = DataFrame([item]).apply(lambda x: x.dt.tz_localize(tz2))

        result = concat([first, second], axis=0)
        expected = DataFrame(Series([pd.NaT, pd.NaT, item], index=[0, 1, 0]))
        expected = expected.apply(lambda x: x.dt.tz_localize(tz2))
        if tz1 != tz2:
            # mismatched timezones cannot share a datetime dtype -> object
            expected = expected.astype(object)

        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("tz1", [None, "UTC"])
    @pytest.mark.parametrize("tz2", [None, "UTC"])
    def test_concat_NaT_dataframes_all_NaT_axis_1(self, tz1, tz2):
        # GH 12396

        # axis=1: each column keeps its own (possibly different) tz dtype
        first = DataFrame(Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1))
        second = DataFrame(Series([pd.NaT]).dt.tz_localize(tz2), columns=[1])
        expected = DataFrame(
            {
                0: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1),
                1: Series([pd.NaT, pd.NaT]).dt.tz_localize(tz2),
            }
        )
        result = concat([first, second], axis=1)
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize("tz1", [None, "UTC"])
    @pytest.mark.parametrize("tz2", [None, "UTC"])
    def test_concat_NaT_series_dataframe_all_NaT(self, tz1, tz2):
        # GH 12396

        # tz-naive
        first = Series([pd.NaT, pd.NaT]).dt.tz_localize(tz1)
        second = DataFrame(
            [
                [Timestamp("2015/01/01", tz=tz2)],
                [Timestamp("2016/01/01", tz=tz2)],
            ],
            index=[2, 3],
        )

        expected = DataFrame(
            [
                pd.NaT,
                pd.NaT,
                Timestamp("2015/01/01", tz=tz2),
                Timestamp("2016/01/01", tz=tz2),
            ]
        )
        if tz1 != tz2:
            # mismatched timezones cannot share a datetime dtype -> object
            expected = expected.astype(object)

        result = concat([first, second])
        tm.assert_frame_equal(result, expected)
class TestTimezoneConcat:
    """Concatenation behavior for tz-aware datetime data.

    Mixing tz-aware values with tz-naive datetimes, strings, timedeltas, or
    periods falls back to object dtype; concatenating matching timezones
    preserves the tz-aware dtype, with the result taking the finest
    resolution unit among the inputs (via ``tm.get_finest_unit``).
    """
    def test_concat_tz_series(self):
        # gh-11755: tz and no tz
        x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
        y = Series(date_range("2012-01-01", "2012-01-02"))
        expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
        result = concat([x, y], ignore_index=True)
        tm.assert_series_equal(result, expected)
    def test_concat_tz_series2(self):
        # gh-11887: concat tz and object
        x = Series(date_range("20151124 08:00", "20151124 09:00", freq="1h", tz="UTC"))
        y = Series(["a", "b"])
        expected = Series([x[0], x[1], y[0], y[1]], dtype="object")
        result = concat([x, y], ignore_index=True)
        tm.assert_series_equal(result, expected)
    def test_concat_tz_series3(self, unit, unit2):
        # see gh-12217 and gh-12306
        # Concatenating two UTC times
        first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
        first[0] = first[0].dt.tz_localize("UTC")
        second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]")
        second[0] = second[0].dt.tz_localize("UTC")
        result = concat([first, second])
        # result dtype uses the finer of the two input resolutions
        exp_unit = tm.get_finest_unit(unit, unit2)
        assert result[0].dtype == f"datetime64[{exp_unit}, UTC]"
    def test_concat_tz_series4(self, unit, unit2):
        # Concatenating two London times
        first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
        first[0] = first[0].dt.tz_localize("Europe/London")
        second = DataFrame([[datetime(2016, 1, 2)]], dtype=f"M8[{unit2}]")
        second[0] = second[0].dt.tz_localize("Europe/London")
        result = concat([first, second])
        exp_unit = tm.get_finest_unit(unit, unit2)
        assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
    def test_concat_tz_series5(self, unit, unit2):
        # Concatenating 2+1 London times
        first = DataFrame(
            [[datetime(2016, 1, 1)], [datetime(2016, 1, 2)]], dtype=f"M8[{unit}]"
        )
        first[0] = first[0].dt.tz_localize("Europe/London")
        second = DataFrame([[datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]")
        second[0] = second[0].dt.tz_localize("Europe/London")
        result = concat([first, second])
        exp_unit = tm.get_finest_unit(unit, unit2)
        assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
    def test_concat_tz_series6(self, unit, unit2):
        # Concatenating 1+2 London times
        first = DataFrame([[datetime(2016, 1, 1)]], dtype=f"M8[{unit}]")
        first[0] = first[0].dt.tz_localize("Europe/London")
        second = DataFrame(
            [[datetime(2016, 1, 2)], [datetime(2016, 1, 3)]], dtype=f"M8[{unit2}]"
        )
        second[0] = second[0].dt.tz_localize("Europe/London")
        result = concat([first, second])
        exp_unit = tm.get_finest_unit(unit, unit2)
        assert result[0].dtype == f"datetime64[{exp_unit}, Europe/London]"
    def test_concat_tz_series_tzlocal(self):
        # see gh-13583: a dateutil tzlocal timezone must survive concat
        x = [
            Timestamp("2011-01-01", tz=dateutil.tz.tzlocal()),
            Timestamp("2011-02-01", tz=dateutil.tz.tzlocal()),
        ]
        y = [
            Timestamp("2012-01-01", tz=dateutil.tz.tzlocal()),
            Timestamp("2012-02-01", tz=dateutil.tz.tzlocal()),
        ]
        result = concat([Series(x), Series(y)], ignore_index=True)
        tm.assert_series_equal(result, Series(x + y))
        assert result.dtype == "datetime64[s, tzlocal()]"
    def test_concat_tz_series_with_datetimelike(self):
        # see gh-12620: tz and timedelta
        x = [
            Timestamp("2011-01-01", tz="US/Eastern"),
            Timestamp("2011-02-01", tz="US/Eastern"),
        ]
        y = [pd.Timedelta("1 day"), pd.Timedelta("2 day")]
        result = concat([Series(x), Series(y)], ignore_index=True)
        tm.assert_series_equal(result, Series(x + y, dtype="object"))
        # tz and period
        y = [pd.Period("2011-03", freq="M"), pd.Period("2011-04", freq="M")]
        result = concat([Series(x), Series(y)], ignore_index=True)
        tm.assert_series_equal(result, Series(x + y, dtype="object"))
    def test_concat_tz_frame(self):
        # Splitting a two-column tz-aware frame and re-concatenating along
        # axis=1 must round-trip to the original frame.
        df2 = DataFrame(
            {
                "A": Timestamp("20130102", tz="US/Eastern"),
                "B": Timestamp("20130603", tz="CET"),
            },
            index=range(5),
        )
        # concat
        df3 = concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
        tm.assert_frame_equal(df2, df3)
    def test_concat_multiple_tzs(self):
        # GH#12467
        # combining datetime tz-aware and naive DataFrames
        ts1 = Timestamp("2015-01-01", tz=None)
        ts2 = Timestamp("2015-01-01", tz="UTC")
        ts3 = Timestamp("2015-01-01", tz="EST")
        df1 = DataFrame({"time": [ts1]})
        df2 = DataFrame({"time": [ts2]})
        df3 = DataFrame({"time": [ts3]})
        # naive + aware -> object dtype
        results = concat([df1, df2]).reset_index(drop=True)
        expected = DataFrame({"time": [ts1, ts2]}, dtype=object)
        tm.assert_frame_equal(results, expected)
        results = concat([df1, df3]).reset_index(drop=True)
        expected = DataFrame({"time": [ts1, ts3]}, dtype=object)
        tm.assert_frame_equal(results, expected)
        # two different timezones: result stays datetime-like
        results = concat([df2, df3]).reset_index(drop=True)
        expected = DataFrame({"time": [ts2, ts3]})
        tm.assert_frame_equal(results, expected)
    def test_concat_multiindex_with_tz(self):
        # GH 6606: tz-aware level in a MultiIndex survives row concat
        df = DataFrame(
            {
                "dt": DatetimeIndex(
                    [
                        datetime(2014, 1, 1),
                        datetime(2014, 1, 2),
                        datetime(2014, 1, 3),
                    ],
                    dtype="M8[ns, US/Pacific]",
                ),
                "b": ["A", "B", "C"],
                "c": [1, 2, 3],
                "d": [4, 5, 6],
            }
        )
        df = df.set_index(["dt", "b"])
        exp_idx1 = DatetimeIndex(
            ["2014-01-01", "2014-01-02", "2014-01-03"] * 2,
            dtype="M8[ns, US/Pacific]",
            name="dt",
        )
        exp_idx2 = Index(["A", "B", "C"] * 2, name="b")
        exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
        expected = DataFrame(
            {"c": [1, 2, 3] * 2, "d": [4, 5, 6] * 2}, index=exp_idx, columns=["c", "d"]
        )
        result = concat([df, df])
        tm.assert_frame_equal(result, expected)
    def test_concat_tz_not_aligned(self):
        # GH#22796: missing tz-aware column is filled with NaT on alignment
        ts = pd.to_datetime([1, 2]).tz_localize("UTC")
        a = DataFrame({"A": ts})
        b = DataFrame({"A": ts, "B": ts})
        result = concat([a, b], sort=True, ignore_index=True)
        expected = DataFrame(
            {"A": list(ts) + list(ts), "B": [pd.NaT, pd.NaT] + list(ts)}
        )
        tm.assert_frame_equal(result, expected)
    @pytest.mark.parametrize(
        "t1",
        [
            "2015-01-01",
            pytest.param(
                pd.NaT,
                marks=pytest.mark.xfail(
                    reason="GH23037 incorrect dtype when concatenating"
                ),
            ),
        ],
    )
    def test_concat_tz_NaT(self, t1):
        # GH#22796
        # Concatenating tz-aware multicolumn DataFrames
        ts1 = Timestamp(t1, tz="UTC")
        ts2 = Timestamp("2015-01-01", tz="UTC")
        ts3 = Timestamp("2015-01-01", tz="UTC")
        df1 = DataFrame([[ts1, ts2]])
        df2 = DataFrame([[ts3]])
        result = concat([df1, df2])
        expected = DataFrame([[ts1, ts2], [ts3, pd.NaT]], index=[0, 0])
        tm.assert_frame_equal(result, expected)
    def test_concat_tz_with_empty(self):
        # GH 9188: an empty frame must not disturb the tz-aware result
        result = concat(
            [DataFrame(date_range("2000", periods=1, tz="UTC")), DataFrame()]
        )
        expected = DataFrame(date_range("2000", periods=1, tz="UTC"))
        tm.assert_frame_equal(result, expected)
class TestPeriodConcat:
    """Concatenation behavior for period-dtype Series.

    Matching frequencies keep the ``Period[freq]`` dtype; any mismatch in
    frequency, or mixing with non-period values, yields object dtype.
    """
    def test_concat_period_series(self):
        # same freq on both sides: Period dtype is preserved
        left = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
        right = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="D"))
        result = concat([left, right], ignore_index=True)
        expected = Series([left[0], left[1], right[0], right[1]], dtype="Period[D]")
        tm.assert_series_equal(result, expected)
    def test_concat_period_multiple_freq_series(self):
        # different freqs cannot share one Period dtype -> object
        left = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
        right = Series(pd.PeriodIndex(["2015-10-01", "2016-01-01"], freq="M"))
        result = concat([left, right], ignore_index=True)
        expected = Series([left[0], left[1], right[0], right[1]], dtype="object")
        tm.assert_series_equal(result, expected)
        assert result.dtype == "object"
    def test_concat_period_other_series(self):
        # same values, different freqs -> object
        left = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
        right = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="M"))
        result = concat([left, right], ignore_index=True)
        expected = Series([left[0], left[1], right[0], right[1]], dtype="object")
        tm.assert_series_equal(result, expected)
        assert result.dtype == "object"
    def test_concat_period_other_series2(self):
        # period mixed with datetime64 -> object
        left = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
        right = Series(DatetimeIndex(["2015-11-01", "2015-12-01"]))
        result = concat([left, right], ignore_index=True)
        expected = Series([left[0], left[1], right[0], right[1]], dtype="object")
        tm.assert_series_equal(result, expected)
        assert result.dtype == "object"
    def test_concat_period_other_series3(self):
        # period mixed with strings -> object
        left = Series(pd.PeriodIndex(["2015-11-01", "2015-12-01"], freq="D"))
        right = Series(["A", "B"])
        result = concat([left, right], ignore_index=True)
        expected = Series([left[0], left[1], right[0], right[1]], dtype="object")
        tm.assert_series_equal(result, expected)
        assert result.dtype == "object"
def test_concat_timedelta64_block():
    # Stacking a timedelta64 column onto itself must leave both halves
    # identical to the original frame.
    tdi = to_timedelta(np.arange(10), unit="s")
    frame = DataFrame({"time": tdi})
    stacked = concat([frame, frame])
    tm.assert_frame_equal(stacked.iloc[:10], frame, check_index_type=False)
    tm.assert_frame_equal(stacked.iloc[10:], frame, check_index_type=False)
def test_concat_multiindex_datetime_nat():
    # GH#44900: aligning on a MultiIndex containing NaT must not drop rows.
    idx_one = MultiIndex.from_tuples([(1, pd.NaT)])
    idx_two = MultiIndex.from_tuples([(1, pd.NaT), (2, pd.NaT)])
    left = DataFrame({"a": 1}, index=idx_one)
    right = DataFrame({"b": 2}, index=idx_two)

    result = concat([left, right], axis="columns")

    # "a" has no value for the second row, so it is upcast to float with NaN
    expected = DataFrame({"a": [1.0, np.nan], "b": 2}, idx_two)
    tm.assert_frame_equal(result, expected)
def test_concat_float_datetime64():
# GH#32934
df_time = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")})
df_float = DataFrame({"A": pd.array([1.0], dtype="float64")})
expected = DataFrame(
{
"A": [
pd.array(["2000"], dtype="datetime64[ns]")[0],
pd.array([1.0], dtype="float64")[0],
]
},
index=[0, 0],
)
result = concat([df_time, df_float])
tm.assert_frame_equal(result, expected)
expected = DataFrame({"A": pd.array([], dtype="object")})
result = concat([df_time.iloc[:0], df_float.iloc[:0]])
tm.assert_frame_equal(result, expected)
expected = DataFrame({"A": pd.array([1.0], dtype="object")})
result = concat([df_time.iloc[:0], df_float])
tm.assert_frame_equal(result, expected)
expected = DataFrame({"A": pd.array(["2000"], dtype="datetime64[ns]")}).astype(
object
)
result = concat([df_time, df_float.iloc[:0]])
tm.assert_frame_equal(result, expected)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@reshape@concat@test_datetimes.py@.PATH_END.py
|
{
"filename": "Tutorial-GRMHD_Equations-Cartesian.ipynb",
"repo_name": "zachetienne/nrpytutorial",
"repo_path": "nrpytutorial_extracted/nrpytutorial-master/Tutorial-GRMHD_Equations-Cartesian.ipynb",
"type": "Jupyter Notebook"
}
|
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-59152712-8"></script>
<script>
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-59152712-8');
</script>
# Equations of General Relativistic Magnetohydrodynamics (GRMHD)
## Author: Zach Etienne
## This notebook documents and constructs a number of quantities useful for building symbolic (SymPy) expressions for the equations of general relativistic magnetohydrodynamics (GRMHD), using the same (Valencia-like) formalism as `IllinoisGRMHD`.
**Notebook Status:** <font color='orange'><b> Self-Validated; induction equation not yet implemented </b></font>
**Validation Notes:** This tutorial notebook has been confirmed to be self-consistent with its corresponding NRPy+ module, as documented [below](#code_validation). **Additional validation tests may have been performed, but are as yet, undocumented. (TODO)**
## Introduction
We write the equations of general relativistic magnetohydrodynamics in conservative form as follows (Eqs. 41-44 of [Duez *et al*](https://arxiv.org/pdf/astro-ph/0503420.pdf)):
\begin{eqnarray}
\ \partial_t \rho_* &+& \partial_j \left(\rho_* v^j\right) = 0 \\
\partial_t \tilde{\tau} &+& \partial_j \left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right) = s \\
\partial_t \tilde{S}_i &+& \partial_j \left(\alpha \sqrt{\gamma} T^j{}_i \right) = \frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i} \\
\partial_t \tilde{B}^i &+& \partial_j \left(v^j \tilde{B}^i - v^i \tilde{B}^j\right) = 0,
\end{eqnarray}
where
$$
s = \alpha \sqrt{\gamma}\left[\left(T^{00}\beta^i\beta^j + 2 T^{0i}\beta^j + T^{ij} \right)K_{ij}
- \left(T^{00}\beta^i + T^{0i} \right)\partial_i\alpha \right].
$$
We represent $T^{\mu\nu}$ as the sum of the stress-energy tensor of a perfect fluid $T^{\mu\nu}_{\rm GRHD}$, plus the stress-energy associated with the electromagnetic fields in the force-free electrodynamics approximation $T^{\mu\nu}_{\rm GRFFE}$ (equivalently, $T^{\mu\nu}_{\rm em}$ in Duez *et al*):
$$
T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},
$$
where
* $T^{\mu\nu}_{\rm GRHD}$ is constructed from rest-mass density $\rho_0$, pressure $P$, internal energy $\epsilon$, 4-velocity $u^\mu$, and ADM metric quantities as described in the [NRPy+ GRHD equations tutorial notebook](Tutorial-GRHD_Equations-Cartesian.ipynb); and
* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the magnetic field vector $B^i$ and ADM metric quantities as described in the [NRPy+ GRFFE equations tutorial notebook](Tutorial-GRFFE_Equations-Cartesian.ipynb).
All quantities can be written in terms of the full GRMHD stress-energy tensor $T^{\mu\nu}$ in precisely the same way they are defined in the GRHD equations. ***Therefore, we will not define special functions for generating these quantities, and instead refer the user to the appropriate functions in the [GRHD module](../edit/GRHD/equations.py)*** Namely,
* The GRMHD conservative variables:
* $\rho_* = \alpha\sqrt{\gamma} \rho_0 u^0$, via `GRHD.compute_rho_star(alpha, sqrtgammaDET, rho_b,u4U)`
* $\tilde{\tau} = \alpha^2\sqrt{\gamma} T^{00} - \rho_*$, via `GRHD.compute_tau_tilde(alpha, sqrtgammaDET, T4UU,rho_star)`
* $\tilde{S}_i = \alpha \sqrt{\gamma} T^0{}_i$, via `GRHD.compute_S_tildeD(alpha, sqrtgammaDET, T4UD)`
* The GRMHD fluxes:
* $\rho_*$ flux: $\left(\rho_* v^j\right)$, via `GRHD.compute_rho_star_fluxU(vU, rho_star)`
* $\tilde{\tau}$ flux: $\left(\alpha^2 \sqrt{\gamma} T^{0j} - \rho_* v^j \right)$, via `GRHD.compute_tau_tilde_fluxU(alpha, sqrtgammaDET, vU,T4UU,rho_star)`
* $\tilde{S}_i$ flux: $\left(\alpha \sqrt{\gamma} T^j{}_i \right)$, via `GRHD.compute_S_tilde_fluxUD(alpha, sqrtgammaDET, T4UD)`
* GRMHD source terms:
* $\tilde{\tau}$ source term $s$: defined above, via `GRHD.compute_s_source_term(KDD,betaU,alpha, sqrtgammaDET,alpha_dD, T4UU)`
* $\tilde{S}_i$ source term: $\frac{1}{2} \alpha\sqrt{\gamma} T^{\mu\nu} g_{\mu\nu,i}$, via `GRHD.compute_S_tilde_source_termD(alpha, sqrtgammaDET,g4DD_zerotimederiv_dD, T4UU)`
In summary, all terms in the GRMHD equations can be constructed once the full GRMHD stress-energy tensor $T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE}$ is constructed. For completeness, the full set of input variables include:
* Spacetime quantities:
* ADM quantities $\alpha$, $\beta^i$, $\gamma_{ij}$, $K_{ij}$
* Hydrodynamical quantities:
* Rest-mass density $\rho_0$
* Pressure $P$
* Internal energy $\epsilon$
* 4-velocity $u^\mu$
* Electrodynamical quantities
* Magnetic field $B^i = \tilde{B}^i / \sqrt{\gamma}$
### A Note on Notation
As is standard in NRPy+,
* Greek indices refer to four-dimensional quantities where the zeroth component indicates temporal (time) component.
* Latin indices refer to three-dimensional quantities. This is somewhat counterintuitive since Python always indexes its lists starting from 0. As a result, the zeroth component of three-dimensional quantities will necessarily indicate the first *spatial* direction.
For instance, in calculating the first term of $b^2 u^\mu u^\nu$, we use Greek indices:
```python
T4EMUU = ixp.zerorank2(DIM=4)
for mu in range(4):
for nu in range(4):
# Term 1: b^2 u^{\mu} u^{\nu}
T4EMUU[mu][nu] = smallb2*u4U[mu]*u4U[nu]
```
When we calculate $\beta_i = \gamma_{ij} \beta^j$, we use Latin indices:
```python
betaD = ixp.zerorank1(DIM=3)
for i in range(3):
for j in range(3):
betaD[i] += gammaDD[i][j] * betaU[j]
```
As a corollary, any expressions involving mixed Greek and Latin indices will need to offset one set of indices by one: A Latin index in a four-vector will be incremented and a Greek index in a three-vector will be decremented (however, the latter case does not occur in this tutorial notebook). This can be seen when we handle $\frac{1}{2} \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu}$:
```python
# \alpha \sqrt{\gamma} T^{\mu \nu}_{\rm EM} \partial_i g_{\mu \nu} / 2
for i in range(3):
for mu in range(4):
for nu in range(4):
S_tilde_rhsD[i] += alpsqrtgam * T4EMUU[mu][nu] * g4DD_zerotimederiv_dD[mu][nu][i+1] / 2
```
<a id='toc'></a>
# Table of Contents
$$\label{toc}$$
Each family of quantities is constructed within a given function (**boldfaced** below). This notebook is organized as follows
1. [Step 1](#importmodules): Import needed NRPy+ & Python modules
1. [Step 2](#stressenergy): Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$:
* **compute_T4UU()**, **compute_T4UD()**:
1. [Step 3](#declarevarsconstructgrhdeqs): Construct $T^{\mu\nu}$ from GRHD & GRFFE modules with ADM and GRMHD input variables, and construct GRMHD equations from the full GRMHD stress-energy tensor.
1. [Step 4](#code_validation): Code Validation against `GRMHD.equations` NRPy+ module
1. [Step 5](#latex_pdf_output): Output this notebook to $\LaTeX$-formatted PDF file
<a id='importmodules'></a>
# Step 1: Import needed NRPy+ & Python modules \[Back to [top](#toc)\]
$$\label{importmodules}$$
```python
# Step 1: Import needed core NRPy+ modules
import sympy as sp # SymPy: The Python computer algebra package upon which NRPy+ depends
import indexedexp as ixp # NRPy+: Symbolic indexed expression (e.g., tensors, vectors, etc.) support
```
<a id='stressenergy'></a>
# Step 2: Define the GRMHD stress-energy tensor $T^{\mu\nu}$ and $T^\mu{}_\nu$ \[Back to [top](#toc)\]
$$\label{stressenergy}$$
Recall from above that
$$
T^{\mu\nu} = T^{\mu\nu}_{\rm GRHD} + T^{\mu\nu}_{\rm GRFFE},
$$
where
* $T^{\mu\nu}_{\rm GRHD}$ is constructed from the `GRHD.compute_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and
* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function
Since a lowering operation on a sum of tensors is equivalent to the lowering operation applied to the individual tensors in the sum,
$$
T^\mu{}_{\nu} = T^\mu{}_{\nu}{}^{\rm GRHD} + T^\mu{}_{\nu}{}^{\rm GRFFE},
$$
where
* $T^\mu{}_{\nu}{}^{\rm GRHD}$ is constructed from the `GRHD.compute_T4UD(gammaDD,betaU,alpha, T4UU)` [GRHD](../edit/GRHD/equations.py) [(**tutorial**)](Tutorial-GRHD_Equations-Cartesian.ipynb) function, and
* $T^{\mu\nu}_{\rm GRFFE}$ is constructed from the `GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, TEM4UU)` [GRFFE](../edit/GRFFE/equations.py) [(**tutorial**)](Tutorial-GRFFE_Equations-Cartesian.ipynb) function.
```python
import GRHD.equations as GRHD
import GRFFE.equations as GRFFE
# Step 2.a: Define the GRMHD T^{mu nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, smallb4U, smallbsquared):
    """Build the full GRMHD stress-energy tensor T^{mu nu}.

    Sets module-level globals:
      GRHDT4UU  -- perfect-fluid contribution, copied from GRHD.T4UU
      GRFFET4UU -- force-free EM contribution, copied from GRFFE.TEM4UU
      T4UU      -- their sum, T^{mu nu} = T^{mu nu}_GRHD + T^{mu nu}_GRFFE

    All results are rank-2 (4x4) nested lists of SymPy expressions.
    """
    global GRHDT4UU
    global GRFFET4UU
    global T4UU
    # Populate GRHD.T4UU and GRFFE.TEM4UU inside their respective modules.
    GRHD.compute_T4UU( gammaDD,betaU,alpha, rho_b,P,epsilon,u4U)
    GRFFE.compute_TEM4UU(gammaDD,betaU,alpha, smallb4U, smallbsquared,u4U)
    GRHDT4UU = ixp.zerorank2(DIM=4)
    GRFFET4UU = ixp.zerorank2(DIM=4)
    T4UU = ixp.zerorank2(DIM=4)
    for mu in range(4):
        for nu in range(4):
            # Keep copies of each contribution alongside their sum.
            GRHDT4UU[mu][nu] = GRHD.T4UU[mu][nu]
            GRFFET4UU[mu][nu] = GRFFE.TEM4UU[mu][nu]
            T4UU[mu][nu] = GRHD.T4UU[mu][nu] + GRFFE.TEM4UU[mu][nu]
# Step 2.b: Define T^{mu}_{nu} (a 4-dimensional tensor)
def compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU):
    """Build the mixed-index GRMHD stress-energy tensor T^mu_nu.

    Lowering an index distributes over the sum, so each contribution is
    lowered separately and the results are added; the total is stored in
    the module-level global T4UD.
    """
    global T4UD
    GRHD.compute_T4UD( gammaDD,betaU,alpha, GRHDT4UU)
    GRFFE.compute_TEM4UD(gammaDD,betaU,alpha, GRFFET4UU)
    T4UD = ixp.zerorank2(DIM=4)
    for mu in range(4):
        for nu in range(4):
            T4UD[mu][nu] = GRHD.T4UD[mu][nu] + GRFFE.TEM4UD[mu][nu]
```
<a id='declarevarsconstructgrhdeqs'></a>
# Step 3: Declare ADM and hydrodynamical input variables, and construct all terms in GRMHD equations \[Back to [top](#toc)\]
$$\label{declarevarsconstructgrhdeqs}$$
```python
# First define hydrodynamical quantities
u4U = ixp.declarerank1("u4U", DIM=4)
rho_b,P,epsilon = sp.symbols('rho_b P epsilon',real=True)
B_tildeU = ixp.declarerank1("B_tildeU", DIM=3)
# Then ADM quantities
gammaDD = ixp.declarerank2("gammaDD","sym01",DIM=3)
KDD = ixp.declarerank2("KDD" ,"sym01",DIM=3)
betaU = ixp.declarerank1("betaU", DIM=3)
alpha = sp.symbols('alpha', real=True)
# Then numerical constant
sqrt4pi = sp.symbols('sqrt4pi', real=True)
# First compute smallb4U & smallbsquared from BtildeU, which are needed
# for GRMHD stress-energy tensor T4UU and T4UD:
GRHD.compute_sqrtgammaDET(gammaDD)
GRFFE.compute_B_notildeU(GRHD.sqrtgammaDET, B_tildeU)
GRFFE.compute_smallb4U( gammaDD,betaU,alpha, u4U,GRFFE.B_notildeU, sqrt4pi)
GRFFE.compute_smallbsquared(gammaDD,betaU,alpha, GRFFE.smallb4U)
# Then compute the GRMHD stress-energy tensor:
compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRHDT4UU,GRFFET4UU)
# Compute conservative variables in terms of primitive variables
GRHD.compute_rho_star( alpha, GRHD.sqrtgammaDET, rho_b,u4U)
GRHD.compute_tau_tilde(alpha, GRHD.sqrtgammaDET, T4UU,GRHD.rho_star)
GRHD.compute_S_tildeD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then compute v^i from u^mu
GRHD.compute_vU_from_u4U__no_speed_limit(u4U)
# Next compute fluxes of conservative variables
GRHD.compute_rho_star_fluxU( GRHD.vU, GRHD.rho_star)
GRHD.compute_tau_tilde_fluxU(alpha, GRHD.sqrtgammaDET, GRHD.vU,T4UU,GRHD.rho_star)
GRHD.compute_S_tilde_fluxUD( alpha, GRHD.sqrtgammaDET, T4UD)
# Then declare derivatives & compute g4DD_zerotimederiv_dD
gammaDD_dD = ixp.declarerank3("gammaDD_dD","sym01",DIM=3)
betaU_dD = ixp.declarerank2("betaU_dD" ,"nosym",DIM=3)
alpha_dD = ixp.declarerank1("alpha_dD" ,DIM=3)
GRHD.compute_g4DD_zerotimederiv_dD(gammaDD,betaU,alpha, gammaDD_dD,betaU_dD,alpha_dD)
# Then compute source terms on tau_tilde and S_tilde equations
GRHD.compute_s_source_term(KDD,betaU,alpha, GRHD.sqrtgammaDET,alpha_dD, T4UU)
GRHD.compute_S_tilde_source_termD( alpha, GRHD.sqrtgammaDET,GRHD.g4DD_zerotimederiv_dD, T4UU)
```
<a id='code_validation'></a>
# Step 4: Code Validation against `GRMHD.equations` NRPy+ module \[Back to [top](#toc)\]
$$\label{code_validation}$$
As a code validation check, we verify agreement in the SymPy expressions for the GRHD equations generated in
1. this tutorial versus
2. the NRPy+ [GRMHD.equations](../edit/GRMHD/equations.py) module.
```python
import GRMHD.equations as GRMHD
# Compute stress-energy tensor T4UU and T4UD:
GRMHD.compute_GRMHD_T4UU(gammaDD,betaU,alpha, rho_b,P,epsilon,u4U, GRFFE.smallb4U, GRFFE.smallbsquared)
GRMHD.compute_GRMHD_T4UD(gammaDD,betaU,alpha, GRMHD.GRHDT4UU,GRMHD.GRFFET4UU)
```
```python
def comp_func(expr1,expr2,basename,prefixname2="Ge."):
    """Compare two SymPy expressions symbolically.

    Returns 0 when expr1 - expr2 simplifies (stringifies) to "0";
    otherwise prints a diagnostic naming the mismatching quantity and
    returns 1 so the caller can count failures.
    """
    difference = expr1 - expr2
    if str(difference) == "0":
        return 0
    print(basename + " - " + prefixname2 + basename + " = " + str(difference))
    return 1
def gfnm(basename,idx1,idx2=None,idx3=None):
    """Build an indexed gridfunction name.

    Produces ``base[i]``, ``base[i][j]``, or ``base[i][j][k]`` depending on
    how many indices are supplied; trailing indices after a None are ignored,
    matching the original early-return behavior.
    """
    if idx2 is None:
        indices = [idx1]
    elif idx3 is None:
        indices = [idx1, idx2]
    else:
        indices = [idx1, idx2, idx3]
    return basename + "".join("[" + str(i) + "]" for i in indices)
expr_list = []
exprcheck_list = []
namecheck_list = []
for mu in range(4):
for nu in range(4):
namecheck_list.extend([gfnm("GRMHD.GRHDT4UU",mu,nu),gfnm("GRMHD.GRFFET4UU",mu,nu),
gfnm("GRMHD.T4UU", mu,nu),gfnm("GRMHD.T4UD", mu,nu)])
exprcheck_list.extend([GRMHD.GRHDT4UU[mu][nu],GRMHD.GRFFET4UU[mu][nu],
GRMHD.T4UU[mu][nu], GRMHD.T4UD[mu][nu]])
expr_list.extend([GRHDT4UU[mu][nu],GRFFET4UU[mu][nu],
T4UU[mu][nu], T4UD[mu][nu]])
num_failures = 0
for i in range(len(expr_list)):
num_failures += comp_func(expr_list[i],exprcheck_list[i],namecheck_list[i])
import sys
if num_failures == 0:
print("ALL TESTS PASSED!")
else:
print("ERROR: "+str(num_failures)+" TESTS DID NOT PASS")
sys.exit(1)
```
ALL TESTS PASSED!
<a id='latex_pdf_output'></a>
# Step 5: Output this notebook to $\LaTeX$-formatted PDF file \[Back to [top](#toc)\]
$$\label{latex_pdf_output}$$
The following code cell converts this Jupyter notebook into a proper, clickable $\LaTeX$-formatted PDF file. After the cell is successfully run, the generated PDF may be found in the root NRPy+ tutorial directory, with filename
[Tutorial-GRMHD_Equations-Cartesian.pdf](Tutorial-GRMHD_Equations-Cartesian.pdf) (Note that clicking on this link may not work; you may need to open the PDF file through another means.)
```python
import cmdline_helper as cmd # NRPy+: Multi-platform Python command-line interface
cmd.output_Jupyter_notebook_to_LaTeXed_PDF("Tutorial-GRMHD_Equations-Cartesian")
```
Created Tutorial-GRMHD_Equations-Cartesian.tex, and compiled LaTeX file to
PDF file Tutorial-GRMHD_Equations-Cartesian.pdf
|
zachetienneREPO_NAMEnrpytutorialPATH_START.@nrpytutorial_extracted@nrpytutorial-master@Tutorial-GRMHD_Equations-Cartesian.ipynb@.PATH_END.py
|
{
"filename": "brighterFatterKernel.py",
"repo_name": "lsst/ip_isr",
"repo_path": "ip_isr_extracted/ip_isr-main/python/lsst/ip/isr/brighterFatterKernel.py",
"type": "Python"
}
|
# This file is part of ip_isr.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Brighter Fatter Kernel calibration definition."""
__all__ = ['BrighterFatterKernel']
import numpy as np
from astropy.table import Table
import lsst.afw.math as afwMath
from . import IsrCalib
class BrighterFatterKernel(IsrCalib):
"""Calibration of brighter-fatter kernels for an instrument.
ampKernels are the kernels for each amplifier in a detector, as
generated by having ``level == 'AMP'``.
detectorKernel is the kernel generated for a detector as a
whole, as generated by having ``level == 'DETECTOR'``.
makeDetectorKernelFromAmpwiseKernels is a method to generate the
kernel for a detector, constructed by averaging together the
ampwise kernels in the detector. The existing application code is
only defined for kernels with ``level == 'DETECTOR'``, so this method
is used if the supplied kernel was built with ``level == 'AMP'``.
Parameters
----------
camera : `lsst.afw.cameraGeom.Camera`
Camera describing detector geometry.
level : `str`
Level the kernels will be generated for.
log : `logging.Logger`, optional
Log to write messages to.
**kwargs :
Parameters to pass to parent constructor.
Notes
-----
Version 1.1 adds the `expIdMask` property, and substitutes
`means` and `variances` for `rawMeans` and `rawVariances`
from the PTC dataset.
expIdMask : `dict`, [`str`,`numpy.ndarray`]
Dictionary keyed by amp names containing the mask produced after
outlier rejection.
rawMeans : `dict`, [`str`, `numpy.ndarray`]
Dictionary keyed by amp names containing the unmasked average of the
means of the exposures in each flat pair.
rawVariances : `dict`, [`str`, `numpy.ndarray`]
Dictionary keyed by amp names containing the variance of the
difference image of the exposures in each flat pair.
Corresponds to rawVars of PTC.
rawXcorrs : `dict`, [`str`, `numpy.ndarray`]
Dictionary keyed by amp names containing an array of measured
covariances per mean flux.
Corresponds to covariances of PTC.
badAmps : `list`
List of bad amplifiers names.
shape : `tuple`
Tuple of the shape of the BFK kernels.
gain : `dict`, [`str`,`float`]
Dictionary keyed by amp names containing the fitted gains.
noise : `dict`, [`str`,`float`]
Dictionary keyed by amp names containing the fitted noise.
meanXcorrs : `dict`, [`str`,`numpy.ndarray`]
Dictionary keyed by amp names containing the averaged
cross-correlations.
valid : `dict`, [`str`,`bool`]
Dictionary keyed by amp names containing validity of data.
ampKernels : `dict`, [`str`, `numpy.ndarray`]
Dictionary keyed by amp names containing the BF kernels.
detKernels : `dict`
Dictionary keyed by detector names containing the BF kernels.
"""
_OBSTYPE = 'bfk'
_SCHEMA = 'Brighter-fatter kernel'
_VERSION = 1.1
def __init__(self, camera=None, level=None, **kwargs):
self.level = level
# Things inherited from the PTC
self.expIdMask = dict()
self.rawMeans = dict()
self.rawVariances = dict()
self.rawXcorrs = dict()
self.badAmps = list()
self.shape = (17, 17)
self.gain = dict()
self.noise = dict()
# Things calculated from the PTC
self.meanXcorrs = dict()
self.valid = dict()
# Things that are used downstream
self.ampKernels = dict()
self.detKernels = dict()
super().__init__(**kwargs)
if camera:
self.initFromCamera(camera, detectorId=kwargs.get('detectorId', None))
self.requiredAttributes.update(['level', 'expIdMask', 'rawMeans', 'rawVariances', 'rawXcorrs',
'badAmps', 'gain', 'noise', 'meanXcorrs', 'valid',
'ampKernels', 'detKernels'])
def updateMetadata(self, setDate=False, **kwargs):
"""Update calibration metadata.
This calls the base class's method after ensuring the required
calibration keywords will be saved.
Parameters
----------
setDate : `bool`, optional
Update the CALIBDATE fields in the metadata to the current
time. Defaults to False.
kwargs :
Other keyword parameters to set in the metadata.
"""
kwargs['LEVEL'] = self.level
kwargs['KERNEL_DX'] = self.shape[0]
kwargs['KERNEL_DY'] = self.shape[1]
super().updateMetadata(setDate=setDate, **kwargs)
def initFromCamera(self, camera, detectorId=None):
"""Initialize kernel structure from camera.
Parameters
----------
camera : `lsst.afw.cameraGeom.Camera`
Camera to use to define geometry.
detectorId : `int`, optional
Index of the detector to generate.
Returns
-------
calib : `lsst.ip.isr.BrighterFatterKernel`
The initialized calibration.
Raises
------
RuntimeError
Raised if no detectorId is supplied for a calibration with
``level='AMP'``.
"""
self._instrument = camera.getName()
if detectorId is not None:
detector = camera[detectorId]
self._detectorId = detectorId
self._detectorName = detector.getName()
self._detectorSerial = detector.getSerial()
if self.level == 'AMP':
if detectorId is None:
raise RuntimeError("A detectorId must be supplied if level='AMP'.")
self.badAmps = []
for amp in detector:
ampName = amp.getName()
self.expIdMask[ampName] = []
self.rawMeans[ampName] = []
self.rawVariances[ampName] = []
self.rawXcorrs[ampName] = []
self.gain[ampName] = amp.getGain()
self.noise[ampName] = amp.getReadNoise()
self.meanXcorrs[ampName] = []
self.ampKernels[ampName] = []
self.valid[ampName] = []
elif self.level == 'DETECTOR':
if detectorId is None:
for det in camera:
detName = det.getName()
self.detKernels[detName] = []
else:
self.detKernels[self._detectorName] = []
return self
def getLengths(self):
    """Return the set of lengths needed for reshaping components.

    Returns
    -------
    kernelLength : `int`
        Product of the elements of self.shape.
    smallLength : `int`
        Size of an untiled covariance.
    nObs : `int`
        Number of observation pairs used in the kernel (0 for
        detector-level kernels).

    Raises
    ------
    RuntimeError
        Raised if the amplifiers do not all report the same number of
        observations.
    """
    kernelLength = self.shape[0] * self.shape[1]
    # An untiled covariance covers one quadrant of the (odd-sized) kernel.
    smallLength = int((self.shape[0] - 1) * (self.shape[1] - 1) / 4)

    nObs = 0
    if self.level == 'AMP':
        counts = {len(means) for means in self.rawMeans.values()}
        if len(counts) != 1:
            raise RuntimeError("Inconsistent number of observations found.")
        nObs = counts.pop()

    return (kernelLength, smallLength, nObs)
@classmethod
def fromDict(cls, dictionary):
    """Construct a calibration from a dictionary of properties.

    Parameters
    ----------
    dictionary : `dict`
        Dictionary of properties.

    Returns
    -------
    calib : `lsst.ip.isr.BrighterFatterKernel`
        Constructed calibration.

    Raises
    ------
    RuntimeError
        Raised if the supplied dictionary is for a different
        calibration, or if the calibration version is unknown.
    """
    calib = cls()

    # Reject dictionaries written for a different calibration type.
    if calib._OBSTYPE != (found := dictionary['metadata']['OBSTYPE']):
        raise RuntimeError(f"Incorrect brighter-fatter kernel supplied. Expected {calib._OBSTYPE}, "
                           f"found {found}")
    calib.setMetadata(dictionary['metadata'])
    calib.calibInfoFromDict(dictionary)

    # Level and shape were stored into the metadata by updateMetadata().
    calib.level = dictionary['metadata'].get('LEVEL', 'AMP')
    calib.shape = (dictionary['metadata'].get('KERNEL_DX', 0),
                   dictionary['metadata'].get('KERNEL_DY', 0))

    calibVersion = dictionary['metadata']['bfk_VERSION']
    if calibVersion == 1.0:
        # Version 1.0 predates masking: synthesize an all-True expIdMask
        # and copy the (already-masked) means/variances over.
        calib.log.debug("Old Version of brighter-fatter kernel found. Current version: "
                        f"{calib._VERSION}. The new attribute 'expIdMask' will be "
                        "populated with 'True' values, and the new attributes 'rawMeans' "
                        "and 'rawVariances' will be populated with the masked 'means' "
                        "and 'variances' values."
                        )

        # use 'means', because 'expIdMask' does not exist.
        calib.expIdMask = {amp: np.repeat(True, len(dictionary['means'][amp])) for amp in
                           dictionary['means']}
        calib.rawMeans = {amp: np.array(dictionary['means'][amp]) for amp in dictionary['means']}
        calib.rawVariances = {amp: np.array(dictionary['variances'][amp]) for amp in
                              dictionary['variances']}
    elif calibVersion == 1.1:
        calib.expIdMask = {amp: np.array(dictionary['expIdMask'][amp]) for amp in dictionary['expIdMask']}
        calib.rawMeans = {amp: np.array(dictionary['rawMeans'][amp]) for amp in dictionary['rawMeans']}
        calib.rawVariances = {amp: np.array(dictionary['rawVariances'][amp]) for amp in
                              dictionary['rawVariances']}
    else:
        raise RuntimeError(f"Unknown version for brighter-fatter kernel: {calibVersion}")

    # Lengths for reshape: serialized arrays are stored flattened.
    _, smallLength, nObs = calib.getLengths()
    smallShapeSide = int(np.sqrt(smallLength))

    calib.rawXcorrs = {amp: np.array(dictionary['rawXcorrs'][amp]).reshape((nObs,
                                                                           smallShapeSide,
                                                                           smallShapeSide))
                       for amp in dictionary['rawXcorrs']}

    calib.gain = dictionary['gain']
    calib.noise = dictionary['noise']

    # NOTE(review): this iterates the 'rawXcorrs' keys to build meanXcorrs;
    # presumably the two dicts share the same amplifier keys — verify.
    calib.meanXcorrs = {amp: np.array(dictionary['meanXcorrs'][amp]).reshape(calib.shape)
                        for amp in dictionary['rawXcorrs']}
    calib.ampKernels = {amp: np.array(dictionary['ampKernels'][amp]).reshape(calib.shape)
                        for amp in dictionary['ampKernels']}
    calib.valid = {amp: bool(value) for amp, value in dictionary['valid'].items()}
    calib.badAmps = [amp for amp, valid in dictionary['valid'].items() if valid is False]

    calib.detKernels = {det: np.array(dictionary['detKernels'][det]).reshape(calib.shape)
                        for det in dictionary['detKernels']}

    calib.updateMetadata()
    return calib
def toDict(self):
    """Return a dictionary containing the calibration properties.

    The dictionary should be able to be round-tripped through
    `fromDict`.

    Returns
    -------
    dictionary : `dict`
        Dictionary of properties.
    """
    self.updateMetadata()

    outDict = {}
    metadata = self.getMetadata()
    outDict['metadata'] = metadata

    # Lengths for ravel: arrays are serialized flattened.
    kernelLength, smallLength, nObs = self.getLengths()

    outDict['expIdMask'] = {amp: np.array(self.expIdMask[amp]).tolist() for amp in self.expIdMask}
    outDict['rawMeans'] = {amp: np.array(self.rawMeans[amp]).tolist() for amp in self.rawMeans}
    outDict['rawVariances'] = {amp: np.array(self.rawVariances[amp]).tolist() for amp in
                               self.rawVariances}

    for amp in self.rawXcorrs.keys():
        # Check to see if we need to repack the data: if the correlations
        # were stored masked, expand them back to one entry per exposure
        # pair (masked slots become NaN planes) so the flat shape is fixed.
        correlationShape = np.array(self.rawXcorrs[amp]).shape
        if nObs != correlationShape[0]:
            if correlationShape[0] == np.sum(self.expIdMask[amp]):
                # Repack data.
                self.repackCorrelations(amp, correlationShape)
            else:
                raise ValueError("Could not coerce rawXcorrs into appropriate shape "
                                 "(have %d correlations, but expect to see %d.",
                                 correlationShape[0], np.sum(self.expIdMask[amp]))

    outDict['rawXcorrs'] = {amp: np.array(self.rawXcorrs[amp]).reshape(nObs*smallLength).tolist()
                            for amp in self.rawXcorrs}
    outDict['badAmps'] = self.badAmps
    outDict['gain'] = self.gain
    outDict['noise'] = self.noise

    outDict['meanXcorrs'] = {amp: self.meanXcorrs[amp].reshape(kernelLength).tolist()
                             for amp in self.meanXcorrs}
    outDict['ampKernels'] = {amp: self.ampKernels[amp].reshape(kernelLength).tolist()
                             for amp in self.ampKernels}
    outDict['valid'] = self.valid

    outDict['detKernels'] = {det: self.detKernels[det].reshape(kernelLength).tolist()
                             for det in self.detKernels}
    return outDict
@classmethod
def fromTable(cls, tableList):
    """Construct calibration from a list of tables.

    This method uses the `fromDict` method to create the
    calibration, after constructing an appropriate dictionary from
    the input tables.

    Parameters
    ----------
    tableList : `list` [`astropy.table.Table`]
        List of tables to use to construct the brighter-fatter
        calibration.  The first table holds amplifier-level data; an
        optional second table holds detector-level kernels.

    Returns
    -------
    calib : `lsst.ip.isr.BrighterFatterKernel`
        The calibration defined in the tables.
    """
    ampTable = tableList[0]

    metadata = ampTable.meta
    inDict = dict()
    inDict['metadata'] = metadata

    amps = ampTable['AMPLIFIER']

    # Determine version for expected values.  The ``fromDict``
    # method can unpack either, but the appropriate fields need to
    # be supplied.
    calibVersion = metadata['bfk_VERSION']
    if calibVersion == 1.0:
        # We expect to find ``means`` and ``variances`` for this
        # case, and will construct an ``expIdMask`` from these
        # parameters in the ``fromDict`` method.
        rawMeanList = ampTable['MEANS']
        rawVarianceList = ampTable['VARIANCES']

        inDict['means'] = {amp: mean for amp, mean in zip(amps, rawMeanList)}
        inDict['variances'] = {amp: var for amp, var in zip(amps, rawVarianceList)}
    elif calibVersion == 1.1:
        # This will have ``rawMeans`` and ``rawVariances``, which
        # are filtered via the ``expIdMask`` fields.
        expIdMaskList = ampTable['EXP_ID_MASK']
        rawMeanList = ampTable['RAW_MEANS']
        rawVarianceList = ampTable['RAW_VARIANCES']

        inDict['expIdMask'] = {amp: mask for amp, mask in zip(amps, expIdMaskList)}
        inDict['rawMeans'] = {amp: mean for amp, mean in zip(amps, rawMeanList)}
        inDict['rawVariances'] = {amp: var for amp, var in zip(amps, rawVarianceList)}
    else:
        raise RuntimeError(f"Unknown version for brighter-fatter kernel: {calibVersion}")

    rawXcorrs = ampTable['RAW_XCORRS']
    gainList = ampTable['GAIN']
    noiseList = ampTable['NOISE']

    meanXcorrs = ampTable['MEAN_XCORRS']
    ampKernels = ampTable['KERNEL']
    validList = ampTable['VALID']

    # Re-key the per-amplifier columns into dicts as fromDict expects.
    inDict['rawXcorrs'] = {amp: kernel for amp, kernel in zip(amps, rawXcorrs)}
    inDict['gain'] = {amp: gain for amp, gain in zip(amps, gainList)}
    inDict['noise'] = {amp: noise for amp, noise in zip(amps, noiseList)}
    inDict['meanXcorrs'] = {amp: kernel for amp, kernel in zip(amps, meanXcorrs)}
    inDict['ampKernels'] = {amp: kernel for amp, kernel in zip(amps, ampKernels)}
    inDict['valid'] = {amp: bool(valid) for amp, valid in zip(amps, validList)}

    inDict['badAmps'] = [amp for amp, valid in inDict['valid'].items() if valid is False]

    if len(tableList) > 1:
        # Optional second table: detector-level kernels.
        detTable = tableList[1]
        inDict['detKernels'] = {det: kernel for det, kernel
                                in zip(detTable['DETECTOR'], detTable['KERNEL'])}
    else:
        inDict['detKernels'] = {}

    return cls.fromDict(inDict)
def toTable(self):
    """Construct a list of tables containing the information in this
    calibration.

    The list of tables should create an identical calibration
    after being passed to this class's fromTable method.

    Returns
    -------
    tableList : `list` [`lsst.afw.table.Table`]
        List of tables containing the brighter-fatter calibration
        information: amplifier-level data first, followed by an
        optional detector-level kernel table.
    """
    tableList = []
    self.updateMetadata()

    # Lengths used to flatten arrays into table cells.
    kernelLength, smallLength, nObs = self.getLengths()

    ampList = []
    expIdMaskList = []
    rawMeanList = []
    rawVarianceList = []
    rawXcorrs = []
    gainList = []
    noiseList = []

    meanXcorrsList = []
    kernelList = []
    validList = []

    if self.level == 'AMP':
        for amp in self.rawMeans.keys():
            ampList.append(amp)
            expIdMaskList.append(self.expIdMask[amp])
            rawMeanList.append(self.rawMeans[amp])
            rawVarianceList.append(self.rawVariances[amp])

            # If the correlations were stored masked, expand them back to
            # one entry per exposure pair so the flattened shape is fixed.
            correlationShape = np.array(self.rawXcorrs[amp]).shape
            if nObs != correlationShape[0]:
                if correlationShape[0] == np.sum(self.expIdMask[amp]):
                    # Repack data.
                    self.repackCorrelations(amp, correlationShape)
                else:
                    raise ValueError("Could not coerce rawXcorrs into appropriate shape "
                                     "(have %d correlations, but expect to see %d.",
                                     correlationShape[0], np.sum(self.expIdMask[amp]))
            rawXcorrs.append(np.array(self.rawXcorrs[amp]).reshape(nObs*smallLength).tolist())

            gainList.append(self.gain[amp])
            noiseList.append(self.noise[amp])

            meanXcorrsList.append(self.meanXcorrs[amp].reshape(kernelLength).tolist())
            kernelList.append(self.ampKernels[amp].reshape(kernelLength).tolist())
            # An amp is valid only if flagged valid and not listed as bad.
            validList.append(int(self.valid[amp] and not (amp in self.badAmps)))

    ampTable = Table({'AMPLIFIER': ampList,
                      'EXP_ID_MASK': expIdMaskList,
                      'RAW_MEANS': rawMeanList,
                      'RAW_VARIANCES': rawVarianceList,
                      'RAW_XCORRS': rawXcorrs,
                      'GAIN': gainList,
                      'NOISE': noiseList,
                      'MEAN_XCORRS': meanXcorrsList,
                      'KERNEL': kernelList,
                      'VALID': validList,
                      })

    ampTable.meta = self.getMetadata().toDict()
    tableList.append(ampTable)

    if len(self.detKernels):
        detList = []
        kernelList = []

        for det in self.detKernels.keys():
            detList.append(det)
            kernelList.append(self.detKernels[det].reshape(kernelLength).tolist())

        detTable = Table({'DETECTOR': detList,
                          'KERNEL': kernelList})
        detTable.meta = self.getMetadata().toDict()
        tableList.append(detTable)

    return tableList
def repackCorrelations(self, amp, correlationShape):
    """Re-expand masked correlations into one entry per exposure pair.

    Positions that were masked out (``expIdMask`` is False) are filled
    with NaN planes, so the result lines up index-for-index with the
    exposure mask.

    Parameters
    ----------
    amp : `str`
        Amplifier needing repacked.
    correlationShape : `tuple` [`int`], (3, )
        Shape the correlations are expected to take.
    """
    expanded = []
    nextMeasured = 0
    for keep in self.expIdMask[amp]:
        if keep:
            # Unmasked slot: take the next measured correlation in order.
            expanded.append(self.rawXcorrs[amp][nextMeasured])
            nextMeasured += 1
        else:
            # Masked slot: placeholder plane of NaNs.
            expanded.append(np.full((correlationShape[1], correlationShape[2]), np.nan))
    self.rawXcorrs[amp] = expanded
# Implementation methods
def makeDetectorKernelFromAmpwiseKernels(self, detectorName, ampsToExclude=[]):
    """Average the amplifier level kernels to create a detector level
    kernel.  There is no change in index ordering/orientation from
    this averaging.

    Each kernel element is combined across amplifiers with a 5-sigma
    clipped mean, so a single outlier amplifier does not skew the
    detector kernel.

    Parameters
    ----------
    detectorName : `str`
        Detector for which the averaged kernel will be used.
    ampsToExclude : `list` [`str`], optional
        Amps that should not be included in the average.
        NOTE(review): mutable default argument; safe here because it is
        only read, never mutated.
    """
    inKernels = np.array([self.ampKernels[amp] for amp in
                          self.ampKernels if amp not in ampsToExclude])
    avgKernel = np.zeros_like(inKernels[0])

    sctrl = afwMath.StatisticsControl()
    sctrl.setNumSigmaClip(5.0)
    # Clipped mean element-by-element across the stacked amp kernels.
    for i in range(np.shape(avgKernel)[0]):
        for j in range(np.shape(avgKernel)[1]):
            avgKernel[i, j] = afwMath.makeStatistics(inKernels[:, i, j],
                                                     afwMath.MEANCLIP, sctrl).getValue()

    self.detKernels[detectorName] = avgKernel
def replaceDetectorKernelWithAmpKernel(self, ampName, detectorName):
    """Use a single amplifier's kernel as the detector-level kernel.

    Parameters
    ----------
    ampName : `str`
        Amplifier whose kernel should be promoted.
    detectorName : `str`
        Detector whose kernel entry will be replaced.
    """
    # Bug fix: the storage attributes are ``ampKernels``/``detKernels``
    # (plural); the singular names used previously always raised
    # AttributeError.
    self.detKernels[detectorName] = self.ampKernels[ampName]
|
lsstREPO_NAMEip_isrPATH_START.@ip_isr_extracted@ip_isr-main@python@lsst@ip@isr@brighterFatterKernel.py@.PATH_END.py
|
{
"filename": "common.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/benchmarks/benchmarks/common.py",
"type": "Python"
}
|
"""
Airspeed Velocity benchmark utilities
"""
import sys
import os
import re
import time
import textwrap
import subprocess
import itertools
import random
class Benchmark:
    """
    Base class with sensible options

    Marker base for asv benchmark classes in this suite; currently adds
    no behavior of its own.
    """
    pass
def is_xslow():
    """Return True when the extra-slow benchmark set is enabled.

    Controlled by the ``SCIPY_XSLOW`` environment variable: any value
    that parses as a nonzero integer enables the slow benchmarks.
    Unset, ``"0"``, or a non-integer value disables them.

    Returns
    -------
    bool
    """
    try:
        # Normalize to bool: an ``is_*`` predicate should not leak the
        # raw integer parsed from the environment (truthiness-compatible
        # with the previous behavior).
        return bool(int(os.environ.get('SCIPY_XSLOW', '0')))
    except ValueError:
        # Non-integer values (e.g. "yes") are treated as disabled.
        return False
class LimitedParamBenchmark(Benchmark):
    """
    Benchmark that runs only a pseudo-random subset of its parameter grid.

    At most ``num_param_combinations`` combinations are kept, selected by
    shuffling the full cartesian product with a fixed seed so the active
    subset is stable across runs.  For combinations outside the active
    subset ``setup`` raises NotImplementedError, which asv interprets as
    a skip.  Setting ``SCIPY_XSLOW`` disables the limiting entirely.
    """
    num_param_combinations = 0

    def setup(self, *args, **kwargs):
        if is_xslow():
            # Slow mode: every combination is active, nothing to skip.
            return

        seed = kwargs.pop('param_seed', None)
        if seed is None:
            seed = 1

        grid = kwargs.pop('params', None)
        if grid is None:
            grid = self.params

        limit = kwargs.pop('num_param_combinations', None)
        if limit is None:
            limit = self.num_param_combinations

        # Deterministic shuffle of the full grid, then keep a prefix.
        choices = list(itertools.product(*grid))
        random.Random(seed).shuffle(choices)
        active = choices[:limit]

        if args not in active:
            raise NotImplementedError("skipped")
def get_max_rss_bytes(rusage):
    """
    Return the peak RSS from a ``getrusage`` result, converted to bytes.

    ``ru_maxrss`` units differ by platform: kilobytes on Linux, bytes on
    macOS.  Returns None when no rusage information is available.
    """
    if not rusage:
        return None

    if sys.platform.startswith('linux'):
        # On Linux getrusage() returns ru_maxrss in kilobytes
        # https://man7.org/linux/man-pages/man2/getrusage.2.html
        return rusage.ru_maxrss * 1024
    if sys.platform == "darwin":
        # on macOS ru_maxrss is in bytes
        return rusage.ru_maxrss
    # Unknown platform, just return whatever is here.
    return rusage.ru_maxrss
def run_monitored_wait4(code):
    """
    Run code in a new Python process, and monitor peak memory usage.

    Returns
    -------
    duration : float
        Duration in seconds (including Python startup time)
    peak_memusage : int
        Peak memory usage in bytes of the child Python process

    Raises
    ------
    AssertionError
        If the child process exits with a nonzero status.

    Notes
    -----
    Works on Unix platforms (Linux, macOS) that have `os.wait4()`.
    """
    code = textwrap.dedent(code)

    start = time.time()
    process = subprocess.Popen([sys.executable, '-c', code])
    # wait4 reaps the child and returns its resource usage in one call.
    pid, returncode, rusage = os.wait4(process.pid, 0)
    duration = time.time() - start
    max_rss_bytes = get_max_rss_bytes(rusage)

    if returncode != 0:
        raise AssertionError("Running failed:\n%s" % code)

    return duration, max_rss_bytes
def run_monitored_proc(code):
    """
    Run code in a new Python process, and monitor peak memory usage.

    Fallback for platforms without ``os.wait4``: polls the child's
    ``/proc/<pid>/status`` for VmRSS, so it is Linux-only and the peak
    is a rough estimate (short-lived spikes can be missed between polls).

    Returns
    -------
    duration : float
        Duration in seconds (including Python startup time)
    peak_memusage : float
        Peak memory usage (rough estimate only) in bytes

    Raises
    ------
    RuntimeError
        If run on a non-Linux platform.
    AssertionError
        If the child process exits with a nonzero status.
    """
    if not sys.platform.startswith('linux'):
        raise RuntimeError("Peak memory monitoring only works on Linux")

    code = textwrap.dedent(code)
    process = subprocess.Popen([sys.executable, '-c', code])

    peak_memusage = -1

    start = time.time()
    while True:
        ret = process.poll()
        if ret is not None:
            break

        with open('/proc/%d/status' % process.pid) as f:
            procdata = f.read()

        m = re.search(r'VmRSS:\s*(\d+)\s*kB', procdata, re.S | re.I)
        if m is not None:
            # VmRSS is reported in kB; track the maximum seen.
            memusage = float(m.group(1)) * 1e3
            peak_memusage = max(memusage, peak_memusage)

        # Poll at ~100 Hz to balance overhead against resolution.
        time.sleep(0.01)

    process.wait()

    duration = time.time() - start

    if process.returncode != 0:
        raise AssertionError("Running failed:\n%s" % code)

    return duration, peak_memusage
def run_monitored(code):
    """
    Run code in a new Python process, and monitor peak memory usage.

    Returns
    -------
    duration : float
        Duration in seconds (including Python startup time)
    peak_memusage : float or int
        Peak memory usage (rough estimate only) in bytes
    """
    # Prefer the wait4-based implementation (exact, single syscall) when
    # the OS provides os.wait4; otherwise fall back to polling /proc.
    impl = run_monitored_wait4 if hasattr(os, 'wait4') else run_monitored_proc
    return impl(code)
def get_mem_info():
    """Return total and available system memory, in bytes."""
    # Imported lazily so the benchmark suite loads without psutil.
    import psutil

    vm = psutil.virtual_memory()
    return {
        "memtotal": vm.total,
        "memavailable": vm.available,
    }
def set_mem_rlimit(max_mem=None):
    """
    Set address space rlimit for the current process.

    Parameters
    ----------
    max_mem : `int`, optional
        Address-space limit in bytes.  Defaults to 70% of total system
        memory; never raised above the current soft limit.
    """
    import resource
    if max_mem is None:
        mem_info = get_mem_info()
        max_mem = int(mem_info['memtotal'] * 0.7)
    cur_limit = resource.getrlimit(resource.RLIMIT_AS)
    if cur_limit[0] > 0:
        # Never increase an already-set soft limit.
        max_mem = min(max_mem, cur_limit[0])

    try:
        resource.setrlimit(resource.RLIMIT_AS, (max_mem, cur_limit[1]))
    except ValueError:
        # on macOS may raise: current limit exceeds maximum limit
        pass
def with_attributes(**attrs):
    """Decorator factory that sets the given attributes on a function."""
    def decorator(func):
        # Copy every keyword onto the decorated function unchanged.
        for name in attrs:
            setattr(func, name, attrs[name])
        return func
    return decorator
class safe_import:
    """
    Context manager that swallows ImportError raised in its body.

    Suppression is on by default and can be disabled by setting the
    environment variable ``SCIPY_ALLOW_BENCH_IMPORT_ERRORS`` to "0" or
    "false".  The ``error`` attribute records whether any exception was
    raised inside the block.  Non-ImportError exceptions always propagate.
    """

    def __enter__(self):
        self.error = False
        return self

    def __exit__(self, type_, value, traceback):
        if type_ is None:
            return False
        self.error = True
        allow = os.getenv('SCIPY_ALLOW_BENCH_IMPORT_ERRORS', '1').lower()
        # Suppress only ImportErrors, and only when the env var allows it.
        return allow not in ('0', 'false') and issubclass(type_, ImportError)
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@benchmarks@benchmarks@common.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter/textfont/__init__.py",
"type": "Python"
}
|
import sys

# Plotly auto-generated validator package: on Python < 3.7 the validator
# classes are imported eagerly; on newer interpreters PEP 562 module
# ``__getattr__``/``__dir__`` hooks are installed so each validator is
# imported lazily on first access, keeping import time down.
if sys.version_info < (3, 7):
    from ._sizesrc import SizesrcValidator
    from ._size import SizeValidator
    from ._familysrc import FamilysrcValidator
    from ._family import FamilyValidator
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._sizesrc.SizesrcValidator",
            "._size.SizeValidator",
            "._familysrc.FamilysrcValidator",
            "._family.FamilyValidator",
            "._colorsrc.ColorsrcValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter@textfont@__init__.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "EranOfek/AstroPack",
"repo_path": "AstroPack_extracted/AstroPack-main/matlab/util/+tools/+array/README.md",
"type": "Markdown"
}
|
#
|
EranOfekREPO_NAMEAstroPackPATH_START.@AstroPack_extracted@AstroPack-main@matlab@util@+tools@+array@README.md@.PATH_END.py
|
{
"filename": "test_utils.py",
"repo_name": "Starfish-develop/Starfish",
"repo_path": "Starfish_extracted/Starfish-master/tests/test_grid_tools/test_utils.py",
"type": "Python"
}
|
import multiprocessing as mp
import numpy as np
import pytest
from Starfish.grid_tools import (
chunk_list,
air_to_vacuum,
vacuum_to_air,
vacuum_to_air_SLOAN,
idl_float,
)
class TestChunking:
    """Tests for splitting grid points into work chunks."""

    def test_chunk_list_shape(self, grid_points):
        # ``grid_points`` is a pytest fixture defined elsewhere in the suite.
        # Default chunk count should match the local CPU count.
        chunked = chunk_list(grid_points)
        assert len(chunked) == mp.cpu_count()
        # An explicit chunk count should be honored.
        chunked = chunk_list(grid_points, 3)
        assert len(chunked) == 3
class TestWavelengthUtils:
    """Tests for air/vacuum wavelength conversion helpers."""

    def test_air_to_vacuum(self):
        # Conversion should preserve the number of wavelength samples.
        wavelengths = np.linspace(1e4, 5e4, 1000)
        outputs = air_to_vacuum(wavelengths)
        assert len(outputs) == len(wavelengths)

    def test_vacuum_to_air(self):
        wavelengths = np.linspace(1e4, 5e4, 1000)
        outputs = vacuum_to_air(wavelengths)
        assert len(outputs) == len(wavelengths)

    def test_vacuum_to_air_SLOAN(self):
        wavelengths = np.linspace(1e4, 5e4, 1000)
        outputs = vacuum_to_air_SLOAN(wavelengths)
        assert len(outputs) == len(wavelengths)

    @pytest.mark.parametrize(
        "wavelengths", [np.linspace(1e4, 5e4, 1000), np.linspace(1e5, 5e5, 1000)]
    )
    def test_atv_vta_regression(self, wavelengths):
        # Round-trip air -> vacuum -> air should recover the input to ~2 dp.
        np.testing.assert_array_almost_equal(
            wavelengths, vacuum_to_air(air_to_vacuum(wavelengths)), 2
        )

    @pytest.mark.parametrize(
        "wavelengths", [np.linspace(1e4, 5e4, 1000), np.linspace(1e5, 5e5, 1000)]
    )
    def test_atv_vta_sloan_regression(self, wavelengths):
        # SLOAN variant round-trips with looser (integer-level) tolerance.
        np.testing.assert_array_almost_equal(
            wavelengths, vacuum_to_air_SLOAN(air_to_vacuum(wavelengths)), 0
        )
@pytest.mark.parametrize(
    "idl, num", [("1D4", 1e4), ("1.0", 1.0), ("1D-4", 1e-4), ("1d0", 1.0)]
)
def test_idl_float(idl, num):
    # IDL-style exponent markers ("D"/"d") should parse like Python's "e".
    np.testing.assert_almost_equal(idl_float(idl), num)
|
Starfish-developREPO_NAMEStarfishPATH_START.@Starfish_extracted@Starfish-master@tests@test_grid_tools@test_utils.py@.PATH_END.py
|
{
"filename": "processing.py",
"repo_name": "htjb/margarine",
"repo_path": "margarine_extracted/margarine-master/margarine/processing.py",
"type": "Python"
}
|
from tensorflow_probability import distributions as tfd
import tensorflow as tf
import random
@tf.function(jit_compile=True)
def _forward_transform(x, min=0, max=1):
    r"""
    Transforms input samples onto the base distribution.

    Samples are first normalised to [0, 1] with the CDF of a uniform
    distribution on [min, max], then mapped to samples of a standard
    normal distribution (the base of ``tfd.TransformedDistribution``)
    via the normal quantile function.

    **Parameters:**

    x: **array**
        | Samples to be transformed.

    min: **array or list**
        | Lower bound(s) of the uniform normalisation; passed in from
        the bijectors code.

    max: **array or list**
        | Upper bound(s) of the uniform normalisation; passed in from
        the bijectors code.
    """
    x = tfd.Uniform(min, max).cdf(x)
    x = tfd.Normal(0, 1).quantile(x)
    return x
@tf.function(jit_compile=True)
def _inverse_transform(x, min, max):
    r"""
    Transforms output samples back to the original space.

    Inverts the steps in ``_forward_transform``: samples are mapped from
    the standard normal onto [0, 1] via the normal CDF, then stretched
    onto [min, max] with the uniform quantile function.

    **Parameters:**

    x: **array**
        | Samples to be transformed.

    min: **array or list**
        | Lower bound(s) of the uniform de-normalisation; passed in from
        the bijectors code.

    max: **array or list**
        | Upper bound(s) of the uniform de-normalisation; passed in from
        the bijectors code.
    """
    x = tfd.Normal(0, 1).cdf(x)
    x = tfd.Uniform(min, max).quantile(x)
    return x
def pure_tf_train_test_split(a, b, test_size=0.2):
    """
    Splitting data into training and testing sets. Function is equivalent
    to sklearn.model_selection.train_test_split but a and b
    are tensorflow tensors.

    **parameters:**

    a: **array**
        | Samples to be split.

    b: **array**
        | Weights to be split.

    test_size: **float**
        | Fraction of data to be used for testing.
    """
    # Pick test indices uniformly at random.
    # NOTE(review): ``random.sample`` is not seeded here, so splits are
    # not reproducible across calls — confirm this is intended.
    idx = random.sample(range(len(a)), int(len(a)*test_size))
    # Training sets are everything not chosen above; note that the
    # set() round-trip does not preserve the original sample order.
    a_train = tf.gather(a,
                        tf.convert_to_tensor(
                            list(set(range(len(a))) - set(idx))))
    b_train = tf.gather(b,
                        tf.convert_to_tensor(
                            list(set(range(len(b))) - set(idx))))
    a_test = tf.gather(a, tf.convert_to_tensor(idx))
    b_test = tf.gather(b, tf.convert_to_tensor(idx))
    return a_train, a_test, b_train, b_test
|
htjbREPO_NAMEmargarinePATH_START.@margarine_extracted@margarine-master@margarine@processing.py@.PATH_END.py
|
{
"filename": "concentration.py",
"repo_name": "dylancromer/maszcal",
"repo_path": "maszcal_extracted/maszcal-main/spec/concentration.py",
"type": "Python"
}
|
import pytest
import numpy as np
import maszcal.concentration
import maszcal.cosmology
def describe_ConModel():
    """pytest-describe specs for maszcal.concentration.ConModel."""

    def describe_c():

        def it_converts_the_masses_appropriately():
            # Concentrations requested in a different mass definition
            # should come back with shape (n_masses, n_redshifts) and be
            # strictly positive.
            con_model = maszcal.concentration.ConModel(mass_def='500c')

            masses = np.logspace(14, 16, 3)
            redshifts = np.linspace(0, 2, 4)
            cons = con_model.c(masses, redshifts, '200c')

            assert cons.shape == (3, 4)
            assert np.all(cons > 0)

    def describe_convert_mass_def():

        def it_does_nothing_if_in_def_and_out_def_are_the_same():
            con_model = maszcal.concentration.ConModel(mass_def='500c')

            masses = np.logspace(14, 16, 3)
            redshifts = np.zeros(1)
            new_masses = con_model.convert_mass_def(masses, redshifts, '500c', '500c')

            assert np.allclose(masses, new_masses.flatten())

        def the_conversion_makes_sense():
            # 200m spheres enclose more volume than 500m, so the
            # converted masses should increase.
            con_model = maszcal.concentration.ConModel(mass_def='500m')

            masses = np.logspace(14, 16, 3)
            redshifts = np.zeros(1)
            new_masses = con_model.convert_mass_def(masses, redshifts, '500m', '200m')

            assert np.all(masses < new_masses.flatten())

    def describe_convert_c_mass_pair():

        def it_does_nothing_if_in_def_and_out_def_are_the_same():
            con_model = maszcal.concentration.ConModel(mass_def='500c')

            masses = np.logspace(14, 16, 3)
            cons = np.linspace(2, 4, 3)
            redshifts = np.zeros(1)
            new_masses, new_cons = con_model.convert_c_mass_pair(masses, cons, redshifts, '500c', '500c')

            assert np.allclose(masses, new_masses.flatten())
            assert np.allclose(cons, new_cons.flatten())
def describe_MatchingConModel():
    """pytest-describe specs for maszcal.concentration.MatchingConModel."""

    def describe_c():

        def it_converts_the_masses_appropriately():
            # The matching variant pairs masses and redshifts 1:1, so the
            # result is a flat array rather than an outer-product grid.
            con_model = maszcal.concentration.MatchingConModel(mass_def='500c')

            masses = np.logspace(14, 16, 3)
            redshifts = np.linspace(0, 2, 3)
            cons = con_model.c(masses, redshifts, '200c')

            assert cons.shape == (3,)
            assert np.all(cons > 0)
def describe_ConInterpolator():
    """pytest-describe specs for maszcal.concentration.ConInterpolator."""

    @pytest.fixture
    def con_interp():
        # Interpolator built on a coarse mass/redshift grid; queries below
        # stay inside this sampled range.
        return maszcal.concentration.ConInterpolator(
            mass_samples=np.logspace(13, 15, 5),
            redshift_samples=np.linspace(0, 1, 4),
            mass_definition='200m',
            cosmo_params=maszcal.cosmology.CosmoParams(),
        )

    def it_interpolates_the_cm_relation(con_interp):
        masses = np.logspace(np.log10(2e13), np.log10(4e14), 6)
        zs = np.linspace(0, 1, 5)
        cs = con_interp(masses, zs)
        assert cs.shape == masses.shape + zs.shape
        assert not np.any(np.isnan(cs))
|
dylancromerREPO_NAMEmaszcalPATH_START.@maszcal_extracted@maszcal-main@spec@concentration.py@.PATH_END.py
|
{
"filename": "reproduce.py",
"repo_name": "realfastvla/rfpipe",
"repo_path": "rfpipe_extracted/rfpipe-main/rfpipe/reproduce.py",
"type": "Python"
}
|
from __future__ import print_function, division, absolute_import, unicode_literals
from builtins import bytes, dict, object, range, map, input, str
from future.utils import itervalues, viewitems, iteritems, listvalues, listitems
from io import open
import pickle
import os.path
import numpy as np
from kalman_detector import kalman_prepare_coeffs, kalman_significance
import logging
logger = logging.getLogger(__name__)
def reproduce_candcollection(cc, data=None, wisdom=None, spec_std=None,
                             sig_ts=[], kalman_coeffs=[]):
    """ Uses candcollection to make new candcollection with required info.
    Will look for cluster label and filter only for peak snr, if available.
    Location (e.g., integration, dm, dt) of each is used to create
    canddata for each candidate, if required.
    Can calculates features not used directly for search (as defined in
    state.prefs.calcfeatures).

    NOTE(review): ``sig_ts``/``kalman_coeffs`` have mutable defaults;
    safe here because they are only rebound, never mutated in place.
    """
    from rfpipe import candidates, util

    # set up output cc
    st = cc.state
    cc1 = candidates.CandCollection(prefs=st.prefs, metadata=st.metadata)

    if len(cc):
        if 'cluster' in cc.array.dtype.fields:
            # Clustered input: only reproduce the peak candidate of each
            # cluster (cl_rank == 1).
            clusters = cc.array['cluster'].astype(int)
            cl_rank, cl_count = candidates.calc_cluster_rank(cc)
            calcinds = np.unique(np.where(cl_rank == 1)[0]).tolist()
            logger.debug("Reproducing cands at {0} cluster peaks"
                         .format(len(calcinds)))
        else:
            logger.debug("No cluster field found. Reproducing all.")
            calcinds = list(range(len(cc)))

        # if candidates that need new feature calculations
        if not all([f in cc.array.dtype.fields for f in st.features]):
            logger.info("Generating canddata for {0} candidates"
                        .format(len(calcinds)))

            candlocs = cc.locs
            snrs = cc.snrtot
            normprob = candidates.normprob(snrs, st.ntrials)
            snrmax = snrs.max()
            logger.info('Zscore/SNR for strongest candidate: {0}/{1}'
                        .format(normprob[np.where(snrs == snrmax)[0]][0], snrmax))

            # Prepare kalman inputs once if they were not supplied.
            if ('snrk' in st.features and
                'snrk' not in cc.array.dtype.fields and
                    (spec_std is None or not len(sig_ts) or not len(kalman_coeffs))):
                # TODO: use same kalman calc for search as reproduce?
                spec_std, sig_ts, kalman_coeffs = util.kalman_prep(data)

            # reproduce canddata for each
            for i in calcinds:
                # TODO: check on best way to find max SNR with kalman, etc
                snr = snrs[i]
                candloc = candlocs[i]

                # kwargs passed to canddata object for plotting/saving
                kwargs = {}
                if 'cluster' in cc.array.dtype.fields:
                    logger.info("Cluster {0}/{1} has {2} candidates and max detected SNR {3:.1f} at {4}"
                                .format(calcinds.index(i), len(calcinds)-1, cl_count[i],
                                        snr, candloc))
                    # add supplementary plotting and cc info
                    kwargs['cluster'] = clusters[i]
                    kwargs['clustersize'] = cl_count[i]
                else:
                    logger.info("Candidate {0}/{1} has detected SNR {2:.1f} at {3}"
                                .format(calcinds.index(i), len(calcinds)-1, snr,
                                        candloc))

                # reproduce candidate and get/calc features
                data_corr = pipeline_datacorrect(st, candloc, data_prep=data)

                for feature in st.features:
                    if feature in cc.array.dtype.fields:  # if already calculated
                        kwargs[feature] = cc.array[feature][i]
                    else:  # if desired, but not calculated here or from canddata
                        if feature == 'snrk':
                            if 'snrk' not in cc.array.dtype.fields:
                                # Kalman SNR from the candidate's spectrum;
                                # skipped (set to 0) when too many channels
                                # are zeroed out by flagging.
                                spec = data_corr.real.mean(axis=3).mean(axis=1)[candloc[1]]
                                if np.count_nonzero(spec)/len(spec) > 1-st.prefs.max_zerofrac:
                                    significance_kalman = -kalman_significance(spec, spec_std,
                                                                               sig_ts=sig_ts,
                                                                               coeffs=kalman_coeffs)
                                    snrk = (2*significance_kalman)**0.5
                                else:
                                    logger.warning("snrk set to 0, since {0}/{1} are zeroed".format(len(spec)-np.count_nonzero(spec), len(spec)))
                                    snrk = 0.
                                logger.info("Calculated snrk of {0} after detection. "
                                            "Adding it to CandData.".format(snrk))
                                kwargs[feature] = snrk

                cd = pipeline_canddata(st, candloc, data_corr, spec_std=spec_std,
                                       sig_ts=sig_ts, kalman_coeffs=kalman_coeffs, **kwargs)

                if st.prefs.saveplots:
                    candidates.candplot(cd, snrs=snrs)  # snrs before clustering

                # regenerate cc with extra features in cd
                cc1 += candidates.cd_to_cc(cd)

        # if candidates that do not need new featuers, just select peaks
        else:
            logger.info("Using clustering info to select {0} candidates"
                        .format(len(calcinds)))
            cc1.array = cc.array.take(calcinds)

    return cc1
def pipeline_dataprep(st, candloc):
    """ Prepare (read, cal, flag) data for a given state and candloc.

    ``candloc`` is (segment, candint, dmind, dtind, beamnum); only the
    segment index is used here.  Returns the flagged/calibrated
    visibility data for that segment.
    """
    from rfpipe import source

    segment, candint, dmind, dtind, beamnum = candloc

    # propagate through to new candcollection
    st.prefs.segmenttimes = st._segmenttimes.tolist()

    # prep data
    data = source.read_segment(st, segment)
    # States produced by the old rtpipe need the matching legacy flagging.
    flagversion = "rtpipe" if hasattr(st, "rtpipe_version") else "latest"
    data_prep = source.data_prep(st, segment, data, flagversion=flagversion)

    return data_prep
def pipeline_datacorrect(st, candloc, data_prep=np.array([])):
    """ Prepare and correct for dm and dt sampling of a given candloc
    Can optionally pass in prepared (flagged, calibrated) data, if available.

    NOTE(review): mutable array default is safe here — ``data_prep`` is
    only tested for length, never mutated.
    """
    from rfpipe import util
    import rfpipe.search

    if not len(data_prep):
        data_prep = pipeline_dataprep(st, candloc)

    segment, candint, dmind, dtind, beamnum = candloc
    dt = st.dtarr[dtind]
    dm = st.dmarr[dmind]

    # Legacy rtpipe (<= 1.54) used a different dispersion delay constant.
    scale = None
    if hasattr(st, "rtpipe_version"):
        scale = 4.2e-3 if st.rtpipe_version <= 1.54 else None
    delay = util.calc_delay(st.freq, st.freq.max(), dm, st.inttime,
                            scale=scale)

    data_dmdt = rfpipe.search.dedisperseresample(data_prep, delay, dt,
                                                 parallel=st.prefs.nthread > 1,
                                                 resamplefirst=st.fftmode=='cuda')

    return data_dmdt
def pipeline_canddata(st, candloc, data_dmdt=np.array([]), spec_std=None, cpuonly=False,
                      sig_ts=[], kalman_coeffs=[], **kwargs):
    """ Generate image and phased visibility data for candloc.
    Phases to peak pixel in image of candidate.
    Can optionally pass in corrected data, if available.
    cpuonly argument not being used at present.
    """
    from rfpipe import candidates, util
    import rfpipe.search

    segment, candint, dmind, dtind, beamnum = candloc
    dt = st.dtarr[dtind]
    dm = st.dmarr[dmind]

    pc = st.get_pc(segment)
    uvw = util.get_uvw_segment(st, segment, pc_mjd=pc, pc_radec=pc)

    wisdom = rfpipe.search.set_wisdom(st.npixx, st.npixy)

    if not len(data_dmdt):
        data_dmdt = pipeline_datacorrect(st, candloc)

    # Prepare kalman inputs if snrk is wanted but not yet provided.
    if ('snrk' in st.features and
        'snrk' not in kwargs and
            (spec_std is None or not len(sig_ts) or not len(kalman_coeffs))):
        spec_std, sig_ts, kalman_coeffs = util.kalman_prep(data_dmdt)

    # fftmode = 'fftw' if cpuonly else st.fftmode  # can't remember why i did this!
    image = rfpipe.search.grid_image(data_dmdt, uvw, st.npixx, st.npixy, st.uvres,
                                     'fftw', st.prefs.nthread, wisdom=wisdom,
                                     integrations=[candint])[0]

    # TODO: allow dl,dm as args and reproduce detection for other SNRs
    # NOTE(review): ``dm`` is rebound here from the dispersion measure to
    # the sky-offset coordinate returned by pixtolm — confirm intended.
    dl, dm = st.pixtolm(np.where(image == image.max()))
    # Phase to the candidate's peak pixel, extract a time window, then
    # phase back so data_dmdt is left unchanged for the caller.
    util.phase_shift(data_dmdt, uvw=uvw, dl=dl, dm=dm)
    dataph = data_dmdt[max(0, candint-st.prefs.timewindow//2):candint+st.prefs.timewindow//2].mean(axis=1)
    util.phase_shift(data_dmdt, uvw=uvw, dl=-dl, dm=-dm)

    # TODO: This probably needs to be masked to avoid averaging zeros in
    spec = data_dmdt.real.mean(axis=3).mean(axis=1)[candloc[1]]
    if 'snrk' in st.features and 'snrk' not in kwargs:
        if np.count_nonzero(spec)/len(spec) > 1-st.prefs.max_zerofrac:
            significance_kalman = -kalman_significance(spec, spec_std,
                                                       sig_ts=sig_ts,
                                                       coeffs=kalman_coeffs)
            snrk = (2*significance_kalman)**0.5
        else:
            logger.warning("snrk set to 0, since {0}/{1} are zeroed".format(len(spec)-np.count_nonzero(spec), len(spec)))
            snrk = 0.
        logger.info("Calculated snrk of {0} after detection. Adding it to CandData.".format(snrk))
        kwargs['snrk'] = snrk

    canddata = candidates.CandData(state=st, loc=tuple(candloc), image=image,
                                   data=dataph, **kwargs)

    # output is as from searching functions
    return canddata
def pipeline_candidate(st, candloc, canddata=np.array([])):
    """ Reproduce a candidate collection end-to-end from a candidate location.

    Builds (or reuses) the CandData for ``candloc`` and converts it into a
    CandCollection with the standard feature set.
    *TODO: confirm that cc returned by this has clustering and other enhanced features*
    """

    from rfpipe import candidates

    # Unpacking validates that candloc has the expected five fields.
    segment, candint, dmind, dtind, beamnum = candloc

    # Build CandData from scratch unless the caller supplied one.
    if not len(canddata):
        canddata = pipeline_canddata(st, candloc)

    return candidates.cd_to_cc(canddata)
def refine_sdm(sdmname, dm, preffile='realfast.yml', gainpath='/home/mchammer/evladata/telcal/',
               npix_max=None, npix_max_orig=None, search_sigma=7, ddm=100,
               refine=True, classify=True, devicenum=None, workdir=None, inprefs=None):
    """ Given candId, look for SDM in portal, then run refinement.

    Assumes this is running on rfnode with CBE lustre.
    npix_max_orig sets the npix_max of the original detection.
    ddm sets +- of dm grid to search.

    Returns the summed CandCollection from the search, or None if no state
    could be built or no gainfile was found.
    """

    from rfpipe import metadata, state, pipeline, candidates

    # Infer the GPU device from the dask worker name (e.g. "...g0"); fall
    # back to device 0 when not running under a worker with that naming.
    if devicenum is None:
        try:
            from distributed import get_worker
            name = get_worker().name
            devicenum = int(name.split('g')[1])
        except ValueError:
            devicenum = 0

    # Searching for gainfile
    datasetId = '{0}'.format('_'.join(os.path.basename(sdmname).split('_')[1:-1]))

    # set the paths to the gainfile
    gainname = datasetId + '.GN'
    logging.info('Searching for the gainfile {0} in {1}'.format(gainname, gainpath))

    # fix: initialize so a fruitless walk does not raise NameError below,
    # and break out of *both* loops once the file is found (the original
    # inner break did not stop os.walk).
    gainfile = None
    for path, dirs, files in os.walk(gainpath):
        for f in filter(lambda x: gainname in x, files):
            gainfile = os.path.join(path, gainname)
            break
        if gainfile is not None:
            break

    if gainfile is None:
        logger.warning("No gainfile {0} found under {1}".format(gainname, gainpath))
        return None

    # Searching all miniSDMs
    if inprefs:
        prefs = inprefs
    else:
        prefs = {'saveplots': False, 'savenoise': False, 'savesols': False, 'savecandcollection': False,
                 'savecanddata': True, 'dm_maxloss': 0.01, 'npix_max': npix_max}

    prefs['gainfile'] = gainfile
    prefs['workdir'] = workdir
    prefs['sigma_image1'] = search_sigma
    prefs['maxdm'] = dm+ddm

    bdfdir = metadata.get_bdfdir(sdmfile=sdmname, sdmscan=1)
    band = metadata.sdmband(sdmfile=sdmname, sdmscan=1, bdfdir=bdfdir)
    cc = None

    # Select a named preference set by project; default per-band otherwise.
    if 'VLASS' in sdmname:
        prefname = 'VLASS'
    elif '20A-346' in sdmname:
        prefname = '20A-346'
    else:
        prefname = 'NRAOdefault'+band

    # Try progressively smaller images if the full-size state cannot be built.
    # NOTE(review): the fallback attempts use name='NRAOdefault'+band rather
    # than prefname — confirm whether intentional for VLASS/20A-346 data.
    # NOTE(review): min(npix_max, ...) raises TypeError when npix_max is None
    # — confirm callers always pass npix_max here.
    try:
        st = state.State(sdmfile=sdmname, sdmscan=1, inprefs=prefs, preffile=preffile, name=prefname, showsummary=False, bdfdir=bdfdir)
    except AssertionError:
        try:
            logger.warning("Could not generate state with full image. Trying with npix_max at 2x original image size...")
            prefs['npix_max'] = min(npix_max, 2*npix_max_orig)
            st = state.State(sdmfile=sdmname, sdmscan=1, inprefs=prefs, preffile=preffile, name='NRAOdefault'+band, bdfdir=bdfdir, showsummary=False)
        except AssertionError:  # could be state can't be defined
            logger.warning("Could not generate state with 2x images. Trying with original image size...")
            prefs['npix_max'] = min(npix_max, npix_max_orig)
            st = state.State(sdmfile=sdmname, sdmscan=1, inprefs=prefs, preffile=preffile, name='NRAOdefault'+band, bdfdir=bdfdir, showsummary=False)
    except FileNotFoundError as e:
        logger.warning("{0}".format(e))
        return cc

    # remove superfluous dms, enforce orig dm
    st.prefs.dmarr = sorted([dm] + [dm0 for dm0 in st.dmarr if (dm0 == 0 or dm0 > dm-ddm)])
    st.clearcache()
    st.summarize()

    ccs = pipeline.pipeline_scan(st, devicenum=devicenum)
    cc = sum(ccs) if len(ccs) else ccs

    # Classify the generated pickles using FETCH and generate refinement plots
    if len(cc):
        # NOTE(review): this assert fails on exact SNR ties — confirm acceptable.
        maxind = np.where(cc.snrtot == cc.snrtot.max())[0]
        assert len(maxind) == 1
        cd = cc[maxind[0]].canddata[0]
        assert isinstance(cd, candidates.CandData)
        if classify:
            try:
                frbprob = candidates.cd_to_fetch(cd, classify=True, devicenum=devicenum, mode='CPU')
                logging.info('FETCH FRB Probability of the candidate {0} is {1}'.format(cd.candid, frbprob))
            except AttributeError:
                logging.info('FETCH classification failed.')
                frbprob = None
        else:
            frbprob = None
        if refine:
            logging.info('Generating Refinement plots')
            cd_refined_plot(cd, devicenum, frbprob=frbprob)
    else:
        if prefs['npix_max'] != npix_max_orig:
            # fix: removed a no-op ".format(cc)" on a placeholder-free string
            logging.info('No candidate was found in first search. Trying again with original image size.')
            prefs['npix_max'] = npix_max_orig
            st = state.State(sdmfile=sdmname, sdmscan=1, inprefs=prefs, preffile=preffile, name='NRAOdefault'+band, bdfdir=bdfdir,
                             showsummary=False)
            # remove superfluous dms, enforce orig dm
            st.prefs.dmarr = sorted([dm] + [dm0 for dm0 in st.dmarr if (dm0 == 0 or dm0 > dm-ddm)])
            st.clearcache()
            st.summarize()
            ccs = pipeline.pipeline_scan(st, devicenum=devicenum)
            cc = sum(ccs) if len(ccs) else ccs
            if len(cc):
                maxind = np.where(cc.snrtot == cc.snrtot.max())[0]
                assert len(maxind) == 1
                cd = cc[maxind[0]].canddata[0]
                assert isinstance(cd, candidates.CandData)
                if classify:
                    frbprob = candidates.cd_to_fetch(cd, classify=True, mode='CPU')
                    logging.info('FETCH FRB Probability of the candidate {0} is {1}'.format(cd.candid, frbprob))
                else:
                    frbprob = None
                if refine:
                    logging.info('Generating Refinement plots')
                    cd_refined_plot(cd, devicenum, frbprob=frbprob)
            else:
                logging.info('No candidate was found in search at original image size. Giving up.')

    return cc
def cd_refined_plot(cd, devicenum, nsubbands=4, mode='CPU', frbprob=None):
    """ Use canddata object to create refinement plot of subbanded SNR and dm-time plot.

    Saves a multi-panel figure (time series, dedispersed/dispersed freq-time,
    DM-time, image and zoomed image) to the state's workdir.

    Parameters: cd is a CandData; devicenum selects the GPU for the DM-time
    transform; nsubbands sets the number of frequency subbands; frbprob is an
    optional FETCH probability to annotate on the plot.
    """

    import rfpipe.search
    from rfpipe import util
    from matplotlib import gridspec
    import pylab as plt
    import matplotlib

    params = {
        'axes.labelsize': 14,
        'font.size': 9,
        'legend.fontsize': 12,
        'xtick.labelsize': 12,
        'ytick.labelsize': 12,
        'text.usetex': False,
        'figure.figsize': [12, 10]
        }
    matplotlib.rcParams.update(params)

    segment, candint, dmind, dtind, beamnum = cd.loc
    st = cd.state
    scanid = cd.state.metadata.scanId
    width_m = st.dtarr[dtind]
    timewindow = st.prefs.timewindow
    tsamp = st.inttime*width_m
    dm = st.dmarr[dmind]

    # Freq-time array flipped so frequency runs high to low along axis 0.
    ft_dedisp = np.flip((cd.data.real.sum(axis=2).T), axis=0)
    chan_freqs = np.flip(st.freq*1000, axis=0)  # from high to low, MHz
    nf, nt = np.shape(ft_dedisp)
    candloc = cd.loc

    logger.debug('Size of the FT array is ({0}, {1})'.format(nf, nt))
    try:
        assert nt > 0
    except AssertionError as err:
        logger.exception("Number of time bins is equal to 0")
        raise err
    try:
        assert nf > 0
    except AssertionError as err:
        logger.exception("Number of frequency bins is equal to 0")
        raise err

    # Center the candidate integration in the time window.
    roll_to_center = nt//2 - cd.integration_rel
    ft_dedisp = np.roll(ft_dedisp, shift=roll_to_center, axis=1)

    # If timewindow is not set during search, set it equal to the number of time bins of candidate
    if nt != timewindow:
        logger.info('Setting timewindow equal to nt = {0}'.format(nt))
        timewindow = nt
    else:
        logger.info('Timewindow length is {0}'.format(timewindow))

    try:
        assert nf == len(chan_freqs)
    except AssertionError as err:
        logger.exception("Number of frequency channel in data should match the frequency list")
        raise err

    # fix: was "dm is not 0" — identity comparison is wrong for float/numpy
    # DM values (0.0 is not the int 0), so the zero-DM fallback never fired.
    if dm != 0:
        dm_start = 0
        dm_end = 2*dm
    else:
        dm_start = -10
        dm_end = 10

    logger.info('Generating DM-time for candid {0} in DM range {1:.2f}--{2:.2f} pc/cm3'
                .format(cd.candid, dm_start, dm_end))
    logger.info("Using gpu devicenum: {0}".format(devicenum))
    os.environ['CUDA_VISIBLE_DEVICES'] = str(devicenum)

    dmt = rfpipe.search.make_dmt(ft_dedisp, dm_start-dm, dm_end-dm, 256, chan_freqs/1000,
                                 tsamp, mode=mode, devicenum=int(devicenum))

    # Re-disperse to show the original (dispersed) freq-time data.
    delay = util.calc_delay(chan_freqs/1000, chan_freqs.max()/1000, -1*dm, tsamp)
    dispersed = rfpipe.search.dedisperse_roll(ft_dedisp, delay)
    # dispersed = disperse(ft_dedisp, -1*dm, chan_freqs/1000, tsamp)

    im = cd.image
    imstd = im.std()  # consistent with rfgpu
    snrim = np.round(im.max()/imstd, 2)
    snrk = np.round(cd.snrk, 2)
    l1, m1 = st.pixtolm(np.where(im == im.max()))

    subsnrs, subts, bands = calc_subband_info(ft_dedisp, chan_freqs, nsubbands)
    logging.info(f'Generating time series of full band')
    ts_full = ft_dedisp.sum(0)
    logging.info(f'Calculating SNR of full band')
    snr_full = calc_snr(ts_full)

    # Assemble the text panel and mirror each line to the log.
    to_print = []
    logging.info(f'{scanid}')
    to_print.append(f"{'.'.join(scanid.split('.')[:3])}. \n")
    to_print.append(f"{'.'.join(scanid.split('.')[3:])}\n")
    logging.info(f'candloc: {candloc}, DM: {dm:.2f}')
    to_print.append(f'candloc: {candloc}, DM: {dm:.2f}\n')
    logging.info(f'Source: {st.metadata.source}')
    to_print.append(f'Source: {st.metadata.source}\n')
    logging.info(f'Subbanded SNRs are:')
    to_print.append(f'Subbanded SNRs are:\n')
    for i in range(nsubbands):
        logging.info(f'Band: {chan_freqs[bands[i][0]]:.2f}-{chan_freqs[bands[i][1]-1]:.2f}, SNR: {subsnrs[i]:.2f}')
        to_print.append(f'Band: {chan_freqs[bands[i][0]]:.2f}-{chan_freqs[bands[i][1]-1]:.2f}, SNR: {subsnrs[i]:.2f}\n')
    logging.info(f'SNR of full band is: {snr_full:.2f}')
    to_print.append(f'SNR of full band is: {snr_full:.2f}\n')
    logging.info(f'SNR (im/k): {snrim}/{snrk}')
    to_print.append(f'SNR (im/k): {snrim}/{snrk}\n')
    logging.info(f'Clustersize: {cd.clustersize}')
    to_print.append(f'Clustersize: {cd.clustersize}\n')
    if frbprob is not None:
        logging.info(f'frbprob: {frbprob}')
        to_print.append(f'frbprob: {np.round(frbprob, 4)}\n')
    str_print = ''.join(to_print)

    fov = np.degrees(1./st.uvres)*60.
    l1arcm = np.degrees(l1)*60
    m1arcm = np.degrees(m1)*60
    ts = np.arange(timewindow)*tsamp

    # Layout: left column of four stacked panels with rotated labels in a
    # narrow middle column; text, zoom image and full image on the right.
    gs = gridspec.GridSpec(4, 3, width_ratios=[3.5, 0.1, 3], height_ratios=[1, 1, 1, 1], wspace=0.05, hspace=0.20)
    ax1 = plt.subplot(gs[0, 0])
    ax2 = plt.subplot(gs[1, 0])
    ax3 = plt.subplot(gs[2, 0])
    ax4 = plt.subplot(gs[3, 0])
    ax11 = plt.subplot(gs[0, 1])
    ax22 = plt.subplot(gs[1, 1])
    ax33 = plt.subplot(gs[2, 1])
    ax44 = plt.subplot(gs[3, 1])
    ax5 = plt.subplot(gs[0, 2:3])
    ax6 = plt.subplot(gs[2:4, 2])
    ax7 = plt.subplot(gs[1, 2])

    x_loc = 0.1
    y_loc = 0.5

    for i in range(nsubbands):
        ax1.plot(ts, subts[i] - subts[i].mean(), label = f'Band: {chan_freqs[bands[i][0]]:.0f}-{chan_freqs[bands[i][1]-1]:.0f}')
    ax1.plot(ts, subts.sum(0) - subts.sum(0).mean(), 'k.', label = 'Full Band')
    ax1.legend(loc='upper center', bbox_to_anchor=(0.5, 1.45), ncol=3, fancybox=True, shadow=True, fontsize=11)
    ax1.set_ylabel('Flux (Arb. units)')
    ax1.set_xlim(np.min(ts), np.max(ts))
    ax11.text(x_loc, y_loc, 'Time Series', fontsize=14, ha='center', va='center', wrap=True, rotation=-90)
    ax11.axis('off')

    ax2.imshow(ft_dedisp, aspect='auto', extent=[ts[0], ts[-1], np.min(chan_freqs), np.max(chan_freqs)])
    ax2.set_ylabel('Freq')
    ax22.text(x_loc, y_loc, 'Dedispersed FT', fontsize=14, ha='center', va='center', wrap=True, rotation=-90)
    ax22.axis('off')

    ax3.imshow(dispersed, aspect='auto', extent=[ts[0], ts[-1], np.min(chan_freqs), np.max(chan_freqs)])
    ax3.set_ylabel('Freq')
    ax33.text(x_loc, y_loc, 'Original dispersed FT', fontsize=14, ha='center', va='center', wrap=True, rotation=-90)
    ax33.axis('off')

    ax4.imshow(np.flip(dmt, axis=0), aspect='auto', extent=[ts[0], ts[-1], dm_start, dm_end])
    ax4.set_xlabel('Time (s)')
    ax4.set_ylabel('DM')
    ax44.text(x_loc, y_loc, 'DM-Time', fontsize=14, ha='center', va='center', wrap=True, rotation=-90)
    ax44.axis('off')

    # ax5.text(0.02, 0.8, str_print, fontsize=14, ha='left', va='top', wrap=True)
    ax5.text(0.02, 1.4, str_print, fontsize=11.5, ha='left', va='top', wrap=True)
    ax5.axis('off')

    _ = ax6.imshow(im.transpose(), aspect='equal', origin='upper',
                   interpolation='nearest',
                   extent=[fov/2, -fov/2, -fov/2, fov/2],
                   cmap=plt.get_cmap('viridis'), vmin=0,
                   vmax=0.5*im.max())
    ax6.set_xlabel('RA Offset (arcmin)')
    ax6.set_ylabel('Dec Offset (arcmin)', rotation=-90, labelpad=12)
    ax6.yaxis.tick_right()
    ax6.yaxis.set_label_position("right")
    # to set scale when we plot the triangles that label the location
    ax6.autoscale(False)
    # add markers on the axes at measured position of the candidate
    ax6.scatter(x=[l1arcm], y=[-fov/2], c='#ffff00', s=60, marker='^',
                clip_on=False)
    ax6.scatter(x=[fov/2], y=[m1arcm], c='#ffff00', s=60, marker='>',
                clip_on=False)
    # makes it so the axis does not intersect the location triangles
    ax6.set_frame_on(False)

    sbeam = np.mean(st.beamsize_deg)*60
    # figure out the location to center the zoomed image on
    xratio = len(im[0])/fov  # pix/arcmin
    yratio = len(im)/fov  # pix/arcmin
    mult = 5  # sets how many times the synthesized beam the zoomed FOV is
    xmin = max(0, int(len(im[0])//2-(m1arcm+sbeam*mult)*xratio))
    xmax = int(len(im[0])//2-(m1arcm-sbeam*mult)*xratio)
    ymin = max(0, int(len(im)//2-(l1arcm+sbeam*mult)*yratio))
    ymax = int(len(im)//2-(l1arcm-sbeam*mult)*yratio)

    left, width = 0.231, 0.15
    bottom, height = 0.465, 0.15
    # rect_imcrop = [left, bottom, width, height]
    # ax_imcrop = fig.add_axes(rect_imcrop)
    # logger.debug('{0}'.format(im.transpose()[xmin:xmax, ymin:ymax].shape))
    # logger.debug('{0} {1} {2} {3}'.format(xmin, xmax, ymin, ymax))
    _ = ax7.imshow(im.transpose()[xmin:xmax,ymin:ymax], aspect=1,
                   origin='upper', interpolation='nearest',
                   extent=[-1, 1, -1, 1],
                   cmap=plt.get_cmap('viridis'), vmin=0,
                   vmax=0.5*im.max())
    # setup the axes
    ax7.set_ylabel('Dec (arcmin)')
    ax7.set_xlabel('RA (arcmin)')
    ax7.xaxis.set_label_position('top')
    # ax7.xaxis.tick_top()
    ax7.yaxis.tick_right()
    # ax7.yaxis.set_label_position("right")
    xlabels = [str(np.round(l1arcm+sbeam*mult/2, 1)), '',
               str(np.round(l1arcm, 1)), '',
               str(np.round(l1arcm-sbeam*mult/2, 1))]
    ylabels = [str(np.round(m1arcm-sbeam*mult/2, 1)), '',
               str(np.round(m1arcm, 1)), '',
               str(np.round(m1arcm+sbeam*mult/2, 1))]
    ax7.set_xticklabels(xlabels)
    ax7.set_yticklabels(ylabels)
    # change axis label loc of inset to avoid the full picture
    ax7.get_yticklabels()[0].set_verticalalignment('bottom')

    plt.tight_layout()
    plt.savefig(os.path.join(cd.state.prefs.workdir, 'cands_{0}_refined.png'.format(cd.state.metadata.scanId)), bbox_inches='tight')
def calc_subband_info(ft, chan_freqs, nsubbands=4):
    """ Use freq-time array to calculate subbands and detect in each subband.

    ft is a 2-d (nchan, ntime) array; chan_freqs is the per-channel frequency
    list (same length as axis 0). Returns (subsnrs, subts, bands): per-subband
    SNRs, per-subband time series, and [start, end) channel index pairs.
    """

    nf, nt = ft.shape
    subbandsize = nf//nsubbands
    # fix: removed unused local "bandstarts" (computed but never read)

    subsnrs = np.zeros(nsubbands)
    subts = np.zeros((nsubbands, nt))
    bands = []
    for i in range(nsubbands):
        bandstart = i*subbandsize
        if i == nsubbands-1:
            # NOTE(review): nf-1 (not nf) excludes the final channel from the
            # last subband slice — preserved as-is, confirm it is intentional.
            bandend = nf-1
        else:
            bandend = (i+1)*subbandsize
        bands.append([bandstart, bandend])

        logging.info(f'Generating time series of band: {chan_freqs[bands[i][0]]:.0f}-{chan_freqs[bands[i][1]-1]:.0f}')
        subts[i, :] = ft[bandstart: bandend, :].sum(0)
        logging.info(f'Calculating SNR of band: {chan_freqs[bands[i][0]]:.0f}-{chan_freqs[bands[i][1]-1]:.0f}')
        subsnrs[i] = calc_snr(subts[i, :])

    return subsnrs, subts, bands
def calc_snr(ts):
    """ Use time series to calculate SNR of peak.

    Noise statistics come from a MAD-based std over samples within 3 sigma of
    the median; the SNR is the peak of the remaining (signal) samples over
    that noise std. Returns np.nan when the std is 0 (SNR undefined) and 0
    when the series contains no samples outside the noise band.
    """

    from rfpipe import util

    std = util.madtostd(ts)
    if std == 0:
        logging.warning('Standard Deviation of time series is 0. SNR not defined.')
        return np.nan

    median = np.median(ts)
    noise_mask = (median - 3*std < ts) & (ts < median + 3*std)
    if noise_mask.sum() == len(ts):
        logging.warning('Time series is just noise, SNR = 0.')
        return 0

    mean_ts = np.mean(ts[noise_mask])
    std = util.madtostd(ts[noise_mask] - mean_ts)
    if std == 0:
        # fix: previously warned but still divided by zero below
        logging.warning('Noise Standard Deviation is 0. SNR not defined.')
        return np.nan

    return np.max(ts[~noise_mask] - mean_ts)/std
def oldcands_read(candsfile, sdmscan=None):
    """ Parse an old-style cands pickle into new-style candcollections.

    Returns a list of (state, candcollection) tuples, one per scan found in
    the file (or just the requested sdmscan).
    """

    with open(candsfile, 'rb') as pkl:
        try:
            d = pickle.load(pkl)
            ret = pickle.load(pkl)
        except UnicodeDecodeError:
            d = pickle.load(pkl, encoding='latin-1')
            ret = pickle.load(pkl, encoding='latin-1')

    # Old files stored either a (loc, prop) tuple or a {loc: prop} dict.
    if isinstance(ret, tuple):
        loc, prop = ret
    elif isinstance(ret, dict):
        loc = np.array(list(ret.keys()))
        prop = np.array(list(ret.values()))
    else:
        logger.warning("Not sure what we've got in this here cands pkl file...")

    if sdmscan is None:  # and (u'scan' in d['featureind']):
        # scanind = d['featureind'].index('scan')
        scanind = 0
        scans = np.unique(loc[:, scanind])
    else:
        scans = [sdmscan]

    ll = []
    for scan in scans:
        try:
            ll.append(oldcands_readone(candsfile, scan))
        except AttributeError:
            pass

    return ll
def oldcands_readone(candsfile, scan=None):
    """ Reads old-style candidate files to create new state and candidate
    collection for a given scan.

    Parsing merged cands file requires sdm locally with bdf for given scan.
    If no scan provided, assumes candsfile is from single scan not merged.
    Returns (state, candcollection).
    """

    from rfpipe import preferences, metadata, state, candidates

    with open(candsfile, 'rb') as pkl:
        try:
            d = pickle.load(pkl)
            ret = pickle.load(pkl)
        except UnicodeDecodeError:
            d = pickle.load(pkl, encoding='latin-1')
            ret = pickle.load(pkl, encoding='latin-1')

    # Old files stored either a (loc, prop) tuple or a {loc: prop} dict.
    if isinstance(ret, tuple):
        loc, prop = ret
    elif isinstance(ret, dict):
        loc = np.array(list(ret.keys()))
        prop = np.array(list(ret.values()))
    else:
        logger.warning("Not sure what we've got in this here cands pkl file...")

    # detect merged vs nonmerged
    if 'scan' in d['featureind']:
        locind0 = 1
    else:
        locind0 = 0

    # merged candsfiles must be called with scan arg
    if scan is None:
        assert locind0 == 0, "Set scan if candsfile has multiple scans."

    # NOTE(review): these pops raise KeyError if the keys are absent — assumes
    # oldstate_preferences always supplies them; confirm.
    inprefs = preferences.oldstate_preferences(d, scan=scan)
    inprefs.pop('gainfile')
    inprefs.pop('workdir')
    inprefs.pop('fileroot')
    # fix: removed the no-op self-assignment of inprefs['segmenttimes']
    sdmfile = os.path.basename(d['filename'])

    # Prefer building state from the local SDM; fall back to the metadata
    # embedded in the old pickle when the SDM is unavailable.
    try:
        assert scan is not None
        st = state.State(sdmfile=sdmfile, sdmscan=scan, inprefs=inprefs)
    except Exception:  # fix: was a bare except (also caught SystemExit/KeyboardInterrupt)
        meta = metadata.oldstate_metadata(d, scan=scan)
        st = state.State(inmeta=meta, inprefs=inprefs, showsummary=False)

    if 'rtpipe_version' in d:
        st.rtpipe_version = float(d['rtpipe_version'])  # TODO test this
        if st.rtpipe_version <= 1.54:
            logger.info('Candidates detected with rtpipe version {0}. All '
                        'versions <=1.54 used incorrect DM scaling.'
                        .format(st.rtpipe_version))

    if scan is None:
        assert locind0 == 0, "Set scan if candsfile has multiple scans."
        scan = d['scan']

    logger.info('Calculating candidate properties for scan {0}'.format(scan))

    # Merged files carry scan as column 0; select this scan and drop it.
    if locind0 == 1:
        loc = loc[np.where(loc[:, 0] == scan)][:, locind0:]

    # fix: leftover debug print replaced with lazy logger call
    logger.debug('%s %s', st.features, st.prefs.searchtype)

    # Build a structured array: int32 search dimensions + float32 features.
    fields = [str(ff) for ff in st.search_dimensions + st.features]
    types = [str(tt) for tt in len(st.search_dimensions)*['<i4'] + len(st.features)*['<f4']]
    dtype = np.dtype({'names': fields, 'formats': types})
    features = np.zeros(len(loc), dtype=dtype)
    for i in range(len(loc)):
        features[i] = tuple(list(loc[i]) + list(prop[i]))

    cc = candidates.CandCollection(features, st.prefs, st.metadata)

    return st, cc
def oldcands_convert(candsfile, scan=None):
    """ Read an old-style candsfile for a single scan and write the
    candcollection back out as a new-style pickle at the state's candsfile path.
    """

    state, candcoll = oldcands_readone(candsfile, scan=scan)
    with open(state.candsfile, 'wb') as outpkl:
        pickle.dump(candcoll, outpkl)
|
realfastvlaREPO_NAMErfpipePATH_START.@rfpipe_extracted@rfpipe-main@rfpipe@reproduce.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "zclaytor/butterpy",
"repo_path": "butterpy_extracted/butterpy-main/setup.py",
"type": "Python"
}
|
from setuptools import setup, find_packages

# Use the project README as the PyPI long description.
with open("README.md", "r") as fh:
    readme = fh.read()

# Obtain __version__ by executing version.py in a scratch namespace, so the
# package itself is never imported at install time.
version = {}
with open("butterpy/version.py") as fp:
    exec(fp.read(), version)

setup(
    name="butterpy",
    version=version["__version__"],
    author="Zachary R. Claytor <zclaytor@hawaii.edu> and Miles Lucas <mdlucas@hawaii.edu>",
    description="Tools for simulating stellar rotational light curves using realistic spot evolution",
    long_description=readme,
    long_description_content_type="text/markdown",
    url="https://github.com/zclaytor/butterpy",
    license="MIT",
    # NOTE(review): "==3.*" allows any Python 3.x; ">=3" is the more common
    # spelling — confirm the intended constraint.
    python_requires="==3.*",
    packages=find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Unix",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering :: Astronomy",
    ],
)
|
zclaytorREPO_NAMEbutterpyPATH_START.@butterpy_extracted@butterpy-main@setup.py@.PATH_END.py
|
{
"filename": "yesno.py",
"repo_name": "msiebert1/UCSC_spectral_pipeline",
"repo_path": "UCSC_spectral_pipeline_extracted/UCSC_spectral_pipeline-master/spectral_reduction/tmath/wombat/yesno.py",
"type": "Python"
}
|
def yesno(default):
    """Prompt the user for a yes/no answer and return 'y' or 'n'.

    A bare return (empty keypress) falls back to *default*; any other input
    is reduced to its first lowercased character and re-prompted until it is
    'y' or 'n'.
    """
    from tmath.wombat.getch import getch
    answer = ''
    while answer not in ('y', 'n'):
        print('(y)es or (n)o? (default/return = {}) '.format(default))
        reply = getch().strip()
        if not reply:
            reply = default
        answer = reply.lower()[0]
    return answer
|
msiebert1REPO_NAMEUCSC_spectral_pipelinePATH_START.@UCSC_spectral_pipeline_extracted@UCSC_spectral_pipeline-master@spectral_reduction@tmath@wombat@yesno.py@.PATH_END.py
|
{
"filename": "_array.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/error_y/_array.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ArrayValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the ``array`` property of ``bar.error_y``."""

    def __init__(self, plotly_name="array", parent_name="bar.error_y", **kwargs):
        # Default the edit type to "calc" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "calc")
        super(ArrayValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@error_y@_array.py@.PATH_END.py
|
{
"filename": "_weightsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/pie/outsidetextfont/_weightsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class WeightsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Source validator for the ``weightsrc`` property of ``pie.outsidetextfont``."""

    def __init__(
        self, plotly_name="weightsrc", parent_name="pie.outsidetextfont", **kwargs
    ):
        # Default the edit type to "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super(WeightsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@pie@outsidetextfont@_weightsrc.py@.PATH_END.py
|
{
"filename": "LOAO_Routine.tmp.py",
"repo_name": "SilverRon/gppy",
"repo_path": "gppy_extracted/gppy-main/LOAO_Routine.tmp.py",
"type": "Python"
}
|
# %% [markdown]
# # IMSNG_Routine.ipynb
# - Automatically process the image data from GECKO facilities and search for transients in the subtracted images with `gpPy`
# - Author: Gregory S.H. Paek (23.04.24)
# %% [markdown]
# ## Library
# %%
from __future__ import print_function, division, absolute_import
import os, sys, glob, subprocess
import numpy as np
import astropy.io.ascii as ascii
import matplotlib.pyplot as plt
plt.ioff()
from astropy.nddata import CCDData
from preprocess import calib
from util import tool
from astropy.io import fits
from astropy.table import Table, vstack
from astropy import units as u
from ccdproc import ImageFileCollection
import warnings
warnings.filterwarnings(action='ignore')
# from itertools import product
from itertools import repeat
import multiprocessing
import time
start_localtime = time.strftime('%Y-%m-%d %H:%M:%S (%Z)', time.localtime())
# %%
# plot setting
import matplotlib.pyplot as plt
import matplotlib as mpl
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "last_expr"
mpl.rcParams["axes.titlesize"] = 14
mpl.rcParams["axes.labelsize"] = 20
plt.rcParams['savefig.dpi'] = 500
plt.rc('font', family='serif')
# %% [markdown]
# ## Ready
# %%
obs = 'LOAO'
# obs = 'LOAO'
print(f'# Observatory : {obs.upper()}')
ncores = 4
print(f"- Number of Cores: {ncores}")
# %% [markdown]
# ### Path
# %%
path_base = '/data4/gecko/factory'
path_ref = f'{path_base}/ref_frames/{obs.upper()}'
path_factory = f'{path_base}/{obs.lower()}'
path_save = f'/data6/bkgdata/{obs.upper()}'
path_log = f'/home/paek/log/{obs.lower()}.log'
path_keys = '/home/paek/table'
#------------------------------------------------------------
path_gal = '/data6/IMSNG/IMSNGgalaxies'
path_refcat = '/data4/gecko/factory/ref_frames/LOAO'
#------------------------------------------------------------
# path_config = '/home/paek/config'
path_config = './config'
path_default_gphot = f'{path_config}/gphot.{obs.lower()}.config'
path_mframe = f'/data3/paek/factory/master_frames'
path_calib = f'{path_base}/calib'
#------------------------------------------------------------
# Codes
#------------------------------------------------------------
path_phot_sg = './phot/gregoryphot_2021.py'
path_phot_mp = './phot/gregoryphot_mp_2021.py'
path_phot_sub = './phot/gregoryphot_sub_2021.py'
path_find = './phot/gregoryfind_bulk_mp_2021.py'
#------------------------------------------------------------
if 'KCT_ASI1600MM' in obs:
path_raw = '/data3/IMSNG/KCT/obsdata'
else:
path_raw = f'/data6/obsdata/{obs.upper()}'
rawlist = sorted(glob.glob(path_raw+'/2*'))
#------------------------------------------------------------
path_obs = '/home/paek/table/obs.dat'
path_changehdr = '/home/paek/table/changehdr.dat'
path_alltarget = '/home/paek/table/alltarget.dat'
ccdinfo = tool.getccdinfo(obs, path_obs)
# %% [markdown]
# - Folder setting
# %%
if not os.path.exists(path_base):
os.makedirs(path_base)
if not os.path.exists(path_ref):
os.makedirs(path_ref)
if not os.path.exists(path_factory):
os.makedirs(path_factory)
if not os.path.exists(path_save):
os.makedirs(path_save)
# %% [markdown]
# ### Table
# %%
logtbl = ascii.read(path_log)
datalist = np.copy(logtbl['date'])
obstbl = ascii.read(path_obs)
hdrtbl = ascii.read(path_changehdr)
alltbl = ascii.read(path_alltarget)
keytbl = ascii.read(f'{path_keys}/keys.dat')
# %% [markdown]
# ## Process Summary Status
# %%
protbl = Table()
protbl['process'] = ['master_frame', 'pre_process', 'astrometry', 'cr_removal', 'defringe', 'photometry', 'image_stack', 'photometry_com', 'subtraction', 'photometry_sub', 'transient_search', 'total']
protbl['status'] = False
protbl['time'] = 0.0 * u.second
# %% [markdown]
# ## Main Body
# %%
newlist = [
'/data6/obsdata/LOAO/2023_0519',
'/data6/obsdata/LOAO/2023_0520',
'/data6/obsdata/LOAO/2023_0521',
'/data6/obsdata/LOAO/2023_0522',
'/data6/obsdata/LOAO/2023_0523',
'/data6/obsdata/LOAO/2023_0524',
'/data6/obsdata/LOAO/2023_0525',
'/data6/obsdata/LOAO/2023_0526',
'/data6/obsdata/LOAO/2023_0527',
'/data6/obsdata/LOAO/2023_0528',
'/data6/obsdata/LOAO/2023_0529',
'/data6/obsdata/LOAO/2023_0530',
'/data6/obsdata/LOAO/2023_0531',
'/data6/obsdata/LOAO/2023_0601',
'/data6/obsdata/LOAO/2023_0602',
'/data6/obsdata/LOAO/2023_0603',
'/data6/obsdata/LOAO/2023_0604',
'/data6/obsdata/LOAO/2023_0605',
'/data6/obsdata/LOAO/2023_0606',
'/data6/obsdata/LOAO/2023_0607',
'/data6/obsdata/LOAO/2023_0608',
'/data6/obsdata/LOAO/2023_0609',
'/data6/obsdata/LOAO/2023_0610',
'/data6/obsdata/LOAO/2023_0611',
'/data6/obsdata/LOAO/2023_0612',
'/data6/obsdata/LOAO/2023_0613',
'/data6/obsdata/LOAO/2023_0614',
'/data6/obsdata/LOAO/2023_0615',
'/data6/obsdata/LOAO/2023_0616',
'/data6/obsdata/LOAO/2023_0617',
'/data6/obsdata/LOAO/2023_0618',
'/data6/obsdata/LOAO/2023_0619',
'/data6/obsdata/LOAO/2023_0621',
'/data6/obsdata/LOAO/2023_0622',
'/data6/obsdata/LOAO/2023_0623',
'/data6/obsdata/LOAO/2023_0624',
'/data6/obsdata/LOAO/2023_0625',
'/data6/obsdata/LOAO/2023_0626',
'/data6/obsdata/LOAO/2023_0627',
'/data6/obsdata/LOAO/2023_0628',
'/data6/obsdata/LOAO/2023_0629',
'/data6/obsdata/LOAO/2023_0630',
'/data6/obsdata/LOAO/2023_0701',
'/data6/obsdata/LOAO/2023_0702',
'/data6/obsdata/LOAO/2023_0703',
'/data6/obsdata/LOAO/2023_0704',
'/data6/obsdata/LOAO/2023_0705',
'/data6/obsdata/LOAO/2023_0706',
]
path = newlist[0]
for pp, path in enumerate(newlist):
print(f"[{pp+1}/{len(newlist)}]", path)
tdict = dict()
starttime = time.time()
path_data = '{}/{}'.format(path_factory, os.path.basename(path))
# Remove old folder and re-copy folder
rmcom = 'rm -rf {}'.format(path_data)
print(rmcom)
os.system(rmcom)
cpcom = 'cp -r {} {}'.format(path, path_data)
print(cpcom)
os.system(cpcom)
all_image_list = sorted(glob.glob(f"{path_data}/*fits"))
for inim in all_image_list:
if 'M101' in inim:
# print(inim)
pass
else:
rmcom = f"rm -rf {inim}"
print(rmcom)
os.system(rmcom)
# %% [markdown]
# ### Header Correction
# %%
ic0 = ImageFileCollection(path_data, keywords='*')
ic0.summary.write('{}/hdr.raw.dat'.format(path_data), format='ascii.tab', overwrite=True)
# Exceptions
# DOAO CCD
if obs == 'DOAO':
instrume = ic0.summary['instrume'][0]
if instrume == 'Apogee USB/Net':obs = 'DOAO_APOGEE'
elif instrume == '': obs = 'DOAO_FLI'
elif instrume == 'FLI': obs = 'DOAO_FLI'
elif instrume == 'Moravian C4-16000': obs = 'DOAO_C416K'
else:
pass
obsinfo = calib.getobsinfo(obs, obstbl)
if obs == 'SAO_C361K':
xbinning = ic0.summary['xbinning'][0]
if xbinning > 1:
print(f'{obs} : BINNINGx{xbinning}')
obsinfo['pixscale'] = obsinfo['pixscale']*xbinning
if ic0.summary['instrume'][0] == 'ASCOM Camera Driver for FLI Kepler':
obsinfo['pixscale'] = 0.311
obsinfo['fov'] = 0.25*60.
obsinfo['gain'] = float(ic0.summary['egain'][0])
# KHAO Binning
if obs == 'KHAO':
xbinning = ic0.summary['xbinning'][0]
if xbinning > 1:
obsinfo['pixscale'] = obsinfo['pixscale']*2
# LOAO
if obs == 'LOAO':
instrume = ic0.summary['instrume'][0]
if 'Finger Lakes' in instrume:
obs = 'LOAO_FLI'
obsinfo = calib.getobsinfo(obs, obstbl)
# RASA Mode
if obs == 'RASA36':
if 'hdr' in path:
mode = 'hdr'
badmode = True
elif 'high' in path:
mode = 'high'
badmode = True
else:
mode = 'high'
badmode = True
pass
print(f'Master Frame Mode:{mode} [Bad Mode:{badmode}]')
calib.correcthdr_routine(path_data, hdrtbl, obs)
print("Correction Done")
# objfilterlist, objexptimelist, flatfilterlist, darkexptimelist, obstime = calib.correcthdr_routine(path_data, hdrtbl)
ic1 = ImageFileCollection(path_data, keywords='*')
ic1.summary.write('{}/hdr.cor.dat'.format(path_data), format='ascii.tab', overwrite=True)
try:
nobj = len(ic1.filter(imagetyp='OBJECT').summary)
except:
nobj = len(ic1.filter(imagetyp='object').summary)
# %% [markdown]
# ### Marking the `GECKO` data
# %%
# testobj = 'S190425z'
project = "IMSNG"
obsmode = "MONITORING" # Default
for obj in ic1.filter(imagetyp='object').summary['object']:
if 'MS' in obj[:2]: # MS230425 (test event)
print(obj)
project = "GECKO"
obsmode = "TEST"
elif 'S2' in obj[:2]: # S230425 (super event)
print(obj)
project = "GECKO"
obsmode = "FOLLOWUP" # Follow-up
else:
pass
print(f"[{project}] {obsmode}")
# %% [markdown]
# - Slack notification
# %%
OAuth_Token = keytbl['key'][keytbl['name']=='slack'].item()
channel = '#pipeline'
text = f'[`gpPy`/{project}-{obsmode}] Start Processing {obs} {os.path.basename(path)} Data ({nobj} objects) with {ncores} cores'
param_slack = dict(
token = OAuth_Token,
channel = channel,
text = text,
)
tool.slack_bot(**param_slack)
# %% [markdown]
# ### Master Frame
# %% [markdown]
# - Bias
# %%
st = time.time()
#------------------------------------------------------------
# BIAS
#------------------------------------------------------------
# Build tonight's master bias if bias frames exist; otherwise borrow the
# archived master bias closest in time (kept per readout mode for RASA36).
try:
    biasnumb = len(ic1.filter(imagetyp='Bias').summary)
except:
    # ic1.filter raises when no bias frames are present in the collection
    biasnumb = 0
if biasnumb != 0:
    mzero = calib.master_zero(ic1, fig=False)
    # Archive the freshly made master bias under the master-frame tree.
    date = fits.getheader(f'{path_data}/zero.fits')['date-obs'][:10].replace('-', '')
    if obs == 'RASA36':
        zeroim = f'{path_mframe}/{obs}/zero/{date}-zero_{mode}.fits'
    else:
        zeroim = f'{path_mframe}/{obs}/zero/{date}-zero.fits'
    cpcom = f'cp {path_data}/zero.fits {zeroim}'
    print(cpcom)
    os.system(cpcom)
    plt.close('all')
else:
    # IF THERE ARE NO BIAS FRAMES, BORROW FROM THE CLOSEST OTHER DATE
    print('\nNO BIAS FRAMES\n')
    if obs == 'RASA36':
        pastzero = np.array(glob.glob(f'{path_mframe}/{obs}/zero/*zero_{mode}.fits'))
    else:
        pastzero = np.array(glob.glob(f'{path_mframe}/{obs}/zero/*zero.fits'))
    # CALCULATE CLOSEST ONE FROM TIME DIFFERENCE
    # (archived names start with the ISOT date, e.g. 20220101-zero.fits)
    deltime = []
    for date in pastzero:
        zeromjd = calib.isot_to_mjd((os.path.basename(date)).split('-')[0])
        deltime.append(np.abs(ic1.summary['mjd'][0]-zeromjd))
    indx_closet = np.where(deltime == np.min(deltime))
    tmpzero = f"{path_data}/{os.path.basename(pastzero[indx_closet][0])}"
    cpcom = f'cp {pastzero[indx_closet][0]} {tmpzero}'
    print(cpcom)
    os.system(cpcom)
    if 'KCT_ASI1600MM' in obs:
        # KCT exception: unit given explicitly — presumably the header
        # lacks a BUNIT card (TODO: confirm against raw KCT frames).
        mzero = CCDData.read(tmpzero, hdu=0, unit='adu')
    elif obs == 'RASA36':
        if (mode == 'high') & (badmode == True):
            # [Bad mode] high-mode frames are rescaled by a factor of 20
            mzero = CCDData.read(tmpzero, hdu=0).multiply(20)
            print('[Bad mode] Multiply 20 on the high mode bias.')
        else:
            mzero = CCDData.read(tmpzero, hdu=0)
    elif obs == 'LSGT':
        mzero = CCDData.read(tmpzero, hdu=0, unit='adu')
    else:
        mzero = CCDData.read(tmpzero, hdu=0)
    mzero.meta['FILENAME'] = os.path.basename(tmpzero)
# %% [markdown]
# - Dark
# %%
#------------------------------------------------------------
# DARK (ITERATION FOR EACH EXPOSURE TIME)
#------------------------------------------------------------
# Build a master dark for each exposure time taken tonight; if none were
# taken, borrow the best-matching archived master dark instead.
try:
    darkexptimelist = sorted(list(set(ic1.filter(imagetyp='dark').summary['exptime'])))
    darknumb = len(darkexptimelist)
except:
    # ic1.filter raises when no dark frames are present
    darknumb = 0
darkdict = dict()   # exposure time (str, int seconds) -> master dark CCDData
if darknumb != 0:
    dark_process = True
    for i, exptime in enumerate(darkexptimelist):
        print('PRE PROCESS FOR DARK ({} sec)\t[{}/{}]'.format(exptime, i+1, len(darkexptimelist)))
        mdark = calib.master_dark(ic1, mzero=mzero, exptime=exptime, fig=False)
        darkdict['{}'.format(int(exptime))] = mdark
        # Archive the new master dark under the master-frame tree.
        date = fits.getheader(f'{path_data}/dark-{int(exptime)}.fits')['date-obs'][:10].replace('-', '')
        if obs == 'RASA36':
            darkim = f'{path_mframe}/{obs}/dark/{int(exptime)}-{date}-dark_{mode}.fits'
        else:
            darkim = f'{path_mframe}/{obs}/dark/{int(exptime)}-{date}-dark.fits'
        cpcom = 'cp {}/dark-{}.fits {}'.format(path_data, int(exptime), darkim)
        print(cpcom)
        os.system(cpcom)
        plt.close('all')
else:
    # Borrow an archived master dark, preferring the longest object
    # exposure time taken tonight.
    print('\nNO DARK FRAMES\n')
    objexptimelist = sorted(list(set(ic1.filter(imagetyp='object').summary['exptime'])))
    exptime = objexptimelist[-1]
    if obs == 'RASA36':
        pastdark = np.array(glob.glob(f'{path_mframe}/{obs}/dark/{int(exptime)}*dark_{mode}.fits'))
    else:
        pastdark = np.array(glob.glob(f'{path_mframe}/{obs}/dark/{int(exptime)}*dark.fits'))
    if len(pastdark) == 0:
        # No archived dark matches this exposure time: consider all of them.
        pastdark = np.array(glob.glob('{}/{}/dark/*dark*.fits'.format(path_mframe, obs)))
    # Rank candidates by time difference and exposure time.
    # Archived names look like <exptime>-<isot_date>-dark.fits.
    deltime = []
    darkexptimes = []
    for pdark in pastdark:
        darkmjd = calib.isot_to_mjd((os.path.basename(pdark)).split('-')[1])
        darkexptime = int(os.path.basename(pdark).split('-')[0])
        darkexptimes.append(darkexptime)
        deltime.append(np.abs(ic1.summary['mjd'][0]-darkmjd))
    if 'KCT' in obs:
        # KCT: match exposure time as closely as possible.
        indx_closet = np.where(
            (np.abs(np.array(darkexptimes)-exptime) == np.min(np.abs(np.array(darkexptimes)-exptime)))
        )
    else:
        # Otherwise: closest in time among the longest-exposure darks.
        indx_closet = np.where(
            (deltime == np.min(deltime)) &
            (darkexptimes == np.max(darkexptimes))
        )
    if len(indx_closet[0]) == 0:
        # Fall back to closest in time only.
        indx_closet = np.where(
            (deltime == np.min(deltime))
        )
    tmpdark = pastdark[indx_closet][-1]
    exptime = int(fits.getheader(tmpdark)['exptime'])
    # BUGFIX: copy the borrowed dark under the canonical local name
    # dark-<exptime>.fits. The previous format string passed three
    # arguments to two placeholders and dropped the destination filename,
    # so later code reading {path_data}/dark-<exptime>.fits could not
    # find the file.
    cpcom = 'cp {} {}/dark-{}.fits'.format(tmpdark, path_data, int(exptime))
    print(cpcom)
    os.system(cpcom)
    if 'KCT' in obs:
        # KCT exception: unit given explicitly — presumably the header
        # lacks a BUNIT card (TODO: confirm against raw KCT frames).
        mdark = CCDData.read(tmpdark, hdu=0, unit='adu')
    elif obs == 'RASA36':
        if (mode == 'high') & (badmode == True):
            # [Bad mode] high-mode frames are rescaled by a factor of 20
            mdark = CCDData.read(tmpdark, hdu=0).multiply(20)
            print('[Bad mode] Multiply 20 on the high mode dark.')
        else:
            mdark = CCDData.read(tmpdark, hdu=0)
    elif obs == 'LSGT':
        mdark = CCDData.read(tmpdark, hdu=0, unit='adu')
    else:
        mdark = CCDData.read(tmpdark, hdu=0)
    mdark.meta['FILENAME'] = os.path.basename(tmpdark)
    mdark.meta['EXPTIME'] = exptime
    darkdict['{}'.format(int(exptime))] = mdark
# %% [markdown]
# - Flat
# %%
# Build a master flat for each filter that has flat frames tonight.
# If none exist, flats are borrowed later, per filter, during object
# calibration (see the flatdict fallback there).
flatdict = dict()   # filter name -> master flat CCDData
try:
    flatfilterlist = list(set(ic1.filter(imagetyp='flat').summary['filter']))
    for i, filte in enumerate(flatfilterlist):
        print('MAKING MASTER FLAT IN {}-BAND'.format(filte))
        mflat = calib.master_flat(ic1, mzero, filte, mdark=mdark, fig=True)
        flatdict[filte] = mflat
        # NOTE(review): the archive date is read from the dark frame header
        # (dark-<exptime>.fits), not from the flat itself — confirm intended.
        date = fits.getheader(f'{path_data}/dark-{int(exptime)}.fits')['date-obs'][:10].replace('-', '')
        if obs == 'RASA36':
            flatim = f'{path_mframe}/{obs}/flat/{date}-n{filte}_{mode}.fits'
        else:
            flatim = f'{path_mframe}/{obs}/flat/{date}-n{filte}.fits'
        cpcom = f'cp {path_data}/n{filte}.fits {flatim}'
        print(cpcom)
        os.system(cpcom)
        plt.close('all')
except:
    print('No flat calibration image.')
    pass
protbl['status'][protbl['process']=='master_frame'] = True
protbl['time'][protbl['process']=='master_frame'] = int(time.time() - st)
#------------------------------------------------------------
# OBJECT CALIBRATION (ZERO, DARK, FLAT)
#------------------------------------------------------------
# Apply bias/dark/flat correction to every object frame, per filter.
# Missing master flats are borrowed from the archive (closest in time).
st_ = time.time()
comment = '='*60+'\n' \
        + 'OBJECT CALIBRATION\n' \
        + '='*60+'\n'
print(comment)
objfilterlist = sorted(list(set(ic1.filter(imagetyp='object').summary['filter'])))
objexptimelist = sorted(list(set(ic1.filter(imagetyp='object').summary['exptime'])))
for i, filte in enumerate(objfilterlist):
    print('PRE PROCESS FOR {} FILTER OBJECT\t[{}/{}]'.format(filte, i+1, len(objfilterlist)))
    if filte in flatdict.keys():
        mflat = flatdict[filte]
    else:
        print('\nNO {} FLAT FRAMES\n'.format(filte))
        # Borrow the archived master flat closest in time for this filter.
        deltime = []
        if obs != 'RASA36':
            pastflat = np.array(glob.glob('{}/{}/flat/*n{}*.fits'.format(path_mframe, obs, filte)))
            for date in pastflat:
                flatmjd = calib.isot_to_mjd((os.path.basename(date)).split('-')[0])
                deltime.append(np.abs(ic1.summary['mjd'][0]-flatmjd))
        elif obs == 'RASA36':
            pastflat = np.array(glob.glob(f'{path_mframe}/{obs}/flat/*_{mode}-n{filte}*.fits'))
            for date in pastflat:
                flatmjd = fits.getheader(date)['MJD']
                deltime.append(np.abs(ic1.summary['mjd'][0]-flatmjd))
        indx_closet = np.where(deltime == np.min(deltime))
        tmpflat = '{}/{}'.format(path_data, os.path.basename(pastflat[indx_closet][0].item()))
        cpcom = 'cp {} {}'.format(pastflat[indx_closet][0].item(), tmpflat)
        print(cpcom)
        os.system(cpcom)
        if ('KCT' not in obs) & (obs != 'LSGT'):
            mflat = CCDData.read(tmpflat, hdu=0)
        elif obs == 'LSGT':
            mflat = CCDData.read(tmpflat, hdu=0, unit='adu')
        else:
            # KCT exception: unit given explicitly
            mflat = CCDData.read(tmpflat, hdu=0, unit='adu')
        # BUGFIX: this line was duplicated in the original.
        mflat.meta['FILENAME'] = os.path.basename(tmpflat)
        flatdict[filte] = mflat
    # Calibrate with the master dark matching each exposure time,
    # falling back to the last available dark.
    for expt in objexptimelist:
        if str(int(expt)) in darkdict.keys():
            mdark = darkdict[str(int(expt))]
        else:
            mdark = darkdict[list(darkdict.keys())[-1]]
        calib.calibration(ic1, mzero, mflat, filte, mdark=mdark)
protbl['status'][protbl['process']=='pre_process'] = True
protbl['time'][protbl['process']=='pre_process'] = int(time.time() - st_)
# Corrected image list (calibration products are prefixed with 'fz').
fzimlist = []
for ims in ('{}/fz*.fits'.format(path_data), '{}/fz*.fit'.format(path_data), '{}/fz*.fts'.format(path_data)):
    fzimlist.extend(sorted(glob.glob(ims)))
# %% [markdown]
# ### WCS Calculation
# %%
#------------------------------------------------------------
# ASTROMETRY
#------------------------------------------------------------
# Solve WCS for all corrected frames. Known IMSNG targets get a position
# hint (coordinates + field-of-view), other frames are solved without one.
# The trailing arguments (15 vs 60) are presumably solve time limits —
# confirm in calib.astrometry.
st_ = time.time()
print('ASTROMETRY START')
print('='*60)
astrometryfailist = []
astimlist = []   # frames of known IMSNG targets
astotlist = []   # frames of unknown targets
astralist = []   # hint RA per astimlist entry
astdelist = []   # hint Dec per astimlist entry
for inim in fzimlist:
    obj = (fits.getheader(inim)['object']).upper()
    if (obj in alltbl['obj']):
        indx_target = np.where(obj == alltbl['obj'])[0][0]
        ra, dec = alltbl['ra'][indx_target].item(), alltbl['dec'][indx_target].item()
        astimlist.append(inim)
        astralist.append(ra)
        astdelist.append(dec)
    else:
        astotlist.append(inim)
# Astrometry (IMSNG field, with position hints)
if __name__ == '__main__':
    with multiprocessing.Pool(processes=ncores) as pool:
        results = pool.starmap(calib.astrometry, zip(astimlist, repeat(obsinfo['pixscale']), astralist, astdelist, repeat(obsinfo['fov']/60.), repeat(15)))
# Astrometry (non IMSNG field, no hints)
if __name__ == '__main__':
    with multiprocessing.Pool(processes=ncores) as pool:
        results = pool.starmap(calib.astrometry, zip(astotlist, repeat(obsinfo['pixscale']), repeat(None), repeat(None), repeat(None), repeat(60)))
# Retry failed IMSNG fields without hints (solved frames gain an 'a' prefix).
astfailist = []
for inim in astimlist:
    if (os.path.exists('{}/a{}'.format(path_data, os.path.basename(inim))) == False):
        astfailist.append(inim)
if __name__ == '__main__':
    with multiprocessing.Pool(processes=ncores) as pool:
        results = pool.starmap(calib.astrometry, zip(astfailist, repeat(obsinfo['pixscale']), repeat(None), repeat(None), repeat(None), repeat(60)))
# Record frames that still failed after the retry.
for inim in astfailist:
    if (os.path.exists('{}/a{}'.format(path_data, os.path.basename(inim))) == False):
        astrometryfailist.append('{}/a{}'.format(path_data, os.path.basename(inim)))
# Clean up astrometry.net by-products.
os.system('rm '+path_data+'/*.axy '+path_data+'/*.corr '+path_data+'/*.xyls '+path_data+'/*.match '+path_data+'/*.rdls '+path_data+'/*.solved '+path_data+'/*.wcs ')
print('ASTROMETRY COMPLETE\n'+'='*60)
protbl['status'][protbl['process']=='astrometry'] = True
protbl['time'][protbl['process']=='astrometry'] = int(time.time() - st_)
# %% [markdown]
# ### Cosmic-ray Removal
# %%
st_ = time.time()
print('Quick seeing measurement with SE & Cosmic ray removal')
print('='*60)
gain = ccdinfo['gain'].value
rdnoise = ccdinfo['rdnoise']
# afzimlist = sorted(glob.glob(path_data+'/afz*.fits'))
afzimlist = []
for ims in ('{}/a*.fits'.format(path_data), '{}/a*.fit'.format(path_data), '{}/a*.fts'.format(path_data)):
afzimlist.extend(sorted(glob.glob(ims)))
outimlist = []
for i, inim in enumerate(afzimlist):
outim = '{}/cr{}'.format(os.path.dirname(inim), os.path.basename(inim))
outimlist.append(outim)
if ('LOAO' not in obs) & ('RASA36' not in obs) & ('LOAO_FLI' not in obs) & ('LSGT_ASI1600MM' != obs) & ('DNSM' != obs):
# Seeing measurement
if __name__ == '__main__':
with multiprocessing.Pool(processes=ncores) as pool:
results = pool.starmap(tool.SE_seeing, zip(afzimlist, repeat(obs), repeat(path_obs), repeat(path_config), repeat(3*u.arcsecond), repeat(0.95), repeat(True)))
# Remove cosmic-ray
if __name__ == '__main__':
with multiprocessing.Pool(processes=ncores) as pool:
results = pool.starmap(calib.cr_removal, zip(afzimlist, outimlist, repeat(gain), repeat(rdnoise)))
else:
print('Skip Seeing measurement & CR remove processes for {}'.format(obs))
for inim, outim in zip(afzimlist, outimlist):
cpcom = 'cp {} {}'.format(inim, outim)
print(cpcom)
os.system(cpcom)
protbl['status'][protbl['process']=='cr_removal'] = True
protbl['time'][protbl['process']=='cr_removal'] = int(time.time() - st_)
# %% [markdown]
# ### Rename to IMSNG/GECKO Convention
# %%
fov = obsinfo['fov']*u.arcmin
crafzimlist = []
for ims in ('{}/cra*.fits'.format(path_data), '{}/cra*.fit'.format(path_data), '{}/cra*.fts'.format(path_data)):
crafzimlist.extend(sorted(glob.glob(ims)))
# for inim in sorted(glob.glob('{}/crafz*.fits'.format(path_data))):
for inim in crafzimlist:
obj = fits.getheader(inim)['object']
# Modify incorrect object header
if (inim.replace('crafz', 'afz') in astrometryfailist) & (obj in alltbl['obj']):
robj, sep = tool_tbd.imsng_name_correction(inim, alltbl, radius=fov)
else:
pass
calib.fnamechange(inim, obs)
caliblist = sorted(glob.glob(path_data+'/Calib*.fits'))
ic_cal = ImageFileCollection(path_data, glob_include='Calib*0.fits', keywords='*')
os.system('chmod 777 {}'.format(path_data))
os.system('chmod 777 {}/*'.format(path_data))
# Calib-*.fits TO SAVE PATH
f = open(path_data+'/object.txt', 'a')
f.write('obs obj dateobs filter exptime\n')
for inim in caliblist:
img = os.path.basename(inim)
part = img.split('-')
line = '{} {} {} {} {}\n'.format(part[1], part[2], part[3]+'T'+part[4], part[5], part[6])
print(line)
f.write(line)
f.close()
# DATA FOLDER TO SAVE PATH
# os.system('rm {}/afz*.fits {}/fz*.fits'.format(path_data, path_data))
os.system(f'rm {path_data}/*fz*.f*')
os.system(f'rm -rf {path_save}/{os.path.basename(path_data)}')
plt.close('all')
# %%
for calibim in caliblist:
center, vertices = tool.get_wcs_coordinates(calibim)
fits.setval(calibim, "RACENT", value=round(center[0].item(), 3), comment="RA CENTER [deg]")
fits.setval(calibim, "DECCENT", value=round(center[1].item(), 3), comment="DEC CENTER [deg]")
for ii, (_ra, _dec) in enumerate(vertices):
# print(ii, _ra, _dec)
fits.setval(calibim, f"RAPOLY{ii}", value=round(_ra, 3), comment=f"RA POLYGON {ii} [deg]")
fits.setval(calibim, f"DEPOLY{ii}", value=round(_dec, 3), comment=f"DEC POLYGON {ii} [deg]")
# %% [markdown]
# ### Defringe
# - Only for LOAO z, I, Y-bands
# %%
st_ = time.time()
# Fringe correction, currently applied only to LOAO I-band images.
if (obs == 'LOAO') & ('I' in ic_cal.filter(imagetyp='object').summary['filter']):
    dfim = '/home/paek/qsopy/fringe/LOAO/fringe_i_ori.fits'    # fringe pattern image
    dfdat = '/home/paek/qsopy/fringe/LOAO/fringe_i.dat'        # fringe scaling data
    dfimlist = []
    for inim in ic_cal.filter(imagetyp='object', filter='I').summary['file']:
        dfedim = calib.defringe(str(inim), dfim, dfdat)
        # Overwrite the original with the defringed image.
        mvcom = 'mv {} {}'.format(dfedim, inim)
        print(mvcom)
        os.system(mvcom)
else:
    print('No images to defringe')
    pass
protbl['status'][protbl['process']=='defringe'] = True
protbl['time'][protbl['process']=='defringe'] = int(time.time() - st_)
#------------------------------------------------------------
print('='*60)
print('Calibration IS DONE.\t('+str(int(time.time() - starttime))+' sec)')
print('='*60)
# %% [markdown]
# ## Photometry
# %%
st_ = time.time()
print('#\tPhotometry')
path_infile = f'{path_data}/{os.path.basename(path_default_gphot)}'
path_new_gphot = f'{os.path.dirname(path_infile)}/gphot.config'
# Copy default photometry configuration
cpcom = f'cp {path_default_gphot} {path_new_gphot}'
print(cpcom)
os.system(cpcom)
# Read default photometry configuration
f = open(path_default_gphot, 'r')
lines = f.read().splitlines()
f.close()
# Write photometry configuration
g = open(path_new_gphot, 'w')
for line in lines:
if 'imkey' in line:
line = f'imkey\t{path_data}/C*0.fits'
else:
pass
g.write(line+'\n')
g.close()
if obs == 'DOAO':
path_phot = path_phot_sg
else:
path_phot = path_phot_mp
# Execute
com = f'python {path_phot} {path_data} {ncores}'
# com = f'python {path_phot} {path_data} 1'
print(com)
os.system(com)
protbl['status'][protbl['process']=='photometry'] = True
protbl['time'][protbl['process']=='photometry'] = int(time.time() - st_)
# %% [markdown]
# ## Image registering & combine
# %%
#------------------------------------------------------------
# IMAGE REGISTERING & COMBINE
#------------------------------------------------------------
# For each object/filter, split the calibrated frames into time groups
# (consecutive frames within `step`) and align+combine each group.
st_ = time.time()
print('IMAGE REGISTERING & COMBINE')
combined_images = []
step = (1/24/60)*60  # grouping threshold in days of JD: 1/24 day = 1 hour
ic_cal_phot = ImageFileCollection(path_data, glob_include='Calib*0.fits', keywords='*')
calist = sorted(glob.glob('{}/Calib*.fits'.format(path_data)))
objlist = sorted(list(set(ic_cal_phot.summary['object'])))
filterlist = sorted(list(set(ic_cal_phot.summary['filter'])))
for obj in objlist:
    for filte in filterlist:
        imlist_tmp = sorted(glob.glob('{}/Calib*-{}-*-{}-*.fits'.format(path_data, obj, filte)))
        if len(imlist_tmp) == 0:
            pass
        elif len(imlist_tmp) == 1:
            # Single frame: the "combined" product is just a copy.
            inim = imlist_tmp[0]
            comim = inim.replace('.fits', '.com.fits')
            cpcom = f'cp {inim} {comim}'
            print(cpcom)
            os.system(cpcom)
        else:
            print(obj, filte)
            jds = np.array([fits.getheader(inim)['jd'] for inim in imlist_tmp])
            delts = jds - np.min(jds)
            # Group frames whose gap to the previous frame is < step.
            # BUGFIX: the old grouping loop appended the running group to
            # grouplists *before* deciding whether the last frame started a
            # new group, so when it did, the previous group was appended
            # twice and the final frame was dropped entirely.
            grouplists = []
            grouplist = [imlist_tmp[0]]
            for i in range(1, len(delts)):
                if np.abs(delts[i] - delts[i-1]) < step:
                    grouplist.append(imlist_tmp[i])
                else:
                    grouplists.append(grouplist)
                    grouplist = [imlist_tmp[i]]
            grouplists.append(grouplist)
            for group in grouplists:
                print('-'*60)
                if len(group) > 1:
                    # Align everything onto the first frame of the group.
                    ref_image = group[0]
                    images_to_align = group[1:]
                    for inim in group:
                        print(inim)
                    try:
                        outim = tool.imcombine_routine(images_to_align, ref_image)
                        combined_images.append(outim)
                    except:
                        # Best-effort: keep processing remaining groups.
                        print('Fail to image align & combine routine.')
                        print(images_to_align)
                else:
                    print('There is only one image.')
                    combined_images.append(group[0])
protbl['status'][protbl['process']=='image_stack'] = True
protbl['time'][protbl['process']=='image_stack'] = int(time.time() - st_)
# %%
# images_to_align = group
# ref_image = images_to_align[0]
# outim = tool.imcombine_routine(images_to_align, ref_image)
# %% [markdown]
# ## Photometry for combined images
# %%
st_ = time.time()
# Write photometry configuration
h = open(path_new_gphot, 'w')
for line in lines:
if 'imkey' in line:
line = '{}\t{}/C*com.fits'.format('imkey', path_data)
else:
pass
h.write(line+'\n')
h.close()
# Execute
path_phot = path_phot_mp
com = 'python {} {} {}'.format(path_phot, path_data, ncores)
print(com)
os.system(com)
# tdict['photometry_com'] = time.time() - st - tdict[list(tdict.keys())[-1]]
protbl['status'][protbl['process']=='photometry_com'] = True
protbl['time'][protbl['process']=='photometry_com'] = int(time.time() - st_)
ic_com_phot = ImageFileCollection(path_data, glob_include='Calib*com.fits', keywords='*')
# Summary
print('Draw observation summary plots')
# for filte in list(set(ic_cal_phot.summary['filter'])):
for filte in filterlist:
try:
tool.obs_summary(filte, ic_cal_phot, ic_com_phot, path_save=path_data)
except:
print('Fail to make summary plots.')
pass
plt.close('all')
# %% [markdown]
# # Image subtraction
#
# %%
print('IMAGE SUBTRACTION')
subtracted_images = []
ds9comlist = []
for inim in combined_images:
hdr = fits.getheader(inim)
# obs = os.path.basename(inim).split('-')[1]
# obs = 'LOAO'
obj = hdr['object']
filte = hdr['filter']
path_refim = '/data3/paek/factory/ref_frames/{}'.format(obs)
refimlist = glob.glob('{}/Ref*{}*{}*.fits'.format(path_refim, obj, filte))
if len(refimlist) > 0:
refim = refimlist[0]
# subim, ds9com = tool.subtraction_routine3(inim, refim)
# if False:
if obs not in ['LSGT', 'DOAO', 'RASA36', 'SAO_C361K',]:
subim, ds9com = tool.subtraction_routine(inim, refim)
else:
subim, ds9com = tool.subtraction_routine2(inim, refim)
if os.path.getsize(subim) != 0:
rmcom = f"rm {subim}"
print(rmcom)
os.system(rmcom)
subim, ds9com = tool.subtraction_routine(inim, refim)
else:
pass
if subim != None:
subtracted_images.append(subim)
ds9comlist.append(ds9com)
else:
print('There is no reference image for {}'.format(os.path.basename(inim)))
pass
rmcom = 'rm {}/*Ref*gregister.fits'.format(path_data)
print(rmcom)
os.system(rmcom)
# tdict['subtraction'] = time.time() - st - tdict[list(tdict.keys())[-1]]
protbl['status'][protbl['process']=='subtraction'] = True
protbl['time'][protbl['process']=='subtraction'] = int(time.time() - st_)
# %% [markdown]
# ## Photometry for subtracted images
#
# %%
st_ = time.time()
# Write photometry configuration
s = open(path_new_gphot, 'w')
for line in lines:
if 'imkey' in line:
# line = '{}\t{}/hd*com.fits'.format('imkey', path_data)
line = '{}\t{}/hd*.fits'.format('imkey', path_data)
else:
pass
if 'photfraction' in line:
line = '{}\t{}'.format('photfraction', 1.0)
else:
pass
if 'DETECT_MINAREA' in line:
line = '{}\t{}'.format('DETECT_MINAREA', 10)
else:
pass
if 'DETECT_THRESH' in line:
line = '{}\t{}'.format('DETECT_THRESH', 1.25)
else:
pass
s.write(line+'\n')
s.close()
# Execute
hdimlist = sorted(glob.glob('{}/hd*.fits'.format(path_data)))
if len(hdimlist) > 0:
com = 'python {} {}'.format(path_phot_sub, path_data)
print(com)
os.system(com)
# tdict['photometry_sub'] = time.time() - st - tdict[list(tdict.keys())[-1]]
else:
print('No subtracted image.')
pass
protbl['status'][protbl['process']=='photometry_sub'] = True
protbl['time'][protbl['process']=='photometry_sub'] = int(time.time() - st_)
# %% [markdown]
# ## Transient Search
#
# %%
st_ = time.time()
fovval = fov.value
# Input table for transient search
tstbl = Table()
# hdimlist = sorted(glob.glob(f'{path_data}/hd*com.fits'))
hdimlist = sorted(glob.glob(f'{path_data}/hd*.fits'))
if len(hdimlist) != 0:
tstbl['hdim'] = hdimlist
tskeys = ['hdcat', 'hcim', 'inim', 'scicat', 'refim']
for key in tskeys:
tstbl[key] = ' '*300
tstbl['fovval'] = fovval
for i, hdim in enumerate(hdimlist):
hdcat = hdim.replace('.fits','.phot_sub.cat')
hcim = hdim.replace('hdCalib', 'hcCalib')
inim = hdim.replace('hdCalib', 'Calib')
scicat = inim.replace('.fits', '.phot.cat')
hdr = fits.getheader(hdim)
obj = hdr['object']
filte = hdr['filter']
path_refim = f'/data3/paek/factory/ref_frames/{obs}'
refimlist = glob.glob(f'{path_refim}/Ref*{obj}*{filte}*.fits')
refim = refimlist[0]
for key, im in zip(tskeys, [hdcat, hcim, inim, scicat, refim]):
tstbl[key][i] = im
out_tstbl = f'{path_data}/transient_search.txt'
tstbl.write(out_tstbl, format='ascii.tab', overwrite=True)
com = f'python {path_find} {out_tstbl} {ncores}'
print(com)
subprocess.call(com, shell=True)
protbl['status'][protbl['process']=='transient_search'] = True
protbl['time'][protbl['process']=='transient_search'] = int(time.time() - st_)
# %% [markdown]
# # Summary file
# %%
#------------------------------------------------------------
#------------------------------------------------------------
protbl['status'][protbl['process']=='total'] = True
protbl['time'][protbl['process']=='total'] = int(time.time() - st)
protbl.write('{}/obs.summary.log'.format(path_data), format='ascii.tab', overwrite=True)
print(protbl)
# Write data summary
f = open(path_data+'/obs.summary.log', 'a')
end_localtime = time.strftime('%Y-%m-%d %H:%M:%S (%Z)', time.localtime())
f.write('Pipelne start\t: {}\n'.format(start_localtime))
f.write('Pipelne end\t: {}\n'.format(end_localtime))
try:
f.write('='*60+'\n')
f.write('PATH :{}\n'.format(path))
f.write('OBJECT NUMBER # :{}\n'.format(len(ic_cal.summary)))
objkind = sorted(set(ic_cal.summary['object']))
f.write('OBJECTS # : {}\n'.format(objkind))
for obj in objkind:
f.write('-'*60+'\n')
for filte in list(set(ic_cal.summary['filter'])):
indx_tmp = ic_cal.files_filtered(filter=filte, object=obj)
if len(indx_tmp) > 0:
f.write('{}\t{}\n'.format(obj, filte))
except:
pass
f.close()
# WRITE LOG
f = open(path_log, 'a')
# f.write(path_raw+'/'+os.path.basename(path_data)+'\n')
# f.write('{}/{}\n'.format(path_raw, os.path.basename(path_data)))
f.write(f'{path_raw}/{os.path.basename(path_data)}\n')
f.close()
# %% [markdown]
# ## Slack message
# %%
# Final Slack notification with total wall-clock time in minutes.
total_time = round(protbl['time'][protbl['process']=='total'].item()/60., 1)
channel = '#pipeline'
text = f'[`gpPy`/{project}-{obsmode}] Processing Complete {obs} {os.path.basename(path)} Data ({nobj} objects) with {ncores} cores taking {total_time} mins'
param_slack = dict(
    token = OAuth_Token,
    channel = channel,
    text = text,
)
tool.slack_bot(**param_slack)
# 2023_0628 pass
|
SilverRonREPO_NAMEgppyPATH_START.@gppy_extracted@gppy-main@LOAO_Routine.tmp.py@.PATH_END.py
|
{
"filename": "gains.py",
"repo_name": "ratt-ru/QuartiCal",
"repo_path": "QuartiCal_extracted/QuartiCal-main/testing/fixtures/gains.py",
"type": "Python"
}
|
import pytest
import dask
@pytest.fixture(scope="module")
def cmp_calibration_graph_outputs(add_calibration_graph_outputs):
    """Compute the lazy calibration graph once and cache it for the module."""
    return dask.compute(*add_calibration_graph_outputs)


@pytest.fixture(scope="module")
def cmp_gain_xds_lod(cmp_calibration_graph_outputs):
    # First computed output — presumably the gain dataset list-of-dicts
    # (per the 'lod' naming); confirm against add_calibration_graph_outputs.
    return cmp_calibration_graph_outputs[0]


@pytest.fixture(scope="module")
def cmp_net_xds_list(cmp_calibration_graph_outputs):
    # Second computed output — net gain datasets.
    return cmp_calibration_graph_outputs[1]


@pytest.fixture(scope="module")
def cmp_post_solve_data_xds_list(cmp_calibration_graph_outputs):
    # Third computed output — data datasets after solving.
    return cmp_calibration_graph_outputs[2]


@pytest.fixture(params=["antenna", "array"], scope="module")
def solve_per(request):
    """Parametrize tests over per-antenna and per-array solving."""
    return request.param
|
ratt-ruREPO_NAMEQuartiCalPATH_START.@QuartiCal_extracted@QuartiCal-main@testing@fixtures@gains.py@.PATH_END.py
|
{
"filename": "psf_fit.py",
"repo_name": "kevin218/POET",
"repo_path": "POET_extracted/POET-master/code/lib/psf_fit.py",
"type": "Python"
}
|
import numpy as np
from astropy.io import fits as pf
import scipy.ndimage.interpolation as si
import scipy.optimize as so
import gaussian as g
import time
"""
File:
-----
psf_fit.py
Description:
------------
Routines for creating PSF (from a supersampled PSF) and fitting of
PSF images to data images. There are two functions to make a PSF,
either interpolating or binning down from a supersampled PSF.
Package Contents:
-----------------
There are two main types of routines: 'constructors' to make a PSF
image, and 'PSF fitting' routines to fit a PSF to data. The package
also contains 'wrappers' to easy implement in Spitzer data, and
other subroutines.
PSF Constructors:
- make_psf_interp: Makes a PSF image by shifting, rescaling, and
setting the stellar and sky fluxes of a super
sampled PSF.
- make_psf_binning: Makes a PSF image by binning down a super sampled
PSF. Sets then the stellar and sky fluxes.
PSF Fitting:
- psf_fit: Fits a supersampled PSF to a data image. The position is
fitted at discrete positions while the stellar and sky
fluxes are fitted with scipy's leastsq function.
Spitzer Wrapper:
- spitzer_fit: Routine wrapper for easy plug-in into POET pipeline.
Fits a PSF in a data frame from Spitzer.
Subroutines:
- binarray: Resamples a 2D image by stacking and adding every bin
of pixels along each dimension.
- residuals: Calculates the residuals of a weighted, stellar flux +
sky background fit of a model to data.
- gradient: Calculates the gradient of the parameters in residuals.
Modification History:
---------------------
2011-07-21 patricio Wrapped up the PSF fitting routines into this file.
pcubillos@fulbrightmail.org
"""
# :::: PSF Constructors ::::::::::::::::::::::::::::::::::::::::
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def make_psf_interp(spsf, shape, scale, params, psfctr, *args):
    """
    Make a PSF image by shifting, rescaling, and setting the stellar
    and sky fluxes of a supersampled PSF.

    Parameters
    ----------
    spsf: 2D ndarray
        The supersampled PSF image.
    shape: 2-element tuple
        The shape of the output image.
    scale: scalar
        Ratio of the PSF and data pixel scales.
    params: 4-element tuple [yshift, xshift, flux, sky]
        yshift, xshift: desired position of the center of the PSF
                        relative to the center of the image (data pixels).
        flux: total flux of the star.
        sky:  sky flux level.
    psfctr: 2-element tuple [y, x]
        y, x-position of the center of the supersampled PSF.

    Returns
    -------
    psf: 2D ndarray
        Resampled PSF image.
    center: 2-element ndarray
        The [y, x] position of the center of the returned PSF.
    """
    # Shape of the supersampled sub-section to extract:
    shape = np.asarray(shape, float)
    psf_shape = 1 + scale*(shape - 1)
    # Zoom factor that maps the sub-section back to the data scale:
    zoom = (shape + 0.5)/psf_shape
    # Extract a sub-section of the PSF around psfctr.
    # BUGFIX: cast to int — float arrays are not valid slice indices in
    # modern NumPy (np.around returns floats).
    lims = np.array([np.around(psfctr) - np.around(psf_shape/2),
                     np.around(psfctr) - np.around(psf_shape/2) + psf_shape]).astype(int)
    spsf = np.copy(spsf[lims[0,0]:lims[1,0], lims[0,1]:lims[1,1]])
    # Shift the PSF (shift given in data pixels; convert to PSF pixels):
    shift = np.asarray(params[0:2])*scale
    shiftpsf = si.shift(spsf, shift, mode='nearest')
    # Resample the PSF (zoom uses a spline interpolation):
    psf = si.zoom(shiftpsf, zoom, mode='nearest')
    # Normalize, then set stellar flux and add the sky level:
    psf = psf / np.sum(psf)
    psf = psf*params[2] + params[3]
    # Subtract 0.5 because the origin is at the center of the first pixel:
    center = shape/2.0 - 0.5 + np.asarray(params[0:2])
    return psf, center
def make_psf_binning(spsf, shape, scale, params, psfctr, subpsf=None):
    """
    Make a PSF image by binning down a supersampled PSF, then set the
    stellar and sky fluxes.

    Parameters
    ----------
    spsf: 2D ndarray
        The supersampled PSF image.
    shape: 2-element tuple
        The shape of the output image.
    scale: scalar
        Ratio of the PSF and data pixel scales.
    params: 4-element tuple [y, x, flux, sky]
        y, x: subpixel position where to put the sPSF center, in
              supersampled-pixel units (integers; see Notes).
        flux: total flux of the star.
        sky:  sky flux level.
    psfctr: 2-element tuple [y, x]
        y, x-position of the center of the PSF.
    subpsf: 2D ndarray, optional
        Scratch array of shape shape*scale where the sub-section of the
        supersampled PSF is written. It will be overwritten. Passing a
        preallocated array avoids reallocating it on every call.

    Returns
    -------
    binpsf: 2D ndarray
        Rebinned PSF image.
    pos: 2-element ndarray
        The [y, x] position of the center of the PSF in binpsf.

    Notes
    -----
    This assumes the supersampled PSF is sampled finely enough that
    sub-supersampled-pixel differences are indistinguishable, so the
    sPSF is shifted in whole subpixels only.
    """
    # Pixel of the center of the supersampled PSF:
    yctr, xctr = (np.around(psfctr)).astype(int)
    # Shape of the supersampled sub-image to extract:
    ns = (np.asarray(shape, float)*scale).astype(int)
    if subpsf is None:
        subpsf = np.zeros(ns)
    # Extract the sub-section; cast offsets to int so float-valued params
    # (which would be invalid slice indices) are also accepted:
    y0, x0 = yctr - int(params[0]), xctr - int(params[1])
    subpsf[:] = spsf[y0:y0+ns[0], x0:x0+ns[1]]
    # Bin down to the data pixel scale:
    binpsf = binarray(subpsf, scale)
    # Normalize, then set stellar flux and add the sky level:
    binpsf = binpsf / np.sum(binpsf)
    binpsf = binpsf*params[2] + params[3]
    # Position of the PSF center in the binned image:
    pos = (np.asarray(params[0:2], float) - (scale-1)/2.0) / scale
    return binpsf, pos
# :::: PSF Fitting Routines ::::::::::::::::::::::::::::::::::::
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def psf_fit(data, fluxguess, spsf, psfctr, scale, shift, make="bpf",
            mask=None, weights=None, step=None, pos=None):
    """
    Fits a supersampled PSF to a data image. The position is fitted at
    discrete postions while the stellar and sky fluxes are fitted with
    scipy's leastsq function.

    Parameters:
    -----------
    data: 2D ndarray
        The science image we are trying to fit.
    fluxguess: 2-element tuple [flux, sky]
        Tuple giving the starting point to fit the total star flux
        and sky flux level.
    spsf: 2D ndarray
        The supersampled PSF image.
    psfctr: 2-element tuple [y, x]
        y, x-position of the center of the PSF.
    scale: scalar
        Ratio of the PSF and data pixel-scales.
    shift: 2-element tuple [yshift, xshift]
        Each element is a 1D array containing the shifts of the
        center of the PSF to the center of the image at which the
        fit will be evaluated.
    mask : ndarray
        Mask of bad pixel values, same shape as data. Good pixels
        have value 1; bad pixels have value 0, and will not be
        considered in the fit.
    weights: ndarray
        Weights for the minimization, for scientific data the
        weights should be 1/sqrt(variance). Same shape as data.
    step : scalar
        The initial step of the number of elements to jump when
        evaluating shift.
    pos : 2-element list
        The index of the elements in shift where to start the
        evaluation.

    Returns:
    --------
    center: 2-element tuple
        Position of the center of the best-fitting PSF, as reported
        by the PSF constructor ('ipf' or 'bpf').
    p: ndarray
        Best-fitting [starflux, skyflux] from leastsq.
    chisq: 2D ndarray
        Grid of reduced chi-square values over the evaluated shifts;
        entries equal to -1 were never evaluated by the search.

    Example:
    --------
    >>> import psf_fit as pf
    >>> import sys, os, time
    >>> import numpy as np
    >>> sys.path.append('/home/esp01/events/wa008b-patricio/wa008bs41/lib/')
    >>> sys.path.append('/home/patricio/ast/esp01/convert/lib/python/gaussian/')
    >>> import manageevent as me
    >>> import pyfits as pyf

    >>> # Example #1:
    >>> # Using a Spitzer supplied PSF and make_psf_interp:
    >>> # Get a PSF and its center:
    >>> e = me.loadevent('/home/esp01/events/wa008b-patricio/wa008bs41/run/fgc/wa008bs41_ctr', load=['data','uncd','mask'])
    >>> sst_psf = np.copy(e.psfim)
    >>> psfctr  = np.copy(e.psfctr)
    >>> # The scale factor:
    >>> scale = 5.0
    >>> # Let's create an image to fit:
    >>> # The image size will be 21 by 21:
    >>> shape = np.array([21,21])
    >>> # Define the position of the center of the PSF, and fluxes:
    >>> params = [1.75, 0.5, 5e4, 2e2]
    >>> # Make the image:
    >>> image, center = pf.make_psf_interp(sst_psf, shape, scale, params, psfctr)
    >>> # Add some noise:
    >>> noise = np.sqrt(image) * np.random.randn(21,21)
    >>> # The image to fit:
    >>> y   = image + noise
    >>> var = np.abs(image)
    >>> # Let's say our prior guess lies whitin 1 pixel from the given position:
    >>> yguess = params[0] + 2*(np.random.rand()-0.5)
    >>> xguess = params[1] + 2*(np.random.rand()-0.5)
    >>> # Array of Y,X shifs around our guess where to search:
    >>> noffset = 201
    >>> offsetrad = 1.0  # search within a 1 pixel radius:
    >>> offset = offsetrad * np.linspace(-1.0, 1.0, noffset)
    >>> # The shifts are relative to the center of the image:
    >>> yshift = yguess + offset
    >>> xshift = xguess + offset
    >>> shift = (yshift, xshift)
    >>> # Starting point, guess for the fluxes:
    >>> fluxguess = (0.1e5, 80)
    >>> # Find the best fit:
    >>> pos, bestp, chisq = pf.psf_fit(y, fluxguess, sst_psf, psfctr, scale, shift, mask=None, weights=1/var, make='ipf')
    >>> # Best position:
    >>> print(pos)
    >>> # Best flux fit:
    >>> print(bestp)

    >>> # Example #2:
    >>> # Using a Tiny Tim supplied PSF and make_psf_binning:
    >>> # Get a PSF and its center:
    >>> ttpsf = pyf.getdata('/home/esp01/events/wa008b-patricio/Tiny_tim/irac4_5600K_100x.fits')
    >>> psfctr = np.asarray(np.shape(ttpsf))/2
    >>> # The scale factor:
    >>> scale = 100
    >>> # Create an image to fit:
    >>> shape = np.array([21,21])
    >>> params = [1043, 915, 5e5, 200]
    >>> image, center = pf.make_psf_binning(ttpsf, shape, scale, params, psfctr)
    >>> # Add some noise:
    >>> noise = np.sqrt(image) * np.random.randn(21,21)
    >>> # The image to fit:
    >>> y   = image + noise
    >>> var = np.abs(image)
    >>> # Let's say our guess is whitin 1 pixel from the given position:
    >>> yguess = params[0] + np.random.randint(-scale,scale)
    >>> xguess = params[1] + np.random.randint(-scale,scale)
    >>> # Array of Y,X shifs around our guess where to search:
    >>> offsetrad = 1.0  # search within a 1 pixel radius:
    >>> noffset = int(2*scale*offsetrad + 1)
    >>> offset = np.arange(noffset) - noffset/2
    >>> # The shifts are relative to the position of the PSF:
    >>> yshift = yguess + offset
    >>> xshift = xguess + offset
    >>> shift = (yshift, xshift)
    >>> # Starting point, guess for the fluxes:
    >>> fluxguess = (1e4, 80)
    >>> # Find the best fit:
    >>> tini = time.time()
    >>> pos, bestp, chisq = pf.psf_fit(y, fluxguess, ttpsf, psfctr, scale, shift, mask=None, weights=1/var, make='bpf')
    >>> print(time.time()-tini)
    >>> # Best position:
    >>> print(pos)
    >>> # Best flux fit:
    >>> print(bestp)

    Modification History:
    ---------------------
    2011-05-21  patricio  Initial version.   pcubillos@fulbrightmail.org
    2011-05-27  patricio  Include gradient parameter in leastsq.
    2011-07-26  patricio  Unified both make_psf.
    """
    shape = np.shape(data)
    # Default mask: all good
    if mask is None:
        mask = np.ones(shape)
    # Default weights: no weighting
    if weights is None:
        weights = np.ones(shape)
    # Unpack shift
    y, x = shift
    # Lengths of the dependent varables:
    ny = len(y)
    nx = len(x)
    # Default initial step:
    if step is None:
        step = int(ny/2)
    # Default initial position: middle of the shift grid.
    if pos is None:
        pos = [int(ny/2), int(nx/2)]
    # Allocate space for subpsf in make_psf_bin outside the loop:
    ns = (np.asarray(shape, float)*scale).astype(int)
    subpsf = np.zeros(ns)
    # Define PSF constructor:
    if make == "ipf":
        maker = make_psf_interp
        # Discard values on the edge of the mask:
        j = 2
        mask[0:j,:] = mask[:,0:j] = mask[-j:,:] = mask[:,-j:] = 0
    elif make == "bpf":
        maker = make_psf_binning
    else:
        print("Unacceptable PSF constructor. Must be 'ipf' or 'bpf'")
        return
    # Initialize a chi-square grid (-1 flags "not yet evaluated"):
    chisq = -np.ones((ny, nx))
    # goodratio = np.sum(mask)/np.size(mask)
    # print(goodratio)
    # Coarse-to-fine pattern search over the shift grid: evaluate the
    # reduced chi-square on a 3x3 stencil of grid points spaced 'step'
    # elements around 'pos'; jump to the stencil minimum, and halve the
    # step whenever the current position is already the local minimum.
    # Terminates when step reaches 0.
    while(step > 0):
        # Calculate chisq in the surrounding:
        for shifty in np.arange(-1,2):
            # y position to evaluate (clipped to the grid):
            posy = np.clip(pos[0]+shifty*step, 0, ny-1)
            for shiftx in np.arange(-1,2):
                # x position to evaluate (clipped to the grid):
                posx = np.clip(pos[1]+shiftx*step, 0, nx-1)
                if chisq[posy, posx] == -1:
                    # Make a psf model for given y,x position:
                    # NOTE(review): the int() casts truncate the trial
                    # shifts.  That matches 'bpf' (integer subpixel
                    # indices) but presumably discards the fractional
                    # part of 'ipf' shifts -- confirm intended.
                    model, center = maker(spsf, shape, scale,
                        [int(y[posy]), int(x[posx]), int(1.0), int(0.0)], psfctr, subpsf)
                    # Weighted, masked values:
                    mmodel = model[np.where(mask)]
                    mdata = data[np.where(mask)]
                    mweights = weights[np.where(mask)]
                    args = (mdata, mmodel, mweights)
                    # Linear least-squares fit of [starflux, skyflux] at
                    # this trial position:
                    p, cov, info, msg, flag = so.leastsq(residuals, fluxguess, args,
                        Dfun=gradient, full_output=True, col_deriv=1)
                    # NOTE(review): 'err' is never used, and leastsq can
                    # return cov=None for a singular fit, which would make
                    # np.diagonal raise -- confirm inputs preclude this.
                    err = np.sqrt(np.diagonal(cov))
                    # Chi-square per degree of freedom:
                    cspdof = ( np.sum((info['fvec'])**2.0) /
                               (len(info["fvec"])-len(fluxguess)) )
                    chisq[posy, posx] = cspdof
        # Is the current position the minimum chi-square?
        # Minimum chi-square position (among evaluated entries):
        mcp = np.where(chisq==np.amin(chisq[np.where(chisq>=0)]))
        # If it is, then reduce the step size:
        if pos[0] == mcp[0][0] and pos[1] == mcp[1][0]:
            step = int(np.round(step/2.0))
        # If not, then move to the position of min. chi-square:
        else:
            pos[0] = mcp[0][0]
            pos[1] = mcp[1][0]
    # The best fitting parameters at the best position:
    model, center = maker(spsf, shape, scale, [int(y[pos[0]]), int(x[pos[1]]), 1, 0],
                          psfctr, subpsf)
    # This is the fix I need to do:
    mmodel = model[np.where(mask)]
    mdata = data[np.where(mask)]
    mweights = weights[np.where(mask)]
    args = (mdata, mmodel, mweights)
    # Re-fit the fluxes at the final position:
    p, cov, info, msg, flag = so.leastsq(residuals, fluxguess, args,
                                         Dfun=gradient, full_output=True, col_deriv=1)
    err = np.sqrt(np.diagonal(cov))
    # Return the position of min chisq, the best parameters, and the chisq grid:
    return center, p, chisq
# :::: Spitzer Wrapper Routine :::::::::::::::::::::::::::::::::
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def spitzer_fit(data, mask, weights, psf, psfctr, scale, make,
                offsetrad=1.0, noffset=201):
    """
    Fit a supersampled PSF to one Spitzer data frame.  Wrapper around
    psf_fit() for easy plug-in into the POET pipeline.

    Parameters:
    -----------
    data: 2D ndarray
        Data image to fit the PSF.
    mask: 2D ndarray
        Bad-pixel mask, same shape as data.  Good pixels have value 1;
        bad pixels have value 0 and are excluded from the fit.
    weights: 2D ndarray
        Fit weights (for scientific data use 1/sqrt(variance)).
        Same shape as data.
    psf: 2D ndimage
        The supersampled PSF image.
    psfctr: 2-elements tuple [y, x]
        y, x-position of the center of the PSF.
    scale: Scalar
        Ratio of the PSF and data pixel-scales.
    make: string
        PSF constructor passed on to psf_fit: 'bpf' or 'ipf'.
    offsetrad: Scalar
        Search radius (in data pixels) around the guess position.
    noffset: Scalar
        Number of trial offsets ('ipf' constructor only).

    Returns:
    --------
    bestfit: 4-elements tuple [y, x, starflux, skyflux]
        position and fluxes of the PSF that best fit the data.

    Modification History:
    ---------------------
    2011-07-26  patricio  First documented version.
                          pcubillos@fulbrightmail.org
    """
    # Flux starting point: sky from the median, star from the total
    # above-sky flux.
    skyguess = np.median(data)
    starguess = np.sum(data - skyguess)
    fluxguess = [starguess, skyguess]
    # A quick Gaussian fit supplies the initial y,x centroid guess:
    datashape = np.asarray(np.shape(data))
    gausspar, gausserr = g.fitgaussian(data, fitbg=1, yxguess=datashape/2)
    yxguess = gausspar[2:4]
    # Translate the centroid guess and build the trial-offset grid in the
    # coordinate system expected by the chosen PSF constructor:
    if make == 'bpf':
        # Subpixel position within the supersampled PSF:
        yguess, xguess = np.around(scale*(yxguess + 0.5) - 0.5)
        # One trial per subpixel within the search radius:
        noffset = int(2*scale*offsetrad + 1)
        offset = np.arange(noffset) - noffset/2
    elif make == 'ipf':
        # Position relative to the center of the image:
        yguess = yxguess[0] - np.shape(data)[0]/2.0 - 0.5
        xguess = yxguess[1] - np.shape(data)[1]/2.0 - 0.5
        offset = offsetrad * np.linspace(-1.0, 1.0, noffset)
    shift = (yguess + offset, xguess + offset)
    # Run the grid-search + least-squares fit:
    pos, bestp, chisq = psf_fit(data, fluxguess, psf, psfctr, scale,
                                shift, mask=mask, weights=weights, make=make)
    # Return best fit: [y, x, starflux, skyflux]
    return (pos[0], pos[1], bestp[0], bestp[1])
# :::: Sub Routines ::::::::::::::::::::::::::::::::::::::::::::
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
def binarray(image, binsize):
    """
    Resample a 2D image by summing non-overlapping binsize x binsize
    blocks of pixels.

    Parameters:
    -----------
    image : 2D ndarray
        Input image.
    binsize : integer scalar
        Number of pixels to stack along each dimension.  Non-integer
        values are truncated to int (matching the original behavior).

    Return:
    -------
    binarr : 2D ndarray (float)
        Resampled image of shape (ny//binsize, nx//binsize).  Trailing
        rows/columns that do not fill a complete bin are ignored (a
        warning is printed).

    Example:
    --------
    >>> import numpy as np
    >>> a = np.array([[1,0,1,0,1,2],
    ...               [1,1,1,1,1,1],
    ...               [0,0,0,1,1,1],
    ...               [1,0,0,1,0,0]])
    >>> binarray(a, 2)
    array([[3., 3., 5.],
           [1., 2., 2.]])

    Modfication History:
    --------------------
    2011-06-04  patricio  Very first version.
                          pcubillos@fulbrightmail.org
    """
    binsize = int(binsize)
    ny, nx = np.shape(image)
    # Shout if stack is incommensurable:
    if ny % binsize != 0:
        print("Y dimension is incommensurable, ignoring last incomplete stack.")
    if nx % binsize != 0:
        print("X dimension is incommensurable, ignoring last incomplete stack.")
    # Output shape after binning:
    nby, nbx = ny // binsize, nx // binsize
    # Trim incomplete bins, fold each bin onto its own axis, and reduce.
    # This replaces the original double Python loop with a single
    # vectorized numpy reduction (same output, far less overhead).
    trimmed = np.asarray(image, dtype=float)[:nby*binsize, :nbx*binsize]
    return trimmed.reshape(nby, binsize, nbx, binsize).sum(axis=(1, 3))
def residuals(params, data, model, weights):
    """
    Weighted residuals of a "stellar flux + constant sky" fit of a
    model to data.

    Parameters:
    -----------
    params : 2-element tuple [flux, sky]
        The model parameters to fit.  Flux scales the model; sky is a
        constant background.
    data : 1D ndarray
        An array with the data values.
    model : 1D ndarray
        Same shape as data; the stellar model.
    weights : ndarray
        Same shape as data; weighting factors for the fit, usually
        1/standard deviation.

    Result:
    -------
    1D ndarray with the weighted differences between the (scaled,
    offset) model and the data.

    Modification History:
    ---------------------
    2011-05-27  patricio  Initial Version.
                          pcubillos@fulbrightmail.org
    """
    starflux, skyflux = params
    scaled = starflux * model + skyflux
    return weights * (scaled - data)
def gradient(params, data, model, weights):
    """
    Gradient of residuals() with respect to its parameters, for use as
    the Dfun argument of scipy's leastsq (col_deriv form).

    Parameters:
    -----------
    params : 2-element tuple [flux, sky]
        The model parameters (unused: the residual is linear in both).
    data : 1D ndarray
        An array with the data values (unused, kept for the leastsq
        calling convention).
    model : 1D ndarray
        Same shape as data; the stellar model.
    weights : ndarray
        Same shape as data; weighting factors, usually
        1/standard deviation.

    Result:
    -------
    List of 1D ndarrays; element i is the derivative of residuals()
    with respect to params[i].

    Modification History:
    ---------------------
    2011-05-27  patricio  Initial Version.
                          pcubillos@fulbrightmail.org
    """
    d_flux = weights * model   # d(residuals)/d(flux)
    d_sky = weights            # d(residuals)/d(sky)
    return [d_flux, d_sky]
|
kevin218REPO_NAMEPOETPATH_START.@POET_extracted@POET-master@code@lib@psf_fit.py@.PATH_END.py
|
{
"filename": "table_mode.md",
"repo_name": "jbroll/starbase",
"repo_path": "starbase_extracted/starbase-master/docs/table_mode.md",
"type": "Markdown"
}
|
### table_mode - set the table i/o mode.
SYNOPSIS
--------
```
#include <../tablelib/table.h>
void table_mode(TableHead t, int m, int x);
```
PARAMETERS
----------
* `TableHead t` - Not documented.
* `int m` - Not documented.
* `int x` - Not documented.
DESCRIPTION
-----------
The table mode may be set to TABLE_PARSE or
TABLE_JUSTIFY. See [table_header](table_header.html).
SEE ALSO
--------
[table_colval](table_colval.html)
,
[table_colvals](table_colvals.html)
,
[table_colvali](table_colvali.html)
,
[table_colvald](table_colvald.html)
,
[table_rowloc](table_rowloc.html)
,
[table_parsline](table_parsline.html)
,
[table_colpad](table_colpad.html)
,
[table_coladd](table_coladd.html)
,
[table_colarg](table_colarg.html)
,
[table_colnum](table_colnum.html)
,
[table_colnam](table_colnam.html)
,
[table_hdrfree](table_hdrfree.html)
,
[table_hdrnth](table_hdrnth.html)
,
[table_rowfree](table_rowfree.html)
,
[table_header](table_header.html)
,
[table_rowput](table_rowput.html)
,
[table_hdrput](table_hdrput.html)
,
[table_rowget](table_rowget.html)
,
[table_rowtrim](table_rowtrim.html)
,
[table_hdrget](table_hdrget.html)
,
[table_hdrgetn](table_hdrgetn.html)
,
[table_hdrgeti](table_hdrgeti.html)
,
[table_hdrgetd](table_hdrgetd.html)
,
[table_hdrgets](table_hdrgets.html)
,
[table_hdrfind](table_hdrfind.html)
,
[table_extract](table_extract.html)
,
[table_load](table_load.html)
,
[table_loadva](table_loadva.html)
,
[table_ncol](table_ncol.html)
,
[table_ofs](table_ofs.html)
,
[table_ors](table_ors.html)
|
jbrollREPO_NAMEstarbasePATH_START.@starbase_extracted@starbase-master@docs@table_mode.md@.PATH_END.py
|
{
"filename": "test_pickle.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/lib/matplotlib/tests/test_pickle.py",
"type": "Python"
}
|
from io import BytesIO
import ast
import os
import sys
import pickle
import pickletools
import numpy as np
import pytest
import matplotlib as mpl
from matplotlib import cm
from matplotlib.testing import subprocess_run_helper, is_ci_environment
from matplotlib.testing.decorators import check_figures_equal
from matplotlib.dates import rrulewrapper
from matplotlib.lines import VertexSelector
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
import matplotlib.figure as mfigure
from mpl_toolkits.axes_grid1 import axes_divider, parasite_axes # type: ignore[import]
def test_simple():
    """Smoke-test pickling figures, subplots, polar axes, bar plots,
    and log-scale axes."""
    fig = plt.figure()
    pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
    ax = plt.subplot(121)
    pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
    ax = plt.axes(projection='polar')
    plt.plot(np.arange(10), label='foobar')
    plt.legend()
    pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
    # ax = plt.subplot(121, projection='hammer')
    # pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
    plt.figure()
    plt.bar(x=np.arange(10), height=np.arange(10))
    pickle.dump(plt.gca(), BytesIO(), pickle.HIGHEST_PROTOCOL)
    fig = plt.figure()
    ax = plt.axes()
    plt.plot(np.arange(10))
    ax.set_yscale('log')
    pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
def _generate_complete_test_figure(fig_ref):
    """Fill *fig_ref* with a wide variety of artist types (contour,
    pcolormesh, imshow, streamplot, quiver, legends, subfigures, ...)
    to stress-test pickling of a complete figure."""
    fig_ref.set_size_inches((10, 6))
    plt.figure(fig_ref)
    plt.suptitle('Can you fit any more in a figure?')
    # make some arbitrary data
    x, y = np.arange(8), np.arange(10)
    data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
    v = np.sin(v * -0.6)
    # Ensure lists also pickle correctly.
    plt.subplot(3, 3, 1)
    plt.plot(list(range(10)))
    plt.ylabel("hello")
    plt.subplot(3, 3, 2)
    plt.contourf(data, hatches=['//', 'ooo'])
    plt.colorbar()
    plt.subplot(3, 3, 3)
    plt.pcolormesh(data)
    plt.subplot(3, 3, 4)
    plt.imshow(data)
    plt.ylabel("hello\nworld!")
    plt.subplot(3, 3, 5)
    plt.pcolor(data)
    ax = plt.subplot(3, 3, 6)
    ax.set_xlim(0, 7)
    ax.set_ylim(0, 9)
    plt.streamplot(x, y, u, v)
    ax = plt.subplot(3, 3, 7)
    ax.set_xlim(0, 7)
    ax.set_ylim(0, 9)
    plt.quiver(x, y, u, v)
    plt.subplot(3, 3, 8)
    plt.scatter(x, x ** 2, label='$x^2$')
    plt.legend(loc='upper left')
    plt.subplot(3, 3, 9)
    plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4, label='$-.5 x$')
    plt.legend(draggable=True)
    # Ensure subfigure parenting works.
    subfigs = fig_ref.subfigures(2)
    subfigs[0].subplots(1, 2)
    subfigs[1].subplots(1, 2)
    fig_ref.align_ylabels()  # Test handling of _align_label_groups Groupers.
@mpl.style.context("default")
@check_figures_equal(extensions=["png"])
def test_complete(fig_test, fig_ref):
    """Round-trip a fully-populated figure through pickle and check the
    result renders identically (and contains no canvas reference)."""
    _generate_complete_test_figure(fig_ref)
    # plotting is done, now test its pickle-ability
    pkl = pickle.dumps(fig_ref, pickle.HIGHEST_PROTOCOL)
    # FigureCanvasAgg is picklable and GUI canvases are generally not, but there should
    # be no reference to the canvas in the pickle stream in either case. In order to
    # keep the test independent of GUI toolkits, run it with Agg and check that there's
    # no reference to FigureCanvasAgg in the pickle stream.
    assert "FigureCanvasAgg" not in [arg for op, arg, pos in pickletools.genops(pkl)]
    loaded = pickle.loads(pkl)
    loaded.canvas.draw()
    fig_test.set_size_inches(loaded.get_size_inches())
    fig_test.figimage(loaded.canvas.renderer.buffer_rgba())
    plt.close(loaded)
def _pickle_load_subprocess():
    """Subprocess helper: unpickle the figure at $PICKLE_FILE_PATH and
    print its re-pickled bytes to stdout."""
    import os
    import pickle
    path = os.environ['PICKLE_FILE_PATH']
    with open(path, 'rb') as blob:
        fig = pickle.load(blob)
    print(str(pickle.dumps(fig)))
@mpl.style.context("default")
@check_figures_equal(extensions=['png'])
def test_pickle_load_from_subprocess(fig_test, fig_ref, tmp_path):
    """Check a figure pickled here can be loaded in a fresh interpreter
    and still renders identically."""
    _generate_complete_test_figure(fig_ref)
    fp = tmp_path / 'sinus.pickle'
    assert not fp.exists()
    with fp.open('wb') as file:
        pickle.dump(fig_ref, file, pickle.HIGHEST_PROTOCOL)
    assert fp.exists()
    proc = subprocess_run_helper(
        _pickle_load_subprocess,
        timeout=60,
        extra_env={
            "PICKLE_FILE_PATH": str(fp),
            "MPLBACKEND": "Agg",
            # subprocess_run_helper will set SOURCE_DATE_EPOCH=0, so for a dirty tree,
            # the version will have the date 19700101. As we aren't trying to test the
            # version compatibility warning, force setuptools-scm to use the same
            # version as us.
            "SETUPTOOLS_SCM_PRETEND_VERSION_FOR_MATPLOTLIB": mpl.__version__,
        },
    )
    loaded_fig = pickle.loads(ast.literal_eval(proc.stdout))
    loaded_fig.canvas.draw()
    fig_test.set_size_inches(loaded_fig.get_size_inches())
    fig_test.figimage(loaded_fig.canvas.renderer.buffer_rgba())
    plt.close(loaded_fig)
def test_gcf():
    """Unpickling a figure must re-register it with the Gcf manager and
    preserve its label."""
    fig = plt.figure("a label")
    buf = BytesIO()
    pickle.dump(fig, buf, pickle.HIGHEST_PROTOCOL)
    plt.close("all")
    assert plt._pylab_helpers.Gcf.figs == {}  # No figures must be left.
    fig = pickle.loads(buf.getbuffer())
    assert plt._pylab_helpers.Gcf.figs != {}  # A manager is there again.
    assert fig.get_label() == "a label"
def test_no_pyplot():
    # tests pickle-ability of a figure not created with pyplot
    from matplotlib.backends.backend_pdf import FigureCanvasPdf
    fig = mfigure.Figure()
    _ = FigureCanvasPdf(fig)
    ax = fig.add_subplot(1, 1, 1)
    ax.plot([1, 2, 3], [1, 2, 3])
    pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
def test_renderer():
    """An Agg renderer must be picklable on its own."""
    from matplotlib.backends.backend_agg import RendererAgg
    renderer = RendererAgg(10, 20, 30)
    pickle.dump(renderer, BytesIO())
def test_image():
    # Prior to v1.4.0 the Image would cache data which was not picklable
    # once it had been drawn.
    from matplotlib.backends.backend_agg import new_figure_manager
    manager = new_figure_manager(1000)
    fig = manager.canvas.figure
    ax = fig.add_subplot(1, 1, 1)
    ax.imshow(np.arange(12).reshape(3, 4))
    manager.canvas.draw()
    pickle.dump(fig, BytesIO())
def test_polar():
    """A figure with a polar subplot must survive a pickle round-trip
    and still draw."""
    plt.subplot(polar=True)
    fig = plt.gcf()
    pf = pickle.dumps(fig)
    pickle.loads(pf)
    plt.draw()
class TransformBlob:
    """Bundle of linked transforms used to verify that pickling
    preserves the parent/child wiring of TransformWrapper and
    composite transforms (see test_transform)."""

    def __init__(self):
        self.identity = mtransforms.IdentityTransform()
        self.identity2 = mtransforms.IdentityTransform()
        # Force use of the more complex composition.
        self.composite = mtransforms.CompositeGenericTransform(
            self.identity,
            self.identity2)
        # Check parent -> child links of TransformWrapper.
        self.wrapper = mtransforms.TransformWrapper(self.composite)
        # Check child -> parent links of TransformWrapper.
        self.composite2 = mtransforms.CompositeGenericTransform(
            self.wrapper,
            self.identity)
def test_transform():
    """Pickle/unpickle a TransformBlob and verify the transform graph
    links and dimensions survive."""
    obj = TransformBlob()
    pf = pickle.dumps(obj)
    del obj
    obj = pickle.loads(pf)
    # Check parent -> child links of TransformWrapper.
    assert obj.wrapper._child == obj.composite
    # Check child -> parent links of TransformWrapper.
    assert [v() for v in obj.wrapper._parents.values()] == [obj.composite2]
    # Check input and output dimensions are set as expected.
    assert obj.wrapper.input_dims == obj.composite.input_dims
    assert obj.wrapper.output_dims == obj.composite.output_dims
def test_rrulewrapper():
    """rrulewrapper must round-trip through pickle without recursing."""
    r = rrulewrapper(2)
    try:
        pickle.loads(pickle.dumps(r))
    except RecursionError:
        print('rrulewrapper pickling test failed')
        raise
def test_shared():
    """Shared-axis links must survive a pickle round-trip."""
    fig, axs = plt.subplots(2, sharex=True)
    fig = pickle.loads(pickle.dumps(fig))
    fig.axes[0].set_xlim(10, 20)
    assert fig.axes[1].get_xlim() == (10, 20)
def test_inset_and_secondary():
    """Inset axes and secondary axes (with callable transforms) must be
    picklable."""
    fig, ax = plt.subplots()
    ax.inset_axes([.1, .1, .3, .3])
    ax.secondary_xaxis("top", functions=(np.square, np.sqrt))
    pickle.loads(pickle.dumps(fig))
@pytest.mark.parametrize("cmap", cm._colormaps.values())
def test_cmap(cmap):
    """Every registered colormap must be picklable."""
    pickle.dumps(cmap)
def test_unpickle_canvas():
    """An unpickled figure must come back with a (fresh) canvas attached."""
    fig = mfigure.Figure()
    assert fig.canvas is not None
    out = BytesIO()
    pickle.dump(fig, out)
    out.seek(0)
    fig2 = pickle.load(out)
    assert fig2.canvas is not None
def test_mpl_toolkits():
    """mpl_toolkits HostAxes must round-trip through pickle keeping its
    class identity."""
    ax = parasite_axes.host_axes([0, 0, 1, 1])
    axes_divider.make_axes_area_auto_adjustable(ax)
    assert type(pickle.loads(pickle.dumps(ax))) == parasite_axes.HostAxes
def test_standard_norm():
    """A stock norm class must unpickle to the same class."""
    assert type(pickle.loads(pickle.dumps(mpl.colors.LogNorm()))) \
        == mpl.colors.LogNorm
def test_dynamic_norm():
    """A norm class generated at runtime by make_norm_from_scale must
    still unpickle to its own (dynamic) type."""
    logit_norm_instance = mpl.colors.make_norm_from_scale(
        mpl.scale.LogitScale, mpl.colors.Normalize)()
    assert type(pickle.loads(pickle.dumps(logit_norm_instance))) \
        == type(logit_norm_instance)
def test_vertexselector():
    """A VertexSelector attached to a pickable line must be picklable."""
    line, = plt.plot([0, 1], picker=True)
    pickle.loads(pickle.dumps(VertexSelector(line)))
def test_cycler():
    """The property-cycle state must survive pickling: the next plot
    after unpickling continues the cycle ("m" follows "c")."""
    ax = plt.figure().add_subplot()
    ax.set_prop_cycle(c=["c", "m", "y", "k"])
    ax.plot([1, 2])
    ax = pickle.loads(pickle.dumps(ax))
    l, = ax.plot([3, 4])
    assert l.get_color() == "m"
# Run under an interactive backend to test that we don't try to pickle the
# (interactive and non-picklable) canvas.
def _test_axeswidget_interactive():
    """Subprocess helper: pickle a widget while a GUI backend is active."""
    ax = plt.figure().add_subplot()
    pickle.dumps(mpl.widgets.Button(ax, "button"))
@pytest.mark.xfail(  # https://github.com/actions/setup-python/issues/649
    ('TF_BUILD' in os.environ or 'GITHUB_ACTION' in os.environ) and
    sys.platform == 'darwin' and sys.version_info[:2] < (3, 11),
    reason='Tk version mismatch on Azure macOS CI'
)
def test_axeswidget_interactive():
    """Run _test_axeswidget_interactive in a subprocess under TkAgg."""
    subprocess_run_helper(
        _test_axeswidget_interactive,
        timeout=120 if is_ci_environment() else 20,
        extra_env={'MPLBACKEND': 'tkagg'}
    )
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@lib@matplotlib@tests@test_pickle.py@.PATH_END.py
|
{
"filename": "act.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/act.py",
"type": "Python"
}
|
# Form implementation generated from reading ui file 'act.ui'
#
# Created by: PyQt6 UI code generator 6.6.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic6 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt6 import QtCore, QtGui, QtWidgets
class Ui_Activity(object):
    """Auto-generated (pyuic6) UI for the exostriker "Activity" detrending window.

    Generated from ``act.ui`` -- do not edit by hand; regenerate with pyuic6
    instead (see the file header warning).  Call ``setupUi`` on a plain
    QWidget to build the widget tree; ``retranslateUi`` is invoked from
    ``setupUi`` to install all user-visible strings.
    """

    def setupUi(self, Activity):
        """Create, configure and lay out every child widget of *Activity*."""
        Activity.setObjectName("Activity")
        Activity.resize(906, 717)
        font = QtGui.QFont()
        font.setPointSize(9)
        Activity.setFont(font)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap("../../../../../.designer/backup/33_striker.png"), QtGui.QIcon.Mode.Normal, QtGui.QIcon.State.Off)
        Activity.setWindowIcon(icon)
        Activity.setLocale(QtCore.QLocale(QtCore.QLocale.Language.English, QtCore.QLocale.Country.UnitedKingdom))
        # Outer layout holding a single inner grid of controls and plots.
        self.gridLayout_2 = QtWidgets.QGridLayout(Activity)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setHorizontalSpacing(4)
        self.gridLayout.setVerticalSpacing(2)
        self.gridLayout.setObjectName("gridLayout")
        self.label_low_freq = QtWidgets.QLabel(parent=Activity)
        self.label_low_freq.setObjectName("label_low_freq")
        self.gridLayout.addWidget(self.label_low_freq, 2, 5, 1, 1)
        # Regression controls (start disabled).
        self.regres_wl = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.regres_wl.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.regres_wl.setFont(font)
        self.regres_wl.setMinimum(0.1)
        self.regres_wl.setMaximum(2.0)
        self.regres_wl.setSingleStep(0.1)
        self.regres_wl.setProperty("value", 0.5)
        self.regres_wl.setObjectName("regres_wl")
        self.gridLayout.addWidget(self.regres_wl, 8, 3, 1, 1)
        self.regres_bt = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.regres_bt.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.regres_bt.setFont(font)
        self.regres_bt.setMaximum(1.0)
        self.regres_bt.setSingleStep(0.1)
        self.regres_bt.setProperty("value", 0.5)
        self.regres_bt.setObjectName("regres_bt")
        self.gridLayout.addWidget(self.regres_bt, 8, 4, 1, 1)
        # Detrending-method radio buttons share buttonGroup_trendOptions.
        self.radio_Splines = QtWidgets.QRadioButton(parent=Activity)
        self.radio_Splines.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.radio_Splines.setFont(font)
        self.radio_Splines.setObjectName("radio_Splines")
        self.buttonGroup_trendOptions = QtWidgets.QButtonGroup(Activity)
        self.buttonGroup_trendOptions.setObjectName("buttonGroup_trendOptions")
        self.buttonGroup_trendOptions.addButton(self.radio_Splines)
        self.gridLayout.addWidget(self.radio_Splines, 5, 0, 1, 2)
        self.spline_bt = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.spline_bt.setEnabled(True)
        self.spline_bt.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.spline_bt.setFont(font)
        self.spline_bt.setMaximum(20000000.0)
        self.spline_bt.setSingleStep(0.1)
        self.spline_bt.setProperty("value", 5.0)
        self.spline_bt.setObjectName("spline_bt")
        self.gridLayout.addWidget(self.spline_bt, 5, 4, 1, 1)
        self.radio_Regressions = QtWidgets.QRadioButton(parent=Activity)
        self.radio_Regressions.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.radio_Regressions.setFont(font)
        self.radio_Regressions.setObjectName("radio_Regressions")
        self.buttonGroup_trendOptions.addButton(self.radio_Regressions)
        self.gridLayout.addWidget(self.radio_Regressions, 8, 0, 1, 2)
        self.label_wl = QtWidgets.QLabel(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.label_wl.setFont(font)
        self.label_wl.setObjectName("label_wl")
        self.gridLayout.addWidget(self.label_wl, 2, 3, 1, 1)
        self.reset_data = QtWidgets.QPushButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.reset_data.setFont(font)
        self.reset_data.setObjectName("reset_data")
        self.gridLayout.addWidget(self.reset_data, 2, 8, 1, 1)
        self.comboBox_poly = QtWidgets.QComboBox(parent=Activity)
        self.comboBox_poly.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.comboBox_poly.setFont(font)
        self.comboBox_poly.setObjectName("comboBox_poly")
        self.gridLayout.addWidget(self.comboBox_poly, 6, 2, 1, 1)
        # Second-plot content selectors share buttonGroup_plot2.
        self.flatten_data = QtWidgets.QRadioButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.flatten_data.setFont(font)
        self.flatten_data.setChecked(True)
        self.flatten_data.setObjectName("flatten_data")
        self.buttonGroup_plot2 = QtWidgets.QButtonGroup(Activity)
        self.buttonGroup_plot2.setObjectName("buttonGroup_plot2")
        self.buttonGroup_plot2.addButton(self.flatten_data)
        self.gridLayout.addWidget(self.flatten_data, 3, 10, 1, 1)
        self.poly_bt = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.poly_bt.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.poly_bt.setFont(font)
        self.poly_bt.setMaximum(1.0)
        self.poly_bt.setSingleStep(0.1)
        self.poly_bt.setProperty("value", 0.5)
        self.poly_bt.setObjectName("poly_bt")
        self.gridLayout.addWidget(self.poly_bt, 6, 4, 1, 1)
        spacerItem = QtWidgets.QSpacerItem(13, 20, QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Minimum)
        self.gridLayout.addItem(spacerItem, 3, 5, 1, 1)
        self.print_stat = QtWidgets.QPushButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.print_stat.setFont(font)
        self.print_stat.setObjectName("print_stat")
        self.gridLayout.addWidget(self.print_stat, 2, 7, 1, 1)
        self.line_2 = QtWidgets.QFrame(parent=Activity)
        self.line_2.setFrameShape(QtWidgets.QFrame.Shape.VLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
        self.line_2.setObjectName("line_2")
        self.gridLayout.addWidget(self.line_2, 2, 6, 8, 1)
        self.saveProduct = QtWidgets.QPushButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.saveProduct.setFont(font)
        self.saveProduct.setObjectName("saveProduct")
        self.gridLayout.addWidget(self.saveProduct, 8, 10, 1, 1)
        self.radio_GPs = QtWidgets.QRadioButton(parent=Activity)
        self.radio_GPs.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.radio_GPs.setFont(font)
        self.radio_GPs.setObjectName("radio_GPs")
        self.buttonGroup_trendOptions.addButton(self.radio_GPs)
        self.gridLayout.addWidget(self.radio_GPs, 9, 0, 1, 2)
        self.GLS_of_data = QtWidgets.QRadioButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.GLS_of_data.setFont(font)
        self.GLS_of_data.setObjectName("GLS_of_data")
        self.buttonGroup_plot2.addButton(self.GLS_of_data)
        self.gridLayout.addWidget(self.GLS_of_data, 4, 10, 1, 1)
        self.GP_period = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.GP_period.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.GP_period.setFont(font)
        self.GP_period.setObjectName("GP_period")
        self.gridLayout.addWidget(self.GP_period, 9, 4, 1, 1)
        self.checkBox_GP_robust = QtWidgets.QCheckBox(parent=Activity)
        self.checkBox_GP_robust.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.checkBox_GP_robust.setFont(font)
        self.checkBox_GP_robust.setObjectName("checkBox_GP_robust")
        self.gridLayout.addWidget(self.checkBox_GP_robust, 9, 5, 1, 1)
        self.spline_wl = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.spline_wl.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.spline_wl.setFont(font)
        self.spline_wl.setMinimum(0.1)
        self.spline_wl.setMaximum(2000000.0)
        self.spline_wl.setSingleStep(0.1)
        self.spline_wl.setProperty("value", 180.0)
        self.spline_wl.setObjectName("spline_wl")
        self.gridLayout.addWidget(self.spline_wl, 5, 3, 1, 1)
        self.click_to_reject = QtWidgets.QCheckBox(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.click_to_reject.setFont(font)
        self.click_to_reject.setObjectName("click_to_reject")
        self.gridLayout.addWidget(self.click_to_reject, 3, 7, 1, 2)
        self.readme_button = QtWidgets.QPushButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.readme_button.setFont(font)
        self.readme_button.setObjectName("readme_button")
        self.gridLayout.addWidget(self.readme_button, 9, 10, 1, 1)
        self.radio_remove_mean = QtWidgets.QRadioButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.radio_remove_mean.setFont(font)
        self.radio_remove_mean.setChecked(False)
        self.radio_remove_mean.setObjectName("radio_remove_mean")
        self.buttonGroup_trendOptions.addButton(self.radio_remove_mean)
        self.gridLayout.addWidget(self.radio_remove_mean, 3, 1, 1, 1)
        self.comboBox_splines = QtWidgets.QComboBox(parent=Activity)
        self.comboBox_splines.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.comboBox_splines.setFont(font)
        self.comboBox_splines.setObjectName("comboBox_splines")
        self.gridLayout.addWidget(self.comboBox_splines, 5, 2, 1, 1)
        self.comboBox_GP = QtWidgets.QComboBox(parent=Activity)
        self.comboBox_GP.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.comboBox_GP.setFont(font)
        self.comboBox_GP.setObjectName("comboBox_GP")
        self.gridLayout.addWidget(self.comboBox_GP, 9, 2, 1, 1)
        self.kernel_size = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.kernel_size.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.kernel_size.setFont(font)
        self.kernel_size.setDecimals(1)
        self.kernel_size.setMaximum(100.0)
        self.kernel_size.setSingleStep(0.1)
        self.kernel_size.setProperty("value", 5.0)
        self.kernel_size.setObjectName("kernel_size")
        self.gridLayout.addWidget(self.kernel_size, 9, 3, 1, 1)
        self.try_button = QtWidgets.QPushButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.try_button.setFont(font)
        self.try_button.setObjectName("try_button")
        self.gridLayout.addWidget(self.try_button, 2, 10, 1, 1)
        self.GLS_of_detr_data = QtWidgets.QRadioButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.GLS_of_detr_data.setFont(font)
        self.GLS_of_detr_data.setObjectName("GLS_of_detr_data")
        self.buttonGroup_plot2.addButton(self.GLS_of_detr_data)
        self.gridLayout.addWidget(self.GLS_of_detr_data, 5, 10, 1, 1)
        self.comboBox_regs = QtWidgets.QComboBox(parent=Activity)
        self.comboBox_regs.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.comboBox_regs.setFont(font)
        self.comboBox_regs.setObjectName("comboBox_regs")
        self.gridLayout.addWidget(self.comboBox_regs, 8, 2, 1, 1)
        self.GLS_of_model = QtWidgets.QRadioButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.GLS_of_model.setFont(font)
        self.GLS_of_model.setObjectName("GLS_of_model")
        self.buttonGroup_plot2.addButton(self.GLS_of_model)
        self.gridLayout.addWidget(self.GLS_of_model, 6, 10, 1, 1)
        self.comboBox_sliders = QtWidgets.QComboBox(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.comboBox_sliders.setFont(font)
        self.comboBox_sliders.setObjectName("comboBox_sliders")
        self.gridLayout.addWidget(self.comboBox_sliders, 4, 2, 1, 1)
        self.poly_wl = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.poly_wl.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.poly_wl.setFont(font)
        self.poly_wl.setMinimum(0.1)
        self.poly_wl.setMaximum(2.0)
        self.poly_wl.setSingleStep(0.1)
        self.poly_wl.setProperty("value", 0.5)
        self.poly_wl.setObjectName("poly_wl")
        self.gridLayout.addWidget(self.poly_wl, 6, 3, 1, 1)
        self.line = QtWidgets.QFrame(parent=Activity)
        self.line.setFrameShape(QtWidgets.QFrame.Shape.VLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Shadow.Sunken)
        self.line.setObjectName("line")
        self.gridLayout.addWidget(self.line, 2, 9, 9, 1)
        self.label_method = QtWidgets.QLabel(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.label_method.setFont(font)
        self.label_method.setObjectName("label_method")
        self.gridLayout.addWidget(self.label_method, 2, 2, 1, 1)
        self.label_high_freq = QtWidgets.QLabel(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.label_high_freq.setFont(font)
        self.label_high_freq.setObjectName("label_high_freq")
        self.gridLayout.addWidget(self.label_high_freq, 2, 4, 1, 1)
        self.radio_remove_median = QtWidgets.QRadioButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.radio_remove_median.setFont(font)
        self.radio_remove_median.setChecked(True)
        self.radio_remove_median.setObjectName("radio_remove_median")
        self.buttonGroup_trendOptions.addButton(self.radio_remove_median)
        self.gridLayout.addWidget(self.radio_remove_median, 3, 0, 1, 1)
        self.radio_Polynomials = QtWidgets.QRadioButton(parent=Activity)
        self.radio_Polynomials.setEnabled(False)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.radio_Polynomials.setFont(font)
        self.radio_Polynomials.setObjectName("radio_Polynomials")
        self.buttonGroup_trendOptions.addButton(self.radio_Polynomials)
        self.gridLayout.addWidget(self.radio_Polynomials, 6, 0, 1, 2)
        self.filter_order = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.filter_order.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.filter_order.setFont(font)
        self.filter_order.setLocale(QtCore.QLocale(QtCore.QLocale.Language.English, QtCore.QLocale.Country.UnitedKingdom))
        self.filter_order.setDecimals(0)
        self.filter_order.setMinimum(1.0)
        self.filter_order.setMaximum(30.0)
        self.filter_order.setSingleStep(1.0)
        self.filter_order.setProperty("value", 3.0)
        self.filter_order.setObjectName("filter_order")
        self.gridLayout.addWidget(self.filter_order, 4, 3, 1, 1)
        self.radio_timeW = QtWidgets.QRadioButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.radio_timeW.setFont(font)
        self.radio_timeW.setObjectName("radio_timeW")
        self.buttonGroup_trendOptions.addButton(self.radio_timeW)
        self.gridLayout.addWidget(self.radio_timeW, 4, 0, 1, 2)
        # Two pyqtgraph plot panes span the full width (rows 0 and 1).
        self.plot = PlotWidget(parent=Activity)
        self.plot.setMouseTracking(True)
        self.plot.setObjectName("plot")
        self.gridLayout.addWidget(self.plot, 0, 0, 1, 11)
        self.plot_2 = PlotWidget(parent=Activity)
        self.plot_2.setMouseTracking(True)
        self.plot_2.setObjectName("plot_2")
        self.gridLayout.addWidget(self.plot_2, 1, 0, 1, 11)
        self.label = QtWidgets.QLabel(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.label.setFont(font)
        self.label.setObjectName("label")
        self.gridLayout.addWidget(self.label, 2, 0, 1, 2)
        self.filter_high_freq = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.filter_high_freq.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.filter_high_freq.setFont(font)
        self.filter_high_freq.setLocale(QtCore.QLocale(QtCore.QLocale.Language.English, QtCore.QLocale.Country.UnitedKingdom))
        self.filter_high_freq.setDecimals(3)
        self.filter_high_freq.setMinimum(1.0)
        self.filter_high_freq.setMaximum(999999.0)
        self.filter_high_freq.setSingleStep(1.0)
        self.filter_high_freq.setProperty("value", 1.0)
        self.filter_high_freq.setObjectName("filter_high_freq")
        self.gridLayout.addWidget(self.filter_high_freq, 4, 4, 1, 1)
        self.filter_low_freq = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.filter_low_freq.setMinimumSize(QtCore.QSize(100, 0))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.filter_low_freq.setFont(font)
        self.filter_low_freq.setLocale(QtCore.QLocale(QtCore.QLocale.Language.English, QtCore.QLocale.Country.UnitedKingdom))
        self.filter_low_freq.setDecimals(3)
        self.filter_low_freq.setMinimum(1.0)
        self.filter_low_freq.setMaximum(999999.0)
        self.filter_low_freq.setSingleStep(1.0)
        self.filter_low_freq.setProperty("value", 365.0)
        self.filter_low_freq.setObjectName("filter_low_freq")
        self.gridLayout.addWidget(self.filter_low_freq, 4, 5, 1, 1)
        self.add_epoch = QtWidgets.QPushButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.add_epoch.setFont(font)
        self.add_epoch.setObjectName("add_epoch")
        self.gridLayout.addWidget(self.add_epoch, 8, 7, 1, 1)
        self.extra_BJD = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.extra_BJD.setMaximumSize(QtCore.QSize(100, 16777215))
        font = QtGui.QFont()
        font.setPointSize(9)
        self.extra_BJD.setFont(font)
        self.extra_BJD.setDecimals(3)
        self.extra_BJD.setMinimum(-9999999.0)
        self.extra_BJD.setMaximum(9999999.0)
        self.extra_BJD.setProperty("value", 2457000.0)
        self.extra_BJD.setObjectName("extra_BJD")
        self.gridLayout.addWidget(self.extra_BJD, 8, 8, 1, 1)
        self.bin_data = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.bin_data.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.bin_data.setFont(font)
        self.bin_data.setSuffix("")
        self.bin_data.setDecimals(6)
        self.bin_data.setMinimum(1e-05)
        self.bin_data.setSingleStep(0.001)
        self.bin_data.setProperty("value", 0.01)
        self.bin_data.setObjectName("bin_data")
        self.gridLayout.addWidget(self.bin_data, 6, 8, 1, 1)
        self.button_bin_data = QtWidgets.QPushButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.button_bin_data.setFont(font)
        self.button_bin_data.setObjectName("button_bin_data")
        self.gridLayout.addWidget(self.button_bin_data, 6, 7, 1, 1)
        self.act_sigma_clip = QtWidgets.QDoubleSpinBox(parent=Activity)
        self.act_sigma_clip.setEnabled(True)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.act_sigma_clip.setFont(font)
        self.act_sigma_clip.setDecimals(4)
        self.act_sigma_clip.setMinimum(0.5)
        self.act_sigma_clip.setMaximum(30.0)
        self.act_sigma_clip.setSingleStep(0.5)
        self.act_sigma_clip.setProperty("value", 10.0)
        self.act_sigma_clip.setObjectName("act_sigma_clip")
        self.gridLayout.addWidget(self.act_sigma_clip, 5, 8, 1, 1)
        self.button_sigma_clip = QtWidgets.QPushButton(parent=Activity)
        font = QtGui.QFont()
        font.setPointSize(9)
        self.button_sigma_clip.setFont(font)
        self.button_sigma_clip.setObjectName("button_sigma_clip")
        self.gridLayout.addWidget(self.button_sigma_clip, 5, 7, 1, 1)
        self.gridLayout_2.addLayout(self.gridLayout, 0, 0, 1, 1)

        self.retranslateUi(Activity)
        QtCore.QMetaObject.connectSlotsByName(Activity)

    def retranslateUi(self, Activity):
        """Install all translated user-visible strings on *Activity*."""
        _translate = QtCore.QCoreApplication.translate
        Activity.setWindowTitle(_translate("Activity", "Activity"))
        self.label_low_freq.setText(_translate("Activity", "low freq "))
        self.radio_Splines.setText(_translate("Activity", "Splines"))
        self.radio_Regressions.setText(_translate("Activity", "Regressions"))
        self.label_wl.setText(_translate("Activity", "Window length"))
        self.reset_data.setText(_translate("Activity", "Reset"))
        self.flatten_data.setText(_translate("Activity", "detrended data"))
        self.print_stat.setText(_translate("Activity", "Print stat"))
        self.saveProduct.setText(_translate("Activity", "Save modif. data"))
        self.radio_GPs.setText(_translate("Activity", "Gaussian Processes"))
        self.GLS_of_data.setText(_translate("Activity", "GLS of input data"))
        self.checkBox_GP_robust.setText(_translate("Activity", "robust"))
        self.click_to_reject.setText(_translate("Activity", "remove outliers"))
        self.readme_button.setText(_translate("Activity", "READ ME"))
        self.radio_remove_mean.setText(_translate("Activity", "Mean"))
        self.try_button.setText(_translate("Activity", "Try!"))
        self.GLS_of_detr_data.setText(_translate("Activity", "GLS of modif. data"))
        self.GLS_of_model.setText(_translate("Activity", "GLS of model"))
        self.label_method.setText(_translate("Activity", "Method"))
        self.label_high_freq.setText(_translate("Activity", "high freq"))
        self.radio_remove_median.setText(_translate("Activity", "Median"))
        self.radio_Polynomials.setText(_translate("Activity", "Polynomials"))
        self.radio_timeW.setText(_translate("Activity", "Filter"))
        self.label.setText(_translate("Activity", "Subtract"))
        self.add_epoch.setText(_translate("Activity", "Add/Remove BJD"))
        self.button_bin_data.setText(_translate("Activity", "Bin data [d]"))
        self.button_sigma_clip.setText(_translate("Activity", "sigma clip"))
# pyuic6 emits imports for promoted widgets (here pyqtgraph's PlotWidget,
# used for ``self.plot`` and ``self.plot_2``) after the class definition;
# the import still executes at module load time, before setupUi runs.
from pyqtgraph import PlotWidget


if __name__ == "__main__":
    # Stand-alone preview: build the window and start the Qt event loop.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Activity = QtWidgets.QWidget()
    ui = Ui_Activity()
    ui.setupUi(Activity)
    Activity.show()
    sys.exit(app.exec())
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@act.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "nasa/Kamodo",
"repo_path": "Kamodo_extracted/Kamodo-master/README.md",
"type": "Markdown"
}
|
 
# The CCMC Kamodo Analysis Suite
## Online Documentation
https://nasa.github.io/Kamodo/
## Vision Statement
Kamodo is an official NASA open-source python package built upon the functionalization of datasets. Once a dataset is functionalized in Kamodo, several important capabilities are then available to the user, including data analysis via function composition, automatic unit conversions, and publication quality graphics all using intuitive and simplistic syntax. By applying these capabilities to heliophysics model outputs, we aim to:
- Drastically simplify the currently complex data utilization process for model outputs,
- Provide interactive access to functionalized model outputs for users ranging in programming skill from beginners – via code-free interfaces and video tutorials – to advanced users – via thorough documentation, Jupyter notebook examples and sample workflows,
- Layer multiple functionalities on top of the functionalized model outputs, all with model-agnostic and uniform syntax, including but not limited to:
- Flythrough tools,
- Vector field tracing (including magnetic field mapping),
- Coordinate conversions,
- Domain-specific interactive plots of publication quality,
- Modular driver swapping,
- Satellite constellation mission planning tools,
- Simulated imagery, and
- A line of sight calculation tool,
- Greatly reduce the programming skill currently required outside of Kamodo to perform model validation studies and model-data comparisons,
- Enable model output utilization both on the cloud and on personal laptops in a variety of methods (e.g. through HAPI and interactive calls from the command line),
- Streamline the CCMC user workflow by becoming interoperable with other CCMC services (e.g. CAMEL and the various scoreboards),
- And become the next generation interface for CCMC users to interact with and analyze model outputs (e.g. through ROR and IR),
...all while keeping the developed software open-source and freely available. The Kamodo team also supports the heliophysics community by pursuing interoperability with commonly-used python packages, collaborating with community members to add model outputs and new functionalities, and remaining involved with community events (e.g. conferences, challenges, and research support). As the library of supported model outputs types expands and new model-agnostic tools are added, Kamodo will become a staple software package in the heliophysics community to transform current workflows into a more efficient and productive process. We are building the next generation of capability with Kamodo. Join us!
## Kamodo currently supports:
- ADELPHI: AMPERE-Derived ELectrodynamic Properties of the High-latitude Ionosphere, https://doi.org/10.1029/2020SW002677
- AMGeO: Assimilative Mapping of Geospace Observations, https://doi.org/10.5281/zenodo.3564914
- CTIPe: Coupled Thermosphere Ionosphere Plasmasphere Electrodynamics Model, https://doi.org/10.1029/2007SW000364
- DTM: The Drag Temperature Model, https://doi.org/10.1051/swsc/2015001
- GITM: Global Ionosphere Thermosphere Model, https://doi.org/10.1016/j.jastp.2006.01.008
- IRI: International Reference Ionosphere Model, https://doi.org/10.5194/ars-16-1-2018
- OpenGGCM_GM: The Open Geospace General Circulation Model - GM outputs only, https://doi.org/10.1023/A:1014228230714
- SuperDARN_uni: SuperDARN uniform grid output, https://doi.org/10.1029/2010JA016017
- SuperDARN_equ: SuperDARN equal area grid output, https://doi.org/10.1029/2010JA016017
- SWMF_IE: Space Weather Modeling Framework - IE outputs, https://doi.org/10.1029/2006SW000272
- SWMF_GM: Space Weather Modeling Framework - GM outputs, https://doi.org/10.1029/2006SW000272
- TIEGCM: Thermosphere Ionosphere Electrodynamics General Circulation Model, https://doi.org/10.1029/2012GM001297
- WACCMX: Whole Atmosphere Community Climate Model With Thermosphere and Ionosphere Extension, https://doi.org/10.1002/2017MS001232
- WAMIPE: The coupled Whole Atmosphere Model - Ionosphere Plasmasphere Model, https://doi.org/10.1002/2015GL067312 and https://doi.org/10.1029/2022SW003193
- Weimer: Weimer Ionosphere model, https://doi.org/10.1029/2005JA011270
- It also supports any data available through the HAPI interface.
## Kamodo Installation Instructions
Kamodo is built to run with at least 16 GB of RAM. Attempting to run Kamodo with less memory may result in errors.
### Conda prompt commands:
- Move to the directory where you want the Kamodo package to be stored or if you wish to create a new environment, use this command:
> conda create -n Kamodo_env python=3.7
- Add the packages needed by the CCMC readers to the desired environment (replace 'Kamodo_env' with your environment name):
> conda install -n Kamodo_env -c conda-forge netCDF4 cdflib astropy ipython jupyter h5py sgp4
- Activate the desired environment.
> conda activate Kamodo_env
- Install remaining dependencies:
> python -m pip install --upgrade spacepy
> python -m pip install hapiclient
- Download CCMC Kamodo to the current directory:
> git clone https://github.com/nasa/Kamodo.git
- Install the CCMC Kamodo package. (Check the directory structure before using this command. The ./Kamodo directory should contain the kamodo_ccmc directory.)
> python -m pip install ./Kamodo
Note: Developers should install CCMC Kamodo with the -e option
### Testing commands from an ipython or notebook session
```python
from kamodo import Kamodo
k = Kamodo()
import kamodo_ccmc.flythrough.model_wrapper as MW
MW.Model_Variables('OpenGGCM_GM')
```
The OpenGGCM_GM model accepts the standardized variable names listed below.
-----------------------------------------------------------------------------------
B_x : '['x component of magnetic field', 0, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'nT']'
B_y : '['y component of magnetic field', 1, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'nT']'
B_z : '['z component of magnetic field', 2, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'nT']'
E_x : '['x component of electric field', 6, 'GSE', 'car', ['time', 'x', 'x', 'x'], 'mV/m']'
E_y : '['y component of electric field', 7, 'GSE', 'car', ['time', 'y', 'y', 'y'], 'mV/m']'
E_z : '['z component of electric field', 8, 'GSE', 'car', ['time', 'z', 'z', 'z'], 'mV/m']'
N_plasma : '['number density of plasma (hydrogen equivalent)', 12, 'GSE', 'car', ['time', 'x', 'y', 'z'], '1/cm**3']'
P_plasma : '['plasma pressure', 14, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'pPa']'
eta : '['resistivity', 13, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'm**2/s']'
j_x : '['current density, x component', 15, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'muA/m**2']'
j_y : '['current density, y component', 16, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'muA/m**2']'
j_z : '['current density, z component', 17, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'muA/m**2']'
v_plasmax : '['x component of plasma velocity', 9, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'km/s']'
v_plasmay : '['y component of plasma velocity', 10, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'km/s']'
v_plasmaz : '['z component of plasma velocity', 11, 'GSE', 'car', ['time', 'x', 'y', 'z'], 'km/s']'
## Citing Kamodo
When publishing research which used Kamodo, please provide appropriate credit to the CCMC and the Kamodo team via citation or acknowledgment. Please also let the team know of publications or presentations that use Kamodo. Below is list of publications for Kamodo.
- Pembroke, A., D. De Zeeuw, L. Rastaetter, R. Ringuette, O. Gerland, D. Patel and M. Contreras (2022). Kamodo: A functional API for space weather models and data. JOSS 7, 75, 4053, https://doi.org/10.21105/joss.04053.
- Ringuette, R., D. De Zeeuw, L. Rastaetter, A. Pembroke, O. Gerland, K. Garcia-Sage (2022). Kamodo’s model-agnostic satellite flythrough: Lowering the utilization barrier for heliophysics model outputs, Frontiers in Astronomy and Space Sciences, vol 9. http://dx.doi.org/10.3389/fspas.2022.1005977.
- Ringuette, R., L. Rastaetter, D. De Zeeuw, K. Garcia-Sage, R. Robinson, and O. Gerland (2022). Kamodo's Satellite Constellation Mission Planning Tool, poster presentation presented by L. Rastaetter at the 2022 Fall meeting of AGU, Dec 12-16, Chicago, IL, USA. https://doi.org/10.22541/essoar.167214257.73153757/v1.
- Ringuette, R., L. Rastaetter, D. De Zeeuw, A. Pembroke, and O. Gerland (2023). Simplifying model data access and utilization. Adv. Space. Res. under review.
## Resources
- CCMC's Kamodo Official website - https://ccmc.gsfc.nasa.gov/tools/kamodo/
- CCMC's Kamodo Documentation page - https://nasa.github.io/Kamodo/
- Sample model outputs - https://ccmc.gsfc.nasa.gov/RoR_WWW/output_files/KAMODO_DEMO/
- Youtube tutorial channel - https://www.youtube.com/playlist?list=PLBWJQ5-pik_yBBcrpDRPM2hLluh-jreFa
## The Kamodo team
**Dr. Lutz Rastaetter**
- ORCiD: https://orcid.org/0000-0002-7343-4147
- NASA Staff Page: https://ccmc.gsfc.nasa.gov/staff/lutz-rastaetter/
**Dr. Darren De Zeeuw**
- ORCiD: https://orcid.org/0000-0002-4313-5998
- NASA Staff Page: https://ccmc.gsfc.nasa.gov/staff/darren-de-zeeuw/
**Dr. Katherine Garcia-Sage**
- ORCiD: https://orcid.org/0000-0001-6398-8755
- NASA Staff Page: https://ccmc.gsfc.nasa.gov/staff/katherine-garcia-sage/
## Open-Source License
Kamodo is an official NASA open source software package. Kamodo's official source code is hosted on github under a permissive NASA open source license: For more details, go here: https://github.com/nasa/Kamodo/blob/master/LICENSE
|
nasaREPO_NAMEKamodoPATH_START.@Kamodo_extracted@Kamodo-master@README.md@.PATH_END.py
|
{
"filename": "2_RunningTheCode.ipynb",
"repo_name": "natashabatalha/virga",
"repo_path": "virga_extracted/virga-master/docs/notebooks/2_RunningTheCode.ipynb",
"type": "Jupyter Notebook"
}
|
# Running the Code
In this tutorial you will learn:
1. How to run the code!
You should already be familiar with:
1. How to pick condensates to run in a cloud model
2. What is $f_{sed}$ and what number is right for my model?
3. What are the chemical limitations in the code
4. How to compute initial Mie scattering grid
```python
from bokeh.io import output_notebook
from bokeh.plotting import show, figure
from bokeh.palettes import Colorblind
output_notebook()
import numpy as np
import pandas as pd
import astropy.units as u
#cloud code
import virga.justdoit as jdi
```
## Simple Isothermal/Constant $K_{z}$ Example
Let's take the isothermal example from the first tutorial to begin gaining intuition for using the code
```python
pressure = np.logspace(-5,3,30) #simple isotherml PT profile (kelvin)
temperature = np.zeros(30)+1600
kz = np.zeros(30)+ 1e10 #constant kz profile
metallicity = 1 #atmospheric metallicity relative to Solar
mean_molecular_weight = 2.2 # atmospheric mean molecular weight
#get pyeddy recommendation for which gases to run
recommended_gases = jdi.recommend_gas(pressure, temperature,
metallicity,mean_molecular_weight)
print(recommended_gases)
```
### Define the `Atmosphere Class`
```python
#set cloud species to condense
#let's take the recommended gases at face value for now
sum_planet = jdi.Atmosphere(['MgSiO3'],fsed=0.1,mh=metallicity,
mmw = mean_molecular_weight)
```
### Set gravity and the P/T/Kz profile
```python
#set the planet gravity
sum_planet.gravity(gravity=357.00, gravity_unit=u.Unit('cm/(s**2)'))
#PT
sum_planet.ptk(df = pd.DataFrame({'pressure':pressure, 'temperature':temperature,
'kz':kz}))
```
```python
#you can also read in a file using pandas.read_csv()
pd.DataFrame({'pressure':pressure, 'temperature':temperature,
'kz':kz}).to_csv('test.csv')
sum_planet.ptk(filename='test.csv',usecols = [1,2,3])
```
#### The minimum values for `kz`
There is a parameter called `kz_min` in the ptk function. The default value is `kz_min=1e5`. This number isn't necessarily a hard and fast rule, but it does avoid numerical instabilities. Another thing to note is that __this excludes negative kz values__. The code will alert you if you are tripping the `kz_min` reset.
```python
#set the planet gravity
sum_planet.gravity(gravity=357.00, gravity_unit=u.Unit('cm/(s**2)'))
#This is what happens when you trip up the minimum KZ value
sum_planet.ptk(df = pd.DataFrame({'pressure':pressure, 'temperature':temperature,
'kz':kz*-1}))
```
### Run the code
```python
#directory where mieff files are
mieff_directory = '/data/virga/'
sum_planet.ptk(filename='test.csv',usecols = [1,2,3])
#start with the simplest output
#get total opacity, single scattering, asymmetry, and individual optical depths
df_out = sum_planet.compute(directory=mieff_directory)
```
```python
#get full dictionary output
all_out = sum_planet.compute(directory=mieff_directory, as_dict=True)
```
## Exploring `dict` Output
There are many things to explore in the `dict` output. In the next tutorial, we will reproduce some of the most common plots used in papers to analyze cloud runs.
```python
#see what outputs exist in the dictionary
all_out.keys()
```
Hopefully the names of the `dict` elements are self explanatory. If not, here is a brief description of each. `nl`=number of layers, `nw`=number of wavelengths, `ng`=number of gases.
- `condensate_mmr`(nl x ng): Mean mass mixing ratio (concentration) of the condensate. See `qc` in A&M 01 Eqn. 4 and Eqn. 8.
- `cond_plus_gas_mmr`(nl x ng): Mean mass mixing ratio of the condensate plus the vapor. See `qt` in A&M 01 Eqn. 7.
- `mean_particle_r`(nl x ng): Geometric mean particle radius. See `r_g` in A&M 01 Eqn. 13.
- `droplet_eff_r`(nl x ng): Effective (area-weighted) droplet radius. See `r_eff` in A&M 01 Eqn. 17
- `column_density`(nl x ng): Total number concentration of particles PER LAYER. See `N` in A&M 01 Eqn. 14
- `opd_per_layer`(nl x nw): Total extinction PER layer. This includes all gases.
- `single_scattering`(nl x nw): Total single scattering albedo
- `asymmetry`(nl x nw): Total asymmetry
- `opd_by_gas`(nl x ng): Optical depth for conservative geometric scatterers separated by gas. E.g. [Fig 7 in Morley+2012](https://arxiv.org/pdf/1206.4313.pdf)
Additionally, scalar input contains some of the original input
```python
all_out['scalar_inputs']
```
```python
```
|
natashabatalhaREPO_NAMEvirgaPATH_START.@virga_extracted@virga-master@docs@notebooks@2_RunningTheCode.ipynb@.PATH_END.py
|
{
"filename": "recipes_IMAGE.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/niri/recipes/sq/recipes_IMAGE.py",
"type": "Python"
}
|
"""
Recipes available to data with tags ['NIRI', 'IMAGE'].
Default is "reduce".
"""
# Dataset tags that must all be present for these recipes to be selected
# (see module docstring).
recipe_tags = {'NIRI', 'IMAGE'}
def reduce(p):
    """
    This recipe processes NIRI data up to and including alignment and stacking.
    A single stacked output image is produced.
    It will attempt to do dark and flat correction if a processed calibration
    is available. Sky subtraction is done when possible. QA metrics are
    measured.

    Parameters
    ----------
    p : PrimitivesBASE object
        A primitive set matching the recipe_tags.
    """
    # Instrument-signature removal: DQ/VAR planes, linearization, dark/flat.
    p.prepare()
    p.addDQ()
    p.removeFirstFrame()
    p.ADUToElectrons()
    p.addVAR(read_noise=True, poisson_noise=True)
    p.nonlinearityCorrect()
    p.darkCorrect()
    p.flatCorrect()
    # First-pass sky subtraction without object masking, used only to detect
    # sources; the resulting OBJMASK is transferred back onto the sky frames
    # and the throwaway 'skysub' stream is discarded.
    p.separateSky()
    p.associateSky(stream='sky')
    p.skyCorrect(instream='sky', mask_objects=False, outstream='skysub')
    p.detectSources(stream='skysub')
    p.transferAttribute(stream='sky', source='skysub', attribute='OBJMASK')
    p.clearStream(stream='skysub')
    # Second-pass sky subtraction with the detected objects masked out.
    p.associateSky()
    p.skyCorrect(mask_objects=True)
    p.cleanReadout()
    # Align, resample, scale and stack the sky-subtracted frames.
    p.detectSources()
    p.adjustWCSToReference()
    p.resampleToCommonFrame()
    p.scaleCountsToReference()
    p.stackFrames()
    p.storeProcessedScience(suffix="_image")
    return
def ultradeep(p):
    """
    This recipe processes NIRI data to produce a single final stacked image.
    It will attempt to do dark and flat correction if a processed calibration
    is available.

    It conducts an additional pass over and above the standard recipe, where
    objects are found in the full stack and then masked out in the individual
    inputs, to improve the quality of the sky subtraction. It is designed for
    deep on-source-dithered sequences.

    Parameters
    ----------
    p : PrimitivesBASE object
        A primitive set matching the recipe_tags.
    """
    # Instrument-signature removal (as in the standard 'reduce' recipe).
    p.prepare()
    p.addDQ()
    p.removeFirstFrame()
    p.ADUToElectrons()
    p.addVAR(read_noise=True, poisson_noise=True)
    p.nonlinearityCorrect()
    p.darkCorrect()
    p.flatCorrect()
    # A shortcut way to copy all the AD object to a new stream
    # since this doesn't modify the AD objects
    p.flushPixels(outstream='flat_corrected')
    p.separateSky()
    # This recipe requires every frame to contain the target on source.
    assert len(p.streams['main']) == len(p.streams['sky']), \
        "Sequence includes sky-only frames"
    # First pass: sky-subtract without object masking, detect sources, and
    # propagate the resulting OBJMASK back for a cleaner second pass.
    p.associateSky(stream='sky')
    p.skyCorrect(instream='sky', mask_objects=False, outstream='skysub')
    p.detectSources(stream='skysub')
    p.transferAttribute(stream='sky', source='skysub', attribute='OBJMASK')
    p.clearStream(stream='skysub')
    p.associateSky()
    p.skyCorrect(mask_objects=True)
    p.detectSources()
    p.adjustWCSToReference()
    # Transfer correct WCS to inputs
    p.transferAttribute(stream='flat_corrected', source='main', attribute='wcs')
    p.flushPixels()
    p.resampleToCommonFrame()
    p.scaleCountsToReference()
    p.stackFrames()
    p.detectSources()
    p.writeOutputs()  # effectively the standard recipe output
    # Extra pass: mask objects found in the full stack in every input frame,
    # then redo sky subtraction and stacking from the flat-corrected copies.
    p.transferObjectMask(stream='flat_corrected', source='main')
    p.clearStream(stream='main')
    p.dilateObjectMask(instream='flat_corrected', outstream='main', dilation=2)
    p.clearStream(stream='flat_corrected')  # no longer needed
    p.separateSky()
    p.associateSky()
    p.skyCorrect(mask_objects=True)
    p.flushPixels()
    p.detectSources()
    p.adjustWCSToReference()
    p.resampleToCommonFrame()
    p.scaleCountsToReference()
    p.stackFrames()
    p.writeOutputs()
    p.storeProcessedScience(suffix="_image")
def ultradeep_part1(p):
    """
    This recipe simply performs the standard reduction steps to remove
    instrumental signatures from the inputs. It's intended to be run as
    a first step for ultradeep (three-pass) imaging reduction, to
    produce intermediate reduction products that do not need to be
    recreated if there is an issue with the initial reduction.

    Parameters
    ----------
    p : PrimitivesBASE object
        A primitive set matching the recipe_tags.
    """
    # Instrument-signature removal only; the outputs (flat-corrected frames)
    # are the inputs expected by ultradeep_part2.
    p.prepare()
    p.addDQ()
    p.removeFirstFrame()
    p.ADUToElectrons()
    p.addVAR(read_noise=True, poisson_noise=True)
    p.nonlinearityCorrect()
    p.darkCorrect()
    p.flatCorrect()
def ultradeep_part2(p):
    """
    This recipe takes _flatCorrected images from part 1 as input and
    continues the reduction to produce a stacked image. It then
    identifies sources in the stack and transfers the OBJMASK back to the
    individual input images, saving those to disk, ready for part 3.

    Parameters
    ----------
    p : PrimitivesBASE object
        A primitive set matching the recipe_tags.
    """
    # Keep an untouched copy of the flat-corrected inputs for the final pass.
    p.copyInputs(outstream='flat_corrected')
    p.separateSky()
    # This recipe requires every frame to contain the target on source.
    assert len(p.streams['main']) == len(p.streams['sky']), \
        "Sequence must not contain sky-only frames"
    # First pass: sky-subtract without object masking, detect sources, and
    # propagate the resulting OBJMASK back for a cleaner second pass.
    p.associateSky(stream='sky')
    p.skyCorrect(instream='sky', mask_objects=False, outstream='skysub')
    p.detectSources(stream='skysub')
    p.transferAttribute(stream='sky', source='skysub', attribute='OBJMASK')
    p.clearStream(stream='skysub')
    p.associateSky()
    p.skyCorrect(mask_objects=True)
    p.detectSources()
    p.adjustWCSToReference()
    # Transfer correct WCS to inputs
    p.transferAttribute(stream='flat_corrected', source='main', attribute='wcs')
    p.flushPixels()
    p.resampleToCommonFrame()
    p.scaleCountsToReference()
    p.stackFrames()
    p.detectSources()
    p.writeOutputs()  # effectively the standard recipe output
    # NOTE(review): the combined 'ultradeep' recipe calls transferObjectMask
    # with stream=/source= keywords; this variant uses instream=/outstream=.
    # Confirm against the primitive's signature that both spellings are valid.
    p.transferObjectMask(instream='flat_corrected', outstream='main', source='main')
def ultradeep_part3(p):
    """
    This recipe takes flat-corrected images with OBJMASKs as inputs and
    produces a final stack. It should take the _objmaskTransferred outputs
    from part 2.

    Parameters
    ----------
    p : PrimitivesBASE object
        A primitive set matching the recipe_tags.
    """
    # Final sky subtraction with objects masked, then align and stack.
    p.separateSky()
    p.associateSky()
    p.skyCorrect(mask_objects=True)
    p.detectSources()
    p.adjustWCSToReference()
    p.resampleToCommonFrame()
    p.scaleCountsToReference()
    p.stackFrames()
    p.writeOutputs()
    p.storeProcessedScience(suffix="_image")
def makeSkyFlat(p):
    """
    This recipe makes a flatfield image from a series of dithered sky images.

    Parameters
    ----------
    p : PrimitivesBASE object
        A primitive set matching the recipe_tags.
    """
    # Instrument-signature removal (no flat correction: we're making the flat).
    p.prepare()
    p.addDQ()
    p.removeFirstFrame()
    p.ADUToElectrons()
    p.addVAR(read_noise=True, poisson_noise=True)
    p.nonlinearityCorrect()
    p.darkCorrect()
    # Make a "fastsky" by combining frames
    p.stackFrames(operation='median', scale=True, outstream='fastsky')
    p.normalizeFlat(stream='fastsky')
    p.thresholdFlatfield(stream='fastsky')
    # Flatfield with the fastsky and find objects
    p.flatCorrect(flat=p.streams['fastsky'][0], outstream='flattened')
    p.detectSources(stream='flattened')
    p.dilateObjectMask(dilation=10, stream='flattened')
    p.addObjectMaskToDQ(stream='flattened')
    #p.writeOutputs(stream='flattened')
    # Transfer the object masks back so stars are rejected from the final
    # stack, then build and normalize the proper sky flat.
    p.transferAttribute(source='flattened', attribute='mask')
    p.stackFrames(operation='mean', scale=True, reject_method="minmax", nlow=0, nhigh=1)
    p.normalizeFlat()
    p.thresholdFlatfield()
    p.storeProcessedFlat(force=True)
    return
# Recipe run when none is requested explicitly (module docstring: "Default
# is 'reduce'").
_default = reduce
def alignAndStack(p):
    """
    This recipe stacks already preprocessed data: it aligns the inputs to a
    common WCS, resamples and scales them, and combines them into one frame.

    Parameters
    ----------
    p : PrimitivesBASE object
        A primitive set matching the recipe_tags.
    """
    p.detectSources()
    p.adjustWCSToReference()
    p.resampleToCommonFrame()
    p.scaleCountsToReference()
    p.stackFrames()
    return
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@niri@recipes@sq@recipes_IMAGE.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/agents/agent_toolkits/playwright/__init__.py",
"type": "Python"
}
|
"""Playwright browser toolkit."""
from typing import TYPE_CHECKING, Any
from langchain._api import create_importer
if TYPE_CHECKING:
from langchain_community.agent_toolkits.playwright.toolkit import (
PlayWrightBrowserToolkit,
)
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
"PlayWrightBrowserToolkit": "langchain_community.agent_toolkits.playwright.toolkit"
}
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
"""Look up attributes dynamically."""
return _import_attribute(name)
__all__ = [
"PlayWrightBrowserToolkit",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@agents@agent_toolkits@playwright@__init__.py@.PATH_END.py
|
{
"filename": "test_ramp_extraction.py",
"repo_name": "spacetelescope/jdaviz",
"repo_path": "jdaviz_extracted/jdaviz-main/jdaviz/configs/rampviz/tests/test_ramp_extraction.py",
"type": "Python"
}
|
import pytest
from regions import CirclePixelRegion, PixCoord
from jdaviz.core.marks import Lines
from jdaviz.configs.imviz.plugins.parsers import HAS_ROMAN_DATAMODELS
@pytest.mark.skipif(not HAS_ROMAN_DATAMODELS, reason="roman_datamodels is not installed")
def test_previews_roman(rampviz_helper, roman_level_1_ramp):
    # Same preview checks as the JWST case, driven by a Roman level-1 ramp.
    _ramp_extraction_previews(rampviz_helper, roman_level_1_ramp)
def test_previews_jwst(rampviz_helper, jwst_level_1b_ramp):
    # Preview checks driven by a JWST level-1b ramp fixture.
    _ramp_extraction_previews(rampviz_helper, jwst_level_1b_ramp)
def _ramp_extraction_previews(_rampviz_helper, _ramp_file):
    """Shared body for the preview tests.

    Loads a ramp cube, defines a circular subset, and checks that the
    integration viewer shows the expected number of Lines marks for every
    combination of the Ramp Extraction plugin's live/subset preview toggles.
    """
    _rampviz_helper.load_data(_ramp_file)

    # add subset:
    region = CirclePixelRegion(center=PixCoord(12.5, 15.5), radius=2)
    _rampviz_helper.plugins['Subset Tools'].import_region(region)

    ramp_extr = _rampviz_helper.plugins['Ramp Extraction']._obj
    subsets = _rampviz_helper.app.get_subsets()
    ramp_cube = _rampviz_helper.app.data_collection[0]
    # Last axis of the cube is the group (up-the-ramp sample) dimension.
    n_groups = ramp_cube.shape[-1]
    assert len(subsets) == 1
    assert 'Subset 1' in subsets
    integration_viewer = _rampviz_helper.app.get_viewer('integration-viewer')
    # contains a layer for the default ramp extraction and the subset:
    assert len(integration_viewer.layers) == 2

    # profile viewer x-axis is the group dimension
    assert str(integration_viewer.state.x_att) == 'Pixel Axis 2 [x]'

    # no subset previews should be visible yet:
    assert len([
        mark for mark in integration_viewer.native_marks
        # should be a subclass of Lines, should be visible,
        if mark.visible and isinstance(mark, Lines) and
        # and the default profile is a 1D series with length n_groups:
        len(mark.x) == n_groups
    ]) == 1

    # check that when the plugin is active, there's one ramp profile generated by the
    # plugin per pixel in the subset (if show_subset_preview),
    # plus one live preview (if show_live_preview):
    for show_live_preview in [True, False]:
        for show_subset_preview in [True, False]:
            with ramp_extr.as_active():
                ramp_extr.show_live_preview = show_live_preview
                ramp_extr.show_subset_preview = show_subset_preview
                ramp_extr.aperture_selected = 'Subset 1'

                subset_state = subsets[ramp_extr.aperture_selected][0]['subset_state']
                # Count subset pixels in the spatial plane (drop group axis).
                n_pixels_in_subset = subset_state.to_mask(ramp_cube)[..., 0].sum()

                assert len([
                    mark for mark in integration_viewer.custom_marks
                    if mark.visible and isinstance(mark, Lines) and
                    len(mark.x) == n_groups
                ]) == int(show_subset_preview) * n_pixels_in_subset + int(show_live_preview)
|
spacetelescopeREPO_NAMEjdavizPATH_START.@jdaviz_extracted@jdaviz-main@jdaviz@configs@rampviz@tests@test_ramp_extraction.py@.PATH_END.py
|
{
"filename": "constraints.py",
"repo_name": "timothydmorton/VESPA",
"repo_path": "VESPA_extracted/VESPA-master/vespa/stars/constraints.py",
"type": "Python"
}
|
from __future__ import print_function,division
import logging
import copy
try:
import numpy as np
except ImportError:
np = None
from ..hashutils import hasharray, hashcombine, hashdict
class Constraint(object):
    """Base class for all constraints to be applied to StarPopulations.

    A constraint is essentially a boolean mask (``ok``) over a population,
    together with a name and any extra attributes supplied as keywords.
    """

    # Names of array-valued attributes that resample() must reindex.
    arrays = ('ok',)

    def __init__(self, mask, name='', **kwargs):
        self.name = name
        self.ok = np.array(mask)
        for attr, value in kwargs.items():
            setattr(self, attr, value)

    def __eq__(self, other):
        # Equality is defined through the hash (name + mask contents).
        return hash(self) == hash(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return hashcombine(hash(self.name), hasharray(self.ok))

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<%s: %s>' % (type(self), str(self))

    @property
    def N(self):
        """Total number of entries in the mask."""
        return np.size(self.ok)

    @property
    def wok(self):
        """Indices of entries that pass the constraint."""
        return np.where(self.ok)[0]

    @property
    def frac(self):
        """Fraction of entries passing the constraint."""
        return float(self.ok.sum()) / self.N

    def resample(self, inds):
        """Return a copy of the constraint with arrays reindexed by *inds*."""
        clone = copy.deepcopy(self)
        for attr in self.arrays:
            setattr(clone, attr, getattr(clone, attr)[inds])
        return clone
class ConstraintDict(dict):
    """
    A dictionary that is hashable.

    Hashing is delegated to ``hashdict`` from ``hashutils`` so the dict's
    contents determine its hash.
    """
    def __hash__(self):
        return hashdict(self)
class JointConstraintAnd(Constraint):
    """Joint constraint excluding only points excluded by *both* inputs.

    NOTE: ``~(~c1.ok & ~c2.ok)`` equals ``c1.ok | c2.ok`` (De Morgan), so a
    point passes if it passes either constraint; "and" refers to the joint
    exclusion condition, not the joint pass condition.
    """
    def __init__(self,c1,c2,name='',**kwargs):
        self.name = name  # redundant: Constraint.__init__ also sets it
        mask = ~(~c1.ok & ~c2.ok)
        Constraint.__init__(self,mask,name=name,**kwargs)
class JointConstraintOr(Constraint):
    """Joint constraint excluding points excluded by *either* input.

    NOTE: ``~(~c1.ok | ~c2.ok)`` equals ``c1.ok & c2.ok`` (De Morgan), so a
    point passes only if it passes both constraints; "or" refers to the
    joint exclusion condition.
    """
    def __init__(self,c1,c2,name='',**kwargs):
        self.name = name  # redundant: Constraint.__init__ also sets it
        mask = ~(~c1.ok | ~c2.ok)
        Constraint.__init__(self,mask,name=name,**kwargs)
class RangeConstraint(Constraint):
    """Constraint satisfied where ``vals`` lies strictly between lo and hi."""

    # 'vals' must be reindexed alongside 'ok' when resampling.
    arrays = Constraint.arrays + ('vals',)

    def __init__(self, vals, lo, hi, name='', **kwargs):
        self.lo = lo
        self.hi = hi
        within = (vals > lo) & (vals < hi)
        Constraint.__init__(self, within, name=name,
                            vals=vals, lo=lo, hi=hi, **kwargs)

    def __str__(self):
        # TODO: implement default string formatting better (carried over).
        return '{:.3g} < {} < {:.3g}'.format(self.lo, self.name, self.hi)
class UpperLimit(RangeConstraint):
    """RangeConstraint with no lower bound: satisfied where vals < hi."""
    def __init__(self,vals,hi,name='',**kwargs):
        RangeConstraint.__init__(self,vals,name=name,lo=-np.inf,hi=hi,**kwargs)
    def __str__(self):
        return '{} < {:.3g}'.format(self.name,self.hi)
class LowerLimit(RangeConstraint):
    """RangeConstraint with no upper bound: satisfied where vals > lo."""
    def __init__(self,vals,lo,name='',**kwargs):
        RangeConstraint.__init__(self,vals,name=name,lo=lo,hi=np.inf,**kwargs)
    def __str__(self):
        return '{} > {:.3g}'.format(self.name,self.lo)
class MeasurementConstraint(RangeConstraint):
    """Constraint that vals lie within ``thresh`` sigma of a measurement.

    The allowed (open) interval is (val - thresh*dval, val + thresh*dval).
    """
    def __init__(self,vals,val,dval,thresh=3,name='',**kwargs):
        lo = val - thresh*dval
        hi = val + thresh*dval
        RangeConstraint.__init__(self,vals,lo,hi,name=name,val=val,
                                 dval=dval,thresh=thresh,**kwargs)
class FunctionLowerLimit(Constraint):
    """Constraint satisfied where ``ys > fn(xs)``."""
    # 'xs' and 'ys' must be reindexed alongside 'ok' when resampling.
    arrays = Constraint.arrays + ('xs','ys')
    def __init__(self,xs,ys,fn,name='',**kwargs):
        Constraint.__init__(self,ys > fn(xs),name=name,xs=xs,ys=ys,fn=fn,**kwargs)
class FunctionUpperLimit(Constraint):
    """Constraint satisfied where ``ys < fn(xs)``."""
    # 'xs' and 'ys' must be reindexed alongside 'ok' when resampling.
    arrays = Constraint.arrays + ('xs','ys')
    def __init__(self,xs,ys,fn,name='',**kwargs):
        Constraint.__init__(self,ys < fn(xs),name=name,
                            xs=xs,ys=ys,fn=fn,**kwargs)
|
timothydmortonREPO_NAMEVESPAPATH_START.@VESPA_extracted@VESPA-master@vespa@stars@constraints.py@.PATH_END.py
|
{
"filename": "tips_tricks.ipynb",
"repo_name": "afeinstein20/eleanor",
"repo_path": "eleanor_extracted/eleanor-main/notebooks/tips_tricks.ipynb",
"type": "Jupyter Notebook"
}
|
[1.1 Choosing an Aperture](#first-bullet)
[1.2 Adding or Removing Flux Correction Terms](#second-bullet)
[1.3 Running eleanor Locally](#fourth-bullet)
[1.4 Targets Without TIC IDs](#fifth-bullet)
[1.5 Handling Saturated Targets](#sixth-bullet)
[1.6 Example: Rediscovering $\pi$ Mensae c](#seventh-bullet)
# 1.1 Choosing an Aperture <a class="anchor" id="first-bullet"></a>
Let's begin with the same WASP-100 we used in the quickstart tutorial. I'm going to hide warnings too, just to make it easier to see what's going on.
```python
from IPython.display import Image
import warnings
warnings.filterwarnings('ignore')
```
```python
import eleanor
import numpy as np
import matplotlib.pyplot as plt
```
```python
star = eleanor.Source(tic=38846515, sector=1)
data = eleanor.TargetData(star, height=15, width=15, bkg_size=31, do_psf=False, do_pca=True)
q = data.quality == 0
```
INFO: Found cached file /Users/bmontet/.eleanor/mastDownload/HLSP/hlsp_eleanor_tess_ffi_postcard-s0001-4-1-cal-0902-1078_tess_v2_pc/hlsp_eleanor_tess_ffi_postcard-s0001-4-1-cal-0902-1078_tess_v2_bkg.fits with expected size 78955200. [astroquery.query]
INFO: Found cached file /Users/bmontet/.eleanor/mastDownload/HLSP/hlsp_eleanor_tess_ffi_postcard-s0001-4-1-cal-0902-1078_tess_v2_pc/hlsp_eleanor_tess_ffi_postcard-s0001-4-1-cal-0902-1078_tess_v2_pc.fits with expected size 158022720. [astroquery.query]
INFO: Found cached file /Users/bmontet/.eleanor/mastDownload/HLSP/hlsp_eleanor_tess_ffi_postcard-s0001-4-1-cal-0902-1078_tess_v2_pc/hlsp_eleanor_tess_ffi_postcard-s0001-4-1_tess_v2_pm.txt with expected size 237847. [astroquery.query]
WARNING:tensorflow:From /Users/bmontet/research/tess/eleanor/eleanor/targetdata.py:837: The name tf.logging.set_verbosity is deprecated. Please use tf.compat.v1.logging.set_verbosity instead.
WARNING:tensorflow:From /Users/bmontet/research/tess/eleanor/eleanor/targetdata.py:837: The name tf.logging.ERROR is deprecated. Please use tf.compat.v1.logging.ERROR instead.
100%|██████████| 1282/1282 [00:10<00:00, 113.98it/s]
What if you're not satisfied with $\texttt{eleanor}$'s default choice of aperture? Well, we provide you with three ways to explore different apertures.
(1) All of the apertures $\texttt{eleanor}$ tries behind the scenes are saved! So are their raw and corrected light curves. These are stored as ```data.all_apertures```, ```data.all_raw_flux```, and ```data.all_corr_flux```, respectively. We can explore these options by calling the following, for example.
```python
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15,4), gridspec_kw={'width_ratios':[1,3]})
ax1.imshow(data.tpf[0])
ax1.imshow(data.all_apertures[0], cmap='Greys', alpha=0.7)
ax1.set_title('Aperture over TPF')
ax2.plot(data.time[q], data.all_raw_flux[0][q]/np.nanmedian(data.all_raw_flux[0][q]), 'k', label='Raw')
ax2.plot(data.time[q], data.all_corr_flux[0][q]/np.nanmedian(data.all_corr_flux[0][q]) - 0.015, 'r', label='Corrected')
ax2.set_xlabel('Time [BJD - 2457000]')
ax2.set_ylabel('Normalized Flux')
ax2.legend();
```

(2) $\texttt{eleanor}$ can help you create your own mask. By calling `eleanor.custom_aperture()`, we can choose from a circular or rectangular aperture. We can also choose the size (radius or length x width) and the pixel the aperture is centered on by passing in a list of $\textit{(x,y)}$ to `pos`. The aperture will only be created on pixels within the TPF (it won't spill over to other pixels). Calling `eleanor.custom_aperture()` overwrites `eleanor.TargetData.aperture`, `eleanor.TargetData.raw_flux`, and `eleanor.TargetData.corr_flux`.
```python
eleanor.TargetData.custom_aperture(data, shape='circle', r=2, pos=[7,6], method='exact')
eleanor.TargetData.get_lightcurve(data)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15,4), gridspec_kw={'width_ratios':[1,3]})
ax1.imshow(data.tpf[0])
ax1.imshow(data.aperture, cmap='Greys', alpha=0.5)
ax1.set_title('Aperture over TPF')
#plt.imshow(data.aperture)
ax2.plot(data.time[q], data.raw_flux[q]/np.nanmedian(data.raw_flux[q]), 'k', label='Raw')
ax2.plot(data.time[q], data.corr_flux[q]/np.nanmedian(data.corr_flux[q]) - 0.015, 'r', label='Corrected')
ax2.legend()
ax2.set_xlabel('Time [BJD - 2457000]')
ax2.set_ylabel('Normalized Flux');
```

(3) We can pass in our own mask by creating a 2D array of the same shape as the TPF and calling eleanor.TargetData.get_lightcurve(aperture=mask)
```python
mask = np.zeros(np.shape(data.tpf[0]))
mask[6:8,6:8] = 1
data.get_lightcurve(aperture=mask)
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15,4), gridspec_kw={'width_ratios':[1,3]})
ax1.imshow(data.tpf[0])
ax1.imshow(mask, cmap='Greys', alpha=0.5)
ax1.set_title('Aperture over TPF')
#plt.imshow(data.aperture)
ax2.plot(data.time[q], data.raw_flux[q]/np.nanmedian(data.raw_flux[q]), 'k', label='Raw')
ax2.plot(data.time[q], data.corr_flux[q]/np.nanmedian(data.corr_flux[q]) - 0.035, 'r', label='Corrected')
ax2.legend()
ax2.set_xlabel('Time [BJD - 2457000]')
ax2.set_ylabel('Normalized Flux');
```

# 1.2 Adding or Removing Flux Correction Terms <a class="anchor" id="second-bullet"></a>
When we call eleanor.TargetData(), some simple systematics corrections are automatically performed on the light curve. Let's apply those explicitly to the newly created raw light curve from the custom aperture and see how we can change them.
```python
corr_flux = eleanor.TargetData.corrected_flux(data, flux=data.raw_flux)
```
```python
plt.figure(figsize=(15,5))
plt.plot(data.time[q], corr_flux[q], 'k')
plt.ylabel('Flux', fontsize=24)
plt.xlabel('Time', fontsize=24);
```

## 1.2.1 Changing quality flags
If we think certain data are bad, but not quality flagged, we can give them a different quality label and re-run this function, causing them to be ignored so they do not influence other cadences. By default, all highest quality data have flag zero and all other flags are positive. Let's throw out a four-day chunk temporarily:
```python
data.quality[q][(data.time[q] > 1333) & (data.time[q] < 1337)] = -8675309
corr_flux = eleanor.TargetData.corrected_flux(data, flux=data.raw_flux)
q = data.quality <= 0.5
plt.figure(figsize=(15,5))
plt.plot(data.time[q], corr_flux[q], 'k')
plt.ylabel('Flux', fontsize=24)
plt.xlabel('Time', fontsize=24);
```

## 1.2.2 Removing terms from the model
By default, the ```corrected_flux``` function removes signals that are correlated with the position of the star on the detector, with common modes of nearby stars, and with the background. We can stop these from being a part of ```corrected_flux``` by setting them to zero.
```python
data.cbvs = np.zeros_like(data.cbvs)
data.centroid_xs = np.zeros_like(data.centroid_xs)
data.centroid_ys = np.zeros_like(data.centroid_ys)
corr_flux = eleanor.TargetData.corrected_flux(data, flux=data.raw_flux)
plt.figure(figsize=(15,5))
plt.plot(data.time[q], corr_flux[q], 'k')
plt.ylabel('Flux', fontsize=24)
plt.xlabel('Time', fontsize=24);
```

In this case, we have a noisier light curve that looks more like the raw flux.
We can always get back to where we started by just re-running ```eleanor.TargetData()``` with the same call we used initially.
```python
data = eleanor.TargetData(star, height=15, width=15, bkg_size=31, do_psf=False, do_pca=True)
```
100%|██████████| 1282/1282 [00:11<00:00, 114.33it/s]
# 1.3 Running eleanor locally/without internet <a class="anchor" id="fourth-bullet"></a>
With the default settings, `eleanor` will attempt to find the proper postcard for your target, as well as other data about your system, through MAST. If you don't have internet access, but have downloaded your postcard already (if you've made a light curve for your star of interest previously, you may already have the postcard in your `~/.eleanor` directory), you can avoid all need for the internet with a few simple tricks.
All you need to do are pass through `local = True` in your call to `eleanor.Source()` and give the directory of your postcard and pointing model. By default, these are downloaded to the same directory. If you don't pass through a pointing model directory, it will assume it's in the same place as your postcard. I've put mine in `../../testdir1` and `../../testdir2`, respectively.
You also need to pass through your star's coordinates. Normally you can pass through one of a name, TIC ID, Gaia DR2, ID, or coordinates. In these cases, behind the scenes `eleanor` will use any one of these to get the rest of this information, but behind the scenes relies on the coordinates. You need to pass through the other values as well, but just so they're set to something so `eleanor` doesn't try to look them up. You can set them to zero, that's fine.
```python
coords = (68.959732, -64.02704)
star = eleanor.Source(coords=coords, tic=0, gaia=0, sector=1, local=True,
post_dir='./testdir1')#, pm_dir='./testdir2')
data = eleanor.TargetData(star, do_pca=True, do_psf=False)
```
```python
plt.figure(figsize=(15,5))
q = data.quality == 0
plt.plot(data.time[q], corr_flux[q], 'k')
plt.ylabel('Flux', fontsize=24)
plt.xlabel('Time', fontsize=24);
```

The only thing that will be missing is `star.tess_mag`, but you can set that yourself too before you run `data.save()` and it will be recorded properly.
```python
star.tess_mag = 10.53
```
# 1.4 Targets without TIC IDs <a class="anchor" id="fifth-bullet"></a>
When you pass through ```coords``` into `eleanor.Source()`, it will use these coordinates to find the TIC entry at that location. However, some targets (often very faint or non-stellar in nature) do not appear in the TIC. This will produce an error that inhibits ```eleanor.Source()``` from running successfully. However, as we noted in the previous section, `eleanor` does not use the TIC ID behind the scenes. Passing through a false TIC ID and the proper coordinates will work just fine.
# 1.5 Saturated targets <a class="anchor" id="sixth-bullet"></a>
`eleanor` tests a variety of apertures and selects the one with the lowest CDPP. Very saturated targets ($T < 6-7$, depending on location on the detector) have many pixels at the saturation limit. Choosing a small number of saturated pixels will then produce a very stable light curve, leading `eleanor` to choose very small apertures that do not represent the true astrophysical variability from these sources. To make light curves from these, we need to use larger apertures. This is possible!
```python
star = eleanor.Source(name='alpha draconis', sector=15)
data = eleanor.TargetData(star, height=31, width=13)
```
No eleanor postcard has been made for your target (yet). Using TessCut instead.
```python
plt.imshow(data.tpf[300]);
```

Yes, that's a saturated target. Let's define our own aperture and go forth.
```python
ap = np.zeros_like(data.tpf[300])
ap[:,4:11] = 1
data.get_lightcurve(aperture=ap)
q = data.quality == 0
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15,4), gridspec_kw={'width_ratios':[1,3]})
ax1.imshow(data.tpf[0])
ax1.imshow(data.aperture, cmap='Greys', alpha=0.5)
ax1.set_title('Aperture over TPF')
#plt.imshow(data.aperture)
ax2.plot(data.time[q], data.corr_flux[q]/np.nanmedian(data.corr_flux[q]) - 0.0, 'k')
ax2.set_ylim(0.999, 1.0004)
ax2.set_xlabel('Time [BJD - 2457000]')
ax2.set_ylabel('Normalized Flux');
```

Alpha draconis, with $V=3.7$, has a deep secondary eclipse as reported in Bedding et al. (2020), and the star is so bright the photon noise is at the few ppm level on 30-minute timescales.
It often works very well for bright, saturated targets to use additional regressors, drawing on the corner pixels of the TPF to use on the detrending. An example of this is below.
# 1.6 Putting it all together: recovering $\pi$ Men c <a class="anchor" id="seventh-bullet"></a>
Let's make sure we can recover the planet discovered with TESS data around the bright star $\pi$ Mensae, combining what we have learned about saturated targets with the addition of regressing against corner pixels in the TPF for improved background subtraction.
```python
star = eleanor.Source(name='pi Mensae', sector=1)
data = eleanor.TargetData(star, height=21, width=13)
```
INFO: Found cached file /Users/bmontet/.eleanor/mastDownload/HLSP/hlsp_eleanor_tess_ffi_postcard-s0001-4-2-cal-1588-0268_tess_v2_pc/hlsp_eleanor_tess_ffi_postcard-s0001-4-2-cal-1588-0268_tess_v2_bkg.fits with expected size 78955200. [astroquery.query]
INFO: Found cached file /Users/bmontet/.eleanor/mastDownload/HLSP/hlsp_eleanor_tess_ffi_postcard-s0001-4-2-cal-1588-0268_tess_v2_pc/hlsp_eleanor_tess_ffi_postcard-s0001-4-2-cal-1588-0268_tess_v2_pc.fits with expected size 158022720. [astroquery.query]
INFO: Found cached file /Users/bmontet/.eleanor/mastDownload/HLSP/hlsp_eleanor_tess_ffi_postcard-s0001-4-2-cal-1588-0268_tess_v2_pc/hlsp_eleanor_tess_ffi_postcard-s0001-4-2_tess_v2_pm.txt with expected size 239144. [astroquery.query]
```python
plt.imshow(data.tpf[300]);
```

```python
ap = np.zeros_like(data.tpf[300])
ap[:,3:10] = 1
data.get_lightcurve(aperture=ap)
corr_flux = eleanor.TargetData.corrected_flux(data, flux=data.raw_flux, regressors='corner')
q = data.quality == 0
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15,4), gridspec_kw={'width_ratios':[1,3]})
ax1.imshow(data.tpf[0])
ax1.imshow(data.aperture, cmap='Greys', alpha=0.5)
ax1.set_title('Aperture over TPF')
#plt.imshow(data.aperture)
ax2.plot(data.time[q], corr_flux[q]/np.nanmedian(corr_flux[q]) - 0.0, 'k')
ax2.set_xlabel('Time [BJD - 2457000]')
ax2.set_ylabel('Normalized Flux');
```

It looks like there's a planet here! Let's fold on the known period and phase:
```python
fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15,4), gridspec_kw={'width_ratios':[1,3]})
ax1.imshow(data.tpf[0])
ax1.imshow(data.aperture, cmap='Greys', alpha=0.5)
ax1.set_title('Aperture over TPF')
#plt.imshow(data.aperture)
modtime = np.mod(data.time[q], 6.268)-2.96
ax2.plot(modtime, corr_flux[q]/np.nanmedian(corr_flux[q]) - 0.0, 'k.')
ax2.set_xlabel('Time [BJD - 2457000]')
ax2.set_ylabel('Normalized Flux');
```

There it is!
|
afeinstein20REPO_NAMEeleanorPATH_START.@eleanor_extracted@eleanor-main@notebooks@tips_tricks.ipynb@.PATH_END.py
|
{
"filename": "blockchain.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/document_loaders/blockchain.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    # Imported only for static type checkers; at runtime these names are
    # resolved lazily through __getattr__ below.
    from langchain_community.document_loaders import BlockchainDocumentLoader
    from langchain_community.document_loaders.blockchain import BlockchainType

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {
    "BlockchainType": "langchain_community.document_loaders.blockchain",
    "BlockchainDocumentLoader": "langchain_community.document_loaders",
}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


def __getattr__(name: str) -> Any:
    """Look up attributes dynamically (deprecation shim for old imports)."""
    return _import_attribute(name)


__all__ = [
    "BlockchainType",
    "BlockchainDocumentLoader",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@document_loaders@blockchain.py@.PATH_END.py
|
{
"filename": "_align.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/hoverlabel/_align.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AlignValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="align", parent_name="scattercarpet.hoverlabel", **kwargs
):
super(AlignValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
array_ok=kwargs.pop("array_ok", True),
edit_type=kwargs.pop("edit_type", "none"),
role=kwargs.pop("role", "style"),
values=kwargs.pop("values", ["left", "right", "auto"]),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@hoverlabel@_align.py@.PATH_END.py
|
{
"filename": "test_esa_hubble.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/esa/hubble/tests/test_esa_hubble.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
==============
eHST Tap Tests
==============
European Space Astronomy Centre (ESAC)
European Space Agency (ESA)
"""
import os
import shutil
import gzip
from pathlib import Path
from unittest.mock import MagicMock
from unittest.mock import patch
import numpy as np
import pytest
from astropy import coordinates
from astropy.table.table import Table
from requests.models import Response
from astroquery.esa.hubble import ESAHubbleClass
import astroquery.esa.utils.utils as esautils
from astroquery.esa.hubble.tests.dummy_tap_handler import DummyHubbleTapHandler
from astropy.utils.exceptions import AstropyDeprecationWarning
def data_path(filename):
    """Return the absolute path of *filename* inside the test ``data`` directory."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
def get_mockreturn(method, request, url, params, *args, **kwargs):
    """Monkeypatch replacement for ``ESAHubbleClass._request``.

    Copies the ``<name>.vot.test`` fixture to ``<name>.vot`` and returns the
    destination path instead of performing a real HTTP request.

    NOTE(review): this function is patched onto an instance method, so
    ``method`` presumably receives the ``ESAHubbleClass`` instance (``self``)
    and the remaining parameter names are shifted by one — confirm against
    the ``_request`` call sites.
    """
    # Default fixture unless the request targets a specific observation.
    file = 'm31.vot'
    if 'OBSERVATION_ID' in params:
        file = params['OBSERVATION_ID'] + ".vot"
    response = data_path(file)
    # Refresh the working copy from the pristine '.test' fixture.
    shutil.copy(response + '.test', response)
    return response
@pytest.fixture(autouse=True)
def ehst_request(request):
    """Autouse fixture replacing ``ESAHubbleClass._request`` with the mock."""
    try:
        mp = request.getfixturevalue("monkeypatch")
    except AttributeError:
        # Fallback for very old pytest versions where getfixturevalue did
        # not exist yet (getfuncargvalue is its deprecated predecessor).
        mp = request.getfuncargvalue("monkeypatch")
    mp.setattr(ESAHubbleClass, '_request', get_mockreturn)
    return mp
def get_cone_mockreturn(params, *args, **kwargs):
    """Monkeypatch replacement for ``ESAHubbleClass.cone_search``.

    Copies the cone-search fixture (or an observation-specific one when
    'OBSERVATION_ID' is passed) from its '.test' copy and returns the path.
    """
    file = data_path('cone_search_m31_5.vot')
    if 'OBSERVATION_ID' in kwargs:
        file = kwargs['OBSERVATION_ID'] + ".vot"
    # NOTE(review): in the default branch `file` is already an absolute
    # path, so this second data_path() call relies on os.path.join
    # returning the absolute component unchanged — confirm intended.
    response = data_path(file)
    shutil.copy(response + '.test', response)
    return response
@pytest.fixture(autouse=True)
def ehst_cone_search(request):
    """Autouse fixture replacing ``ESAHubbleClass.cone_search`` with the mock."""
    mp = request.getfixturevalue("monkeypatch")
    mp.setattr(ESAHubbleClass, 'cone_search', get_cone_mockreturn)
    return mp
class MockResponse:
    """Tiny stand-in for a response-like object used by the tests."""

    # Fixed observation identifier exposed for every mocked call.
    observation_id = 'test'

    @staticmethod
    def pformat():
        """Mimic a table's ``pformat`` by simply reporting success."""
        return True
class TestESAHubble:
    """Unit tests for ``ESAHubbleClass`` built on dummy TAP handlers and mocks."""

    def get_dummy_tap_handler(self, method='launch_job', query=None):
        # Build a DummyHubbleTapHandler preloaded with the expected call
        # parameters so tests can later verify how the TAP layer was invoked.
        if query is None:
            query = "select top 10 * from hsc_v2.hubble_sc2"
        parameterst = {'query': query,
                       'output_file': "test2.vot",
                       'output_format': "votable",
                       'verbose': False}
        dummyTapHandler = DummyHubbleTapHandler(method, parameterst)
        return dummyTapHandler

    def test_download_product_errors(self):
        # An unknown product type must be rejected with a ValueError.
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        with pytest.raises(ValueError) as err:
            ehst.download_product(observation_id="J6FL25S4Q",
                                  product_type="DUMMY")
        assert "This product_type is not allowed" in err.value.args[0]

    def test_download_product_by_calibration(self, tmp_path):
        # Download selected by calibration level only.
        parameters = {'observation_id': "J6FL25S4Q",
                      'cal_level': "RAW",
                      'filename': Path(tmp_path, "J6FL25S4Q.vot.test"),
                      'verbose': True}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.download_product(observation_id=parameters['observation_id'],
                              calibration_level=parameters['cal_level'],
                              filename=parameters['filename'],
                              verbose=parameters['verbose'])

    def test_download_product_by_product_type(self, tmp_path):
        # Exercise the download path for each allowed product type.
        parameters = {'observation_id': "J6FL25S4Q",
                      'product_type': "SCIENCE",
                      'filename': Path(tmp_path, "J6FL25S4Q.vot.test"),
                      'verbose': True}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.download_product(observation_id=parameters['observation_id'],
                              product_type=parameters['product_type'],
                              filename=parameters['filename'],
                              verbose=parameters['verbose'])
        # NOTE(review): this repeats "SCIENCE"; possibly another product
        # type (e.g. "PRODUCT") was intended here — confirm.
        parameters['product_type'] = "SCIENCE"
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.download_product(observation_id=parameters['observation_id'],
                              product_type=parameters['product_type'],
                              filename=parameters['filename'],
                              verbose=parameters['verbose'])
        parameters['product_type'] = "PREVIEW"
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.download_product(observation_id=parameters['observation_id'],
                              product_type=parameters['product_type'],
                              filename=parameters['filename'],
                              verbose=parameters['verbose'])

    def test_get_postcard(self, tmp_path):
        # Postcard retrieval with default and explicit resolution.
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.get_postcard(observation_id="X0MC5101T",
                          filename=Path(tmp_path, "X0MC5101T.vot"),
                          verbose=True)
        ehst.get_postcard(observation_id="X0MC5101T",
                          filename=Path(tmp_path, "X0MC5101T.vot"), resolution=1024,
                          verbose=True)

    @patch.object(ESAHubbleClass, 'cone_search')
    @patch.object(ESAHubbleClass, '_query_tap_target')
    def test_query_target(self, mock_query_tap_target, mock_cone_search):
        # query_target should resolve the target and delegate to cone_search.
        mock_query_tap_target.return_value = 10, 10
        mock_cone_search.return_value = "test"
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        table = ehst.query_target(name="test")
        assert table == "test"

    def test_cone_search(self):
        # Cone search around M31 coordinates using a canned VOTable response.
        coords = coordinates.SkyCoord("00h42m44.51s +41d16m08.45s",
                                      frame='icrs')
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        parameters = {'coordinates': coords,
                      'radius': 0.0,
                      'filename': 'file_cone',
                      'output_format': 'votable',
                      'cache': True}
        target_file = data_path('cone_search.vot')
        with open(target_file, mode='rb') as file:
            target_obj = file.read()
        # Serve the fixture bytes through a fake HTTP response.
        response = Response()
        response._content = target_obj
        ehst._request = MagicMock(return_value=response)
        ehst.cone_search(coordinates=parameters['coordinates'],
                         radius=parameters['radius'],
                         filename=parameters['filename'],
                         output_format=parameters['output_format'],
                         cache=parameters['cache'])
        DummyHubbleTapHandler("cone_search", parameters)

    def test_cone_search_coords(self):
        # Cone search accepting a plain string as coordinates, plus the
        # ValueError raised for an invalid coordinate type.
        coords = "00h42m44.51s +41d16m08.45s"
        parameterst = {'query': "select top 10 * from hsc_v2.hubble_sc2",
                       'output_file': "test2.vot",
                       'output_format': "votable",
                       'verbose': True}
        dummyTapHandler = DummyHubbleTapHandler("launch_job", parameterst)
        parameters = {'coordinates': coords,
                      'radius': 0.0,
                      'filename': 'file_cone',
                      'async_job': False,
                      'output_format': 'votable',
                      'cache': True,
                      'verbose': True}
        ehst = ESAHubbleClass(tap_handler=dummyTapHandler, show_messages=False)
        ehst.cone_search(coordinates=parameters['coordinates'],
                         radius=parameters['radius'],
                         filename=parameters['filename'],
                         output_format=parameters['output_format'],
                         async_job=parameters['async_job'],
                         cache=parameters['cache'],
                         verbose=parameters['verbose'])
        with pytest.raises(ValueError) as err:
            ehst._getCoordInput(1234)
        assert "Coordinates must be either a string or " \
               "astropy.coordinates" in err.value.args[0]

    def test_query_tap(self):
        # Plain TAP query delegated to the handler's launch_job.
        parameters = {'query': "select top 10 * from hsc_v2.hubble_sc2",
                      'async_job': False,
                      'output_file': "test2.vot",
                      'output_format': "votable",
                      'verbose': False}
        parameters2 = {'query': "select top 10 * from hsc_v2.hubble_sc2",
                       'output_file': "test2.vot",
                       'output_format': "votable",
                       'verbose': False}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.query_tap(query=parameters['query'],
                       async_job=parameters['async_job'],
                       output_file=parameters['output_file'],
                       output_format=parameters['output_format'],
                       verbose=parameters['verbose'])
        self.get_dummy_tap_handler().check_call("launch_job", parameters2)

    def test_get_tables(self):
        parameters = {'only_names': True,
                      'verbose': True}
        DummyHubbleTapHandler("get_tables", parameters)
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.get_tables(only_names=True, verbose=True)

    def test_get_artifact(self, tmp_path):
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        path = Path(tmp_path, "w0ji0v01t_c2f.fits.gz")
        ehst.get_artifact(artifact_id=path)

    def test_download_file(self, tmp_path):
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        file = 'w0ji0v01t_c2f.fits'
        path = Path(tmp_path, file + '.gz')
        ehst.download_file(file=path, filename=path)

    def test_get_associated_files(self):
        # Verify the exact ADQL built to list files for an observation.
        observation_id = 'test'
        query = (f"select art.artifact_id as filename, p.calibration_level, art.archive_class as type, "
                 f"pg_size_pretty(art.size_uncompr) as size_uncompressed from ehst.artifact art "
                 f"join ehst.plane p on p.plane_id = art.plane_id where "
                 f"art.observation_id = '{observation_id}'")
        parameters = {'query': query,
                      'output_file': 'test2.vot',
                      'output_format': "votable",
                      'verbose': False}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(query=query), show_messages=False)
        ehst.get_associated_files(observation_id=observation_id)
        self.get_dummy_tap_handler(query=query).check_call("launch_job", parameters)

    @patch.object(ESAHubbleClass, 'get_associated_files')
    def test_download_fits(self, mock_associated_files):
        # download_fits_files should query the associated files and fetch them.
        observation_id = 'test'
        query = (f"select art.artifact_id as filename, p.calibration_level, art.archive_class as type, "
                 f"pg_size_pretty(art.size_uncompr) as size_uncompressed from ehst.artifact art "
                 f"join ehst.plane p on p.plane_id = art.plane_id where "
                 f"art.observation_id = '{observation_id}'")
        parameters = {'query': query,
                      'output_file': 'test2.vot',
                      'output_format': "votable",
                      'verbose': False}
        mock_associated_files.return_value = [{'filename': 'test.fits'}]
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(query=query), show_messages=False)
        ehst.download_fits_files(observation_id=observation_id)
        self.get_dummy_tap_handler(query=query).check_call("launch_job", parameters)

    def test_is_not_gz(self, tmp_path):
        # A non-gzip file keeps its original name.
        target_file = data_path('cone_search.vot')
        ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        assert esautils.check_rename_to_gz(target_file) in target_file

    def test_is_gz(self, tmp_path):
        # A gzip file gets the '.fits.gz' suffix appended on rename.
        ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        # test_file = data_path('m31.vot.test')
        temp_file = 'testgz'
        target_file = os.path.join(tmp_path, temp_file)
        with gzip.open(target_file, 'wb') as f:
            f.write(b'')
        # with open(test_file, 'rb') as f_in, gzip.open(target_file, 'wb') as f_out:
        #    f_out.writelines(f_in)
        assert esautils.check_rename_to_gz(target_file) in f"{target_file}.fits.gz"

    def test_get_columns(self):
        parameters = {'table_name': "table",
                      'only_names': True,
                      'verbose': True}
        dummyTapHandler = DummyHubbleTapHandler("get_columns", parameters)
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.get_columns(table_name="table", only_names=True, verbose=True)
        dummyTapHandler.check_call("get_columns", parameters)

    def test_query_criteria_proposal(self):
        # get_query=True returns the generated ADQL instead of running it.
        parameters1 = {'proposal': 12345,
                       'async_job': False,
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': True,
                       'get_query': True}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        test_query = ehst.query_criteria(proposal=parameters1['proposal'],
                                         async_job=parameters1['async_job'],
                                         output_file=parameters1['output_file'],
                                         output_format=parameters1['output_format'],
                                         verbose=parameters1['verbose'],
                                         get_query=parameters1['get_query'])
        parameters2 = {'query': test_query,
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': False}
        parameters3 = {'query': "select * from ehst.archive where("
                                "proposal_id = '12345')",
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': False}
        dummy_tap_handler = DummyHubbleTapHandler("launch_job", parameters2)
        dummy_tap_handler.check_call("launch_job", parameters3)

    def test_retrieve_observations_from_proposal(self):
        program = 12345
        parameters1 = {'proposal': program,
                       'async_job': False,
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': True,
                       'get_query': True}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.get_observations_from_program(program=parameters1['proposal'])
        dummy_tap_handler = DummyHubbleTapHandler("launch_job", None)
        dummy_tap_handler.check_method("launch_job")

    @patch.object(ESAHubbleClass, 'get_associated_files')
    @patch.object(ESAHubbleClass, 'query_criteria')
    def test_download_fits_from_proposal(self, mock_observations, mock_files):
        # Download only FITS files for every observation in a program.
        mock_observations.return_value = {'observation_id': ['test']}
        mock_files.return_value = [{'filename': 'test.fits'}]
        tap_handler = self.get_dummy_tap_handler("load_data")
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler("load_data"), show_messages=False)
        ehst.download_files_from_program(program=12345, only_fits=True)
        tap_handler.check_method("load_data")

    @patch.object(ESAHubbleClass, 'get_associated_files')
    @patch.object(ESAHubbleClass, 'query_criteria')
    def test_download_all_from_proposal(self, mock_observations, mock_files):
        # Download all associated files (not just FITS) for a program.
        mock_observations.return_value = {'observation_id': ['test']}
        mock_files.return_value = {'filename': ['test.fits']}
        tap_handler = self.get_dummy_tap_handler("load_data")
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler("load_data"), show_messages=False)
        ehst.download_files_from_program(program=12345, only_fits=False)
        tap_handler.check_method("load_data")

    def test_query_criteria(self):
        # Full criteria query with a string calibration level ("PRODUCT" -> 3).
        parameters1 = {'calibration_level': "PRODUCT",
                       'data_product_type': "image",
                       'intent': "SCIENCE",
                       'obs_collection': ['HST'],
                       'instrument_name': ['WFC3'],
                       'filters': ['F555W'],
                       'async_job': False,
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': True,
                       'get_query': True}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        test_query = ehst.query_criteria(calibration_level=parameters1['calibration_level'],
                                         data_product_type=parameters1['data_product_type'],
                                         intent=parameters1['intent'],
                                         obs_collection=parameters1['obs_collection'],
                                         instrument_name=parameters1['instrument_name'],
                                         filters=parameters1['filters'],
                                         async_job=parameters1['async_job'],
                                         output_file=parameters1['output_file'],
                                         output_format=parameters1['output_format'],
                                         verbose=parameters1['verbose'],
                                         get_query=parameters1['get_query'])
        parameters2 = {'query': test_query,
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': False}
        parameters3 = {'query': "select * from ehst.archive where("
                                "calibration_level=3 AND "
                                "data_product_type LIKE '%image%' AND "
                                "intent LIKE '%science%' AND (collection "
                                "LIKE '%HST%') AND (instrument_name LIKE "
                                "'%WFC3%') AND (filter "
                                "LIKE '%F555W%'))",
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': False}
        dummy_tap_handler = DummyHubbleTapHandler("launch_job", parameters2)
        dummy_tap_handler.check_call("launch_job", parameters3)

    def test_query_criteria_numeric_calibration(self):
        # Numeric calibration level passes through; out-of-range raises.
        parameters1 = {'calibration_level': 1,
                       'data_product_type': "image",
                       'intent': "SCIENCE",
                       'obs_collection': ['HST'],
                       'instrument_name': ['WFC3'],
                       'filters': ['F555W'],
                       'async_job': False,
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': True,
                       'get_query': True}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        test_query = ehst.query_criteria(calibration_level=parameters1['calibration_level'],
                                         data_product_type=parameters1['data_product_type'],
                                         intent=parameters1['intent'],
                                         obs_collection=parameters1['obs_collection'],
                                         instrument_name=parameters1['instrument_name'],
                                         filters=parameters1['filters'],
                                         async_job=parameters1['async_job'],
                                         output_file=parameters1['output_file'],
                                         output_format=parameters1['output_format'],
                                         verbose=parameters1['verbose'],
                                         get_query=parameters1['get_query'])
        parameters2 = {'query': test_query,
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': False}
        parameters3 = {'query': "select * from ehst.archive where("
                                "calibration_level=1 AND "
                                "data_product_type LIKE '%image%' AND "
                                "intent LIKE '%science%' AND (collection "
                                "LIKE '%HST%') AND (instrument_name LIKE "
                                "'%WFC3%') AND (filter "
                                "LIKE '%F555W%'))",
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': False}
        dummy_tap_handler = DummyHubbleTapHandler("launch_job", parameters2)
        dummy_tap_handler.check_call("launch_job", parameters3)
        parameters1['calibration_level'] = 4
        with pytest.raises(KeyError) as err:
            ehst.query_criteria(calibration_level=parameters1['calibration_level'],
                                data_product_type=parameters1['data_product_type'],
                                intent=parameters1['intent'],
                                obs_collection=parameters1['obs_collection'],
                                instrument_name=parameters1['instrument_name'],
                                filters=parameters1['filters'],
                                async_job=parameters1['async_job'],
                                output_file=parameters1['output_file'],
                                output_format=parameters1['output_format'],
                                verbose=parameters1['verbose'],
                                get_query=parameters1['get_query'])
        assert "Calibration level must be between 0 and 3" in err.value.args[0]

    def test_cone_search_criteria(self):
        # Cone search restricted by criteria, via target name, coordinates,
        # and the TypeError path when both are given at once.
        parameters1 = {'target': "m31",
                       'radius': 7,
                       'data_product_type': "image",
                       'obs_collection': ['HST'],
                       'instrument_name': ['ACS/WFC'],
                       'filters': ['F435W'],
                       'async_job': False,
                       'filename': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': True}
        test_query = "select o.*, p.calibration_level, p.data_product_type, " \
                     "pos.ra, pos.dec from ehst.observation AS o JOIN " \
                     "ehst.plane as p on o.observation_uuid=p.observation_" \
                     "uuid JOIN ehst.position as pos on p.plane_id = " \
                     "pos.plane_id where((o.collection LIKE '%HST%') AND " \
                     "(o.instrument_name LIKE '%WFPC2%') AND " \
                     "(o.filter LIKE '%F606W%') AND " \
                     "1=CONTAINS(POINT('ICRS', pos.ra, pos.dec)," \
                     "CIRCLE('ICRS', 10.6847083, 41.26875, " \
                     "0.11666666666666667)))"
        parameters3 = {'query': test_query,
                       'output_file': "output_test_query_by_criteria.vot.gz",
                       'output_format': "votable",
                       'verbose': False}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        query_criteria_query = "select o.*, p.calibration_level, " \
                               "p.data_product_type, pos.ra, pos.dec from " \
                               "ehst.observation AS o JOIN ehst.plane as p " \
                               "on o.observation_uuid=p.observation_uuid " \
                               "JOIN ehst.position as pos on p.plane_id = " \
                               "pos.plane_id where((o.collection LIKE " \
                               "'%HST%') AND (o.instrument_name LIKE " \
                               "'%WFPC2%') AND (o.filter " \
                               "LIKE '%F606W%'))"
        ehst.query_criteria = MagicMock(return_value=query_criteria_query)
        target = coordinates.SkyCoord("00h42m44.51s +41d16m08.45s", frame='icrs')
        ehst._query_tap_target = MagicMock(return_value=target)
        ehst.cone_search_criteria(target=parameters1['target'],
                                  radius=parameters1['radius'],
                                  data_product_type=parameters1
                                  ['data_product_type'],
                                  obs_collection=parameters1['obs_collection'],
                                  instrument_name=parameters1
                                  ['instrument_name'],
                                  filters=parameters1['filters'],
                                  async_job=parameters1['async_job'],
                                  filename=parameters1['filename'],
                                  output_format=parameters1['output_format'],
                                  verbose=parameters1['verbose'])
        dummy_tap_handler = DummyHubbleTapHandler("launch_job", parameters3)
        dummy_tap_handler.check_call("launch_job", parameters3)
        c = coordinates.SkyCoord("00h42m44.51s +41d16m08.45s", frame='icrs')
        ehst.cone_search_criteria(coordinates=c,
                                  radius=parameters1['radius'],
                                  data_product_type=parameters1
                                  ['data_product_type'],
                                  obs_collection=parameters1['obs_collection'],
                                  instrument_name=parameters1
                                  ['instrument_name'],
                                  filters=parameters1['filters'],
                                  async_job=parameters1['async_job'],
                                  filename=parameters1['filename'],
                                  output_format=parameters1['output_format'],
                                  verbose=parameters1['verbose'])
        with pytest.raises(TypeError) as err:
            ehst.cone_search_criteria(target=parameters1['target'],
                                      coordinates=123,
                                      radius=parameters1['radius'],
                                      data_product_type=parameters1
                                      ['data_product_type'],
                                      obs_collection=parameters1
                                      ['obs_collection'],
                                      instrument_name=parameters1
                                      ['instrument_name'],
                                      filters=parameters1['filters'],
                                      async_job=parameters1['async_job'],
                                      filename=parameters1['filename'],
                                      output_format=parameters1
                                      ['output_format'],
                                      verbose=parameters1['verbose'])
        # NOTE(review): the adjacent literals concatenate to
        # "...asparameter." (no space) — confirm this matches the actual
        # exception text raised by cone_search_criteria.
        assert "Please use only target or coordinates as" \
               "parameter." in err.value.args[0]

    def test_query_criteria_no_params(self):
        # With no criteria, query_criteria falls back to a plain join query.
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.query_criteria(async_job=False,
                            output_file="output_test_query_"
                                        "by_criteria.vot.gz",
                            output_format="votable",
                            verbose=True)
        parameters = {'query': "select o.*, p.calibration_level, "
                               "p.data_product_type from ehst.observation "
                               "AS o LEFT JOIN ehst.plane as p on "
                               "o.observation_uuid=p.observation_uuid",
                      'output_file': "output_test_query_by_criteria.vot.gz",
                      'output_format': "votable",
                      'verbose': False}
        dummy_tap_handler = DummyHubbleTapHandler("launch_job", parameters)
        dummy_tap_handler.check_call("launch_job", parameters)

    def test_empty_list(self):
        # Non-string list elements in criteria are rejected.
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        with pytest.raises(ValueError) as err:
            ehst.query_criteria(instrument_name=[1],
                                async_job=False,
                                output_file="output_test_query_"
                                            "by_criteria.vot.gz",
                                output_format="votable",
                                verbose=True)
        assert "One of the lists is empty or there are " \
               "elements that are not strings" in err.value.args[0]

    def test__get_decoded_string(self):
        # Escaped bytes-like string decodes cleanly.
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        dummy = '\x74\x65\x73\x74'
        decoded_string = ehst._get_decoded_string(dummy)
        assert decoded_string == 'test'

    def test__get_decoded_string_unicodedecodeerror(self):
        # Undecodable input is returned unchanged.
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        dummy = '\xd0\x91'
        decoded_string = ehst._get_decoded_string(dummy)
        assert decoded_string == dummy

    def test__get_decoded_string_attributeerror(self):
        # Non-string input (no .decode) is returned unchanged.
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        dummy = True
        decoded_string = ehst._get_decoded_string(dummy)
        assert decoded_string == dummy

    @patch.object(ESAHubbleClass, 'query_tap')
    def test__select_related_composite(self, mock_query):
        arr = {'a': np.array([1, 4], dtype=np.int32),
               'b': [2.0, 5.0],
               'observation_id': ['x', 'y']}
        data_table = Table(arr)
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        mock_query.return_value = data_table
        dummy_obs_id = "1234"
        oids = ehst._select_related_composite(observation_id=dummy_obs_id)
        assert set(['x', 'y']).issubset(set(oids))

    @patch.object(ESAHubbleClass, 'query_tap')
    def test_select_related_members(self, mock_query):
        # Member ids are stripped of their 'caom:HST/' prefix.
        arr = {'a': np.array([1, 4], dtype=np.int32),
               'b': [2.0, 5.0],
               'members': ['caom:HST/test', 'y']}
        data_table = Table(arr)
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        mock_query.return_value = data_table
        dummy_obs_id = "1234"
        oids = ehst._select_related_members(observation_id=dummy_obs_id)
        assert oids == ['test']

    @patch.object(ESAHubbleClass, 'query_tap')
    def test_get_observation_type(self, mock_query):
        arr = {'a': np.array([1, 4], dtype=np.int32),
               'b': [2.0, 5.0],
               'obs_type': ['HST Test', 'y']}
        data_table = Table(arr)
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        mock_query.return_value = data_table
        dummy_obs_id = "1234"
        oids = ehst.get_observation_type(observation_id=dummy_obs_id)
        assert oids == 'HST Test'

    def test_get_observation_type_obs_id_none_valueerror(self):
        with pytest.raises(ValueError):
            ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
            dummy_obs_id = None
            ehst.get_observation_type(observation_id=dummy_obs_id)

    @patch.object(ESAHubbleClass, 'query_tap')
    def test_get_observation_type_invalid_obs_id_valueerror(self, mock_query):
        # An empty result table means the observation id is unknown.
        with pytest.raises(ValueError):
            arr = {'a': np.array([], dtype=np.int32),
                   'b': [],
                   'obs_type': []}
            data_table = Table(arr)
            ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
            mock_query.return_value = data_table
            dummy_obs_id = '1234'
            ehst.get_observation_type(observation_id=dummy_obs_id)

    @patch.object(ESAHubbleClass, 'query_tap')
    @patch.object(ESAHubbleClass, 'get_observation_type')
    def test_get_hst_link(self, mock_observation_type, mock_query):
        mock_observation_type.return_value = "HST"
        arr = {'a': np.array([1], dtype=np.int32),
               'b': [2.0],
               'observation_id': ['1234']}
        data_table = Table(arr)
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        mock_query.return_value = data_table
        dummy_obs_id = "1234"
        oids = ehst.get_hap_hst_link(observation_id=dummy_obs_id)
        assert oids == ['1234']

    @patch.object(ESAHubbleClass, 'get_observation_type')
    @patch.object(ESAHubbleClass, '_select_related_members')
    def test_get_hap_link(self, mock_select_related_members, mock_observation_type):
        mock_select_related_members.return_value = 'test'
        mock_observation_type.return_value = "HAP"
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        dummy_obs_id = "1234"
        oids = ehst.get_hap_hst_link(observation_id=dummy_obs_id)
        assert oids == 'test'

    @patch.object(ESAHubbleClass, 'get_observation_type')
    def test_get_hap_hst_link_invalid_id_valueerror(self, mock_observation_type):
        with pytest.raises(ValueError):
            mock_observation_type.return_value = "valueerror"
            ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
            dummy_obs_id = "1234"
            ehst.get_hap_hst_link(observation_id=dummy_obs_id)

    @patch.object(ESAHubbleClass, 'get_observation_type')
    def test_get_hap_hst_link_compositeerror(self, mock_observation_type):
        # Composite observations are not valid for hap/hst links.
        with pytest.raises(ValueError):
            mock_observation_type.return_value = "HAP Composite"
            ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
            dummy_obs_id = "1234"
            ehst.get_hap_hst_link(observation_id=dummy_obs_id)

    @patch.object(ESAHubbleClass, '_select_related_members')
    @patch.object(ESAHubbleClass, 'get_observation_type')
    def test_get_member_observations_composite(self, mock_observation_type, mock_select_related_members):
        mock_observation_type.return_value = "Composite"
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        mock_select_related_members.return_value = 'test'
        dummy_obs_id = "1234"
        oids = ehst.get_member_observations(observation_id=dummy_obs_id)
        assert oids == 'test'

    @patch.object(ESAHubbleClass, '_select_related_composite')
    @patch.object(ESAHubbleClass, 'get_observation_type')
    def test_get_member_observations_simple(self, mock_observation_type, mock_select_related_composite):
        mock_observation_type.return_value = "Simple"
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        mock_select_related_composite.return_value = 'test'
        dummy_obs_id = "1234"
        oids = ehst.get_member_observations(observation_id=dummy_obs_id)
        assert oids == 'test'

    @patch.object(ESAHubbleClass, 'get_observation_type')
    def test_get_member_observations_invalid_id_valueerror(self, mock_observation_type):
        with pytest.raises(ValueError):
            mock_observation_type.return_value = "valueerror"
            ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
            dummy_obs_id = "1234"
            ehst.get_member_observations(observation_id=dummy_obs_id)

    @patch.object(ESAHubbleClass, 'query_criteria')
    @patch.object(ESAHubbleClass, '_query_tap_target')
    @patch.object(ESAHubbleClass, 'query_tap')
    def test_cone_search_criteria_only_target(self, mock_query_tap, mock__query_tap_target, mock_query_criteria):
        mock_query_criteria.return_value = "Simple query"
        mock__query_tap_target.return_value = coordinates.SkyCoord("00h42m44.51s +41d16m08.45s", frame='icrs')
        mock_query_tap.return_value = "table"
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        oids = ehst.cone_search_criteria(target="m11", radius=1)
        assert oids == 'table'

    @patch.object(ESAHubbleClass, 'query_criteria')
    @patch.object(ESAHubbleClass, 'query_tap')
    def test_cone_search_criteria_only_coordinates(self, mock_query_tap, mock_query_criteria):
        mock_query_criteria.return_value = "Simple query"
        mock_query_tap.return_value = "table"
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        oids = ehst.cone_search_criteria(coordinates="00h42m44.51s +41d16m08.45s", radius=1)
        assert oids == 'table'

    @patch.object(ESAHubbleClass, 'query_criteria')
    def test_cone_search_criteria_typeerror(self, mock_query_criteria):
        # Supplying both target and coordinates must raise TypeError.
        mock_query_criteria.return_value = "Simple query"
        with pytest.raises(TypeError):
            ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
            ehst.cone_search_criteria(coordinates="00h42m44.51s +41d16m08.45s", target="m11", radius=1)

    def test_query_hst_tap(self):
        # Deprecated alias of query_tap must warn but still work.
        parameters = {'query': "select top 10 * from hsc_v2.hubble_sc2",
                      'async_job': False,
                      'output_file': "test2.vot",
                      'output_format': "votable",
                      'verbose': False}
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        with pytest.warns(AstropyDeprecationWarning):
            ehst.query_hst_tap(query=parameters['query'],
                               async_job=parameters['async_job'],
                               output_file=parameters['output_file'],
                               output_format=parameters['output_format'],
                               verbose=parameters['verbose'])

    @patch("http.client.HTTPSConnection")
    @patch("http.client.HTTPResponse")
    def test_show_messages(self, mock_conn, mock_res):
        # NOTE(review): patch decorators apply bottom-up, so the first
        # parameter receives the HTTPResponse mock and the second the
        # HTTPSConnection mock — the names here appear swapped. Also,
        # assert_called() is invoked on the mocked class itself rather
        # than on a specific method — confirm the intent of this test.
        mock_res.status = 400
        mock_conn.getresponse = MagicMock(return_value=mock_res)
        ESAHubbleClass()
        mock_res.assert_called()

    def test_get_datalabs_path(self):
        parameters = {'filename': "ib4x04ivq_flt.jpg",
                      'default_volume': None}
        dummyTapHandler = DummyHubbleTapHandler("get_datalabs_path", parameters)
        ehst = ESAHubbleClass(tap_handler=self.get_dummy_tap_handler(), show_messages=False)
        ehst.get_datalabs_path(filename="ib4x04ivq_flt.jpg", default_volume="")
        dummyTapHandler.check_call("get_datalabs_path", parameters)
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@esa@hubble@tests@test_esa_hubble.py@.PATH_END.py
|
{
"filename": "_cmin.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolar/marker/line/_cmin.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CminValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``cmin`` property of ``scatterpolar.marker.line``."""

    def __init__(
        self, plotly_name="cmin", parent_name="scatterpolar.marker.line", **kwargs
    ):
        # Fill in the property defaults only when the caller did not
        # override them, then hand everything to the base validator.
        kwargs.setdefault("edit_type", "plot")
        kwargs.setdefault("implied_edits", {"cauto": False})
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolar@marker@line@_cmin.py@.PATH_END.py
|
{
"filename": "_showticklabels.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/marker/colorbar/_showticklabels.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Boolean validator for ``scatter3d.marker.colorbar.showticklabels``."""

    def __init__(
        self,
        plotly_name="showticklabels",
        parent_name="scatter3d.marker.colorbar",
        **kwargs,
    ):
        # Default the edit type unless the caller overrode it.
        kwargs.setdefault("edit_type", "calc")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@marker@colorbar@_showticklabels.py@.PATH_END.py
|
{
"filename": "instrument.py",
"repo_name": "lsst-ts/ts_wep",
"repo_path": "ts_wep_extracted/ts_wep-main/python/lsst/ts/wep/instrument.py",
"type": "Python"
}
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations
__all__ = ["Instrument"]
from functools import lru_cache
from pathlib import Path
from typing import Optional, Sequence, Tuple, Union
import batoid
import numpy as np
from lsst.ts.wep.utils.enumUtils import BandLabel, DefocalType, EnumDict
from lsst.ts.wep.utils.ioUtils import mergeConfigWithFile
from scipy.optimize import minimize_scalar
class Instrument:
"""Object with relevant geometry of the primary mirror and focal plane.
The value of every parameter is first pulled from the configFile, and
then overridden by any parameters explicitly passed as keyword arguments
to the class constructor.
Parameters
----------
configFile: Path or str, optional
Path to file specifying values for the other parameters. If the
path starts with "policy:", it will look in the policy directory.
Any explicitly passed parameters override values found in this file
(the default is policy:instruments/LsstCam.yaml)
name: str, optional
The name of the instrument.
(the default is None)
diameter : float, optional
The diameter of the primary mirror in meters. If None, but
batoidModelName is set, this value will be pulled from the Batoid
model. (the default is None)
obscuration : float, optional
The fractional obscuration of the primary mirror. If None, but
batoidModelName is set, this value will be pulled from the Batoid
model. (the default is None)
focalLength : float, optional
The effective focal length in meters. If None, but batoidModelName
is set, this value will be pulled from the Batoid model.
(the default is None)
defocalOffset : float, optional
The defocal offset of the images in meters. If None, but
batoidModelName, batoidOffsetOptic, and batoidOffsetValue are set,
this value will be calculated using the Batoid model.
(the default is None)
pixelSize : float, optional
The pixel size in meters. (the default is None)
refBand : BandLabel or str, optional
When getting the wavelength or loading the Batoid model, use this
value in place of BandLabel.REF. It should be a BandLabel Enum, or
one of the corresponding strings. If set to None, this value defaults
to BandLabel.REF. (the default is None)
wavelength : float or dict, optional
The effective wavelength of the instrument in meters. Can be a float,
or a dictionary that corresponds to different bands. The keys in this
dictionary are expected to correspond to the strings specified in the
BandLabel enum in ts_wep.utils.enums. If set to None, this defaults
to {BandLabel.REF: 500e-9}. (the default is None)
batoidModelName : str, optional
Name of Batoid model. If the string contains "{band}", it is assumed
there are different Batoid models for different photometric bands,
and the names of these bands will be filled in at runtime using the
strings specified in the BandLabel enum in jf_wep.utils.enums.
(the default is None)
batoidOffsetOptic : str or None, optional
The optic to offset in the Batoid model in order to calculate
the equivalent detector offset for the model.
(the default is None)
batoidOffsetValue : float or None, optional
The value in meters to offset the optic in the Batoid model to
calculate the equivalent detector offset for the model. The
detector offset is then used for everything else. Note that
depending on the model, the sign of this value might matter.
(the default is None)
maskParams : dict, optional
Dictionary of mask parameters. Each key in this dictionary corresponds
to a different mask element. The corresponding values are dictionaries
that define circles with different centers and radii. The key, value
pairs are
- thetaMin: the minimum field angle in degrees for which this mask
element is relevant
- center: list of polynomial coeffs (in meters) for np.polyval()
to determine the center of the circle
- radius: list of polynomial coeffs (in meters) for np.polyval()
to determine the radius of the circle
None defaults to an empty dictionary.
Notes
-----
The following parameters are required to instantiate the Instrument:
- diameter
- obscuration
- focalLength
- defocalOffset
- pixelSize
With the exception of pixelSize, if not explicitly set, these parameters
can be pulled from the Batoid model specified by batoidModelName.
Note that the calculation of defocalOffset also requires that
batoidOffsetOptic and batoidOffsetValue are set.
"""
def __init__(
    self,
    configFile: Union[Path, str, None] = "policy:instruments/LsstCam.yaml",
    *,
    name: Optional[str] = None,
    diameter: Optional[float] = None,
    obscuration: Optional[float] = None,
    focalLength: Optional[float] = None,
    defocalOffset: Optional[float] = None,
    pixelSize: Optional[float] = None,
    refBand: Union[BandLabel, str, None] = None,
    wavelength: Union[float, dict, None] = None,
    batoidModelName: Optional[str] = None,
    batoidOffsetOptic: Optional[str] = None,
    batoidOffsetValue: Optional[float] = None,
    maskParams: Optional[dict] = None,
) -> None:
    """Configure the instrument from the config file plus keyword overrides."""
    # Explicit keyword arguments take precedence over configFile values.
    overrides = dict(
        name=name,
        diameter=diameter,
        obscuration=obscuration,
        focalLength=focalLength,
        defocalOffset=defocalOffset,
        pixelSize=pixelSize,
        refBand=refBand,
        wavelength=wavelength,
        batoidModelName=batoidModelName,
        batoidOffsetOptic=batoidOffsetOptic,
        batoidOffsetValue=batoidOffsetValue,
        maskParams=maskParams,
    )
    params = mergeConfigWithFile(configFile, **overrides)
    # Route every value through setattr so each property setter runs its
    # validation and cache invalidation.
    for attrName, attrValue in params.items():
        setattr(self, attrName, attrValue)
    # Touch every attribute once to surface configuration errors early
    self.checkConfig()
def copy(self) -> Instrument:
    """Return a deep copy of the instrument.

    Notes
    -----
    Any cached data from the original instrument will need to be
    repopulated in the copied instrument.
    """
    # NOTE(review): values are read via the public properties, so anything
    # currently derived from the Batoid model (e.g. diameter when
    # self._diameter is None) becomes an explicit setting in the copy.
    # maskParams may be shared by reference with the original dict —
    # confirm before mutating it in place.
    return Instrument(
        name=self.name,
        diameter=self.diameter,
        obscuration=self.obscuration,
        focalLength=self.focalLength,
        defocalOffset=self.defocalOffset,
        pixelSize=self.pixelSize,
        refBand=self.refBand,
        wavelength=self.wavelength,
        batoidModelName=self.batoidModelName,
        batoidOffsetOptic=self.batoidOffsetOptic,
        batoidOffsetValue=self.batoidOffsetValue,
        maskParams=self.maskParams,
    )

def checkConfig(self) -> None:
    """Access every attribute to make sure no errors are thrown."""
    # Property getters raise (e.g. ValueError) when the configuration is
    # incomplete, so touching every public attribute validates the config.
    for item in dir(self):
        if item[0] != "_":
            getattr(self, item)

def clearCaches(self) -> None:
    """Clear the Batoid caches."""
    # lru_cache-backed methods expose cache_clear(); the two scalar caches
    # below are recomputed lazily on next access.
    self.getBatoidModel.cache_clear()
    self._getIntrinsicZernikesCached.cache_clear()
    self._getIntrinsicZernikesTACached.cache_clear()
    self._focalLengthBatoid = None
    self._defocalOffsetBatoid = None
@property
def name(self) -> str:
    """The name of the instrument."""
    return self._name

@name.setter
def name(self, value: str) -> None:
    """Set the name of the instrument.

    Parameters
    ----------
    value : str
        The name of the instrument.
    """
    self._name = str(value)

@property
def diameter(self) -> float:
    """The primary mirror diameter in meters."""
    # Prefer an explicitly configured value; otherwise fall back to the
    # pupil size of the Batoid model, if one is available.
    if self._diameter is not None:
        return self._diameter
    if self.batoidModelName is not None:
        return self.getBatoidModel().pupilSize
    raise ValueError(
        "There is currently no diameter set. "
        "Please set either the diameter, or the batoidModelName."
    )

@diameter.setter
def diameter(self, value: Union[float, None]) -> None:
    """Set the mirror diameter.

    Parameters
    ----------
    value : float or None
        The mirror diameter in meters. None clears the explicit setting.

    Raises
    ------
    ValueError
        If the value is negative or zero
    """
    if value is None:
        self._diameter = None
        return
    diameter = float(value)
    if diameter <= 0:
        raise ValueError("diameter must be positive.")
    self._diameter = diameter

@property
def radius(self) -> float:
    """The primary mirror radius in meters."""
    return self.diameter / 2

@property
def area(self) -> float:
    """The primary mirror area in square meters."""
    # Full pupil area scaled down by the central obscuration
    fullArea = np.pi * self.radius**2
    return fullArea * (1 - self.obscuration**2)
@property
def obscuration(self) -> float:
    """The fractional obscuration."""
    # Prefer an explicitly configured value; otherwise fall back to the
    # pupil obscuration of the Batoid model, if one is available.
    if self._obscuration is not None:
        return self._obscuration
    if self.batoidModelName is not None:
        return self.getBatoidModel().pupilObscuration
    raise ValueError(
        "There is currently no obscuration set. "
        "Please set either the obscuration, or the batoidModelName."
    )

@obscuration.setter
def obscuration(self, value: Union[float, None]) -> None:
    """Set the fractional obscuration.

    Parameters
    ----------
    value : float or None
        The fractional obscuration. None clears the explicit setting.

    Raises
    ------
    ValueError
        If the fractional obscuration is not between 0 and 1 (inclusive)
    """
    if value is None:
        self._obscuration = None
        return
    obscuration = float(value)
    if obscuration < 0 or obscuration > 1:
        raise ValueError("obscuration must be between 0 and 1 (inclusive).")
    self._obscuration = obscuration

@property
def focalLength(self) -> float:
    """The focal length in meters."""
    # Resolution order: explicit setting, then the cached Batoid-derived
    # value, then a fresh on-axis Batoid calculation (which is cached).
    if self._focalLength is not None:
        return self._focalLength
    if self._focalLengthBatoid is not None:
        return self._focalLengthBatoid
    if self.batoidModelName is None:
        raise ValueError(
            "There is currently no focalLength set. "
            "Please set either the focalLength, or the batoidModelName."
        )
    self._focalLengthBatoid = batoid.analysis.focalLength(
        self.getBatoidModel(),
        0,
        0,
        self.wavelength[self.refBand],
    )
    return self._focalLengthBatoid

@focalLength.setter
def focalLength(self, value: Union[float, None]) -> None:
    """Set the focal length.

    Parameters
    ----------
    value : float or None
        The focal length in meters. None clears the explicit setting.

    Raises
    ------
    ValueError
        If the focal length is not positive
    """
    if value is None:
        self._focalLength = None
        return
    focalLength = float(value)
    if focalLength <= 0:
        raise ValueError("focalLength must be positive.")
    self._focalLength = focalLength

@property
def focalRatio(self) -> float:
    """The f-number (focal length divided by aperture diameter)."""
    return self.focalLength / self.diameter
@property
def defocalOffset(self) -> float:
    """The defocal offset in meters."""
    # Resolution order: explicit setting, cached Batoid-derived value,
    # then derive it from the Batoid model via batoidOffsetOptic.
    if self._defocalOffset is not None:
        return self._defocalOffset
    elif self._defocalOffsetBatoid is not None:
        return self._defocalOffsetBatoid
    elif self.batoidModelName is not None and self._batoidOffsetValue is not None:
        # Load the model and wavelength info
        batoidModel = self.getBatoidModel()
        offsetOptic = self.batoidOffsetOptic
        eps = batoidModel.pupilObscuration
        wavelength = self.wavelength[BandLabel.REF]
        batoidOffsetValue = self.batoidOffsetValue
        # Calculate dZ4 (the defocus Zernike) for the shifted optic
        shift = np.array([0, 0, batoidOffsetValue])
        dZ4optic = batoid.zernike(
            batoidModel.withLocallyShiftedOptic(offsetOptic, +shift),
            *np.zeros(2),
            wavelength,
            eps=eps,
            jmax=4,
            nx=128,
        )[4]

        # Define a function to calculate dZ4 for an offset detector
        def dZ4det(offset):
            return batoid.zernike(
                batoidModel.withLocallyShiftedOptic("Detector", [0, 0, offset]),
                *np.zeros(2),
                wavelength,
                eps=eps,
                jmax=4,
                nx=128,
            )[4]

        # Find the detector offset whose defocus matches the optic's:
        # minimize the relative dZ4 mismatch over a +/- 10 cm window
        result = minimize_scalar(
            lambda offset: np.abs((dZ4det(offset) - dZ4optic) / dZ4optic),
            bounds=(-0.1, 0.1),
        )
        # Require convergence AND a relative mismatch below 1e-3
        if not result.success or result.fun > 1e-3:
            raise RuntimeError(
                "Calculating defocalOffset from batoidOffsetValue failed."
            )
        # Save the calculated offset (sign is discarded; offsets are
        # stored as magnitudes, matching the defocalOffset setter)
        self._defocalOffsetBatoid = np.abs(result.x)
        return self._defocalOffsetBatoid
    else:
        raise ValueError(
            "There is currently no defocalOffset set. "
            "Please set either the defocalOffset, OR the batoidModelName, "
            "the batoidOffsetOptic, and the batoidOffsetValue."
        )

@defocalOffset.setter
def defocalOffset(self, value: Union[float, None]) -> None:
    """Set the defocal offset.

    Parameters
    ----------
    value : float or None
        The defocal offset in meters. The absolute value is stored.
    """
    if value is not None:
        value = np.abs(float(value))
    self._defocalOffset = value
    # Clear relevant caches
    self._getIntrinsicZernikesTACached.cache_clear()

@property
def pupilOffset(self) -> float:
    """The pupil offset in meters."""
    return self.focalLength**2 / self.defocalOffset
@property
def pixelSize(self) -> float:
    """The pixel size in meters."""
    return self._pixelSize

@pixelSize.setter
def pixelSize(self, value: float) -> None:
    """Set the pixel size.

    Parameters
    ----------
    value : float
        The pixel size in meters.

    Raises
    ------
    TypeError
        If the pixel size cannot be interpreted as a number
    ValueError
        If the pixel size is not positive
    """
    try:
        value = float(value)
    except (TypeError, ValueError):
        # float() raises TypeError for None/objects but ValueError for
        # non-numeric strings; catch both so callers always get the
        # intended, descriptive TypeError.
        raise TypeError("pixelSize must be a number.") from None
    if value <= 0:
        raise ValueError("pixelSize must be positive.")
    self._pixelSize = value

@property
def pixelScale(self) -> float:
    """The pixel scale in arcseconds per pixel."""
    return np.rad2deg(self.pixelSize / self.focalLength) * 3600

@property
def donutRadius(self) -> float:
    """The expected donut radius in pixels."""
    # Geometric radius of the defocused pupil image: the defocal distance
    # divided by the slope of the f/# beam cone, converted to pixels.
    rMeters = self.defocalOffset / np.sqrt(4 * self.focalRatio**2 - 1)
    rPixels = rMeters / self.pixelSize
    return rPixels

@property
def donutDiameter(self) -> float:
    """The expected donut diameter in pixels."""
    return 2 * self.donutRadius
@property
def refBand(self) -> BandLabel:
    """Band to use with Batoid and wavelength when band == BandLabel.REF"""
    return self._refBand

@refBand.setter
def refBand(self, value: Union[BandLabel, str, None]) -> None:
    """Set reference band for loading Batoid model and getting wavelength.

    Parameters
    ----------
    value : BandLabel or str or None
        The reference band. Should be a BandLabel Enum, or one of the
        corresponding strings. If set to None, this value defaults to
        BandLabel.REF.
    """
    self._refBand = BandLabel.REF if value is None else BandLabel(value)
    # Everything derived from the reference band is now stale
    self._getIntrinsicZernikesCached.cache_clear()
    self._getIntrinsicZernikesTACached.cache_clear()
    self._focalLengthBatoid = None
    self._defocalOffsetBatoid = None
@property
def wavelength(self) -> EnumDict:
    """Return the effective wavelength(s) in meters."""
    # Default to 500 nm for both the REF placeholder and the current
    # reference band when no wavelength has been configured
    if self._wavelength is None:
        return EnumDict(BandLabel, {BandLabel.REF: 500e-9, self.refBand: 500e-9})
    else:
        return self._wavelength

@wavelength.setter
def wavelength(self, value: Union[float, dict, None]) -> None:
    """Set the effective wavelength(s).

    Parameters
    ----------
    value : float or dict or None
        The effective wavelength(s) in meters. Can either be a single
        float, or a dictionary mapping BandLabels to floats.

    Raises
    ------
    TypeError
        If the provided value is not a float or dictionary
    ValueError
        If the provided value is a dictionary, and the dictionary does not
        contain a wavelength for the reference band,
    """
    # Make sure the value is a float or dictionary
    # NOTE(review): an int (e.g. wavelength=500) is rejected by this
    # isinstance check — confirm whether ints should be accepted.
    if (
        not isinstance(value, float)
        and not isinstance(value, dict)
        and not isinstance(value, EnumDict)
        and value is not None
    ):
        raise TypeError("wavelength must be a float, dictionary, or None.")
    # Save wavelength info in a BandLabel EnumDict
    if isinstance(value, dict) or isinstance(value, EnumDict):
        value = EnumDict(BandLabel, value)
        try:
            # Mirror the reference band under the REF placeholder so
            # lookups with BandLabel.REF always succeed
            value[BandLabel.REF] = value[self.refBand]
        except KeyError:
            raise ValueError(
                "The wavelength dictionary must contain a wavelength "
                "for the reference band."
            )
    elif value is not None:
        # A bare float applies to both the REF placeholder and refBand
        value = EnumDict(BandLabel, {BandLabel.REF: value, self.refBand: value})
    # Set the new value
    self._wavelength = value
    # Clear relevant caches (the intrinsic Zernikes and the
    # Batoid-derived scalars all depend on the wavelength)
    self._getIntrinsicZernikesCached.cache_clear()
    self._getIntrinsicZernikesTACached.cache_clear()
    self._focalLengthBatoid = None
    self._defocalOffsetBatoid = None
@property
def batoidModelName(self) -> Union[str, None]:
    """The Batoid model name."""
    return self._batoidModelName

@batoidModelName.setter
def batoidModelName(self, value: Optional[str]) -> None:
    """Set the Batoid model name.

    The Batoid model name is used to load the Batoid model via
    batoid.Optic.fromYaml(batoidModelName + ".yaml")

    The name must match one of the yaml files in the batoid/data directory:
    https://github.com/jmeyers314/batoid/tree/main/batoid/data

    You can use "{band}" in the name, and this will be replaced with a band
    name when loading the batoid model.

    E.g. Setting the name to "LSST_{band}" allows one to load the Batoid
    models corresponding to "LSST_u.yaml", "LSST_g.yaml", etc. using the
    getBatoidModel() method below.

    Parameters
    ----------
    value : str or None
        The name of the Batoid model.

    Raises
    ------
    TypeError
        If value is not a string or None
    ValueError
        If the name does not match any model shipped with Batoid
    """
    # Make sure the value is a string or None
    if not isinstance(value, str) and value is not None:
        raise TypeError("batoidModelName must be a string, or None.")
    # Set the new value, remembering the old one so the assignment can be
    # rolled back if the model fails to load
    oldValue = getattr(self, "_batoidModelName", None)
    self._batoidModelName = value
    # Make sure the Batoid model can be found
    # (getBatoidModel() must see the new name, hence set-then-validate)
    try:
        self.getBatoidModel()
    except FileNotFoundError:
        # Undo the change
        self._batoidModelName = oldValue
        # Raise the error
        raise ValueError(
            f"batoidModelName {value} does not match any of the models "
            f"in Batoid version {batoid.__version__}."
        )
    # Clear relevant caches — everything derived from the model is stale
    self.getBatoidModel.cache_clear()
    self._getIntrinsicZernikesCached.cache_clear()
    self._getIntrinsicZernikesTACached.cache_clear()
    self._focalLengthBatoid = None
    self._defocalOffsetBatoid = None
@property
def batoidOffsetOptic(self) -> Union[str, None]:
    """The optic that is offset in the Batoid model."""
    return self._batoidOffsetOptic

@batoidOffsetOptic.setter
def batoidOffsetOptic(self, value: Union[str, None]) -> None:
    """Set the optic that is offset in the Batoid model.

    This optic is offset in order to calculate the equivalent
    detector offset for the model.

    Parameters
    ----------
    value : str or None
        The name of the optic to be offset in the Batoid model.

    Raises
    ------
    RuntimeError
        If no Batoid model is set
    TypeError
        If value is not a string or None
    ValueError
        If the optic is not found in the Batoid model
    """
    if value is not None:
        # Validation only applies when an optic is actually being set
        if self.batoidModelName is None:
            raise RuntimeError("There is no Batoid model set.")
        if not isinstance(value, str):
            raise TypeError("batoidOffsetOptic must be a string or None.")
        if value not in self.getBatoidModel()._names:
            raise ValueError(f"Optic {value} not found in the Batoid model.")
    self._batoidOffsetOptic = value
    # The offset optic feeds the TA Zernikes and the derived defocal offset
    self._getIntrinsicZernikesTACached.cache_clear()
    self._defocalOffsetBatoid = None
@property
def batoidOffsetValue(self) -> Union[float, None]:
    """Amount in meters the optic is offset in the Batoid model."""
    return self._batoidOffsetValue

@batoidOffsetValue.setter
def batoidOffsetValue(self, value: Union[float, None]) -> None:
    """Set amount in meters the optic is offset in the batoid model.

    This is the amount that batoidOffsetOptic is offset in the Batoid
    model to calculate the equivalent detector offset for the model.
    Note depending on the model, the sign of this value might matter.

    Parameters
    ----------
    value : float or None
        The offset value

    Raises
    ------
    RuntimeError
        If no Batoid model is set
    """
    if value is None:
        self._batoidOffsetValue = None
    else:
        if self.batoidModelName is None:
            raise RuntimeError("There is no Batoid model set.")
        self._batoidOffsetValue = float(value)
    # The offset value feeds the TA Zernikes and the derived defocal offset
    self._getIntrinsicZernikesTACached.cache_clear()
    self._defocalOffsetBatoid = None
@lru_cache(10)
def getBatoidModel(
    self, band: Union[BandLabel, str] = BandLabel.REF
) -> batoid.Optic:
    """Return the Batoid model for the instrument and the requested band.

    Parameters
    ----------
    band : BandLabel or str, optional
        The BandLabel Enum or corresponding string, specifying which Batoid
        model to load. Only relevant if self.batoidModelName contains
        "{band}". (the default is BandLabel.REF)
    """
    # Without a model name there is nothing to load
    if self.batoidModelName is None:
        return None
    # Resolve the band, mapping the REF placeholder onto the reference band
    bandEnum = BandLabel(band)
    if bandEnum == BandLabel.REF:
        bandEnum = self.refBand
    # Substitute the band string into the model-name template (if present)
    # and load the corresponding yaml file shipped with Batoid
    yamlName = self.batoidModelName.format(band=bandEnum.value) + ".yaml"
    return batoid.Optic.fromYaml(yamlName)
@lru_cache(100)
def _getIntrinsicZernikesCached(
    self,
    xAngle: float,
    yAngle: float,
    band: Union[BandLabel, str],
    jmax: int,
) -> np.ndarray:
    """Cached interior function for the getIntrinsicZernikes method.

    We need to do this because numpy arrays are mutable.

    Parameters
    ----------
    xAngle : float
        The x-component of the field angle in degrees.
    yAngle : float
        The y-component of the field angle in degrees.
    band : BandLabel or str, optional
        The BandLabel Enum or corresponding string, specifying which batoid
        model to load. Only relevant if self.batoidModelName contains
        "{band}".
    jmax : int, optional
        The maximum Noll index of the intrinsic Zernikes.

    Returns
    -------
    np.ndarray
        The Zernike coefficients in meters, starting with index 0
    """
    # Get the band enum
    band = BandLabel(band)
    # Get the batoid model
    batoidModel = self.getBatoidModel(band)
    # If there is no batoid model, just return zeros
    # (length jmax + 1 so indexing by Noll index works directly)
    if batoidModel is None:
        return np.zeros(jmax + 1)
    # Get the wavelength: band-specific if multiple are configured,
    # otherwise the single reference wavelength
    if len(self.wavelength) > 1:
        wavelength = self.wavelength[band]
    else:
        wavelength = self.wavelength[BandLabel.REF]
    # Get the intrinsic Zernikes in wavelengths
    zkIntrinsic = batoid.zernike(
        batoidModel,
        *np.deg2rad([xAngle, yAngle]),
        wavelength,
        jmax=jmax,
        eps=batoidModel.pupilObscuration,
        nx=128,
    )
    # Multiply by wavelength to get Zernikes in meters
    zkIntrinsic *= wavelength
    return zkIntrinsic
def getIntrinsicZernikes(
    self,
    xAngle: float,
    yAngle: float,
    band: Union[BandLabel, str] = BandLabel.REF,
    nollIndices: Sequence = tuple(np.arange(4, 79)),
) -> np.ndarray:
    """Return the intrinsic Zernikes associated with the optical design.

    Parameters
    ----------
    xAngle : float
        The x-component of the field angle in degrees.
    yAngle : float
        The y-component of the field angle in degrees.
    band : BandLabel or str, optional
        The BandLabel Enum or corresponding string, specifying which batoid
        model to load. Only relevant if self.batoidModelName contains
        "{band}". (the default is BandLabel.REF)
    nollIndices : np.ndarray, optional
        Noll indices for which to return Zernikes.
        (the default is indices 4-78)

    Returns
    -------
    np.ndarray
        The Zernike coefficients in meters
    """
    # Normalize to an array so it can be used for fancy indexing
    indices = np.array(nollIndices)
    # Compute (or retrieve from cache) coefficients up to the largest
    # requested Noll index, then select just the requested ones
    allZk = self._getIntrinsicZernikesCached(xAngle, yAngle, band, indices.max())
    return allZk[indices]
@lru_cache(100)
def _getIntrinsicZernikesTACached(
    self,
    xAngle: float,
    yAngle: float,
    defocalType: DefocalType,
    band: Union[BandLabel, str],
    jmax: int,
) -> np.ndarray:
    """Cached function for batoid.zernikeTA.

    Parameters
    ----------
    xAngle : float
        The x-component of the field angle in degrees.
    yAngle : float
        The y-component of the field angle in degrees.
    defocalType : DefocalType or str
        The DefocalType Enum or corresponding string, specifying which side
        of focus to model.
    band : BandLabel or str
        The BandLabel Enum or corresponding string, specifying which
        batoid model to load. Only relevant if self.batoidModelName
        contains "{band}".
    jmax : int
        The maximum Noll index of the off-axis model Zernikes.

    Returns
    -------
    np.ndarray
        The Zernike coefficients in meters, starting with index 0

    Notes
    -----
    In the ZernikeTA calculation below, we use nrad=10 and choose naz so
    the pupil is approximately uniformly sampled. Not all the Zernike
    coefficients have converged with nrad=10, but we chose this number so
    the image positions have converged. In particular, for nrad=10, the
    residuals with Batoid are less than 0.5 microns.
    """
    # Get the band enum
    band = BandLabel(band)
    # Get the batoid model
    batoidModel = self.getBatoidModel(band)
    # If there is no batoid model, just return zeros
    # (length jmax + 1 so indexing by Noll index works directly)
    if batoidModel is None:
        return np.zeros(jmax + 1)
    # Offset the focal plane: extrafocal moves the detector by
    # +defocalOffset along z, intrafocal by -defocalOffset
    defocalType = DefocalType(defocalType)
    defocalSign = +1 if defocalType == DefocalType.Extra else -1
    offset = [0, 0, defocalSign * self.defocalOffset]
    batoidModel = batoidModel.withLocallyShiftedOptic("Detector", offset)
    # Get the wavelength: band-specific if multiple are configured,
    # otherwise the single reference wavelength
    if len(self.wavelength) > 1:
        wavelength = self.wavelength[band]
    else:
        wavelength = self.wavelength[BandLabel.REF]
    # Get the off-axis model Zernikes in wavelengths
    zkIntrinsic = batoid.zernikeTA(
        batoidModel,
        *np.deg2rad([xAngle, yAngle]),
        wavelength,
        jmax=jmax,
        eps=batoidModel.pupilObscuration,
        focal_length=self.focalLength,
        nrad=10,
        naz=int(2 * np.pi * 10),
    )
    # Multiply by wavelength to get Zernikes in meters
    zkIntrinsic *= wavelength
    return zkIntrinsic
def getOffAxisCoeff(
    self,
    xAngle: float,
    yAngle: float,
    defocalType: DefocalType,
    band: Union[BandLabel, str] = BandLabel.REF,
    nollIndicesModel: Sequence = tuple(np.arange(4, 79)),
    nollIndicesIntr: Sequence = tuple(np.arange(4, 79)),
) -> np.ndarray:
    """Return the Zernike coefficients associated with the off-axis model.

    Parameters
    ----------
    xAngle : float
        The x-component of the field angle in degrees.
    yAngle : float
        The y-component of the field angle in degrees.
    defocalType : DefocalType or str
        The DefocalType Enum or corresponding string, specifying which side
        of focus to model.
    band : BandLabel or str, optional
        The BandLabel Enum or corresponding string, specifying which
        batoid model to load. Only relevant if self.batoidModelName
        contains "{band}". (the default is BandLabel.REF)
    nollIndicesModel : np.ndarray, optional
        Noll indices of Zernikes retrieved for the off-axis model.
        (the default is indices 4-78)
    nollIndicesIntr : np.ndarray, optional
        Noll indices of Zernikes you are estimating in the TIE or
        Danish. The off-axis coefficients are calculated by retrieving
        coefficients from batoid.zernikeTA, and then subtracting off the
        intrinsic Zernikes for Noll indices you are estimating. This is
        allows you to determine whether intrinsic Zernikes are included
        in wavefront estimates when using WfEstimator.
        (the default is indices 4-78).

    Returns
    -------
    np.ndarray
        The Zernike coefficients in meters, for the requested Noll
        indices (all >= 4)
    """
    # Make sure these are arrays
    nollIndicesModel = np.array(nollIndicesModel)
    nollIndicesIntr = np.array(nollIndicesIntr)
    # Get zernikeTA
    zkTA = self._getIntrinsicZernikesTACached(
        xAngle,
        yAngle,
        defocalType,
        band,
        nollIndicesModel.max(),
    )
    # Get regular intrinsic zernikes
    zk = self._getIntrinsicZernikesCached(
        xAngle,
        yAngle,
        band,
        nollIndicesIntr.max(),
    )
    # Subtract intrinsics from zernikeTA.
    # Work in a full-length scratch array indexed by Noll index, so the
    # model and intrinsic index sets need not match, then select the
    # model indices for the return value.
    offAxisCoeff = np.zeros(max(zkTA.size, zk.size), dtype=float)
    offAxisCoeff[nollIndicesModel] = zkTA[nollIndicesModel]
    offAxisCoeff[nollIndicesIntr] -= zk[nollIndicesIntr]
    return offAxisCoeff[nollIndicesModel]
@property
def maskParams(self) -> dict:
    """The mask parameter dictionary."""
    # Get the parameters if they exist
    params = getattr(self, "_maskParams", None)
    # If they don't exist, use the primary inner and outer radii
    # (a plain annulus: clear aperture between obscuration and rim)
    if params is None:
        params = {
            "Pupil": {
                "outer": {
                    "clear": True,
                    "thetaMin": 0,
                    "thetaMax": np.inf,
                    "center": [0],
                    "radius": [self.radius],
                },
                "inner": {
                    "clear": False,
                    "thetaMin": 0,
                    "thetaMax": np.inf,
                    "center": [0],
                    "radius": [self.obscuration * self.radius],
                },
            }
        }
    return params

@maskParams.setter
def maskParams(self, value: Optional[dict]) -> None:
    """Set the mask parameters.

    Parameters
    ----------
    value : dict or None
        Dictionary of mask parameters. Each key in this dictionary
        corresponds to a different mask element. The corresponding
        values are dictionaries that define circles with different
        centers and radii. The key, value pairs are
            - thetaMin: the minimum field angle in degrees for which
            this mask element is relevant
            - center: list of polynomial coefficients (in meters) for
            np.polyval() to determine the center of the circle
            - radius: list of polynomial coefficients (in meters) for
            np.polyval() to determine the radius of the circle
        None defaults to an empty dictionary.

    Raises
    ------
    TypeError
        If value is not a dictionary or None
    """
    # NOTE(review): the default params above also carry "thetaMax" and
    # "clear" keys not mentioned in this docstring — confirm the full
    # schema consumers expect. The dict is stored by reference (not
    # copied), so later external mutation will be visible here.
    if isinstance(value, dict):
        self._maskParams = value
    elif value is None:
        self._maskParams = dict()
    else:
        raise TypeError("maskParams must be a dictionary or None.")
@property
def nPupilPixels(self) -> int:
    """The number of pupil pixels (on a side).

    This number is set so that the resolution of the pupil roughly
    matches the resolution of the image.
    """
    # Round the donut diameter up to a whole number of pixels
    return np.ceil(self.donutDiameter).astype(int)

def createPupilGrid(self) -> Tuple[np.ndarray, np.ndarray]:
    """Create a grid for the pupil.

    The coordinates of the grid are in normalized pupil coordinates.
    These coordinates are defined such that u^2 + v^2 = 1 is the outer
    edge of the pupil, and u^2 + v^2 = obscuration^2 is the inner edge.

    The number of pixels is chosen to match the resolution of the image.

    Returns
    -------
    np.ndarray
        The 2D u-grid on the pupil plane
    np.ndarray
        The 2D v-grid on the pupil plane
    """
    # 1D coordinates, slightly overfilling the unit pupil on both sides
    coords = np.linspace(-1.01, 1.01, self.nPupilPixels)
    # Expand into 2D u/v grids
    gridU, gridV = np.meshgrid(coords, coords)
    return gridU, gridV

def createImageGrid(self, nPixels: int) -> Tuple[np.ndarray, np.ndarray]:
    """Create an (nPixel x nPixel) grid for the image.

    The coordinates of the grid are in normalized image coordinates.
    These coordinates are defined such that u^2 + v^2 = 1 is the outer
    edge of the unaberrated donut, and u^2 + v^2 = obscuration^2 is the
    inner edge.

    Parameters
    ----------
    nPixels : int
        The number of pixels on a side.

    Returns
    -------
    np.ndarray
        The 2D u-grid on the image plane
    np.ndarray
        The 2D v-grid on the image plane
    """
    # Pixel indices, re-centered so the grid mean sits at zero
    coords = np.arange(nPixels, dtype=float)
    coords -= coords.mean()
    # Normalize so the unaberrated donut edge lies at radius 1
    coords /= self.donutRadius
    # Expand into 2D u/v grids
    gridU, gridV = np.meshgrid(coords, coords)
    return gridU, gridV
|
lsst-tsREPO_NAMEts_wepPATH_START.@ts_wep_extracted@ts_wep-main@python@lsst@ts@wep@instrument.py@.PATH_END.py
|
{
"filename": "ImageViewTemplate_generic.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/imageview/ImageViewTemplate_generic.py",
"type": "Python"
}
|
# Form implementation generated from reading ui file '../pyqtgraph/imageview/ImageViewTemplate.ui'
#
# Created by: PyQt6 UI code generator 6.1.0
#
# WARNING: Any manual changes made to this file will be lost when pyuic6 is
# run again. Do not edit this file unless you know what you are doing.
from ..Qt import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """Auto-generated (pyuic6) UI definition for pyqtgraph's ImageView.

    Generated from ``ImageViewTemplate.ui``; per the header warning, do not
    edit this class by hand — regenerate it from the .ui file instead.
    """

    def setupUi(self, Form):
        """Build the ImageView widget hierarchy and layouts on *Form*."""
        Form.setObjectName("Form")
        Form.resize(726, 588)
        # Outermost layout: a vertical splitter (image area over ROI plot)
        # stacked above the normalization group box.
        self.gridLayout_3 = QtWidgets.QGridLayout(Form)
        self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_3.setSpacing(0)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.splitter = QtWidgets.QSplitter(Form)
        self.splitter.setOrientation(QtCore.Qt.Orientation.Vertical)
        self.splitter.setObjectName("splitter")
        # Top splitter pane: graphics view + histogram/LUT + ROI/menu buttons.
        self.layoutWidget = QtWidgets.QWidget(self.splitter)
        self.layoutWidget.setObjectName("layoutWidget")
        self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setSpacing(0)
        self.gridLayout.setObjectName("gridLayout")
        self.graphicsView = GraphicsView(self.layoutWidget)
        self.graphicsView.setObjectName("graphicsView")
        self.gridLayout.addWidget(self.graphicsView, 0, 0, 2, 1)
        self.histogram = HistogramLUTWidget(self.layoutWidget)
        self.histogram.setObjectName("histogram")
        self.gridLayout.addWidget(self.histogram, 0, 1, 1, 2)
        self.roiBtn = QtWidgets.QPushButton(self.layoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.roiBtn.sizePolicy().hasHeightForWidth())
        self.roiBtn.setSizePolicy(sizePolicy)
        self.roiBtn.setCheckable(True)
        self.roiBtn.setObjectName("roiBtn")
        self.gridLayout.addWidget(self.roiBtn, 1, 1, 1, 1)
        self.menuBtn = QtWidgets.QPushButton(self.layoutWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Minimum, QtWidgets.QSizePolicy.Policy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.menuBtn.sizePolicy().hasHeightForWidth())
        self.menuBtn.setSizePolicy(sizePolicy)
        self.menuBtn.setObjectName("menuBtn")
        self.gridLayout.addWidget(self.menuBtn, 1, 2, 1, 1)
        # Bottom splitter pane: the time/ROI plot.
        self.roiPlot = PlotWidget(self.splitter)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Policy.Preferred, QtWidgets.QSizePolicy.Policy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.roiPlot.sizePolicy().hasHeightForWidth())
        self.roiPlot.setSizePolicy(sizePolicy)
        self.roiPlot.setMinimumSize(QtCore.QSize(0, 40))
        self.roiPlot.setObjectName("roiPlot")
        self.gridLayout_3.addWidget(self.splitter, 0, 0, 1, 1)
        # Normalization controls group box (operation, mean, blur options).
        self.normGroup = QtWidgets.QGroupBox(Form)
        self.normGroup.setObjectName("normGroup")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.normGroup)
        self.gridLayout_2.setContentsMargins(0, 0, 0, 0)
        self.gridLayout_2.setSpacing(0)
        self.gridLayout_2.setObjectName("gridLayout_2")
        self.normSubtractRadio = QtWidgets.QRadioButton(self.normGroup)
        self.normSubtractRadio.setObjectName("normSubtractRadio")
        self.gridLayout_2.addWidget(self.normSubtractRadio, 0, 2, 1, 1)
        self.normDivideRadio = QtWidgets.QRadioButton(self.normGroup)
        self.normDivideRadio.setChecked(False)
        self.normDivideRadio.setObjectName("normDivideRadio")
        self.gridLayout_2.addWidget(self.normDivideRadio, 0, 1, 1, 1)
        self.label_5 = QtWidgets.QLabel(self.normGroup)
        font = QtGui.QFont()
        font.setBold(True)
        self.label_5.setFont(font)
        self.label_5.setObjectName("label_5")
        self.gridLayout_2.addWidget(self.label_5, 0, 0, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.normGroup)
        font = QtGui.QFont()
        font.setBold(True)
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.normGroup)
        font = QtGui.QFont()
        font.setBold(True)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.gridLayout_2.addWidget(self.label_4, 2, 0, 1, 1)
        self.normROICheck = QtWidgets.QCheckBox(self.normGroup)
        self.normROICheck.setObjectName("normROICheck")
        self.gridLayout_2.addWidget(self.normROICheck, 1, 1, 1, 1)
        self.normXBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
        self.normXBlurSpin.setObjectName("normXBlurSpin")
        self.gridLayout_2.addWidget(self.normXBlurSpin, 2, 2, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.normGroup)
        self.label_8.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
        self.label_8.setObjectName("label_8")
        self.gridLayout_2.addWidget(self.label_8, 2, 1, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.normGroup)
        self.label_9.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
        self.label_9.setObjectName("label_9")
        self.gridLayout_2.addWidget(self.label_9, 2, 3, 1, 1)
        self.normYBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
        self.normYBlurSpin.setObjectName("normYBlurSpin")
        self.gridLayout_2.addWidget(self.normYBlurSpin, 2, 4, 1, 1)
        self.label_10 = QtWidgets.QLabel(self.normGroup)
        self.label_10.setAlignment(QtCore.Qt.AlignmentFlag.AlignRight|QtCore.Qt.AlignmentFlag.AlignTrailing|QtCore.Qt.AlignmentFlag.AlignVCenter)
        self.label_10.setObjectName("label_10")
        self.gridLayout_2.addWidget(self.label_10, 2, 5, 1, 1)
        self.normOffRadio = QtWidgets.QRadioButton(self.normGroup)
        self.normOffRadio.setChecked(True)
        self.normOffRadio.setObjectName("normOffRadio")
        self.gridLayout_2.addWidget(self.normOffRadio, 0, 3, 1, 1)
        self.normTimeRangeCheck = QtWidgets.QCheckBox(self.normGroup)
        self.normTimeRangeCheck.setObjectName("normTimeRangeCheck")
        self.gridLayout_2.addWidget(self.normTimeRangeCheck, 1, 3, 1, 1)
        self.normFrameCheck = QtWidgets.QCheckBox(self.normGroup)
        self.normFrameCheck.setObjectName("normFrameCheck")
        self.gridLayout_2.addWidget(self.normFrameCheck, 1, 2, 1, 1)
        self.normTBlurSpin = QtWidgets.QDoubleSpinBox(self.normGroup)
        self.normTBlurSpin.setObjectName("normTBlurSpin")
        self.gridLayout_2.addWidget(self.normTBlurSpin, 2, 6, 1, 1)
        self.gridLayout_3.addWidget(self.normGroup, 1, 0, 1, 1)

        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)

    def retranslateUi(self, Form):
        """Set the (translatable) user-visible text on all widgets."""
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "PyQtGraph"))
        self.roiBtn.setText(_translate("Form", "ROI"))
        self.menuBtn.setText(_translate("Form", "Menu"))
        self.normGroup.setTitle(_translate("Form", "Normalization"))
        self.normSubtractRadio.setText(_translate("Form", "Subtract"))
        self.normDivideRadio.setText(_translate("Form", "Divide"))
        self.label_5.setText(_translate("Form", "Operation:"))
        self.label_3.setText(_translate("Form", "Mean:"))
        self.label_4.setText(_translate("Form", "Blur:"))
        self.normROICheck.setText(_translate("Form", "ROI"))
        self.label_8.setText(_translate("Form", "X"))
        self.label_9.setText(_translate("Form", "Y"))
        self.label_10.setText(_translate("Form", "T"))
        self.normOffRadio.setText(_translate("Form", "Off"))
        self.normTimeRangeCheck.setText(_translate("Form", "Time range"))
        self.normFrameCheck.setText(_translate("Form", "Frame"))
from ..widgets.GraphicsView import GraphicsView
from ..widgets.HistogramLUTWidget import HistogramLUTWidget
from ..widgets.PlotWidget import PlotWidget
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@imageview@ImageViewTemplate_generic.py@.PATH_END.py
|
{
"filename": "load_LWAOV_data_into_EPIC.py",
"repo_name": "epic-astronomy/EPIC",
"repo_path": "EPIC_extracted/EPIC-master/main/load_LWAOV_data_into_EPIC.py",
"type": "Python"
}
|
from glob import glob
import numpy as NP
import h5py
import data_interface as DI
import progressbar as PGB
import ipdb as PDB
# Location of the reformatted LWA-OV dada->HDF5 files to stream into EPIC.
basedir = '/data5/LWA_OV_data/'
reformatted_data_dir = 'data_reformatted/'
subdir = 'jun11/47mhz/'
fglob = basedir + reformatted_data_dir + subdir + '*.dada.hdf5'

# Resolve the file list once (the original re-globbed three times) and sort
# it so the processing order is deterministic across runs.
infiles = sorted(glob(fglob))

progress = PGB.ProgressBar(widgets=[PGB.Percentage(), PGB.Bar(marker='-', left=' |', right='| '), PGB.Counter(), '/{0:0d} Files'.format(len(infiles)), PGB.ETA()], maxval=len(infiles)).start()
for findex, infile in enumerate(infiles):
    with h5py.File(infile, 'r') as fileobj:
        # Read the scalar header datasets with [()]; the .value attribute
        # was deprecated and removed in h5py 3.0.
        ntimes = fileobj['header']['ntimes'][()]
        nant = fileobj['header']['nant'][()]
        nchan = fileobj['header']['nchan'][()]
        npol = fileobj['header']['npol'][()]
    dstream = DI.DataStreamer()
    # Load every timestamp of this file into the data streamer.
    # (range, not the Python-2-only xrange, so this runs under Python 3.)
    for ti in range(ntimes):
        dstream.load(infile, ti, datatype='Ef', pol=None)
    progress.update(findex + 1)
progress.finish()
|
epic-astronomyREPO_NAMEEPICPATH_START.@EPIC_extracted@EPIC-master@main@load_LWAOV_data_into_EPIC.py@.PATH_END.py
|
{
"filename": "local_plots.py",
"repo_name": "NuSpaceSim/nuSpaceSim",
"repo_path": "nuSpaceSim_extracted/nuSpaceSim-main/src/nuspacesim/simulation/eas_optical/local_plots.py",
"type": "Python"
}
|
# The Clear BSD License
#
# Copyright (c) 2021 Alexander Reustle and the NuSpaceSim Team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted (subject to the limitations in the disclaimer
# below) provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY
# THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from matplotlib import pyplot as plt
from ...utils.plots import hist2d
def eas_optical_density(inputs, results, *args, **kwargs):
    r"""Plot 2D density histograms of the EAS optical Cherenkov results.

    One column per independent variable (beta angle, decay altitude, shower
    energy); top row against numPEs, bottom row against cos(theta_chEff).
    """
    eas_cls, betas, altDec, showerEnergy, *_ = inputs
    numPEs, costhetaChEff = results

    # Issue 94 == https://github.com/NuSpaceSim/nuSpaceSim/issues/94
    # Include only events with Npe >= photo_electron_threshold
    keep = numPEs >= eas_cls.config.detector.optical.photo_electron_threshold
    betas = betas[keep]
    altDec = altDec[keep]
    showerEnergy = showerEnergy[keep]
    numPEs = numPEs[keep]
    costhetaChEff = costhetaChEff[keep]

    fig, ax = plt.subplots(2, 3, figsize=(15, 8), constrained_layout=True)

    # (x-data, x-label) per column; (y-data, y-label) per row.
    columns = (
        (np.degrees(betas), "β"),
        (altDec, "decay altitude (km)"),
        (showerEnergy, "showerEnergy (100 PeV)"),
    )
    rows = (
        (numPEs, "numPEs"),
        (costhetaChEff, "cos(θ_chEff)"),
    )
    for col, (xvals, xlabel) in enumerate(columns):
        for row, (yvals, ylabel) in enumerate(rows):
            hist2d(fig, ax[row, col], xvals, yvals, xlabel, ylabel,
                   cmap="plasma")

    fig.suptitle("EAS Optical Cherenkov properties.")
    plt.show()
def eas_optical_histogram(inputs, results, *args, **kwargs):
    r"""Plot log-scale histograms of numPEs and cos(theta_chEff)."""
    eas_cls, *_ = inputs
    numPEs, costhetaChEff = results

    # Issue 94 == https://github.com/NuSpaceSim/nuSpaceSim/issues/94
    # Include only events with Npe >= photo_electron_threshold
    above_threshold = (
        numPEs >= eas_cls.config.detector.optical.photo_electron_threshold
    )
    numPEs = numPEs[above_threshold]
    costhetaChEff = costhetaChEff[above_threshold]

    fig, ax = plt.subplots(2, 1, constrained_layout=True)
    # (data, x-axis label) for each histogram panel, top to bottom.
    panels = (
        (numPEs, "log(numPEs)"),
        (costhetaChEff, "log(cos(θ_chEff))"),
    )
    for axis, (data, label) in zip(ax, panels):
        axis.hist(data, 100, log=True, facecolor="salmon", alpha=1)
        axis.set_xlabel(label)
    fig.suptitle("EAS Optical Cherenkov property Histograms")
    plt.show()
|
NuSpaceSimREPO_NAMEnuSpaceSimPATH_START.@nuSpaceSim_extracted@nuSpaceSim-main@src@nuspacesim@simulation@eas_optical@local_plots.py@.PATH_END.py
|
{
"filename": "transpose_conv.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/testing/op_tests/transpose_conv.py",
"type": "Python"
}
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for transpose_conv."""
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
# Since compute output_shape is fairly complicated for
# tf.nn.conv2d_transpose input_sizes argument, so we here first perform a
# "conv2d" operation to get the output, then we use the output to feed in
# tf.nn.conv2d_backprop_input.
# This test will depend on the "conv2d" operation's correctness.
@register_make_test_function()
def make_transpose_conv_tests(options):
    """Make a set of tests to do transpose_conv.

    Builds TFLite conversion test zips covering two graph construction
    paths: a variable-weight path via conv2d + conv2d_backprop_input, and a
    constant-weight path via conv2d_transpose (optionally with bias and
    full quantization).
    """
    # Tensorflow only supports equal strides
    test_parameters = [
        # Variable-weight cases: filter is a graph input, so the filter
        # shape is derived from input depth and channel_multiplier.
        {
            "input_shape": [[1, 3, 4, 1], [1, 10, 10, 3], [3, 20, 20, 1]],
            "filter_size": [[1, 1], [1, 2], [3, 3]],
            "has_bias": [False],
            "strides": [[1, 1, 1, 1], [1, 3, 3, 1]],
            "padding": ["SAME", "VALID"],
            "data_format": ["NHWC"],
            "channel_multiplier": [1, 2],
            "output_shape": [[]],
            "fully_quantize": [False],
            "const_weight_bias": [False]
        },
        # TODO(yunluli): Adding simple tests for now to unblock edgetpu debugging.
        # Need to add more test cases.
        # Constant-weight cases: filter_size is the full 4D filter shape and
        # output_shape is given explicitly to conv2d_transpose.
        {
            "input_shape": [[1, 3, 3, 1]],
            "filter_size": [[3, 3, 2, 1]],
            "has_bias": [False],
            "strides": [[1, 1, 1, 1]],
            "padding": ["SAME"],
            "data_format": ["NHWC"],
            "channel_multiplier": [1],
            "output_shape": [[1, 3, 3, 2]],
            "fully_quantize": [True],
            "const_weight_bias": [True]
        },
        {
            "input_shape": [[1, 3, 3, 1]],
            "filter_size": [[3, 3, 2, 1]],
            "has_bias": [False],
            "strides": [[1, 1, 1, 1]],
            "padding": ["SAME"],
            "data_format": ["NHWC"],
            "channel_multiplier": [1],
            "output_shape": [[1, 3, 3, 2]],
            "fully_quantize": [False],
            "const_weight_bias": [True]
        },
        {
            "input_shape": [[1, 3, 3, 1]],
            "filter_size": [[3, 3, 2, 1]],
            "has_bias": [False],
            "strides": [[1, 2, 2, 1]],
            "padding": ["SAME"],
            "data_format": ["NHWC"],
            "channel_multiplier": [1],
            "output_shape": [[1, 6, 6, 2]],
            "fully_quantize": [True],
            "const_weight_bias": [True]
        },
        {
            "input_shape": [[1, 4, 3, 1]],
            "filter_size": [[3, 3, 2, 1]],
            "has_bias": [False],
            "strides": [[1, 2, 2, 1]],
            "padding": ["SAME"],
            "data_format": ["NHWC"],
            "channel_multiplier": [1],
            "output_shape": [[1, 8, 6, 2]],
            "fully_quantize": [True],
            "const_weight_bias": [True]
        },
        {
            "input_shape": [[1, 3, 3, 1]],
            "filter_size": [[3, 3, 2, 1]],
            "has_bias": [True],
            "strides": [[1, 1, 1, 1]],
            "padding": ["SAME"],
            "data_format": ["NHWC"],
            "channel_multiplier": [1],
            "output_shape": [[1, 3, 3, 2]],
            "fully_quantize": [True],
            "const_weight_bias": [True]
        },
    ]

    def get_tensor_shapes(parameters):
        """Return [input_shape, filter_shape] for the given parameter combo.

        For variable weights, the 2D filter_size is expanded to a full 4D
        filter shape; for constant weights, filter_size already is the 4D
        shape and is returned as-is.
        """
        input_shape = parameters["input_shape"]
        filter_size = parameters["filter_size"]
        if not parameters["const_weight_bias"]:
            filter_shape = filter_size + [
                input_shape[3], parameters["channel_multiplier"]
            ]
            return [input_shape, filter_shape]
        return [input_shape, filter_size]

    def build_graph(parameters):
        """Build a transpose_conv graph given `parameters`."""
        input_shape, filter_shape = get_tensor_shapes(parameters)
        input_tensor = tf.compat.v1.placeholder(
            dtype=tf.float32, name="input", shape=input_shape)
        filter_input = tf.compat.v1.placeholder(
            dtype=tf.float32, name="filter", shape=filter_shape)
        if not parameters["const_weight_bias"]:
            # Variable-weight path: run conv2d forward to get a valid output
            # shape, then invert it with conv2d_backprop_input (see the
            # module-level comment about why this detour is needed).
            input_tensors = [input_tensor, filter_input]
            conv_outputs = tf.nn.conv2d(
                input=input_tensor,
                filters=filter_input,
                strides=parameters["strides"],
                padding=parameters["padding"],
                data_format=parameters["data_format"])
            out = tf.compat.v1.nn.conv2d_backprop_input(
                input_shape,
                filter_input,
                conv_outputs,
                strides=parameters["strides"],
                padding=parameters["padding"],
                data_format=parameters["data_format"])
        else:
            # Constant-weight path: bake the filter (and optional bias) into
            # the graph as constants so they can be quantized.
            input_tensors = [input_tensor]
            if parameters["fully_quantize"]:
                # Keep values in [-1, 1] so quantization ranges are sane.
                filter_input = create_tensor_data(
                    np.float32, filter_shape, min_value=-1, max_value=1)
            else:
                filter_input = create_tensor_data(np.float32, filter_shape)
            out = tf.nn.conv2d_transpose(
                input_tensor,
                filter_input,
                parameters["output_shape"],
                strides=parameters["strides"],
                padding=parameters["padding"],
                data_format=parameters["data_format"])
            if parameters["has_bias"]:
                if parameters["fully_quantize"]:
                    bias_input = create_tensor_data(
                        np.float32, (parameters["output_shape"][-1],),
                        min_value=-1,
                        max_value=1)
                else:
                    bias_input = create_tensor_data(np.float32,
                                                    (parameters["output_shape"][-1],))
                out = tf.nn.bias_add(
                    out, bias_input, data_format=parameters["data_format"])
                # Extra multiply so the bias-add is followed by another op
                # in the converted graph.
                mul_data = create_tensor_data(np.float32,
                                              (parameters["output_shape"][-1],))
                out = tf.math.multiply(out, mul_data)
        return input_tensors, [out]

    def build_inputs(parameters, sess, inputs, outputs):
        """Create concrete input values and run the graph to get outputs."""
        input_shape, filter_shape = get_tensor_shapes(parameters)
        if not parameters["const_weight_bias"]:
            values = [
                create_tensor_data(np.float32, input_shape),
                create_tensor_data(np.float32, filter_shape)
            ]
        else:
            if parameters["fully_quantize"]:
                values = [
                    create_tensor_data(
                        np.float32, input_shape, min_value=-1, max_value=1),
                ]
            else:
                values = [create_tensor_data(np.float32, input_shape),]
        return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

    make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@testing@op_tests@transpose_conv.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/hoverlabel/font/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``surface.hoverlabel.font.family`` property."""

    def __init__(
        self, plotly_name="family", parent_name="surface.hoverlabel.font", **kwargs
    ):
        # Fill in the validator defaults unless the caller overrides them.
        for option, default in (
            ("array_ok", True),
            ("edit_type", "none"),
            ("no_blank", True),
            ("strict", True),
        ):
            kwargs.setdefault(option, default)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@hoverlabel@font@_family.py@.PATH_END.py
|
{
"filename": "ellipse.py",
"repo_name": "lpsinger/ligo.skymap",
"repo_path": "ligo.skymap_extracted/ligo.skymap-main/ligo/skymap/postprocess/ellipse.py",
"type": "Python"
}
|
#
# Copyright (C) 2013-2024 Leo Singer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import astropy_healpix as ah
from astropy import units as u
from astropy.wcs import WCS
import healpy as hp
import numpy as np
from .. import moc
from ..extern.numpy.quantile import quantile
__all__ = ('find_ellipse',)
def find_ellipse(prob, cl=90, projection='ARC', nest=False):
    """For a HEALPix map, find an ellipse that contains a given probability.

    The orientation is defined as the angle of the semimajor axis
    counterclockwise from west on the plane of the sky. If you think of the
    semimajor distance as the width of the ellipse, then the orientation is the
    clockwise rotation relative to the image x-axis. Equivalently, the
    orientation is the position angle of the semi-minor axis.

    These conventions match the definitions used in DS9 region files [1]_ and
    Aladin drawing commands [2]_.

    Parameters
    ----------
    prob : np.ndarray, astropy.table.Table
        The HEALPix probability map, either as a full rank explicit array
        or as a multi-order map.
    cl : float, np.ndarray
        The desired credible level or levels (default: 90).
    projection : str, optional
        The WCS projection (default: 'ARC', or zenithal equidistant).
        For a list of possible values, see the Astropy documentation [3]_.
    nest : bool
        HEALPix pixel ordering (default: False, or ring ordering).

    Returns
    -------
    ra : float
        The ellipse center right ascension in degrees.
    dec : float
        The ellipse center declination in degrees.
    a : float, np.ndarray
        The length of the semimajor axis in degrees.
    b : float, np.ndarray
        The length of the semiminor axis in degrees.
    pa : float
        The orientation of the ellipse axis on the plane of the sky in degrees.
    area : float, np.ndarray
        The area of the ellipse in square degrees.

    Notes
    -----
    The center of the ellipse is the median a posteriori sky position. The
    length and orientation of the semi-major and semi-minor axes are measured
    as follows:

    1. The sky map is transformed to a WCS projection that may be specified by
       the caller. The default projection is ``ARC`` (zenithal equidistant), in
       which radial distances are proportional to the physical angular
       separation from the center point.

    2. A 1-sigma ellipse is estimated by calculating the covariance matrix in
       the projected image plane using three rounds of sigma clipping to reject
       distant outlier points.

    3. The 1-sigma ellipse is inflated until it encloses an integrated
       probability of ``cl`` (default: 90%).

    The function returns a tuple of the right ascension, declination,
    semi-major distance, semi-minor distance, and orientation angle, all in
    degrees.

    If no ellipse can be found that contains integrated probability greater
    than or equal to the desired credible level ``cl``, then the return values
    ``a``, ``b``, and ``area`` will be set to nan.

    References
    ----------
    .. [1] http://ds9.si.edu/doc/ref/region.html
    .. [2] http://aladin.u-strasbg.fr/java/AladinScriptManual.gml#draw
    .. [3] http://docs.astropy.org/en/stable/wcs/index.html#supported-projections

    Examples
    --------

    **Example 1**

    First, we need some imports.

    >>> from astropy.io import fits
    >>> from astropy.utils.data import download_file
    >>> from astropy.wcs import WCS
    >>> import healpy as hp
    >>> from reproject import reproject_from_healpix
    >>> import subprocess

    Next, we download the BAYESTAR sky map for GW170817 from the
    LIGO Document Control Center.

    >>> url = 'https://dcc.ligo.org/public/0146/G1701985/001/bayestar.fits.gz' # doctest: +SKIP
    >>> filename = download_file(url, cache=True, show_progress=False) # doctest: +SKIP
    >>> _, healpix_hdu = fits.open(filename) # doctest: +SKIP
    >>> prob = hp.read_map(healpix_hdu, verbose=False) # doctest: +SKIP

    Then, we calculate ellipse and write it to a DS9 region file.

    >>> ra, dec, a, b, pa, area = find_ellipse(prob) # doctest: +SKIP
    >>> print(*np.around([ra, dec, a, b, pa, area], 5)) # doctest: +SKIP
    195.03732 -19.29358 8.66545 1.1793 63.61698 32.07665
    >>> s = 'fk5;ellipse({},{},{},{},{})'.format(ra, dec, a, b, pa) # doctest: +SKIP
    >>> open('ds9.reg', 'w').write(s) # doctest: +SKIP

    Then, we reproject a small patch of the HEALPix map, and save it to a file.

    >>> wcs = WCS() # doctest: +SKIP
    >>> wcs.wcs.ctype = ['RA---ARC', 'DEC--ARC'] # doctest: +SKIP
    >>> wcs.wcs.crval = [ra, dec] # doctest: +SKIP
    >>> wcs.wcs.crpix = [128, 128] # doctest: +SKIP
    >>> wcs.wcs.cdelt = [-0.1, 0.1] # doctest: +SKIP
    >>> img, _ = reproject_from_healpix(healpix_hdu, wcs, [256, 256]) # doctest: +SKIP
    >>> img_hdu = fits.ImageHDU(img, wcs.to_header()) # doctest: +SKIP
    >>> img_hdu.writeto('skymap.fits') # doctest: +SKIP

    Now open the image and region file in DS9. You should find that the ellipse
    encloses the probability hot spot. You can load the sky map and region file
    from the command line:

    .. code-block:: sh

        $ ds9 skymap.fits -region ds9.reg

    Or you can do this manually:

    1. Open DS9.
    2. Open the sky map: select "File->Open..." and choose ``skymap.fits``
       from the dialog box.
    3. Open the region file: select "Regions->Load Regions..." and choose
       ``ds9.reg`` from the dialog box.

    Now open the image and region file in Aladin.

    1. Open Aladin.
    2. Open the sky map: select "File->Load Local File..." and choose
       ``skymap.fits`` from the dialog box.
    3. Open the region file: select "File->Load Local File..." and choose
       ``ds9.reg`` from the dialog box.

    You can also compare the original HEALPix file with the ellipse in Aladin:

    1. Open Aladin.
    2. Open the HEALPix file by pasting the URL from the top of this
       example in the Command field at the top of the window and hitting
       return, or by selecting "File->Load Direct URL...", pasting the URL,
       and clicking "Submit."
    3. Open the region file: select "File->Load Local File..." and choose
       ``ds9.reg`` from the dialog box.

    **Example 2**

    This example shows that we get approximately the same answer for GW170817
    if we read it in as a multi-order map.

    >>> from ..io import read_sky_map # doctest: +SKIP
    >>> skymap_moc = read_sky_map(healpix_hdu, moc=True) # doctest: +SKIP
    >>> ellipse = find_ellipse(skymap_moc) # doctest: +SKIP
    >>> print(*np.around(ellipse, 5)) # doctest: +SKIP
    195.03709 -19.27589 8.67611 1.18167 63.60454 32.08015

    **Example 3**

    I'm not showing the `ra` or `pa` output from the examples below because
    the right ascension is arbitrary when dec=90° and the position angle is
    arbitrary when a=b; their arbitrary values may vary depending on your math
    library. Also, I add 0.0 to the outputs because on some platforms you tend
    to get values of dec or pa that get rounded to -0.0, which is within
    numerical precision but would break the doctests (see
    https://stackoverflow.com/questions/11010683).

    This is an example sky map that is uniform in sin(theta) out to a given
    radius in degrees. The 90% credible radius should be 0.9 * radius. (There
    will be deviations for small radius due to finite resolution.)

    >>> def make_uniform_in_sin_theta(radius, nside=512):
    ...     npix = ah.nside_to_npix(nside)
    ...     theta, phi = hp.pix2ang(nside, np.arange(npix))
    ...     theta_max = np.deg2rad(radius)
    ...     prob = np.where(theta <= theta_max, 1 / np.sin(theta), 0)
    ...     return prob / prob.sum()
    ...

    >>> prob = make_uniform_in_sin_theta(1)
    >>> ra, dec, a, b, pa, area = find_ellipse(prob)
    >>> print(dec, a, b, area) # doctest: +FLOAT_CMP
    89.90862520480792 0.8703361458208101 0.8703357768874356 2.3788811576269793

    >>> prob = make_uniform_in_sin_theta(10)
    >>> ra, dec, a, b, pa, area = find_ellipse(prob)
    >>> print(dec, a, b, area) # doctest: +FLOAT_CMP
    89.90827657529562 9.024846562072115 9.024842703023806 255.11972196535515

    >>> prob = make_uniform_in_sin_theta(120)
    >>> ra, dec, a, b, pa, area = find_ellipse(prob)
    >>> print(dec, a, b, area) # doctest: +FLOAT_CMP
    90.0 107.97450376105762 107.97450376105755 26988.70467497216

    **Example 4**

    These are approximately Gaussian distributions.

    >>> from scipy import stats
    >>> def make_gaussian(mean, cov, nside=512):
    ...     npix = ah.nside_to_npix(nside)
    ...     xyz = np.transpose(hp.pix2vec(nside, np.arange(npix)))
    ...     dist = stats.multivariate_normal(mean, cov)
    ...     prob = dist.pdf(xyz)
    ...     return prob / prob.sum()
    ...

    This one is centered at RA=45°, Dec=0° and has a standard deviation of ~1°.

    >>> prob = make_gaussian(
    ...     [1/np.sqrt(2), 1/np.sqrt(2), 0],
    ...     np.square(np.deg2rad(1)))
    ...
    >>> print(*find_ellipse(prob)) # doctest: +FLOAT_CMP
    45.0 0.0 2.1424077148886798 2.1420790721225518 90.0 14.467701995920123

    This one is centered at RA=45°, Dec=0°, and is elongated in the north-south
    direction.

    >>> prob = make_gaussian(
    ...     [1/np.sqrt(2), 1/np.sqrt(2), 0],
    ...     np.diag(np.square(np.deg2rad([1, 1, 10]))))
    ...
    >>> print(*find_ellipse(prob)) # doctest: +FLOAT_CMP
    45.0 0.0 13.587688827198997 2.082984617824178 90.0 88.57796576937045

    This one is centered at RA=0°, Dec=0°, and is elongated in the east-west
    direction.

    >>> prob = make_gaussian(
    ...     [1, 0, 0],
    ...     np.diag(np.square(np.deg2rad([1, 10, 1]))))
    ...
    >>> print(*find_ellipse(prob)) # doctest: +FLOAT_CMP
    0.0 0.0 13.583918022027142 2.082376991240146 0.0 88.54622940628768

    This one is centered at RA=0°, Dec=0°, and has its long axis tilted about
    10° to the west of north.

    >>> prob = make_gaussian(
    ...     [1, 0, 0],
    ...     [[0.1, 0, 0],
    ...      [0, 0.1, -0.15],
    ...      [0, -0.15, 1]])
    ...
    >>> print(*find_ellipse(prob)) # doctest: +FLOAT_CMP
    0.0 0.0 64.77133127092944 33.50754131182688 80.78231196786841 6372.344658663043

    This one is centered at RA=0°, Dec=0°, and has its long axis tilted about
    10° to the east of north.

    >>> prob = make_gaussian(
    ...     [1, 0, 0],
    ...     [[0.1, 0, 0],
    ...      [0, 0.1, 0.15],
    ...      [0, 0.15, 1]])
    ...
    >>> print(*find_ellipse(prob)) # doctest: +FLOAT_CMP
    0.0 0.0 64.7713312709305 33.507541311827445 99.21768803213162 6372.344658663096

    This one is centered at RA=0°, Dec=0°, and has its long axis tilted about
    80° to the east of north.

    >>> prob = make_gaussian(
    ...     [1, 0, 0],
    ...     [[0.1, 0, 0],
    ...      [0, 1, 0.15],
    ...      [0, 0.15, 0.1]])
    ...
    >>> print(*find_ellipse(prob)) # doctest: +FLOAT_CMP
    0.0 0.0 64.77564486039145 33.509863018519894 170.78252287327365 6372.42573159241

    This one is centered at RA=0°, Dec=0°, and has its long axis tilted about
    80° to the west of north.

    >>> prob = make_gaussian(
    ...     [1, 0, 0],
    ...     [[0.1, 0, 0],
    ...      [0, 1, -0.15],
    ...      [0, -0.15, 0.1]])
    ...
    >>> print(*find_ellipse(prob)) # doctest: +FLOAT_CMP
    0.0 0.0 64.77564486039145 33.50986301851988 9.217477126726351 6372.42573159241

    **Example 5**

    You can ask for other credible levels:

    >>> print(*find_ellipse(prob, cl=50)) # doctest: +FLOAT_CMP
    0.0 0.0 37.05420765328508 19.168955020016 9.217477126726351 2182.5580135410632

    Or even for multiple credible levels:

    >>> print(*find_ellipse(prob, cl=[50, 90])) # doctest: +FLOAT_CMP
    0.0 0.0 [37.05420765 64.77564486] [19.16895502 33.50986302] 9.217477126726351 [2182.55801354 6372.42573159]
    """  # noqa: E501
    # Distinguish a multi-order (UNIQ/PROBDENSITY) map from a flat array by
    # probing for the 'UNIQ' column.
    try:
        prob['UNIQ']
    except (IndexError, KeyError, ValueError):
        # Flat map: one probability per pixel, all pixels the same area.
        npix = len(prob)
        nside = ah.npix_to_nside(npix)
        ipix = range(npix)
        area = ah.nside_to_pixel_area(nside).to_value(u.deg**2)
    else:
        # Multi-order map: convert UNIQ indices to per-pixel nside/ipix and
        # turn probability density into probability per pixel.
        order, ipix = moc.uniq2nest(prob['UNIQ'])
        nside = 1 << order.astype(int)
        ipix = ipix.astype(int)
        area = ah.nside_to_pixel_area(nside).to_value(u.sr)
        prob = prob['PROBDENSITY'] * area
        area *= np.square(180 / np.pi)
        nest = True

    # Find median a posteriori sky position.
    xyz0 = [quantile(x, 0.5, weights=prob)
            for x in hp.pix2vec(nside, ipix, nest=nest)]
    (ra,), (dec,) = hp.vec2ang(np.asarray(xyz0), lonlat=True)

    # Construct WCS with the specified projection
    # and centered on mean direction.
    w = WCS()
    w.wcs.crval = [ra, dec]
    w.wcs.ctype = ['RA---' + projection, 'DEC--' + projection]

    # Transform HEALPix to the specified projection.
    xy = w.wcs_world2pix(
        np.transpose(
            hp.pix2ang(
                nside, ipix, nest=nest, lonlat=True)), 1)

    # Keep only values that were inside the projection.
    keep = np.logical_and.reduce(np.isfinite(xy), axis=1)
    xy = xy[keep]
    prob = prob[keep]
    if not np.isscalar(area):
        area = area[keep]

    # Find covariance matrix, performing three rounds of sigma-clipping
    # to reject outliers.
    keep = np.ones(len(xy), dtype=bool)
    for _ in range(3):
        c = np.cov(xy[keep], aweights=prob[keep], rowvar=False)
        nsigmas = np.sqrt(np.sum(xy.T * np.linalg.solve(c, xy.T), axis=0))
        keep &= (nsigmas < 3)

    # Find the number of sigma that enclose the cl% credible level.
    i = np.argsort(nsigmas)
    nsigmas = nsigmas[i]
    cls = np.cumsum(prob[i])
    if np.isscalar(area):
        careas = np.arange(1, len(i) + 1) * area
    else:
        careas = np.cumsum(area[i])
    # np.multiply rather than * to automatically convert to ndarray if needed
    cl = np.multiply(cl, 1e-2)
    # right=np.nan yields nan when no ellipse reaches the requested cl.
    nsigma = np.interp(cl, cls, nsigmas, right=np.nan)
    area = np.interp(cl, cls, careas, right=np.nan)

    # Find the eigendecomposition of the covariance matrix.
    w, v = np.linalg.eigh(c)

    # Find the semi-minor and semi-major axes.
    b, a = (nsigma * root_w for root_w in np.sqrt(w))

    # Find the position angle.
    pa = np.rad2deg(np.arctan2(*v[0]))

    # An ellipse is symmetric under rotations of 180°.
    # Return the smallest possible positive position angle.
    pa %= 180

    # Done!
    return ra, dec, a, b, pa, area
|
lpsingerREPO_NAMEligo.skymapPATH_START.@ligo.skymap_extracted@ligo.skymap-main@ligo@skymap@postprocess@ellipse.py@.PATH_END.py
|
{
"filename": "test_interpolators.py",
"repo_name": "astropy/photutils",
"repo_path": "photutils_extracted/photutils-main/photutils/background/tests/test_interpolators.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for the interpolators module.
"""
import astropy.units as u
import numpy as np
import pytest
from astropy.utils.exceptions import AstropyDeprecationWarning
from photutils.background.background_2d import Background2D
from photutils.background.interpolators import (BkgIDWInterpolator,
BkgZoomInterpolator)
def test_zoom_interp():
    """Check that BkgZoomInterpolator upsamples a mesh to the image shape."""
    image = np.ones((300, 300))
    background = Background2D(image, 100)
    mesh = np.array([[0.01, 0.01, 0.02],
                     [0.01, 0.02, 0.03],
                     [0.03, 0.03, 12.9]])
    interpolator = BkgZoomInterpolator(clip=False)
    result = interpolator(mesh, **background._interp_kwargs)
    assert result.shape == (300, 300)

    # the deprecated edge_method keyword should warn but still work
    with pytest.warns(AstropyDeprecationWarning):
        background = Background2D(image, 100, edge_method='crop')
    result2 = interpolator(mesh, **background._interp_kwargs)
    assert result2.shape == (300, 300)

    # Quantity (unit-attached) inputs must interpolate the same way
    unit = u.nJy
    background = Background2D(image << unit, 100)
    interpolator = BkgZoomInterpolator(clip=False)
    result = interpolator(mesh << unit, **background._interp_kwargs)
    assert result.shape == (300, 300)

    # the repr should begin with the class name
    assert repr(interpolator).startswith(
        f'{interpolator.__class__.__name__}')
def test_zoom_interp_clip():
    """Verify that clip=True limits the zoomed values to the mesh range."""
    background = Background2D(np.ones((300, 300)), 100)
    mesh = np.array([[0.01, 0.01, 0.02],
                     [0.01, 0.02, 0.03],
                     [0.03, 0.03, 12.9]])
    unclipped = BkgZoomInterpolator(clip=False)(
        mesh, **background._interp_kwargs)
    clipped = BkgZoomInterpolator(clip=True)(
        mesh, **background._interp_kwargs)
    lo = np.min(mesh)
    hi = np.max(mesh)

    # zooming overshoots the input range unless clipping is enabled
    assert np.min(unclipped) < lo
    assert np.max(unclipped) > hi
    assert np.min(clipped) == lo
    assert np.max(clipped) == hi
def test_idw_interp():
    """Check BkgIDWInterpolator output shape, unit handling, and repr."""
    image = np.ones((300, 300))
    interpolator = BkgIDWInterpolator()
    background = Background2D(image, 100, interpolator=interpolator)
    mesh = np.array([[0.01, 0.01, 0.02],
                     [0.01, 0.02, 0.03],
                     [0.03, 0.03, 12.9]])
    result = interpolator(mesh, **background._interp_kwargs)
    assert result.shape == (300, 300)

    # Quantity (unit-attached) inputs must be handled the same way
    unit = u.nJy
    background = Background2D(image << unit, 100, interpolator=interpolator)
    result = interpolator(mesh << unit, **background._interp_kwargs)
    assert result.shape == (300, 300)

    # the repr should begin with the class name
    assert repr(interpolator).startswith(
        f'{interpolator.__class__.__name__}')
|
astropyREPO_NAMEphotutilsPATH_START.@photutils_extracted@photutils-main@photutils@background@tests@test_interpolators.py@.PATH_END.py
|
{
"filename": "coi_digger.py",
"repo_name": "mbejger/polgraw-allsky",
"repo_path": "polgraw-allsky_extracted/polgraw-allsky-master/coincidences/src/coi_digger.py",
"type": "Python"
}
|
"""Post-process coincidence results for one band/hemisphere.

Flat driver script (polgraw all-sky pipeline): for each of the 16
cell-shift combinations it parses the coincidence stdout log, reads the
binary .coi file, and feeds the collected coincidence records to the
external ./fap-many program to evaluate the False Alarm Probability.

Usage: coi_digger.py <config.ini> <band> <hemisphere>
"""
import numpy as np
import sys
import os
import subprocess
import re
import itertools
import tempfile
from configparser import ConfigParser, ExtendedInterpolation
# Parser initialisation
p = ConfigParser(interpolation=ExtendedInterpolation())
# Parse the ini file
p.read(sys.argv[1])
band = sys.argv[2]
hemi = sys.argv[3]
coi_res = p.get('paths', 'coi_res')
veto_frac_file = p.get('paths', 'veto_frac_file')
griddir = p.get('paths', 'griddir')
coi_out_prefix = p.get('settings', 'coi_out_prefix')
coi_out_suffix = p.get('settings', 'coi_out_suffix')
coi_bin_suffix = p.get('settings', 'coi_bin_suffix')
mincoin = int(p.get('settings', 'mincoin'))
nod = p.get('settings', 'nod')
cellsize = p.get('settings', 'cellsize')
threshold = p.get('settings', 'threshold')
# read in the veto fraction file
try:
    fin = open(veto_frac_file, 'r')
    print('Opening {}...'.format(veto_frac_file))
except IOError:
    print('Problem with {} file. Exiting...'.format(veto_frac_file))
    sys.exit(1)
else:
    with fin:
        veto_frac = [line.rstrip('\n').split(' ') for line in fin]
    fin.close()
# select the veto fraction corresponding to the band (one element list)
vetofrac = [x[1] for x in veto_frac if x[0] == band][0]
# command line for the external FAP evaluator (band-specific, shift-independent)
fap_cmd = './fap-many -nod ' + nod + ' -band ' + band + ' -vetofrac ' + vetofrac + ' -cellsize ' + cellsize + ' -threshold ' + threshold + ' -grid ' + griddir
# all 2^4 = 16 binary cell-shift combinations (f, s, d, a)
list_of_shifts = list(itertools.product([0,1], repeat=4))
#list_of_shifts = [(1, 0, 0, 1)]
for index, item in enumerate(list_of_shifts):
    shift = ''.join(map(str, item))
    # reading coi_out file (stdout from the coincidences code)
    #mb this part should be replaced by reading this data from
    # the .coi file header
    coi_out = coi_res + band + '/' + coi_out_prefix + band + '_' + hemi + '-' + shift + coi_out_suffix
    frame_cand_info = []
    try:
        fin = open(coi_out, "r")
        print("Opening {}...".format(coi_out))
    except IOError:
        print("Problem with {} file. Exiting...".format(coi_out))
        break
    else:
        with fin:
            lines = [line.rstrip('\n') for line in fin]
        fin.close()
    for ind, line in enumerate(lines):
        # first line - info on best coincidence
        if ind == 0:
            best_coincidence = list(filter(None, line.split(' ')))
            bc = ' '.join(best_coincidence[0:4])
        res = re.search('.*Frame (.*): (.*)/(.*)', line)
        if res:
            frame_number = int(res.group(1))
            all_cands = int(res.group(3))
            unique_cands = int(res.group(2))
            frame_cand_info.append([frame_number, all_cands, unique_cands])
    frame_cand_info_flatten = [item for sublist in frame_cand_info for item in sublist]
    # reading .coi file (binary output from the coincidences code)
    coi_bin = coi_res + band + '/' + shift + '_' + band + '_' + hemi + coi_bin_suffix
    # starting value of coin, set to maximal for this shift
    coin = int(best_coincidence[3])
    try:
        fin = open(coi_bin, "rb")
        print("Opening {}...".format(coi_bin))
    except IOError:
        print("Problem with {} file. Exiting...".format(coi_bin))
        break
    # temporary file for the coincidences info - input for the fap code
    fp = tempfile.NamedTemporaryFile(delete=False)
    # dtype for first 6 numbers: coincidence multiplicity,
    # mean parameters of the coincidence (f, s, d, a) and snr
    dtype = [('coin', 'uint16'),
             ('f', 'float32'),
             ('s', 'float32'),
             ('d', 'float32'),
             ('a', 'float32'),
             ('snr', 'float32'),]
    with fin:
        # records are stored in decreasing multiplicity; stop below mincoin
        while coin >= mincoin:
            coin_mean_vals_of_pars = np.fromfile(fin, dtype=dtype, count=1)
            coin = int(coin_mean_vals_of_pars['coin'])
            cmvop = ' '.join(map(str, coin_mean_vals_of_pars[0,]))
            # reading frames info
            frames = []
            for _ in range(coin):
                frames.append(int(np.fromfile(fin, dtype=np.uint16, count=1)))
            # positions of triggers in the trigger files (not used right now)
            tripos = np.fromfile(fin, dtype=np.int32, count=coin)
            # all frames information (number, all cands, unique cands)
            fci = ' '.join([str(x) for x in frame_cand_info_flatten])
            # numbers of frames in coincidence
            fic = ' '.join([str(x) for x in frames])
            coin_data = bc + ' ' + cmvop + ' ' + fci + ' ' + fic
            try:
                fp.write(coin_data.encode())
                fp.write('\n'.encode())
            except IOError:
                print('Problem with the temporary file {}. Exiting...'.format(fp.name))
                break
    fin.close()
    fp.close()
    # FAP results files
    fin = open(shift + '_' + band + '_' + hemi + '.fap', 'w')
    # evaluate the False Alarm Probability
    cmd = fap_cmd + ' -data ' + fp.name
    subprocess.call([cmd], shell=True, stderr=fin)
    os.unlink(fp.name)
    fin.close()
    #mb better output handling needed
    # do not write if output empty (use Popen instead of call)
mbejgerREPO_NAMEpolgraw-allskyPATH_START.@polgraw-allsky_extracted@polgraw-allsky-master@coincidences@src@coi_digger.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/mesh3d/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the compound ``mesh3d.hoverlabel.font`` attribute.

    Auto-generated plotly validator; ``data_docs`` below is consumed at
    runtime to build user-facing help text, so it is kept verbatim.
    """

    def __init__(self, plotly_name="font", parent_name="mesh3d.hoverlabel", **kwargs):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Font"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for  color .
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans",, "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for  family .
            size
            sizesrc
                Sets the source reference on Chart Studio Cloud
                for  size .
""",
            ),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@mesh3d@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "example-2.py",
"repo_name": "fjdu/myRadex",
"repo_path": "myRadex_extracted/myRadex-master/example-2.py",
"type": "Python"
}
|
# Example: solve the level populations of HCO+ with myRadex and print the
# resulting transition table together with the total cooling rate.
import myRadex
import numpy as np
import pandas as pd
# Show every column of the (wide) transition table without line wrapping.
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 2000)
# Molecular data file (hco+.dat) is read from this local directory.
a = myRadex.MyRadexModel(
    dir_transition_rates='/Users/fjdu/_o/radex/data/',
    filename_molecule='hco+.dat')
# Physical conditions; *_CGS parameter names indicate CGS units.
a.run_one_params(
    Tkin=20.0, dv_CGS=1e5,
    dens_X_CGS=1e0, Ncol_X_CGS=1e20,
    H2_density_CGS=1e3, solve_method='Newton', geotype='sphere')
print('Did the solving proceed correctly?', a.flag_good)
# One row per transition; column names come from the model object.
df = pd.DataFrame(data=a.data_transitions, columns=a.column_names)
print(df)
print('Cooling rate: ', a.cooling_rate)
|
fjduREPO_NAMEmyRadexPATH_START.@myRadex_extracted@myRadex-master@example-2.py@.PATH_END.py
|
{
"filename": "q_ball.py",
"repo_name": "jtksai/PyCOOL",
"repo_path": "PyCOOL_extracted/PyCOOL-master/models/q_ball.py",
"type": "Python"
}
|
import numpy as np
pi = np.pi
"""
###############################################################################
# Define a scalar field model and a lattice
###############################################################################
"""
class Model:
    """Scalar-field model definition for a q-ball simulation.

    Pure configuration object: every attribute set in ``__init__`` is a
    model parameter or a run-control flag read elsewhere by PyCOOL.
    Edit the values below to define a different model.
    """
    def __init__(self):
        self.model_name = 'q-ball'
        "Model parameters and values:"
        "Reduced Planck mass (mpl) and regular Planck mass (MPl):"
        self.mpl = 1.0
        self.MPl = np.sqrt(8*np.pi)*self.mpl
        "Mass unit that is used to define other variables:"
        self.m = 1e2/(2.4353431507105459e+18)
        "Scalar field masses:"
        self.m2f1 = 2*self.m**2.
        self.m2f2 = 2*self.m**2.
        self.m2_fields = [self.m2f1, self.m2f2]
        "Coupling strength:"
        self.K = -0.1
        self.M_term = 1e14/(2.4353431507105459e+18)
        self.lamb = 0.5
        "Initial values for the fields and the field time derivatives:"
        self.f10 = 2.5e7*self.m
        self.f20 = 0.
        self.df1_dt0 = 0.
        self.df2_dt0 = 2.5e7*self.m**2
        self.fields0 = [self.f10, self.f20]
        self.pis0 = [self.df1_dt0, self.df2_dt0]
        "List of the potential functions:"
        "Potentials functions of the fields including self-interactions:"
        #self.V_list = None
        # NOTE: potentials are given as strings; C1..C4/D1 placeholders are
        # substituted from C_coeff/D_coeff below.
        self.V_list = ['0.5*C1*f1**2', '0.5*C2*f2**2']
        "Interaction terms of the fields:"
        #self.V_int = ["C1*(log(1+D1*(f1**2+f2**2)))+C2*(f1**2+f2**2)**3"]
        self.V_int = [("C3*(f1**2+f2**2)*(log(D1*(f1**2+f2**2)))"+
                       "+C4*(f1**2+f2**2)**3")]
        "Temporary variable that can be used to make calculations a bit faster:"
        self.tmp_var = ['log(D1*(f1**2+f2**2))']
        """Numerical values for C1, C2, ... These will be multiplied by
        a**3*dtau:"""
        self.C_coeff = [self.m2f1, self.m2f2, self.K*self.m**2, 1.]
        "Numerical values for bare coefficients D1, D2, ..."
        self.D_coeff = [1./self.M_term**2]
        #self.D_coeff = []
        """List of functions which are in 'powerform' in potential. For
        example for potential V = 1 + sin(f1)**2 power_list = ['sin(f1)'].
        Field variables are included automatically."""
        self.power_list = []
        "Initial and final times:"
        self.t_in = 100./self.m
        self.t_fin = 10000./self.m
        self.t_fin_hom = 10000./self.m
        """If adaptQ = True scales conformal time with inverse scale factor
        meaning that time steps are alsmost constant in physical time:"""
        self.adaptQ = False
        "Initial values for homogeneous radiation and matter components:"
        self.rho_r0 = 3.*(2./(3.*self.t_in))**2.
        self.rho_m0 = 0.
        "Time step:"
        self.dtau = 0.005/self.m
        #self.dtau = 1./(1000*m)
        "Time step for homogeneous system:"
        self.dtau_hom = 1./(2000*self.m)
        "Lattice side length:"
        self.L = 16./self.m
        "Lattice size, where n should be a power of two:"
        self.n = 64
        "Initial scale parameter:"
        self.a_in = 0.1*(self.t_in*self.m)**(2./3.)
        "Limit for scale factor in linearized evolution:"
        self.a_limit = 2
        "Set if to use linearized evolution:"
        self.lin_evo = False
        "Solve homogeneous field evolution if True:"
        self.homogenQ = False
        "Set True to solve non-linearized evolution:"
        self.evoQ = True
        """Whether to do curvature perturbation (zeta) calculations
        (this disables post-processing). Also disables evoQ:"""
        self.zetaQ = False#True#
        """Whether to solve tensor perturbations:"""
        self.gwsQ = True#False#
        "Number of different simulations to run with identical intial values:"
        self.sim_num = 1
        "How frequently to save data:"
        self.flush_freq = 256
        self.flush_freq_hom = 128*8
        "If True write to file:"
        self.saveQ = True#False#
        "If True make a superfolder that has all the different simulations:"
        self.superfolderQ = False#True#
        "Name of the superfolder:"
        self.superfolder = 'zeta_run_1'
        """If True multiplies energy densities with 1/m^2.
        VisIt might not plot properly very small densities."""
        self.scale = True
        """If fieldsQ = True save the field data (fields, rho etc.) in
        the Silo files:"""
        self.fieldsQ = True
        "The used discretization. Options 'defrost' or 'hlattice'."
        self.discQ = 'defrost'#'latticeeasy'#'hlattice'#
        "If spectQ = True calculate spectrums at the end:"
        self.spectQ = True
        """The used method to calculate gravitaional spectrums.
        Options 'std' which uses a continuum based wave numbers
        and 'k_eff' which uses k^_eff related to the discretized
        Laplacian to calculate the spectra."""
        self.spect_gw_m = 'std'#'k_eff'#
        #This has been depracated:"
        """The used method to calculate spectrums. Options 'latticeeasy' and
        'defrost'. Defrost uses aliasing polynomial to smooth
        the spectrums."""
        #self.spect_m = 'defrost'#'latticeeasy'
        "If distQ = True calculate empirical CDF and CDF:"
        self.distQ = False#True#
        """If statQ = True calculate skewness and kurtosis of the fields:"""
        self.statsQ = True#False#
        """If field_r = True calculate also energy densities of fields
        without interaction terms:"""
        self.field_rho = False
        """If field_lpQ = True calculate correlation lengths of
        the energy densities of the fields without interaction terms:"""
        self.field_lpQ = False#True#
        "If deSitter = True include -9H^2/(4m^2) terms in \omega_k^2 term:"
        self.deSitterQ = True#False#
        """If testQ = True use a constant seed. Can be used for debugging and
        testing:"""
        self.testQ = False#True#
        """If m2_effQ = True writes a*m_eff/m to SILO file. This includes
        also comoving number density."""
        self.m2_effQ = True#False#
        "If csvQ = True writes curves from Silo files to csv files:"
        self.csvQ = True#False#
        """Maximum number of registers useb per thread. If set to None uses
        default values 24 for single and 32 for double precision.
        Note that this will also affect the used block size"""
        self.max_reg = 45
        """For curvature perturbation studies disable post-processing
        by default:"""
        if self.zetaQ == True:
            self.evoQ = False
            self.spectQ = False
            self.distQ = False
            self.statsQ = False
            self.fieldsQ = False
            self.field_rho = False
            self.field_lpQ = False
            self.testQ = False
            self.m2_effQ = False
            self.flush_freq = 256*120*100000
            self.superfolderQ = True
            self.saveQ = False
|
jtksaiREPO_NAMEPyCOOLPATH_START.@PyCOOL_extracted@PyCOOL-master@models@q_ball.py@.PATH_END.py
|
{
"filename": "_maxpoints.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/surface/stream/_maxpoints.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``surface.stream.maxpoints`` number attribute."""

    def __init__(self, plotly_name="maxpoints", parent_name="surface.stream", **kwargs):
        # Schema defaults; each may be overridden through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        upper = kwargs.pop("max", 10000)
        lower = kwargs.pop("min", 0)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            max=upper,
            min=lower,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@surface@stream@_maxpoints.py@.PATH_END.py
|
{
"filename": "check_imports.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/partners/prompty/scripts/check_imports.py",
"type": "Python"
}
|
import sys
import traceback
from importlib.machinery import SourceFileLoader

if __name__ == "__main__":
    # Try importing every file given on the command line; print a traceback
    # for each failure and exit non-zero if any file fails to import.
    files = sys.argv[1:]
    has_failure = False
    for file in files:
        try:
            # TODO: load_module() is deprecated; consider exec_module().
            SourceFileLoader("x", file).load_module()
        except Exception:
            # Bug fix: this previously assigned to a misspelled name
            # ("has_faillure"), so failures never changed the exit code.
            has_failure = True
            print(file)
            traceback.print_exc()
            print()
    sys.exit(1 if has_failure else 0)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@partners@prompty@scripts@check_imports.py@.PATH_END.py
|
{
"filename": "_ticklabeloverflow.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/coloraxis/colorbar/_ticklabeloverflow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabeloverflowValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``layout.coloraxis.colorbar.ticklabeloverflow``."""

    def __init__(
        self,
        plotly_name="ticklabeloverflow",
        parent_name="layout.coloraxis.colorbar",
        **kwargs,
    ):
        # Schema defaults; each may be overridden through kwargs.
        allowed = kwargs.pop("values", ["allow", "hide past div", "hide past domain"])
        edit_type = kwargs.pop("edit_type", "colorbars")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=allowed,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@coloraxis@colorbar@_ticklabeloverflow.py@.PATH_END.py
|
{
"filename": "xlsx2table.py",
"repo_name": "jbroll/starbase",
"repo_path": "starbase_extracted/starbase-master/python/xlsx2table.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Multi-tool script: behaves as "xlsx2table" (spreadsheet -> starbase table)
# or "table2xlsx" (table -> spreadsheet) depending on the name it is run as.
# NOTE(review): Python 2 syntax (print statements) throughout.
import sys
import re
import os
# NOTE(review): "include" is not Python syntax -- presumably expanded by a
# build step that splices starbase.py in here; confirm before running as-is.
include starbase.py
# program name (without extension) selects the tool's direction
program = os.path.splitext(os.path.basename(sys.argv[0]))[0]
if len(sys.argv) <= 1 :
    print """
    usage: %s file sheet range [-data]
    """ % program
    sys.exit(1)
file = sys.argv[1]
if not os.path.isfile(file) and program != "table2xlsx":
    print program + ": file not found: " + file
    sys.exit(1)
from openpyxl import Workbook
from openpyxl import load_workbook
from openpyxl.cell.cell import column_index_from_string
# With only a file argument: list the workbook's sheet names and exit.
if os.path.isfile(file) and len(sys.argv) <= 2:
    try:
        xlsx = load_workbook(file)
    except:
        print program + ": cannot open xlsx woorkbook: " + file
        sys.exit(1)
    print "Sheets"
    print "------"
    print "\n".join( xlsx.get_sheet_names())
    sys.exit(1)
if len(sys.argv) <= 2:
    print """
    usage: %s file sheet range [-data]
    """ % program
    sys.exit(1)
sheet = sys.argv[2]
cells = sys.argv[3]
# "-data" suppresses the header row in both directions
if len(sys.argv) == 5 and sys.argv[4] == "-data" :
    header = False
else:
    header = True
if os.path.isfile(file):
    try:
        xlsx = load_workbook(file)
    except:
        print program + ": cannot open xlsx woorkbook: " + file
        sys.exit(1)
else:
    if program == "table2xlsx":
        xlsx = Workbook()
        ws = xlsx.active
        ws.title = sheet
    else:
        print program + ": cannot find xlsx woorkbook: " + file
        sys.exit(1)
try:
    ws = xlsx[sheet]
except:
    if program == "table2xlsx":
        ws = xlsx.create_sheet()
        ws.title = sheet
    else:
        print program + ": cannot find sheet: " + sheet
        sys.exit(1)
if program == "xlsx2table":
    cells = cells.split(":")
    if len(cells) == 1 :
        # Single-cell range: grow right/down until an empty cell is found.
        cell0 = ws[cells[0]]
        col = 0
        while cell0.offset(0, col+1).value != None and cell0.offset(0, col+1).value != "":
            col = col+1
        row = 0
        while cell0.offset(row+1, 0).value != None and cell0.offset(row+1, 0).value != "":
            row = row+1
        cells.append(cell0.offset(row, col).coordinate)
    else:
        cell0 = ws[cells[0]]
        cell1 = ws[cells[1]]
        col = column_index_from_string(cell1.column)
    print "source ", file, " ", sheet, " ", "\t".join(cells)
    print ""
    if header:
        # First row becomes column names, sanitized to [a-zA-Z0-9_].
        header = []
        dashes = []
        for row in ws.range(cell0.coordinate + ":" + cell0.offset(0, col).coordinate):
            for cell in row:
                name = re.sub('[^a-zA-Z0-9_]', "", str(cell.value)).strip()
                header.append( name)
                dashes.append("-" * len(name))
        print "\t".join(header)
        print "\t".join(dashes)
        cells[0] = cell0.offset(1, 0).coordinate
    for row in ws.range(cells[0] + ":" + cells[1]):
        tab = ""
        for cell in row:
            print tab, str(cell.value).strip(),
            tab = "\t"
        print ""
    sys.exit(0)
if program == "table2xlsx":
    try:
        cell0 = ws[cells]
    except:
        print program + ": cannot find upper left corner of table: " + cells
    # Read a starbase table from stdin and write it into the sheet.
    table = Starbase(sys.stdin)
    if header:
        for ( i, value ) in enumerate(table ** "headline"):
            ws[cell0.offset(0, i).coordinate] = value
        cell0 = cell0.offset(1, 0)
    for ( j, row ) in enumerate(table):
        for ( i, value ) in enumerate(row):
            ws[cell0.offset(j, i).coordinate] = value
    xlsx.save(file)
    sys.exit(0)
print "xlsx2table: unknown function"
sys.exit(1)
|
jbrollREPO_NAMEstarbasePATH_START.@starbase_extracted@starbase-master@python@xlsx2table.py@.PATH_END.py
|
{
"filename": "scriptFixVLSS.py",
"repo_name": "bill-cotton/Obit",
"repo_path": "Obit_extracted/Obit-master/ObitSystem/Obit/share/scripts/scriptFixVLSS.py",
"type": "Python"
}
|
# python/Obit script to relabel the headers in VLSS images to J2000
# Arguments:
# 1) Name of Image to be updated
# NOTE(review): Python 2 script (print statements).
import sys, Obit, Image, ImageDesc, OSystem, OErr
# Init Obit
err=OErr.OErr()
#ObitSys=OSystem.OSystem ("Feather", 1, 100, 1, ["../AIPSdata/"], 1, ["../PythonData/"], 1, 0, err)
ObitSys=OSystem.OSystem ("Feather", 1, 100, 1, ["../AIPSdata/"], 1, ["/home/nraoweb/cv/content/4mass/MAPS/work/"], 1, 0, err)
OErr.printErrMsg(err, "Error with Obit startup")
# For debugging
#Obit.Bomb()
# Get file names
inFile = sys.argv[1]
inDisk = 1
outDisk = 1
equinox = 2000.0
# Debug
print "input",inFile
# Convert file into Images
inImage = Image.newPImage(inFile, inFile, inDisk, 1, err)
OErr.printErrMsg(err, "Error initializing image")
# Open/get descriptor
# NOTE(review): access code 3 presumably means read/write -- confirm in Obit.
Image.POpen(inImage, 3, err)
desc = Image.PGetDesc(inImage)
# update descriptor
descDict = ImageDesc.PGetDict(desc) # Get descriptor as Python Dict
descDict["epoch"] = equinox # Update "epoch"
descDict["equinox"] = equinox # Update equinox
descDict["origin"] = "Obit to fix equinox" # Update origin
ImageDesc.PSetDict(desc, descDict) # update descriptor
Image.PClose(inImage, err) # Close to update disk
OErr.printErrMsg(err, "Error writing updated header for "+Image.PGetName(inImage))
# Say something
print "Updated Equinox (EPOCH) in",inFile,"to",equinox
# Shutdown Obit
OErr.printErr(err)
del ObitSys
|
bill-cottonREPO_NAMEObitPATH_START.@Obit_extracted@Obit-master@ObitSystem@Obit@share@scripts@scriptFixVLSS.py@.PATH_END.py
|
{
"filename": "2022_10_19_093902_6d548701edef_add_created_by.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/src/prefect/server/database/_migrations/versions/postgresql/2022_10_19_093902_6d548701edef_add_created_by.py",
"type": "Python"
}
|
"""add_created_by
Revision ID: 6d548701edef
Revises: ad4b1b4d1e9d
Create Date: 2022-10-19 09:39:02.371032
"""
import sqlalchemy as sa
from alembic import op
import prefect
# revision identifiers, used by Alembic.
revision = "6d548701edef"
down_revision = "ad4b1b4d1e9d"
branch_labels = None
depends_on = None
def upgrade():
    """Add nullable created_by/updated_by JSON columns.

    ``deployment`` gains ``created_by`` and ``updated_by``;
    ``flow_run`` gains ``created_by``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column(
        "deployment",
        sa.Column(
            "created_by",
            prefect.server.utilities.database.Pydantic(
                prefect.server.schemas.core.CreatedBy
            ),
            nullable=True,
        ),
    )
    op.add_column(
        "deployment",
        sa.Column(
            "updated_by",
            prefect.server.utilities.database.Pydantic(
                prefect.server.schemas.core.UpdatedBy
            ),
            nullable=True,
        ),
    )
    op.add_column(
        "flow_run",
        sa.Column(
            "created_by",
            prefect.server.utilities.database.Pydantic(
                prefect.server.schemas.core.CreatedBy
            ),
            nullable=True,
        ),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the columns added by upgrade(), in reverse order."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column("flow_run", "created_by")
    op.drop_column("deployment", "updated_by")
    op.drop_column("deployment", "created_by")
    # ### end Alembic commands ###
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@src@prefect@server@database@_migrations@versions@postgresql@2022_10_19_093902_6d548701edef_add_created_by.py@.PATH_END.py
|
{
"filename": "test_3D.py",
"repo_name": "AFD-Illinois/iharm3d",
"repo_path": "iharm3d_extracted/iharm3d-master/prob/old_problems/mhdmodes3d/test/test_3D.py",
"type": "Python"
}
|
################################################################################
#                                                                              #
# SOD SHOCKTUBE                                                                #
#                                                                              #
################################################################################
# Convergence test for 3D MHD linear eigenmodes (entropy/slow/alfven/fast):
# reads the last dump of each resolution, compares against the analytic
# eigenmode, and checks that the L1 error converges as N^-2.
# NOTE(review): Python 2 script (print statements, xrange).
import os
import sys; sys.dont_write_bytecode = True
from subprocess import call
from shutil import copyfile
import glob
import numpy as np
#import h5py
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pylab as pl
sys.path.insert(0, '../../../script/')
sys.path.insert(0, '../../../script/analysis/')
import util
import hdf5_to_dict as io
AUTO = False
for arg in sys.argv:
  if arg == '-auto':
    AUTO = True
RES = [16, 32, 64]#, 128]
# LOOP OVER EIGENMODES
MODES = [0,1,2,3]
NAMES = ['ENTROPY', 'SLOW', 'ALFVEN', 'FAST']
NVAR = 8
VARS = ['rho', 'u', 'u1', 'u2', 'u3', 'B1', 'B2', 'B3']
amp = 1.e-4
k1 = 2.*np.pi
k2 = 2.*np.pi
k3 = 2.*np.pi
# background (equilibrium) state around which modes are perturbed
var0 = np.zeros(NVAR)
var0[0] = 1.
var0[1] = 1.
# Magnetic field
var0[5] = 1.
var0[6] = 0.
var0[7] = 0.
L1 = np.zeros([len(MODES), len(RES), NVAR])
powerfits = np.zeros([len(MODES), NVAR])
for n in xrange(len(MODES)):
  # EIGENMODES
  dvar = np.zeros(NVAR)
  if MODES[n] == 0: # ENTROPY
    dvar[0] = 1.
  if MODES[n] == 1: # SLOW/SOUND
    dvar[0] = 0.556500332363
    dvar[1] = 0.742000443151
    dvar[2] = -0.282334999306
    dvar[3] = 0.0367010491491
    dvar[4] = 0.0367010491491
    dvar[5] = -0.195509141461
    dvar[6] = 0.0977545707307
    dvar[7] = 0.0977545707307
  if MODES[n] == 2: # ALFVEN
#    dvar[4] = 0.480384461415
#    dvar[7] = 0.877058019307
    dvar[3] = -0.339683110243
    dvar[4] = 0.339683110243
    dvar[6] = 0.620173672946
    dvar[7] = -0.620173672946
  if MODES[n] == 3: # FAST
#    dvar[0] = 0.476395427447
#    dvar[1] = 0.635193903263
#    dvar[2] = -0.102965815319
#    dvar[3] = -0.316873207561
#    dvar[5] = 0.359559114174
#    dvar[6] = -0.359559114174
    dvar[0] = 0.481846076323;
    dvar[1] = 0.642461435098;
    dvar[2] = -0.0832240462505;
    dvar[3] = -0.224080007379;
    dvar[4] = -0.224080007379;
    dvar[5] = 0.406380545676;
    dvar[6] = -0.203190272838;
    dvar[7] = -0.203190272838;
  dvar *= amp
  # USE DUMPS IN FOLDERS OF GIVEN FORMAT
  for m in xrange(len(RES)):
    print '../dumps_' + str(RES[m]) + '_' + str(MODES[n])
    os.chdir('../dumps_' + str(RES[m]) + '_' + str(MODES[n]))
    # last dump corresponds to the final time of the run
    dfile = np.sort(glob.glob('dump*.h5'))[-1]
    hdr = io.load_hdr(dfile)
    geom = io.load_geom(hdr, dfile)
    dump = io.load_dump(hdr, geom, dfile)
    X1 = dump['x']
    X2 = dump['y']
    X3 = dump['z']
    # perturbation measured by the code: dump values minus the background
    dvar_code = []
    dvar_code.append(dump['RHO'] - var0[0])
    dvar_code.append(dump['UU'] - var0[1])
    dvar_code.append(dump['U1'] - var0[2])
    dvar_code.append(dump['U2'] - var0[3])
    dvar_code.append(dump['U3'] - var0[4])
    dvar_code.append(dump['B1'] - var0[5])
    dvar_code.append(dump['B2'] - var0[6])
    dvar_code.append(dump['B3'] - var0[7])
    dvar_sol = []
    for k in xrange(NVAR):
      dvar_sol.append(np.real(dvar[k])*np.cos(k1*X1 + k2*X2 + k3*X3))
      L1[n][m][k] = np.mean(np.fabs(dvar_code[k] - dvar_sol[k]))
    mid = RES[m]/2
    # Plot slice at each timestep
#    for fnum in xrange(len(np.sort(glob.glob('dump*.h5')))):
#      dfile = np.sort(glob.glob('dump*.h5'))[fnum]
#
#      hdr = io.load_hdr(dfile)
#      geom = io.load_geom(hdr, dfile)
#      dump = io.load_dump(hdr, geom, dfile)
#
#      X1 = dump['x']
#      X2 = dump['y']
#      X3 = dump['z']
#
#      dvar_code = []
#      dvar_code.append(dump['RHO'] - var0[0])
#      dvar_code.append(dump['UU'] - var0[1])
#      dvar_code.append(dump['U1'] - var0[2])
#      dvar_code.append(dump['U2'] - var0[3])
#      dvar_code.append(dump['U3'] - var0[4])
#      dvar_code.append(dump['B1'] - var0[5])
#      dvar_code.append(dump['B2'] - var0[6])
#      dvar_code.append(dump['B3'] - var0[7])
#
#      dvar_plane = []
#      for k in xrange(NVAR):
#        dvar_plane.append(np.zeros((dump['N1'], dump['N2'])))
#        for i in xrange(dump['N1']):
#          for j in xrange(dump['N2']):
#            dvar_plane[k][i,j] = dvar_code[k][i,j,int(i/2 + j/2)]
#
#      # Plot dvar
#      for k in xrange(NVAR):
#        if abs(dvar[k]) != 0.:
#          fig = plt.figure(figsize=(16.18,10))
#          ax = fig.add_subplot(1,1,1)
#          ax.pcolormesh(X1[:,:,mid], X2[:,:,mid], dvar_code[k][:,:,mid], label=VARS[k])
#          #ax.plot(X1[:,mid,mid], dvar_sol[k][:,mid,mid], marker='s', label=(VARS[k] + " analytic"))
#          plt.title(NAMES[MODES[n]] + ' ' + VARS[k] + ' ' + str(RES[m]))
#          plt.legend(loc=1)
#          plt.savefig('../test/modes_' + NAMES[MODES[n]] + '_' + VARS[k] + '_' + str(RES[m]) + '_' + str(fnum) + '.png', bbox_inches='tight')
  # MEASURE CONVERGENCE
  for k in xrange(NVAR):
    if abs(dvar[k]) != 0.:
      powerfits[n,k] = np.polyfit(np.log(RES), np.log(L1[n,:,k]), 1)[0]
  os.chdir('../test')
  if not AUTO:
    # MAKE PLOTS
    fig = plt.figure(figsize=(16.18,10))
    ax = fig.add_subplot(1,1,1)
    for k in xrange(NVAR):
      if abs(dvar[k]) != 0.:
        ax.plot(RES, L1[n,:,k], marker='s', label=VARS[k])
    ax.plot([RES[0]/2., RES[-1]*2.],
      10.*amp*np.asarray([RES[0]/2., RES[-1]*2.])**-2.,
      color='k', linestyle='--', label='N^-2')
    plt.xscale('log', basex=2); plt.yscale('log')
    plt.xlim([RES[0]/np.sqrt(2.), RES[-1]*np.sqrt(2.)])
    plt.xlabel('N'); plt.ylabel('L1')
    plt.title(NAMES[MODES[n]])
    plt.legend(loc=1)
    plt.savefig('mhdmodes3d_' + NAMES[MODES[n]] + '.png', bbox_inches='tight')
if AUTO:
  # machine-readable result for the automated test harness
  data = {}
  data['SOL'] = -2.*np.zeros([len(MODES), NVAR])
  data['CODE'] = powerfits
  import pickle
  pickle.dump(data, open('data.p', 'wb'))
|
AFD-IllinoisREPO_NAMEiharm3dPATH_START.@iharm3d_extracted@iharm3d-master@prob@old_problems@mhdmodes3d@test@test_3D.py@.PATH_END.py
|
{
"filename": "expand_import_all.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/pythran/pythran/transformations/expand_import_all.py",
"type": "Python"
}
|
""" ExpandImportAll replaces import * by all their modules. """
from pythran.passmanager import Transformation
from pythran.tables import MODULES
import gast as ast
class ExpandImportAll(Transformation):
    '''
    Expands all import when '*' detected
    >>> import gast as ast
    >>> from pythran import passmanager, backend
    >>> node = ast.parse("from math import *")
    >>> pm = passmanager.PassManager("test")
    >>> _, node = pm.apply(ExpandImportAll, node)
    >>> print(pm.dump(backend.Python, node))
    from math import acos, acosh, asin, asinh, atan, atan2, atanh, ceil, \
copysign, cos, cosh, degrees, e, erf, erfc, exp, expm1, fabs, factorial, \
floor, fmod, frexp, gamma, hypot, isinf, isnan, ldexp, lgamma, log, log10, \
log1p, modf, pi, pow, radians, sin, sinh, sqrt, tan, tanh, trunc
    '''

    def visit_ImportFrom(self, node):
        """Replace a ``from X import *`` with X's full sorted symbol list."""
        for alias in node.names:
            if alias.name == '*':
                self.update = True
                # NOTE(review): pop() removes the *last* alias and the list is
                # extended while being iterated -- this presumably relies on
                # '*' being the only alias in the statement; confirm upstream.
                node.names.pop()
                node.names.extend(ast.alias(fname, None)
                                  for fname in sorted(MODULES[node.module]))
        return node
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@pythran@pythran@transformations@expand_import_all.py@.PATH_END.py
|
{
"filename": "converters.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/preprocessing/image_preprocessing/bounding_boxes/converters.py",
"type": "Python"
}
|
from keras.src.api_export import keras_export
from keras.src.layers.preprocessing.image_preprocessing.bounding_boxes.bounding_box import ( # noqa: E501
BoundingBox,
)
from keras.src.utils import backend_utils
@keras_export("keras.utils.bounding_boxes.convert_format")
def convert_format(
    boxes, source, target, height=None, width=None, dtype="float32"
):
    """Convert bounding boxes from the `source` to the `target` format."""
    converter = BoundingBox()
    # Inside a tf.data pipeline the ops must run on the TensorFlow backend.
    if backend_utils.in_tf_graph():
        converter.backend.set_backend("tensorflow")
    converted = converter.convert_format(
        boxes=boxes,
        source=source,
        target=target,
        height=height,
        width=width,
        dtype=dtype,
    )
    # Restore whatever backend was active before the call.
    converter.backend.reset()
    return converted
@keras_export("keras.utils.bounding_boxes.clip_to_image_size")
def clip_to_image_size(bounding_boxes, height=None, width=None, format="xyxy"):
    """Clip bounding boxes to lie within an image of the given size."""
    converter = BoundingBox()
    # Inside a tf.data pipeline the ops must run on the TensorFlow backend.
    if backend_utils.in_tf_graph():
        converter.backend.set_backend("tensorflow")
    clipped = converter.clip_to_image_size(
        bounding_boxes, height=height, width=width, format=format
    )
    # Restore whatever backend was active before the call.
    converter.backend.reset()
    return clipped
@keras_export("keras.utils.bounding_boxes.affine_transform")
def affine_transform(
boxes,
angle,
translate_x,
translate_y,
scale,
shear_x,
shear_y,
height,
width,
center_x=None,
center_y=None,
format="xyxy",
):
if format != "xyxy":
raise NotImplementedError
# Switch to tensorflow backend if we are in tf.data pipe
box_utils = BoundingBox()
if backend_utils.in_tf_graph():
box_utils.backend.set_backend("tensorflow")
outputs = box_utils.affine(
boxes,
angle,
translate_x,
translate_y,
scale,
shear_x,
shear_y,
height,
width,
center_x=center_x,
center_y=center_y,
)
box_utils.backend.reset()
return outputs
@keras_export("keras.utils.bounding_boxes.crop")
def crop(boxes, top, left, height, width, format="xyxy"):
if format != "xyxy":
raise NotImplementedError
box_utils = BoundingBox()
if backend_utils.in_tf_graph():
box_utils.backend.set_backend("tensorflow")
outputs = box_utils.crop(boxes, top, left, height, width)
box_utils.backend.reset()
return outputs
@keras_export("keras.utils.bounding_boxes.pad")
def pad(boxes, top, left, format="xyxy"):
if format != "xyxy":
raise NotImplementedError
box_utils = BoundingBox()
if backend_utils.in_tf_graph():
box_utils.backend.set_backend("tensorflow")
outputs = box_utils.pad(boxes, top, left)
box_utils.backend.reset()
return outputs
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@preprocessing@image_preprocessing@bounding_boxes@converters.py@.PATH_END.py
|
{
"filename": "python-reference_utils_get_gpu_device_count.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/python-reference_utils_get_gpu_device_count.md",
"type": "Markdown"
}
|
# get_gpu_device_count
{% include [utils-utils__get-gpu-device-count__desc](../_includes/work_src/reusage-python/utils__get-gpu-device-count__desc.md) %}
{% note info %}
- The returned value is <q>0</q> if the installed or compiled package does not support training on GPU.
- Use the `CUDA_VISIBLE_DEVICES` environment variable to limit the list of available devices.
{% endnote %}
## {{ dl--invoke-format }} {#call-format}
```python
get_gpu_device_count()
```
## {{ dl__usage-examples }} {#usage-examples}
```python
from catboost.utils import get_gpu_device_count
print('I see %i GPU devices' % get_gpu_device_count())
```
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@python-reference_utils_get_gpu_device_count.md@.PATH_END.py
|
{
"filename": "_ticktext.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/bar/marker/colorbar/_ticktext.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextValidator(_plotly_utils.basevalidators.DataArrayValidator):
def __init__(
self, plotly_name="ticktext", parent_name="bar.marker.colorbar", **kwargs
):
super(TicktextValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@bar@marker@colorbar@_ticktext.py@.PATH_END.py
|
{
"filename": "temper_file.py",
"repo_name": "sambit-giri/tools21cm",
"repo_path": "tools21cm_extracted/tools21cm-master/src/tools21cm/temper_file.py",
"type": "Python"
}
|
from . import const
import numpy as np
from .helper_functions import print_msg
class TemperFile:
'''
A C2Ray Temper3D file.
Use the read_from_file method to load an Temper3D file, or
pass the filename to the constructor.
Some useful attributes of this class are:
* temper (numpy array): the temperature
* z (float): the redshift of the file (-1 if it couldn't be determined from the file name)
'''
def __init__(self, filename = None, old_format=False):
'''
Initialize the file. If filename is given, read data. Otherwise,
do nothing.
Parameters:
* filename = None (string): the file to read from.
* old_format = False (bool): whether to use the old-style
file format.
Returns:
Nothing
'''
if filename:
self.read_from_file(filename, old_format)
def read_from_file(self, filename, old_format=False):
'''
Read data from file.
Parameters:
* filename (string): the file to read from.
* old_format = False (bool): whether to use the old-style (32 bits)
file format.
Returns:
Nothing
'''
print_msg('Reading Temper3D file:%s...' % filename)
self.filename = filename
f = open(filename, 'rb')
temp_mesh = np.fromfile(f, count=6, dtype='int32')
self.mesh_x, self.mesh_y, self.mesh_z = temp_mesh[1:4]
if old_format:
self.temper = np.fromfile(f, dtype='float32')
else:
self.temper = np.fromfile(f, count=self.mesh_x*self.mesh_y*self.mesh_z, dtype='float32')
self.temper = self.temper.reshape((self.mesh_x, self.mesh_y, self.mesh_z), order='F')
f.close()
print_msg('...done')
#Store the redshift from the filename
import os.path
try:
name = os.path.split(filename)[1]
self.z = float(name.split('_')[1][:-4])
except:
print_msg('Could not determine redshift from file name')
self.z = -1
|
sambit-giriREPO_NAMEtools21cmPATH_START.@tools21cm_extracted@tools21cm-master@src@tools21cm@temper_file.py@.PATH_END.py
|
{
"filename": "test_special.py",
"repo_name": "numba/numba-scipy",
"repo_path": "numba-scipy_extracted/numba-scipy-main/numba_scipy/tests/test_special.py",
"type": "Python"
}
|
import itertools
import warnings
import pytest
from unittest.mock import patch, Mock
import numpy as np
from numpy.testing import assert_allclose
import numba
from numba.types import float64
import scipy.special as sc
from numba_scipy.special import signatures as special_signatures
from numba_scipy.special.signatures import (parse_capsule_name,
de_mangle_function_name,
get_signatures_from_pyx_capi,
generate_signatures_dicts,
)
NUMBA_TYPES_TO_TEST_POINTS = {
numba.types.float64: np.array(
[-100.0, -10.0, -1.0, -0.1, 0.0, 0.1, 1.0, 10.0, 100.0],
dtype=np.float64
),
numba.types.float32: np.array(
[-100.0, -10.0, -1.0, -0.1, 0.0, 0.1, 1.0, 10.0, 100.0],
dtype=np.float32
),
numba.types.long_: np.array(
[-100, -10, -1, 0, 1, 10, 100],
dtype=np.int_
)
}
SKIP_LIST = {
# Should be fixed by https://github.com/scipy/scipy/pull/10455
(
'hyperu',
(numba.types.float64,) * 3
),
# Sometimes returns nan, sometimes returns inf. Likely a SciPy bug.
(
'eval_jacobi',
(numba.types.float64,) * 4
),
# Sometimes returns nan, sometimes returns inf. Likely a SciPy bug.
(
'eval_sh_jacobi',
(numba.types.float64,) * 4
)
}
def compare_functions(args, scipy_func, numba_func):
for arg in args:
overload_value = numba_func(*arg)
scipy_value = scipy_func(*arg)
if np.isnan(overload_value):
assert np.isnan(scipy_value)
else:
rtol = 2**8 * np.finfo(scipy_value.dtype).eps
assert_allclose(overload_value, scipy_value, atol=0, rtol=rtol)
def get_parametrize_arguments():
signatures = special_signatures.name_to_numba_signatures.items()
for name, specializations in signatures:
for signature in specializations:
yield name, signature
def test_parse_capsule_name():
input_capsule = ('<capsule object "double (double, double, double, '
'int __pyx_skip_dispatch)" at 0x7f8c8d5f5150>')
expected = ['double', 'double', 'double', 'double']
received = parse_capsule_name(input_capsule)
assert received == expected
def test_parse_capsule_name_with_invalid_capsule():
with pytest.raises(ValueError) as excinfo:
input_capsule = '<TESTING object "double (double)" at 0x7f>'
parse_capsule_name(input_capsule)
assert "Unexpected capsule name" in str(excinfo.value)
def test_parse_capsule_name_with_invalid_signature():
with pytest.raises(ValueError) as excinfo:
input_capsule = '<capsule object "TESTING" at 0x7f>'
parse_capsule_name(input_capsule)
assert "Unexpected signature" in str(excinfo.value)
def test_de_mangle_function_name():
mangled_name = "__pyx_fuse_0pdtr"
received = de_mangle_function_name(mangled_name)
expected = "pdtr"
assert expected == received
def test_de_mangle_function_name_with_invalid_name():
with pytest.raises(ValueError) as excinfo:
# The empty string was the only thing that the regex didn't recognise.
mangled_name = ""
print(de_mangle_function_name(mangled_name))
assert "Unexpected mangled name" in str(excinfo.value)
@patch("numba_scipy.special.signatures.cysc")
def test_get_signatures_from_pyx_capi(cysc_mock):
capsule = ('<capsule object "double (double, double, double, '
'int __pyx_skip_dispatch)" at 0x7f8c8d5f5150>')
mangled_name = "__pyx_fuse_0pdtr"
cysc_mock.__pyx_capi__ = {
mangled_name: capsule
}
expected = {
('__pyx_fuse_0pdtr', float64, float64, float64, float64):
('<capsule object "double (double, double, double, '
'int __pyx_skip_dispatch)" at 0x7f8c8d5f5150>')
}
received = get_signatures_from_pyx_capi()
assert expected == received
@patch("numba_scipy.special.signatures.get_cython_function_address", Mock())
@patch("numba_scipy.special.signatures.ctypes.CFUNCTYPE",
Mock(return_value=Mock(return_value='0123456789')))
def test_generate_signatures_dicts():
signature_to_pointer = {
('__pyx_fuse_0pdtr', float64, float64, float64, float64):
('<capsule object "double (double, double, double, '
'int __pyx_skip_dispatch)" at 0x7f8c8d5f5150>')
}
expected = ({'pdtr': ((float64, float64, float64),)},
{('pdtr', float64, float64, float64): "0123456789"})
received = generate_signatures_dicts(signature_to_pointer)
assert expected == received
def test_ensure_signatures_generated():
from numba_scipy.special.signatures import (name_to_numba_signatures,
name_and_types_to_pointer,
signature_to_pointer,
)
assert len(name_to_numba_signatures) != 0
assert len(name_and_types_to_pointer) != 0
assert len(signature_to_pointer) != 0
assert (len(name_and_types_to_pointer) ==
len(signature_to_pointer))
@pytest.mark.parametrize(
'name, specialization',
get_parametrize_arguments(),
)
def test_function(name, specialization):
if (name, specialization) in SKIP_LIST:
pytest.xfail()
scipy_func = getattr(sc, name)
@numba.njit
def numba_func(*args):
return scipy_func(*args)
args = itertools.product(*(
NUMBA_TYPES_TO_TEST_POINTS[numba_type] for numba_type in specialization
))
with warnings.catch_warnings():
# Ignore warnings about unsafe casts generated by SciPy.
warnings.filterwarnings(
action='ignore',
message='floating point number truncated to an integer',
category=RuntimeWarning,
)
compare_functions(args, scipy_func, numba_func)
|
numbaREPO_NAMEnumba-scipyPATH_START.@numba-scipy_extracted@numba-scipy-main@numba_scipy@tests@test_special.py@.PATH_END.py
|
{
"filename": "test_events.py",
"repo_name": "StingraySoftware/stingray",
"repo_path": "stingray_extracted/stingray-main/stingray/tests/test_events.py",
"type": "Python"
}
|
import importlib
import copy
import os
import pytest
import numpy as np
from astropy.time import Time
from ..events import EventList
from ..lightcurve import Lightcurve
curdir = os.path.abspath(os.path.dirname(__file__))
datadir = os.path.join(curdir, "data")
_HAS_XARRAY = importlib.util.find_spec("xarray") is not None
_HAS_PANDAS = importlib.util.find_spec("pandas") is not None
_HAS_H5PY = importlib.util.find_spec("h5py") is not None
_HAS_YAML = importlib.util.find_spec("yaml") is not None
class TestEvents(object):
@classmethod
def setup_class(self):
np.random.seed(57239875)
self.time = [0.5, 1.5, 2.5, 3.5]
self.counts = [3000, 2000, 2200, 3600]
self.counts_flat = [3000, 3000, 3000, 3000]
self.spectrum = [[1, 2, 3, 4, 5, 6], [1000, 2040, 1000, 3000, 4020, 2070]]
self.gti = np.asanyarray([[0, 4]])
def test_warn_wrong_keywords(self):
with pytest.warns(UserWarning) as record:
_ = EventList(self.time, self.counts, gti=self.gti, bubu="settete")
assert np.any(["Unrecognized keywords:" in r.message.args[0] for r in record])
def test_warn_wrong_keywords_ncounts(self):
with pytest.warns(DeprecationWarning, match="The ncounts keyword does nothing"):
_ = EventList(self.time, self.counts, gti=self.gti, ncounts=10)
def test_initiate_from_ndarray(self):
times = np.sort(np.random.uniform(1e8, 1e8 + 1000, 101).astype(np.longdouble))
ev = EventList(times, mjdref=54600)
assert np.allclose(ev.time, times, atol=1e-15)
assert np.allclose(ev.mjdref, 54600)
def test_print(self):
times = [1.01, 2, 3]
ev = EventList(times, mjdref=54600)
print(ev)
def test_initiate_from_astropy_time(self):
times = np.sort(np.random.uniform(1e8, 1e8 + 1000, 101).astype(np.longdouble))
mjdref = 54600
mjds = Time(mjdref + times / 86400, format="mjd")
ev = EventList(mjds, mjdref=mjdref)
assert np.allclose(ev.time, times, atol=1e-15)
assert np.allclose(ev.mjdref, mjdref)
def test_create_high_precision_object(self):
times = np.sort(np.random.uniform(1e8, 1e8 + 1000, 101).astype(np.longdouble))
ev = EventList(times, high_precision=True)
assert np.allclose(ev.time, times, atol=1e-15)
def test_inequal_length(self):
"""Check that exception is raised in case of
disparity in length of 'time' and 'energy'
"""
with pytest.raises(ValueError):
EventList(time=[1, 2, 3], energy=[10, 12])
def test_concatenate(self):
"""Join two overlapping event lists."""
ev = EventList(time=[1, 1.1, 2, 3, 4], energy=[3, 4, 7, 4, 3], gti=[[1, 2], [3, 4]])
ev_other1 = EventList(time=[5, 6, 6.1], energy=[4, 3, 8], gti=[[6, 6.2]])
ev_other2 = EventList(time=[7, 10], energy=[1, 2], gti=[[6.5, 7]])
ev_new = ev.concatenate([ev_other1, ev_other2])
assert (ev_new.time == np.array([1, 1.1, 2, 3, 4, 5, 6, 6.1, 7, 10])).all()
assert (ev_new.energy == np.array([3, 4, 7, 4, 3, 4, 3, 8, 1, 2])).all()
assert (ev_new.gti == np.array([[1, 2], [3, 4], [6, 6.2], [6.5, 7]])).all()
def test_to_lc(self):
"""Create a light curve from event list."""
ev = EventList(self.time, gti=self.gti)
lc = ev.to_lc(1)
assert np.allclose(lc.time, [0.5, 1.5, 2.5, 3.5])
assert (lc.gti == self.gti).all()
def test_to_timeseries(self):
"""Create a time series from event list."""
ev = EventList(self.time, gti=self.gti)
ev.bla = np.zeros_like(ev.time) + 2
lc = ev.to_lc(1)
ts = ev.to_binned_timeseries(1)
assert np.allclose(ts.time, [0.5, 1.5, 2.5, 3.5])
assert (ts.gti == self.gti).all()
assert np.array_equal(ts.counts, lc.counts)
assert np.array_equal(ts.bla, ts.counts * 2)
def test_from_lc(self):
"""Load event list from lightcurve"""
lc = Lightcurve(time=[0.5, 1.5, 2.5], counts=[2, -1, 2])
ev = EventList.from_lc(lc)
assert np.array_equal(ev.time, np.array([0.5, 0.5, 2.5, 2.5]))
def test_simulate_times_warns_bin_time(self):
"""Simulate photon arrival times for an event list
from light curve.
"""
lc = Lightcurve(self.time, self.counts_flat, gti=self.gti)
ev = EventList()
with pytest.warns(DeprecationWarning) as record:
ev.simulate_times(lc, bin_time=lc.dt)
assert np.any(["Bin time will be ignored" in r.message.args[0] for r in record])
lc_sim = ev.to_lc(dt=lc.dt, tstart=lc.tstart, tseg=lc.tseg)
assert np.all((lc - lc_sim).counts < 3 * np.sqrt(lc.counts))
@pytest.mark.parametrize("use_spline", [True, False])
def test_simulate_times(self, use_spline):
"""Simulate photon arrival times, with use_spline option
enabled.
"""
lc = Lightcurve(self.time, self.counts_flat, gti=self.gti)
ev = EventList()
ev.simulate_times(lc, use_spline=use_spline)
lc_sim = ev.to_lc(dt=lc.dt, tstart=lc.tstart, tseg=lc.tseg)
assert np.all((lc - lc_sim).counts < 3 * np.sqrt(lc.counts))
def test_simulate_energies(self):
"""Assign photon energies to an event list."""
ev = EventList(np.arange(10))
ev.simulate_energies(self.spectrum)
def test_simulate_energies_with_1d_spectrum(self):
"""Test that simulate_energies() method raises index
error exception is spectrum is 1-d.
"""
ev = EventList(np.arange(10))
with pytest.raises(IndexError):
ev.simulate_energies(self.spectrum[0])
def test_simulate_energies_with_wrong_spectrum_type(self):
"""Test that simulate_energies() method raises type error
exception when wrong spectrum type is supplied.
"""
ev = EventList(np.arange(10))
with pytest.raises(TypeError):
ev.simulate_energies(1)
def test_simulate_energies_with_counts_not_set(self):
ev = EventList()
with pytest.warns(UserWarning, match="empty event list"):
ev.simulate_energies(self.spectrum)
def test_compare_energy(self):
"""Compare the simulated energy distribution to actual distribution."""
fluxes = np.array(self.spectrum[1])
ev = EventList(np.arange(1000))
ev.simulate_energies(self.spectrum)
# Note: I'm passing the edges: when the bin center is 1, the
# edge is at 0.5
lc, _ = np.histogram(ev.energy, bins=np.arange(0.5, 7, 1))
# Calculate probabilities and compare
lc_prob = lc / float(sum(lc))
fluxes_prob = fluxes / float(sum(fluxes))
assert np.all(np.abs(lc_prob - fluxes_prob) < 3 * np.sqrt(fluxes_prob))
assert np.all((ev.energy >= 0.5) & (ev.energy < 6.5))
@pytest.mark.skipif("not (_HAS_YAML)")
def test_io_with_ascii(self):
ev = EventList(self.time)
with pytest.warns(UserWarning, match=".* output does not serialize the metadata"):
ev.write("ascii_ev.ecsv", fmt="ascii")
ev = ev.read("ascii_ev.ecsv", fmt="ascii")
assert np.allclose(ev.time, self.time)
os.remove("ascii_ev.ecsv")
def test_io_with_pickle(self):
ev = EventList(self.time, mjdref=54000)
ev.write("ev.pickle", fmt="pickle")
ev = ev.read("ev.pickle", fmt="pickle")
assert np.allclose(ev.time, self.time)
os.remove("ev.pickle")
@pytest.mark.skipif("not _HAS_H5PY")
def test_io_with_hdf5_auto(self):
ev = EventList(time=self.time, mjdref=54000)
ev.write("ev.hdf5")
ev = ev.read("ev.hdf5")
assert np.allclose(ev.time, self.time)
os.remove("ev.hdf5")
@pytest.mark.skipif("not _HAS_H5PY")
def test_io_with_hdf5(self):
ev = EventList(time=self.time, mjdref=54000)
ev.write("ev.hdf5", fmt="hdf5")
ev = ev.read("ev.hdf5", fmt="hdf5")
assert np.allclose(ev.time, self.time)
os.remove("ev.hdf5")
def test_io_with_fits(self):
ev = EventList(time=self.time, mjdref=54000)
with pytest.warns(UserWarning, match=".* output does not serialize the metadata"):
ev.write("ev.fits", fmt="fits")
ev = ev.read("ev.fits", fmt="fits")
assert np.allclose(ev.time, self.time)
os.remove("ev.fits")
def test_calibrate_directly_warns(self):
rmf_file = os.path.join(datadir, "test.rmf")
with pytest.warns(UserWarning, match="PI channels must be provided"):
EventList(time=self.time, mjdref=54000, rmf_file=rmf_file)
def test_calibrate_directly(self):
rmf_file = os.path.join(datadir, "test.rmf")
pis = np.random.randint(0, 1000, np.size(self.time))
ev1 = EventList(time=self.time, pi=pis, mjdref=54000, rmf_file=rmf_file)
ev2 = EventList(time=self.time, pi=pis, mjdref=54000)
ev2.convert_pi_to_energy(rmf_file)
assert np.array_equal(ev1.energy, ev2.energy)
def test_fits_standard(self):
"""Test that fits works with a standard event list
file.
"""
fname = os.path.join(datadir, "monol_testA.evt")
ev = EventList()
ev = ev.read(fname, fmt="hea")
assert np.isclose(ev.mjdref, 55197.00076601852)
def test_fits_with_standard_file_and_calibrate_directly(self):
"""Test that fits works and calibration works."""
fname = os.path.join(datadir, "monol_testA.evt")
rmf_file = os.path.join(datadir, "test.rmf")
ev1 = EventList()
ev1 = ev1.read(fname, fmt="hea")
ev2 = EventList()
ev2 = ev2.read(fname, fmt="hea", rmf_file=rmf_file)
ev1.convert_pi_to_energy(rmf_file)
assert np.array_equal(ev1.energy, ev2.energy)
def test_fits_with_additional(self):
"""Test that fits works with a standard event list
file.
"""
fname = os.path.join(datadir, "xmm_test.fits")
ev = EventList()
with pytest.warns(UserWarning, match="HDU EVENTS not found"):
ev = ev.read(fname, fmt="hea", additional_columns=["PRIOR"])
assert hasattr(ev, "prior")
def test_timeseries_empty_evts(self):
N = len(self.time)
ev = EventList()
ts = ev.to_astropy_timeseries()
assert len(ts.columns) == 0
def test_timeseries_roundtrip(self):
N = len(self.time)
ev = EventList(
time=self.time,
gti=self.gti,
energy=np.zeros(N),
pi=np.ones(N),
mission="BUBU",
instr="BABA",
mjdref=53467.0,
)
ts = ev.to_astropy_timeseries()
new_ev = ev.from_astropy_timeseries(ts)
for attr in ["time", "energy", "pi", "gti"]:
assert np.allclose(getattr(ev, attr), getattr(new_ev, attr))
for attr in ["mission", "instr", "mjdref"]:
assert getattr(ev, attr) == getattr(new_ev, attr)
def test_table_roundtrip(self):
N = len(self.time)
ev = EventList(
time=self.time,
gti=self.gti,
energy=np.zeros(N),
pi=np.ones(N),
mission="BUBU",
instr="BABA",
mjdref=53467.0,
)
ts = ev.to_astropy_table()
new_ev = ev.from_astropy_table(ts)
for attr in ["time", "energy", "pi", "gti"]:
assert np.allclose(getattr(ev, attr), getattr(new_ev, attr))
for attr in ["mission", "instr", "mjdref"]:
assert getattr(ev, attr) == getattr(new_ev, attr)
@pytest.mark.skipif("not _HAS_XARRAY")
def test_xarray_roundtrip(self):
N = len(self.time)
ev = EventList(
time=self.time,
gti=self.gti,
energy=np.zeros(N),
pi=np.ones(N),
mission="BUBU",
instr="BABA",
mjdref=53467.0,
)
ts = ev.to_xarray()
new_ev = ev.from_xarray(ts)
for attr in ["time", "energy", "pi", "gti"]:
assert np.allclose(getattr(ev, attr), getattr(new_ev, attr))
for attr in ["mission", "instr", "mjdref"]:
assert getattr(ev, attr) == getattr(new_ev, attr)
@pytest.mark.skipif("not _HAS_PANDAS")
def test_pandas_roundtrip(self):
N = len(self.time)
ev = EventList(
time=self.time,
gti=self.gti,
energy=np.zeros(N),
pi=np.ones(N),
mission="BUBU",
instr="BABA",
mjdref=53467.0,
)
ts = ev.to_pandas()
new_ev = ev.from_pandas(ts)
for attr in ["time", "energy", "pi", "gti"]:
assert np.allclose(getattr(ev, attr), getattr(new_ev, attr))
for attr in ["mission", "instr", "mjdref"]:
assert getattr(ev, attr) == getattr(new_ev, attr)
class TestJoinEvents:
def test_join_without_times_simulated(self):
"""Test if exception is raised when join method is
called before first simulating times.
"""
ev = EventList()
ev_other = EventList()
with pytest.warns(UserWarning, match="One of the time series you are joining is empty."):
assert ev.join(ev_other, strategy="union").time is None
def test_join_empty_lists(self):
"""Test if an empty event list can be concatenated
with a non-empty event list.
"""
ev = EventList(time=[1, 2, 3])
ev_other = EventList()
with pytest.warns(UserWarning, match="One of the time series you are joining is empty."):
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.time, [1, 2, 3])
ev = EventList()
ev_other = EventList(time=[1, 2, 3])
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.time, [1, 2, 3])
ev = EventList()
ev_other = EventList()
with pytest.warns(UserWarning, match="One of the time series you are joining is empty."):
ev_new = ev.join(ev_other, strategy="union")
assert ev_new.time is None
assert ev_new.gti is None
assert ev_new.pi is None
assert ev_new.energy is None
ev = EventList(time=[1, 2, 3])
ev_other = EventList([])
with pytest.warns(UserWarning, match="One of the time series you are joining is empty."):
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.time, [1, 2, 3])
ev = EventList([])
ev_other = EventList(time=[1, 2, 3])
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.time, [1, 2, 3])
def test_join_different_dt(self):
ev = EventList(time=[10, 20, 30], dt=1)
ev_other = EventList(time=[40, 50, 60], dt=3)
with pytest.warns(UserWarning, match="The time resolution is different."):
ev_new = ev.join(ev_other, strategy="union")
assert np.array_equal(ev_new.dt, [1, 1, 1, 3, 3, 3])
assert np.allclose(ev_new.time, [10, 20, 30, 40, 50, 60])
def test_join_different_instr(self):
ev = EventList(time=[10, 20, 30], instr="fpma")
ev_other = EventList(time=[40, 50, 60], instr="fpmb")
with pytest.warns(
UserWarning,
match="Attribute instr is different in the time series being merged.",
):
ev_new = ev.join(ev_other, strategy="union")
assert ev_new.instr == "fpma,fpmb"
def test_join_different_meta_attribute(self):
ev = EventList(time=[10, 20, 30])
ev_other = EventList(time=[40, 50, 60])
ev_other.bubu = "settete"
ev.whatstheanswer = 42
ev.unmovimentopara = "arriba"
ev_other.unmovimentopara = "abajo"
with pytest.warns(
UserWarning,
match=(
"Attribute (bubu|whatstheanswer|unmovimentopara) is different "
"in the time series being merged."
),
):
ev_new = ev.join(ev_other, strategy="union")
assert ev_new.bubu == (None, "settete")
assert ev_new.whatstheanswer == (42, None)
assert ev_new.unmovimentopara == "arriba,abajo"
def test_join_without_energy(self):
ev = EventList(time=[1, 2, 3], energy=[3, 3, 3])
ev_other = EventList(time=[4, 5])
with pytest.warns(
UserWarning, match="The energy array is empty in one of the time series being merged."
):
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.energy, [3, 3, 3, np.nan, np.nan], equal_nan=True)
def test_join_without_pi(self):
ev = EventList(time=[1, 2, 3], pi=[3, 3, 3])
ev_other = EventList(time=[4, 5])
with pytest.warns(
UserWarning, match="The pi array is empty in one of the time series being merged."
):
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.pi, [3, 3, 3, np.nan, np.nan], equal_nan=True)
def test_join_with_arbitrary_attribute(self):
ev = EventList(time=[1, 2, 4])
ev_other = EventList(time=[3, 5])
ev.u = [3, 3, 3]
ev_other.q = [1, 2]
with pytest.warns(
UserWarning, match="The (u|q) array is empty in one of the time series being merged."
):
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.q, [np.nan, np.nan, 1, np.nan, 2], equal_nan=True)
assert np.allclose(ev_new.u, [3, 3, np.nan, 3, np.nan], equal_nan=True)
def test_join_with_gti_none(self):
ev = EventList(time=[1, 2, 3])
ev_other = EventList(time=[4, 5], gti=[[3.5, 5.5]])
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.gti, [[1, 3], [3.5, 5.5]])
ev = EventList(time=[1, 2, 3], gti=[[0.5, 3.5]])
ev_other = EventList(time=[4, 5])
ev_new = ev.join(ev_other, strategy="union")
assert np.allclose(ev_new.gti, [[0.5, 3.5], [4, 5]])
ev = EventList(time=[1, 2, 3])
ev_other = EventList(time=[4, 5])
ev_new = ev.join(ev_other, strategy="union")
assert ev_new._gti is None
def test_non_overlapping_join_infer(self):
"""Join two overlapping event lists."""
ev = EventList(time=[1, 1.1, 2, 3, 4], energy=[3, 4, 7, 4, 3], gti=[[1, 2], [3, 4]])
ev_other = EventList(time=[5, 6, 6.1, 7, 10], energy=[4, 3, 8, 1, 2], gti=[[6, 7]])
ev_new = ev.join(ev_other, strategy="infer")
assert (ev_new.time == np.array([1, 1.1, 2, 3, 4, 5, 6, 6.1, 7, 10])).all()
assert (ev_new.energy == np.array([3, 4, 7, 4, 3, 4, 3, 8, 1, 2])).all()
assert (ev_new.gti == np.array([[1, 2], [3, 4], [6, 7]])).all()
def test_overlapping_join_infer(self):
"""Join two non-overlapping event lists."""
with pytest.warns(UserWarning, match="The time array is not sorted."):
ev = EventList(time=[1, 1.1, 10, 6, 5], energy=[10, 6, 3, 11, 2], gti=[[1, 3], [5, 6]])
with pytest.warns(UserWarning, match="The time array is not sorted."):
ev_other = EventList(
time=[5.1, 7, 6.1, 6.11, 10.1], energy=[2, 3, 8, 1, 2], gti=[[5, 7], [8, 10]]
)
ev_new = ev.join(ev_other, strategy="infer")
assert (ev_new.time == np.array([1, 1.1, 5, 5.1, 6, 6.1, 6.11, 7, 10, 10.1])).all()
assert (ev_new.energy == np.array([10, 6, 2, 2, 11, 8, 1, 3, 3, 2])).all()
assert (ev_new.gti == np.array([[5, 6]])).all()
def test_overlapping_join_change_mjdref(self):
"""Join two non-overlapping event lists."""
with pytest.warns(UserWarning, match="The time array is not sorted."):
ev = EventList(
time=[1, 1.1, 10, 6, 5],
energy=[10, 6, 3, 11, 2],
gti=[[1, 3], [5, 6]],
mjdref=57001,
)
with pytest.warns(UserWarning, match="The time array is not sorted."):
ev_other = EventList(
time=np.asanyarray([5.1, 7, 6.1, 6.11, 10.1]) + 86400,
energy=[2, 3, 8, 1, 2],
gti=np.asanyarray([[5, 7], [8, 10]]) + 86400,
mjdref=57000,
)
with pytest.warns(UserWarning, match="Attribute mjdref is different"):
ev_new = ev.join(ev_other, strategy="intersection")
assert np.allclose(ev_new.time, np.array([1, 1.1, 5, 5.1, 6, 6.1, 6.11, 7, 10, 10.1]))
assert (ev_new.energy == np.array([10, 6, 2, 2, 11, 8, 1, 3, 3, 2])).all()
assert np.allclose(ev_new.gti, np.array([[5, 6]]))
def test_multiple_join(self):
"""Test if multiple event lists can be joined."""
ev = EventList(time=[1, 2, 4], instr="a", mission=1)
ev_other = EventList(time=[3, 5, 7], instr="b", mission=2)
ev_other2 = EventList(time=[6, 8, 9], instr="c", mission=3)
ev.pibiri = [1, 1, 1]
ev_other.pibiri = [2, 2, 2]
ev_other2.pibiri = [3, 3, 3]
with pytest.warns(
UserWarning,
match="Attribute (instr|mission) is different in the time series being merged.",
):
ev_new = ev.join([ev_other, ev_other2], strategy="union")
assert np.allclose(ev_new.time, [1, 2, 3, 4, 5, 6, 7, 8, 9])
assert np.allclose(ev_new.pibiri, [1, 1, 2, 1, 2, 3, 2, 3, 3])
assert ev_new.instr == "a,b,c"
assert ev_new.mission == (1, 2, 3)
class TestFilters(object):
@classmethod
def setup_class(cls):
events = np.array([1, 1.05, 1.07, 1.08, 1.1, 2, 2.2, 3, 3.1, 3.2])
events = EventList(events, gti=[[0, 3.3]])
events.pi = np.array([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])
events.energy = np.array([1, 2, 2, 2, 2, 1, 1, 1, 2, 1])
events.mjdref = 10
cls.events = events
@pytest.mark.parametrize("inplace", [True, False])
def test_apply_mask(self, inplace):
events = copy.deepcopy(self.events)
mask = [True, False, False, False, False, True, True, True, False, True]
filt_events = events.apply_mask(mask, inplace=inplace)
if inplace:
assert filt_events is events
assert np.allclose(events.pi, 1)
else:
assert filt_events is not events
assert not np.allclose(events.pi, 1)
expected = np.array([1, 2, 2.2, 3, 3.2])
assert np.allclose(filt_events.time, expected)
assert np.allclose(filt_events.pi, 1)
assert np.allclose(filt_events.energy, 1)
@pytest.mark.parametrize("inplace", [True, False])
@pytest.mark.parametrize("use_pi", [True, False])
def test_filter_energy_range(self, inplace, use_pi):
events = copy.deepcopy(self.events)
filt_events = events.filter_energy_range([0.5, 1.5], use_pi=use_pi, inplace=inplace)
if inplace:
assert filt_events is events
assert np.allclose(events.pi, 1)
else:
assert filt_events is not events
assert not np.allclose(events.pi, 1)
expected = np.array([1, 2, 2.2, 3, 3.2])
assert np.allclose(filt_events.time, expected)
assert np.allclose(filt_events.pi, 1)
assert np.allclose(filt_events.energy, 1)
@pytest.mark.parametrize("inplace", [True, False])
def test_apply_deadtime(self, inplace):
events = copy.deepcopy(self.events)
filt_events, _ = events.apply_deadtime(
0.11, inplace=inplace, verbose=False, return_all=True
)
if inplace:
assert filt_events is events
assert np.allclose(events.pi, 1)
else:
assert filt_events is not events
assert not np.allclose(events.pi, 1)
expected = np.array([1, 2, 2.2, 3, 3.2])
assert np.allclose(filt_events.time, expected)
assert np.allclose(filt_events.pi, 1)
assert np.allclose(filt_events.energy, 1)
class TestColors(object):
    """Tests for color and intensity evolution on a synthetic event list."""

    @classmethod
    def setup_class(cls):
        # 100 ks of evenly spaced events, energies drawn from {2, 5}.
        cls.events = EventList(
            time=np.arange(100000) + 0.5,
            energy=np.random.choice([2, 5], 100000),
            gti=[[0, 100000]],
        )

    def test_bad_interval_color(self):
        # Every malformed energy-range spec must be rejected the same way.
        for bad_ranges in ([[0, 3], [4, 6], [7, 8]], [[0, 3, 8]], [0], [[0, 1]]):
            with pytest.raises(ValueError, match=" 2x2 array"):
                self.events.get_color_evolution(bad_ranges, 10000)

    def test_bad_interval_intensity(self):
        for bad_range in ([[0, 3], [4, 6], [7, 8]], [[0, 3, 8]], [0], [[0, 1]]):
            with pytest.raises(ValueError, match="2-element list"):
                self.events.get_intensity_evolution(bad_range, 10000)

    def test_colors(self):
        tstart, tstop, colors, color_errs = self.events.get_color_evolution(
            [[0, 3], [4, 6]], 10000
        )
        # Each 10 ks segment holds ~5000 counts per band: ratio ~1,
        # uncertainty 2 * sqrt(5000) / 5000 ~= 0.0282.
        assert np.allclose(colors, 1, rtol=0.1)
        assert np.allclose(color_errs, 0.0282, atol=0.003)
        assert np.allclose(tstart, np.arange(10) * 10000)
        assert np.allclose(tstop, np.arange(1, 11) * 10000)

    def test_colors_missing_energies(self):
        depleted = copy.deepcopy(self.events)
        depleted.filter_energy_range([0, 3], inplace=True)
        with pytest.warns(UserWarning, match="No counts in one of the energy ranges"):
            depleted.get_color_evolution([[0, 3], [4, 6]], 10000)

    def test_colors_no_segment(self):
        tstart, tstop, colors, color_errs = self.events.get_color_evolution(
            [[0, 3], [4, 6]]
        )
        # Whole observation: ~50000 counts per band,
        # uncertainty 2 * sqrt(50000) / 50000 ~= 0.0089.
        assert np.allclose(colors, 1, rtol=0.1)
        assert np.allclose(color_errs, 0.0089, atol=0.001)
        assert np.allclose(tstart, 0)
        assert np.allclose(tstop, 100000)

    def test_intensity(self):
        tstart, tstop, rate, rate_errs = self.events.get_intensity_evolution(
            [0, 6], 10000
        )
        assert np.allclose(rate, 1, rtol=0.1)
        assert np.allclose(rate_errs, 0.01, atol=0.003)
        assert np.allclose(tstart, np.arange(10) * 10000)
        assert np.allclose(tstop, np.arange(1, 11) * 10000)

    def test_intensity_no_segment(self):
        tstart, tstop, rate, rate_errs = self.events.get_intensity_evolution([0, 6])
        assert np.allclose(rate, 1, rtol=0.1)
        assert np.allclose(rate_errs, 0.003, atol=0.001)
        assert np.allclose(tstart, 0)
        assert np.allclose(tstop, 100000)
|
StingraySoftwareREPO_NAMEstingrayPATH_START.@stingray_extracted@stingray-main@stingray@tests@test_events.py@.PATH_END.py
|
{
"filename": "paper.md",
"repo_name": "1313e/CMasher",
"repo_path": "CMasher_extracted/CMasher-master/joss_paper/paper.md",
"type": "Markdown"
}
|
---
title: "CMasher: Scientific colormaps for making accessible, informative and 'cmashing' plots"
tags:
- Python
- colormaps
- data visualization
- plotting
- science
authors:
- name: Ellert van der Velden
orcid: 0000-0002-1559-9832
affiliation: "1, 2"
affiliations:
- name: Centre for Astrophysics and Supercomputing, Swinburne University of Technology, PO Box 218, Hawthorn, VIC 3122, Australia
index: 1
- name: ARC Centre of Excellence for All Sky Astrophysics in 3 Dimensions (ASTRO 3D)
index: 2
date: XXX
bibliography: paper.bib
---
# Introduction
The use of colors in the visualization of scientific results is a common sight nowadays.
Color allows for more (complex) data to be plotted in the same figure without resorting to difficult-to-interpret 3D plots or subplots; online material; or interactive applications.
However, an often underappreciated aspect of data visualization is how color affects the way the visualized data is interpreted, making it crucial to pick the correct colormap.
In order to help with picking a scientific colormap, I introduce the *CMasher* package.
# Background summary
A good scientific colormap is often described/characterized as _perceptually uniform sequential_, which means that the colormap is perceived as uniformly changing in lightness and saturation, mostly at the same hue [@Rogowitz96; @Sharpe99].
This allows for the data values of a plot to be interpreted correctly by the viewer without giving false information.
Such a colormap also often allows for a plot to be converted properly to grey-scale without losing information.
A perceptually uniform sequential colormap allows us to properly infer the relative order of the represented numerical values, without requiring a legend or colorbar.
Although there are many works out there that describe the optimal way to do this [@Rogowitz96; @Sharpe99; @Kindlmann02; @Birch12; @Brychtova16; @Szafir18] and there are tools readily available to test the performance of a colormap [@cmaputil; @colorspacious; @viscm], bad/misleading colormaps are still very commonly used.
The main issue usually is that humans do not perceive every color equally (e.g., small variations in the color green are not perceived, as green is a common natural color, while small variations in the colors red and blue are perceived).
Here, we use the *jet* colormap to illustrate this issue:
![Output of the ``viscm`` package [@viscm] showing the statistics and performance of the *jet* colormap. The various different plots show how the colormap changes in perceived saturation and lightness, as well as how well the colormap converts to different types of color-vision deficiency and grey-scale. In case of a perceptually uniform sequential colormap, the two derivative plots should show a straight horizontal line; the colorspace diagram should be smooth; and the lines in the bottom-right corner plots should be visible up to the same depth across the entire colormap.](https://raw.githubusercontent.com/1313e/CMasher/master/docs/source/user/images/jet_viscm.png)
In Fig. 1, one can view the performance output of the *jet* colormap, made with the ``viscm`` package [@viscm].
For perceptually uniform sequential colormaps, the two derivative plots in the top-left should show a straight horizontal line, indicating that the colormap changes uniformly in both perceived saturation and lightness.
Consequently, the colorspace diagram in the bottom-left should be smooth.
Finally, the lines in the bottom-right plots should be visible up to the same depth across the entire colormap, otherwise it can create artificial features as would be shown by the sample images in the top-right plots.
If the colormap is also required to be color-vision deficiency (CVD; color blindness) friendly, the requirements above apply to the deuteranomaly/protanomaly and deuteranopia/protanopia statistics as well.
Using this information, we can check the performance of the *jet* colormap as shown in Fig. 1.
The *jet* colormap shows the spectrum of visible light, which trivially increases linearly in wavelength.
However, in Fig. 1, we can see that this introduces multiple problems, as the color green is perceived as the brightest of the visible colors due to its natural occurrence, and the colormap is absolutely not CVD-friendly.
This is an example of a colormap where it would be necessary to have a colorbar/legend, and it is a poor choice for representing numerical values.
Despite all of these shortcomings, *jet* is still a commonly used colormap in the scientific literature.
An often cited reason for this (besides the general _"Everyone else uses it."_), is that *jet* has a high perceptual range, making it easier to distinguish adjacent values (*jet* has a higher perceptual range than any colormap in *CMasher*, including the diverging colormaps).
Although a high perceptual range can be useful in many different cases, it certainly is not useful in all of them and there are ways to achieve this without giving false information.
This is where *CMasher* comes in.

# CMasher
The *CMasher* package provides a collection of scientific colormaps to be used by different Python packages and projects, mainly in combination with ``matplotlib`` [@MPL].
The colormaps in *CMasher* are all designed to be perceptually uniform sequential using the ``viscm`` package [@viscm]; most of them are CVD-friendly; and they cover a wide range of different color combinations to accommodate most applications.
It offers several alternatives to commonly used colormaps, like *chroma* and *rainforest* for *jet*; *sunburst* for *hot*; *neutral* for *binary*; and *fusion* and *redshift* for *coolwarm*.
Users are encouraged to request for specific colormaps to be designed if they cannot find the perfect one.
An overview of all current colormaps in *CMasher* (as of v1.2.2) is shown in Fig. 2.
*CMasher* has already been used in several scientific studies, including model emulations [@PRISM_JOSS; @PRISM_ApJS]; galaxy kinematics (Džudžar et al., in prep); and redshift estimations for fast radio bursts [@Fruitbat].
Due to the number of different color sequences and the perceptual uniform sequential nature of the colormaps, *CMasher* is also great for representing qualitative data.
The source code for *CMasher* (including the ``viscm`` source files) can be found at https://github.com/1313e/CMasher, whereas the descriptions of all available colormaps can be found at https://cmasher.readthedocs.io with their recommended use-cases.
# Acknowledgements
I would like to thank Manodeep Sinha for motivating and inspiring me to make *CMasher*.
I would also like to thank Adam Batten; Daniel Berke; Robert Džudžar; and Dexter Hon for their valuable suggestions for improving and expanding *CMasher*.
Parts of this research were supported by the Australian Research Council Centre of Excellence for All Sky Astrophysics in 3 Dimensions (ASTRO 3D), through project number CE170100013.
# References
|
1313eREPO_NAMECMasherPATH_START.@CMasher_extracted@CMasher-master@joss_paper@paper.md@.PATH_END.py
|
{
"filename": "input_setup.fock.py",
"repo_name": "dnarayanan/powderday",
"repo_path": "powderday_extracted/powderday-master/convenience/computers/input_setup.fock.py",
"type": "Python"
}
|
#script intended to set up the qsub files and model*.py files assuming
#sphgr physical properties have been written out to an npz file, and
#that the system we're running on is Haverford's Fock cluster.
#NOTE(review): this is legacy Python 2 code (print statements); it will not
#run under Python 3 as-is.

import numpy as np
from subprocess import call
import pdb,ipdb
from sphgr_progen import progen

#===============================================
#MODIFIABLE HEADER
#===============================================

#shell scripting
#number of cluster nodes requested for the qsub scripts
nnodes=6

#startsnap=27
#endsnap=123 #set the same as startsnap if you just want to do one snapshot

#local paths: where model files are written and where the hydro outputs live
model_dir = '/Volumes/pegasus/pd_runs/N512L64_fftw3s/baryons/halo25/attenuation/'
hydro_dir = '/Volumes/pegasus2/gizmo_runs/N512L64_fftw3s/baryons/mufasa/halo25/output/'

#if we want to write the files locally, but have the paths in the
#parameters files lead to different paths (for a different computer),
#put those paths here. otherwise, set these equal to whatever is in
#model_dir and hydro_dir
model_dir_remote = '/astro/desika/pd_runs/N512L64_fftw3s/baryons/halo25/attenuation/'
hydro_dir_remote = '/astro/desika/gizmo_runs/N512L64_fftw3s/baryons/mufasa/halo25/output/'

model_run_name='halo62_attenuation'
COSMOFLAG=0 #flag for setting if the gadget snapshots are broken up into multiples or not
SPHGR_COORDINATE_REWRITE = True

#GAL = 14 #this is the galaxy from SPH_PROGEN we need to find the
#progenitors for. to do this you need to have run sphgr on
#all the snaps. NOTE - sphgr has to be set up for the correct
#galaxy for this to work!

#===============================================

#first call the initial setup_all_cluster shell
#snapshot range is taken from the sphgr/caesar npz file
data = np.load(hydro_dir+'/Groups/caesar_physical_properties.halos.npz')
startsnap = np.min(data['snaps'])
endsnap = np.max(data['snaps'])

#build the argument list for the cluster setup script and run it
cmd = "./setup_all_cluster.fock.sh "+str(nnodes)+' '+str(startsnap)+' '+str(endsnap)+' '+model_dir+' '+hydro_dir+' '+model_run_name+' '+str(COSMOFLAG)+' '+model_dir_remote+' '+hydro_dir_remote
print cmd
call(cmd,shell=True)

if SPHGR_COORDINATE_REWRITE == True:
    data = np.load(hydro_dir+'/Groups/caesar_physical_properties.halos.npz')
    #reverse the arrays so snapshots run in ascending order
    #(assumes the npz stores them in descending order -- TODO confirm)
    sph_snap = data['snaps'][::-1]
    sph_cmx = data['xpos'][::-1]
    sph_cmy = data['ypos'][::-1]
    sph_cmz = data['zpos'][::-1]

    snaps = np.arange(startsnap,endsnap)

    for i in snaps:
        #look up the galaxy center of mass for this snapshot
        wsph = (np.where(sph_snap == i))[0][0]
        x_cent = sph_cmx[wsph]
        y_cent = sph_cmy[wsph]
        z_cent = sph_cmz[wsph]

        #append positions to the generated model file for this snapshot
        modelfile = model_dir+'/model_'+str(i)+'.py'
        print 'appending coordinates to: ', modelfile
        with open(modelfile,"a") as myfile:
            myfile.write("\n\n")
            myfile.write("#===============================================\n")
            myfile.write("#GRID POSITIONS\n")
            myfile.write("#===============================================\n")
            myfile.write("x_cent = %s\n" % x_cent)
            myfile.write("y_cent = %s\n" % y_cent)
            myfile.write("z_cent = %s\n" % z_cent)
            myfile.write("\n")
|
dnarayananREPO_NAMEpowderdayPATH_START.@powderday_extracted@powderday-master@convenience@computers@input_setup.fock.py@.PATH_END.py
|
{
"filename": "outputs.py",
"repo_name": "joezuntz/cosmosis",
"repo_path": "cosmosis_extracted/cosmosis-main/cosmosis/postprocessing/outputs.py",
"type": "Python"
}
|
import numpy as np

from . import lazy_pylab as pylab
class MiniTable:
    """
    A simple class for storing and writing out a table of data.
    This is to avoid imposing a dependency on astropy tables.
    """
    def __init__(self, cols):
        # Column names, in the order rows must supply their values.
        self.cols = cols
        self.rows = []

    def to_astropy(self):
        """Convert to an ``astropy.table.Table`` (astropy imported lazily)."""
        from astropy.table import Table
        return Table(rows=self.rows, names=self.cols)

    def append(self, row):
        """Append one row; it must supply exactly one value per column."""
        if len(row) != len(self.cols):
            raise ValueError("Row has wrong number of columns")
        self.rows.append(row)

    def write(self, filename):
        """Write the table as text: a '#'-prefixed header line, then one
        space-separated line per row."""
        with open(filename, "w") as f:
            f.write("#")
            f.write(" ".join(self.cols))
            f.write("\n")
            for row in self.rows:
                f.write(" ".join(str(x) for x in row))
                f.write("\n")

    def __len__(self):
        return len(self.rows)

    def __getitem__(self, row_or_col):
        """Index with an int to get a row (list), or with a column name to
        get that column as a numpy array.

        Bug fix: this method used ``np`` but the module never imported
        numpy, so column access raised NameError (see module imports).
        """
        if isinstance(row_or_col, int):
            return self.rows[row_or_col]
        i = self.cols.index(row_or_col)
        return np.array([row[i] for row in self.rows])
class PostprocessProduct(object):
    """Base class for a single named postprocessing output.

    Subclasses override :meth:`save` to persist ``value`` to ``filename``;
    the base implementation does nothing.
    """
    def __init__(self, name, filename, value, info=None):
        self.name, self.filename = name, filename
        self.value, self.info = value, info

    def save(self):
        """Persist this product to disk. No-op in the base class."""
        pass
class PostprocessPlot(PostprocessProduct):
    """A figure product: ``value`` is a matplotlib figure object."""
    def save(self):
        """Activate the stored figure, write it to ``filename``, then close it."""
        pylab.figure(self.value.number)
        pylab.savefig(self.filename)
        pylab.close()
    def tweak(self, tweak):
        """Run a user-supplied ``tweak`` object against this figure.

        The figure is made current first; the tweak gets this product's
        ``info`` before its ``run()`` is called.
        """
        print("Tweaking", self.name)
        pylab.figure(self.value.number)
        tweak.info = self.info
        tweak.run()
class PostprocessTable(PostprocessProduct):
    """A table product: ``value`` is a table object exposing ``write`` (e.g. MiniTable)."""
    def save(self):
        """Write the table to ``filename`` via the table's own writer."""
        self.value.write(self.filename)
class PostprocessText(PostprocessProduct):
    """A text product: ``value`` is an open, writable/readable text buffer."""
    def save(self):
        """Dump the buffer's entire contents to ``filename`` and close it."""
        # Rewind so everything written so far is captured, not just the tail.
        self.value.seek(0)
        contents = self.value.read()
        with open(self.filename, "w") as out:
            out.write(contents)
        self.value.close()
|
joezuntzREPO_NAMEcosmosisPATH_START.@cosmosis_extracted@cosmosis-main@cosmosis@postprocessing@outputs.py@.PATH_END.py
|
{
"filename": "util.py",
"repo_name": "simonsobs/sorunlib",
"repo_path": "sorunlib_extracted/sorunlib-main/src/sorunlib/util.py",
"type": "Python"
}
|
import os
import re
from sorunlib.config import load_config
from ocs import site_config
from ocs.ocs_client import OCSClient
from ocs.client_http import ControlClientError
class CrossbarConnectionError(Exception):
    """Raised when the crossbar server cannot be reached."""
    pass
def _load_site_config(filename=None):
"""Load a site config file, searching for default.yaml in OCS_CONFIG DIR by
default.
Args:
filename (str): Path to OCS site config file
Returns:
ocs.site_config.SiteConfig object
"""
if filename is None:
assert (os.getenv('OCS_CONFIG_DIR') is not None)
site = 'default'
filename = os.path.join(os.getenv('OCS_CONFIG_DIR'),
site + '.yaml')
cfg = site_config.SiteConfig.from_yaml(filename)
return cfg
def _find_instances(agent_class, host=None, config=None):
    """Find all instances of an Agent Class in a config.

    Args:
        agent_class (str): Agent Class name to search for, must match Agent
            Class defined by an OCS Agent (and thus also defined in the SCF.)
        host (str): Specific host to search on, if None all hosts will be
            searched.
        config (str): Path to the OCS Site Config File. If None the default
            file in OCS_CONFIG_DIR will be used.

    Returns:
        list: List of instance-id's matching the given agent_class.

    """
    site = _load_site_config(config)
    # Restrict the search to a single host when one is requested.
    hosts = site.hosts if host is None else {host: site.hosts[host]}
    return [
        entry['instance-id']
        for hostcfg in hosts.values()
        for entry in hostcfg.instances
        if entry['agent-class'] == agent_class
    ]
def _find_active_instances(agent_class):
    """Find all instances of an Agent Class currently online, based on the
    Agents known by the registry.

    Args:
        agent_class (str): Agent Class name to search for, must match Agent
            Class defined by an OCS Agent (and thus also defined in the SCF.)

    Returns:
        str or list: List of instance-id's matching the given agent_class. If
        exactly one instance is found, that single instance-id string is
        returned bare, not wrapped in a list.

    """
    cfg = load_config()
    registry = _try_client(cfg['registry'])
    _, _, session = registry.main.status()

    matches = []
    for agent in session['data'].values():
        # Skip agents the registry has marked as expired, and any agent of a
        # different class.
        if agent['expired'] or agent['agent_class'] != agent_class:
            continue
        # The instance-id is the final component of the agent address.
        matches.append(agent['agent_address'].split('.')[-1])

    # Historical behavior: a lone match is returned bare, not as a list.
    return matches[0] if len(matches) == 1 else matches
def _try_client(instanceid):
    """Use in place of OCSClient to handle common exceptions.

    Args:
        instanceid (str or None): Instance id to build a client for. Falsy
            values short-circuit and return ``None``.

    Returns:
        OCSClient or None: ``None`` when ``instanceid`` is falsy or when the
        agent is not registered on the crossbar server.

    Raises:
        CrossbarConnectionError: If the crossbar server itself is unreachable.
        ControlClientError: For any other client instantiation failure
            (e.g. non-200 error codes).

    """
    if not instanceid:
        return
    try:
        client = OCSClient(instanceid)
    except ControlClientError as e:
        # crossbar connection error
        if "Failed to connect" in str(e):
            # Pull the crossbar URL out of the error text for a clearer message.
            result = re.search(r"(http://[^ ]+)'", str(e))
            crossbar_url = result.group(1)
            error = f"Cannot connect to crossbar server {crossbar_url}. Check your connection."
            raise CrossbarConnectionError(error)
        # likely an agent connection error
        if "no callee registered" in str(e):
            print(f"Could not instantiate OCSClient for '{instanceid}'.")
            return None
        # other errors, i.e. non-200 error codes
        print(f"Unexpected error trying to instantiate OCSClient for '{instanceid}'.")
        raise ControlClientError(e)
    return client
def _create_wiregrid_clients(config=None, sorunlib_config=None):
    """Create all wiregrid related clients for a single platform.

    Args:
        config (str): Path to the OCS Site Config File. If None the default
            file in OCS_CONFIG_DIR will be used.  (Currently unused here;
            kept for interface symmetry with ``create_clients``.)
        sorunlib_config (str): Path to sorunlib config file. If None the path
            from environment variable SORUNLIB_CONFIG is used.

    Returns:
        dict: Dictionary with the keys, 'actuator', 'encoder', 'kikusui', and
        'labjack' with each value being the corresponding OCSClient.

    """
    # Instance ids discovered from the registry, plus the labjack id taken
    # from the sorunlib config (it is not an OCS-registered wiregrid agent).
    instance_ids = {
        'actuator': _find_active_instances('WiregridActuatorAgent'),
        'encoder': _find_active_instances('WiregridEncoderAgent'),
        'kikusui': _find_active_instances('WiregridKikusuiAgent'),
    }

    cfg = load_config(filename=sorunlib_config)
    try:
        instance_ids['labjack'] = cfg['wiregrid']['labjack']
    except KeyError:
        instance_ids['labjack'] = None

    return {name: _try_client(iid) for name, iid in instance_ids.items()}
def create_clients(config=None, sorunlib_config=None, test_mode=False):
    """Create all clients needed for commanding a single platform.

    Args:
        config (str): Path to the OCS Site Config File. If None the default
            file in OCS_CONFIG_DIR will be used.
        sorunlib_config (str): Path to sorunlib config file. If None the path
            from environment variable SORUNLIB_CONFIG is used.
        test_mode (bool): Operate in 'test mode'. Use this to find Agents that
            are meant to stand in for real agents while testing, i.e.
            SmurfFileEmulators instead of PysmurfControllers.

    Returns:
        dict: Dictionary with the ACU and SMuRF clients needed for commanding
            in the format::

                clients = {'acu': acu_client,
                           'hwp': hwp_supervisor_client,
                           'smurf': [smurf_client1, smurf_client2, smurf_client3],
                           'wiregrid': {'actuator': actuator_client,
                                        'encoder': encoder_client,
                                        'kikusui': kikusui_client,
                                        'labjack': labjack_client}}

    """
    clients = {}

    # Emulators stand in for the real SMuRF controllers during testing.
    if test_mode:
        smurf_agent_class = 'SmurfFileEmulator'
    else:
        smurf_agent_class = 'PysmurfController'

    acu_id = _find_active_instances('ACUAgent')
    hwp_id = _find_active_instances('HWPSupervisor')
    smurf_ids = _find_active_instances(smurf_agent_class)

    if acu_id:
        acu_client = _try_client(acu_id)
        clients['acu'] = acu_client

    if hwp_id:
        hwp_client = _try_client(hwp_id)
        clients['hwp'] = hwp_client

    if isinstance(smurf_ids, str):
        # _find_active_instances returns a bare instance-id string when only
        # one SMuRF controller is online; normalize to a list so the client
        # construction below applies in both cases.  (Bug fix: previously the
        # raw id string was stored in clients['smurf'] instead of a client.)
        smurf_ids = [smurf_ids]
    # create smurf client list, even if empty
    smurf_clients = [_try_client(x) for x in smurf_ids]
    clients['smurf'] = smurf_clients

    clients['wiregrid'] = _create_wiregrid_clients(
        config=config,
        sorunlib_config=sorunlib_config)

    return clients
|
simonsobsREPO_NAMEsorunlibPATH_START.@sorunlib_extracted@sorunlib-main@src@sorunlib@util.py@.PATH_END.py
|
{
"filename": "models_mg.py",
"repo_name": "Guo-Jian-Wang/colfi",
"repo_path": "colfi_extracted/colfi-master/colfi/models_mg.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from . import cosmic_params, fcnet_mdn
from . import data_processor as dp
from . import data_simulator as ds
from .models_mdn import OneBranchMDN, MultiBranchMDN
from .models_ann import Loader
import numpy as np
import torch
from torch.distributions import Categorical
def gaussian_sampler(omega, params, _sigma):
    """Draw one component value per sample from a mixture.

    A component index is drawn per sample from the categorical weights
    ``omega`` (shape (N, comp_n)) and the matching entry is gathered from
    the 3-D tensor ``params`` along its last dimension.  ``_sigma`` is
    unused; it exists only so the sampler signatures stay interchangeable.
    """
    chosen = Categorical(omega).sample().view(omega.size(0), 1, 1)
    return params.detach().gather(2, chosen).squeeze()
def multivariateGaussian_sampler(omega, params, _cholesky_f):
    """Draw one component slice per sample from a multivariate mixture.

    A component index is drawn per sample from the categorical weights
    ``omega`` and the matching slice is gathered from the 4-D tensor
    ``params`` along dimension 1.  ``_cholesky_f`` is unused; it exists
    only so the sampler signatures stay interchangeable.
    """
    idx = Categorical(omega).sample().view(omega.size(0), 1, 1, 1)
    idx = idx.expand(idx.size(0), 1, params.size(2), params.size(3))
    return params.detach().gather(1, idx).squeeze()
def samplers(params_n):
    """Return the sampler matching the parameter dimensionality.

    One parameter uses the scalar Gaussian sampler; multiple parameters
    use the multivariate Gaussian sampler.
    """
    return gaussian_sampler if params_n == 1 else multivariateGaussian_sampler
class OneBranchMLP_MG(OneBranchMDN):
    r"""Predict cosmological parameters with mixture neural network (MNN) for one set of datasets.

    Parameters
    ----------
    train_set : list
        The training set that contains simulated observations (measurements) with
        shape (N, obs_length) and simulated parameters of a specific
        cosmological (or theoretical) model. i.e. [observations, parameters]
    param_names : list
        A list which contains the parameter names, e.g. ['H0','ombh2','omch2'].
    vali_set : list, optional
        The validation set that contains simulated observations (measurements)
        with shape (N, obs_length) and simulated parameters of a specific
        cosmological (or theoretical) model, i.e. [observations, parameters].
        The validation set can also be set to None, i.e. [None, None]. Default: [None, None]
    obs_errors : None or array-like, optional
        Observational errors with shape (obs_length,). If ``cov_matrix`` is set to None,
        the observational errors should be given. Default: None
    cov_matrix : None or array-like, optional
        Covariance matrix of the observational data. If a covariance matrix is given,
        ``obs_errors`` will be ignored. Default: None
    params_dict : dict or None, optional
        Information of cosmological parameters that include the labels, the minimum values,
        and the maximum values. See :func:`~.cosmic_params.params_dict_zoo`. Default: None
    comp_type : str, optional
        The name of component used in the ``MDN`` method, which should be 'Gaussian'.
        Since the loss function of ``MNN`` is similar to that of ``MDN`` with Gaussian
        mixture model, we are using the loss function of ``MDN``. Default: 'Gaussian'
    comp_n : int, optional
        The number of components used in the ``MNN`` method. Default: 3
    hidden_layer : int, optional
        The number of the hidden layer of the network. Default: 3
    activation_func : str, optional
        Activation function, which can be 'ReLU', 'LeakyReLU', 'PReLU',
        'RReLU', 'ReLU6', 'ELU', 'CELU', 'SELU', 'SiLU', 'Sigmoid', 'LogSigmoid',
        'Tanh', 'Tanhshrink', 'Softsign', or 'Softplus' (see :func:`~.element.activation`). Default: 'Softplus'
    noise_type : str, optional
        The type of Gaussian noise added to the training set, which can be 'singleNormal' or
        'multiNormal'. Default: 'multiNormal'
    factor_sigma : float, optional
        For the case of ``noise_type`` = 'singleNormal', ``factor_sigma`` should be
        set to 1. For the case of ``noise_type`` = 'multiNormal', it is the standard
        deviation of the coefficient of the observational error (standard deviation). Default: 0.2
    multi_noise : int, optional
        The number of realization of noise added to the measurement in one epoch. Default: 5

    Attributes
    ----------
    obs_base : array-like, optional
        The base value of observations that is used for data normalization when
        training the network to ensure that the scaled observations are ~ 1.,
        it is suggested to set the mean of the simulated observations.
        The default is the mean of the simulated observations.
    params_base : array-like, optional
        The base value of parameters that is used for data normalization when
        training the network to ensure that the scaled parameters are ~ 1.,
        it is suggested to set the mean of the posterior distribution (or the simulated parameters).
        The default is the mean of the simulated parameters.
    params_space : array-like
        The parameter space with the shape of (n, 2), where n is the number of parameters.
        For each parameter, it is: [lower_limit, upper_limit].
    lr : float, optional
        The learning rate setting of the network. Default: 1e-2
    lr_min : float, optional
        The minimum of the learning rate. Default: 1e-8
    batch_size : int, optional
        The batch size setting of the network. Default: 1250
    auto_batchSize : bool, optional
        If True, the batch size will be set automatically in the training process,
        otherwise, use the setting of ``batch_size``. Default: False
    epoch : int, optional
        The number of epoch of the training process. Default: 2000
    base_epoch : int, optional
        The base number (or the minimum number) of epoch. Default: 1000
    auto_epoch : bool, optional
        If True, the epoch will be set automatically in the training process,
        otherwise, use the setting of ``epoch``. Default: False
    print_info : bool, optional
        If True, will print the information of the network. Default: False
    scale_obs : bool, optional
        If True, the input data (measurements) will be scaled based on the
        base values of the data. Default: False
    scale_params : bool, optional
        If True, the target data (cosmological parameters) will be scaled based on
        the base values of parameters. See :class:`~.data_processor.DataPreprocessing`.
        Default: True
    norm_obs : bool, optional
        If True, the input data (measurements) of the network will be normalized. Default: True
    norm_params : bool, optional
        If True, the target data (cosmological parameters) will be normalized. Default: True
    independent_norm_obs : bool, optional
        If True, the measurements will be normalized independently. This only works when
        ``norm_obs=True``. Default: False
    independent_norm_params : bool, optional
        If True, the target data (cosmological parameters) will be normalized independently.
        This only works when ``norm_params=True``. Default: True
    norm_type : str, optional
        The method of normalization, which can be 'z_score', 'minmax', or 'mean'
        (see :class:`~.data_processor.Normalize`). Default: 'z_score'
    spaceSigma_min : int, optional
        The minimum parameter space to be learned, e.g. for spaceSigma_min=5,
        the parameter space to be learned is :math:`[-5\sigma, +5\sigma]`. Default: 5
    burnInEnd : bool, optional
        If True, it is the end of the burn-in phase, which means the ANN chain
        has reached a stable state. Default: False
    burnInEnd_step : None or int, optional
        The burn-in end step. If None, it means the burn-in phase not end. Default: None
    transfer_learning : bool, optional
        If True, the network will be initialized using the well-trained network of
        the previous step. Default: False
    randn_num : float or str, optional
        A random number that identifies the saved results. Default: float
    nde_type : str, optional
        A string that indicate which NDE is used, which should be 'MNN'.
    file_identity_str : str, optional
        A string that identifies the files saved to the disk, which is useful to
        identify the saved files. Default: ''
    """
    def __init__(self, train_set, param_names, vali_set=[None,None], obs_errors=None,
                 cov_matrix=None, params_dict=None, comp_type='Gaussian', comp_n=3,
                 hidden_layer=3, activation_func='Softplus', noise_type='multiNormal',
                 factor_sigma=0.2, multi_noise=5):
        # NOTE(review): the mutable default ``vali_set=[None,None]`` is only
        # unpacked below, never mutated, so it is safe here.
        #data
        self.obs, self.params = train_set
        self.obs_base = np.mean(self.obs, axis=0)
        # self.obs_base = self.obs[0] #need test
        self.params_base = np.mean(self.params, axis=0)
        self.param_names = param_names
        self.params_n = len(param_names)
        self.obs_vali, self.params_vali = vali_set
        self.obs_errors = obs_errors
        # Cholesky factor of the covariance (None when no covariance);
        # ``_cholesky_factor`` is presumably inherited from OneBranchMDN -- confirm.
        self.cholesky_factor = self._cholesky_factor(cov_matrix)
        self.params_dict = params_dict
        p_property = cosmic_params.ParamsProperty(param_names, params_dict=params_dict)
        self.params_limit = p_property.params_limit
        self.params_space = np.array([])
        #MNN model
        self.comp_type = comp_type
        self.comp_n = comp_n
        self.hidden_layer = hidden_layer
        self.activation_func = activation_func
        self.lr = 1e-2
        self.lr_min = 1e-8
        self.batch_size = 1250
        self.auto_batchSize = False
        self.epoch = 2000
        self.base_epoch = 1000
        self.auto_epoch = False
        self.print_info = False
        #data preprocessing
        self.noise_type = noise_type
        self.factor_sigma = factor_sigma
        self.multi_noise = multi_noise
        self.scale_obs = False
        self.scale_params = True
        self.norm_obs = True
        self.norm_params = True
        self.independent_norm_obs = False
        self.independent_norm_params = True
        self.norm_type = 'z_score'
        #training
        self.spaceSigma_min = 5
        self.auto_repeat_n = False
        self.burnInEnd = False
        self.burnInEnd_step = None
        self.transfer_learning = False
        # Random identifier embedded in saved file names.
        self.randn_num = round(abs(np.random.randn()/5.), 5)
        self.nde_type = 'MNN'
        self.file_identity_str = ''

    @property
    def sampler(self):
        # Dispatch to the module-level sampler matching the dimensionality.
        return samplers(self.params_n)

    # TODO: add unit tests for this method.
    def predict_chain(self, obs_data, cov_matrix=None, chain_leng=10000):
        """Build an ANN chain by predicting from noisy copies of the best fit.

        Parameters
        ----------
        obs_data : array-like
            Observational data; column 1 is used as the best-fit values and
            column 2 as the observational errors.
        cov_matrix : array-like or None, optional
            Covariance matrix of the observational data; when given, its
            Cholesky factor is used to draw correlated noise. Default: None
        chain_leng : int, optional
            Number of noisy realizations (chain length). Default: 10000

        Returns
        -------
        array-like
            The predicted chain, with non-physical parameters removed.
        """
        obs_data = dp.numpy2torch(obs_data)
        # Column 1: best-fit observation; column 2: observational errors.
        obs_best, obs_errors = obs_data[:,1], obs_data[:,2]
        # Replicate the best-fit observation chain_leng times.
        self.obs_best_multi = torch.ones((chain_leng, len(obs_best))) * obs_best
        if cov_matrix is None:
            cholesky_factor = None
        else:
            cholesky_factor = dp.numpy2torch(np.linalg.cholesky(cov_matrix))
        # Add one noise realization per replica (factor_sigma=1, singleNormal).
        self.obs_best_multi = ds.AddGaussianNoise(self.obs_best_multi, obs_errors=obs_errors, cholesky_factor=cholesky_factor, noise_type='singleNormal', factor_sigma=1, use_GPU=False).noisyObs()
        self.obs_best_multi = dp.torch2numpy(self.obs_best_multi)
        self.chain = self.predict(self.obs_best_multi, in_type='numpy')
        self.chain = self.cut_params(self.chain) #remove non-physical parameters
        return self.chain
class PredictOBMLP_MG(OneBranchMLP_MG, Loader):
    """Repredict cosmological parameters using the saved networks.

    Parameters
    ----------
    path : str
        The path of the results saved. Default: 'ann'
    randn_num : str or int
        A random number that identifies the saved results.
    """
    def __init__(self, path='ann', randn_num='0.123'):
        self.path = path
        # Stored as a string to match the file-name identifier convention.
        self.randn_num = str(randn_num)
#%%
class MultiBranchMLP_MG(MultiBranchMDN):
    """Predict cosmological parameters with multibranch MNN for multiple sets of datasets.
    Parameters
    ----------
    train_set : list
        The training set that contains simulated observations (measurements) which
        is a list observations with shape [(N,obs_length_1), (N,obs_length_2), ...]
        and simulated parameters of a specific cosmological (or theoretical) model.
        i.e. [observations, parameters]
    param_names : list
        A list which contains the parameter names, e.g. ['H0','ombh2','omch2'].
    vali_set : list, optional
        The validation set that contains simulated observations (measurements) which
        is a list observations with shape [(N,obs_length_1), (N,obs_length_2), ...]
        and simulated parameters of a specific cosmological (or theoretical) model.
        The validation set can also be set to None. i.e. [observations, parameters] or [None, None].
        Default: [None, None]
    obs_errors : None or list, optional
        Observational errors, it is a list of errors with shape [(obs_length_1,), (obs_length_2,), ...].
        If ``cov_matrix`` is set to None, the observational errors should be given. Default: None
    cov_matrix : None or list, optional
        A list of covariance matrix with shape [(obs_length_1, obs_length_1), (obs_length_2, obs_length_2), ...].
        If there is no covariance for some observations, the covariance matrix
        should be set to None. e.g. [cov_matrix_1, None, cov_matrix_3]. If a covariance
        matrix is given, ``obs_errors`` will be ignored. Default: None
    params_dict : dict or None, optional
        Information of cosmological parameters that include the labels, the minimum values,
        and the maximum values. See :func:`~.cosmic_params.params_dict_zoo`. Default: None
    comp_type : str, optional
        The name of component used in the ``MDN`` method, which should be 'Gaussian'.
        Since the loss function of ``MNN`` is similar to that of ``MDN`` with Gaussian
        mixture model, we are using the loss function of ``MDN``. Default: 'Gaussian'
    comp_n : int, optional
        The number of components used in the ``MNN`` method. Default: 3
    branch_hiddenLayer : int, optional
        The number of the hidden layer for the branch part of the network. Default: 1
    trunk_hiddenLayer : int, optional
        The number of the hidden layer for the trunk part of the network. Default: 2
    activation_func : str, optional
        Activation function, which can be 'ReLU', 'LeakyReLU', 'PReLU',
        'RReLU', 'ReLU6', 'ELU', 'CELU', 'SELU', 'SiLU', 'Sigmoid', 'LogSigmoid',
        'Tanh', 'Tanhshrink', 'Softsign', or 'Softplus' (see :func:`~.element.activation`). Default: 'Softplus'
    noise_type : str, optional
        The type of Gaussian noise added to the training set, which can be 'singleNormal' or
        'multiNormal'. Default: 'multiNormal'
    factor_sigma : float, optional
        For the case of ``noise_type`` = 'singleNormal', ``factor_sigma`` should be
        set to 1. For the case of ``noise_type`` = 'multiNormal', it is the standard
        deviation of the coefficient of the observational error (standard deviation). Default: 0.2
    multi_noise : int, optional
        The number of realization of noise added to the measurement in one epoch. Default: 5
    Attributes
    ----------
    obs_base : array-like, optional
        The base value of observations that is used for data normalization when
        training the network to ensure that the scaled observations are ~ 1.,
        it is suggested to set the mean of the simulated observations.
        The default is the mean of the simulated observations.
    params_base : array-like, optional
        The base value of parameters that is used for data normalization when
        training the network to ensure that the scaled parameters are ~ 1.,
        it is suggested to set the mean of the posterior distribution (or the simulated parameters).
        The default is the mean of the simulated parameters.
    params_space : array-like
        The parameter space with the shape of (n, 2), where n is the number of parameters.
        For each parameter, it is: [lower_limit, upper_limit].
    lr : float, optional
        The learning rate setting of the network. Default: 1e-2
    lr_branch : float, optional
        The learning rate setting of the branch part. Default: 1e-2
    lr_min : float, optional
        The minimum of the learning rate. Default: 1e-8
    batch_size : int, optional
        The batch size setting of the network. Default: 1250
    auto_batchSize : bool, optional
        If True, the batch size will be set automatically in the training process,
        otherwise, use the setting of ``batch_size``. Default: False
    epoch : int, optional
        The number of epoch of the training process. Default: 2000
    epoch_branch : int, optional
        The number of epoch for the branch part. This only works when training the branch part. Default: 2000
    base_epoch : int, optional
        The base number (or the minimum number) of epoch. Default: 1000
    auto_epoch : bool, optional
        If True, the epoch will be set automatically in the training process,
        otherwise, use the setting of ``epoch``. Default: False
    print_info : bool, optional
        If True, will print the information of the network. Default: False
    scale_obs : bool, optional
        If True, the input data (measurements) will be scaled based on the
        base values of the data. Default: True
    scale_params : bool, optional
        If True, the target data (cosmological parameters) will be scaled based on
        the base values of parameters. See :class:`~.data_processor.DataPreprocessing`.
        Default: True
    norm_obs : bool, optional
        If True, the input data (measurements) of the network will be normalized. Default: True
    norm_params : bool, optional
        If True, the target data (cosmological parameters) will be normalized. Default: True
    independent_norm_obs : bool, optional
        If True, the measurements will be normalized independently. This only works when
        ``norm_obs=True``. Default: False
    independent_norm_params : bool, optional
        If True, the target data (cosmological parameters) will be normalized independently.
        This only works when ``norm_params=True``. Default: True
    norm_type : str, optional
        The method of normalization, which can be 'z_score', 'minmax', or 'mean'
        (see :class:`~.data_processor.Normalize`). Default: 'z_score'
    spaceSigma_min : int, optional
        The minimum parameter space to be learned, e.g. for spaceSigma_min=5,
        the parameter space to be learned is :math:`[-5\sigma, +5\sigma]`. Default: 5
    burnInEnd : bool, optional
        If True, it is the end of the burn-in phase, which means the ANN chain
        has reached a stable state. Default: False
    burnInEnd_step : None or int, optional
        The burn-in end step. If None, it means the burn-in phase not end. Default: None
    transfer_learning : bool, optional
        If True, the network will be initialized using the well-trained network of
        the previous step. Default: False
    randn_num : float or str, optional
        A random number that identifies the saved results. Default: float
    nde_type : str, optional
        A string that indicate which NDE is used, which should be 'MNN'.
    file_identity_str : str, optional
        A string that identifies the files saved to the disk, which is useful to
        identify the saved files. Default: ''
    """
    def __init__(self, train_set, param_names, vali_set=[None,None], obs_errors=None,
                 cov_matrix=None, params_dict=None, comp_type='Gaussian', comp_n=3,
                 branch_hiddenLayer=1, trunk_hiddenLayer=2, activation_func='Softplus',
                 noise_type='multiNormal', factor_sigma=0.2, multi_noise=5):
        #data
        self.obs, self.params = train_set
        # One branch of the network per observation data set.
        self.branch_n = len(self.obs)
        self.obs_base = [np.mean(self.obs[i], axis=0) for i in range(self.branch_n)]
        # self.obs_base = [self.obs[i][0] for i in range(self.branch_n)] #need test
        self.params_base = np.mean(self.params, axis=0)
        self.param_names = param_names
        self.params_n = len(param_names)
        self.obs_vali, self.params_vali = vali_set
        self.obs_errors = self._obs_errors(obs_errors)
        self.cholesky_factor = self._cholesky_factor(cov_matrix)
        self.params_dict = params_dict
        p_property = cosmic_params.ParamsProperty(param_names, params_dict=params_dict)
        self.params_limit = p_property.params_limit
        self.params_space = np.array([])
        #MDN model
        self.comp_type = comp_type
        self.comp_n = comp_n
        self.branch_hiddenLayer = branch_hiddenLayer
        self.trunk_hiddenLayer = trunk_hiddenLayer
        self.activation_func = activation_func
        self.lr = 1e-2
        self.lr_branch = 1e-2
        self.lr_min = 1e-8
        self.batch_size = 1250
        self.auto_batchSize = False
        self.epoch = 2000
        self.epoch_branch = 2000
        self.base_epoch = 1000
        self.auto_epoch = False
        self.print_info = False
        #data preprocessing
        self.noise_type = noise_type
        self.factor_sigma = factor_sigma
        self.multi_noise = multi_noise
        self.scale_obs = True
        self.scale_params = True
        self.norm_obs = True
        self.norm_params = True
        self.independent_norm_obs = False
        self.independent_norm_params = True
        self.norm_type = 'z_score'
        #training
        self.spaceSigma_min = 5
        self.auto_repeat_n = False
        self.burnInEnd = False
        self.burnInEnd_step = None
        self.transfer_learning = False
        # Random tag used to identify the files of this run on disk.
        self.randn_num = round(abs(np.random.randn()/5.), 5)
        self.nde_type = 'MNN'
        self.file_identity_str = ''
    @property
    def sampler(self):
        """Sampler instance selected by the number of free parameters."""
        return samplers(self.params_n)
    #to be tested???
    def predict_chain(self, obs_data, cov_matrix=None, chain_leng=10000):
        """Generate an ANN chain from noisy realizations of multiple observations.

        Parameters
        ----------
        obs_data : list
            Observations in a list [obs1, obs2, ...]; each element has shape (N, 3)
            with columns [independent variable, measurement, error].
        cov_matrix : list or None, optional
            One covariance matrix (or None) per observation. Default: None
        chain_leng : int, optional
            The number of noisy realizations (chain length). Default: 10000

        Returns
        -------
        array-like
            The predicted chain with non-physical parameters removed.
        """
        # obs_data: observations in a list [obs1, obs2, ...], each element has shape (N, 3)
        if cov_matrix is None:
            cov_matrix = [None for i in range(len(obs_data))]
        obs_data = [dp.numpy2torch(obs_data[i]) for i in range(len(obs_data))]
        obs_best = [obs_data[i][:,1] for i in range(len(obs_data))]
        obs_errors = [obs_data[i][:,2] for i in range(len(obs_data))]
        # Tile each branch's best-fit measurement: one identical row per chain sample.
        obs_best_multi = [torch.ones((chain_leng, len(obs_best[i]))) * obs_best[i] for i in range(len(obs_data))]
        cholesky_factor = []
        for i in range(len(obs_data)):
            if cov_matrix[i] is None:
                cholesky_factor.append(None)
            else:
                # Cholesky factor is used to draw correlated Gaussian noise for this branch.
                cholesky_factor.append(dp.numpy2torch(np.linalg.cholesky(cov_matrix[i])))
        obs_best_multi = ds.AddGaussianNoise(obs_best_multi, obs_errors=obs_errors, cholesky_factor=cholesky_factor, noise_type='singleNormal', factor_sigma=1, use_GPU=False).noisyObs()
        obs_best_multi = [dp.torch2numpy(obs_best_multi[i]) for i in range(len(obs_best_multi))]#
        self.chain = self.predict(obs_best_multi, in_type='numpy')
        self.chain = self.cut_params(self.chain) #remove non-physical parameters
        return self.chain
class PredictMBMLP_MG(MultiBranchMLP_MG, Loader):
    """Repredict cosmological parameters from networks previously saved to disk.

    Parameters
    ----------
    path : str
        The path where the results were saved. Default: 'ann'
    randn_num : str or int
        The random number identifying the saved results.
    """
    def __init__(self, path='ann', randn_num='0.123'):
        # Only record where to find the saved run; loading is handled by Loader.
        self.path = path
        self.randn_num = str(randn_num)
|
Guo-Jian-WangREPO_NAMEcolfiPATH_START.@colfi_extracted@colfi-master@colfi@models_mg.py@.PATH_END.py
|
{
"filename": "fitting_specify.py",
"repo_name": "dartoon/galight",
"repo_path": "galight_extracted/galight-master/galight/fitting_specify.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 14 12:16:43 2020
@author: Xuheng Ding
"""
import numpy as np
import copy
import lenstronomy
from packaging import version
import warnings
class FittingSpecify(object):
    """
    A class to generate the materials for the 'FittingSequence', defined by 'lenstronomy'
    key materials include the following, which are prepared by 'prepare_fitting_seq()':
        - kwargs_data_joint: data materils
        - kwargs_model: a list of class
        - kwargs_constraints
        - kwargs_likelihood
        - kwargs_params
        - imageModel
    """
    def __init__(self, data_process_class, sersic_major_axis=True):
        # Copy the quantities needed for the fit out of the data-processing class.
        self.data_process_class = data_process_class
        self.deltaPix = data_process_class.deltaPix
        self.numPix = len(self.data_process_class.target_stamp)
        self.zp = data_process_class.zp
        self.apertures = copy.deepcopy(data_process_class.apertures)
        self.mask_apertures = copy.deepcopy(data_process_class.mask_apertures)
        self.header = copy.deepcopy(data_process_class.header)
        self.target_pos = copy.deepcopy(data_process_class.target_pos)
        self.segm_deblend = np.array(data_process_class.segm_deblend)
        if sersic_major_axis is None:
            # NOTE(review): if sersic_major_axis is None and lenstronomy < 1.9.0,
            # self.sersic_major_axis is never assigned and later access raises
            # AttributeError — confirm whether that path can occur in practice.
            if version.parse(lenstronomy.__version__) >= version.parse("1.9.0"):
                from lenstronomy.Conf import config_loader
                convention_conf = config_loader.conventions_conf()
                self.sersic_major_axis = convention_conf['sersic_major_axis'] #If sersic_major_axis == None, the sersic_major_axis follows Lenstronomy.
        else:
            self.sersic_major_axis = sersic_major_axis
    def sepc_kwargs_data(self, supersampling_factor, point_source_supersampling_factor, psf_data = None, psf_error_map = None):
        """Prepare 'kwargs_data', 'kwargs_psf', 'kwargs_numerics' and the joint
        'kwargs_data_joint' dict used by lenstronomy, from the processed stamp,
        noise map and PSF."""
        import lenstronomy.Util.simulation_util as sim_util
        kwargs_data = sim_util.data_configure_simple(self.numPix, self.deltaPix,
                                                     inverse=True)  #inverse: if True, coordinate system is ra to the left, if False, to the right
        kwargs_data['image_data'] = self.data_process_class.target_stamp
        kwargs_data['noise_map'] = self.data_process_class.noise_map
        if psf_data is None:
            # Fall back to the PSF selected during data processing.
            psf_data = self.data_process_class.PSF_list[self.data_process_class.psf_id_for_fitting]
        kwargs_psf = {'psf_type': 'PIXEL', 'kernel_point_source': psf_data,'pixel_size': self.deltaPix}
        if psf_error_map is not None:
            kwargs_psf['psf_error_map'] = psf_error_map
        # here we super-sample the resolution of some of the pixels where the surface brightness profile has a high gradient
        supersampled_indexes = np.zeros((self.numPix, self.numPix), dtype=bool)
        # Super-sample the central ~20% of the frame where the target sits.
        supersampled_indexes[int(self.numPix/2)-int(self.numPix/10):int(self.numPix/2)+int(self.numPix/10),
                             int(self.numPix/2)-int(self.numPix/10):int(self.numPix/2)+int(self.numPix/10)] = True
        kwargs_numerics = {'supersampling_factor': supersampling_factor,
                           'compute_mode': 'adaptive',
                           'supersampled_indexes': supersampled_indexes}
        if point_source_supersampling_factor >= 1:
            kwargs_numerics['point_source_supersampling_factor'] = point_source_supersampling_factor
        # kwargs_numerics = {'supersampling_factor': supersampling_factor}
        image_band = [kwargs_data, kwargs_psf, kwargs_numerics]
        multi_band_list = [image_band]
        self.kwargs_data = kwargs_data
        self.kwargs_psf = kwargs_psf
        self.kwargs_numerics = kwargs_numerics
        self.kwargs_data_joint = {'multi_band_list': multi_band_list, 'multi_band_type': 'multi-linear'}  # 'single-band', 'multi-linear', 'joint-linear'
    def sepc_kwargs_model(self, extend_source_model = ['SERSIC_ELLIPSE'] * 1, point_source_num = 1):
        """Prepare 'kwargs_model': the list of extended light profiles and
        (unlensed) point-source models to fit."""
        point_source_list = ['UNLENSED'] * point_source_num
        kwargs_model = {'point_source_model_list': point_source_list}
        if extend_source_model != None and extend_source_model != []:
            light_model_list = extend_source_model
            kwargs_model['lens_light_model_list'] = light_model_list
        else:
            light_model_list = []
        self.point_source_list = point_source_list
        self.light_model_list = light_model_list
        kwargs_model['sersic_major_axis'] = self.sersic_major_axis
        self.kwargs_model = kwargs_model
    def sepc_kwargs_constraints(self, fix_center_list = None):
        """
        Prepare the 'kwargs_constraints' for the fitting.
        Parameter
        --------
            fix_center_list: list.
                -if not None, describe how to fix the center [[0,0]] for example.
                This list defines how to 'joint_lens_light_with_point_source' definied by lenstronomy:
                [[i_point_source, k_lens_light], [...], ...], see
                https://lenstronomy.readthedocs.io/en/latest/_modules/lenstronomy/Sampling/parameters.html?highlight=joint_lens_light_with_point_source#
                for example [[0, 1]], joint first (0) point source with the second extend source (1).
        """
        kwargs_constraints = {'num_point_source_list': [1] * len(self.point_source_list)  #kwargs_constraints also generated here
                              }
        if fix_center_list is not None:
            kwargs_constraints['joint_lens_light_with_point_source'] = fix_center_list
        self.kwargs_constraints = kwargs_constraints
    def sepc_kwargs_likelihood(self, condition=None):
        """
        Prepare the 'kwargs_likelihood' for the fitting.
        Most default values will be assigned.
        Parameter
        --------
            condition: input as a defination.
                Set up extra prior. For example if one want the first component have lower
                Sersic index, it can be set by first define a condition:
                    def condition_def(kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, kwargs_special, kwargs_extinction):
                        logL = 0
                        cond_0 = (kwargs_source[0]['n_sersic'] > kwargs_source[1]['n_sersic'])
                        if cond_0:
                            logL -= 10**15
                        return logL
                Then assign to condition:
                    fit_sepc.prepare_fitting_seq(**, condition = condition_def)
        """
        kwargs_likelihood = {'check_bounds': True,  #Set the bonds, if exceed, reutrn "penalty"
                             'image_likelihood_mask_list': [self.data_process_class.target_mask],
                             'custom_logL_addition': condition
                             }
        if self.light_model_list != []:
            kwargs_likelihood['source_marg'] = False  #In likelihood_module.LikelihoodModule -- whether to fully invert the covariance matrix for marginalization
            kwargs_likelihood['check_positive_flux'] = True  #penalty is any component's flux is 'negative'.
        self.kwargs_likelihood = kwargs_likelihood
    def sepc_kwargs_params(self, source_params = None, fix_n_list = None, fix_Re_list = None, ps_params = None, ps_pix_center_list= None,
                           neighborhood_size = 4, threshold = 5, apertures_center_focus = False):
        """
        Setting up the 'kwargs_params' (i.e., the parameters) for the fitting. If 'source_params' or 'ps_params'
        are given, rather then setting as None, then, the input settings will be used.
        Parameter
        --------
            fix_n_list: list.
                Describe a prior if want to fix the Sersic index.
                e.g., fix_n_list= [[0,4], [1,1]], means the first (i.e., 0) fix n = 4; the second (i.e., 1) fix n = 1.
            fix_Re_list: list.
                Describe a prior if want to fix the Sersic effective radius.
                e.g., fix_n_list= [[0,0.4], [1,1]], means the first (i.e., 0) fix Reff value as 0.4.
            apertures_center_focus: bool.
                If true, the default parameters will have strong prior so that the center of the fitted Sersic will
                be closer to the apertures.
        """
        kwargs_params = {}
        if self.light_model_list != []:
            if source_params is None:
                source_params = source_params_generator(frame_size = self.numPix,
                                                        apertures = self.apertures,
                                                        deltaPix = self.deltaPix,
                                                        fix_n_list = fix_n_list,
                                                        fix_Re_list = fix_Re_list,
                                                        apertures_center_focus = apertures_center_focus)
            else:
                source_params = source_params
            kwargs_params['lens_light_model'] = source_params
        if ps_params is None and len(self.point_source_list) > 0:
            if ps_pix_center_list is None:
                from galight.tools.measure_tools import find_loc_max
                x, y = find_loc_max(self.data_process_class.target_stamp, neighborhood_size = neighborhood_size, threshold = threshold)  #Automaticlly find the local max as PS center.
                # if x == []:
                if len(x) < len(self.point_source_list):
                    # Retry with a looser threshold before giving up.
                    x, y = find_loc_max(self.data_process_class.target_stamp, neighborhood_size = neighborhood_size, threshold = threshold/2)  #Automaticlly find the local max as PS center.
                    # raise ValueError("Warning: could not find the enough number of local max to match the PS numbers. Thus,\
                    #                   the ps_params must input manually or change the neighborhood_size and threshold values")
                if len(x) < len(self.point_source_list):
                    warnings.warn("\nWarning: could not find the enough number of local max to match the PS numbers. Thus, all the initial PS set the same initial parameters.")
                    if x == []:
                        x, y = [self.numPix/2], [self.numPix/2]
                    else:
                        x = x * len(self.point_source_list)
                        y = y * len(self.point_source_list)
                flux_ = []
                for i in range(len(x)):
                    # NOTE(review): stamp is indexed as [x, y] here; numpy images
                    # are conventionally [row=y, col=x] — confirm intended convention.
                    flux_.append(self.data_process_class.target_stamp[int(x[i]), int(y[i])])
                _id = np.flipud(np.argsort(flux_))
                arr_x = np.array(x)
                arr_y = np.array(y)
                ps_x = - 1 * ((arr_x - int(self.numPix/2) ) )
                ps_y = (arr_y - int(self.numPix/2) )
                center_list = []
                flux_list = []
                for i in range(len(self.point_source_list)):
                    center_list.append([ps_x[_id[i]], ps_y[_id[i]]])
                    flux_list.append(flux_[_id[i]] * 10 )
            elif ps_pix_center_list is not None:
                if len(ps_pix_center_list) != len(self.point_source_list):
                    raise ValueError("Point source number mismatch between ps_pix_center_list and point_source_num")
                center_list = ps_pix_center_list
                # NOTE(review): this flips the x sign in place, mutating the
                # caller's ps_pix_center_list — consider copying; confirm intent.
                for i in range(len(center_list)):
                    center_list[i][0] = -center_list[i][0]
            ps_params = ps_params_generator(centers = center_list,
                                            deltaPix = self.deltaPix)
        else:
            ps_params = ps_params
        kwargs_params['point_source_model'] = ps_params
        center_pix_pos = []
        if len(self.point_source_list) > 0:
            # Convert point-source sky positions back to pixel coordinates for plotting.
            for i in range(len(ps_params[0])):
                x = -1 * ps_params[0][i]['ra_image'][0]/self.deltaPix
                y = ps_params[0][i]['dec_image'][0]/self.deltaPix
                center_pix_pos.append([x, y])
            center_pix_pos = np.array(center_pix_pos)
            center_pix_pos = center_pix_pos + int(self.numPix/2)
        self.center_pix_pos = center_pix_pos
        self.kwargs_params = kwargs_params
    def sepc_imageModel(self, sersic_major_axis):
        """Instantiate the lenstronomy data, PSF, light-model and point-source
        classes from the prepared kwargs and store them on self."""
        from lenstronomy.Data.imaging_data import ImageData
        from lenstronomy.Data.psf import PSF
        data_class = ImageData(**self.kwargs_data)
        from lenstronomy.PointSource.point_source import PointSource
        pointSource = PointSource(point_source_type_list=self.point_source_list)
        psf_class = PSF(**self.kwargs_psf)
        from lenstronomy.LightModel.light_model import LightModel
        try:
            # Newer lenstronomy accepts sersic_major_axis directly.
            lightModel = LightModel(light_model_list=self.light_model_list, sersic_major_axis=sersic_major_axis)  # By this setting: fit_sepc.lightModel.func_list[1]._sersic_major_axis
        except:
            lightModel = LightModel(light_model_list=self.light_model_list)
            if version.parse(lenstronomy.__version__) >= version.parse("1.9.0"):
                warnings.warn("\nWarning: The current Lenstronomy Version doesn't not allow for sersic_major_axis=True. Please update you Lenstrnomy version or change you Lenstronomy configure file.")
        self.data_class = data_class
        self.psf_class = psf_class
        self.lightModel = lightModel
        self.pointSource = pointSource
    def plot_fitting_sets(self, savename = None, show_plot=True):
        """
        To make a plot show how the data will be fitted. The extend source will be shown using aperture, point source will be show as point source.
        Parameter
        --------
            savename: None or string.
                -Defining the saving name.
            show_plot: bool.
                -Plot or not plot. Note that figure can be saved without shown.
        """
        from galight.tools.plot_tools import plot_data_apertures_point
        plot_data_apertures_point(self.kwargs_data['image_data'] * self.kwargs_likelihood['image_likelihood_mask_list'][0],  # + (self.kwargs_likelihood['image_likelihood_mask_list'][0]==0)*1.e6 ,
                                  self.apertures, self.center_pix_pos, savename = savename, show_plot=show_plot)
    def prepare_fitting_seq(self, supersampling_factor = 2, point_source_supersampling_factor = 1,
                            psf_data = None,
                            extend_source_model = None,
                            point_source_num = 0, ps_pix_center_list = None,
                            fix_center_list = None, source_params = None,
                            fix_n_list = None, fix_Re_list = None, ps_params = None, condition = None,
                            neighborhood_size = 4, threshold = 5, apertures_center_focus = False,
                            psf_error_map = None, mpi = False):
        """
        Key function used to prepared for the fitting. Parameters will be passed to the corresponding functions.
        """
        self.mpi = mpi
        if extend_source_model is None:
            # One Sersic profile per detected aperture by default.
            extend_source_model = ['SERSIC_ELLIPSE'] * len(self.apertures)
        self.sepc_kwargs_data(supersampling_factor = supersampling_factor, point_source_supersampling_factor = point_source_supersampling_factor,
                              psf_data = psf_data, psf_error_map = psf_error_map)
        self.sepc_kwargs_model(extend_source_model = extend_source_model, point_source_num = point_source_num)
        self.sepc_kwargs_constraints(fix_center_list = fix_center_list)
        self.sepc_kwargs_likelihood(condition)
        self.sepc_kwargs_params(source_params = source_params, fix_n_list = fix_n_list, fix_Re_list = fix_Re_list,
                                ps_params = ps_params, neighborhood_size = neighborhood_size, threshold = threshold,
                                apertures_center_focus = apertures_center_focus, ps_pix_center_list = ps_pix_center_list)
        if point_source_num == 0 or point_source_num == None:
            # Strip all point-source material when no point source is fitted.
            del self.kwargs_params['point_source_model']
            del self.kwargs_constraints['num_point_source_list']
            del self.kwargs_model['point_source_model_list']
        self.sepc_imageModel(sersic_major_axis = self.sersic_major_axis)
        print("The settings for the fitting is done. Ready to pass to FittingProcess. \n However, please make updates manullay if needed.")
    def build_fitting_seq(self):
        """Build the lenstronomy FittingSequence from the prepared kwargs; when
        the linear amplitude solver is disabled, seed explicit 'amp'/'point_amp'
        entries so amplitudes are sampled."""
        from lenstronomy.Workflow.fitting_sequence import FittingSequence
        self.fitting_seq = FittingSequence(self.kwargs_data_joint, self.kwargs_model,
                                           self.kwargs_constraints, self.kwargs_likelihood,
                                           self.kwargs_params, mpi=self.mpi)
        if 'linear_solver' in self.kwargs_constraints.keys():
            if self.kwargs_constraints['linear_solver'] == False:
                for i in range(len(self.kwargs_params['lens_light_model'][0])):
                    if 'amp' not in self.kwargs_params['lens_light_model'][0][i].keys():
                        # [init, sigma, fixed, lower, upper] — fill amp for each.
                        self.kwargs_params['lens_light_model'][0][i]['amp'] = 1
                        self.kwargs_params['lens_light_model'][1][i]['amp'] = 1
                        self.kwargs_params['lens_light_model'][3][i]['amp'] = 0
                        self.kwargs_params['lens_light_model'][4][i]['amp'] = 1.e8
                if 'point_source_model' in self.kwargs_params.keys():
                    for i in range(len(self.kwargs_params['point_source_model'][0])):
                        if 'point_amp' not in self.kwargs_params['point_source_model'][0][i].keys():
                            self.kwargs_params['point_source_model'][0][i]['point_amp'] = [1]
                            self.kwargs_params['point_source_model'][1][i]['point_amp'] = [1]
                            self.kwargs_params['point_source_model'][3][i]['point_amp'] = [0]
                            self.kwargs_params['point_source_model'][4][i]['point_amp'] = [1.e8]
def source_params_generator(frame_size, apertures = [], deltaPix = 1, fix_n_list = None, fix_Re_list = None,
                            apertures_center_focus = False):
    """
    Quickly generate a source parameters for the fitting.
    Parameter
    --------
        frame_size: int.
            The frame size, to define the center of the frame
        apertures:
            The apertures of the targets
        deltaPix:
            The pixel size of the data
        fix_n_list:
            A list to define how to fix the sersic index, default = None
            -for example: fix_n_list = [[0,1],[1,4]], fix first and disk and second as bulge.
        fix_Re_list:
            A list to define how to fix the Sersic effective radius (in arcsec), default = None
            -for example: fix_Re_list = [[0,0.4]], fix the first component's Reff to 0.4.
        apertures_center_focus:
            If True, the prior of the Sersic postion will be most limited to the center of the aperture.
    Return
    --------
        A Params list for the fitting.
    """
    import lenstronomy.Util.param_util as param_util
    fixed_source = []
    kwargs_source_init = []
    kwargs_source_sigma = []
    kwargs_lower_source = []
    kwargs_upper_source = []
    center = int(frame_size/2)
    for i in range(len(apertures)):
        aper = apertures[i]
        # Aperture semi-major axis (pixels) converted to arcsec; axis ratio from b/a.
        Reff = aper.a * deltaPix
        q = aper.b/aper.a
        phi = - aper.theta  # 0 means along the x-axis. 'aperture' is anti-clock-wise , and data_configure_simple(inverse=True)means lenstronomy's phi is clock-wise
        e1, e2 = param_util.phi_q2_ellipticity(phi, q)
        # photutils aperture positions may be a flat pair or a nested array.
        if isinstance(apertures[0].positions[0],float):
            pos_x, pos_y = aper.positions[0], aper.positions[1]
        elif isinstance(apertures[0].positions[0],np.ndarray):
            pos_x, pos_y = aper.positions[0]
        c_x = -(pos_x - center) * deltaPix  #Lenstronomy defines x flipped, (i.e., East on the left.)
        c_y = (pos_y - center) * deltaPix
        if fix_n_list is not None:
            fix_n_list = np.array(fix_n_list)
            if i in fix_n_list[:,0]:
                fix_n_value = (fix_n_list[:,1])[fix_n_list[:,0]==i]
                if len(fix_n_value) != 1:
                    raise ValueError("fix_n are not assigned correctly - {0} component have two assigned values.".format(i))
                else:
                    fix_n_value = fix_n_value[0]  #extract the fix n value from the list
                fixed_source.append({'n_sersic': fix_n_value})
                kwargs_source_init.append({'R_sersic': Reff, 'n_sersic': fix_n_value,
                                           'e1': e1, 'e2': e2, 'center_x': c_x, 'center_y': c_y})
            else:
                fixed_source.append({})
                kwargs_source_init.append({'R_sersic': Reff, 'n_sersic': 2., 'e1': e1, 'e2': e2, 'center_x': c_x, 'center_y': c_y})
        else:
            fixed_source.append({})
            kwargs_source_init.append({'R_sersic': Reff, 'n_sersic': 2., 'e1': e1, 'e2': e2, 'center_x': c_x, 'center_y': c_y})
        if fix_Re_list is not None:
            fix_Re_list = np.array(fix_Re_list)
            if i in fix_Re_list[:,0]:
                fix_Re_value = (fix_Re_list[:,1])[fix_Re_list[:,0]==i]
                if len(fix_Re_value) != 1:
                    raise ValueError("fix_Re are not assigned correctly - {0} component have two assigned values.".format(i))
                else:
                    fix_Re_value = fix_Re_value[0]  #extract the fix Re value from the list
                fixed_source[-1]['R_sersic'] = fix_Re_value
                kwargs_source_init[-1]['R_sersic'] = fix_Re_value
        kwargs_source_sigma.append({'n_sersic': 2, 'R_sersic': 0.2*Reff, 'e1': 0.1, 'e2': 0.1, 'center_x': 2*deltaPix, 'center_y': 2*deltaPix})
        # Position bounds: +/-10 pixels normally, tightened to +/-2 when focusing on apertures.
        if apertures_center_focus == False:
            kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': deltaPix*0.05, 'n_sersic': 0.3, 'center_x': c_x-10*deltaPix, 'center_y': c_y-10*deltaPix})
            kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': Reff*30, 'n_sersic': 9., 'center_x': c_x+10*deltaPix, 'center_y': c_y+10*deltaPix})
        elif apertures_center_focus == True:
            kwargs_lower_source.append({'e1': -0.5, 'e2': -0.5, 'R_sersic': deltaPix*0.05, 'n_sersic': 0.3, 'center_x': c_x-2*deltaPix, 'center_y': c_y-2*deltaPix})
            kwargs_upper_source.append({'e1': 0.5, 'e2': 0.5, 'R_sersic': Reff*30, 'n_sersic': 9., 'center_x': c_x+2*deltaPix, 'center_y': c_y+2*deltaPix})
    source_params = [kwargs_source_init, kwargs_source_sigma, fixed_source, kwargs_lower_source, kwargs_upper_source]
    return source_params
def ps_params_generator(centers, deltaPix = 1):
    """
    Quickly generate a point source parameters for the fitting.
    Pixel centers are converted to sky offsets by multiplying with deltaPix;
    returns [init, sigma, fixed, lower, upper] kwargs lists for lenstronomy.
    """
    fixed_ps, kwargs_ps_init, kwargs_ps_sigma = [], [], []
    kwargs_lower_ps, kwargs_upper_ps = [], []
    for center in centers:
        ra = center[0] * deltaPix
        dec = center[1] * deltaPix
        fixed_ps.append({})
        kwargs_ps_init.append({'ra_image': [ra], 'dec_image': [dec]})
        kwargs_ps_sigma.append({'ra_image': [deltaPix], 'dec_image': [deltaPix]})
        # Allow the position to move by at most two pixels in each direction.
        kwargs_lower_ps.append({'ra_image': [ra - 2*deltaPix], 'dec_image': [dec - 2*deltaPix]})
        kwargs_upper_ps.append({'ra_image': [ra + 2*deltaPix], 'dec_image': [dec + 2*deltaPix]})
    return [kwargs_ps_init, kwargs_ps_sigma, fixed_ps, kwargs_lower_ps, kwargs_upper_ps]
|
dartoonREPO_NAMEgalightPATH_START.@galight_extracted@galight-master@galight@fitting_specify.py@.PATH_END.py
|
{
"filename": "dataloaders.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/pyro/contrib/mue/dataloaders.py",
"type": "Python"
}
|
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
import numpy as np
import torch
from torch.utils.data import Dataset
alphabets = {
"amino-acid": np.array(
[
"R",
"H",
"K",
"D",
"E",
"S",
"T",
"N",
"Q",
"C",
"G",
"P",
"A",
"V",
"I",
"L",
"M",
"F",
"Y",
"W",
]
),
"dna": np.array(["A", "C", "G", "T"]),
}
class BiosequenceDataset(Dataset):
"""
Load biological sequence data, either from a fasta file or a python list.
:param source: Either the input fasta file path (str) or the input list
of sequences (list of str).
:param str source_type: Type of input, either 'list' or 'fasta'.
:param str alphabet: Alphabet to use. Alphabets 'amino-acid' and 'dna' are
preset; any other input will be interpreted as the alphabet itself,
i.e. you can use 'ACGU' for RNA.
:param int max_length: Total length of the one-hot representation of the
sequences, including zero padding. Defaults to the maximum sequence
length in the dataset.
:param bool include_stop: Append stop symbol to the end of each sequence
and add the stop symbol to the alphabet.
:param torch.device device: Device on which data should be stored in
memory.
"""
def __init__(
self,
source,
source_type="list",
alphabet="amino-acid",
max_length=None,
include_stop=False,
device=None,
):
super().__init__()
# Determine device
if device is None:
device = torch.tensor(0.0).device
self.device = device
# Get sequences.
self.include_stop = include_stop
if source_type == "list":
seqs = [seq + include_stop * "*" for seq in source]
elif source_type == "fasta":
seqs = self._load_fasta(source)
# Get lengths.
self.L_data = torch.tensor([float(len(seq)) for seq in seqs], device=device)
if max_length is None:
self.max_length = int(torch.max(self.L_data))
else:
self.max_length = max_length
self.data_size = len(self.L_data)
# Get alphabet.
if alphabet in alphabets:
alphabet = alphabets[alphabet]
else:
alphabet = np.array(list(alphabet))
if self.include_stop:
alphabet = np.array(list(alphabet) + ["*"])
self.alphabet = alphabet
self.alphabet_length = len(alphabet)
# Build dataset.
self.seq_data = torch.cat(
[self._one_hot(seq, alphabet, self.max_length).unsqueeze(0) for seq in seqs]
)
def _load_fasta(self, source):
"""A basic multiline fasta parser."""
seqs = []
seq = ""
with open(source, "r") as fr:
for line in fr:
if line[0] == ">":
if seq != "":
if self.include_stop:
seq += "*"
seqs.append(seq)
seq = ""
else:
seq += line.strip("\n")
if seq != "":
if self.include_stop:
seq += "*"
seqs.append(seq)
return seqs
def _one_hot(self, seq, alphabet, length):
"""One hot encode and pad with zeros to max length."""
# One hot encode.
oh = torch.tensor(
(np.array(list(seq))[:, None] == alphabet[None, :]).astype(np.float64),
device=self.device,
)
# Pad.
x = torch.cat(
[oh, torch.zeros([length - len(seq), len(alphabet)], device=self.device)]
)
return x
def __len__(self):
    """Number of sequences in the dataset."""
    return self.data_size
def __getitem__(self, ind):
    """Return ``(one_hot_sequence, sequence_length)`` for index ``ind``."""
    return (self.seq_data[ind], self.L_data[ind])
def write(x, alphabet, file, truncate_stop=False, append=False, scores=None):
    """
    Write sequence samples to file.

    :param ~torch.Tensor x: One-hot encoded sequences, with size
        ``(data_size, seq_length, alphabet_length)``. May be padded with
        zeros for variable length sequences.
    :param ~np.array alphabet: Alphabet.
    :param str file: Output file, where sequences will be written
        in fasta format.
    :param bool truncate_stop: If True, sequences will be truncated at the
        first stop symbol (i.e. the stop symbol and everything after will not
        be written). If False, the whole sequence will be written, including
        any internal stop symbols.
    :param bool append: If True, sequences are appended to the end of the
        output file. If False, the file is first erased.
    :param scores: Optional iterable of per-sequence values written into the
        fasta header lines in place of the default running index.
    """
    # Extra empty symbol at the end of the alphabet marks padded positions.
    print_alphabet = np.array(list(alphabet) + [""])
    # BUG FIX: helper tensors must match x's device and dtype; the previous
    # hard-coded CPU/double tensors failed for GPU or float32 inputs.
    x = torch.cat(
        [x, torch.zeros(list(x.shape[:2]) + [1], device=x.device, dtype=x.dtype)],
        -1,
    )
    if truncate_stop:
        # Positions at or after the first stop symbol are masked out and
        # remapped to the empty symbol.
        is_stop = torch.tensor(print_alphabet == "*", dtype=x.dtype, device=x.device)
        mask = (torch.cumsum(torch.matmul(x, is_stop), -1) > 0).to(x.dtype)
        x = x * (1 - mask).unsqueeze(-1)
        x[:, :, -1] = mask
    else:
        # Zero-padded positions (one-hot row sums to 0) map to the empty symbol.
        x[:, :, -1] = (torch.sum(x, -1) < 0.5).to(x.dtype)
    # Convert one-hot rows to integer alphabet indices.
    index = (
        torch.matmul(x, torch.arange(x.shape[-1], dtype=x.dtype, device=x.device))
        .to(torch.long)
        .cpu()
        .numpy()
    )
    if scores is None:
        seqs = [
            ">{}\n".format(j) + "".join(elem) + "\n"
            for j, elem in enumerate(print_alphabet[index])
        ]
    else:
        seqs = [
            ">{}\n".format(j) + "".join(elem) + "\n"
            for j, elem in zip(scores, print_alphabet[index])
        ]
    open_flag = "a" if append else "w"
    with open(file, open_flag) as fw:
        fw.write("".join(seqs))
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@pyro@contrib@mue@dataloaders.py@.PATH_END.py
|
{
"filename": "future.py",
"repo_name": "TRASAL/frbpoppy",
"repo_path": "frbpoppy_extracted/frbpoppy-master/tests/lognlogs/future.py",
"type": "Python"
}
|
"""Check the log N log F slope for future surveys."""
import numpy as np
import matplotlib.pyplot as plt
from frbpoppy import CosmicPopulation, Survey, SurveyPopulation, hist
from frbpoppy import unpickle, pprint
from tests.convenience import plot_aa_style, rel_path
MAKE = True
SURVEYS = ('parkes-htru', 'fast-crafts', 'puma-full', 'chord', 'ska1-low', 'ska1-mid')
if MAKE:
surv_pops = []
pop = CosmicPopulation.complex(1e5, generate=False)
pop.generate()
for name in SURVEYS:
survey = Survey(name)
surv_pop = SurveyPopulation(pop, survey)
surv_pop.save()
surv_pops.append(surv_pop)
else:
surv_pops = []
for name in SURVEYS:
surv_pops.append(unpickle(f'complex_{name}'))
# Start plot
plot_aa_style()
fig, ax1 = plt.subplots(1, 1)
# Fluence plot
ax1.set_xlabel('S/N')
ax1.set_xscale('log')
ax1.set_ylabel(r'\#(${>}\text{S/N}$)')
ax1.set_yscale('log')
# Update fluence plot
for i, surv_pop in enumerate(surv_pops):
name = surv_pop.name.split('_')[-1]
snr = surv_pop.frbs.snr
if snr.size == 0:
pprint(f'No FRBs in {name} population')
continue
bins, values = hist(snr, bin_type='log', norm=None)
# Cumulative sum
values = np.cumsum(values[::-1])[::-1]
# Normalise to area on sky
if not np.isnan(values.all()):
values = values * surv_pop.source_rate.f_area
plt.step(bins, values, where='mid', label=name)
plt.legend()
plt.tight_layout()
plt.savefig(rel_path('./plots/logn_logs_future_surveys.pdf'))
|
TRASALREPO_NAMEfrbpoppyPATH_START.@frbpoppy_extracted@frbpoppy-master@tests@lognlogs@future.py@.PATH_END.py
|
{
"filename": "axioms.py",
"repo_name": "ESDS-Leipzig/sen2nbar",
"repo_path": "sen2nbar_extracted/sen2nbar-main/sen2nbar/axioms.py",
"type": "Python"
}
|
# Per-band coefficient tables, all keyed by Sentinel-2 band name.
# NOTE(review): fiso/fgeo/fvol look like fixed isotropic / geometric /
# volumetric BRDF kernel weights per band, and ``bands`` the native band
# resolution in metres — confirm against the package's cited reference.
_S2_BANDS = ("B02", "B03", "B04", "B05", "B06", "B07", "B08", "B11", "B12")

fiso = dict(
    zip(_S2_BANDS, (0.0774, 0.1306, 0.1690, 0.2085, 0.2316, 0.2599,
                    0.3093, 0.3430, 0.2658))
)

fgeo = dict(
    zip(_S2_BANDS, (0.0079, 0.0178, 0.0227, 0.0256, 0.0273, 0.0294,
                    0.0330, 0.0453, 0.0387))
)

fvol = dict(
    zip(_S2_BANDS, (0.0372, 0.0580, 0.0574, 0.0845, 0.1003, 0.1197,
                    0.1535, 0.1154, 0.0639))
)

bands = dict(zip(_S2_BANDS, (10, 10, 10, 20, 20, 20, 10, 20, 20)))
|
ESDS-LeipzigREPO_NAMEsen2nbarPATH_START.@sen2nbar_extracted@sen2nbar-main@sen2nbar@axioms.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.