metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "astrometry.md",
"repo_name": "cmillion/gPhoton",
"repo_path": "gPhoton_extracted/gPhoton-master/calpaper/src/astrometry.md",
"type": "Markdown"
}
|
%pylab
# NOTE(review): `%pylab` is an IPython magic (this file is a notebook-style
# transcript, Python 2 era); it populates the namespace with numpy (np) and
# matplotlib (plt) among others, which the code below relies on.
import gQuery as gq
import galextools as gt
import dbasetools as dt
import gphoton_utils as gu
from gAperture import gAperture

# Target selection: the FUV defaults are immediately overridden by the two
# CDFS pointings below, so the last assignment (CDFS_00) wins.
band, ra0, dec0, radius = ('FUV', 323.06766667, 0.254, 0.1)
ra0, dec0 = 53.1032558472, -27.7963826072 # PS_CDFS_MOS00
ra0, dec0 = 53.1273118244, -27.8744513656 # CDFS_00
radius = 0.5
aper = 4
maglimit = 23.

# Coadd-level magnitudes within 0.5 deg of the pointing, reduced to a list
# of unique source positions.
data = dt.get_mags(band,ra0,dec0,0.5,maglimit,mode='coadd')
skypos = np.array(dt.parse_unique_sources(data['ra'],data['dec'],
                  data['FUV']['mag'],data['NUV']['mag'],margin=0.001))

# Time range query...
# select top 10 objid, minPhotoObsDate, maxPhotoObsDate, obs_date, obsdatim, nobs_dat, nobssecs, nobsdati, fobs_dat, fobssecs, fobsdati, nexptime, fexptime
# from visitphotoobjall as vp
# inner join imgrun as ir on vp.photoextractid=ir.imgrunid
# inner join visitphotoextract as vpe on vp.photoextractid=vpe.photoextractid

aper = 4
radius = gt.aper2deg(aper)  # aperture index -> aperture radius in degrees
search_radius = 0.001
annulus = [0.01,0.02]
ac = gt.apcorrect1(radius,band)  # aperture correction (magnitudes)
plt.ioff()
for i, pos in enumerate(skypos):
    print i, pos
    # gAperture light curve for this source (background annulus disabled).
    d = gAperture(band,pos,radius,verbose=2,minexp=30,maxgap=10)#,annulus=annulus)
    c = dt.get_mags(band,pos[0],pos[1],search_radius,maglimit+1,mode='coadd')
    # Reference magnitude from the coadd, averaged in count space.
    refmag = gt.counts2mag(gt.mag2counts(c[band][aper],band).mean(),band)
    bot,top=gu.model_errors(refmag-ac,band)
    plt.figure()
    plt.gca().invert_yaxis()  # magnitude axis: brighter sources plot higher
    c = dt.get_mags(band,pos[0],pos[1],search_radius,maglimit+1,mode='visit')
    for mag in c[band][aper]:
        # Horizontal guide line at each visit-level catalog magnitude.
        plt.plot(np.arange(1600),np.zeros(1600)+mag-ac,color='0.75')
    plt.plot(c[band]['expt'],c[band][aper]-ac,'x')
    plt.plot(top)
    plt.plot(bot)
    #plt.plot(d['exptime'],d['mag_bgsub_cheese']-ac,'o')
    #plt.plot(d['exptime'],d['mag_bgsub']-ac,'x')
    plt.plot(d['exptime'],d['mag']-ac,'.')
    plt.axis([0,1600.,min(d['mag'].min()-ac-0.01,c[band][aper].min()-ac-0.01),
              max(d['mag'].max()-ac+0.01,c[band][aper].max()-ac+0.01)])
    print "Saving figure."
    plt.savefig(str(i)+'_'+str(pos[0])+'_'+str(pos[1])+'.png')
    plt.close()
|
cmillionREPO_NAMEgPhotonPATH_START.@gPhoton_extracted@gPhoton-master@calpaper@src@astrometry.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/matplotlib/py2/matplotlib/axes/__init__.py",
"type": "Python"
}
|
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

# Re-export the axes API at package level via wildcard imports; the public
# names are whatever `_subplots` and `_axes` choose to expose.
from ._subplots import *
from ._axes import *
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@matplotlib@py2@matplotlib@axes@__init__.py@.PATH_END.py
|
{
"filename": "_version.py",
"repo_name": "ma-xu/pointMLP-pytorch",
"repo_path": "pointMLP-pytorch_extracted/pointMLP-pytorch-main/pointnet2_ops_lib/pointnet2_ops/_version.py",
"type": "Python"
}
|
# Version string for the pointnet2_ops package (PEP 440 format).
__version__ = "3.0.0"
|
ma-xuREPO_NAMEpointMLP-pytorchPATH_START.@pointMLP-pytorch_extracted@pointMLP-pytorch-main@pointnet2_ops_lib@pointnet2_ops@_version.py@.PATH_END.py
|
{
"filename": "test_infinite_line.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/visuals/tests/test_infinite_line.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Tests for InfiniteLineVisual
All images are of size (100,100) to keep a small file size
"""
import numpy as np
from vispy.scene import visuals
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved
from vispy.testing import assert_raises
@requires_application()
def test_set_data():
    """Test InfiniteLineVisual"""
    position = 5.0
    rgba = [1.0, 1.0, 0.5, 0.5]
    expected = np.array(rgba, dtype=np.float32)

    cases = [(True, 'infinite_line.png'), (False, 'infinite_line_h.png')]
    for vertical, reference in cases:
        with TestingCanvas() as canvas:
            # The constructor must forward pos/color/vertical to set_data.
            line = visuals.InfiniteLine(pos=position,
                                        color=rgba,
                                        vertical=vertical,
                                        parent=canvas.scene)
            assert line.pos == position
            assert np.all(line.color == expected)
            assert line.is_vertical == vertical

            # A tuple color must be accepted just like a list.
            line.set_data(color=tuple(rgba))
            assert np.all(line.color == expected)
            assert_image_approved(canvas.render(), 'visuals/%s' % reference)

            # The position must be a scalar number...
            assert_raises(TypeError, line.set_data, pos=[[1, 2], [3, 4]])
            # ...and the color must be a flat, length-4 RGBA sequence.
            assert_raises(ValueError, line.set_data, color=[[1, 2], [3, 4]])
            assert_raises(ValueError, line.set_data, color=[1, 2])


run_tests_if_main()
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@visuals@tests@test_infinite_line.py@.PATH_END.py
|
{
"filename": "tests.py",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/webserver/lasair/apps/db_schema/tests.py",
"type": "Python"
}
|
from django.test import TestCase

# Create your tests here.
# NOTE(review): placeholder module — no test cases are defined yet.
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@webserver@lasair@apps@db_schema@tests.py@.PATH_END.py
|
{
"filename": "test_lombscargle.py",
"repo_name": "johnh2o2/cuvarbase",
"repo_path": "cuvarbase_extracted/cuvarbase-master/cuvarbase/tests/test_lombscargle.py",
"type": "Python"
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
import numpy as np
import pytest
from numpy.testing import assert_allclose
from astropy.timeseries import LombScargle
from ..lombscargle import LombScargleAsyncProcess
from pycuda.tools import mark_cuda_test
#import pycuda.autoinit
import pycuda.autoprimaryctx
# Shared test configuration.
spp = 3           # samples per peak for the frequency grids
nfac = 3          # Nyquist factor for the frequency grids
lsrtol = 1E-2     # relative tolerance for periodogram comparisons
lsatol = 1E-2     # absolute tolerance for periodogram comparisons
nfft_sigma = 5    # sigma passed to LombScargleAsyncProcess
rand = np.random.RandomState(100)  # module-level RNG with a fixed seed
def data(seed=100, sigma=0.1, ndata=100, freq=3.):
    """Generate a noisy sinusoidal test signal.

    Parameters
    ----------
    seed : int
        Seed for the random generator.  BUGFIX: previously this argument
        was silently ignored and the module-level ``rand`` stream was
        consumed instead; it is now honored, so calls with the same
        arguments are deterministic.
    sigma : float
        Gaussian noise amplitude, also used as the constant per-point error.
    ndata : int
        Number of samples to draw.
    freq : float
        Frequency of the underlying cosine signal.

    Returns
    -------
    tuple of numpy.ndarray
        ``(t, y, err)``: sorted sample times in [0, 1), noisy signal
        values, and the per-point errors (all length ``ndata``).
    """
    rng = np.random.RandomState(seed)
    t = np.sort(rng.rand(ndata))
    y = np.cos(2 * np.pi * freq * t)
    y += sigma * rng.randn(len(t))
    err = sigma * np.ones_like(y)
    return t, y, err
def assert_similar(pdg0, pdg, top=5):
    """Assert that two periodograms agree at the `top` strongest peaks.

    The peaks are selected from the reference periodogram `pdg0`; any
    pair differing by more than 1e-3 is printed for diagnostics before
    the tolerance checks run.
    """
    order = (np.argsort(pdg0)[::-1])[:top]
    expected = np.asarray(pdg0)[order]
    observed = np.asarray(pdg)[order]
    gap = np.absolute(observed - expected)

    # Report the worst offenders first.
    ranked = sorted(zip(expected, observed, gap), key=lambda row: -row[2])
    for e, o, g in ranked:
        if g > 1e-3:
            print(e, o, g)

    assert_allclose(observed, expected, atol=lsatol, rtol=lsrtol)
    assert all(gap < lsrtol * 0.5 * (observed + expected) + lsatol)
class TestLombScargle(object):
    # Cross-validation tests for the GPU Lomb-Scargle implementation:
    # against astropy's reference implementation, and against itself in
    # different execution modes (FFT vs. direct sums, batched vs. single).
    # NOTE(review): requires a CUDA-capable device — the pycuda context is
    # created at module import time.

    def test_against_astropy_double(self):
        # Double-precision FFT path vs. astropy on the same frequency grid.
        t, y, err = data()
        ls_proc = LombScargleAsyncProcess(use_double=True,
                                          sigma=nfft_sigma)
        results = ls_proc.run([(t, y, err)], nyquist_factor=nfac,
                              use_fft=True,
                              samples_per_peak=spp)
        ls_proc.finish()
        fgpu, pgpu = results[0]
        power = LombScargle(t, y, err).power(fgpu)
        assert_similar(power, pgpu)

    def test_against_astropy_single(self):
        # Same comparison in single precision.
        t, y, err = data()
        ls_proc = LombScargleAsyncProcess(use_double=False,
                                          sigma=nfft_sigma)
        results = ls_proc.run([(t, y, err)], nyquist_factor=nfac,
                              samples_per_peak=spp)
        ls_proc.finish()
        fgpu, pgpu = results[0]
        power = LombScargle(t, y, err).power(fgpu)
        assert_similar(power, pgpu)

    def test_ls_kernel(self):
        t, y, err = data()
        ls_proc = LombScargleAsyncProcess(use_double=False,
                                          sigma=nfft_sigma)
        results = ls_proc.run([(t, y, err)], nyquist_factor=nfac,
                              samples_per_peak=spp)
        ls_proc.finish()
        fgpu, pgpu = results[0]
        # Astropy configured with fit_mean=True / center_data=False to
        # mirror the kernel's model assumptions.
        ls = LombScargle(t, y, err, fit_mean=True, center_data=False)
        power = ls.power(fgpu)
        assert_similar(power, pgpu)

    def test_ls_kernel_direct_sums(self):
        # Direct (non-FFT) summation path in double precision.
        t, y, err = data()
        ls_proc = LombScargleAsyncProcess(use_double=True,
                                          sigma=nfft_sigma)
        results = ls_proc.run([(t, y, err)], nyquist_factor=nfac,
                              samples_per_peak=spp, use_fft=False)
        ls_proc.finish()
        fgpu, pgpu = results[0]
        ls = LombScargle(t, y, err, fit_mean=True, center_data=True)
        power = ls.power(fgpu)
        assert_similar(power, pgpu)

    def test_ls_kernel_direct_sums_is_consistent(self):
        # GPU direct sums must agree with the CPU-NFFT-backed run.
        t, y, err = data()
        ls_proc = LombScargleAsyncProcess(use_double=False,
                                          sigma=nfft_sigma)
        results_ds = ls_proc.run([(t, y, err)], nyquist_factor=nfac,
                                 samples_per_peak=spp, use_fft=False)
        ls_proc.finish()
        fgpu_ds, pgpu_ds = results_ds[0]
        results_reg = ls_proc.run([(t, y, err)], nyquist_factor=nfac,
                                  samples_per_peak=spp, use_cpu_nfft=True)
        ls_proc.finish()
        fgpu_reg, pgpu_reg = results_reg[0]
        assert_similar(pgpu_reg, pgpu_ds)

    def test_ls_kernel_direct_sums_against_python(self):
        # GPU direct sums vs. the pure-Python direct-sum reference.
        t, y, err = data()
        ls_proc = LombScargleAsyncProcess(use_double=False, sigma=nfft_sigma)
        result_ds = ls_proc.run([(t, y, err)], nyquist_factor=nfac,
                                samples_per_peak=spp, use_fft=False)
        ls_proc.finish()
        fgpu_ds, pgpu_ds = result_ds[0]
        result_reg = ls_proc.run([(t, y, err)], nyquist_factor=nfac,
                                 samples_per_peak=spp,
                                 use_fft=False,
                                 python_dir_sums=True)
        ls_proc.finish()
        fgpu_reg, pgpu_reg = result_reg[0]
        assert_similar(pgpu_reg, pgpu_ds)

    def test_multiple_datasets(self, ndatas=5):
        # Running several datasets in one call must match one-at-a-time runs.
        datas = [data() for i in range(ndatas)]
        ls_proc = LombScargleAsyncProcess(sigma=nfft_sigma)
        mult_results = ls_proc.run(datas, nyquist_factor=nfac,
                                   samples_per_peak=spp)
        ls_proc.finish()
        sing_results = []
        for d in datas:
            sing_results.extend(ls_proc.run([d], nyquist_factor=nfac,
                                            samples_per_peak=spp))
            ls_proc.finish()
        for rb, rnb in zip(mult_results, sing_results):
            fb, pb = rb
            fnb, pnb = rnb
            assert_allclose(pnb, pb, rtol=lsrtol, atol=lsatol)
            assert_allclose(fnb, fb, rtol=lsrtol, atol=lsatol)

    def test_batched_run(self, ndatas=5, batch_size=5, sigma=nfft_sigma,
                         samples_per_peak=spp, nyquist_factor=nfac,
                         **kwargs):
        # batched_run must reproduce single-dataset runs, frequencies and
        # powers alike (datasets have varying lengths).
        datas = [data(ndata=rand.randint(50, 100))
                 for i in range(ndatas)]
        ls_proc = LombScargleAsyncProcess(sigma=sigma, **kwargs)
        kw = dict(nyquist_factor=nyquist_factor,
                  samples_per_peak=samples_per_peak)
        batched_results = ls_proc.batched_run(datas, **kw)
        ls_proc.finish()
        non_batched_results = []
        for d in datas:
            r = ls_proc.run([d], nyquist_factor=nyquist_factor,
                            samples_per_peak=samples_per_peak)
            ls_proc.finish()
            non_batched_results.extend(r)
        for rb, rnb in zip(batched_results, non_batched_results):
            fb, pb = rb
            fnb, pnb = rnb
            assert_allclose(pnb, pb, rtol=lsrtol, atol=lsatol)
            assert_allclose(fnb, fb, rtol=lsrtol, atol=lsatol)

    def test_batched_run_const_nfreq(self, make_plot=False, ndatas=27,
                                     batch_size=5, sigma=nfft_sigma,
                                     samples_per_peak=spp,
                                     nyquist_factor=nfac,
                                     **kwargs):
        # Same consistency check for the constant-nfreq batched variant;
        # each dataset carries its own signal frequency and the non-batched
        # runs reuse the frequency grids produced by the batched call.
        frequencies = 10 + rand.rand(ndatas) * 100.
        datas = [data(ndata=rand.randint(50, 100),
                      freq=freq)
                 for i, freq in enumerate(frequencies)]
        ls_proc = LombScargleAsyncProcess(sigma=sigma, **kwargs)
        kw = dict(samples_per_peak=spp,
                  batch_size=batch_size)
        kw.update(kwargs)
        batched_results = ls_proc.batched_run_const_nfreq(datas, **kw)
        ls_proc.finish()
        ls_procnb = LombScargleAsyncProcess(sigma=nfft_sigma,
                                            use_double=False, **kwargs)
        non_batched_results = []
        for d, (frq, p) in zip(datas, batched_results):
            r = ls_procnb.run([d], freqs=frq, **kwargs)
            ls_procnb.finish()
            non_batched_results.extend(r)
        # for f0, (fb, pb), (fnb, pnb) in zip(frequencies, batched_results,
        #                                     non_batched_results):
        #     print f0, fb[np.argmax(pb)], fnb[np.argmax(pnb)]
        for f0, (fb, pb), (fnb, pnb) in zip(frequencies, batched_results,
                                            non_batched_results):
            if make_plot:
                # Optional visual comparison; not used in automated runs.
                import matplotlib.pyplot as plt
                plt.plot(fnb, pnb, color='k', lw=3)
                plt.plot(fb, pb, color='r')
                plt.axvline(f0)
                plt.show()
            assert_allclose(pnb, pb, rtol=lsrtol, atol=lsatol)
            assert_allclose(fnb, fb, rtol=lsrtol, atol=lsatol)
|
johnh2o2REPO_NAMEcuvarbasePATH_START.@cuvarbase_extracted@cuvarbase-master@cuvarbase@tests@test_lombscargle.py@.PATH_END.py
|
{
"filename": "_meta.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choropleth/_meta.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators


class MetaValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``meta`` attribute of ``choropleth`` traces."""

    def __init__(self, plotly_name="meta", parent_name="choropleth", **kwargs):
        # Defaults apply unless the caller overrides them through kwargs.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "plot")
        super(MetaValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choropleth@_meta.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "jkrogager/PyNOT",
"repo_path": "PyNOT_extracted/PyNOT-master/README.md",
"type": "Markdown"
}
|
# PyNOT-redux
A Data Processing Pipeline for ALFOSC at the Nordic Optical Telescope
PyNOT handles long-slit spectroscopic data (an extension for imaging data is currently being developed). The pipeline is implemented entirely in Python and can be run directly from the terminal. The main workflow is mostly automated (and can in fact be run fully automated) and includes a graphical user interface for certain tasks (such as line identification for wavelength calibration and spectral 1D extraction).
A special thank you goes out to Prof. Johan Fynbo for helpful discussions and feedback, and for introducing me to the Nordic Optical Telescope in the first place (back in 2012).
```diff
- The pipeline is currently in a testing stage!
Feel free to test it on your own data and let me know if you find any issues.
I'll respond as fast as possible.
```
## Installation
Before installing the pipeline, it is recommended to set up a virtual environment to avoid contaminating your local python environment. You can create a `pynot` environment using `python -m venv pynot` (or similar in `conda`). Then activate the environment before installing and when running PyNOT: `source /path_to_venv/pynot/bin/activate`. Remember to `deactivate` the environment when you're done.
The pipeline can be installed using [pip](https://www.pypi.org):
]% pip install PyNOT-redux
and requires the following packages : `astroalign`, `astropy`, `astroquery`, `astroscrappy`, `lmfit`, `matplotlib`, `numpy`, `PyQt5`, `PyYAML`, `scipy`, `sep`, and `spectres`. I want to give a huge shout out to all the developers of these packages. Thanks for sharing your work!
Alternatively, clone the repository and install it using:
```
git clone git@github.com:jkrogager/PyNOT.git
cd PyNOT
python -m venv venv-pynot
source venv-pynot/bin/activate
python -m pip install -r requirements.txt
```
Then run the installer:
`python -m pip install .` or `python setup.py install`.
## Basic Usage
The pipeline is implemented as a series of modules or "tasks" that can either be executed individually or as a fully assembled pipeline. The available tasks can be shown by running:
]% pynot -h
and the input parameters for each task can be inspected by running:
]% pynot task-name -h
Three of the tasks have slightly special behavior:
- **`init`** : classifies the data in the given input directory (or directories) and creates a default parameter file in YAML format.
- **`spex`** : runs the full spectroscopic pipeline using the parameter file generated by `pynot init spex`. The full pipeline performs bias and flat field correction, wavelength calibration and rectifies the 2D spectrum, subtracts the sky background, corrects cosmic ray hits, flux calibrates the 2D spectrum and performs an automated optimal extraction of all objects identified in the slit (see more details below).
The extracted 1D spectra are saved as a multi-extension FITS file where each object identified in the slit has its own extension:
No. Name Ver Type Cards Dimensions Format
0 PRIMARY 1 PrimaryHDU 4 ()
1 OBJ1 1 BinTableHDU 158 1026R x 3C [D, D, D]
2 OBJ2 1 BinTableHDU 158 1026R x 3C [D, D, D]
: : : : : : :
: : : : : : :
Each spectrum is saved as a Binary Table with three columns 'WAVE', 'FLUX', and 'ERR'. The header of each extension contains the information about the original image such as exposure time and instrument settings.
- **`phot`** : runs the full photometric pipeline using the parameter file generated by `pynot init phot`. The photometric pipeline performs bias and flat field correction, correction of cosmic ray hits, fringe correction, image registration and combination, source extraction and WCS calibration (using Gaia as reference). The final combined images are in units of counts per second (see more details below).
If the observed frames are covered by the SDSS imaging foot print, PyNOT will perform an automatic self-calibration using SDSS photometry of sources in the field.
## Documentation
You can see the documentation on the [PyNOT Website](https://jkrogager.github.io/pynot/).
The full documentation is currently being compiled... stay tuned.
## Example: Spectroscopy
A standard example would be the reduction of the data from one night of observations. All the raw data would be located in a single folder - let's call it `raw_data/`. This folder will contain the necessary raw data: bias frames, flux standard star spectra, arc line frames, spectroscopic flat fields, and the object spectra. Any other data in the folder (imaging files, sky flats, acquisition images, slit images etc.) will be ignored in the pipeline.
A default reduction would require the following steps:
1. **Create a parameter file and classify the data:**
`pynot init spex raw_data --pars night1.yml`
This step creates the PyNOT File Classification (dataset.pfc) table which looks something like:
# PyNOT File Classification Table
# ARC_HeNe:
#FILENAME TYPE OBJECT EXPTIME GRISM SLIT FILTER
raw/ALzh010234.fits ARC_HeNe HeNe 3.0 Grism_#4 Slit_1.3 Open
raw/ALzh010235.fits ARC_HeNe HeNe 3.0 Grism_#4 Slit_1.3 Open
raw/ALzh010247.fits ARC_HeNe HeNe 3.0 Grism_#4 Slit_1.0 Open
raw/ALzh010250.fits ARC_HeNe HeNe 3.0 Grism_#4 Slit_1.0 Open
# BIAS:
#FILENAME TYPE OBJECT EXPTIME GRISM SLIT FILTER
raw/ALzh010001.fits BIAS bias-full 0.0 Open_(Lyot) Open Open
raw/ALzh010002.fits BIAS bias-full 0.0 Open_(Lyot) Open Open
raw/ALzh010003.fits BIAS bias-full 0.0 Open_(Lyot) Open Open
...
If there are any bad frames (that you know of) you can delete or comment out (using #) the corresponding line to ignore the file in the pipeline.
This step will also initiate a new parameter file with default values (default filename: 'options_spex.yml'). All available parameters of the steps of the pipeline are laid out in this file. Open the file with your favorite text editor and edit any other values as you see fit. A short description of the parameters is given in the file. For more detail, see the full documentation (coming soon).
For now we will just focus on the interactive parameters: There are three tasks that can be used in interactive mode, which will start a graphical interface to allow the user more flexibility. These are: line identification (for wavelength calibration), extraction of the 1-dimensional spectra, and calculation of the response function. By default, these are all turned on. Note that the line identification can be defined in two ways:
(i) once for all grisms in the given dataset, this line identification information will then automatically be used for all objects observed with the given grism;
or (ii) for each object in the dataset based on the arc file observed closest in time to the science frame. This provides more accurate rectification of the image, but the difference in low-resolution data is usually negligible.
2. **Run the pipeline:**
`pynot spex night1.yml`
This will start the full pipeline reduction of *all* objects identified in the dataset (with file classification `SPEC_OBJECT`). If you only want to reduce a few targets, you can specify these as: `pynot spex night1.yml --object TARGET1 TARGET2 ...` where the target names must match the value of the `OBJECT` keyword in the FITS headers.
By default the pipeline creates separate output directories for each target where a detailed log file is saved. This file summarizes the steps of the pipeline and shows any warnings and output generated by the pipeline. By default, the pipeline also generates diagnostic plots of the 2D rectification, response function, sky subtraction and 1D extraction.
The log is also printed to the terminal as the pipeline progresses. If you want to turn this off, you can run the pipeline with the `-s` (or `--silent`) option.
3. **Verify the steps** of the data products and make sure that everything terminated successfully. You should pay special attention to the automated sky subtraction. This can be adjusted during the interactive extraction step, if necessary.
4. Now it's time to do your scientific analysis on your newly calibrated 1D and 2D spectra. Enjoy!
## Example: Imaging
A standard example would be the reduction of the data from one night of observations. All the raw data would be located in a single folder - let's call it `raw_night1/`. This folder will contain the necessary raw data: bias frames, flat field frames in all filters, flux standard star fields (if available), and the raw science images. Any other data in the folder (spectroscopic files, focus images etc.) will be ignored by the pipeline.
A basic automated reduction would require the following steps:
1. **Create a parameter file and classify the data:**
`pynot init phot raw_night1 --pars pars1.yml`
This step will classify all the data in `raw_night1/` and create the PyNOT classification table (dataset.pfc). This step will also initiate a new parameter file with default values (the filename 'options_phot.yml' is used by default unless the `--pars` option is used). All available parameters of the steps of the pipeline are laid out in this file. Open the file with your favorite text editor and edit any other values as you see fit. A short description for each parameter is given in the file. For more detail, see the full documentation (coming soon).
2. **Run the pipeline:**
`pynot phot pars1.yml`
This will start the full pipeline reduction of *all* objects in *all* filters identified in the dataset (with file classification `IMG_OBJECT`). If you only want to reduce a subset of objects or filters, you can ignore files by editing the 'dataset.pfc' file. Deleting or commenting out (using #) a given line in the .pfc file will tell the pipeline to ignore the file on that line.
The processed files are structured in sub-directories from the main working directory:
working_dir/
|- imaging/
|- OBJECT_1/
| |- B_band/
| |- R_band/
| |- combined_B.fits
| |- combined_R.fits
| |...
|
|- OBJECT_2/
|- B_band/
|- R_band/
|- V_band/
|- combined_B.fits
|- combined_R.fits
|- combined_V.fits
|...
The individual images for each filter of each target are kept in the designated folders under each object, and are automatically combined. The combined image is in the folder of the given object. The last step of the pipeline as of now is to run a source extraction algorithm (SEP/SExtractor) to provide a final source table with aperture fluxes, a segmentation map as well as a figure showing the identified sources in the field.
In each designated filter folder, the pipeline also produces a file log showing which files are combined into the final image as well as some basic image statistics: an estimate of the seeing, the PSF ellipticity, and the exposure time. This file can be used as input for further refined image combinations using the task `pynot imcombine filelist_OBJECT_1.txt new_combined_R.fits`. Individual frames can be commented out in the file log in order to exclude them in subsequent combinations. The combined images are given in units of counts per second.
3. **Verify the steps** of the data products and make sure that everything terminated successfully.
4. Now it's time to do your scientific analysis on your newly calibrated images. Enjoy!
### Identify Transients
PyNOT comes with a task for identifying bright, new transient objects from SWIFT. This is achieved by cross-matching with the Gaia all-sky catalog. Sources in the field without a match in Gaia are flagged according to three classes: (**red**) if the source is not consistent with the SWIFT localisation, (**orange**) if the source is consistent with the broad BAT error circle, and (**green**) if the source is consistent with the X-ray error circle.
The task can be run as:
pynot findnew reduced_image_wcs.fits reduced_image_phot.fits [--bat ra dec radius --xrt ra dec radius -z ZP --limit 20.1]
Note: Coordinates (ra and dec) are given in degrees! The radius for --bat is given in arcmin, the radius for --xrt is given in arcsec! Both --bat and --xrt are optional.
If the source catalog from PyNOT (_phot.fits) has not been flux calibrated (outside SDSS footprint), you can provide a zero point manually by giving the `-z ZP` option, where ZP denotes the magnitude zero point. The default magnitude limit is 20.1 mag in order to match the depth of Gaia. Sources fainter than this will not be considered.
The task creates a figure showing the field together with the localization estimates given by --bat and --xrt. The task also prints the identified sources to the terminal and to a text file (new_sources_*.txt), which looks something like:
ra dec mag_auto a b theta flux_auto flux_err_auto class
(deg) (deg) (AB) (pix) (pix) (rad) (count/s) (count/s)
54.51772 -26.98964 19.64 2.4 2.2 -1.28 1.27e+03 1.97e+01 0
54.50497 -26.94632 16.92 3.8 3.8 -0.53 1.56e+04 2.62e+01 0
54.54830 -26.93043 19.63 2.5 2.4 0.11 1.29e+03 1.97e+01 0
where `class` refers to the classes defined above:
0: not consistent with BAT nor XRT (red)
1: consistent with BAT (orange)
2: consistent with BAT and XRT (green)
|
jkrogagerREPO_NAMEPyNOTPATH_START.@PyNOT_extracted@PyNOT-master@README.md@.PATH_END.py
|
{
"filename": "axis.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/scene/widgets/axis.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from .widget import Widget
from ...visuals import AxisVisual
class AxisWidget(Widget):
    """Widget containing an axis

    Parameters
    ----------
    orientation : str
        Orientation of the axis, 'left' or 'bottom'.
    **kwargs : dict
        Keyword arguments to pass to AxisVisual.
    """

    def __init__(self, orientation='left', **kwargs):
        # Pick a default tick direction matching the orientation unless the
        # caller supplied one explicitly.
        if 'tick_direction' not in kwargs:
            default_dirs = {'left': (-1, 0), 'right': (1, 0),
                            'bottom': (0, 1), 'top': (0, -1)}
            kwargs['tick_direction'] = default_dirs[orientation]
        self.axis = AxisVisual(**kwargs)
        self.orientation = orientation
        self._linked_view = None
        Widget.__init__(self)
        self.add_subvisual(self.axis)

    def on_resize(self, event):
        """Resize event handler

        Parameters
        ----------
        event : instance of Event
            The event.
        """
        self._update_axis()

    def _update_axis(self):
        # Re-anchor the axis visual to the widget's current rectangle.
        self.axis.pos = self._axis_ends()

    def _axis_ends(self):
        """Return the two endpoints of the axis line for this orientation."""
        rect = self.rect
        endpoints = {
            'left': [[rect.right, rect.top], [rect.right, rect.bottom]],
            'bottom': [[rect.left, rect.bottom], [rect.right, rect.bottom]],
            'right': [[rect.left, rect.top], [rect.left, rect.bottom]],
            'top': [[rect.left, rect.top], [rect.right, rect.top]],
        }
        if self.orientation not in endpoints:
            raise RuntimeError(
                'Orientation %s not supported.' % self.orientation)
        return np.array(endpoints[self.orientation])

    def link_view(self, view):
        """Link this axis to a ViewBox

        This makes it so that the axis's domain always matches the
        visible range in the ViewBox.

        Parameters
        ----------
        view : instance of ViewBox
            The ViewBox to link.
        """
        if view is self._linked_view:
            # Already linked to this view; nothing to do.
            return
        if self._linked_view is not None:
            # Detach from the previously linked view first.
            self._linked_view.scene.transform.changed.disconnect(
                self._view_changed)
        self._linked_view = view
        view.scene.transform.changed.connect(self._view_changed)
        self._view_changed()

    def _view_changed(self, event=None):
        """Linked view transform has changed; update ticks.
        """
        mapping = self.node_transform(self._linked_view.scene)
        start, end = mapping.map(self._axis_ends())
        # Vertical axes track the y-range; horizontal axes track the x-range.
        if self.orientation in ('left', 'right'):
            self.axis.domain = (start[1], end[1])
        else:
            self.axis.domain = (start[0], end[0])
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@scene@widgets@axis.py@.PATH_END.py
|
{
"filename": "math.py",
"repo_name": "Keck-DataReductionPipelines/KCWI_DRP",
"repo_path": "KCWI_DRP_extracted/KCWI_DRP-master/kcwidrp/core/bspline/math.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""This module corresponds to the math directory in idlutils.
"""
import numpy as np
from numpy.linalg import svd
from scipy.special import legendre
import astropy.utils as au
from .misc import djs_laxisnum
from .median import median
def flegendre(x, m):
    """Evaluate the first `m` Legendre polynomials at `x`.

    Parameters
    ----------
    x : array-like
        Abscissa values at which to evaluate the polynomials.
    m : :class:`int`
        How many polynomials to compute. For example, ``m = 3`` yields
        :math:`P_0 (x)`, :math:`P_1 (x)` and :math:`P_2 (x)`.

    Returns
    -------
    :class:`numpy.ndarray`
        Array of shape (`m`, `n`) with the polynomial values, where `n`
        is the number of abscissa values.

    Raises
    ------
    ValueError
        If `m` is less than 1.
    """
    if m < 1:
        raise ValueError('Number of Legendre polynomials must be at least 1.')
    n = x.size if isinstance(x, np.ndarray) else 1
    # Inherit the input dtype when available; scalars fall back to float64.
    dt = getattr(x, 'dtype', np.float64)
    leg = np.ones((m, n), dtype=dt)
    # Row 0 is P_0 = 1 (already in place); row 1 is P_1 = x.
    if m >= 2:
        leg[1, :] = x
    # Higher orders evaluated directly from scipy's polynomial coefficients.
    for k in range(2, m):
        leg[k, :] = np.polyval(legendre(k), x)
    return leg
class computechi2(object):
    """Solve the linear set of equations :math:`A x = b` using SVD.

    The attributes of this class are all read-only properties, implemented
    with :class:`~astropy.utils.decorators.lazyproperty`.

    Parameters
    ----------
    bvec : :class:`numpy.ndarray`
        The :math:`b` vector in :math:`A x = b`. This vector has length
        :math:`N`.
    sqivar : :class:`numpy.ndarray`
        The reciprocal of the errors in `bvec`. The name comes from the square
        root of the inverse variance, which is what this is.
    amatrix : :class:`numpy.ndarray`
        The matrix :math:`A` in :math:`A x = b`.
        The shape of this matrix is (:math:`N`, :math:`M`).
    """

    def __init__(self, bvec, sqivar, amatrix):
        """Initialize the object and perform initial computations.
        """
        #
        # Save the inputs
        #
        # self.bvec = bvec
        self.sqivar = sqivar
        self.amatrix = amatrix
        # Number of fit parameters M (columns of A); 1 for a 1-D amatrix.
        if len(amatrix.shape) > 1:
            self.nstar = amatrix.shape[1]
        else:
            self.nstar = 1
        # Weight both sides of A x = b by sqivar so the least-squares
        # solution minimizes chi^2 (weighted least squares).
        self.bvec = bvec * sqivar
        self.mmatrix = self.amatrix * np.tile(sqivar, self.nstar).reshape(
            self.nstar, bvec.size).transpose()
        # SVD of the normal-equations matrix M^T M; mmi is its inverse
        # built from the SVD factors, reused by the lazy properties below.
        mm = np.dot(self.mmatrix.T, self.mmatrix)
        self.uu, self.ww, self.vv = svd(mm, full_matrices=False)
        self.mmi = np.dot((self.vv.T / np.tile(self.ww, self.nstar).reshape(
            self.nstar, self.nstar)), self.uu.T)
        return

    @au.lazyproperty
    def acoeff(self):
        """(:class:`~numpy.ndarray`) The fit parameters, :math:`x`,
        in :math:`A x = b`. This vector has length :math:`M`.
        """
        return np.dot(self.mmi, np.dot(self.mmatrix.T, self.bvec))

    @au.lazyproperty
    def chi2(self):
        r"""(:class:`float <numpy.generic>`) The :math:`\chi^2` value of the fit.
        """
        return np.sum((np.dot(self.mmatrix, self.acoeff) - self.bvec)**2)

    @au.lazyproperty
    def yfit(self):
        """(:class:`~numpy.ndarray`) The evaluated best-fit at each point.
        This vector has length :math:`N`.
        """
        return np.dot(self.amatrix, self.acoeff)

    @au.lazyproperty
    def dof(self):
        """(:class:`int <numpy.generic>`) The degrees of freedom of the fit.
        This is the number of values of `bvec` that have `sqivar` > 0 minus
        the number of fit parameters, which is equal to :math:`M`.
        """
        return (self.sqivar > 0).sum() - self.nstar

    @au.lazyproperty
    def covar(self):
        """(:class:`~numpy.ndarray`) The covariance matrix.
        The shape of this matrix is (:math:`M`, :math:`M`).
        """
        # Invert only the nonzero singular values; degenerate (zero) modes
        # contribute nothing to the covariance sum below.
        wwt = self.ww.copy()
        wwt[self.ww > 0] = 1.0/self.ww[self.ww > 0]
        covar = np.zeros((self.nstar, self.nstar), dtype=self.ww.dtype)
        # Fill the lower triangle and mirror it: the matrix is symmetric.
        for i in range(self.nstar):
            for j in range(i + 1):
                covar[i, j] = np.sum(wwt * self.vv[:, i] * self.vv[:, j])
                covar[j, i] = covar[i, j]
        return covar

    @au.lazyproperty
    def var(self):
        """(:class:`~numpy.ndarray`) The variances of the fit.
        This is identical to the diagonal of the covariance matrix.
        This vector has length :math:`M`.
        """
        return np.diag(self.covar)
def djs_median(array, dimension=None, width=None, boundary='none'):
    """Compute the median of an array.

    Use a filtering box or collapse the image along one dimension.

    Parameters
    ----------
    array : :class:`numpy.ndarray`
        input array
    dimension : :class:`int`, optional
        Compute the median over this dimension. It is an error to specify both
        `dimension` and `width`.
    width : :class:`int`, optional
        Width of the median window. In general, this should be an odd
        integer. It is an error to specify both `dimension` and `width`.
    boundary : { 'none', 'reflect', 'nearest', 'wrap' }, optional
        Boundary condition to impose. 'none' means no filtering is done within
        `width`/2 of the boundary. 'reflect' means reflect pixel values around the
        boundary. 'nearest' means use the values of the nearest boundary pixel.
        'wrap' means wrap pixel values around the boundary. 'nearest' and 'wrap'
        are not implemented.

    Returns
    -------
    :class:`numpy.ndarray`
        The output. If neither `dimension` nor `width` are set, this is a scalar
        value, just the output of ``numpy.median()``. If `dimension` is set,
        then the result is simply ``numpy.median(array, dimension)``.
        If `width` is set, the result has the same shape as the input array.
    """
    # No filtering requested: plain (possibly axis-collapsing) median.
    if dimension is None and width is None:
        return np.median(array)
    elif width is None:
        return np.median(array, axis=dimension)
    elif dimension is None:
        # Running median filter with the given window width.
        if width == 1:
            # A window of one is the identity transform.
            return array
        if boundary == 'none':
            # NOTE(review): ``median`` here is a window median filter
            # imported elsewhere in this module (not numpy.median) --
            # confirm against the module's imports.  The 1-d and 2-d
            # branches are currently identical.
            if array.ndim == 1:
                return median(array, width)
            elif array.ndim == 2:
                return median(array, width)
            else:
                raise ValueError('Unsupported number of dimensions with ' +
                                 'this boundary condition.')
        elif boundary == 'reflect':
            # Pad every edge with half a window of reflected pixels,
            # filter the padded array, then trim back to the input shape.
            padsize = int(np.ceil(width/2.0))
            if array.ndim == 1:
                bigarr = np.zeros(array.shape[0]+2*padsize, dtype=array.dtype)
                bigarr[padsize:padsize+array.shape[0]] = array
                # Reflect the leading and trailing edges.
                bigarr[0:padsize] = array[0:padsize][::-1]
                bigarr[padsize+array.shape[0]:padsize*2+array.shape[0]] = (
                    array[array.shape[0]-padsize:array.shape[0]][::-1])
                f = median(bigarr, width)
                medarray = f[padsize:padsize+array.shape[0]]
                return medarray
            elif array.ndim == 2:
                bigarr = np.zeros((array.shape[0]+2*padsize,
                                   array.shape[1]+2*padsize),
                                  dtype=array.dtype)
                bigarr[padsize:padsize+array.shape[0], padsize:padsize+array.shape[1]] = array
                # Copy into top + bottom
                bigarr[0:padsize, padsize:array.shape[1]+padsize] = array[0:padsize, :][::-1, :]
                bigarr[array.shape[0]+padsize:bigarr.shape[0], padsize:array.shape[1]+padsize] = array[array.shape[0]-padsize:array.shape[0], :][::-1, :]
                # Copy into left + right
                bigarr[padsize:array.shape[0]+padsize, 0:padsize] = array[:, 0:padsize][:, ::-1]
                bigarr[padsize:array.shape[0]+padsize, array.shape[1]+padsize:bigarr.shape[1]] = array[:, array.shape[1]-padsize:array.shape[1]][:, ::-1]
                # Copy into top left
                bigarr[0:padsize, 0:padsize] = array[0:padsize, 0:padsize][::-1, ::-1]
                # Copy into top right
                bigarr[0:padsize, bigarr.shape[1]-padsize:bigarr.shape[1]] = array[0:padsize, array.shape[1]-padsize:array.shape[1]][::-1, ::-1]
                # Copy into bottom left
                bigarr[bigarr.shape[0]-padsize:bigarr.shape[0], 0:padsize] = array[array.shape[0]-padsize:array.shape[0], 0:padsize][::-1, ::-1]
                # Copy into bottom right
                bigarr[bigarr.shape[0]-padsize:bigarr.shape[0], bigarr.shape[1]-padsize:bigarr.shape[1]] = array[array.shape[0]-padsize:array.shape[0], array.shape[1]-padsize:array.shape[1]][::-1, ::-1]
                # NOTE(review): the window is clipped to array.size here,
                # unlike the 1-d branch -- confirm this is intentional.
                f = median(bigarr, min(width, array.size))
                medarray = f[padsize:array.shape[0]+padsize, padsize:array.shape[1]+padsize]
                return medarray
            else:
                raise ValueError('Unsupported number of dimensions with ' +
                                 'this boundary condition.')
        elif boundary == 'nearest':
            raise ValueError('This boundary condition not implemented')
        elif boundary == 'wrap':
            raise ValueError('This boundary condition not implemented')
        else:
            raise ValueError('Unknown boundary condition.')
    else:
        raise ValueError('Invalid to specify both dimension & width.')
def djs_reject(data, model, outmask=None, inmask=None, sigma=None,
               invvar=None, lower=None, upper=None, maxdev=None,
               maxrej=None, groupdim=None, groupsize=None, groupbadpix=False,
               grow=0, sticky=False):
    """Routine to reject points when doing an iterative fit to data.

    Parameters
    ----------
    data : :class:`numpy.ndarray`
        The data.
    model : :class:`numpy.ndarray`
        The model, must have the same number of dimensions as `data`.
    outmask : :class:`numpy.ndarray`, optional
        Output mask, generated by a previous call to `djs_reject`. If not supplied,
        this mask will be initialized to a mask that masks nothing. Although
        this parameter is technically optional, it will almost always be set.
    inmask : :class:`numpy.ndarray`, optional
        Input mask. Bad points are marked with a value that evaluates to ``False``.
        Must have the same number of dimensions as `data`.
    sigma : :class:`numpy.ndarray`, optional
        Standard deviation of the data, used to reject points based on the values
        of `upper` and `lower`.
    invvar : :class:`numpy.ndarray`, optional
        Inverse variance of the data, used to reject points based on the values
        of `upper` and `lower`. If both `sigma` and `invvar` are set, `invvar`
        will be ignored.
    lower : :class:`int` or :class:`float`, optional
        If set, reject points with data < model - lower * sigma.
    upper : :class:`int` or :class:`float`, optional
        If set, reject points with data > model + upper * sigma.
    maxdev : :class:`int` or :class:`float`, optional
        If set, reject points with abs(data-model) > maxdev. It is permitted to
        set all three of `lower`, `upper` and `maxdev`.
    maxrej : :class:`int` or :class:`numpy.ndarray`, optional
        Maximum number of points to reject in this iteration. A scalar is
        treated as a one-element list. If `groupsize` or `groupdim` are set
        to arrays, this should be an array as well.
    groupdim
        Dimension along which to group the data (1 for the first dimension,
        2 for the second, ...); requires `maxrej`.
    groupsize
        Maximum number of points to reject per group of `groupsize` points;
        requires `maxrej`.
    groupbadpix : :class:`bool`, optional
        If set to ``True``, consecutive sets of bad pixels are considered groups,
        overriding the values of `groupsize`.
    grow : :class:`int`, optional
        If set to a non-zero integer, N, the N nearest neighbors of rejected
        pixels will also be rejected.
    sticky : :class:`bool`, optional
        If set to ``True``, pixels rejected in one iteration remain rejected in
        subsequent iterations, even if the model changes.

    Returns
    -------
    :func:`tuple`
        A tuple containing a mask where rejected data values are ``False`` and
        a boolean value set to ``True`` if `djs_reject` believes there is no
        further rejection to be done.

    Raises
    ------
    :exc:`ValueError`
        If dimensions of various inputs do not match.
    """
    #
    # Create outmask setting = 1 for good data.
    #
    if outmask is None:
        outmask = np.ones(data.shape, dtype='bool')
    else:
        if data.shape != outmask.shape:
            raise ValueError('Dimensions of data and outmask do not agree.')
    #
    # Check other inputs.
    #
    if model is None:
        # Nothing to compare against: pass the input mask through.
        if inmask is not None:
            outmask = inmask
        return (outmask, False)
    else:
        if data.shape != model.shape:
            raise ValueError('Dimensions of data and model do not agree.')
    if inmask is not None:
        if data.shape != inmask.shape:
            raise ValueError('Dimensions of data and inmask do not agree.')
    if maxrej is not None:
        #
        # BUG FIX: the docstring allows a scalar maxrej, but the code below
        # indexes maxrej[iloop]; normalize scalars to one-element lists.
        # The same normalization is applied to groupsize.
        #
        if not hasattr(maxrej, '__len__'):
            maxrej = [maxrej]
        if groupdim is not None:
            if len(maxrej) != len(groupdim):
                raise ValueError('maxrej and groupdim must have the same number of elements.')
        else:
            groupdim = []
        if groupsize is not None:
            if not hasattr(groupsize, '__len__'):
                groupsize = [groupsize]
            if len(maxrej) != len(groupsize):
                raise ValueError('maxrej and groupsize must have the same number of elements.')
        else:
            groupsize = [len(data)]
    if sigma is None and invvar is None:
        #
        # No noise estimate supplied: derive sigma from the scatter of the
        # currently-good points about the model.
        #
        if inmask is not None:
            igood = (inmask & outmask).nonzero()[0]
        else:
            igood = outmask.nonzero()[0]
        # BUG FIX: the original tested ``len(igood > 1)``, which is the
        # length of a boolean array (i.e. "any good point"), not a count
        # comparison.
        if len(igood) > 1:
            sigma = np.std(data[igood] - model[igood])
        else:
            sigma = 0
    diff = data - model
    #
    # The working array is badness, which is set to zero for good points
    # (or points already rejected), and positive values for bad points.
    # The values determine just how bad a point is, either corresponding
    # to the number of sigma above or below the fit, or to the number
    # of multiples of maxdev away from the fit.
    #
    # BUG FIX: the original translated the IDL ``>`` (maximum) operator as
    # a Python comparison, collapsing badness to 0/1 and breaking the
    # "reject the worst points first" ordering used by maxrej.  The correct
    # translation of IDL's ``x > 0`` is np.clip(x, 0, None).
    #
    badness = np.zeros(outmask.shape, dtype=data.dtype)
    #
    # Decide how bad a point is according to lower.
    #
    if lower is not None:
        if sigma is not None:
            qbad = diff < (-lower * sigma)
            badness += np.clip(-diff/(sigma + (sigma == 0)), 0, None) * qbad
        else:
            qbad = (diff * np.sqrt(invvar)) < -lower
            badness += np.clip(-diff * np.sqrt(invvar), 0, None) * qbad
    #
    # Decide how bad a point is according to upper.
    #
    if upper is not None:
        if sigma is not None:
            qbad = diff > (upper * sigma)
            badness += np.clip(diff/(sigma + (sigma == 0)), 0, None) * qbad
        else:
            qbad = (diff * np.sqrt(invvar)) > upper
            badness += np.clip(diff * np.sqrt(invvar), 0, None) * qbad
    #
    # Decide how bad a point is according to maxdev.
    #
    if maxdev is not None:
        qbad = np.absolute(diff) > maxdev
        badness += np.absolute(diff) / maxdev * qbad
    #
    # Do not consider rejecting points that are already rejected by inmask.
    # Do not consider rejecting points that are already rejected by outmask,
    # if sticky is set.
    #
    if inmask is not None:
        badness *= inmask
    if sticky:
        badness *= outmask
    #
    # Reject a maximum of maxrej (additional) points in all the data, or
    # in each group as specified by groupsize, and optionally along each
    # dimension specified by groupdim.
    #
    if maxrej is not None:
        #
        # Loop over each dimension of groupdim or loop once if not set.
        #
        for iloop in range(max(len(groupdim), 1)):
            #
            # Assign an index number in this dimension to each data point.
            #
            if len(groupdim) > 0:
                # BUG FIX: the original referenced the undefined name
                # ``ydata`` here; the array being fit is ``data``.
                yndim = len(data.shape)
                if groupdim[iloop] > yndim:
                    raise ValueError('groupdim is larger than the number of dimensions for data.')
                dimnum = djs_laxisnum(data.shape, iaxis=groupdim[iloop]-1)
            else:
                #
                # BUG FIX: the original set dimnum = [0] and then looped
                # ``for ivec in range(max(dimnum))``, i.e. zero times, so
                # maxrej was silently ignored whenever groupdim was unset.
                # Treat the whole array as a single group instead.
                #
                dimnum = np.zeros(data.shape, dtype=int)
            #
            # Loop over each vector specified by groupdim. For example, if
            # this is a 2-D array with groupdim=1, then loop over each
            # column of the data. If groupdim=2, then loop over each row.
            # If groupdim is not set, then use the whole image.
            #
            # BUG FIX: range(max(dimnum)) skipped the last group; group
            # labels run from 0 through max(dimnum) inclusive.
            #
            for ivec in range(int(dimnum.max()) + 1):
                #
                # Indices of the points belonging to this group.
                #
                indx = (dimnum == ivec).nonzero()[0]
                #
                # Within this group of points, break it down into groups
                # of points specified by groupsize, if set.
                #
                nin = len(indx)
                if groupbadpix:
                    # Runs of consecutive bad pixels define the groups.
                    # NOTE(review): these bounds index the full badness
                    # array, but are applied to indx below -- confirm the
                    # two agree when groupdim is set.
                    goodtemp = badness == 0
                    groups_lower = (-1*np.diff(np.insert(goodtemp, 0, 1)) == 1).nonzero()[0]
                    groups_upper = (np.diff(np.append(goodtemp, 1)) == 1).nonzero()[0]
                    ngroups = len(groups_lower)
                else:
                    #
                    # BUG FIX: the original tested ``'groupsize' in kwargs``
                    # but this function takes no **kwargs, raising NameError.
                    # groupsize is always defined at this point (it defaults
                    # to [len(data)] when maxrej is set), so use it directly.
                    # Integer division is also required under Python 3.
                    #
                    ngroups = nin // groupsize[iloop] + 1
                    groups_lower = np.arange(ngroups, dtype='i4')*groupsize[iloop]
                    foo = (np.arange(ngroups, dtype='i4')+1)*groupsize[iloop]
                    groups_upper = np.where(foo < nin, foo, nin) - 1
                for igroup in range(ngroups):
                    i1 = groups_lower[igroup]
                    i2 = groups_upper[igroup]
                    nii = i2 - i1 + 1
                    #
                    # Need the test that i1 != -1 below to prevent a crash
                    # condition on empty groups.
                    #
                    if nii > 0 and i1 != -1:
                        jj = indx[i1:i2+1]
                        #
                        # Test if too many points rejected in this group.
                        #
                        if np.sum(badness[jj] != 0) > maxrej[iloop]:
                            isort = badness[jj].argsort()
                            #
                            # Make the least-bad points good again, keeping
                            # only the worst maxrej of them rejected.
                            #
                            badness[jj[isort[0:nii-maxrej[iloop]]]] = 0
                        # NOTE: the original incremented i1 by groupsize
                        # here, but i1 is reassigned at the top of the loop,
                        # so the statement was dead code (and crashed for a
                        # scalar groupsize); it has been removed.
    #
    # Now modify outmask, rejecting points specified by inmask=0, outmask=0
    # if sticky is set, or badness > 0.
    #
    newmask = badness == 0
    if grow > 0:
        rejects = newmask == 0
        if rejects.any():
            irejects = rejects.nonzero()[0]
            #
            # BUG FIX: the original looped range(1, grow) (so grow=1 did
            # nothing) and indexed newmask with a boolean expression rather
            # than the neighbor indices.  Reject the `grow` nearest
            # in-bounds neighbors on each side of every rejected pixel.
            #
            for k in range(1, grow + 1):
                left = irejects - k
                newmask[left[left >= 0]] = 0
                right = irejects + k
                newmask[right[right <= data.shape[0] - 1]] = 0
    if inmask is not None:
        newmask = newmask & inmask
    if sticky:
        newmask = newmask & outmask
    #
    # Set qdone if the input outmask is identical to the output outmask;
    # convert np.bool to Python built-in bool.
    #
    qdone = bool(np.all(newmask == outmask))
    outmask = newmask
    return (outmask, qdone)
def find_contiguous(x):
    """Find the longest sequence of contiguous non-zero array elements.

    Parameters
    ----------
    x : :class:`numpy.ndarray`
        A 1d array. A dtype of bool is preferred although any dtype where the
        operation ``if x[k]:`` is well-defined should work.

    Returns
    -------
    :class:`list`
        A list of indices of the longest contiguous non-zero sequence.

    Examples
    --------
    >>> import numpy as np
    >>> from pydl.pydlutils.math import find_contiguous
    >>> find_contiguous(np.array([0,1,1,1,0,1,1,0,1]))
    [1, 2, 3]
    """
    # Scan once, collecting every run of consecutive truthy indices.
    runs = []
    active = None
    for idx in range(x.size):
        if x[idx]:
            if active is None:
                # Start a new run.
                active = [idx]
                runs.append(active)
            else:
                active.append(idx)
        else:
            # A falsy element terminates the current run.
            active = None
    # Ties go to the earliest run, matching list.index semantics.
    return max(runs, key=len)
|
Keck-DataReductionPipelinesREPO_NAMEKCWI_DRPPATH_START.@KCWI_DRP_extracted@KCWI_DRP-master@kcwidrp@core@bspline@math.py@.PATH_END.py
|
{
"filename": "spleaf_multidimensional_esp_activity_slow.py",
"repo_name": "LucaMalavolta/PyORBIT",
"repo_path": "PyORBIT_extracted/PyORBIT-main/pyorbit/models/spleaf_multidimensional_esp_activity_slow.py",
"type": "Python"
}
|
from pyorbit.subroutines.common import *
from pyorbit.models.abstract_model import *
from pyorbit.keywords_definitions import *
from scipy.linalg import cho_factor, cho_solve, lapack, LinAlgError
from scipy import matrix, spatial
import sys
__all__ = ['SPLEAF_Multidimensional_ESP_slow']
try:
from spleaf import cov as spleaf_cov
from spleaf import term as spleaf_term
except (ModuleNotFoundError,ImportError):
pass
class SPLEAF_Multidimensional_ESP_slow(AbstractModel):
    ''' Multidimensional Gaussian-process activity model built on the S+LEAF
    ESP kernel.

    Three parameters out of four are the same for all the datasets, since they are related to
    the properties of the physical process rather than the observed effects on a dataset
    From Grunblatt+2015, Affer+2016
    - theta: is usually related to the rotation period of the star( or one of its harmonics);
    - lambda: is the correlation decay timescale, and it can be related to the lifetime of the active regions.
    - omega: is the length scale of the periodic component, and can be linked to the size evolution of the active regions;
    - h: represents the amplitude of the correlations '''

    default_common = 'activity'

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.model_class = 'spleaf_multidimensional_esp'
        self.internal_likelihood = True
        # The likelihood is evaluated once, after every dataset has
        # contributed its residuals (see add_internal_dataset/lnlk_compute).
        self.delayed_lnlk_computation = True

        # Hyperparameters of the physical process, shared by all datasets.
        self.list_pams_common = OrderedSet([
            'Prot',  # Rotational period of the star
            'Pdec',  # Decay timescale of activity
            'Oamp',  # Granulation of activity
        ])
        # Per-dataset amplitudes.
        self.list_pams_dataset = OrderedSet([
            'rot_amp',  # Amplitude of the covariance matrix
            'con_amp'   # Amplitude of the first derivative of the covariance matrix
        ])

        # Fail early with a clear message if S+LEAF is missing: the
        # module-level import is wrapped in try/except, so re-check here.
        try:
            from spleaf import cov as spleaf_cov
            from spleaf import term as spleaf_term
        except (ModuleNotFoundError, ImportError):
            print("ERROR: S+LEAF package not installed, this will not work")
            quit()

        self.internal_parameter_values = None

        # Per-dataset bookkeeping, filled by initialize_model_dataset().
        self._dataset_x0 = []       # abscissas of each dataset
        self._dataset_label = []
        self._dataset_e2 = []       # squared measurement errors
        self._dataset_names = {}
        self._dataset_nindex = {}   # dataset name -> index into the lists above

        self.internal_coeff_prime = None  # con_amp of each dataset
        self.internal_coeff_deriv = None  # rot_amp of each dataset
        self._dataset_ej2 = None
        self._dataset_res = None
        self._added_datasets = 0
        self.n_harmonics = 4

    def initialize_model(self, mc, **kwargs):
        """Configure kernel harmonics, hyperparameter conditions, and the
        optional use of shared rotation-period / activity-decay parameters.
        """
        self.n_harmonics = kwargs.get('n_harmonics', self.n_harmonics)
        print(' S+LEAF model, number of harmonics:', self.n_harmonics)
        print()

        # Optional physically-motivated constraints on the hyperparameters;
        # _hypercond_00 is the always-true default.
        if kwargs.get('hyperparameters_condition', False):
            self.hyper_condition = self._hypercond_01
        else:
            self.hyper_condition = self._hypercond_00
        if kwargs.get('rotation_decay_condition', False):
            self.rotdec_condition = self._hypercond_02
        else:
            self.rotdec_condition = self._hypercond_00
        if kwargs.get('halfrotation_decay_condition', False):
            self.halfrotdec_condition = self._hypercond_03
        else:
            self.halfrotdec_condition = self._hypercond_00

        # NOTE(review): if no 'activity' common model is present,
        # self.use_stellar_rotation_period is never initialized before the
        # kwargs loop below reads it -- confirm the framework guarantees an
        # activity common model for this model class.
        for common_ref in self.common_ref:
            if mc.common_models[common_ref].model_class == 'activity':
                self.use_stellar_rotation_period = getattr(mc.common_models[common_ref], 'use_stellar_rotation_period', False)
                break
        for keyword in keywords_stellar_rotation:
            self.use_stellar_rotation_period = kwargs.get(keyword, self.use_stellar_rotation_period)
        if self.use_stellar_rotation_period:
            # Replace Prot with the shared stellar rotation period parameter.
            self.list_pams_common.update(['rotation_period'])
            self.list_pams_common.discard('Prot')

        for common_ref in self.common_ref:
            if mc.common_models[common_ref].model_class == 'activity':
                self.use_stellar_activity_decay = getattr(mc.common_models[common_ref], 'use_stellar_activity_decay', False)
                break
        for keyword in keywords_stellar_activity_decay:
            self.use_stellar_activity_decay = kwargs.get(keyword, self.use_stellar_activity_decay)
        if self.use_stellar_activity_decay:
            # Replace Pdec with the shared activity decay parameter.
            self.list_pams_common.update(['activity_decay'])
            self.list_pams_common.discard('Pdec')

    def initialize_model_dataset(self, mc, dataset, **kwargs):
        """ when reloading the .p files, the object is not reinitialized, so we have to skip the
        incremental addition of datasets if they are already present """
        if dataset.name_ref in self._dataset_nindex:
            return

        self._dataset_x0.append(dataset.x0)
        self._dataset_e2.append(dataset.e**2)
        self._dataset_nindex[dataset.name_ref] = self._added_datasets
        self._added_datasets += 1

        # One amplitude pair per dataset; rebuilt on every addition.
        self.internal_coeff_prime = np.empty(self._added_datasets)
        self.internal_coeff_deriv = np.empty(self._added_datasets)

        # Rebuild the merged time series; the residual/error arrays passed
        # here are placeholders, overwritten in add_internal_dataset().
        self.spleaf_time, self.spleaf_res, self.spleaf_err, self.spleaf_series_index = \
            spleaf_cov.merge_series(self._dataset_x0, self._dataset_e2, self._dataset_e2)

        # Decide whether this dataset responds to the derivative of the
        # activity process: explicit keywords first, then a default based on
        # the kind of observable.
        if 'derivative' in kwargs:
            use_derivative = kwargs['derivative'].get(dataset.name_ref, False)
        elif dataset.name_ref in kwargs:
            use_derivative = kwargs[dataset.name_ref].get('derivative', False)
        else:
            if dataset.kind == 'H-alpha' or \
                    dataset.kind == 'S_index' or \
                    dataset.kind == 'Ca_HK' or \
                    dataset.kind == 'FWHM':
                use_derivative = False
            else:
                use_derivative = True

        if not use_derivative:
            # Pin the derivative amplitude to zero for this dataset.
            self.fix_list[dataset.name_ref] = {'rot_amp': [0., 0.]}

        return

    def add_internal_dataset(self, parameter_values, dataset):
        """Store the residuals, jitter-corrected errors, and amplitudes of
        one dataset ahead of the delayed likelihood computation.
        """
        if self.use_stellar_rotation_period:
            parameter_values['Prot'] = parameter_values['rotation_period']
        if self.use_stellar_activity_decay:
            parameter_values['Pdec'] = parameter_values['activity_decay']

        self.internal_parameter_values = parameter_values

        d_ind = self._dataset_nindex[dataset.name_ref]
        self.spleaf_res[self.spleaf_series_index[d_ind]] = dataset.residuals
        # Measurement errors and dataset jitter added in quadrature.
        self.spleaf_err[self.spleaf_series_index[d_ind]] = np.sqrt(self._dataset_e2[d_ind] + dataset.jitter**2.0)
        self.internal_coeff_prime[d_ind] = parameter_values['con_amp']
        self.internal_coeff_deriv[d_ind] = parameter_values['rot_amp']

    def _build_covariance(self):
        """Build the S+LEAF covariance from the current internal state.

        Extracted helper: lnlk_compute() and sample_predict() previously
        duplicated this construction verbatim.
        """
        return spleaf_cov.Cov(
            self.spleaf_time,
            err=spleaf_term.Error(self.spleaf_err),
            GP=spleaf_term.MultiSeriesKernel(
                spleaf_term.ESPKernel(1.0,
                                      self.internal_parameter_values['Prot'],
                                      self.internal_parameter_values['Pdec'],
                                      self.internal_parameter_values['Oamp'],
                                      nharm=self.n_harmonics),
                self.spleaf_series_index,
                self.internal_coeff_prime,
                self.internal_coeff_deriv))

    def lnlk_compute(self):
        """Return the GP log-likelihood of the merged residuals, or -inf if
        any hyperparameter condition is violated.
        """
        if not self.hyper_condition(self.internal_parameter_values):
            return -np.inf
        if not self.rotdec_condition(self.internal_parameter_values):
            return -np.inf
        if not self.halfrotdec_condition(self.internal_parameter_values):
            return -np.inf

        D = self._build_covariance()
        return D.loglike(self.spleaf_res)

    def sample_predict(self, dataset, x0_input=None, return_covariance=False, return_variance=False):
        """Predict the GP mean (and optionally its standard deviation) for
        one dataset, at the dataset epochs or at `x0_input`.

        NOTE(review): return_covariance is currently unused -- confirm
        whether full-covariance output was ever intended here.
        """
        D = self._build_covariance()

        # Condition the multi-series kernel on the requested dataset.
        d_ind = self._dataset_nindex[dataset.name_ref]
        D.kernel['GP'].set_conditional_coef(series_id=d_ind)

        if x0_input is None:
            t_predict = dataset.x0
        else:
            t_predict = x0_input

        mu, var = D.conditional(self.spleaf_res, t_predict, calc_cov='diag')

        if return_variance:
            return mu, np.sqrt(var)
        else:
            return mu

    @staticmethod
    def _hypercond_00(parameter_values):
        # Always accept: no additional condition imposed.
        return True

    @staticmethod
    def _hypercond_01(parameter_values):
        # Condition from Rajpaul 2017, Rajpaul+2021
        # Taking into account that Pdec^2 = 2*lambda_2^2
        return parameter_values['Pdec']**2 > (3. / 2. / np.pi) * parameter_values['Oamp']**2 * parameter_values['Prot']**2

    @staticmethod
    def _hypercond_02(parameter_values):
        # Condition on Rotation period and decay timescale
        return parameter_values['Pdec'] > 2. * parameter_values['Prot']

    @staticmethod
    def _hypercond_03(parameter_values):
        # Condition on Rotation period and decay timescale
        return parameter_values['Pdec'] > 0.5 * parameter_values['Prot']
|
LucaMalavoltaREPO_NAMEPyORBITPATH_START.@PyORBIT_extracted@PyORBIT-main@pyorbit@models@spleaf_multidimensional_esp_activity_slow.py@.PATH_END.py
|
{
"filename": "spiderman_spot.py",
"repo_name": "kevin218/POET",
"repo_path": "POET_extracted/POET-master/code/lib/models_c/py_func/spiderman_spot.py",
"type": "Python"
}
|
import numpy as np
import spiderman
def spiderman_spot(params, t, etc = []):
    """
    Hotspot planet model evaluated with SPIDERMAN.

    Parameters
    ----------
    params : sequence of 19 values, in order:
        t0 (time of conjunction), per (orbital period), a_abs (semi-major
        axis, AU), cos(inc), ecc, w (arg of periastron, deg), rp (planet
        radius, stellar radii), a (semi-major axis, stellar radii),
        p_u1 / p_u2 (planet limb darkening), T_s (stellar Teff),
        l1 / l2 (blue/red wavelength, m), la0 / lo0 (hotspot latitude /
        longitude), spotsize (deg), spot_T (hotspot temperature as a
        fraction of the stellar temperature), p_T (temperature of the
        planet outside the hotspot), npoints (number of phase bins).
    t : array-like
        Times at which to evaluate the light curve.
    etc : unused, kept for interface compatibility.

    Returns
    -------
    Planet-to-star flux at each time t.

    Revisions
    ---------
    2017-09-11  Laura Kreidberg  laura.kreidberg@gmail.com  Original version
    2019-02-24  update interpolation, add to github version
    TODO        add response function, nlayers to etc
    """
    p = spiderman.ModelParams(brightness_model = 'hotspot_t',
                              stellar_model = 'blackbody')
    p.nlayers = 5

    # Map the flat parameter vector onto the spiderman model attributes;
    # slot 3 holds cos(inc) and is converted separately below.
    attributes = ('t0', 'per', 'a_abs', None, 'ecc', 'w', 'rp', 'a',
                  'p_u1', 'p_u2', 'T_s', 'l1', 'l2', 'la0', 'lo0',
                  'size', 'spot_T', 'p_T')
    for attr, value in zip(attributes, params):
        if attr is not None:
            setattr(p, attr, value)
    p.inc = np.arccos(params[3]) * 180. / np.pi
    npoints = int(params[18])
    #p.filter = "/Users/lkreidberg/Desktop/Util/Throughput/spitzer_irac_ch2.txt"

    # Evaluate the light curve on npoints phase bins, then interpolate back
    # onto the original time array (spiderman evaluation is the slow step).
    phase = (t - p.t0) / p.per
    phase -= np.round(phase)
    phase_grid = np.linspace(phase.min(), phase.max(), npoints)
    t_grid = phase_grid * p.per + p.t0
    lc_grid = spiderman.web.lightcurve(t_grid, p)
    return np.interp(phase, phase_grid, lc_grid)
|
kevin218REPO_NAMEPOETPATH_START.@POET_extracted@POET-master@code@lib@models_c@py_func@spiderman_spot.py@.PATH_END.py
|
{
"filename": "_tickmode.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/zaxis/_tickmode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.scene.zaxis.tickmode``."""

    def __init__(
        self, plotly_name="tickmode", parent_name="layout.scene.zaxis", **kwargs
    ):
        # Pull overridable defaults out of kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "plot")
        implied_edits = kwargs.pop("implied_edits", {})
        values = kwargs.pop("values", ["auto", "linear", "array"])
        super(TickmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@zaxis@_tickmode.py@.PATH_END.py
|
{
"filename": "download_test_data.py",
"repo_name": "trident-project/trident",
"repo_path": "trident_extracted/trident-main/tests/download_test_data.py",
"type": "Python"
}
|
"""
Testing utilities for Trident
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2017, Trident Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#-----------------------------------------------------------------------------
import os
import tarfile
from trident.testing import \
answer_test_data_dir
from trident.utilities import \
download_file
def download_datasets(local_dir=None, progress_bar=True):
    """Download and unpack the answer-test datasets listed in
    test_datasets.txt.

    Parameters
    ----------
    local_dir : str, optional
        Directory in which to store the datasets; defaults to
        ``answer_test_data_dir``.
    progress_bar : bool, optional
        Whether to display a progress bar during each download.
    """
    if local_dir is None:
        local_dir = answer_test_data_dir
    # BUG FIX: close the listing file; the original leaked the handle.
    with open("test_datasets.txt", "r") as listing:
        urls = listing.readlines()
    for url in urls:
        url = url.strip()
        # Skip comment lines and blank lines.
        if not url or url.startswith("#"):
            continue
        filename = os.path.join(local_dir, os.path.basename(url))
        # The unpacked directory name is the tarball name minus ".tar.gz";
        # if it already exists, the dataset was downloaded previously.
        target_filename = filename[:filename.rfind(".tar.gz")]
        if os.path.exists(target_filename):
            continue
        print("Downloading %s to %s." % (url, local_dir))
        # BUG FIX: forward the progress_bar argument; the original
        # hard-coded progress_bar=True, ignoring the parameter.
        download_file(url, local_directory=local_dir,
                      progress_bar=progress_bar)
        assert os.path.exists(filename), \
            "Failed to download %s." % url
        print("Untarring %s." % filename)
        # Context manager guarantees the tarfile is closed even on error.
        with tarfile.open(filename) as tar:
            tar.extractall(path=local_dir)
        os.remove(filename)
    print("Data downloaded and untarred successfully.")


if __name__ == "__main__":
    download_datasets()
|
trident-projectREPO_NAMEtridentPATH_START.@trident_extracted@trident-main@tests@download_test_data.py@.PATH_END.py
|
{
"filename": "_bordercolor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/treemap/marker/colorbar/_bordercolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BordercolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``treemap.marker.colorbar.bordercolor``."""

    def __init__(
        self, plotly_name="bordercolor", parent_name="treemap.marker.colorbar", **kwargs
    ):
        # Allow callers to override the edit type; default to "colorbars".
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(BordercolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@treemap@marker@colorbar@_bordercolor.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter/unselected/textfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for ``scatter.unselected.textfont.color``."""

    def __init__(
        self, plotly_name="color", parent_name="scatter.unselected.textfont", **kwargs
    ):
        # Pop overridable defaults in the same order the base call uses.
        edit_type = kwargs.pop("edit_type", "style")
        role = kwargs.pop("role", "style")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter@unselected@textfont@_color.py@.PATH_END.py
|
{
"filename": "image.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/core/langchain_core/prompts/image.py",
"type": "Python"
}
|
from typing import Any
from pydantic import Field
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.string import (
DEFAULT_FORMATTER_MAPPING,
PromptTemplateFormat,
)
from langchain_core.runnables import run_in_executor
class ImagePromptTemplate(BasePromptTemplate[ImageURL]):
    """Image prompt template for a multimodal model."""

    template: dict = Field(default_factory=dict)
    """Template for the prompt."""
    template_format: PromptTemplateFormat = "f-string"
    """The format of the prompt template.
    Options are: 'f-string', 'mustache', 'jinja2'."""

    def __init__(self, **kwargs: Any) -> None:
        # Reserved keys are produced by this template itself and must not
        # appear among the user-declared input variables.
        input_variables = kwargs.setdefault("input_variables", [])
        overlap = set(input_variables) & {"url", "path", "detail"}
        if overlap:
            msg = (
                "input_variables for the image template cannot contain"
                " any of 'url', 'path', or 'detail'."
                f" Found: {overlap}"
            )
            raise ValueError(msg)
        super().__init__(**kwargs)

    @property
    def _prompt_type(self) -> str:
        """Return the prompt type key."""
        return "image-prompt"

    @classmethod
    def get_lc_namespace(cls) -> list[str]:
        """Get the namespace of the langchain object."""
        return ["langchain", "prompts", "image"]

    def format_prompt(self, **kwargs: Any) -> PromptValue:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.
        """
        image_url = self.format(**kwargs)
        return ImagePromptValue(image_url=image_url)

    async def aformat_prompt(self, **kwargs: Any) -> PromptValue:
        """Async format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.
        """
        image_url = await self.aformat(**kwargs)
        return ImagePromptValue(image_url=image_url)

    def format(
        self,
        **kwargs: Any,
    ) -> ImageURL:
        """Format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Raises:
            ValueError: If the url is not provided.
            ValueError: If the url is not a string.

        Example:
            .. code-block:: python

                prompt.format(variable1="foo")
        """
        # Render every string-valued template entry; pass through the rest.
        rendered = {
            key: (
                DEFAULT_FORMATTER_MAPPING[self.template_format](value, **kwargs)
                if isinstance(value, str)
                else value
            )
            for key, value in self.template.items()
        }
        url = kwargs.get("url") or rendered.get("url")
        if kwargs.get("path") or rendered.get("path"):
            msg = (
                "Loading images from 'path' has been removed as of 0.3.15 for security "
                "reasons. Please specify images by 'url'."
            )
            raise ValueError(msg)
        detail = kwargs.get("detail") or rendered.get("detail")
        if not url:
            msg = "Must provide url."
            raise ValueError(msg)
        if not isinstance(url, str):
            msg = "url must be a string."
            raise ValueError(msg)
        output: ImageURL = {"url": url}
        if detail:
            # Don't check literal values here: let the API check them
            output["detail"] = detail  # type: ignore[typeddict-item]
        return output

    async def aformat(self, **kwargs: Any) -> ImageURL:
        """Async format the prompt with the inputs.

        Args:
            kwargs: Any arguments to be passed to the prompt template.

        Returns:
            A formatted string.

        Raises:
            ValueError: If the path or url is not a string.
        """
        return await run_in_executor(None, self.format, **kwargs)

    def pretty_repr(self, html: bool = False) -> str:
        """Return a pretty representation of the prompt.

        Args:
            html: Whether to return an html formatted string.

        Returns:
            A pretty representation of the prompt.
        """
        raise NotImplementedError
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@core@langchain_core@prompts@image.py@.PATH_END.py
|
{
"filename": "boost_from_prediction.py",
"repo_name": "dmlc/xgboost",
"repo_path": "xgboost_extracted/xgboost-master/demo/guide-python/boost_from_prediction.py",
"type": "Python"
}
|
"""
Demo for boosting from prediction
=================================
"""
import os
import xgboost as xgb
CURRENT_DIR = os.path.dirname(__file__)
dtrain = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.train?format=libsvm")
)
dtest = xgb.DMatrix(
os.path.join(CURRENT_DIR, "../data/agaricus.txt.test?format=libsvm")
)
watchlist = [(dtest, "eval"), (dtrain, "train")]
###
# advanced: start from a initial base prediction
#
print("start running example to start from a initial prediction")
# specify parameters via map, definition are same as c++ version
param = {"max_depth": 2, "eta": 1, "objective": "binary:logistic"}
# train xgboost for 1 round
bst = xgb.train(param, dtrain, 1, watchlist)
# Note: we need the margin value instead of transformed prediction in
# set_base_margin
# do predict with output_margin=True, will always give you margin values
# before logistic transformation
ptrain = bst.predict(dtrain, output_margin=True)
ptest = bst.predict(dtest, output_margin=True)
dtrain.set_base_margin(ptrain)
dtest.set_base_margin(ptest)
print("this is result of running from initial prediction")
bst = xgb.train(param, dtrain, 1, watchlist)
|
dmlcREPO_NAMExgboostPATH_START.@xgboost_extracted@xgboost-master@demo@guide-python@boost_from_prediction.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "natashabatalha/PandExo",
"repo_path": "PandExo_extracted/PandExo-master/pandexo/engine/utils/__init__.py",
"type": "Python"
}
|
# This is a special __init__.py required for namespace packages.
# There should be no other code in this module.
# NOTE(review): pkg_resources-style namespace declaration is deprecated in
# modern setuptools; the pkgutil-style extend_path branch below is the
# fallback used when pkg_resources is unavailable.
try:
    from pkg_resources import declare_namespace
    declare_namespace(__name__)
except ImportError:
    from pkgutil import extend_path
    __path__ = extend_path(__path__, __name__)
|
natashabatalhaREPO_NAMEPandExoPATH_START.@PandExo_extracted@PandExo-master@pandexo@engine@utils@__init__.py@.PATH_END.py
|
{
"filename": "plot_total_xs.py",
"repo_name": "johnh2o2/ggadt",
"repo_path": "ggadt_extracted/ggadt-master/example/plot_total_xs.py",
"type": "Python"
}
|
"""Plot the 1D total extinction cross sections produced by GGADT.

Reads a GGADT output file with columns (E, Q_sca, Q_abs, Q_ext) and plots
all three efficiencies against energy on log-log axes.
"""
import matplotlib.pyplot as plt
import numpy as np
import argparse

parser = argparse.ArgumentParser(description="Plot 1d total extinction cross section output from GGADT")
parser.add_argument('file_name', metavar='ggadt-output-file', type=str, nargs=1, help="a file containing the output of GGADT")
args = parser.parse_args()
file_name = args.file_name[0]

# Bug fix: np.float_ was removed in NumPy 2.0 -- np.float64 is the
# portable, equivalent spelling.
data = np.loadtxt(file_name, dtype=np.dtype([('E', np.float64),
                                             ('qsca', np.float64),
                                             ('qabs', np.float64),
                                             ('qext', np.float64)]))

# now plot! The energy column is scaled by 1000 to match the eV axis label
# (input presumably in keV -- TODO confirm against GGADT output docs).
f = plt.figure()
ax = f.add_subplot(111)
ax.plot(data['E']*1000, data['qabs'], lw=1, color='r', label='abs')
ax.plot(data['E']*1000, data['qsca'], lw=1, color='b', label='sca')
ax.plot(data['E']*1000, data['qext'], lw=1.5, color='k', label='ext')
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlabel("Energy [eV]")
ax.set_ylabel("$Q$")
ax.legend(loc='best')
plt.show(block=True)
|
johnh2o2REPO_NAMEggadtPATH_START.@ggadt_extracted@ggadt-master@example@plot_total_xs.py@.PATH_END.py
|
{
"filename": "example.py",
"repo_name": "sherpa/sherpa",
"repo_path": "sherpa_extracted/sherpa-main/docs/_examples/examples/simple_user_model/example.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
def savefig(name):
    """Save the current matplotlib figure to *name* and log the file name."""
    plt.savefig(name)
    print(f"Created: {name}")
def report(name):
    """Echo ``print(<name>)`` as a banner, then print the evaluated value."""
    print(f"# print({name})")
    print(eval(name))
def dump(name):
    """Print a dump header, the expression text, then its ``repr`` value."""
    print("# dump", name, sep="\n")
    print(repr(eval(name)))
# Pull the measured curve from the paper's supplementary spreadsheet:
# column 0 is the abscissa, columns 3 and 4 the value and its error
# (first two spreadsheet rows are skipped as headers).
from openpyxl import load_workbook
wb = load_workbook('pone.0171996.s001.xlsx')
fig4 = wb['Fig4data']
t = []; y = []; dy = []
for r in list(fig4.values)[2:]:
    t.append(r[0])
    y.append(r[3])
    dy.append(r[4])

# Wrap the columns in a Sherpa Data1D object and plot the raw data.
from sherpa.data import Data1D
d = Data1D('NaNO_3', t, y, dy)
from sherpa.plot import DataPlot
dplot = DataPlot()
dplot.prepare(d)
dplot.plot()
savefig("data.png")
report("d")
dump("d.get_filter()")
# Ignore the data up to 1 and show the filter before/after.
d.ignore(None, 1)
dump("d.get_filter()")
dump("d.get_filter(format='%d')")
dplot.prepare(d)

# Model: a constant plateau divided by (1 + exponential rise); the
# exponential amplitude is frozen.
from sherpa.models.basic import Const1D, Exp
plateau = Const1D('plateau')
rise = Exp('rise')
mdl = plateau / (1 + rise)
report("mdl")
rise.ampl.freeze()
report("mdl")

# Show model and data in two stacked panels before fitting.
from sherpa.plot import ModelPlot
mplot = ModelPlot()
mplot.prepare(d, mdl)
plt.subplot(2, 1, 1)
mplot.plot(clearwindow=False)
plt.subplot(2, 1, 2)
dplot.plot(clearwindow=False)
plt.title('')
savefig("model_data_before_fit.png")
# Fit the model to the filtered data with the chi-square statistic.
from sherpa.stats import Chi2
from sherpa.fit import Fit
f = Fit(d, mdl, stat=Chi2())
report("f")
print("Starting statistic: {}".format(f.calc_stat()))
fitres = f.fit()
report("fitres.format()")
print("Reduced chi square = {:.2f}".format(fitres.rstat))
mplot.prepare(d, mdl)
dplot.plot()
mplot.overplot()
savefig("model_data_fit1.png")

# Refit with the NelderMead optimiser, continuing from the previous fit.
from sherpa.optmethods import NelderMead
f.method = NelderMead()
fitres2 = f.fit()
report("mdl")
dump("fitres2.dstatval")

# Reset the parameters, seed the plateau at the data maximum, and refit.
mdl.reset()
report("mdl")
plateau.c0 = np.max(d.y)
mplot.prepare(d, mdl)
dplot.plot()
mplot.overplot()
savefig("model_data_reset.png")
fitres3 = f.fit()
report("fitres3.format()")
mplot.prepare(d, mdl)
dplot.plot()
mplot.overplot()
savefig("model_data_fit2.png")

# Delta-chi residuals for the best fit.
from sherpa.plot import DelchiPlot
residplot = DelchiPlot()
residplot.prepare(d, mdl, f.stat)
residplot.plot()
savefig("model_data_delchi.png")

# Restore the full data range and plot data + model together.
d.notice()
dump("d.get_filter(format='%d')")
from sherpa.plot import FitPlot
fitplot = FitPlot()
dplot.prepare(d)
mplot.prepare(d, mdl)
fitplot.prepare(dplot, mplot)
fitplot.plot()
savefig("model_data_fit_all.png")

# do we get an error? Actually, it looks to not be the divide-by-zero
# being the problem but list/list instead:
#
"""
residplot.prepare(d, mdl, f.stat)
/home/djburke/miniconda2/envs/sherpa410-py35/lib/python3.5/site-packages/sherpa-4.10.0-py3.5-linux-x86_64.egg/sherpa/plot/__init__.py:1128: RuntimeWarning: divide by zero encountered in true_divide
  return (ylist[0] - ylist[1]) / staterr
Traceback (most recent call last):
  File "example.py", line 125, in <module>
    residplot.prepare(d, mdl, f.stat)
  File "/home/djburke/miniconda2/envs/sherpa410-py35/lib/python3.5/site-packages/sherpa-4.10.0-py3.5-linux-x86_64.egg/sherpa/plot/__init__.py", line 1140, in prepare
    self.yerr = staterr / staterr
TypeError: unsupported operand type(s) for /: 'list' and 'list'
"""

# Re-apply the filter and inspect fit statistics and parameter errors,
# first with the (default) covariance method at two sigma settings ...
d.ignore(None, 1)
statinfo = f.calc_stat_info()
report("statinfo")
dump("statinfo.rstat == fitres3.rstat")
dump("f.estmethod.name")
coverrs = f.est_errors()
report("coverrs.format()")
dump("f.estmethod.sigma")
f.estmethod.sigma = 1.6
coverrs90 = f.est_errors()
report("coverrs90.format()")

# ... then with the Confidence method (also for a single parameter).
from sherpa.estmethods import Confidence
f.estmethod = Confidence()
print("*** start confidence errors")
conferrs = f.est_errors()
print("*** end confidence errors")
report("conferrs.format()")
print("*** start confidence errors")
offseterrs = f.est_errors(parlist=(mdl.pars[1], ))
print("*** end confidence errors")
report("offseterrs")
fmt = "{:13s} covar=±{:4.2f} conf={:+5.2f} {:+5.2f}"
for i in range(len(conferrs.parnames)):
    print(fmt.format(conferrs.parnames[i], coverrs.parmaxes[i],
                     conferrs.parmins[i], conferrs.parmaxes[i]))

# 1D statistic scan around the plateau level, auto then manual grid.
from sherpa.plot import IntervalProjection
intproj = IntervalProjection()
intproj.calc(f, plateau.c0)
intproj.plot()
savefig("intproj_c0_auto.png")
intproj.prepare(min=12.5, max=20, nloop=51)
intproj.calc(f, plateau.c0)
intproj.plot()
s0 = f.calc_stat()
for ds in [1, 4, 9]:
    intproj.hline(s0 + ds, overplot=True, linestyle='dot', linecolor='gray')
savefig("intproj_c0_manual.png")

# 2D statistic scan over the exponential's offset and coefficient.
from sherpa.plot import RegionProjection
regproj = RegionProjection()
regproj.calc(f, rise.offset, rise.coeff)
regproj.contour()
savefig("regproj_offset_coeff_auto.png")
regproj.prepare(min=(2, -1.2), max=(8, -0.1), nloop=(21, 21))
regproj.calc(f, rise.offset, rise.coeff)
regproj.contour()
savefig("regproj_offset_coeff_manual.png")

from sherpa.models.basic import ArithmeticModel
from sherpa.models.parameter import Parameter
class MyExp(ArithmeticModel):
    """A simpler form of the Exp model.

    The model is f(x) = exp(a + b * x).
    """

    def __init__(self, name='myexp'):
        self.a = Parameter(name, 'a', 0)
        self.b = Parameter(name, 'b', -1)

        # The _exp instance is used to perform the model calculation,
        # as shown in the calc method.
        self._exp = Exp('hidden')

        # Register the parameters with the base class. Fix: do not
        # "return" the base-class __init__ result -- __init__ must
        # return None, and propagating the call's (None) value was
        # misleading.
        ArithmeticModel.__init__(self, name, (self.a, self.b))

    def calc(self, pars, *args, **kwargs):
        """Calculate the model.

        Delegates to the hidden Exp model after converting the
        parameter values to the form, and order, it expects:
        (offset, coeff, ampl), where exp(a + b*x) maps to
        offset = -a/b, coeff = b, ampl = 1.
        """
        coeff = pars[1]
        offset = -1 * pars[0] / coeff
        ampl = 1.0
        return self._exp.calc([offset, coeff, ampl], *args, **kwargs)
# Rebuild the composite model using the user-defined MyExp and refit.
plateau2 = Const1D('plateau2')
rise2 = MyExp('rise2')
mdl2 = plateau2 / (1 + rise2)
report("mdl2")
fit2 = Fit(d, mdl2, stat=Chi2())
res2 = fit2.fit()
report("res2.format()")
dplot.prepare(d)
mplot2 = ModelPlot()
mplot2.prepare(d, mdl2)
dplot.plot()
mplot2.overplot()
savefig("model_data_myexp.png")

# Confidence errors for the user-model fit.
fit2.estmethod = Confidence()
print("*** start confidence errors")
conferrs2 = fit2.est_errors()
print("*** end confidence errors")
report("conferrs2.format()")

# 2D statistic scan over (a, b), with the paper's tabulated point overlaid.
regproj2 = RegionProjection()
regproj2.prepare(min=(0.5, -1.2), max=(5, -0.1), nloop=(21, 21))
regproj2.calc(fit2, rise2.a, rise2.b)
regproj2.contour()
plt.plot(1.941, -0.453, 'ko', label='NaNO$_3$ Table 5')
plt.legend(loc=1)
savefig("regproj_a_b_manual.png")
sherpaREPO_NAMEsherpaPATH_START.@sherpa_extracted@sherpa-main@docs@_examples@examples@simple_user_model@example.py@.PATH_END.py
|
{
"filename": "plot_mg_fs8_bs8.py",
"repo_name": "philbull/RadioFisher",
"repo_path": "RadioFisher_extracted/RadioFisher-master/plotting/plot_mg_fs8_bs8.py",
"type": "Python"
}
|
#!/usr/bin/python
"""
Plot functions of redshift for RSDs.
"""
# NOTE(review): this script uses Python 2 print statements throughout.
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm

e = rf.experiments
eg = rf.experiments_galaxy
cosmo = rf.experiments.cosmo

fname = 'mg-fbsigma8-combined.pdf'

# Fisher-output run names and the matching plot labels / experiment
# definitions; colours/markers/sizes are indexed in the same order.
names = ['SKA1MID900_mg', 'SKA1MID350_mg', 'fSKA1SUR650_mg', 'fSKA1SUR350_mg',
         'gSKAMIDMKB2_mg', 'gSKASURASKAP_mg', 'gSKA2_mg', 'EuclidRef_mg_Dz_kmg0.01']
labels = ['SKA1-MID 900 (IM)', 'SKA1-MID 350 (IM)', 'SKA1-SUR 650 (IM)',
          'SKA1-SUR 350 (IM)', 'SKA1-MID (gal.)', 'SKA1-SUR (gal.)',
          'SKA2 (gal.)', 'Euclid (gal.)']
expts = [e.SKA1MID900, e.SKA1MID350, e.SKA1SUR650, e.SKA1SUR350,
         eg.SKAMIDMKB2, eg.SKASURASKAP, eg.SKA2, eg.EuclidRef]
colours = ['#8082FF', '#1619A1', '#FFB928', '#ff6600', '#95CD6D', '#007A10', '#CC0000',
           '#000000', '#858585', '#c1c1c1']
linestyle = [[], [], [], [], [], [], [], [], []]
marker = ['s', 's', 's', 's', 'o', 'o', 'o', 'o', 'o']
ms = [6., 6., 6., 6., 6., 6., 5., 5., 5.]
# Fiducial value and plotting
fig = P.figure()
ax = [fig.add_subplot(311), fig.add_subplot(312), fig.add_subplot(313)]

# One pass per experiment: load its Fisher outputs, derive errors on
# f*sigma_8 and b*sigma_8, and add curves to the three panels.
for k in range(len(names)):
    root = "output/" + names[k]

    # Load cosmo fns.
    dat = np.atleast_2d( np.genfromtxt(root+"-cosmofns-zc.dat") ).T
    zc, Hc, dAc, Dc, fc = dat
    z, H, dA, D, f = np.genfromtxt(root+"-cosmofns-smooth.dat").T
    kc = np.genfromtxt(root+"-fisher-kc.dat").T

    # Load Fisher matrices as fn. of z
    Nbins = zc.size
    F_list = [np.genfromtxt(root+"-fisher-full-%d.dat" % i) for i in range(Nbins)]

    # EOS FISHER MATRIX
    # Actually, (aperp, apar) are (D_A, H)
    pnames = rf.load_param_names(root+"-fisher-full-0.dat")
    #zfns = ['A', 'b_HI', 'f', 'H', 'DA', 'aperp', 'apar']
    zfns = ['A', 'bs8', 'fs8', 'H', 'DA', 'aperp', 'apar']
    excl = ['Tb', 'n_s', 'sigma8', 'omegak', 'omegaDE', 'w0', 'wa', 'h',
            'gamma', 'N_eff', 'pk*', 'f', 'b_HI',
            'gamma0', 'gamma1', 'eta0', 'eta1', 'A_xi', 'k_mg']

    # Marginalising over b_1
    F, lbls = rf.combined_fisher_matrix( F_list,
                                         expand=zfns, names=pnames,
                                         exclude=excl )
    cov = np.linalg.inv(F)
    errs = np.sqrt(np.diag(cov))

    # Fixing b_1 (adds it to the exclusion list)
    excl.append('b_1')
    F2, lbls2 = rf.combined_fisher_matrix( F_list,
                                           expand=zfns, names=pnames,
                                           exclude=excl )
    cov2 = np.linalg.inv(F2)
    errs2 = np.sqrt(np.diag(cov2))

    # Identify functions of z
    pfs8 = rf.indices_for_param_names(lbls, 'fs8*')
    pfs8_2 = rf.indices_for_param_names(lbls2, 'fs8*')
    pbs8 = rf.indices_for_param_names(lbls, 'bs8*')
    pbs8_2 = rf.indices_for_param_names(lbls2, 'bs8*')

    """
    print ""
    print "#", names[k]
    print "# z, fsigma8, sigma(fsigma8)"
    for j in range(zc.size):
        print "%4.4f %5.5e %5.5e" % (zc[j], (cosmo['sigma_8']*fc*Dc)[j], errs[pfs8][j])
    """
    # FIXME: Disable to get redshift markers
    #marker[k] = None
    print lbls2
    print errs2

    # Load bias for experiment; galaxy surveys provide b directly, IM
    # surveys fall back to the HI bias model.
    try:
        print names[k]
        expt = rf.experiments_galaxy.load_expt(expts[k])
        b = expt['b']
        print b
    except:
        print "IM survey!"
        b = rf.bias_HI(zc, e.cosmo)
        pass
    bs8 = cosmo['sigma_8']*b*Dc

    # (1) Plot bs8 errors as fn. of redshift (b_1 fixed)
    err2 = errs2[pbs8_2]
    print err2/bs8
    line = ax[2].plot( zc, err2/bs8, color=colours[k], label=labels[k],
                       lw=2.4, ls='solid', marker='None', markersize=ms[k],
                       markeredgecolor=colours[k] )
    ax[2].set_ylabel('$\sigma(b \sigma_8) / (b \sigma_8)$', labelpad=15.,
                     fontdict={'fontsize':'xx-large'})

    # (2) Plot fs8 errors as fn. of redshift (b_1 fixed)
    #err2 = errs2[pfs8_2] / (cosmo['sigma_8']*fc*Dc)
    #line = ax[1].plot( zc, err2, color=colours[k], label=labels[k], lw=1.8,
    #                   ls='solid', marker=marker[k], markersize=ms[k],
    #                   markeredgecolor=colours[k] )
    #ax[1].set_ylabel('$\sigma(f \sigma_8) / (f \sigma_8)$', labelpad=15.,
    #                 fontdict={'fontsize':'xx-large'})

    # (2) Plot fs8-bs8 correlation as fn. of redshift (b_1 fixed)
    err2 = errs2[pfs8_2] / (cosmo['sigma_8']*fc*Dc)
    r = cov2[pfs8_2, pbs8_2] / np.sqrt(cov2[pfs8_2, pfs8_2] * cov2[pbs8_2, pbs8_2])
    line = ax[0].plot( zc, r, color=colours[k], label=labels[k], lw=2.4,
                       ls='solid', marker='None', markersize=ms[k],
                       markeredgecolor=colours[k] )
    ax[0].set_ylabel(r'$\rho(f\sigma_8, b\sigma_8)$', labelpad=15.,
                     fontdict={'fontsize':'xx-large'})

    # (3a) Plot fs8 errors as fn. of redshift (b_1 marginalised)
    err = errs[pfs8] / (cosmo['sigma_8']*fc*Dc)
    line = ax[1].plot( zc, err, color=colours[k], lw=2.4, ls='dashed',
                       marker='None', markersize=ms[k],
                       markeredgecolor=colours[k] )

    # (3b) Plot fs8 errors as fn. of redshift (b_1 fixed)
    err2 = errs2[pfs8_2] / (cosmo['sigma_8']*fc*Dc)
    line = ax[1].plot( zc, err2, color=colours[k], label=labels[k], lw=2.4,
                       ls='solid', marker='None', markersize=ms[k],
                       markeredgecolor=colours[k] )
    ax[1].set_ylabel('$\sigma(f \sigma_8) / (f \sigma_8)$', labelpad=15.,
                     fontdict={'fontsize':'xx-large'})

# Load actual f.sigma_8 data and plot it
#dat = np.genfromtxt("fsigma8_data.dat").T
#ax[1].plot(dat[1], dat[3], 'kD')
#ax[0].plot(dat[1], dat[3], 'kD')
# Set common axis properties
for _ax in ax:
    _ax.tick_params(axis='both', which='major', labelsize=20, labelbottom=False,
                    width=1.5, size=8., pad=10)
    _ax.tick_params(axis='both', which='minor', width=1.5, size=5., pad=10)
    # Set tick locations
    _ax.yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.01))
    _ax.yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.01))
    # Set axis limits
    _ax.set_xlim((-0.02, 2.6))

# Labels only on bottom panel
# NOTE(review): ax[0] is the *top* subplot (311); this re-enables labels on
# it despite the comment -- confirm which panel was intended.
ax[0].tick_params(axis='both', which='major', labelbottom=True, labelsize=20, width=1.5, size=8., pad=10)
ax[0].tick_params(axis='both', which='minor', labelsize=20, width=1.5, size=8.)

ax[0].set_ylim((-0.8, 0.52))
ax[1].set_ylim((0.0, 0.059))
ax[2].set_ylim((0.0, 0.059))

ax[0].yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.2))
ax[0].yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.1))
ax[0].axhline(0., color='k', ls='dashed', lw=1.5)
ax[0].set_xlabel('$z$', labelpad=10., fontdict={'fontsize':'xx-large'})

# Set tick locations
#P.gca().yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.01))
#P.gca().yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.01))

leg = ax[2].legend(prop={'size':'large'}, loc='upper right', frameon=True, ncol=2)
leg.get_frame().set_edgecolor('w')
leg.get_frame().set_alpha(0.8)

# Set size
#P.gcf().set_size_inches(9.5, 6.8)
P.gcf().set_size_inches(9., 15.5)

# Move subplots
# pos = [[x0, y0], [x1, y1]]
l0 = 0.15
b0 = 0.07
ww = 0.8
hh = 0.9 / 3.
for i in range(len(ax))[::-1]:
    ax[i].set_position([l0, b0 + hh*i, ww, hh])

P.savefig(fname, transparent=True)
P.show()
philbullREPO_NAMERadioFisherPATH_START.@RadioFisher_extracted@RadioFisher-master@plotting@plot_mg_fs8_bs8.py@.PATH_END.py
|
{
"filename": "canonical_constraint.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py2/scipy/optimize/_trustregion_constr/canonical_constraint.py",
"type": "Python"
}
|
import numpy as np
import scipy.sparse as sps
class CanonicalConstraint(object):
    """Canonical constraint to use with trust-constr algorithm.

    It represents the set of constraints of the form::

        f_eq(x) = 0
        f_ineq(x) <= 0

    Where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see
    below.

    The class is supposed to be instantiated by factory methods, which
    should prepare the parameters listed below.

    Parameters
    ----------
    n_eq, n_ineq : int
        Number of equality and inequality constraints respectively.
    fun : callable
        Function defining the constraints. The signature is
        ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq`
        components and ``c_ineq`` is ndarray with `n_ineq` components.
    jac : callable
        Function to evaluate the Jacobian of the constraint. The signature
        is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are
        either ndarray of csr_matrix of shapes (n_eq, n) and (n_ineq, n)
        respectively.
    hess : callable
        Function to evaluate the Hessian of the constraints multiplied
        by Lagrange multipliers, that is
        ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is
        ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied
        shape (n, n) and provide a matrix-vector product operation
        ``H.dot(p)``.
    keep_feasible : ndarray, shape (n_ineq,)
        Mask indicating which inequality constraints should be kept feasible.
    """
    def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible):
        self.n_eq = n_eq
        self.n_ineq = n_ineq
        self.fun = fun
        self.jac = jac
        self.hess = hess
        self.keep_feasible = keep_feasible

    @classmethod
    def from_PreparedConstraint(cls, constraint):
        """Create an instance from a `PreparedConstraint` object."""
        lb, ub = constraint.bounds
        cfun = constraint.fun
        keep_feasible = constraint.keep_feasible
        # Dispatch on the bound structure.  (Fix: the all-infinite case was
        # previously tested by two identical consecutive ``if`` statements;
        # only one is needed.)
        if np.all(lb == -np.inf) and np.all(ub == np.inf):
            return cls.empty(cfun.n)
        elif np.all(lb == ub):
            return cls._equal_to_canonical(cfun, lb)
        elif np.all(lb == -np.inf):
            return cls._less_to_canonical(cfun, ub, keep_feasible)
        elif np.all(ub == np.inf):
            return cls._greater_to_canonical(cfun, lb, keep_feasible)
        else:
            return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)

    @classmethod
    def empty(cls, n):
        """Create an "empty" instance.

        This "empty" instance is required to allow working with unconstrained
        problems as if they have some constraints.
        """
        empty_fun = np.empty(0)
        empty_jac = np.empty((0, n))
        empty_hess = sps.csr_matrix((n, n))

        def fun(x):
            return empty_fun, empty_fun

        def jac(x):
            return empty_jac, empty_jac

        def hess(x, v_eq, v_ineq):
            return empty_hess

        return cls(0, 0, fun, jac, hess, np.empty(0))

    @classmethod
    def concatenate(cls, canonical_constraints, sparse_jacobian):
        """Concatenate multiple `CanonicalConstraint` into one.

        `sparse_jacobian` (bool) determines the Jacobian format of the
        concatenated constraint. Note that items in `canonical_constraints`
        must have their Jacobians in the same format.
        """
        def fun(x):
            eq_all = []
            ineq_all = []
            for c in canonical_constraints:
                eq, ineq = c.fun(x)
                eq_all.append(eq)
                ineq_all.append(ineq)
            return np.hstack(eq_all), np.hstack(ineq_all)

        if sparse_jacobian:
            vstack = sps.vstack
        else:
            vstack = np.vstack

        def jac(x):
            eq_all = []
            ineq_all = []
            for c in canonical_constraints:
                eq, ineq = c.jac(x)
                eq_all.append(eq)
                ineq_all.append(ineq)
            return vstack(eq_all), vstack(ineq_all)

        def hess(x, v_eq, v_ineq):
            # Slice the stacked multiplier vectors back into per-constraint
            # pieces and sum the individual Hessian actions lazily via a
            # LinearOperator.
            hess_all = []
            index_eq = 0
            index_ineq = 0
            for c in canonical_constraints:
                vc_eq = v_eq[index_eq:index_eq + c.n_eq]
                vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq]
                hess_all.append(c.hess(x, vc_eq, vc_ineq))
                index_eq += c.n_eq
                index_ineq += c.n_ineq

            def matvec(p):
                result = np.zeros_like(p)
                for h in hess_all:
                    result += h.dot(p)
                return result

            n = x.shape[0]
            return sps.linalg.LinearOperator((n, n), matvec, dtype=float)

        n_eq = sum(c.n_eq for c in canonical_constraints)
        n_ineq = sum(c.n_ineq for c in canonical_constraints)
        keep_feasible = np.hstack([c.keep_feasible for c in
                                   canonical_constraints])

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _equal_to_canonical(cls, cfun, value):
        # lb == ub everywhere: all constraints become equalities
        # f(x) - value = 0; there are no inequalities.
        empty_fun = np.empty(0)
        n = cfun.n
        n_eq = value.shape[0]
        n_ineq = 0
        keep_feasible = np.empty(0, dtype=bool)
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        def fun(x):
            return cfun.fun(x) - value, empty_fun

        def jac(x):
            return cfun.jac(x), empty_jac

        def hess(x, v_eq, v_ineq):
            return cfun.hess(x, v_eq)

        # (Fix: a dead, duplicated re-computation of empty_fun/n/empty_jac
        # that followed here was removed; it had no effect on the closures.)
        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _less_to_canonical(cls, cfun, ub, keep_feasible):
        # Upper bounds only: f(x) - ub <= 0, restricted to finite ub.
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_ub = ub < np.inf
        n_eq = 0
        n_ineq = np.sum(finite_ub)

        if np.all(finite_ub):
            def fun(x):
                return empty_fun, cfun.fun(x) - ub

            def jac(x):
                return empty_jac, cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, v_ineq)
        else:
            finite_ub = np.nonzero(finite_ub)[0]
            keep_feasible = keep_feasible[finite_ub]
            ub = ub[finite_ub]

            def fun(x):
                return empty_fun, cfun.fun(x)[finite_ub] - ub

            def jac(x):
                return empty_jac, cfun.jac(x)[finite_ub]

            def hess(x, v_eq, v_ineq):
                # Scatter the multipliers back to the full component set.
                v = np.zeros(cfun.m)
                v[finite_ub] = v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _greater_to_canonical(cls, cfun, lb, keep_feasible):
        # Lower bounds only: lb - f(x) <= 0, restricted to finite lb.
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_lb = lb > -np.inf
        n_eq = 0
        n_ineq = np.sum(finite_lb)

        if np.all(finite_lb):
            def fun(x):
                return empty_fun, lb - cfun.fun(x)

            def jac(x):
                return empty_jac, -cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, -v_ineq)
        else:
            finite_lb = np.nonzero(finite_lb)[0]
            keep_feasible = keep_feasible[finite_lb]
            lb = lb[finite_lb]

            def fun(x):
                return empty_fun, lb - cfun.fun(x)[finite_lb]

            def jac(x):
                return empty_jac, -cfun.jac(x)[finite_lb]

            def hess(x, v_eq, v_ineq):
                # Scatter (negated) multipliers back to the full set.
                v = np.zeros(cfun.m)
                v[finite_lb] = -v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible):
        # General case: split components into equalities, one-sided
        # inequalities and two-sided intervals (which contribute two
        # inequalities each).
        lb_inf = lb == -np.inf
        ub_inf = ub == np.inf
        equal = lb == ub
        less = lb_inf & ~ub_inf
        greater = ub_inf & ~lb_inf
        interval = ~equal & ~lb_inf & ~ub_inf

        equal = np.nonzero(equal)[0]
        less = np.nonzero(less)[0]
        greater = np.nonzero(greater)[0]
        interval = np.nonzero(interval)[0]
        n_less = less.shape[0]
        n_greater = greater.shape[0]
        n_interval = interval.shape[0]
        n_ineq = n_less + n_greater + 2 * n_interval
        n_eq = equal.shape[0]

        keep_feasible = np.hstack((keep_feasible[less],
                                   keep_feasible[greater],
                                   keep_feasible[interval],
                                   keep_feasible[interval]))

        def fun(x):
            f = cfun.fun(x)
            eq = f[equal] - lb[equal]
            le = f[less] - ub[less]
            ge = lb[greater] - f[greater]
            il = f[interval] - ub[interval]
            ig = lb[interval] - f[interval]
            return eq, np.hstack((le, ge, il, ig))

        def jac(x):
            J = cfun.jac(x)
            eq = J[equal]
            le = J[less]
            ge = -J[greater]
            il = J[interval]
            ig = -il
            if sps.issparse(J):
                ineq = sps.vstack((le, ge, il, ig))
            else:
                ineq = np.vstack((le, ge, il, ig))
            return eq, ineq

        def hess(x, v_eq, v_ineq):
            # Multipliers arrive stacked in the same order fun() emits
            # them: less, greater, interval-upper, interval-lower.
            n_start = 0
            v_l = v_ineq[n_start:n_start + n_less]
            n_start += n_less
            v_g = v_ineq[n_start:n_start + n_greater]
            n_start += n_greater
            v_il = v_ineq[n_start:n_start + n_interval]
            n_start += n_interval
            v_ig = v_ineq[n_start:n_start + n_interval]

            v = np.zeros_like(lb)
            v[equal] = v_eq
            v[less] = v_l
            v[greater] = -v_g
            v[interval] = v_il - v_ig

            return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian):
    """Convert initial values of the constraints to the canonical format.

    Reuses the function and Jacobian values already saved in each
    `PreparedConstraint`, avoiding one additional evaluation at the initial
    point; the pieces are rearranged and concatenated into the canonical
    (equality, inequality) layout.
    """
    eq_vals, ineq_vals = [], []
    eq_jacs, ineq_jacs = [], []

    for prepared in prepared_constraints:
        fval = prepared.fun.f
        jmat = prepared.fun.J
        lb, ub = prepared.bounds
        if np.all(lb == ub):
            # Pure equality constraint: f(x) - lb = 0.
            eq_vals.append(fval - lb)
            eq_jacs.append(jmat)
        elif np.all(lb == -np.inf):
            # Upper bounds only; keep the finite components.
            has_ub = ub < np.inf
            ineq_vals.append(fval[has_ub] - ub[has_ub])
            ineq_jacs.append(jmat[has_ub])
        elif np.all(ub == np.inf):
            # Lower bounds only; keep the finite components.
            has_lb = lb > -np.inf
            ineq_vals.append(lb[has_lb] - fval[has_lb])
            ineq_jacs.append(-jmat[has_lb])
        else:
            # Mixed case: classify each component by its bound pattern.
            no_lb = lb == -np.inf
            no_ub = ub == np.inf
            fixed = lb == ub
            upper_only = no_lb & ~no_ub
            lower_only = no_ub & ~no_lb
            two_sided = ~fixed & ~no_lb & ~no_ub

            eq_vals.append(fval[fixed] - lb[fixed])
            ineq_vals.append(fval[upper_only] - ub[upper_only])
            ineq_vals.append(lb[lower_only] - fval[lower_only])
            # A two-sided interval contributes both inequalities.
            ineq_vals.append(fval[two_sided] - ub[two_sided])
            ineq_vals.append(lb[two_sided] - fval[two_sided])

            eq_jacs.append(jmat[fixed])
            ineq_jacs.append(jmat[upper_only])
            ineq_jacs.append(-jmat[lower_only])
            ineq_jacs.append(jmat[two_sided])
            ineq_jacs.append(-jmat[two_sided])

    c_eq = np.hstack(eq_vals) if eq_vals else np.empty(0)
    c_ineq = np.hstack(ineq_vals) if ineq_vals else np.empty(0)

    if sparse_jacobian:
        stack, no_rows = sps.vstack, sps.csr_matrix((0, n))
    else:
        stack, no_rows = np.vstack, np.empty((0, n))
    J_eq = stack(eq_jacs) if eq_jacs else no_rows
    J_ineq = stack(ineq_jacs) if ineq_jacs else no_rows

    return c_eq, c_ineq, J_eq, J_ineq
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py2@scipy@optimize@_trustregion_constr@canonical_constraint.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "johnh2o2/cuvarbase",
"repo_path": "cuvarbase_extracted/cuvarbase-master/cuvarbase/tests/__init__.py",
"type": "Python"
}
|
johnh2o2REPO_NAMEcuvarbasePATH_START.@cuvarbase_extracted@cuvarbase-master@cuvarbase@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "test_age_graph.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/graphs/test_age_graph.py",
"type": "Python"
}
|
import os
import re
import unittest
from typing import Any, Dict
from langchain_core.documents import Document
from langchain_community.graphs.age_graph import AGEGraph
from langchain_community.graphs.graph_document import GraphDocument, Node, Relationship
# Minimal one-edge graph fixture shared by the ingestion tests below:
# (foo:foo)-[:REL]->(bar:bar), attached to a single source document.
test_data = [
    GraphDocument(
        nodes=[Node(id="foo", type="foo"), Node(id="bar", type="bar")],
        relationships=[
            Relationship(
                source=Node(id="foo", type="foo"),
                target=Node(id="bar", type="bar"),
                type="REL",
            )
        ],
        source=Document(page_content="source document"),
    )
]
class TestAGEGraph(unittest.TestCase):
def test_node_properties(self) -> None:
    """Schema introspection reports per-label node property metadata."""
    pg_conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    for required in ("database", "user", "password"):
        self.assertIsNotNone(pg_conf[required])

    graph = AGEGraph(os.getenv("AGE_GRAPH_NAME", "age_test"), pg_conf)
    # Start from an empty graph.
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )

    n_labels, e_labels = graph._get_labels()
    node_properties = graph._get_node_properties(n_labels)
    expected = [
        {
            "properties": [{"property": "property_a", "type": "STRING"}],
            "labels": "LabelA",
        },
        {"properties": [], "labels": "LabelB"},
        {"properties": [], "labels": "LabelC"},
    ]
    # Sort by label so the comparison is order-independent.
    self.assertEqual(
        sorted(node_properties, key=lambda item: item["labels"]), expected
    )
def test_edge_properties(self) -> None:
    """Schema introspection reports relationship property metadata."""
    pg_conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    for required in ("database", "user", "password"):
        self.assertIsNotNone(pg_conf[required])

    graph = AGEGraph(os.getenv("AGE_GRAPH_NAME", "age_test"), pg_conf)
    # Start from an empty graph.
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )

    n_labels, e_labels = graph._get_labels()
    relationships_properties = graph._get_edge_properties(e_labels)
    expected = [
        {
            "type": "REL_TYPE",
            "properties": [{"property": "rel_prop", "type": "STRING"}],
        }
    ]
    self.assertEqual(relationships_properties, expected)
def test_relationships(self) -> None:
    """Schema introspection reports (start, type, end) label triples."""
    pg_conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    for required in ("database", "user", "password"):
        self.assertIsNotNone(pg_conf[required])

    graph = AGEGraph(os.getenv("AGE_GRAPH_NAME", "age_test"), pg_conf)
    # Start from an empty graph.
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.query(
        """
        CREATE (la:LabelA {property_a: 'a'})
        CREATE (lb:LabelB)
        CREATE (lc:LabelC)
        MERGE (la)-[:REL_TYPE]-> (lb)
        MERGE (la)-[:REL_TYPE {rel_prop: 'abc'}]-> (lc)
        """
    )

    n_labels, e_labels = graph._get_labels()
    triples = graph._get_triples(e_labels)
    expected = [
        {"start": "LabelA", "type": "REL_TYPE", "end": "LabelB"},
        {"start": "LabelA", "type": "REL_TYPE", "end": "LabelC"},
    ]
    # Sort by end label so the comparison is order-independent.
    self.assertEqual(sorted(triples, key=lambda item: item["end"]), expected)
def test_add_documents(self) -> None:
    """Ingesting the fixture creates one node per unique node id."""
    pg_conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    for required in ("database", "user", "password"):
        self.assertIsNotNone(pg_conf[required])

    graph = AGEGraph(os.getenv("AGE_GRAPH_NAME", "age_test"), pg_conf)
    # Delete all nodes in the graph
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.add_graph_documents(test_data)
    output = graph.query(
        "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY labels(n)"
    )
    self.assertEqual(
        output, [{"label": ["bar"], "count": 1}, {"label": ["foo"], "count": 1}]
    )
def test_add_documents_source(self) -> None:
    """With include_source=True an extra Document node is created."""
    pg_conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    for required in ("database", "user", "password"):
        self.assertIsNotNone(pg_conf[required])

    graph = AGEGraph(os.getenv("AGE_GRAPH_NAME", "age_test"), pg_conf)
    # Delete all nodes in the graph
    graph.query("MATCH (n) DETACH DELETE n")
    # Create two nodes and a relationship
    graph.add_graph_documents(test_data, include_source=True)
    output = graph.query(
        "MATCH (n) RETURN labels(n) AS label, count(*) AS count ORDER BY labels(n)"
    )
    expected = [
        {"label": ["bar"], "count": 1},
        {"label": ["Document"], "count": 1},
        {"label": ["foo"], "count": 1},
    ]
    self.assertEqual(output, expected)
def test_get_schema(self) -> None:
    """get_schema / get_structured_schema reflect graph contents only
    after an explicit refresh_schema() call."""
    # Connection settings from the environment; required vars must be set.
    conf = {
        "database": os.getenv("AGE_PGSQL_DB"),
        "user": os.getenv("AGE_PGSQL_USER"),
        "password": os.getenv("AGE_PGSQL_PASSWORD"),
        "host": os.getenv("AGE_PGSQL_HOST", "localhost"),
        "port": int(os.getenv("AGE_PGSQL_PORT", 5432)),
    }
    self.assertIsNotNone(conf["database"])
    self.assertIsNotNone(conf["user"])
    self.assertIsNotNone(conf["password"])

    graph_name = os.getenv("AGE_GRAPH_NAME", "age_test")

    graph = AGEGraph(graph_name, conf)

    # Empty the graph so the schema starts out empty.
    graph.query("MATCH (n) DETACH DELETE n")

    graph.refresh_schema()

    expected = """
    Node properties are the following:
    []
    Relationship properties are the following:
    []
    The relationships are the following:
    []
    """
    # check that works on empty schema
    # (whitespace is stripped on both sides, so only the tokens matter)
    self.assertEqual(
        re.sub(r"\s", "", graph.get_schema), re.sub(r"\s", "", expected)
    )

    expected_structured: Dict[str, Any] = {
        "node_props": {},
        "rel_props": {},
        "relationships": [],
        "metadata": {},
    }

    self.assertEqual(graph.get_structured_schema, expected_structured)

    # Create two nodes and a relationship
    graph.query(
        """
        MERGE (a:a {id: 1})-[b:b {id: 2}]-> (c:c {id: 3})
        """
    )

    # check that schema doesn't update without refresh
    self.assertEqual(
        re.sub(r"\s", "", graph.get_schema), re.sub(r"\s", "", expected)
    )
    self.assertEqual(graph.get_structured_schema, expected_structured)

    # two possible orderings of node props (label discovery order is
    # not deterministic, so accept either)
    expected_possibilities = [
        """
        Node properties are the following:
        [
            {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'labels': 'a'},
            {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'labels': 'c'}
        ]
        Relationship properties are the following:
        [
            {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'type': 'b'}
        ]
        The relationships are the following:
        [
            '(:`a`)-[:`b`]->(:`c`)'
        ]
        """,
        """
        Node properties are the following:
        [
            {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'labels': 'c'},
            {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'labels': 'a'}
        ]
        Relationship properties are the following:
        [
            {'properties': [{'property': 'id', 'type': 'INTEGER'}], 'type': 'b'}
        ]
        The relationships are the following:
        [
            '(:`a`)-[:`b`]->(:`c`)'
        ]
        """,
    ]

    expected_structured2 = {
        "node_props": {
            "a": [{"property": "id", "type": "INTEGER"}],
            "c": [{"property": "id", "type": "INTEGER"}],
        },
        "rel_props": {"b": [{"property": "id", "type": "INTEGER"}]},
        "relationships": [{"start": "a", "type": "b", "end": "c"}],
        "metadata": {},
    }

    graph.refresh_schema()

    # check that schema is refreshed
    self.assertIn(
        re.sub(r"\s", "", graph.get_schema),
        [re.sub(r"\s", "", x) for x in expected_possibilities],
    )
    self.assertEqual(graph.get_structured_schema, expected_structured2)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@graphs@test_age_graph.py@.PATH_END.py
|
{
"filename": "epsie.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/pycbc/inference/sampler/epsie.py",
"type": "Python"
}
|
# Copyright (C) 2019 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""This module provides classes for interacting with epsie samplers.
"""
import numpy
import epsie
from epsie.samplers import ParallelTemperedSampler
# we'll use emcee_pt's default beta ladder for temperature levels
from emcee.ptsampler import default_beta_ladder
from pycbc.pool import choose_pool
from .base import (BaseSampler, setup_output)
from .base_mcmc import (BaseMCMC, get_optional_arg_from_config,
nsamples_in_chain)
from .base_multitemper import (MultiTemperedSupport, compute_acf, compute_acl,
acl_from_raw_acls)
from ..burn_in import MultiTemperedMCMCBurnInTests
from ..jump import epsie_proposals_from_config
from ..io import EpsieFile
from .. import models
class EpsieSampler(MultiTemperedSupport, BaseMCMC, BaseSampler):
    """Constructs an MCMC sampler using epsie's parallel-tempered sampler.

    Parameters
    ----------
    model : model
        A model from ``pycbc.inference.models``.
    nchains : int
        Number of chains to use in the sampler.
    ntemps : int, optional
        Number of temperatures to use in the sampler. A geometrically-spaced
        temperature ladder with the given number of levels will be constructed
        based on the number of parameters. If not provided, must provide
        ``betas``.
    betas : array, optional
        An array of inverse temperature values to be used in for the
        temperature ladder. If not provided, must provide ``ntemps``.
    proposals : list, optional
        List of proposals to use. Any parameters that do not have a proposal
        provided will use the ``default_proposal``. **Note:** proposals should
        be specified for the sampling parameters, not the
        variable parameters.
    default_proposal : an epsie.Proposal class, optional
        The default proposal to use for parameters not in ``proposals``.
        Default is :py:class:`epsie.proposals.Normal`.
    default_proposal_args : dict, optional
        Dictionary of arguments to pass to the default proposal.
    swap_interval : int, optional
        The number of iterations between temperature swaps. Default is 1.
    seed : int, optional
        Seed for epsie's random number generator. If None provided, will create
        one.
    checkpoint_interval : int, optional
        Specify the number of iterations to do between checkpoints. If not
        provided, no checkpointing will be done.
    checkpoint_signal : str, optional
        Set the signal to use when checkpointing. For example, 'USR2'.
    loglikelihood_function : str, optional
        Set the function to call from the model for the ``loglikelihood``.
        Default is ``loglikelihood``.
    nprocesses : int, optional
        The number of parallel processes to use. Default is 1
        (no parallelization).
    use_mpi : bool, optional
        Use MPI for parallelization. Default (False) will use python's
        multiprocessing.
    """
    name = "epsie"
    _io = EpsieFile
    burn_in_class = MultiTemperedMCMCBurnInTests

    def __init__(self, model, nchains, ntemps=None, betas=None,
                 proposals=None, default_proposal=None,
                 default_proposal_args=None, seed=None,
                 swap_interval=1,
                 checkpoint_interval=None, checkpoint_signal=None,
                 loglikelihood_function=None,
                 nprocesses=1, use_mpi=False):
        # create the betas if not provided
        if betas is None:
            betas = default_beta_ladder(len(model.variable_params),
                                        ntemps=ntemps)
        self.model = model
        # create a wrapper for calling the model
        model_call = _EpsieCallModel(model, loglikelihood_function)
        # these are used to help parallelize over multiple cores / MPI:
        # the wrapped model is stashed at module level so worker processes
        # call it through the picklable _call_global_model function
        models._global_instance = model_call
        model_call = models._call_global_model
        # Set up the pool
        pool = choose_pool(mpi=use_mpi, processes=nprocesses)
        # initialize the sampler
        self._sampler = ParallelTemperedSampler(
            model.sampling_params, model_call, nchains, betas=betas,
            swap_interval=swap_interval,
            proposals=proposals, default_proposal=default_proposal,
            default_proposal_args=default_proposal_args,
            seed=seed, pool=pool)
        # set other parameters
        self.nchains = nchains
        self._ntemps = ntemps
        self._checkpoint_interval = checkpoint_interval
        self._checkpoint_signal = checkpoint_signal

    @property
    def io(self):
        # The IO class used for reading/writing results (EpsieFile).
        return self._io

    @property
    def base_shape(self):
        # Leading dimensions of all sample arrays: (ntemps, nchains).
        return (self.ntemps, self.nchains,)

    @property
    def betas(self):
        """The inverse temperatures being used."""
        return self._sampler.betas

    @property
    def seed(self):
        """The seed used for epsie's random bit generator.

        This is not the same as the seed used for the prior distributions.
        """
        return self._sampler.seed

    @property
    def swap_interval(self):
        """Number of iterations between temperature swaps."""
        return self._sampler.swap_interval

    @staticmethod
    def compute_acf(filename, **kwargs):
        r"""Computes the autocorrelation function.

        Calls :py:func:`base_multitemper.compute_acf`; see that
        function for details.

        Parameters
        ----------
        filename : str
            Name of a samples file to compute ACFs for.
        \**kwargs :
            All other keyword arguments are passed to
            :py:func:`base_multitemper.compute_acf`.

        Returns
        -------
        dict :
            Dictionary of arrays giving the ACFs for each parameter. The arrays
            will have shape ``ntemps x nchains x niterations``.
        """
        return compute_acf(filename, **kwargs)

    @staticmethod
    def compute_acl(filename, **kwargs):
        r"""Computes the autocorrelation length.

        Calls :py:func:`base_multitemper.compute_acl`; see that
        function for details.

        Parameters
        -----------
        filename : str
            Name of a samples file to compute ACLs for.
        \**kwargs :
            All other keyword arguments are passed to
            :py:func:`base_multitemper.compute_acl`.

        Returns
        -------
        dict
            A dictionary of ntemps-long arrays of the ACLs of each parameter.
        """
        return compute_acl(filename, **kwargs)

    @property
    def acl(self):  # pylint: disable=invalid-overridden-method
        """The autocorrelation lengths of the chains.
        """
        return acl_from_raw_acls(self.raw_acls)

    @property
    def effective_nsamples(self):  # pylint: disable=invalid-overridden-method
        """The effective number of samples post burn-in that the sampler has
        acquired so far.
        """
        act = self.act
        if act is None:
            # no ACT estimate yet: treat chains as fully correlated
            act = numpy.inf
        if self.burn_in is None:
            start_iter = 0
        else:
            start_iter = self.burn_in.burn_in_iteration
        nperchain = nsamples_in_chain(start_iter, act, self.niterations)
        if self.burn_in is not None:
            # ensure that any chain not burned in has zero samples
            nperchain[~self.burn_in.is_burned_in] = 0
            # and that any chain that is burned in has at least one sample
            nperchain[self.burn_in.is_burned_in & (nperchain < 1)] = 1
        return int(nperchain.sum())

    @property
    def samples(self):
        """A dict mapping ``variable_params`` to arrays of samples currently
        in memory.

        The arrays have shape ``ntemps x nchains x niterations``.

        The dictionary also contains sampling parameters.
        """
        samples = epsie.array2dict(self._sampler.positions)
        # apply boundary conditions
        samples = self.model.prior_distribution.apply_boundary_conditions(
            **samples)
        # apply transforms to go to model's variable params space
        if self.model.sampling_transforms is not None:
            samples = self.model.sampling_transforms.apply(
                samples, inverse=True)
        return samples

    @property
    def model_stats(self):
        """A dict mapping the model's ``default_stats`` to arrays of values.

        The arrays have shape ``ntemps x nchains x niterations``.
        """
        return epsie.array2dict(self._sampler.blobs)

    def clear_samples(self):
        """Clears the chain and blobs from memory.
        """
        # store the iteration that the clear is occurring on
        self._lastclear = self.niterations
        self._itercounter = 0
        # now clear the sampler
        self._sampler.clear()

    def set_state_from_file(self, filename):
        """Sets the state of the sampler back to the instance saved in a file.
        """
        with self.io(filename, 'r') as fp:
            # get the numpy state
            numpy_rstate_group = '/'.join([fp.sampler_group,
                                           'numpy_random_state'])
            rstate = fp.read_random_state(group=numpy_rstate_group)
            # set the sampler state for epsie
            self._sampler.set_state_from_checkpoint(fp, path=fp.sampler_group)
        # set the global numpy random state for pycbc
        numpy.random.set_state(rstate)

    def set_p0(self, samples_file=None, prior=None):
        """Sets the initial position of the chains and hands it to epsie.

        See :py:meth:`BaseMCMC.set_p0` for the meaning of the arguments.
        """
        p0 = super(EpsieSampler, self).set_p0(samples_file=samples_file,
                                              prior=prior)
        self._sampler.start_position = p0

    @property
    def pos(self):
        """A dictionary of the current chain positions."""
        # we override BaseMCMC's pos property because this can be directly
        # retrieved from epsie
        return self._sampler.current_positions

    def run_mcmc(self, niterations):
        """Advance the chains for a number of iterations.

        Parameters
        ----------
        niterations : int
            Number of samples to get from sampler.
        """
        self._sampler.run(niterations)

    def write_results(self, filename):
        """Writes samples, model stats, acceptance ratios, and random state
        to the given file.

        Parameters
        -----------
        filename : str
            The file to write to. The file is opened using the ``io`` class
            in an append state.
        """
        with self.io(filename, 'a') as fp:
            # write samples
            fp.write_samples(self.samples,
                             parameters=self.model.variable_params,
                             last_iteration=self.niterations)
            # write stats
            fp.write_samples(self.model_stats, last_iteration=self.niterations)
            # write acceptance ratio
            acceptance = self._sampler.acceptance
            fp.write_acceptance_ratio(acceptance['acceptance_ratio'],
                                      last_iteration=self.niterations)
            # write temperature data (only meaningful when tempering)
            if self.ntemps > 1:
                temp_ar = self._sampler.temperature_acceptance
                temp_swaps = self._sampler.temperature_swaps
                fp.write_temperature_data(temp_swaps, temp_ar,
                                          self.swap_interval,
                                          last_iteration=self.niterations)
            # write numpy's global state (for the distributions)
            numpy_rstate_group = '/'.join([fp.sampler_group,
                                           'numpy_random_state'])
            fp.write_random_state(group=numpy_rstate_group)
            # write the sampler's state
            self._sampler.checkpoint(fp, path=fp.sampler_group)

    def finalize(self):
        # Nothing to do at the end of a run for this sampler.
        pass

    @classmethod
    def from_config(cls, cp, model, output_file=None, nprocesses=1,
                    use_mpi=False):
        """Loads the sampler from the given config file.

        The following options are retrieved in the ``[sampler]`` section:

        * ``name`` :
            (required) must match the sampler's name
        * ``nchains`` :
            (required) the number of chains to use
        * ``ntemps`` :
            The number of temperatures to use. Either this, or
            ``inverse-temperatures-file`` must be provided (but not both).
        * ``inverse-temperatures-file`` :
            Path to an hdf file containing the inverse temperatures ("betas")
            to use. The betas will be retrieved from the file's
            ``.attrs['betas']``. Either this or ``ntemps`` must be provided
            (but not both).
        * ``niterations`` :
            The number of iterations to run the sampler for. Either this or
            ``effective-nsamples`` must be provided (but not both).
        * ``effective-nsamples`` :
            Run the sampler until the given number of effective samples are
            obtained. A ``checkpoint-interval`` must also be provided in this
            case. Either this or ``niterations`` must be provided (but not
            both).
        * ``thin-interval`` :
            Thin the samples by the given value before saving to disk. May
            provide this, or ``max-samples-per-chain``, but not both. If
            neither options are provided, will save all samples.
        * ``max-samples-per-chain`` :
            Thin the samples such that the number of samples per chain per
            temperature that are saved to disk never exceeds the given value.
            May provide this, or ``thin-interval``, but not both. If neither
            options are provided, will save all samples.
        * ``checkpoint-interval`` :
            Sets the checkpoint interval to use. Must be provided if using
            ``effective-nsamples``.
        * ``checkpoint-signal`` :
            Set the checkpoint signal, e.g., "USR2". Optional.
        * ``seed`` :
            The seed to use for epsie's random number generator. If not
            provided, epsie will create one.
        * ``logl-function`` :
            The attribute of the model to use for the loglikelihood. If
            not provided, will default to ``loglikelihood``.
        * ``swap-interval`` :
            The number of iterations between temperature swaps. Default is 1.

        Jump proposals must be provided for every sampling
        parameter. These are retrieved from subsections
        ``[jump_proposal-{params}]``, where params is a
        :py:const:`pycbc.VARARGS_DELIM` separated list of parameters the
        proposal should be used for. See
        :py:func:`inference.jump.epsie_proposals_from_config` for
        details.

        .. note::
            Jump proposals should be specified for **sampling parameters**,
            not **variable parameters**.

        Settings for burn-in tests are read from ``[sampler-burn_in]``. In
        particular, the ``burn-in-test`` option is used to set the burn in
        tests to perform. See
        :py:func:`MultiTemperedMCMCBurnInTests.from_config` for details. If no
        ``burn-in-test`` is provided, no burn in tests will be carried out.

        Parameters
        ----------
        cp : WorkflowConfigParser instance
            Config file object to parse.
        model : pycbc.inference.model.BaseModel instance
            The model to use.
        output_file : str, optional
            The name of the output file to checkpoint and write results to.
        nprocesses : int, optional
            The number of parallel processes to use. Default is 1.
        use_mpi : bool, optional
            Use MPI for parallelization. Default is False.

        Returns
        -------
        EpsiePTSampler :
            The sampler instance.
        """
        section = "sampler"
        # check name
        assert cp.get(section, "name") == cls.name, (
            "name in section [sampler] must match mine")
        nchains = int(cp.get(section, "nchains"))
        seed = get_optional_arg_from_config(cp, section, 'seed', dtype=int)
        ntemps, betas = cls.betas_from_config(cp, section)
        # get the swap interval
        swap_interval = get_optional_arg_from_config(cp, section,
                                                     'swap-interval',
                                                     dtype=int)
        if swap_interval is None:
            swap_interval = 1
        # get the checkpoint interval, if it's specified
        checkpoint_interval = cls.checkpoint_from_config(cp, section)
        checkpoint_signal = cls.ckpt_signal_from_config(cp, section)
        # get the loglikelihood function
        logl = get_optional_arg_from_config(cp, section, 'logl-function')
        # get the proposals
        proposals = epsie_proposals_from_config(cp)
        # check that all of the sampling parameters have a specified
        # proposal
        sampling_params = set(model.sampling_params)
        proposal_params = set(param for prop in proposals
                              for param in prop.parameters)
        missing = sampling_params - proposal_params
        if missing:
            raise ValueError("Missing jump proposals for sampling parameters "
                             "{}".format(', '.join(missing)))
        # initialize
        obj = cls(model, nchains,
                  ntemps=ntemps, betas=betas, proposals=proposals,
                  swap_interval=swap_interval, seed=seed,
                  checkpoint_interval=checkpoint_interval,
                  checkpoint_signal=checkpoint_signal,
                  loglikelihood_function=logl,
                  nprocesses=nprocesses, use_mpi=use_mpi)
        # set target
        obj.set_target_from_config(cp, section)
        # add burn-in if it's specified
        obj.set_burn_in_from_config(cp)
        # set prethin options
        obj.set_thin_interval_from_config(cp, section)
        # Set up the output file
        setup_output(obj, output_file)
        if obj.new_checkpoint:
            obj.set_start_from_config(cp)
        else:
            obj.resume_from_checkpoint()
        return obj
class _EpsieCallModel(object):
    """Adapter that lets epsie invoke a pycbc model as a plain callable.

    Calling an instance updates the model with the proposed parameter
    values and returns ``(loglikelihood, logprior, current_stats)``.
    """

    def __init__(self, model, loglikelihood_function=None):
        # Fall back to the model's standard ``loglikelihood`` attribute
        # when no alternative statistic is requested.
        self.model = model
        self.loglikelihood_function = (
            'loglikelihood' if loglikelihood_function is None
            else loglikelihood_function)

    def __call__(self, **kwargs):
        """Update the model, then evaluate log prior and log likelihood."""
        self.model.update(**kwargs)
        logp = self.model.logprior
        if logp == -numpy.inf:
            # the prior rules this point out; skip the (potentially
            # expensive) likelihood evaluation entirely
            logl = numpy.nan
        else:
            logl = getattr(self.model, self.loglikelihood_function)
        return logl, logp, self.model.current_stats
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@pycbc@inference@sampler@epsie.py@.PATH_END.py
|
{
"filename": "fit_ns.py",
"repo_name": "mtalapinto/moes",
"repo_path": "carmenes/fit_ns.py",
"type": "Python"
}
|
import matplotlib as mpl
# mpl.use('qt4agg')
from optics import env_data
import matplotlib.pyplot as plt
import pymultinest
import numpy as np
import utils
from optics import parameters
from optics import vis_spectrometer
import time
import ws_load
import os
import pandas as pd
import dynesty
import corner
from astropy.time import Time
import do_cornerplot
def rewrite_best_fit_params(date):
    """Copy the nested-sampling best-fit values for *date* into the
    fibre-A simulation parameter file."""
    # Indices (into the full parameter vector) of the fitted parameters,
    # in the same order as the rows of best_fit_params.tsv.
    pars_index = [42, 11, 12, 9, 13, 10, 35, 17, 28, 21, 33, 24, 29, 36, 39]
    bestfit_file = pd.read_csv('plots/posteriors/'+str(date)+'/best_fit_params.tsv', names=['pars'])
    print(bestfit_file)
    init_state_a = parameters.load_sa(date, 'a')
    print(init_state_a)
    # Overwrite only the fitted entries; everything else keeps its value.
    for row, par in enumerate(pars_index):
        init_state_a[par] = bestfit_file['pars'].values[row]
    parameters.write_sim(date, init_state_a, 'a')
# We load our data set
def carmenes_vis(date, fiber):
    """Fit the MOES instrument model to a CARMENES VIS wavelength solution
    for one night with MultiNest (15 free instrument parameters).

    Parameters
    ----------
    date : str
        Observation date; used to load the data and to name output files.
    fiber : str
        Fibre to fit, 'a' or 'b'.

    Side effects: runs pymultinest, writes posterior samples under
    /luthien/carmenes/vis/params/posteriors/<date>/, and builds (mostly
    cleared/commented-out) diagnostic plots.
    """
    par_ini = parameters.load_date(fiber, date)
    # write_old = parameters.write_old(par_ini)
    # We load the data
    wsa_data, wsb_data = ws_load.carmenes_vis_ws_for_fit(date)
    wsa_data = np.array(wsa_data)
    wsb_data = np.array(wsb_data)
    if fiber == 'a':
        spec = ws_load.spectrum_from_ws(wsa_data)
        fib = 'A'
    else:
        spec = ws_load.spectrum_from_ws(wsb_data)
        fib = 'B'
    temps = env_data.get_temps_date(date)
    # NOTE(review): other functions in this file call
    # parameters.load_sa(date, fiber); here only the fiber is passed —
    # confirm which signature is current.
    init_state = parameters.load_sa(str(fiber))
    pressure = env_data.get_p_date(date)
    # last element of the state vector is the ambient pressure
    init_state[-1] = pressure
    print(init_state)
    if fiber == 'a':
        ws_model = vis_spectrometer.tracing(spec, init_state, 'A', temps)
        # ws_data, ws_model = sigma_clip(wsa_data, wsa_model, 5)
        ws_data = wsa_data
    else:
        ws_model = vis_spectrometer.tracing(spec, init_state, 'B', temps)
        ws_data = wsb_data
        # ws_data, ws_model = sigma_clip(wsb_data, wsb_model, 5)
    # quick-look overlay of measured (black) vs. modelled (red) positions
    plt.plot(ws_data[:, 3], ws_data[:, 5], 'k+')
    plt.plot(ws_model[:, 2], ws_model[:, 3], 'r+')
    # plt.show()
    y = ws_data
    x = spec
    # NOTE(review): shadowed by the sigma_fit_x defined inside loglike()
    # below, so this value is never used — confirm intent.
    sigma_fit_x = ws_data[:, 4]
    plt.plot(ws_data[:, 3], ws_data[:, 5] - ws_model[:, 3], 'k.')
    # plt.show()
    plt.clf()
    plt.plot(ws_data[:, 3], ws_data[:, 3] - ws_model[:, 2], 'k.')
    # plt.show()
    plt.clf()

    # Define the prior (you have to transform your parameters, that come
    # from the unit cube, to the prior you want):
    def prior(cube, ndim, nparams):
        # Uniform priors centred on the initial state, sorted by importance.
        # NOTE(review): delta2 is defined but unused here — confirm.
        delta0 = 1e-4
        delta1 = 1e-6
        delta2 = 1e-5
        cube[0] = utils.transform_uniform(cube[0], par_ini[42] - delta1, par_ini[42] + delta1)  # ccd tilt z
        cube[1] = utils.transform_uniform(cube[1], par_ini[11] - delta0, par_ini[11] + delta0)  # echelle G
        cube[2] = utils.transform_uniform(cube[2], par_ini[12] - delta0, par_ini[12] + delta0)  # echelle blaze
        cube[3] = utils.transform_uniform(cube[3], par_ini[9] - delta0, par_ini[9] + delta0)  # coll tilt x
        cube[4] = utils.transform_uniform(cube[4], par_ini[13] - delta0, par_ini[13] + delta0)  # echelle gamma
        cube[5] = utils.transform_uniform(cube[5], par_ini[10] - delta0, par_ini[10] + delta0)  # coll tilt y
        cube[6] = utils.transform_uniform(cube[6], par_ini[35] - delta0, par_ini[35] + delta0)  # ccd ff tilt z
        cube[7] = utils.transform_uniform(cube[7], par_ini[17] - delta0, par_ini[17] + delta0)  # trf mirror tilt y
        cube[8] = utils.transform_uniform(cube[8], par_ini[28] - delta0, par_ini[28] + delta0)  # cam tilt x
        cube[9] = utils.transform_uniform(cube[9], par_ini[21] - delta0, par_ini[21] + delta0)  # grm tilt x
        cube[10] = utils.transform_uniform(cube[10], par_ini[33] - delta0, par_ini[33] + delta0)  # ccd ff tilt x
        cube[11] = utils.transform_uniform(cube[11], par_ini[24] - delta0, par_ini[24] + delta0)  # grm apex
        cube[12] = utils.transform_uniform(cube[12], par_ini[29] - delta0, par_ini[29] + delta0)  # cam tilt y
        cube[13] = utils.transform_uniform(cube[13], par_ini[36] - delta0, par_ini[36] + delta0)  # d ff ccd
        cube[14] = utils.transform_uniform(cube[14], par_ini[39] - delta0, par_ini[39] + delta0)  # ccd defocus

    # Define the likelihood:
    def loglike(cube, ndim, nparams):
        # Load parameters
        pars = parameters.load_sa(fiber)
        pars[42] = cube[0]  # ccd tilt z
        pars[11] = cube[1]  # echelle G
        pars[12] = cube[2]  # echelle blaze
        pars[9] = cube[3]  # coll tilt x
        pars[13] = cube[4]  # echelle gamma
        pars[10] = cube[5]  # coll tilt y
        pars[35] = cube[6]  # ccd_ff_tilt_z
        pars[17] = cube[7]  # trf mirror tilt y
        pars[28] = cube[8]  # cam tilt x
        pars[21] = cube[9]  # grm tilt x
        pars[33] = cube[10]  # ccd ff tilt x
        pars[24] = cube[11]  # grm apex
        pars[29] = cube[12]  # cam tilt y
        pars[36] = cube[13]  # d ff ccd
        pars[39] = cube[14]  # ccd defocus
        # Generate model:
        model = vis_spectrometer.tracing(x, pars, fib, temps)
        # Evaluate the log-likelihood:
        # NOTE(review): fixed 0.01-pixel uncertainties are used here (the
        # measured ws_data[:, 4] above is ignored), and only the x residuals
        # enter the likelihood — confirm both are intentional.
        sigma_fit_y = np.full(len(y), .01)
        sigma_fit_x = np.full(len(y), .01)
        ndata = len(y)
        loglikelihood = -0.5 * ndata * np.log(2. * np.pi * sigma_fit_x ** 2).sum() + (
                -0.5 * ((model[:, 2] - y[:, 3]) / sigma_fit_x) ** 2).sum()
        return loglikelihood

    n_params = 15
    #path = '/home/eduspec/Documentos/moes/v3.1/vis/ns_moes/'
    #if not os.path.exists(path):
    #    os.makedirs(path)
    # NOTE(review): outdir is only created *after* pymultinest.run writes
    # to out_file inside the same directory — verify the directory exists
    # beforehand on a fresh machine.
    out_file = '/luthien/carmenes/vis/params/posteriors/' + str(date) + '/carm_vis_' + fiber
    # Run MultiNest:
    pymultinest.run(loglike, prior, n_params, n_live_points=300, outputfiles_basename=out_file, resume=False,
                    verbose=False)
    # Get output:
    output = pymultinest.Analyzer(outputfiles_basename=out_file, n_params=n_params)
    # Get out parameters: this matrix has (samples,n_params+1):
    bestfit_params = output.get_best_fit()
    print(bestfit_params['parameters'])
    mc_samples = output.get_equal_weighted_posterior()[:, :-1]
    outdir = '/luthien/carmenes/vis/params/posteriors/' + str(date) + '/'
    if not os.path.exists(outdir):
        os.mkdir(outdir)
    mc_samples_data = pd.DataFrame(mc_samples)
    mc_samples_data.to_csv(outdir + 'samples.tsv', sep=',', index=False)
    #samples_out = pd.read_csv(outdir + 'samples.tsv')
    # NOTE(review): the triple-quoted block below is dead plotting code
    # kept as a string literal; preserved verbatim.
    '''
    # print('Multicarlos optimization duration : %.3f hr' % (float(t2)))
    import corner
    posterior_names = []
    pars_index = [42, 11, 12, 9, 13, 10, 35, 17, 28, 21, 33, 24, 29, 36, 39]
    new_pars = init_state
    i = 0
    for par in pars_index:
        posterior_names.append(parameters.get_name(par))
        new_pars[par] = bestfit_params['parameters'][i]
        print(parameters.get_name(par), init_state[par], new_pars[par], bestfit_params['parameters'][i])
        i += 1
    first_time = True
    # for i in range(n_params):
    #     if first_time:
    #         posterior_data = output.posteriors['posterior_samples'][i]
    #         first_time = False
    #     else:
    #         posterior_data = np.vstack((posterior_data, results.posteriors['posterior_samples'][i]))
    # posterior_data = posterior_data.T
    samples_names = [r'CCD T$_z$ (deg)', r'G$_{ech}$(mm$^{-1}$)', r'$\theta_{Blaze}$(m/s)',
                     r'Coll T$_x$ (deg)', r'$\gamma_{ech}$~(deg)', r'Coll T$_y$ (deg)',
                     r'CCD-FF T$_z$ (deg)', r'TM T$_{y}$ (deg)', r'Cam. T$_{x}$(deg)',
                     r'Grism T$_{x}$(deg)', r'CCD-FF T$_{x}$(deg)', r'Grism Apex~[(deg)',
                     r'Cam. T$_{y}$', r'd$_{FF-CCD}$~(mm)', r'CCD defocus~(mm)']
    #samples = pd.read_csv('plots/posteriors/2017-10-20/mc_samples.tsv', sep=',', names=samples_names)
    samples = pd.DataFrame(mc_samples, columns=samples_names)
    best_ns_pars = pd.read_csv('plots/posteriors/2017-10-20/best_fit_params.tsv', names=['pars'])
    samples_plot = pd.DataFrame()
    samples_plot[r'CCD T$_z$ (deg)'] = samples[r'CCD T$_z$ (deg)']
    samples_plot[r'G$_{ech}$(mm$^{-1}$)'] = samples[r'G$_{ech}$(mm$^{-1}$)']
    samples_plot[r'$\theta_{Blaze}$(m/s)'] = samples[r'$\theta_{Blaze}$(m/s)']
    samples_plot[r'Coll T$_x$ (deg)'] = samples[r'Coll T$_x$ (deg)']
    samples_plot[r'$\gamma_{ech}$~(deg)'] = samples[r'$\gamma_{ech}$~(deg)']
    samples_plot[r'Coll T$_y$ (deg)'] = samples[r'Coll T$_y$ (deg)']
    samples_plot[r'CCD-FF T$_z$ (deg)'] = samples[r'CCD-FF T$_z$ (deg)']
    samples_plot[r'TM T$_{y}$ (deg)'] = samples[r'TM T$_{y}$ (deg)']
    samples_plot[r'Cam. T$_{x}$(deg)'] = samples[r'Cam. T$_{x}$(deg)']
    samples_names_plot = [r'CCD T$_z$ (deg)', r'G$_{ech}$(mm$^{-1}$)', r'$\theta_{Blaze}$(m/s)',
                          r'Coll T$_x$ (deg)', r'$\gamma_{ech}$~(deg)', r'Coll T$_y$ (deg)',
                          r'CCD-FF T$_z$ (deg)', r'TM T$_{y}$ (deg)', r'Cam. T$_{x}$(deg)']
    pars_index = [42, 11, 12, 9, 13, 10, 35, 17, 28]  # , 21, 33, 24, 29, 36, 39]
    date = '2017-10-20'
    pars = parameters.load_sa(date, 'a')
    best_fit_sa = []
    for k in range(len(pars_index)):
        best_fit_sa.append(pars[pars_index[k]])
    # best_fit_par_dyn = [-2.08231, 13.37869, 40.68816416, 456.53436023, 0.03549355, 70.53001648, 153.82247384, 13.90580052, 2289.06760189, 0.27982054, 253.14207864, 11.37663006]
    # best_fit_par_stab = [-2.24, 13.22, 40.09, 456.81, 0.05, 89.68, 134.72, 12.59, 2369.76, 0.22, 210.58, 62.22]
    quantiles = corner.quantile(samples_plot, 0.6827)
    print(quantiles)
    figure = corner.corner(samples_plot,
                           labels=samples_names_plot,
                           bins=25,
                           color='k',
                           reverse=False,
                           levels=(0.6827, 0.9545, 0.9973),
                           plot_contours=True,
                           show_titles=True,
                           title_fmt=".2E",
                           title_kwargs={"fontsize": 10},
                           truths=best_fit_sa,
                           dpi=400,
                           truth_color='r',
                           scale_hist=True,
                           no_fill_contours=True,
                           plot_datapoints=True)
    '''
    #k = 0
    #corner.overplot_points(figure, best_ns_pars['pars'].values.T[None], marker="s", color="b")
    # for k in range(len(best_ns_pars)):
    #     corner.overplot_points(figure, best_ns_pars['pars'].values[k][None], marker="s", color="b")
    #plt.tight_layout()
    #plt.savefig('plots/posteriors/2017-10-20/corner_final.png')
    # plt.show()
    #figure = corner.corner(mc_samples, labels=posterior_names)
    #plt.savefig(outdir + 'moes_ins_corner_' + fiber + '_' + date + '.png')
def carmenes_vis_v2(date, fiber):
    """Nested-sampling fit (MultiNest) of the 9 most important MOES
    instrument parameters to a CARMENES VIS wavelength solution.

    Gaussian priors are centred on the stored parameter state for *date*;
    best-fit values and equally-weighted posterior samples are written to
    /luthien/carmenes/vis/ns_results/<date>/.

    Parameters
    ----------
    date : str
        Observation date.
    fiber : str
        Fibre to fit, 'a' or 'b'.
    """
    wsa_data, wsb_data = ws_load.carmenes_vis_ws_for_fit(date)
    wsa_data = np.array(wsa_data)
    wsb_data = np.array(wsb_data)
    spec_a = ws_load.spectrum_from_ws(wsa_data)
    spec_b = ws_load.spectrum_from_ws(wsb_data)
    pressure = env_data.get_p_date(date)
    init_state_a = parameters.load_sa(date, 'a')
    init_state_b = parameters.load_sa(date, 'b')
    # last element of each state vector is the ambient pressure
    init_state_a[-1] = pressure
    init_state_b[-1] = pressure
    temps = env_data.get_temps_date(date)
    wsa_model = vis_spectrometer.tracing(spec_a, init_state_a, 'A', temps)
    wsb_model = vis_spectrometer.tracing(spec_b, init_state_b, 'B', temps)
    # rms residuals of the initial (pre-fit) model, in x and y
    res_x_a = np.sqrt(np.mean((wsa_data[:, 3] - wsa_model[:, 2]) ** 2))
    res_y_a = np.sqrt(np.mean((wsa_data[:, 5] - wsa_model[:, 3]) ** 2))
    res_x_b = np.sqrt(np.mean((wsb_data[:, 3] - wsb_model[:, 2]) ** 2))
    res_y_b = np.sqrt(np.mean((wsb_data[:, 5] - wsb_model[:, 3]) ** 2))
    print('Initial residuals')
    print('Fiber A')
    print('res_x =', res_x_a, ', res_y = ', res_y_a)
    print('Fiber B')
    print('res_x =', res_x_b, ', res_y = ', res_y_b)
    # We do only fiber A
    if fiber == 'a':
        y = wsa_data
        x = spec_a
        par_ini = init_state_a.copy()
    elif fiber == 'b':
        y = wsb_data
        x = spec_b
        par_ini = init_state_b.copy()

    def prior(cube, ndim, nparams):
        # Gaussian priors centred on the initial parameters, sorted by
        # importance. NOTE(review): delta1 is defined but unused — confirm.
        delta0 = 3.5e-6
        delta1 = 1.e-6
        delta2 = 5.e-5
        cube[0] = utils.transform_normal(cube[0], par_ini[42], 7.5e-6)  # ccd tilt z
        cube[1] = utils.transform_normal(cube[1], par_ini[11], delta0)  # echelle G
        cube[2] = utils.transform_normal(cube[2], par_ini[12], 5.e-5)  # echelle blaze
        cube[3] = utils.transform_normal(cube[3], par_ini[9], delta2)  # coll tilt x
        cube[4] = utils.transform_normal(cube[4], par_ini[13], delta2)  # echelle gamma
        cube[5] = utils.transform_normal(cube[5], par_ini[10], delta2)  # coll tilt y
        cube[6] = utils.transform_normal(cube[6], par_ini[35], 5.e-3)  # ccd ff tilt z
        cube[7] = utils.transform_normal(cube[7], par_ini[17], 2*delta2)  # trf mirror tilt y
        cube[8] = utils.transform_normal(cube[8], par_ini[28], delta0)  # cam tilt x
        # cube[9] = utils.transform_normal(cube[9], par_ini[21], delta0)  # grm tilt x
        # cube[10] = utils.transform_normal(cube[10], par_ini[33], delta0)  # ccd ff tilt x
        # cube[11] = utils.transform_normal(cube[11], par_ini[24], delta2)  # grm apex
        # cube[12] = utils.transform_normal(cube[12], par_ini[29], delta2)  # cam tilt y
        # cube[13] = utils.transform_normal(cube[13], par_ini[36], delta2)  # d ff ccd
        # cube[14] = utils.transform_normal(cube[14], par_ini[39], delta2)  # ccd defocus

    def loglike(cube, ndim, nparams):
        # Load parameters
        if fiber == 'a':
            pars = parameters.load_sa(date, 'a')
        elif fiber == 'b':
            pars = parameters.load_sa(date, 'b')
        pars[42] = cube[0]  # ccd tilt z
        pars[11] = cube[1]  # echelle G
        pars[12] = cube[2]  # echelle blaze
        pars[9] = cube[3]  # coll tilt x
        pars[13] = cube[4]  # echelle gamma
        pars[10] = cube[5]  # coll tilt y
        pars[35] = cube[6]  # ccd_ff_tilt_z
        pars[17] = cube[7]  # trf mirror tilt y
        pars[28] = cube[8]  # cam tilt x
        #pars[21] = cube[9]  # grm tilt x
        #pars[33] = cube[10]  # ccd ff tilt x
        #pars[24] = cube[11]  # grm apex
        #pars[29] = cube[12]  # cam tilt y
        #pars[36] = cube[13]  # d ff ccd
        #pars[39] = cube[14]  # ccd defocus
        # sanity check on the loaded parameter vector length
        if len(pars) < 43:
            print('chafa')
        # Generate model:
        if fiber == 'a':
            model = vis_spectrometer.tracing(x, pars, 'A', temps)
        elif fiber == 'b':
            model = vis_spectrometer.tracing(x, pars, 'B', temps)
        # Evaluate the log-likelihood (x and y residuals, measured
        # per-line uncertainties from column 4):
        #sigma_fit_x = np.full(len(y), y[:, 4])
        sigma_fit_x = y[:, 4]
        ndata = len(y)
        loglikelihood = -0.5 * ndata * np.log(2. * np.pi * sigma_fit_x ** 2).sum() + \
                        (-0.5 * ((model[:, 2] - y[:, 3]) / sigma_fit_x) ** 2).sum() + \
                        (-0.5 * ((model[:, 3] - y[:, 5]) / sigma_fit_x) ** 2).sum()
        return loglikelihood

    n_params = 9
    path = "".join(['/luthien/carmenes/vis/ns_results/', date, '/'])
    if not os.path.exists(path):
        os.makedirs(path)
    out_file = "".join([path, 'ns_fit_'+str(fiber)+'_'])
    # Run MultiNest:
    pymultinest.run(loglike, prior, n_params, n_live_points=300, outputfiles_basename=out_file, resume=False,
                    verbose=False)
    # Get output:
    output = pymultinest.Analyzer(outputfiles_basename=out_file, n_params=n_params)
    # Get out parameters: this matrix has (samples,n_params+1):
    bestfit_params = output.get_best_fit()
    mc_samples = output.get_equal_weighted_posterior()[:, :-1]
    #outdir = path
    #if not os.path.exists(outdir):
    #    os.mkdir(outdir)
    bestfitout = pd.DataFrame(bestfit_params['parameters'])
    bestfitout.to_csv(path + 'best_fit_params_'+str(fiber)+'.tsv', index=False, header=False)
    samplesout = pd.DataFrame(mc_samples)
    samplesout.to_csv(path + 'mc_samples_'+str(fiber)+'.tsv', index=False, header=False)
    #figure = corner.corner(mc_samples) # , labels=posterior_names)
    #plt.tight_layout()
    #plt.savefig(outdir + 'parameters_cornerplot_'+str(fiber)+'.png')
    #plt.tight_layout()
    #plt.show()
    #plt.close()
    print('Nested sampling of instrumental parameters for date ', date, ' done.')
def carmenes_vis_multinest(date, fiber):
    """Fit 9 instrumental parameters of the CARMENES VIS model with MultiNest.

    Parameters
    ----------
    date : str
        Observation date; selects the ws data, environment data and the
        initial parameter vector.
    fiber : str
        'a' or 'b'; selects which fiber's ws data is fitted.

    Side effects
    ------------
    Runs pymultinest (chain files under ``data/posteriors/<date>/``),
    writes ``mc_samples.tsv`` and ``best_fit_params_<fiber>.tsv``, shows a
    blocking diagnostic residual plot and draws a corner plot.
    """
    if fiber == 'a':
        fib = 'A'
    elif fiber == 'b':
        fib = 'B'
    wsa_data, wsb_data = ws_load.load_ws_for_fit(date)
    pressure = env_data.get_P_at_ws(date)
    init_state = parameters.load_date(fib, date)
    init_state[-1] = pressure  # last entry of the state vector holds pressure
    temps = env_data.get_T_at_ws(date)
    # Select the requested fiber's data and trace it once with the initial
    # state to report the starting residuals.
    ws_data = wsa_data if fiber == 'a' else wsb_data
    spec = ws_load.spectrum_from_data(ws_data)
    ws_model = vis_spectrometer.tracing(spec, init_state, fib, temps)
    print(len(ws_model))
    res_x = np.sqrt(np.sum((ws_data['posm'].values - ws_model['x'].values) ** 2) / len(ws_data))
    res_y = np.sqrt(np.sum((ws_data['posmy'].values - ws_model['y'].values) ** 2) / len(ws_data))
    print('Initial residuals, fiber = ', fiber)
    print('res_x =', res_x, ', res_y = ', res_y)
    print(len(ws_data), len(ws_model))
    # Diagnostic: model residuals (black) vs. pipeline residuals (blue).
    plt.plot(ws_data['posm'], ws_data['posm'].values - ws_model['x'].values, 'k.', zorder=10)
    plt.plot(ws_data['posm'], ws_data['posm'].values - ws_data['posc'].values, 'b.', zorder=0, alpha=0.5)
    plt.show()  # NOTE: blocking with interactive backends
    y = ws_data
    x = spec
    par_ini = init_state.copy()
    # Indices (into the MOES parameter vector) of the 9 fitted parameters.
    # A single list shared by prior, loglike and the best-fit unpacking
    # below, so the three mappings cannot drift apart.
    fit_idx = [11, 9, 12, 21, 28, 10, 13, 1, 32]
    delta0 = 3.5e-6  # Gaussian prior width, identical for all parameters

    def prior(cube, ndim, nparams):
        # Map the unit cube onto Gaussian priors centred on the initial state.
        for k, idx in enumerate(fit_idx):
            cube[k] = utils.transform_normal(cube[k], par_ini[idx], delta0)
        return cube

    def loglike(cube, ndim, nparams):
        # Rebuild the full parameter vector, overwrite the fitted entries
        # with the proposed values and trace the spectrum.
        pars = parameters.load_date(fib, date)
        for k, idx in enumerate(fit_idx):
            pars[idx] = cube[k]
        model = vis_spectrometer.tracing(x, pars, fib, temps)
        sigma_fit_x = np.full(len(model), 0.1)  # fixed 0.1 px positional uncertainty
        ndata = len(y)
        # Gaussian log-likelihood over the x and y position residuals.
        loglikelihood = -0.5 * ndata * np.log(2. * np.pi * sigma_fit_x ** 2).sum() + \
                        (-0.5 * ((model['x'].values - y['posm'].values) / sigma_fit_x) ** 2).sum() + \
                        (-0.5 * ((model['y'].values - y['posmy'].values) / sigma_fit_x) ** 2).sum()
        return loglikelihood

    n_params = len(fit_idx)
    path = "".join(['data/posteriors/' + date + '/'])
    if not os.path.exists(path):
        os.makedirs(path)
    out_file = "".join([path, 'dfit_' + str(fiber) + ''])
    # Run MultiNest (resume=True continues from any existing chain files).
    pymultinest.run(loglike, prior, n_params, n_live_points=500, outputfiles_basename=out_file, resume=True,
                    verbose=True)
    # Collect the posterior and the best-fit point.
    output = pymultinest.Analyzer(outputfiles_basename=out_file, n_params=n_params)
    bestfit_params = output.get_best_fit()
    maxpars = output.get_mode_stats()['modes'][0]['maximum']
    print(maxpars[0])
    mc_samples = output.get_equal_weighted_posterior()[:, :-1]
    samples_names = [r'Ech. G [mm$^{-1}$]', r'Coll x-tilt [deg]', r'Ech. inc. angle [deg]',
                     r'Grism x-tilt [deg]', r'Cam x-tilt [deg]', r'Coll y-tilt [deg]',
                     r'Ech. $\gamma$-angle [deg]', r'Slit x-dec [mm]', r'Field flattener y-dec [mm]']
    mc_samples_data = pd.DataFrame(mc_samples)
    mc_samples_data.to_csv(path + 'mc_samples.tsv', sep=',', index=False, header=samples_names)
    bestfitout = pd.DataFrame(bestfit_params['parameters'])
    bestfitout.to_csv(path + 'best_fit_params_'+str(fiber)+'.tsv', index=False, header=False)
    # Rebuild the best-fit parameter vector with the same index mapping as
    # loglike (currently not persisted; kept for inspection/debugging).
    pars = parameters.load_date(fib, date)
    for k, idx in enumerate(fit_idx):
        pars[idx] = bestfit_params['parameters'][k]
    do_cornerplot.do_plot(date)
    print('Multinest of instrumental parameters for date ', date, ' done.')
def carmenes_vis_multinest_full(date, fiber):
    """Fit the full set of 44 instrumental parameters of the CARMENES VIS
    model with MultiNest.

    Parameters
    ----------
    date : str
        Observation date; selects the ws data, environment data and the
        initial parameter vector.
    fiber : str
        'a' or 'b'; selects which fiber's ws data is fitted.

    Side effects
    ------------
    Runs pymultinest (chain files under ``data/posteriors/<date>_full/``),
    writes the equally weighted posterior samples to ``mc_samples.tsv``
    and shows a blocking diagnostic residual plot.
    """
    if fiber == 'a':
        fib = 'A'
    elif fiber == 'b':
        fib = 'B'
    wsa_data, wsb_data = ws_load.load_ws_for_fit(date)
    pressure = env_data.get_P_at_ws(date)
    init_state = parameters.load_date(fib, date)
    init_state[-1] = pressure  # last entry of the state vector holds pressure
    temps = env_data.get_T_at_ws(date)
    # Select the requested fiber's data and trace it once with the initial
    # state to report the starting residuals.
    ws_data = wsa_data if fiber == 'a' else wsb_data
    spec = ws_load.spectrum_from_data(ws_data)
    ws_model = vis_spectrometer.tracing(spec, init_state, fib, temps)
    print(len(ws_model))
    res_x = np.sqrt(np.sum((ws_data['posm'].values - ws_model['x'].values) ** 2) / len(ws_data))
    res_y = np.sqrt(np.sum((ws_data['posmy'].values - ws_model['y'].values) ** 2) / len(ws_data))
    print('Initial residuals, fiber = ', fiber)
    print('res_x =', res_x, ', res_y = ', res_y)
    print(len(ws_data), len(ws_model))
    # Diagnostic: model residuals (black) vs. pipeline residuals (blue).
    plt.plot(ws_data['posm'], ws_data['posm'].values - ws_model['x'].values, 'k.', zorder=10)
    plt.plot(ws_data['posm'], ws_data['posm'].values - ws_data['posc'].values, 'b.', zorder=0, alpha=0.5)
    plt.show()  # NOTE: blocking with interactive backends
    y = ws_data
    x = spec
    par_ini = init_state.copy()
    n_params = 44        # every instrumental parameter is free
    delta0 = 3.5e-6      # Gaussian prior width, identical for all parameters

    def prior(cube, ndim, nparams):
        # Gaussian prior centred on the initial state for every parameter.
        # One loop instead of 44 copy-pasted lines keeps the prior and the
        # likelihood mapping trivially in sync.
        for k in range(n_params):
            cube[k] = utils.transform_normal(cube[k], par_ini[k], delta0)
        return cube

    def loglike(cube, ndim, nparams):
        # Rebuild the parameter vector with the proposed values and trace.
        pars = parameters.load_date(fib, date)
        for k in range(n_params):
            pars[k] = cube[k]
        model = vis_spectrometer.tracing(x, pars, fib, temps)
        sigma_fit_x = np.full(len(model), 0.1)  # fixed 0.1 px positional uncertainty
        ndata = len(y)
        # Gaussian log-likelihood over the x and y position residuals.
        loglikelihood = -0.5 * ndata * np.log(2. * np.pi * sigma_fit_x ** 2).sum() + \
                        (-0.5 * ((model['x'].values - y['posm'].values) / sigma_fit_x) ** 2).sum() + \
                        (-0.5 * ((model['y'].values - y['posmy'].values) / sigma_fit_x) ** 2).sum()
        return loglikelihood

    path = "".join(['data/posteriors/' + date + '_full/'])
    if not os.path.exists(path):
        os.makedirs(path)
    out_file = "".join([path, 'dfit_' + str(fiber) + '_full'])
    # Run MultiNest (resume=True continues from any existing chain files).
    pymultinest.run(loglike, prior, n_params, n_live_points=500, outputfiles_basename=out_file, resume=True,
                    verbose=True)
    # Collect the posterior and the best-fit point.
    output = pymultinest.Analyzer(outputfiles_basename=out_file, n_params=n_params)
    bestfit_params = output.get_best_fit()
    maxpars = output.get_mode_stats()['modes'][0]['maximum']
    print(maxpars[0])
    mc_samples = output.get_equal_weighted_posterior()[:, :-1]
    mc_samples_data = pd.DataFrame(mc_samples)
    mc_samples_data.to_csv(path + 'mc_samples.tsv', sep=',', index=False)
    print('Multinest of full set of instrumental parameters for date ', date, ' done.')
def carmenes_vis_dynesty(date, fiber):
    """Fit 9 instrumental parameters of the CARMENES VIS model with
    dynesty's dynamic nested sampler.

    Parameters
    ----------
    date : str
        Observation date; selects the ws data, environment data and the
        initial parameter vector.
    fiber : str
        'a' or 'b'; selects which fiber's ws data is fitted.

    Side effects
    ------------
    Creates ``data/posteriors/<date>/`` if missing, runs the sampler and
    prints the posterior samples.  (MultiNest-style post-processing was
    dead code behind a triple-quoted string and has been removed.)
    """
    if fiber == 'a':
        fib = 'A'
    elif fiber == 'b':
        fib = 'B'
    wsa_data, wsb_data = ws_load.load_ws_for_fit(date)
    pressure = env_data.get_P_at_ws(date)
    init_state = parameters.load_date(fib, date)
    init_state[-1] = pressure  # last entry of the state vector holds pressure
    temps = env_data.get_T_at_ws(date)
    # Select the requested fiber's data and trace it once with the initial
    # state to report the starting residuals.
    ws_data = wsa_data if fiber == 'a' else wsb_data
    spec = ws_load.spectrum_from_data(ws_data)
    ws_model = vis_spectrometer.tracing(spec, init_state, fib, temps)
    print(len(ws_model))
    res_x = np.sqrt(np.sum((ws_data['posm'].values - ws_model['x'].values) ** 2) / len(ws_data))
    res_y = np.sqrt(np.sum((ws_data['posmy'].values - ws_model['y'].values) ** 2) / len(ws_data))
    print('Initial residuals, fiber = ', fiber)
    print('res_x =', res_x, ', res_y = ', res_y)
    y = ws_data
    x = spec
    par_ini = init_state.copy()
    # Indices (into the MOES parameter vector) of the 9 fitted parameters;
    # one list shared by prior and loglike so the mappings cannot drift.
    fit_idx = [11, 9, 12, 21, 28, 10, 13, 1, 32]
    delta0 = 3.5e-6  # Gaussian prior width, identical for all parameters

    def prior(cube):
        # dynesty prior transform: unit cube -> Gaussian priors centred on
        # the initial state.
        for k, idx in enumerate(fit_idx):
            cube[k] = utils.transform_normal(cube[k], par_ini[idx], delta0)
        return cube

    def loglike(cube):
        # Rebuild the full parameter vector, overwrite the fitted entries
        # with the proposed values and trace the spectrum.
        pars = parameters.load_date(fib, date)
        for k, idx in enumerate(fit_idx):
            pars[idx] = cube[k]
        model = vis_spectrometer.tracing(x, pars, fib, temps)
        sigma_fit_x = np.full(len(model), 0.1)  # fixed 0.1 px positional uncertainty
        ndata = len(y)
        # Gaussian log-likelihood over the x and y position residuals.
        loglikelihood = -0.5 * ndata * np.log(2. * np.pi * sigma_fit_x ** 2).sum() + \
                        (-0.5 * ((model['x'].values - y['posm'].values) / sigma_fit_x) ** 2).sum() + \
                        (-0.5 * ((model['y'].values - y['posmy'].values) / sigma_fit_x) ** 2).sum()
        return loglikelihood

    n_params = len(fit_idx)
    path = "".join(['data/posteriors/' + date + '/'])
    if not os.path.exists(path):
        os.makedirs(path)
    # Run the dynamic nested sampler.
    dsampler = dynesty.DynamicNestedSampler(
        loglike,
        prior,
        ndim=n_params
    )
    dsampler.run_nested(nlive_init=500, nlive_batch=500)
    results = dsampler.results
    samples = results['samples']
    print(samples)
    print('dynesty of instrumental parameters for date ', date, ' done.')
def nzp_res_dd_offset():
    """Fit a single constant offset between the observed nightly zero
    points (NZPs) and the mean drift-corrected RV residuals with MultiNest.

    Reads ``output_files/all_avc_dd_update.dat``, models
    ``nzp ~ res_dd_rv_mean + offset`` with the NZP uncertainties as
    Gaussian errors, and returns the best-fit offset.

    Returns
    -------
    float
        Best-fit additive offset (error estimate still pending).
    """
    out_path = 'output_files/'
    # Load data and drop rows with missing values.
    all_data = pd.read_csv(out_path + 'all_avc_dd_update.dat', sep=',')
    all_data = all_data.dropna()
    print(all_data.columns)
    nzp = all_data['nzp_obs']
    nzp_err = all_data['e_nzp_obs']
    ddres = all_data['res_dd_rv_mean']
    # Quick look at the scatter and mean of the raw difference.
    res = nzp - ddres
    print(np.std(res), np.sqrt(np.mean(res) ** 2))

    def prior(cube, ndim, nparams):
        # Uniform prior on the offset in [-5, 5].
        cube[0] = utils.transform_uniform(cube[0], -5, 5)  # offset

    def loglike(cube, ndim, nparams):
        # Gaussian likelihood of nzp given the shifted dd residuals.
        offset = cube[0]
        model = ddres + offset
        sigma_fit_y = nzp_err
        ndata = len(nzp)
        loglikelihood = -0.5 * ndata * np.log(2. * np.pi * sigma_fit_y ** 2).sum() + (
                -0.5 * ((nzp - model) / sigma_fit_y) ** 2).sum()
        return loglikelihood

    # BUG FIX: the model has exactly one free parameter; n_params was 15
    # (copy-paste leftover), which made MultiNest sample 14 meaningless,
    # untransformed unit-cube dimensions.  If old 15-parameter chain files
    # exist under ns_moes/, delete them before resuming.
    n_params = 1
    path = 'ns_moes/'
    if not os.path.exists(path):
        os.makedirs(path)
    out_file = 'ns_moes/nzp_dd_residuals_offset_'
    # Run MultiNest:
    pymultinest.run(loglike, prior, n_params, n_live_points=5000, outputfiles_basename=out_file, resume=True,
                    verbose=False)
    # Get output:
    output = pymultinest.Analyzer(outputfiles_basename=out_file, n_params=n_params)
    bestfit_params = output.get_best_fit()
    offset = bestfit_params['parameters'][0]
    # ERROR PENDING
    return offset
# import corner
# posterior_names = []
# pars_index = [42,11,12,9,13,10,35,17,28,21,33,24,29,36,39]
# new_pars = init_state
# i = 0
# for par in pars_index:
# posterior_names.append(parameters.get_name(par))
# new_pars[par] = bestfit_params['parameters'][i]
# print(parameters.get_name(par), init_state[par], new_pars[par], bestfit_params['parameters'][i])
# i += 1
# first_time = True
# for i in range(n_params):
# if first_time:
# posterior_data = output.posteriors['posterior_samples'][i]
# first_time = False
# else:
# posterior_data = np.vstack((posterior_data, results.posteriors['posterior_samples'][i]))
# posterior_data = posterior_data.T
# figure = corner.corner(mc_samples, labels = posterior_names)
# plt.savefig('plots/moes_ins_corner_'+fiber+'_'+date+'.png')
def carmenes_vis_dynesty_full(date, fiber):
    """Fit the full set of 44 instrumental parameters of the CARMENES VIS
    model with dynesty's dynamic nested sampler.

    Parameters
    ----------
    date : str
        Observation date; selects the ws data, environment data and the
        initial parameter vector.
    fiber : str
        'a' or 'b'; selects which fiber's ws data is fitted.

    Side effects
    ------------
    Creates ``data/posteriors/<date>_full/`` if missing, runs the sampler
    and writes the posterior samples to ``dyn_mc_samples.tsv``.
    (MultiNest-style post-processing was dead code behind a triple-quoted
    string and has been removed.)
    """
    if fiber == 'a':
        fib = 'A'
    elif fiber == 'b':
        fib = 'B'
    wsa_data, wsb_data = ws_load.load_ws_for_fit(date)
    pressure = env_data.get_P_at_ws(date)
    init_state = parameters.load_date(fib, date)
    init_state[-1] = pressure  # last entry of the state vector holds pressure
    temps = env_data.get_T_at_ws(date)
    # Select the requested fiber's data and trace it once with the initial
    # state to report the starting residuals.
    ws_data = wsa_data if fiber == 'a' else wsb_data
    spec = ws_load.spectrum_from_data(ws_data)
    ws_model = vis_spectrometer.tracing(spec, init_state, fib, temps)
    print(len(ws_model))
    res_x = np.sqrt(np.sum((ws_data['posm'].values - ws_model['x'].values) ** 2) / len(ws_data))
    res_y = np.sqrt(np.sum((ws_data['posmy'].values - ws_model['y'].values) ** 2) / len(ws_data))
    print('Initial residuals, fiber = ', fiber)
    print('res_x =', res_x, ', res_y = ', res_y)
    y = ws_data
    x = spec
    par_ini = init_state.copy()
    n_params = 44        # every instrumental parameter is free
    delta0 = 3.5e-6      # Gaussian prior width, identical for all parameters

    def prior(cube):
        # dynesty prior transform: Gaussian priors centred on the initial
        # state for every parameter.  One loop instead of 44 copy-pasted
        # lines keeps the prior and the likelihood mapping in sync.
        for k in range(n_params):
            cube[k] = utils.transform_normal(cube[k], par_ini[k], delta0)
        return cube

    def loglike(cube):
        # Rebuild the parameter vector with the proposed values and trace.
        pars = parameters.load_date(fib, date)
        for k in range(n_params):
            pars[k] = cube[k]
        model = vis_spectrometer.tracing(x, pars, fib, temps)
        sigma_fit_x = np.full(len(model), 0.1)  # fixed 0.1 px positional uncertainty
        ndata = len(y)
        # Gaussian log-likelihood over the x and y position residuals.
        loglikelihood = -0.5 * ndata * np.log(2. * np.pi * sigma_fit_x ** 2).sum() + \
                        (-0.5 * ((model['x'].values - y['posm'].values) / sigma_fit_x) ** 2).sum() + \
                        (-0.5 * ((model['y'].values - y['posmy'].values) / sigma_fit_x) ** 2).sum()
        return loglikelihood

    path = "".join(['data/posteriors/' + date + '_full/'])
    if not os.path.exists(path):
        os.makedirs(path)
    # Run the dynamic nested sampler.
    dsampler = dynesty.DynamicNestedSampler(
        loglike,
        prior,
        ndim=n_params
    )
    dsampler.run_nested(nlive_init=500, nlive_batch=500)
    results = dsampler.results
    samples = results['samples']
    mc_samples_data = pd.DataFrame(samples)
    mc_samples_data.to_csv(path + 'dyn_mc_samples.tsv', sep=',', index=False)
    print('dynesty of full instrumental parameters for date ', date, ' done.')
def do_priors_full_model():
    """Print one prior-transform code line per instrumental parameter.

    Loads the 2017-10-20 fibre-'A' parameter state only to count the
    parameters, then emits, for each index i, the literal line
    ``cube[i] = utils.transform_normal(cube[i], par_ini[i], delta0)``
    ready to be pasted into a prior function.
    """
    template = ('cube[{0}] = utils.transform_normal(cube[{0}], '
                'par_ini[{0}], delta0)')
    n_params = len(parameters.load_date('A', '2017-10-20'))
    for idx in range(n_params):
        print(template.format(idx))
def do_loglikes_full_model():
    """Print one assignment line per instrumental parameter.

    Counts the parameters in the 2017-10-20 fibre-'A' state and emits,
    for each index i, the literal line ``pars[i] = cube[i]`` ready to be
    pasted into a log-likelihood function.
    """
    n_params = len(parameters.load_date('A', '2017-10-20'))
    for idx in range(n_params):
        print(f'pars[{idx}] = cube[{idx}]')
if __name__ == '__main__':
    # Imported here rather than at module top because it is only needed
    # when the module is executed as a script.
    import optimization
    date = '2017-10-20'
    # Alternative entry points, kept disabled for reference:
    #rewrite_best_fit_params(date)
    #optimization.simulated_annealing_fit_date(date, 'A')
    #carmenes_vis_multinest_full(date, 'a')
    # Run the dynesty fit of the full instrumental model for this date.
    carmenes_vis_dynesty_full(date, 'a')
    #do_priors_full_model()
    #do_loglikes_full_model()
|
mtalapintoREPO_NAMEmoesPATH_START.@carmenes@fit_ns.py@.PATH_END.py
|
{
"filename": "readme.md",
"repo_name": "grand-mother/grand",
"repo_path": "grand_extracted/grand-main/env/docker_fedo/readme.md",
"type": "Markdown"
}
|
# Enable graphical application in docker
## For Linux
### Solution 1: with $HOME/.Xauthority
[tuto](https://medium.com/@SaravSun/running-gui-applications-inside-docker-containers-83d65c0db110)
For a GUI Application to run, we need to have a XServer which is available as part of every Linux Desktop Environment, But within a Container we don’t have any XServer — so we will
share the Host’s XServer with the Container by creating a volume
--volume="$HOME/.Xauthority:/root/.Xauthority:rw"
share the Host’s DISPLAY environment variable to the Container
--env="DISPLAY"
run container with host network driver with
--net=host
```
docker run --net=host --env="DISPLAY" --volume="$HOME/.Xauthority:/root/.Xauthority:rw" --name test_e docker_image_with_app_graph
```
### Solution 2: with /tmp/.X11
[tuto](https://leimao.github.io/blog/Docker-Container-GUI-Display/)
```
$ xhost +
$ docker run -it --rm -e DISPLAY=$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix firefox:0.0.1
$ xhost -
```
## For MacOS
[ROOT proposition](https://hub.docker.com/r/rootproject/root)
feedback...
## For Windows
[ROOT proposition](https://hub.docker.com/r/rootproject/root)
feedback...
# Dockerhub command
[repository grandlib](https://hub.docker.com/u/jcolley)
How to push to DockerHub
```
docker login -u grandlib
docker tag grandlib_dev:toto grandlib/dev:1.0
docker push grandlib/dev:1.0
```
|
grand-motherREPO_NAMEgrandPATH_START.@grand_extracted@grand-main@env@docker_fedo@readme.md@.PATH_END.py
|
{
"filename": "_cmid.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnel/marker/_cmid.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CmidValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``funnel.marker.cmid`` number property."""

    def __init__(self, plotly_name="cmid", parent_name="funnel.marker", **kwargs):
        # Pop the defaults out of kwargs first so that explicit caller
        # overrides take precedence over the values hard-coded here.
        edit_type = kwargs.pop("edit_type", "calc")
        implied_edits = kwargs.pop("implied_edits", {})
        super(CmidValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnel@marker@_cmid.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/gemini_instruments/f2/__init__.py",
"type": "Python"
}
|
# Public API of the F2 instrument package.
__all__ = ['AstroDataF2']

from astrodata import factory
from ..gemini import addInstrumentFilterWavelengths
from .adclass import AstroDataF2
from .lookup import filter_wavelengths

# Register the F2 AstroData class with the global factory so matching FITS
# files are mapped to AstroDataF2, and publish this instrument's filter
# central wavelengths to the shared Gemini lookup.
factory.addClass(AstroDataF2)
addInstrumentFilterWavelengths('F2', filter_wavelengths)
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@gemini_instruments@f2@__init__.py@.PATH_END.py
|
{
"filename": "_lineposition.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/legendgrouptitle/font/_lineposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionValidator(_plotly_utils.basevalidators.FlaglistValidator):
    """Validator for ``scattergl.legendgrouptitle.font.lineposition``."""

    def __init__(
        self,
        plotly_name="lineposition",
        parent_name="scattergl.legendgrouptitle.font",
        **kwargs,
    ):
        # Pop the defaults out of kwargs first so explicit caller
        # overrides win; order matches the base-class expectations.
        edit_type = kwargs.pop("edit_type", "style")
        extras = kwargs.pop("extras", ["none"])
        flags = kwargs.pop("flags", ["under", "over", "through"])
        super(LinepositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            extras=extras,
            flags=flags,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@legendgrouptitle@font@_lineposition.py@.PATH_END.py
|
{
"filename": "ex2.py",
"repo_name": "teuben/nemo",
"repo_path": "nemo_extracted/nemo-master/scripts/templates/ex2.py",
"type": "Python"
}
|
#! /usr/bin/env python
#
# Template: command-line parsing with argparse.
# https://docs.python.org/3/library/argparse.html

import math
import argparse

# The default HelpFormatter re-wraps the description and collapses embedded
# newlines (which is why \n previously "didn't work").  Using
# RawDescriptionHelpFormatter preserves the layout below verbatim.
my_help = """
This script grabs aaa and bbb via argparse.  There are several options:

  --aaa 10
  --aaa=10
  -a 10     (note the = option is not allowed in short form)
"""

p = argparse.ArgumentParser(
    description=my_help,
    formatter_class=argparse.RawDescriptionHelpFormatter)
# Short options are defined as well, matching the help text above.
p.add_argument('-a', '--aaa', type=int, default=1,
               help='The aaa (int) variable')
p.add_argument('-b', '--bbb', type=float, default=2.0,
               help='The bbb (float) variable')
args = p.parse_args()

if __name__ == '__main__':
    print(args.aaa, args.bbb)
|
teubenREPO_NAMEnemoPATH_START.@nemo_extracted@nemo-master@scripts@templates@ex2.py@.PATH_END.py
|
{
"filename": "other.md",
"repo_name": "RadioAstronomySoftwareGroup/pyradiosky",
"repo_path": "pyradiosky_extracted/pyradiosky-main/.github/original_pr_templates/other.md",
"type": "Markdown"
}
|
---
name: Other
about: Anything that doesn't fit in any of the other types of pull requests.
---
<!--- Provide a general summary of your changes in the Title above -->
## Description
<!--- Describe your changes in detail -->
## Motivation and Context
<!--- Why is this change required? What problem does it solve? -->
<!--- If it fixes an open issue, please link to the issue here. If this PR closes an issue, put the word 'closes' before the issue link to auto-close the issue when the PR is merged. -->
## Checklist:
<!--- You may remove the checklists that don't apply to your change type(s) or just leave them empty -->
<!--- Go over all the following points, and replace the space with an `x` in all the boxes that apply. -->
<!--- If you're unsure about any of these, don't hesitate to ask. We're here to help! -->
- [ ] I have read the [contribution guide](https://github.com/RadioAstronomySoftwareGroup/pyradiosky/blob/main/.github/CONTRIBUTING.md).
- [ ] My code follows the code style of this project.
- [ ] Any new or updated docstrings use the [numpy docstring format](https://numpydoc.readthedocs.io/en/latest/format.html).
- [ ] I have added tests to cover any changes.
- [ ] My change includes a breaking change
- [ ] My change includes backwards compatibility and deprecation warnings (if possible).
- [ ] I have updated the [CHANGELOG](https://github.com/RadioAstronomySoftwareGroup/pyradiosky/blob/main/CHANGELOG.md) if appropriate.
|
RadioAstronomySoftwareGroupREPO_NAMEpyradioskyPATH_START.@pyradiosky_extracted@pyradiosky-main@.github@original_pr_templates@other.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "PrefectHQ/prefect",
"repo_path": "prefect_extracted/prefect-main/tests/test-projects/nested-project/shared_libs/__init__.py",
"type": "Python"
}
|
PrefectHQREPO_NAMEprefectPATH_START.@prefect_extracted@prefect-main@tests@test-projects@nested-project@shared_libs@__init__.py@.PATH_END.py
|
|
{
"filename": "remote.md",
"repo_name": "ML4GW/aframe",
"repo_path": "aframe_extracted/aframe-main/docs/remote.md",
"type": "Markdown"
}
|
Remote Training
===============
```{eval-rst}
.. note::
It is recommended you are familiar with running a :doc:`local pipeline <first_pipeline>` before proceeding
```
```{eval-rst}
.. important::
For remote training, you must have completed `ml4gw quickstart <https://github.com/ml4gw/quickstart/>`_ instructions, or installed the equivalent software.
Specifically, configuring :code:`s3` and `Kubernetes <https://kubernetes.io/>`_ for access to the nautilus hypercluster is required.
It is also recommended that you are familiar with Nautilus and `Kubernetes <https://kubernetes.io/>`_.
If you are not, the Nautilus introduction `tutorial <https://ucsd-prp.gitlab.io/userdocs/tutorial/introduction/>`_
is a good place to start.
```
Remote experiments can be initialized using the `aframe-init` command line tool.
To initialize an experiment directory for a remote run, specify the `--s3-bucket` argument `aframe-init`.
```bash
poetry run aframe-init offline --mode sandbox --directory ~/aframe/my-first-remote-run --s3-bucket s3://my-bucket/my-first-remote-run
```
This will configure the `AFRAME_TRAIN_RUN_DIR` and `AFRAME_TRAIN_DATA_DIR` in the `run.sh` to point to the specified remote s3 bucket.
```bash
#!/bin/bash
# Export environment variables
export AFRAME_TRAIN_DATA_DIR=s3://my-bucket/my-first-remote-run/data/train
export AFRAME_TEST_DATA_DIR=/home/albert.einstein/aframe/my-first-remote-run/data/test
export AFRAME_TRAIN_RUN_DIR=s3://my-bucket/my-first-remote-run/training
export AFRAME_CONDOR_DIR=/home/albert.einstein/aframe/my-first-remote-run/condor
export AFRAME_RESULTS_DIR=/home/albert.einstein/aframe/my-first-remote-run/results
export AFRAME_TMPDIR=/home/albert.einstein/aframe/my-first-remote-run/tmp/
# launch pipeline; modify the gpus, workers etc. to suit your needs
# note that if you've made local code changes not in the containers
# you'll need to add the --dev flag!
LAW_CONFIG_FILE=/home/albert.einstein/aframe/my-first-remote-run/sandbox.cfg poetry run --directory /home/albert.einstein/projects/aframev2 law run aframe.pipelines.sandbox.Sandbox --workers 5 --gpus 0
```
The `luigi`/`law` `Tasks` responsible for training data generation will automatically transfer your data to s3 storage, and launch a remote training job using kubernetes.
```{eval-rst}
.. note::
Only training is run remotely. The rest of the pipeline (data generation, export, inference, etc.) is run locally.
All tasks are able to interact with the s3 artifacts created by the remote training job.
```
## Configuring Remote Resources
The quantity of remote resources can be configured in the `.cfg` config file under the `[luigi_Train]` header
```cfg
[luigi_Train]
...
request_gpus = 4 # number of gpus to request
cpus_per_gpu = 12 # cpus per gpu
memory_per_cpu = 1 # memory in GB
```
It is also possible to sync remote `Aframe` code from git into the container using an optional [git-sync](https://github.com/kubernetes/git-sync) `initContainer`.
This is often useful when you are testing an idea that hasn't made
it onto the `Aframe` `main` branch (and thus hasn't been pushed to the remote container image). To do so, specify the following
in the `.cfg`.
```cfg
[luigi_Train]
...
# use kubernetes initContainer to sync code
use_init_containers = True
# path to remote git repository
git_url = git@github.com:albert.einstein/aframev2.git
# reference (e.g. branch or commit) to checkout
git_ref = my-feature
```
```{eval-rst}
.. important::
The git-sync :code:`initContainer` uses your ssh key to clone software from github. To do so, a Kubernetes secret
is made to mount your ssh key into the container. By default, :code:`Aframe` will automatically pull your ssh key from
:code:`~/.ssh/id_rsa`. You can override this default under the :code:`[luigi_ssh]` header
.. code-block:: ini
[luigi_ssh]
ssh_file = ~/.ssh/id_ed25519
```
## Local Training with S3 Data
Sometimes there are instances where you have data that lives on an `s3` filesystem, but you wish to train using local resources. To do so,
set `AFRAME_TRAIN_RUN_DIR` to a local path and `AFRAME_TRAIN_DATA_DIR` to an `s3://` location. The training project will detect that the specified data
lives on `s3`, and download it.
```bash
#!/bin/bash
# remote s3 data
export AFRAME_TRAIN_DATA_DIR=s3://my-bucket/remote-data-local-training/data/train
# local training
export AFRAME_TRAIN_RUN_DIR=/home/albert.einstein/remote-data-local-training/training
...
```
|
ML4GWREPO_NAMEaframePATH_START.@aframe_extracted@aframe-main@docs@remote.md@.PATH_END.py
|
{
"filename": "take_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/python/data/kernel_tests/take_test.py",
"type": "Python"
}
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.take()`."""
from typing import Callable, Optional
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import global_shuffle_op
from tensorflow.python.data.experimental.ops import random_access
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.framework import combinations
from tensorflow.python.framework import errors
from tensorflow.python.platform import test
class TakeTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Checks element contents and shapes produced by `Dataset.take`."""

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(count=[-1, 0, 4, 10, 25])))
  def testBasic(self, count):
    # Single-component dataset of 10 scalars; `take` must preserve the
    # per-element shape and yield min(count, 10) elements (-1 means "all").
    components = (np.arange(10),)
    dataset = dataset_ops.Dataset.from_tensor_slices(components).take(count)
    self.assertEqual(
        [c.shape[1:] for c in components],
        [shape for shape in dataset_ops.get_legacy_output_shapes(dataset)])
    num_output = min(count, 10) if count != -1 else 10
    self.assertDatasetProduces(
        dataset, [tuple(components[0][i:i + 1]) for i in range(num_output)])

  @combinations.generate(test_base.default_test_combinations())
  def testName(self):
    # The optional `name` argument must not affect the produced values.
    dataset = dataset_ops.Dataset.from_tensors(42).take(1, name="take")
    self.assertDatasetProduces(dataset, [42])
class TakeDatasetCheckpointTest(checkpoint_test_base.CheckpointTestBase,
                                parameterized.TestCase):
  """Verifies that `take` datasets can be checkpointed and restored."""

  def _build_take_dataset(self, count, options=None):
    # range(100).take(count), optionally with explicit dataset options.
    dataset = dataset_ops.Dataset.range(100).take(count)
    if options:
      dataset = dataset.with_options(options)
    return dataset

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          checkpoint_test_base.default_test_combinations(),
          combinations.combine(symbolic_checkpoint=[False, True]),
          combinations.combine(count=[50], num_outputs=[50]) +
          combinations.combine(count=[200, 100, -1], num_outputs=[100]) +
          combinations.combine(count=[0], num_outputs=[0])))
  def test(self, verify_fn, symbolic_checkpoint, count, num_outputs):
    # `verify_fn` (supplied by the checkpoint combinations) interrupts and
    # restores iteration, checking exactly `num_outputs` elements appear.
    options = options_lib.Options()
    options.experimental_symbolic_checkpoint = symbolic_checkpoint
    verify_fn(self, lambda: self._build_take_dataset(count, options),
              num_outputs)
class TakeRandomAccessTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Checks `random_access.at` indexing into `take` datasets."""

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(index=[-1, 3, 4])))
  def testInvalidIndex(self, index):
    # range(10).take(3) has valid indices 0..2 only; negative and
    # past-the-end indices must raise OutOfRangeError.
    dataset = dataset_ops.Dataset.range(10).take(3)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(random_access.at(dataset, index=index))

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(index=[-2, 0, 1])))
  def testEmptyDataset(self, index):
    # Taking from an empty dataset: every index is out of range.
    dataset = dataset_ops.Dataset.from_tensor_slices([]).take(5)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(random_access.at(dataset, index=index))

  @combinations.generate(
      combinations.times(test_base.default_test_combinations(),
                         combinations.combine(count=[-1, 0, 4, 10, 25])))
  def testBasic(self, count):
    dataset = dataset_ops.Dataset.range(10).take(count)
    # -1 means "take everything"; otherwise at most `count` of 10 elements.
    num_output = min(count, 10) if count != -1 else 10
    for i in range(num_output):
      self.assertEqual(
          self.evaluate(random_access.at(dataset, index=i)), i)
    with self.assertRaises(errors.OutOfRangeError):
      self.evaluate(random_access.at(dataset, index=num_output))
class TakeGlobalShuffleTest(test_base.DatasetTestBase, parameterized.TestCase):
  """Checks global shuffling of (possibly repeated) `take` datasets."""

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          combinations.combine(
              dataset_range=[100],
              count=[20, 200],
              repetitions=[1, 2],
              seed=[None, 42],
              reshuffle_each_iteration=[True, False])))
  def test(
      self,
      dataset_range: int,
      count: int,
      repetitions: int,
      seed: Optional[int],
      reshuffle_each_iteration: bool):
    dataset = dataset_ops.Dataset.range(dataset_range)
    dataset = dataset.take(count)
    dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
    if repetitions > 1:
      dataset = dataset.repeat(repetitions)
    dataset = global_shuffle_op._global_shuffle(
        dataset, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration)
    # Same multiset of elements must come back, in a different order.
    expected = list(range(0, min(count, dataset_range))) * repetitions
    dataset_output = self.getDatasetOutput(
        dataset, requires_initialization=True)
    self.assertCountEqual(dataset_output, expected)
    # NOTE(review): assertNotEqual assumes the shuffle never yields the
    # identity ordering -- probabilistic when seed is None; confirm this
    # is acceptable flake risk.
    self.assertNotEqual(dataset_output, expected)
    self.assertLen(dataset_output, self.evaluate(dataset.cardinality()))
class TakeGlobalShuffleCheckpointTest(
    checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
  """Checkpoint/restore coverage for globally shuffled `take` datasets."""

  @combinations.generate(
      combinations.times(
          test_base.default_test_combinations(),
          checkpoint_test_base.default_test_combinations(),
          combinations.combine(
              dataset_range=[10],
              count=[2, 20],
              repetitions=[1, 2],
              reshuffle_each_iteration=[True, False],
              symbolic_checkpoint=[True, False])))
  def test(
      self,
      verify_fn: Callable[..., None],
      dataset_range: int,
      count: int,
      repetitions: int,
      reshuffle_each_iteration: bool,
      symbolic_checkpoint: bool):

    def _build_dataset() -> dataset_ops.Dataset:
      # Same pipeline shape as TakeGlobalShuffleTest, but with a fixed
      # seed so the iterator is reproducible across checkpoint/restore.
      dataset = dataset_ops.Dataset.range(dataset_range)
      dataset = dataset.take(count)
      dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
      if repetitions > 1:
        dataset = dataset.repeat(repetitions)
      dataset = global_shuffle_op._global_shuffle(
          dataset, seed=42, reshuffle_each_iteration=reshuffle_each_iteration)
      options = options_lib.Options()
      options.experimental_symbolic_checkpoint = symbolic_checkpoint
      return dataset.with_options(options)

    verify_fn(
        self,
        _build_dataset,
        num_outputs=min(count, dataset_range) * repetitions,
        assert_items_equal=reshuffle_each_iteration,
    )
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@python@data@kernel_tests@take_test.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/table/legendgrouptitle/font/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(
self, plotly_name="style", parent_name="table.legendgrouptitle.font", **kwargs
):
super(StyleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "style"),
values=kwargs.pop("values", ["normal", "italic"]),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@table@legendgrouptitle@font@_style.py@.PATH_END.py
|
{
"filename": "makeplot.py",
"repo_name": "VirtualPlanetaryLaboratory/vplanet",
"repo_path": "vplanet_extracted/vplanet-main/examples/AtmEscFlareProxCen/makeplot.py",
"type": "Python"
}
|
"""
This script produces a figure that show the time evolution of planetary and stellar parameters for
Proxima Centuri b, using VPLANET's ATMESC, STELLAR
and FLARE modules.
@autor: Laura N. R. do Amaral, Universidad Nacional Autónoma de México, 2021
@email: laura.nevesdoamaral@gmail.com
Date: Nov. 22st, 2021
"""
import os
import pathlib
import subprocess
import sys
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import vplot as vpl
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.axes_grid1 import make_axes_locatable
import vplanet
# Make the example's parent directory importable so `get_args` resolves.
path = pathlib.Path(__file__).parents[0].absolute()
sys.path.insert(1, str(path.parents[0]))
from get_args import get_args

# Clean-up of old output files (disabled):
# directory = ["./davenport", "./lacy", "./stellar"]
# for i in directory:
#     os.chdir(i)
#     os.system("rm *.log")
#     os.system("rm *.forward")
#     os.chdir(path)

# Run the three simulations: flare (Davenport mode), flare (Lacy mode),
# and stellar-only. Each run replaces the placeholder list in `directory`
# with the vplanet output object for that configuration.
run = ["./davenport/vpl.in", "./lacy/vpl.in", "./stellar/vpl.in"]
davenport = []
lacy = []
stellar = []
directory = [davenport, lacy, stellar]
for i in range(0, 3):
    directory[i] = vplanet.run(path / run[i], units=False)

# Plot! 2x4 grid of panels sharing the x (time) axis per column.
fig, axes = plt.subplots(nrows=2, ncols=4, sharex="col", figsize=(12, 5))

style = ["-", "--", "-."]
color = [vpl.colors.red, vpl.colors.orange, vpl.colors.dark_blue]
a = 1  # common line width for all curves

# One curve per simulation in each of the six planet/star panels.
for i in range(0, 3):
    axes[0, 0].plot(
        directory[i].b.Time,
        directory[i].b.SurfWaterMass,
        color=color[i],
        linewidth=a,
        linestyle=style[i],
        alpha=0.5,
    )
    axes[0, 1].plot(
        directory[i].b.Time,
        directory[i].b.EnvelopeMass,
        color=color[i],
        linewidth=a,
        linestyle=style[i],
        alpha=0.5,
    )
    axes[0, 2].plot(
        directory[i].b.Time,
        directory[i].b.PlanetRadius,
        color=color[i],
        linewidth=a,
        linestyle=style[i],
        alpha=0.5,
    )
    axes[1, 0].plot(
        directory[i].b.Time,
        directory[i].b.OxygenMass,
        color=color[i],
        linewidth=a,
        linestyle=style[i],
        alpha=0.5,
    )
    axes[1, 3].plot(
        directory[i].b.Time,
        directory[i].b.FXUV,
        color=color[i],
        linewidth=a,
        linestyle=style[i],
        alpha=0.5,
    )
    axes[0, 3].plot(
        directory[i].star.Time,
        directory[i].star.LXUVTot / directory[i].star.Luminosity,
        color=color[i],
        linewidth=a,
        linestyle=style[i],
        alpha=0.5,
    )

# Envelope mass-loss rate panel: plotted per-simulation (explicit indices)
# so each curve can carry its own legend label.
axes[1, 2].plot(
    directory[0].b.Time,
    directory[0].b.DEnvMassDt,
    color=color[0],
    linewidth=a,
    linestyle=style[0],
    alpha=0.5,
    label="Flare(Davenport mode)+Stellar",
)
axes[1, 2].plot(
    directory[1].b.Time,
    directory[1].b.DEnvMassDt,
    color=color[1],
    linewidth=a,
    linestyle=style[1],
    alpha=0.5,
    label="Flare(Lacy mode)+Stellar",
)
axes[1, 2].plot(
    directory[2].b.Time,
    directory[2].b.DEnvMassDt,
    color=color[2],
    linewidth=a,
    linestyle=style[2],
    alpha=0.5,
    label="Stellar",
)
axes[1, 2].legend(loc="lower left", ncol=1, fontsize=8)

# Habitable-zone band (recent-Venus to early-Mars limits) from run 0.
axes[1, 1].fill_between(
    directory[0].b.Time,
    directory[0].b.HZLimRecVenus,
    directory[0].b.HZLimEarlyMars,
    color=vpl.colors.purple,
    alpha=0.5,
)
axes[1, 1].annotate(
    "HZ",
    xy=(0.1, 0.35),
    xycoords="axes fraction",
    horizontalalignment="left",
    verticalalignment="bottom",
    color="w",
)

# Axis labels for every panel.
axes[0, 0].set_ylabel("Surface Water (TO)")
axes[0, 1].set_ylabel(r"Envelope Mass (M$_{\oplus}$)")
axes[0, 2].set_ylabel(r"Planetary Radius (R$_{\oplus}$)")
axes[0, 3].set_ylabel(r"L$_{XUV}$/L$_{bol}$")
axes[1, 0].set_ylabel("Oxygen Pressure (bars)")
axes[1, 1].set_ylabel("Semi-Major Axis (AU)")
axes[1, 2].set_ylabel(r"DEnvMassDt (M$_{\oplus}$ Myr$^{-1}$)")
axes[1, 3].set_ylabel(r"XUV flux (W/m$^{2}$)")

# Common x-axis: logarithmic system age from 1 Myr to 1 Gyr.
for i in range(0, 4):
    axes[0, i].set_xlim(1e6, 1e9)
    axes[1, i].set_xlim(1e6, 1e9)
    axes[0, i].set_xscale("log")
    axes[1, i].set_xscale("log")
    axes[0, i].set_xlabel(" ")
    axes[1, i].set_xlabel("System Age (year)")

axes[0, 1].set_yscale("log")

# Mark Proxima Centauri b's orbital distance on the semi-major axis panel.
axes[1, 1].annotate(
    "Prox Cen b's orbit",
    xy=(0.03, 0.07),
    xycoords="axes fraction",
    fontsize=9,
    horizontalalignment="left",
    verticalalignment="bottom",
    color="k",
)
axes[1, 1].axhline(y=0.0485, xmin=0.0, xmax=1e11, color="k", lw=0.5)

# Save figure in the extension chosen on the command line.
ext = get_args().ext
fig.savefig(path / f"AtmEscFlareProxCen.{ext}", bbox_inches="tight", dpi=200)
|
VirtualPlanetaryLaboratoryREPO_NAMEvplanetPATH_START.@vplanet_extracted@vplanet-main@examples@AtmEscFlareProxCen@makeplot.py@.PATH_END.py
|
{
"filename": "exoplanet_orbit_database.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/exoplanet_orbit_database/exoplanet_orbit_database.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import json
import os
import warnings
from astropy.utils.data import download_file
from astropy.io import ascii
from astropy.table import QTable
import astropy.units as u
from astropy.coordinates import SkyCoord
__all__ = ['ExoplanetOrbitDatabase']

# Source CSV of the Exoplanet Orbit Database.
EXOPLANETS_CSV_URL = 'http://exoplanets.org/csv-files/exoplanets.csv'
# Column-name -> time-format mappings and boolean-flag columns; not used
# in this module directly -- presumably consumed by callers; verify.
TIME_ATTRS = {'TT': 'jd', 'T0': 'jd'}
BOOL_ATTRS = ('ASTROMETRY', 'BINARY', 'EOD', 'KDE', 'MICROLENSING', 'MULT',
              'SE', 'TIMING', 'TRANSIT', 'TREND')


class ExoplanetOrbitDatabaseClass:
    """
    Exoplanet Orbit Database querying object. Use the ``get_table`` or
    ``query_planet`` methods to get information about exoplanets via the
    Exoplanet Orbit Database.
    """

    def __init__(self):
        # Lazily-populated per-instance caches, filled on first access.
        self._param_units = None
        self._table = None

    @property
    def param_units(self):
        """Mapping of table column names to unit strings, loaded once from
        the packaged JSON file and cached on the instance."""
        if self._param_units is None:
            module_dir = os.path.dirname(os.path.abspath(__file__))
            filename = os.path.join(module_dir, 'data', 'exoplanet_orbit_database_units.json')
            with open(filename) as units_file:
                self._param_units = json.load(units_file)

        return self._param_units

    def get_table(self, *, cache=True, show_progress=True, table_path=None):
        """
        Download (and optionally cache) the `Exoplanet Orbit Database planets
        table <http://www.exoplanets.org>`_.

        Parameters
        ----------
        cache : bool (optional)
            Cache exoplanet table to local astropy cache? Default is `True`.
        show_progress : bool (optional)
            Show progress of exoplanet table download (if no cached copy is
            available). Default is `True`.
        table_path : str (optional)
            Path to a local table file. Default `None` will trigger a
            download of the table from the internet.

        Returns
        -------
        table : `~astropy.table.QTable`
            Table of exoplanet properties.
        """
        # NOTE: the table is cached on the instance, so all arguments are
        # ignored on every call after the first.
        if self._table is None:
            if table_path is None:
                table_path = download_file(EXOPLANETS_CSV_URL, cache=cache,
                                           show_progress=show_progress)
            exoplanets_table = ascii.read(table_path, fast_reader=False)

            # Store column of lowercase names for indexing:
            lowercase_names = [i.lower().replace(" ", "")
                               for i in exoplanets_table['NAME'].data]
            exoplanets_table['NAME_LOWERCASE'] = lowercase_names
            exoplanets_table.add_index('NAME_LOWERCASE')

            # Create sky coordinate mixin column
            exoplanets_table['sky_coord'] = SkyCoord(ra=exoplanets_table['RA'] * u.hourangle,
                                                     dec=exoplanets_table['DEC'] * u.deg)

            # Assign units to columns where possible
            for col in exoplanets_table.colnames:
                if col in self.param_units:
                    # Check that unit is implemented in this version of astropy
                    try:
                        exoplanets_table[col].unit = u.Unit(self.param_units[col])
                    except ValueError:
                        print(f"WARNING: Unit {self.param_units[col]} not recognised")

            # Masked quantities are not supported in older astropy, warnings are raised for <v5.0
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore', message='dropping mask in Quantity column',
                                        category=UserWarning)
                self._table = QTable(exoplanets_table)

        return self._table

    def query_planet(self, planet_name, *, table_path=None):
        """
        Get table of exoplanet properties.

        Parameters
        ----------
        planet_name : str
            Name of planet
        table_path : str (optional)
            Path to a local table file. Default `None` will trigger a
            download of the table from the internet.

        Returns
        -------
        table : `~astropy.table.QTable`
            Table of one exoplanet's properties.
        """
        exoplanet_table = self.get_table(table_path=table_path)
        # Lookup uses the normalized (lowercase, space-free) name index
        # built in get_table().
        return exoplanet_table.loc[planet_name.strip().lower().replace(' ', '')]


# Module-level singleton through which the database is normally queried.
ExoplanetOrbitDatabase = ExoplanetOrbitDatabaseClass()
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@exoplanet_orbit_database@exoplanet_orbit_database.py@.PATH_END.py
|
{
"filename": "johnsnowlabs_embedding.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/integrations/text_embedding/johnsnowlabs_embedding.ipynb",
"type": "Jupyter Notebook"
}
|
# John Snow Labs
>[John Snow Labs](https://nlp.johnsnowlabs.com/) NLP & LLM ecosystem includes software libraries for state-of-the-art AI at scale, Responsible AI, No-Code AI, and access to over 20,000 models for Healthcare, Legal, Finance, etc.
>
>Models are loaded with [nlp.load](https://nlp.johnsnowlabs.com/docs/en/jsl/load_api) and spark session is started >with [nlp.start()](https://nlp.johnsnowlabs.com/docs/en/jsl/start-a-sparksession) under the hood.
>For all 24,000+ models, see the [John Snow Labs Models Hub](https://nlp.johnsnowlabs.com/models)
## Setting up
```python
%pip install --upgrade --quiet johnsnowlabs
```
```python
# If you have an enterprise license, you can run this to install enterprise features
# from johnsnowlabs import nlp
# nlp.install()
```
## Example
```python
from langchain_community.embeddings.johnsnowlabs import JohnSnowLabsEmbeddings
```
Initialize Johnsnowlabs Embeddings and Spark Session
```python
embedder = JohnSnowLabsEmbeddings("en.embed_sentence.biobert.clinical_base_cased")
```
Define some example texts . These could be any documents that you want to analyze - for example, news articles, social media posts, or product reviews.
```python
texts = ["Cancer is caused by smoking", "Antibiotics aren't painkiller"]
```
Generate and print embeddings for the texts . The JohnSnowLabsEmbeddings class generates an embedding for each document, which is a numerical representation of the document's content. These embeddings can be used for various natural language processing tasks, such as document similarity comparison or text classification.
```python
embeddings = embedder.embed_documents(texts)
for i, embedding in enumerate(embeddings):
print(f"Embedding for document {i+1}: {embedding}")
```
Generate and print an embedding for a single piece of text. You can also generate an embedding for a single piece of text, such as a search query. This can be useful for tasks like information retrieval, where you want to find documents that are similar to a given query.
```python
query = "Cancer is caused by smoking"
query_embedding = embedder.embed_query(query)
print(f"Embedding for query: {query_embedding}")
```
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@integrations@text_embedding@johnsnowlabs_embedding.ipynb@.PATH_END.py
|
{
"filename": "signals.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/unittest/signals.py",
"type": "Python"
}
|
import signal
import weakref
from functools import wraps
__unittest = True
class _InterruptHandler(object):
    """SIGINT handler used by unittest's control-c support.

    The first control-c stops every registered TestResult; a second one
    (or a SIGINT received while another handler is installed) is passed
    through to the handler that was active before installHandler().
    """
    def __init__(self, default_handler):
        # Set once a SIGINT has been handled, so the next one falls through.
        self.called = False
        # Keep the raw original so removeHandler() can reinstall it verbatim.
        self.original_handler = default_handler
        if isinstance(default_handler, int):
            if default_handler == signal.SIG_DFL:
                # Pretend it's signal.default_int_handler instead.
                default_handler = signal.default_int_handler
            elif default_handler == signal.SIG_IGN:
                # Not quite the same thing as SIG_IGN, but the closest we
                # can make it: do nothing.
                def default_handler(unused_signum, unused_frame):
                    pass
            else:
                raise TypeError("expected SIGINT signal handler to be "
                                "signal.SIG_IGN, signal.SIG_DFL, or a "
                                "callable object")
        self.default_handler = default_handler
    def __call__(self, signum, frame):
        """Handle SIGINT: stop registered results, delegating when appropriate."""
        installed_handler = signal.getsignal(signal.SIGINT)
        if installed_handler is not self:
            # if we aren't the installed handler, then delegate immediately
            # to the default handler
            self.default_handler(signum, frame)
        if self.called:
            # second control-c: behave like the wrapped handler
            self.default_handler(signum, frame)
        self.called = True
        for result in _results.keys():
            result.stop()
# Weak registry of TestResult objects to stop on the first SIGINT.  Weak keys
# let a result be garbage collected once the test machinery drops it.
_results = weakref.WeakKeyDictionary()
def registerResult(result):
    """Register *result* so a control-c will call its stop() method."""
    _results[result] = 1
def removeResult(result):
    """Forget *result*; return True if it had been registered."""
    return bool(_results.pop(result, None))
# The single installed _InterruptHandler instance, or None when not installed.
_interrupt_handler = None
def installHandler():
    """Install the control-c handler, wrapping the current SIGINT handler.

    Idempotent: a second call while the handler is installed does nothing.
    """
    global _interrupt_handler
    if _interrupt_handler is None:
        # remember whatever handler was active so it can be restored later
        default_handler = signal.getsignal(signal.SIGINT)
        _interrupt_handler = _InterruptHandler(default_handler)
        signal.signal(signal.SIGINT, _interrupt_handler)
def removeHandler(method=None):
    """Uninstall the control-c handler, or decorate *method* to run without it.

    Called with no argument, reinstall the SIGINT handler that was active
    before installHandler().  Called with *method*, return a wrapper that
    removes the handler around the call and then restores whatever SIGINT
    handler was installed at call time.
    """
    if method is None:
        if _interrupt_handler is not None:
            signal.signal(signal.SIGINT, _interrupt_handler.original_handler)
        return None
    @wraps(method)
    def inner(*args, **kwargs):
        previous = signal.getsignal(signal.SIGINT)
        removeHandler()
        try:
            return method(*args, **kwargs)
        finally:
            signal.signal(signal.SIGINT, previous)
    return inner
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@unittest@signals.py@.PATH_END.py
|
{
"filename": "lambda_callback_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/callbacks/lambda_callback_test.py",
"type": "Python"
}
|
import numpy as np
import pytest
from absl import logging
from keras.src import callbacks
from keras.src import layers
from keras.src import losses
from keras.src import optimizers
from keras.src import testing
from keras.src.models.sequential import Sequential
class LambdaCallbackTest(testing.TestCase):
    """Tests for callbacks.LambdaCallback.

    Covers epoch/train-level hooks, batch-level hooks, evaluation and
    prediction hooks, and constructor edge cases (no arguments, extra
    keyword arguments).  Each test trains a tiny Sequential model and
    asserts the expected log lines were emitted at WARNING level.
    """
    @pytest.mark.requires_trainable_backend
    def test_lambda_callback(self):
        """Test standard LambdaCallback functionalities with training."""
        batch_size = 4
        model = Sequential(
            [layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
        )
        model.compile(
            optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
        )
        x = np.random.randn(16, 2)
        y = np.random.randn(16, 1)
        lambda_log_callback = callbacks.LambdaCallback(
            on_train_begin=lambda logs: logging.warning("on_train_begin"),
            on_epoch_begin=lambda epoch, logs: logging.warning(
                "on_epoch_begin"
            ),
            on_epoch_end=lambda epoch, logs: logging.warning("on_epoch_end"),
            on_train_end=lambda logs: logging.warning("on_train_end"),
        )
        with self.assertLogs(level="WARNING") as logs:
            model.fit(
                x,
                y,
                batch_size=batch_size,
                validation_split=0.2,
                callbacks=[lambda_log_callback],
                epochs=5,
                verbose=0,
            )
        self.assertTrue(any("on_train_begin" in log for log in logs.output))
        self.assertTrue(any("on_epoch_begin" in log for log in logs.output))
        self.assertTrue(any("on_epoch_end" in log for log in logs.output))
        self.assertTrue(any("on_train_end" in log for log in logs.output))
    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_with_batches(self):
        """Test LambdaCallback's behavior with batch-level callbacks."""
        batch_size = 4
        model = Sequential(
            [layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
        )
        model.compile(
            optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
        )
        x = np.random.randn(16, 2)
        y = np.random.randn(16, 1)
        lambda_log_callback = callbacks.LambdaCallback(
            on_train_batch_begin=lambda batch, logs: logging.warning(
                "on_train_batch_begin"
            ),
            on_train_batch_end=lambda batch, logs: logging.warning(
                "on_train_batch_end"
            ),
        )
        with self.assertLogs(level="WARNING") as logs:
            model.fit(
                x,
                y,
                batch_size=batch_size,
                validation_split=0.2,
                callbacks=[lambda_log_callback],
                epochs=5,
                verbose=0,
            )
        self.assertTrue(
            any("on_train_batch_begin" in log for log in logs.output)
        )
        self.assertTrue(
            any("on_train_batch_end" in log for log in logs.output)
        )
    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_with_kwargs(self):
        """Test LambdaCallback's behavior with custom defined callback."""
        batch_size = 4
        model = Sequential(
            [layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
        )
        model.compile(
            optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
        )
        x = np.random.randn(16, 2)
        y = np.random.randn(16, 1)
        model.fit(
            x, y, batch_size=batch_size, epochs=1, verbose=0
        )  # Train briefly for evaluation to work.
        def custom_on_test_begin(logs):
            logging.warning("custom_on_test_begin_executed")
        lambda_log_callback = callbacks.LambdaCallback(
            on_test_begin=custom_on_test_begin
        )
        with self.assertLogs(level="WARNING") as logs:
            model.evaluate(
                x,
                y,
                batch_size=batch_size,
                callbacks=[lambda_log_callback],
                verbose=0,
            )
        self.assertTrue(
            any(
                "custom_on_test_begin_executed" in log
                for log in logs.output
            )
        )
    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_no_args(self):
        """Test initializing LambdaCallback without any arguments."""
        lambda_callback = callbacks.LambdaCallback()
        self.assertIsInstance(lambda_callback, callbacks.LambdaCallback)
    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_with_additional_kwargs(self):
        """Test initializing LambdaCallback with non-predefined kwargs."""
        def custom_callback(logs):
            pass
        lambda_callback = callbacks.LambdaCallback(
            custom_method=custom_callback
        )
        self.assertTrue(hasattr(lambda_callback, "custom_method"))
    @pytest.mark.requires_trainable_backend
    def test_lambda_callback_during_prediction(self):
        """Test LambdaCallback's functionality during model prediction."""
        batch_size = 4
        model = Sequential(
            [layers.Input(shape=(2,), batch_size=batch_size), layers.Dense(1)]
        )
        model.compile(
            optimizer=optimizers.SGD(), loss=losses.MeanSquaredError()
        )
        x = np.random.randn(16, 2)
        def custom_on_predict_begin(logs):
            logging.warning("on_predict_begin_executed")
        lambda_callback = callbacks.LambdaCallback(
            on_predict_begin=custom_on_predict_begin
        )
        with self.assertLogs(level="WARNING") as logs:
            model.predict(
                x, batch_size=batch_size, callbacks=[lambda_callback], verbose=0
            )
        self.assertTrue(
            any("on_predict_begin_executed" in log for log in logs.output)
        )
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@callbacks@lambda_callback_test.py@.PATH_END.py
|
{
"filename": "metrics.py",
"repo_name": "shaoshanglqy/shap-shapley",
"repo_path": "shap-shapley_extracted/shap-shapley-master/shap/benchmark/metrics.py",
"type": "Python"
}
|
from .. import LinearExplainer
from .. import KernelExplainer
from .. import SamplingExplainer
from ..explainers import other
from . import measures
from . import methods
import sklearn
from sklearn.model_selection import train_test_split
import numpy as np
import copy
import functools
import time
def consistency_guarantees(X, y, model_generator, method_name):
    """Return (None, score) rating a method's theoretical consistency guarantees.

    The score is a fixed lookup, independent of the data and model:
    1.0 - perfect consistency
    0.8 - guarantees depend on sampling
    0.6 - guarantees depend on approximation
    0.0 - no guarantees
    """
    score_by_method = {}
    for score, method_names in [
        (1.0, ["linear_shap_corr", "linear_shap_ind",
               "tree_shap_tree_path_dependent", "tree_shap_independent_1000",
               "mean_abs_tree_shap"]),
        (0.8, ["kernel_shap_1000_meanref", "sampling_shap_1000",
               "lime_tabular_regression_1000"]),
        (0.6, ["deep_shap", "expected_gradients"]),
        (0.0, ["coef", "random", "saabas", "tree_gain"]),
    ]:
        for name in method_names:
            score_by_method[name] = score
    return None, score_by_method[method_name]
def local_accuracy(X, y, model_generator, method_name):
    """Return (None, score) for the local-accuracy benchmark of *method_name*.

    Delegates the actual measurement to measures.local_accuracy via
    __score_method; score_map converts the normalized error into a fixed
    0.0-1.0 scale used for coloring.
    """
    def score_map(true, pred):
        """ Converts local accuracy from % of standard deviation to numerical scores for coloring.
        """
        # normalized error: std of the residual relative to std of the truth
        # (the 1e-8 guards against division by zero for constant targets)
        v = min(1.0, np.std(pred - true) / (np.std(true) + 1e-8))
        if v < 1e-6:
            return 1.0
        elif v < 0.01:
            return 0.9
        elif v < 0.05:
            return 0.75
        elif v < 0.1:
            return 0.6
        elif v < 0.2:
            return 0.4
        elif v < 0.3:
            return 0.3
        elif v < 0.5:
            return 0.2
        elif v < 0.7:
            return 0.1
        else:
            return 0.0
    def score_function(X_train, X_test, y_train, y_test, attr_function, trained_model):
        # no fcounts here: local accuracy is evaluated once per split
        return measures.local_accuracy(
            X_train, y_train, X_test, y_test, attr_function(X_test),
            model_generator, score_map, trained_model
        )
    return None, __score_method(X, y, None, model_generator, score_function, method_name)
def runtime(X, y, model_generator, method_name):
    """Return (None, seconds) measuring build + explain time of *method_name*.

    A model is trained on a train/test split, the method's explainer is
    built, and X_test is explained; the explain time is normalized as
    though 1000 samples had been explained.
    """
    # BUG FIX: np.random.seed() returns None, so the old pattern
    # `old_seed = np.random.seed(); ...; np.random.seed(old_seed)` reseeded
    # from OS entropy instead of restoring.  Save/restore the full RNG state.
    old_state = np.random.get_state()
    np.random.seed(3293)
    # average the method scores over several train/test splits
    method_reps = []
    for i in range(1):
        X_train, X_test, y_train, _ = train_test_split(__toarray(X), y, test_size=100, random_state=i)
        # define the model we are going to explain
        model = model_generator()
        model.fit(X_train, y_train)
        # time the explainer construction and the explanation separately
        start = time.time()
        explainer = getattr(methods, method_name)(model, X_train)
        build_time = time.time() - start
        start = time.time()
        explainer(X_test)
        explain_time = time.time() - start
        # we always normalize the explain time as though we were explaining 1000 samples
        # even if to reduce the runtime of the benchmark we do less (like just 100)
        method_reps.append(build_time + explain_time * 1000.0 / X_test.shape[0])
    np.random.set_state(old_state)
    return None, np.mean(method_reps)
def remove_positive(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_measure with measures.remove and attribution sign +1."""
    return __run_measure(measures.remove, X, y, model_generator, method_name, 1, num_fcounts)
def remove_negative(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_measure with measures.remove and attribution sign -1."""
    return __run_measure(measures.remove, X, y, model_generator, method_name, -1, num_fcounts)
def mask_remove_positive(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_measure with measures.mask_remove and attribution sign +1."""
    return __run_measure(measures.mask_remove, X, y, model_generator, method_name, 1, num_fcounts)
def mask_remove_negative(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_measure with measures.mask_remove and attribution sign -1."""
    return __run_measure(measures.mask_remove, X, y, model_generator, method_name, -1, num_fcounts)
def keep_positive(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_measure with measures.keep and attribution sign +1."""
    return __run_measure(measures.keep, X, y, model_generator, method_name, 1, num_fcounts)
def keep_negative(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_measure with measures.keep and attribution sign -1."""
    return __run_measure(measures.keep, X, y, model_generator, method_name, -1, num_fcounts)
def mask_keep_positive(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_measure with measures.mask_keep and attribution sign +1."""
    return __run_measure(measures.mask_keep, X, y, model_generator, method_name, 1, num_fcounts)
def mask_keep_negative(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_measure with measures.mask_keep and attribution sign -1."""
    return __run_measure(measures.mask_keep, X, y, model_generator, method_name, -1, num_fcounts)
def __run_measure(measure, X, y, model_generator, method_name, attribution_sign, num_fcounts):
    """Shared driver for the keep/remove metrics.

    Flips attributions by *attribution_sign*, caps the per-sample mask count
    by the number of non-negative attributions, and scores *measure* over a
    log-spaced grid of feature counts.  Returns (fcounts, scores).
    """
    def summary_function(true, pred):
        # these metrics summarize model output by its mean prediction
        return np.mean(pred)
    def score_function(fcount, X_train, X_test, y_train, y_test, attr_function, trained_model):
        A = attribution_sign * __strip_list(attr_function(X_test))
        nmask = np.ones(len(y_test)) * fcount
        # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is
        # the supported (and equivalent) spelling.
        nmask = np.minimum(nmask, np.array(A >= 0).sum(1)).astype(int)
        return measure(
            nmask, X_train, y_train, X_test, y_test, A,
            model_generator, summary_function, trained_model
        )
    fcounts = __intlogspace(0, X.shape[1], num_fcounts)
    return fcounts, __score_method(X, y, fcounts, model_generator, score_function, method_name)
def batch_remove_absolute__r2(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_batch_abs_metric with measures.batch_remove and R^2."""
    return __run_batch_abs_metric(measures.batch_remove, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
def batch_keep_absolute__r2(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_batch_abs_metric with measures.batch_keep and R^2."""
    return __run_batch_abs_metric(measures.batch_keep, X, y, model_generator, method_name, sklearn.metrics.r2_score, num_fcounts)
def batch_remove_absolute__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_batch_abs_metric with measures.batch_remove and ROC AUC."""
    return __run_batch_abs_metric(measures.batch_remove, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
def batch_keep_absolute__roc_auc(X, y, model_generator, method_name, num_fcounts=11):
    """Delegate to __run_batch_abs_metric with measures.batch_keep and ROC AUC."""
    return __run_batch_abs_metric(measures.batch_keep, X, y, model_generator, method_name, sklearn.metrics.roc_auc_score, num_fcounts)
def __run_batch_abs_metric(metric, X, y, model_generator, method_name, loss, num_fcounts):
    """Shared driver for the batch_* metrics.

    Uses absolute attributions on both splits, keeps a fixed number of
    features per sample, and scores *metric* with *loss* over a log-spaced
    grid of feature counts.  Returns (fcounts, scores).
    """
    def score_function(fcount, X_train, X_test, y_train, y_test, attr_function, trained_model):
        A_train = np.abs(__strip_list(attr_function(X_train)))
        # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is
        # the supported (and equivalent) spelling.
        nkeep_train = (np.ones(len(y_train)) * fcount).astype(int)
        #nkeep_train = np.minimum(nkeep_train, np.array(A_train > 0).sum(1)).astype(int)
        A_test = np.abs(__strip_list(attr_function(X_test)))
        nkeep_test = (np.ones(len(y_test)) * fcount).astype(int)
        #nkeep_test = np.minimum(nkeep_test, np.array(A_test >= 0).sum(1)).astype(int)
        return metric(
            nkeep_train, nkeep_test, X_train, y_train, X_test, y_test, A_train, A_test,
            model_generator, loss
        )
    fcounts = __intlogspace(0, X.shape[1], num_fcounts)
    return fcounts, __score_method(X, y, fcounts, model_generator, score_function, method_name)
def __score_method(X, y, fcounts, model_generator, score_function, method_name):
    """ Test an explanation method.

    Trains a fresh model on each of three train/test splits, builds the
    method's attribution function, and averages score_function over the
    splits (evaluated once per split, or once per entry of fcounts).
    """
    # BUG FIX: np.random.seed() returns None, so the old pattern
    # `old_seed = np.random.seed(); ...; np.random.seed(old_seed)` reseeded
    # from OS entropy instead of restoring.  Save/restore the full RNG state.
    old_state = np.random.get_state()
    np.random.seed(3293)
    # average the method scores over several train/test splits
    method_reps = []
    for i in range(3):
        X_train, X_test, y_train, y_test = train_test_split(__toarray(X), y, test_size=100, random_state=i)
        # define the model we are going to explain
        model = model_generator()
        model.fit(X_train, y_train)
        def score(attr_function):
            # memoize attributions so repeated fcount evaluations reuse them
            cached_attr_function = lambda X: __check_cache(attr_function, X)
            if fcounts is None:
                return score_function(X_train, X_test, y_train, y_test, cached_attr_function, model)
            else:
                scores = []
                for f in fcounts:
                    scores.append(score_function(f, X_train, X_test, y_train, y_test, cached_attr_function, model))
                return np.array(scores)
        # evaluate the method
        method_reps.append(score(getattr(methods, method_name)(model, X_train)))
    np.random.set_state(old_state)
    return np.array(method_reps).mean(0)
# used to memoize explainer functions so we don't waste time re-explaining the same object
# Slot 0: most recently used (function, input) -> output, compared by identity.
__cache0 = None
__cache_X0 = None
__cache_f0 = None
# Slot 1: second most recently used entry.
__cache1 = None
__cache_X1 = None
__cache_f1 = None
def __check_cache(f, X):
    """Return f(X), memoizing the two most recent (f, X) pairs.

    Hits are detected with ``is`` (object identity), not equality, so the
    cache only helps when the exact same objects are passed again.
    """
    global __cache0, __cache_X0, __cache_f0
    global __cache1, __cache_X1, __cache_f1
    if X is __cache_X0 and f is __cache_f0:
        return __cache0
    elif X is __cache_X1 and f is __cache_f1:
        return __cache1
    else:
        # miss: demote slot 0 into slot 1, then fill slot 0 with the new result
        __cache_f1 = __cache_f0
        __cache_X1 = __cache_X0
        __cache1 = __cache0
        __cache_f0 = f
        __cache_X0 = X
        __cache0 = f(X)
        return __cache0
def __intlogspace(start, end, count):
    """Return up to *count* unique log-spaced integers from *start* to *end* inclusive."""
    # BUG FIX: np.int was removed in NumPy 1.24; the builtin int is equivalent.
    return np.unique(np.round(start + (end-start) * (np.logspace(0, 1, count, endpoint=True) - 1) / 9).astype(int))
def __toarray(X):
    """ Converts DataFrames to numpy arrays.
    """
    # pandas objects expose .values; anything else passes through untouched
    return X.values if hasattr(X, "values") else X
def __strip_list(attrs):
    """ This assumes that if you have a list of outputs you just want the second one (the second class).
    """
    if not isinstance(attrs, list):
        return attrs
    return attrs[1]
|
shaoshanglqyREPO_NAMEshap-shapleyPATH_START.@shap-shapley_extracted@shap-shapley-master@shap@benchmark@metrics.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "hruedisser/3DCOREweb",
"repo_path": "3DCOREweb_extracted/3DCOREweb-main/src/coreweb/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from coreweb.methods import ABC_SMC
from coreweb.models import ToroidalModel
# Package metadata consumed by packaging tools and runtime introspection.
__author__ = "Andreas J. Weiss"
__copyright__ = "Copyright (C) 2019 Andreas J. Weiss"
__license__ = "MIT"
__version__ = "0.0.0"
# -*- coding: utf-8 -*-
|
hruedisserREPO_NAME3DCOREwebPATH_START.@3DCOREweb_extracted@3DCOREweb-main@src@coreweb@__init__.py@.PATH_END.py
|
{
"filename": "test_proposal.py",
"repo_name": "igomezv/simplemc_tests",
"repo_path": "simplemc_tests_extracted/simplemc_tests-main/simplemc/analyzers/emcee/tests/integration/test_proposal.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import numpy as np
import pytest
import emcee
try:
from scipy import stats
except ImportError:
stats = None
__all__ = ["_test_normal", "_test_uniform"]
def normal_log_prob_blobs(params):
    """Standard-normal log-probability that also returns the params as a blob."""
    log_prob = -0.5 * np.sum(params ** 2)
    return log_prob, params
def normal_log_prob(params):
    """Log-probability of an isotropic unit-variance Gaussian (up to a constant)."""
    return -(params ** 2).sum() / 2.0
def uniform_log_prob(params):
    """Log-probability of the unit hypercube: 0 inside, -inf outside."""
    out_of_bounds = np.any(params > 1) or np.any(params < 0)
    if out_of_bounds:
        return -np.inf
    return 0.0
def _test_normal(
    proposal,
    ndim=1,
    nwalkers=32,
    nsteps=2000,
    seed=1234,
    check_acceptance=True,
    pool=None,
    blobs=False,
):
    """Integration check: run *proposal* on a standard-normal target and
    verify acceptance fraction, sample mean/std, and (for 1-D) a K-S test.
    """
    # Set up the random number generator.
    np.random.seed(seed)
    # Initialize the ensemble and proposal.
    coords = np.random.randn(nwalkers, ndim)
    if blobs:
        lp = normal_log_prob_blobs
    else:
        lp = normal_log_prob
    sampler = emcee.EnsembleSampler(
        nwalkers, ndim, lp, moves=proposal, pool=pool
    )
    if hasattr(proposal, "ntune") and proposal.ntune > 0:
        # burn in with tuning enabled, then discard those samples
        coords = sampler.run_mcmc(coords, proposal.ntune, tune=True)
        sampler.reset()
    sampler.run_mcmc(coords, nsteps)
    # Check the acceptance fraction.
    if check_acceptance:
        acc = sampler.acceptance_fraction
        assert np.all(
            (acc < 0.9) * (acc > 0.1)
        ), "Invalid acceptance fraction\n{0}".format(acc)
    # Check the resulting chain using a K-S test and compare to the mean and
    # standard deviation.
    samps = sampler.get_chain(flat=True)
    mu, sig = np.mean(samps, axis=0), np.std(samps, axis=0)
    assert np.all(np.abs(mu) < 0.08), "Incorrect mean"
    assert np.all(np.abs(sig - 1) < 0.05), "Incorrect standard deviation"
    if ndim == 1 and stats is not None:
        ks, _ = stats.kstest(samps[:, 0], "norm")
        assert ks < 0.05, "The K-S test failed"
def _test_uniform(proposal, nwalkers=32, nsteps=2000, seed=1234):
    """Integration check: start walkers uniformly in [0, 1) and verify the
    sampled chain stops looking uniform.

    NOTE(review): the target is normal_log_prob, not uniform_log_prob, and
    the final assert expects a *large* K-S statistic against "uniform" —
    the mismatch with the function name appears deliberate; confirm against
    upstream intent before changing.
    """
    # Set up the random number generator.
    np.random.seed(seed)
    # Initialize the ensemble and proposal.
    coords = np.random.rand(nwalkers, 1)
    sampler = emcee.EnsembleSampler(
        nwalkers, 1, normal_log_prob, moves=proposal
    )
    sampler.run_mcmc(coords, nsteps)
    # Check the acceptance fraction.
    acc = sampler.acceptance_fraction
    assert np.all(
        (acc < 0.9) * (acc > 0.1)
    ), "Invalid acceptance fraction\n{0}".format(acc)
    if stats is not None:
        # Check that the resulting chain "fails" the K-S test.
        samps = sampler.get_chain(flat=True)
        np.random.shuffle(samps)
        ks, _ = stats.kstest(samps[::100], "uniform")
        assert ks > 0.1, "The K-S test failed"
|
igomezvREPO_NAMEsimplemc_testsPATH_START.@simplemc_tests_extracted@simplemc_tests-main@simplemc@analyzers@emcee@tests@integration@test_proposal.py@.PATH_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/hoverlabel/font/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``familysrc`` property of ``scatter3d.hoverlabel.font``."""
    def __init__(
        self, plotly_name="familysrc", parent_name="scatter3d.hoverlabel.font", **kwargs
    ):
        super(FamilysrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # callers may override edit_type via kwargs; defaults to "none"
            edit_type=kwargs.pop("edit_type", "none"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@hoverlabel@font@_familysrc.py@.PATH_END.py
|
{
"filename": "colormaps.py",
"repo_name": "orlox/mesa_input_data",
"repo_path": "mesa_input_data_extracted/mesa_input_data-master/2016_ULX/scripts/NSBH/colormaps.py",
"type": "Python"
}
|
# New matplotlib colormaps by Nathaniel J. Smith, Stefan van der Walt,
# and (in the case of viridis) Eric Firing.
#
# This file and the colormaps in it are released under the CC0 license /
# public domain dedication. We would appreciate credit if you use or
# redistribute these colormaps, but do not impose any legal restrictions.
#
# To the extent possible under law, the persons who associated CC0 with
# mpl-colormaps have waived all copyright and related or neighboring rights
# to mpl-colormaps.
#
# You should have received a copy of the CC0 legalcode along with this
# work. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
__all__ = ['magma', 'inferno', 'plasma', 'viridis']
_magma_data = [[0.001462, 0.000466, 0.013866],
[0.002258, 0.001295, 0.018331],
[0.003279, 0.002305, 0.023708],
[0.004512, 0.003490, 0.029965],
[0.005950, 0.004843, 0.037130],
[0.007588, 0.006356, 0.044973],
[0.009426, 0.008022, 0.052844],
[0.011465, 0.009828, 0.060750],
[0.013708, 0.011771, 0.068667],
[0.016156, 0.013840, 0.076603],
[0.018815, 0.016026, 0.084584],
[0.021692, 0.018320, 0.092610],
[0.024792, 0.020715, 0.100676],
[0.028123, 0.023201, 0.108787],
[0.031696, 0.025765, 0.116965],
[0.035520, 0.028397, 0.125209],
[0.039608, 0.031090, 0.133515],
[0.043830, 0.033830, 0.141886],
[0.048062, 0.036607, 0.150327],
[0.052320, 0.039407, 0.158841],
[0.056615, 0.042160, 0.167446],
[0.060949, 0.044794, 0.176129],
[0.065330, 0.047318, 0.184892],
[0.069764, 0.049726, 0.193735],
[0.074257, 0.052017, 0.202660],
[0.078815, 0.054184, 0.211667],
[0.083446, 0.056225, 0.220755],
[0.088155, 0.058133, 0.229922],
[0.092949, 0.059904, 0.239164],
[0.097833, 0.061531, 0.248477],
[0.102815, 0.063010, 0.257854],
[0.107899, 0.064335, 0.267289],
[0.113094, 0.065492, 0.276784],
[0.118405, 0.066479, 0.286321],
[0.123833, 0.067295, 0.295879],
[0.129380, 0.067935, 0.305443],
[0.135053, 0.068391, 0.315000],
[0.140858, 0.068654, 0.324538],
[0.146785, 0.068738, 0.334011],
[0.152839, 0.068637, 0.343404],
[0.159018, 0.068354, 0.352688],
[0.165308, 0.067911, 0.361816],
[0.171713, 0.067305, 0.370771],
[0.178212, 0.066576, 0.379497],
[0.184801, 0.065732, 0.387973],
[0.191460, 0.064818, 0.396152],
[0.198177, 0.063862, 0.404009],
[0.204935, 0.062907, 0.411514],
[0.211718, 0.061992, 0.418647],
[0.218512, 0.061158, 0.425392],
[0.225302, 0.060445, 0.431742],
[0.232077, 0.059889, 0.437695],
[0.238826, 0.059517, 0.443256],
[0.245543, 0.059352, 0.448436],
[0.252220, 0.059415, 0.453248],
[0.258857, 0.059706, 0.457710],
[0.265447, 0.060237, 0.461840],
[0.271994, 0.060994, 0.465660],
[0.278493, 0.061978, 0.469190],
[0.284951, 0.063168, 0.472451],
[0.291366, 0.064553, 0.475462],
[0.297740, 0.066117, 0.478243],
[0.304081, 0.067835, 0.480812],
[0.310382, 0.069702, 0.483186],
[0.316654, 0.071690, 0.485380],
[0.322899, 0.073782, 0.487408],
[0.329114, 0.075972, 0.489287],
[0.335308, 0.078236, 0.491024],
[0.341482, 0.080564, 0.492631],
[0.347636, 0.082946, 0.494121],
[0.353773, 0.085373, 0.495501],
[0.359898, 0.087831, 0.496778],
[0.366012, 0.090314, 0.497960],
[0.372116, 0.092816, 0.499053],
[0.378211, 0.095332, 0.500067],
[0.384299, 0.097855, 0.501002],
[0.390384, 0.100379, 0.501864],
[0.396467, 0.102902, 0.502658],
[0.402548, 0.105420, 0.503386],
[0.408629, 0.107930, 0.504052],
[0.414709, 0.110431, 0.504662],
[0.420791, 0.112920, 0.505215],
[0.426877, 0.115395, 0.505714],
[0.432967, 0.117855, 0.506160],
[0.439062, 0.120298, 0.506555],
[0.445163, 0.122724, 0.506901],
[0.451271, 0.125132, 0.507198],
[0.457386, 0.127522, 0.507448],
[0.463508, 0.129893, 0.507652],
[0.469640, 0.132245, 0.507809],
[0.475780, 0.134577, 0.507921],
[0.481929, 0.136891, 0.507989],
[0.488088, 0.139186, 0.508011],
[0.494258, 0.141462, 0.507988],
[0.500438, 0.143719, 0.507920],
[0.506629, 0.145958, 0.507806],
[0.512831, 0.148179, 0.507648],
[0.519045, 0.150383, 0.507443],
[0.525270, 0.152569, 0.507192],
[0.531507, 0.154739, 0.506895],
[0.537755, 0.156894, 0.506551],
[0.544015, 0.159033, 0.506159],
[0.550287, 0.161158, 0.505719],
[0.556571, 0.163269, 0.505230],
[0.562866, 0.165368, 0.504692],
[0.569172, 0.167454, 0.504105],
[0.575490, 0.169530, 0.503466],
[0.581819, 0.171596, 0.502777],
[0.588158, 0.173652, 0.502035],
[0.594508, 0.175701, 0.501241],
[0.600868, 0.177743, 0.500394],
[0.607238, 0.179779, 0.499492],
[0.613617, 0.181811, 0.498536],
[0.620005, 0.183840, 0.497524],
[0.626401, 0.185867, 0.496456],
[0.632805, 0.187893, 0.495332],
[0.639216, 0.189921, 0.494150],
[0.645633, 0.191952, 0.492910],
[0.652056, 0.193986, 0.491611],
[0.658483, 0.196027, 0.490253],
[0.664915, 0.198075, 0.488836],
[0.671349, 0.200133, 0.487358],
[0.677786, 0.202203, 0.485819],
[0.684224, 0.204286, 0.484219],
[0.690661, 0.206384, 0.482558],
[0.697098, 0.208501, 0.480835],
[0.703532, 0.210638, 0.479049],
[0.709962, 0.212797, 0.477201],
[0.716387, 0.214982, 0.475290],
[0.722805, 0.217194, 0.473316],
[0.729216, 0.219437, 0.471279],
[0.735616, 0.221713, 0.469180],
[0.742004, 0.224025, 0.467018],
[0.748378, 0.226377, 0.464794],
[0.754737, 0.228772, 0.462509],
[0.761077, 0.231214, 0.460162],
[0.767398, 0.233705, 0.457755],
[0.773695, 0.236249, 0.455289],
[0.779968, 0.238851, 0.452765],
[0.786212, 0.241514, 0.450184],
[0.792427, 0.244242, 0.447543],
[0.798608, 0.247040, 0.444848],
[0.804752, 0.249911, 0.442102],
[0.810855, 0.252861, 0.439305],
[0.816914, 0.255895, 0.436461],
[0.822926, 0.259016, 0.433573],
[0.828886, 0.262229, 0.430644],
[0.834791, 0.265540, 0.427671],
[0.840636, 0.268953, 0.424666],
[0.846416, 0.272473, 0.421631],
[0.852126, 0.276106, 0.418573],
[0.857763, 0.279857, 0.415496],
[0.863320, 0.283729, 0.412403],
[0.868793, 0.287728, 0.409303],
[0.874176, 0.291859, 0.406205],
[0.879464, 0.296125, 0.403118],
[0.884651, 0.300530, 0.400047],
[0.889731, 0.305079, 0.397002],
[0.894700, 0.309773, 0.393995],
[0.899552, 0.314616, 0.391037],
[0.904281, 0.319610, 0.388137],
[0.908884, 0.324755, 0.385308],
[0.913354, 0.330052, 0.382563],
[0.917689, 0.335500, 0.379915],
[0.921884, 0.341098, 0.377376],
[0.925937, 0.346844, 0.374959],
[0.929845, 0.352734, 0.372677],
[0.933606, 0.358764, 0.370541],
[0.937221, 0.364929, 0.368567],
[0.940687, 0.371224, 0.366762],
[0.944006, 0.377643, 0.365136],
[0.947180, 0.384178, 0.363701],
[0.950210, 0.390820, 0.362468],
[0.953099, 0.397563, 0.361438],
[0.955849, 0.404400, 0.360619],
[0.958464, 0.411324, 0.360014],
[0.960949, 0.418323, 0.359630],
[0.963310, 0.425390, 0.359469],
[0.965549, 0.432519, 0.359529],
[0.967671, 0.439703, 0.359810],
[0.969680, 0.446936, 0.360311],
[0.971582, 0.454210, 0.361030],
[0.973381, 0.461520, 0.361965],
[0.975082, 0.468861, 0.363111],
[0.976690, 0.476226, 0.364466],
[0.978210, 0.483612, 0.366025],
[0.979645, 0.491014, 0.367783],
[0.981000, 0.498428, 0.369734],
[0.982279, 0.505851, 0.371874],
[0.983485, 0.513280, 0.374198],
[0.984622, 0.520713, 0.376698],
[0.985693, 0.528148, 0.379371],
[0.986700, 0.535582, 0.382210],
[0.987646, 0.543015, 0.385210],
[0.988533, 0.550446, 0.388365],
[0.989363, 0.557873, 0.391671],
[0.990138, 0.565296, 0.395122],
[0.990871, 0.572706, 0.398714],
[0.991558, 0.580107, 0.402441],
[0.992196, 0.587502, 0.406299],
[0.992785, 0.594891, 0.410283],
[0.993326, 0.602275, 0.414390],
[0.993834, 0.609644, 0.418613],
[0.994309, 0.616999, 0.422950],
[0.994738, 0.624350, 0.427397],
[0.995122, 0.631696, 0.431951],
[0.995480, 0.639027, 0.436607],
[0.995810, 0.646344, 0.441361],
[0.996096, 0.653659, 0.446213],
[0.996341, 0.660969, 0.451160],
[0.996580, 0.668256, 0.456192],
[0.996775, 0.675541, 0.461314],
[0.996925, 0.682828, 0.466526],
[0.997077, 0.690088, 0.471811],
[0.997186, 0.697349, 0.477182],
[0.997254, 0.704611, 0.482635],
[0.997325, 0.711848, 0.488154],
[0.997351, 0.719089, 0.493755],
[0.997351, 0.726324, 0.499428],
[0.997341, 0.733545, 0.505167],
[0.997285, 0.740772, 0.510983],
[0.997228, 0.747981, 0.516859],
[0.997138, 0.755190, 0.522806],
[0.997019, 0.762398, 0.528821],
[0.996898, 0.769591, 0.534892],
[0.996727, 0.776795, 0.541039],
[0.996571, 0.783977, 0.547233],
[0.996369, 0.791167, 0.553499],
[0.996162, 0.798348, 0.559820],
[0.995932, 0.805527, 0.566202],
[0.995680, 0.812706, 0.572645],
[0.995424, 0.819875, 0.579140],
[0.995131, 0.827052, 0.585701],
[0.994851, 0.834213, 0.592307],
[0.994524, 0.841387, 0.598983],
[0.994222, 0.848540, 0.605696],
[0.993866, 0.855711, 0.612482],
[0.993545, 0.862859, 0.619299],
[0.993170, 0.870024, 0.626189],
[0.992831, 0.877168, 0.633109],
[0.992440, 0.884330, 0.640099],
[0.992089, 0.891470, 0.647116],
[0.991688, 0.898627, 0.654202],
[0.991332, 0.905763, 0.661309],
[0.990930, 0.912915, 0.668481],
[0.990570, 0.920049, 0.675675],
[0.990175, 0.927196, 0.682926],
[0.989815, 0.934329, 0.690198],
[0.989434, 0.941470, 0.697519],
[0.989077, 0.948604, 0.704863],
[0.988717, 0.955742, 0.712242],
[0.988367, 0.962878, 0.719649],
[0.988033, 0.970012, 0.727077],
[0.987691, 0.977154, 0.734536],
[0.987387, 0.984288, 0.742002],
[0.987053, 0.991438, 0.749504]]
_inferno_data = [[0.001462, 0.000466, 0.013866],
[0.002267, 0.001270, 0.018570],
[0.003299, 0.002249, 0.024239],
[0.004547, 0.003392, 0.030909],
[0.006006, 0.004692, 0.038558],
[0.007676, 0.006136, 0.046836],
[0.009561, 0.007713, 0.055143],
[0.011663, 0.009417, 0.063460],
[0.013995, 0.011225, 0.071862],
[0.016561, 0.013136, 0.080282],
[0.019373, 0.015133, 0.088767],
[0.022447, 0.017199, 0.097327],
[0.025793, 0.019331, 0.105930],
[0.029432, 0.021503, 0.114621],
[0.033385, 0.023702, 0.123397],
[0.037668, 0.025921, 0.132232],
[0.042253, 0.028139, 0.141141],
[0.046915, 0.030324, 0.150164],
[0.051644, 0.032474, 0.159254],
[0.056449, 0.034569, 0.168414],
[0.061340, 0.036590, 0.177642],
[0.066331, 0.038504, 0.186962],
[0.071429, 0.040294, 0.196354],
[0.076637, 0.041905, 0.205799],
[0.081962, 0.043328, 0.215289],
[0.087411, 0.044556, 0.224813],
[0.092990, 0.045583, 0.234358],
[0.098702, 0.046402, 0.243904],
[0.104551, 0.047008, 0.253430],
[0.110536, 0.047399, 0.262912],
[0.116656, 0.047574, 0.272321],
[0.122908, 0.047536, 0.281624],
[0.129285, 0.047293, 0.290788],
[0.135778, 0.046856, 0.299776],
[0.142378, 0.046242, 0.308553],
[0.149073, 0.045468, 0.317085],
[0.155850, 0.044559, 0.325338],
[0.162689, 0.043554, 0.333277],
[0.169575, 0.042489, 0.340874],
[0.176493, 0.041402, 0.348111],
[0.183429, 0.040329, 0.354971],
[0.190367, 0.039309, 0.361447],
[0.197297, 0.038400, 0.367535],
[0.204209, 0.037632, 0.373238],
[0.211095, 0.037030, 0.378563],
[0.217949, 0.036615, 0.383522],
[0.224763, 0.036405, 0.388129],
[0.231538, 0.036405, 0.392400],
[0.238273, 0.036621, 0.396353],
[0.244967, 0.037055, 0.400007],
[0.251620, 0.037705, 0.403378],
[0.258234, 0.038571, 0.406485],
[0.264810, 0.039647, 0.409345],
[0.271347, 0.040922, 0.411976],
[0.277850, 0.042353, 0.414392],
[0.284321, 0.043933, 0.416608],
[0.290763, 0.045644, 0.418637],
[0.297178, 0.047470, 0.420491],
[0.303568, 0.049396, 0.422182],
[0.309935, 0.051407, 0.423721],
[0.316282, 0.053490, 0.425116],
[0.322610, 0.055634, 0.426377],
[0.328921, 0.057827, 0.427511],
[0.335217, 0.060060, 0.428524],
[0.341500, 0.062325, 0.429425],
[0.347771, 0.064616, 0.430217],
[0.354032, 0.066925, 0.430906],
[0.360284, 0.069247, 0.431497],
[0.366529, 0.071579, 0.431994],
[0.372768, 0.073915, 0.432400],
[0.379001, 0.076253, 0.432719],
[0.385228, 0.078591, 0.432955],
[0.391453, 0.080927, 0.433109],
[0.397674, 0.083257, 0.433183],
[0.403894, 0.085580, 0.433179],
[0.410113, 0.087896, 0.433098],
[0.416331, 0.090203, 0.432943],
[0.422549, 0.092501, 0.432714],
[0.428768, 0.094790, 0.432412],
[0.434987, 0.097069, 0.432039],
[0.441207, 0.099338, 0.431594],
[0.447428, 0.101597, 0.431080],
[0.453651, 0.103848, 0.430498],
[0.459875, 0.106089, 0.429846],
[0.466100, 0.108322, 0.429125],
[0.472328, 0.110547, 0.428334],
[0.478558, 0.112764, 0.427475],
[0.484789, 0.114974, 0.426548],
[0.491022, 0.117179, 0.425552],
[0.497257, 0.119379, 0.424488],
[0.503493, 0.121575, 0.423356],
[0.509730, 0.123769, 0.422156],
[0.515967, 0.125960, 0.420887],
[0.522206, 0.128150, 0.419549],
[0.528444, 0.130341, 0.418142],
[0.534683, 0.132534, 0.416667],
[0.540920, 0.134729, 0.415123],
[0.547157, 0.136929, 0.413511],
[0.553392, 0.139134, 0.411829],
[0.559624, 0.141346, 0.410078],
[0.565854, 0.143567, 0.408258],
[0.572081, 0.145797, 0.406369],
[0.578304, 0.148039, 0.404411],
[0.584521, 0.150294, 0.402385],
[0.590734, 0.152563, 0.400290],
[0.596940, 0.154848, 0.398125],
[0.603139, 0.157151, 0.395891],
[0.609330, 0.159474, 0.393589],
[0.615513, 0.161817, 0.391219],
[0.621685, 0.164184, 0.388781],
[0.627847, 0.166575, 0.386276],
[0.633998, 0.168992, 0.383704],
[0.640135, 0.171438, 0.381065],
[0.646260, 0.173914, 0.378359],
[0.652369, 0.176421, 0.375586],
[0.658463, 0.178962, 0.372748],
[0.664540, 0.181539, 0.369846],
[0.670599, 0.184153, 0.366879],
[0.676638, 0.186807, 0.363849],
[0.682656, 0.189501, 0.360757],
[0.688653, 0.192239, 0.357603],
[0.694627, 0.195021, 0.354388],
[0.700576, 0.197851, 0.351113],
[0.706500, 0.200728, 0.347777],
[0.712396, 0.203656, 0.344383],
[0.718264, 0.206636, 0.340931],
[0.724103, 0.209670, 0.337424],
[0.729909, 0.212759, 0.333861],
[0.735683, 0.215906, 0.330245],
[0.741423, 0.219112, 0.326576],
[0.747127, 0.222378, 0.322856],
[0.752794, 0.225706, 0.319085],
[0.758422, 0.229097, 0.315266],
[0.764010, 0.232554, 0.311399],
[0.769556, 0.236077, 0.307485],
[0.775059, 0.239667, 0.303526],
[0.780517, 0.243327, 0.299523],
[0.785929, 0.247056, 0.295477],
[0.791293, 0.250856, 0.291390],
[0.796607, 0.254728, 0.287264],
[0.801871, 0.258674, 0.283099],
[0.807082, 0.262692, 0.278898],
[0.812239, 0.266786, 0.274661],
[0.817341, 0.270954, 0.270390],
[0.822386, 0.275197, 0.266085],
[0.827372, 0.279517, 0.261750],
[0.832299, 0.283913, 0.257383],
[0.837165, 0.288385, 0.252988],
[0.841969, 0.292933, 0.248564],
[0.846709, 0.297559, 0.244113],
[0.851384, 0.302260, 0.239636],
[0.855992, 0.307038, 0.235133],
[0.860533, 0.311892, 0.230606],
[0.865006, 0.316822, 0.226055],
[0.869409, 0.321827, 0.221482],
[0.873741, 0.326906, 0.216886],
[0.878001, 0.332060, 0.212268],
[0.882188, 0.337287, 0.207628],
[0.886302, 0.342586, 0.202968],
[0.890341, 0.347957, 0.198286],
[0.894305, 0.353399, 0.193584],
[0.898192, 0.358911, 0.188860],
[0.902003, 0.364492, 0.184116],
[0.905735, 0.370140, 0.179350],
[0.909390, 0.375856, 0.174563],
[0.912966, 0.381636, 0.169755],
[0.916462, 0.387481, 0.164924],
[0.919879, 0.393389, 0.160070],
[0.923215, 0.399359, 0.155193],
[0.926470, 0.405389, 0.150292],
[0.929644, 0.411479, 0.145367],
[0.932737, 0.417627, 0.140417],
[0.935747, 0.423831, 0.135440],
[0.938675, 0.430091, 0.130438],
[0.941521, 0.436405, 0.125409],
[0.944285, 0.442772, 0.120354],
[0.946965, 0.449191, 0.115272],
[0.949562, 0.455660, 0.110164],
[0.952075, 0.462178, 0.105031],
[0.954506, 0.468744, 0.099874],
[0.956852, 0.475356, 0.094695],
[0.959114, 0.482014, 0.089499],
[0.961293, 0.488716, 0.084289],
[0.963387, 0.495462, 0.079073],
[0.965397, 0.502249, 0.073859],
[0.967322, 0.509078, 0.068659],
[0.969163, 0.515946, 0.063488],
[0.970919, 0.522853, 0.058367],
[0.972590, 0.529798, 0.053324],
[0.974176, 0.536780, 0.048392],
[0.975677, 0.543798, 0.043618],
[0.977092, 0.550850, 0.039050],
[0.978422, 0.557937, 0.034931],
[0.979666, 0.565057, 0.031409],
[0.980824, 0.572209, 0.028508],
[0.981895, 0.579392, 0.026250],
[0.982881, 0.586606, 0.024661],
[0.983779, 0.593849, 0.023770],
[0.984591, 0.601122, 0.023606],
[0.985315, 0.608422, 0.024202],
[0.985952, 0.615750, 0.025592],
[0.986502, 0.623105, 0.027814],
[0.986964, 0.630485, 0.030908],
[0.987337, 0.637890, 0.034916],
[0.987622, 0.645320, 0.039886],
[0.987819, 0.652773, 0.045581],
[0.987926, 0.660250, 0.051750],
[0.987945, 0.667748, 0.058329],
[0.987874, 0.675267, 0.065257],
[0.987714, 0.682807, 0.072489],
[0.987464, 0.690366, 0.079990],
[0.987124, 0.697944, 0.087731],
[0.986694, 0.705540, 0.095694],
[0.986175, 0.713153, 0.103863],
[0.985566, 0.720782, 0.112229],
[0.984865, 0.728427, 0.120785],
[0.984075, 0.736087, 0.129527],
[0.983196, 0.743758, 0.138453],
[0.982228, 0.751442, 0.147565],
[0.981173, 0.759135, 0.156863],
[0.980032, 0.766837, 0.166353],
[0.978806, 0.774545, 0.176037],
[0.977497, 0.782258, 0.185923],
[0.976108, 0.789974, 0.196018],
[0.974638, 0.797692, 0.206332],
[0.973088, 0.805409, 0.216877],
[0.971468, 0.813122, 0.227658],
[0.969783, 0.820825, 0.238686],
[0.968041, 0.828515, 0.249972],
[0.966243, 0.836191, 0.261534],
[0.964394, 0.843848, 0.273391],
[0.962517, 0.851476, 0.285546],
[0.960626, 0.859069, 0.298010],
[0.958720, 0.866624, 0.310820],
[0.956834, 0.874129, 0.323974],
[0.954997, 0.881569, 0.337475],
[0.953215, 0.888942, 0.351369],
[0.951546, 0.896226, 0.365627],
[0.950018, 0.903409, 0.380271],
[0.948683, 0.910473, 0.395289],
[0.947594, 0.917399, 0.410665],
[0.946809, 0.924168, 0.426373],
[0.946392, 0.930761, 0.442367],
[0.946403, 0.937159, 0.458592],
[0.946903, 0.943348, 0.474970],
[0.947937, 0.949318, 0.491426],
[0.949545, 0.955063, 0.507860],
[0.951740, 0.960587, 0.524203],
[0.954529, 0.965896, 0.540361],
[0.957896, 0.971003, 0.556275],
[0.961812, 0.975924, 0.571925],
[0.966249, 0.980678, 0.587206],
[0.971162, 0.985282, 0.602154],
[0.976511, 0.989753, 0.616760],
[0.982257, 0.994109, 0.631017],
[0.988362, 0.998364, 0.644924]]
_plasma_data = [[0.050383, 0.029803, 0.527975],
[0.063536, 0.028426, 0.533124],
[0.075353, 0.027206, 0.538007],
[0.086222, 0.026125, 0.542658],
[0.096379, 0.025165, 0.547103],
[0.105980, 0.024309, 0.551368],
[0.115124, 0.023556, 0.555468],
[0.123903, 0.022878, 0.559423],
[0.132381, 0.022258, 0.563250],
[0.140603, 0.021687, 0.566959],
[0.148607, 0.021154, 0.570562],
[0.156421, 0.020651, 0.574065],
[0.164070, 0.020171, 0.577478],
[0.171574, 0.019706, 0.580806],
[0.178950, 0.019252, 0.584054],
[0.186213, 0.018803, 0.587228],
[0.193374, 0.018354, 0.590330],
[0.200445, 0.017902, 0.593364],
[0.207435, 0.017442, 0.596333],
[0.214350, 0.016973, 0.599239],
[0.221197, 0.016497, 0.602083],
[0.227983, 0.016007, 0.604867],
[0.234715, 0.015502, 0.607592],
[0.241396, 0.014979, 0.610259],
[0.248032, 0.014439, 0.612868],
[0.254627, 0.013882, 0.615419],
[0.261183, 0.013308, 0.617911],
[0.267703, 0.012716, 0.620346],
[0.274191, 0.012109, 0.622722],
[0.280648, 0.011488, 0.625038],
[0.287076, 0.010855, 0.627295],
[0.293478, 0.010213, 0.629490],
[0.299855, 0.009561, 0.631624],
[0.306210, 0.008902, 0.633694],
[0.312543, 0.008239, 0.635700],
[0.318856, 0.007576, 0.637640],
[0.325150, 0.006915, 0.639512],
[0.331426, 0.006261, 0.641316],
[0.337683, 0.005618, 0.643049],
[0.343925, 0.004991, 0.644710],
[0.350150, 0.004382, 0.646298],
[0.356359, 0.003798, 0.647810],
[0.362553, 0.003243, 0.649245],
[0.368733, 0.002724, 0.650601],
[0.374897, 0.002245, 0.651876],
[0.381047, 0.001814, 0.653068],
[0.387183, 0.001434, 0.654177],
[0.393304, 0.001114, 0.655199],
[0.399411, 0.000859, 0.656133],
[0.405503, 0.000678, 0.656977],
[0.411580, 0.000577, 0.657730],
[0.417642, 0.000564, 0.658390],
[0.423689, 0.000646, 0.658956],
[0.429719, 0.000831, 0.659425],
[0.435734, 0.001127, 0.659797],
[0.441732, 0.001540, 0.660069],
[0.447714, 0.002080, 0.660240],
[0.453677, 0.002755, 0.660310],
[0.459623, 0.003574, 0.660277],
[0.465550, 0.004545, 0.660139],
[0.471457, 0.005678, 0.659897],
[0.477344, 0.006980, 0.659549],
[0.483210, 0.008460, 0.659095],
[0.489055, 0.010127, 0.658534],
[0.494877, 0.011990, 0.657865],
[0.500678, 0.014055, 0.657088],
[0.506454, 0.016333, 0.656202],
[0.512206, 0.018833, 0.655209],
[0.517933, 0.021563, 0.654109],
[0.523633, 0.024532, 0.652901],
[0.529306, 0.027747, 0.651586],
[0.534952, 0.031217, 0.650165],
[0.540570, 0.034950, 0.648640],
[0.546157, 0.038954, 0.647010],
[0.551715, 0.043136, 0.645277],
[0.557243, 0.047331, 0.643443],
[0.562738, 0.051545, 0.641509],
[0.568201, 0.055778, 0.639477],
[0.573632, 0.060028, 0.637349],
[0.579029, 0.064296, 0.635126],
[0.584391, 0.068579, 0.632812],
[0.589719, 0.072878, 0.630408],
[0.595011, 0.077190, 0.627917],
[0.600266, 0.081516, 0.625342],
[0.605485, 0.085854, 0.622686],
[0.610667, 0.090204, 0.619951],
[0.615812, 0.094564, 0.617140],
[0.620919, 0.098934, 0.614257],
[0.625987, 0.103312, 0.611305],
[0.631017, 0.107699, 0.608287],
[0.636008, 0.112092, 0.605205],
[0.640959, 0.116492, 0.602065],
[0.645872, 0.120898, 0.598867],
[0.650746, 0.125309, 0.595617],
[0.655580, 0.129725, 0.592317],
[0.660374, 0.134144, 0.588971],
[0.665129, 0.138566, 0.585582],
[0.669845, 0.142992, 0.582154],
[0.674522, 0.147419, 0.578688],
[0.679160, 0.151848, 0.575189],
[0.683758, 0.156278, 0.571660],
[0.688318, 0.160709, 0.568103],
[0.692840, 0.165141, 0.564522],
[0.697324, 0.169573, 0.560919],
[0.701769, 0.174005, 0.557296],
[0.706178, 0.178437, 0.553657],
[0.710549, 0.182868, 0.550004],
[0.714883, 0.187299, 0.546338],
[0.719181, 0.191729, 0.542663],
[0.723444, 0.196158, 0.538981],
[0.727670, 0.200586, 0.535293],
[0.731862, 0.205013, 0.531601],
[0.736019, 0.209439, 0.527908],
[0.740143, 0.213864, 0.524216],
[0.744232, 0.218288, 0.520524],
[0.748289, 0.222711, 0.516834],
[0.752312, 0.227133, 0.513149],
[0.756304, 0.231555, 0.509468],
[0.760264, 0.235976, 0.505794],
[0.764193, 0.240396, 0.502126],
[0.768090, 0.244817, 0.498465],
[0.771958, 0.249237, 0.494813],
[0.775796, 0.253658, 0.491171],
[0.779604, 0.258078, 0.487539],
[0.783383, 0.262500, 0.483918],
[0.787133, 0.266922, 0.480307],
[0.790855, 0.271345, 0.476706],
[0.794549, 0.275770, 0.473117],
[0.798216, 0.280197, 0.469538],
[0.801855, 0.284626, 0.465971],
[0.805467, 0.289057, 0.462415],
[0.809052, 0.293491, 0.458870],
[0.812612, 0.297928, 0.455338],
[0.816144, 0.302368, 0.451816],
[0.819651, 0.306812, 0.448306],
[0.823132, 0.311261, 0.444806],
[0.826588, 0.315714, 0.441316],
[0.830018, 0.320172, 0.437836],
[0.833422, 0.324635, 0.434366],
[0.836801, 0.329105, 0.430905],
[0.840155, 0.333580, 0.427455],
[0.843484, 0.338062, 0.424013],
[0.846788, 0.342551, 0.420579],
[0.850066, 0.347048, 0.417153],
[0.853319, 0.351553, 0.413734],
[0.856547, 0.356066, 0.410322],
[0.859750, 0.360588, 0.406917],
[0.862927, 0.365119, 0.403519],
[0.866078, 0.369660, 0.400126],
[0.869203, 0.374212, 0.396738],
[0.872303, 0.378774, 0.393355],
[0.875376, 0.383347, 0.389976],
[0.878423, 0.387932, 0.386600],
[0.881443, 0.392529, 0.383229],
[0.884436, 0.397139, 0.379860],
[0.887402, 0.401762, 0.376494],
[0.890340, 0.406398, 0.373130],
[0.893250, 0.411048, 0.369768],
[0.896131, 0.415712, 0.366407],
[0.898984, 0.420392, 0.363047],
[0.901807, 0.425087, 0.359688],
[0.904601, 0.429797, 0.356329],
[0.907365, 0.434524, 0.352970],
[0.910098, 0.439268, 0.349610],
[0.912800, 0.444029, 0.346251],
[0.915471, 0.448807, 0.342890],
[0.918109, 0.453603, 0.339529],
[0.920714, 0.458417, 0.336166],
[0.923287, 0.463251, 0.332801],
[0.925825, 0.468103, 0.329435],
[0.928329, 0.472975, 0.326067],
[0.930798, 0.477867, 0.322697],
[0.933232, 0.482780, 0.319325],
[0.935630, 0.487712, 0.315952],
[0.937990, 0.492667, 0.312575],
[0.940313, 0.497642, 0.309197],
[0.942598, 0.502639, 0.305816],
[0.944844, 0.507658, 0.302433],
[0.947051, 0.512699, 0.299049],
[0.949217, 0.517763, 0.295662],
[0.951344, 0.522850, 0.292275],
[0.953428, 0.527960, 0.288883],
[0.955470, 0.533093, 0.285490],
[0.957469, 0.538250, 0.282096],
[0.959424, 0.543431, 0.278701],
[0.961336, 0.548636, 0.275305],
[0.963203, 0.553865, 0.271909],
[0.965024, 0.559118, 0.268513],
[0.966798, 0.564396, 0.265118],
[0.968526, 0.569700, 0.261721],
[0.970205, 0.575028, 0.258325],
[0.971835, 0.580382, 0.254931],
[0.973416, 0.585761, 0.251540],
[0.974947, 0.591165, 0.248151],
[0.976428, 0.596595, 0.244767],
[0.977856, 0.602051, 0.241387],
[0.979233, 0.607532, 0.238013],
[0.980556, 0.613039, 0.234646],
[0.981826, 0.618572, 0.231287],
[0.983041, 0.624131, 0.227937],
[0.984199, 0.629718, 0.224595],
[0.985301, 0.635330, 0.221265],
[0.986345, 0.640969, 0.217948],
[0.987332, 0.646633, 0.214648],
[0.988260, 0.652325, 0.211364],
[0.989128, 0.658043, 0.208100],
[0.989935, 0.663787, 0.204859],
[0.990681, 0.669558, 0.201642],
[0.991365, 0.675355, 0.198453],
[0.991985, 0.681179, 0.195295],
[0.992541, 0.687030, 0.192170],
[0.993032, 0.692907, 0.189084],
[0.993456, 0.698810, 0.186041],
[0.993814, 0.704741, 0.183043],
[0.994103, 0.710698, 0.180097],
[0.994324, 0.716681, 0.177208],
[0.994474, 0.722691, 0.174381],
[0.994553, 0.728728, 0.171622],
[0.994561, 0.734791, 0.168938],
[0.994495, 0.740880, 0.166335],
[0.994355, 0.746995, 0.163821],
[0.994141, 0.753137, 0.161404],
[0.993851, 0.759304, 0.159092],
[0.993482, 0.765499, 0.156891],
[0.993033, 0.771720, 0.154808],
[0.992505, 0.777967, 0.152855],
[0.991897, 0.784239, 0.151042],
[0.991209, 0.790537, 0.149377],
[0.990439, 0.796859, 0.147870],
[0.989587, 0.803205, 0.146529],
[0.988648, 0.809579, 0.145357],
[0.987621, 0.815978, 0.144363],
[0.986509, 0.822401, 0.143557],
[0.985314, 0.828846, 0.142945],
[0.984031, 0.835315, 0.142528],
[0.982653, 0.841812, 0.142303],
[0.981190, 0.848329, 0.142279],
[0.979644, 0.854866, 0.142453],
[0.977995, 0.861432, 0.142808],
[0.976265, 0.868016, 0.143351],
[0.974443, 0.874622, 0.144061],
[0.972530, 0.881250, 0.144923],
[0.970533, 0.887896, 0.145919],
[0.968443, 0.894564, 0.147014],
[0.966271, 0.901249, 0.148180],
[0.964021, 0.907950, 0.149370],
[0.961681, 0.914672, 0.150520],
[0.959276, 0.921407, 0.151566],
[0.956808, 0.928152, 0.152409],
[0.954287, 0.934908, 0.152921],
[0.951726, 0.941671, 0.152925],
[0.949151, 0.948435, 0.152178],
[0.946602, 0.955190, 0.150328],
[0.944152, 0.961916, 0.146861],
[0.941896, 0.968590, 0.140956],
[0.940015, 0.975158, 0.131326]]
_viridis_data = [[0.267004, 0.004874, 0.329415],
[0.268510, 0.009605, 0.335427],
[0.269944, 0.014625, 0.341379],
[0.271305, 0.019942, 0.347269],
[0.272594, 0.025563, 0.353093],
[0.273809, 0.031497, 0.358853],
[0.274952, 0.037752, 0.364543],
[0.276022, 0.044167, 0.370164],
[0.277018, 0.050344, 0.375715],
[0.277941, 0.056324, 0.381191],
[0.278791, 0.062145, 0.386592],
[0.279566, 0.067836, 0.391917],
[0.280267, 0.073417, 0.397163],
[0.280894, 0.078907, 0.402329],
[0.281446, 0.084320, 0.407414],
[0.281924, 0.089666, 0.412415],
[0.282327, 0.094955, 0.417331],
[0.282656, 0.100196, 0.422160],
[0.282910, 0.105393, 0.426902],
[0.283091, 0.110553, 0.431554],
[0.283197, 0.115680, 0.436115],
[0.283229, 0.120777, 0.440584],
[0.283187, 0.125848, 0.444960],
[0.283072, 0.130895, 0.449241],
[0.282884, 0.135920, 0.453427],
[0.282623, 0.140926, 0.457517],
[0.282290, 0.145912, 0.461510],
[0.281887, 0.150881, 0.465405],
[0.281412, 0.155834, 0.469201],
[0.280868, 0.160771, 0.472899],
[0.280255, 0.165693, 0.476498],
[0.279574, 0.170599, 0.479997],
[0.278826, 0.175490, 0.483397],
[0.278012, 0.180367, 0.486697],
[0.277134, 0.185228, 0.489898],
[0.276194, 0.190074, 0.493001],
[0.275191, 0.194905, 0.496005],
[0.274128, 0.199721, 0.498911],
[0.273006, 0.204520, 0.501721],
[0.271828, 0.209303, 0.504434],
[0.270595, 0.214069, 0.507052],
[0.269308, 0.218818, 0.509577],
[0.267968, 0.223549, 0.512008],
[0.266580, 0.228262, 0.514349],
[0.265145, 0.232956, 0.516599],
[0.263663, 0.237631, 0.518762],
[0.262138, 0.242286, 0.520837],
[0.260571, 0.246922, 0.522828],
[0.258965, 0.251537, 0.524736],
[0.257322, 0.256130, 0.526563],
[0.255645, 0.260703, 0.528312],
[0.253935, 0.265254, 0.529983],
[0.252194, 0.269783, 0.531579],
[0.250425, 0.274290, 0.533103],
[0.248629, 0.278775, 0.534556],
[0.246811, 0.283237, 0.535941],
[0.244972, 0.287675, 0.537260],
[0.243113, 0.292092, 0.538516],
[0.241237, 0.296485, 0.539709],
[0.239346, 0.300855, 0.540844],
[0.237441, 0.305202, 0.541921],
[0.235526, 0.309527, 0.542944],
[0.233603, 0.313828, 0.543914],
[0.231674, 0.318106, 0.544834],
[0.229739, 0.322361, 0.545706],
[0.227802, 0.326594, 0.546532],
[0.225863, 0.330805, 0.547314],
[0.223925, 0.334994, 0.548053],
[0.221989, 0.339161, 0.548752],
[0.220057, 0.343307, 0.549413],
[0.218130, 0.347432, 0.550038],
[0.216210, 0.351535, 0.550627],
[0.214298, 0.355619, 0.551184],
[0.212395, 0.359683, 0.551710],
[0.210503, 0.363727, 0.552206],
[0.208623, 0.367752, 0.552675],
[0.206756, 0.371758, 0.553117],
[0.204903, 0.375746, 0.553533],
[0.203063, 0.379716, 0.553925],
[0.201239, 0.383670, 0.554294],
[0.199430, 0.387607, 0.554642],
[0.197636, 0.391528, 0.554969],
[0.195860, 0.395433, 0.555276],
[0.194100, 0.399323, 0.555565],
[0.192357, 0.403199, 0.555836],
[0.190631, 0.407061, 0.556089],
[0.188923, 0.410910, 0.556326],
[0.187231, 0.414746, 0.556547],
[0.185556, 0.418570, 0.556753],
[0.183898, 0.422383, 0.556944],
[0.182256, 0.426184, 0.557120],
[0.180629, 0.429975, 0.557282],
[0.179019, 0.433756, 0.557430],
[0.177423, 0.437527, 0.557565],
[0.175841, 0.441290, 0.557685],
[0.174274, 0.445044, 0.557792],
[0.172719, 0.448791, 0.557885],
[0.171176, 0.452530, 0.557965],
[0.169646, 0.456262, 0.558030],
[0.168126, 0.459988, 0.558082],
[0.166617, 0.463708, 0.558119],
[0.165117, 0.467423, 0.558141],
[0.163625, 0.471133, 0.558148],
[0.162142, 0.474838, 0.558140],
[0.160665, 0.478540, 0.558115],
[0.159194, 0.482237, 0.558073],
[0.157729, 0.485932, 0.558013],
[0.156270, 0.489624, 0.557936],
[0.154815, 0.493313, 0.557840],
[0.153364, 0.497000, 0.557724],
[0.151918, 0.500685, 0.557587],
[0.150476, 0.504369, 0.557430],
[0.149039, 0.508051, 0.557250],
[0.147607, 0.511733, 0.557049],
[0.146180, 0.515413, 0.556823],
[0.144759, 0.519093, 0.556572],
[0.143343, 0.522773, 0.556295],
[0.141935, 0.526453, 0.555991],
[0.140536, 0.530132, 0.555659],
[0.139147, 0.533812, 0.555298],
[0.137770, 0.537492, 0.554906],
[0.136408, 0.541173, 0.554483],
[0.135066, 0.544853, 0.554029],
[0.133743, 0.548535, 0.553541],
[0.132444, 0.552216, 0.553018],
[0.131172, 0.555899, 0.552459],
[0.129933, 0.559582, 0.551864],
[0.128729, 0.563265, 0.551229],
[0.127568, 0.566949, 0.550556],
[0.126453, 0.570633, 0.549841],
[0.125394, 0.574318, 0.549086],
[0.124395, 0.578002, 0.548287],
[0.123463, 0.581687, 0.547445],
[0.122606, 0.585371, 0.546557],
[0.121831, 0.589055, 0.545623],
[0.121148, 0.592739, 0.544641],
[0.120565, 0.596422, 0.543611],
[0.120092, 0.600104, 0.542530],
[0.119738, 0.603785, 0.541400],
[0.119512, 0.607464, 0.540218],
[0.119423, 0.611141, 0.538982],
[0.119483, 0.614817, 0.537692],
[0.119699, 0.618490, 0.536347],
[0.120081, 0.622161, 0.534946],
[0.120638, 0.625828, 0.533488],
[0.121380, 0.629492, 0.531973],
[0.122312, 0.633153, 0.530398],
[0.123444, 0.636809, 0.528763],
[0.124780, 0.640461, 0.527068],
[0.126326, 0.644107, 0.525311],
[0.128087, 0.647749, 0.523491],
[0.130067, 0.651384, 0.521608],
[0.132268, 0.655014, 0.519661],
[0.134692, 0.658636, 0.517649],
[0.137339, 0.662252, 0.515571],
[0.140210, 0.665859, 0.513427],
[0.143303, 0.669459, 0.511215],
[0.146616, 0.673050, 0.508936],
[0.150148, 0.676631, 0.506589],
[0.153894, 0.680203, 0.504172],
[0.157851, 0.683765, 0.501686],
[0.162016, 0.687316, 0.499129],
[0.166383, 0.690856, 0.496502],
[0.170948, 0.694384, 0.493803],
[0.175707, 0.697900, 0.491033],
[0.180653, 0.701402, 0.488189],
[0.185783, 0.704891, 0.485273],
[0.191090, 0.708366, 0.482284],
[0.196571, 0.711827, 0.479221],
[0.202219, 0.715272, 0.476084],
[0.208030, 0.718701, 0.472873],
[0.214000, 0.722114, 0.469588],
[0.220124, 0.725509, 0.466226],
[0.226397, 0.728888, 0.462789],
[0.232815, 0.732247, 0.459277],
[0.239374, 0.735588, 0.455688],
[0.246070, 0.738910, 0.452024],
[0.252899, 0.742211, 0.448284],
[0.259857, 0.745492, 0.444467],
[0.266941, 0.748751, 0.440573],
[0.274149, 0.751988, 0.436601],
[0.281477, 0.755203, 0.432552],
[0.288921, 0.758394, 0.428426],
[0.296479, 0.761561, 0.424223],
[0.304148, 0.764704, 0.419943],
[0.311925, 0.767822, 0.415586],
[0.319809, 0.770914, 0.411152],
[0.327796, 0.773980, 0.406640],
[0.335885, 0.777018, 0.402049],
[0.344074, 0.780029, 0.397381],
[0.352360, 0.783011, 0.392636],
[0.360741, 0.785964, 0.387814],
[0.369214, 0.788888, 0.382914],
[0.377779, 0.791781, 0.377939],
[0.386433, 0.794644, 0.372886],
[0.395174, 0.797475, 0.367757],
[0.404001, 0.800275, 0.362552],
[0.412913, 0.803041, 0.357269],
[0.421908, 0.805774, 0.351910],
[0.430983, 0.808473, 0.346476],
[0.440137, 0.811138, 0.340967],
[0.449368, 0.813768, 0.335384],
[0.458674, 0.816363, 0.329727],
[0.468053, 0.818921, 0.323998],
[0.477504, 0.821444, 0.318195],
[0.487026, 0.823929, 0.312321],
[0.496615, 0.826376, 0.306377],
[0.506271, 0.828786, 0.300362],
[0.515992, 0.831158, 0.294279],
[0.525776, 0.833491, 0.288127],
[0.535621, 0.835785, 0.281908],
[0.545524, 0.838039, 0.275626],
[0.555484, 0.840254, 0.269281],
[0.565498, 0.842430, 0.262877],
[0.575563, 0.844566, 0.256415],
[0.585678, 0.846661, 0.249897],
[0.595839, 0.848717, 0.243329],
[0.606045, 0.850733, 0.236712],
[0.616293, 0.852709, 0.230052],
[0.626579, 0.854645, 0.223353],
[0.636902, 0.856542, 0.216620],
[0.647257, 0.858400, 0.209861],
[0.657642, 0.860219, 0.203082],
[0.668054, 0.861999, 0.196293],
[0.678489, 0.863742, 0.189503],
[0.688944, 0.865448, 0.182725],
[0.699415, 0.867117, 0.175971],
[0.709898, 0.868751, 0.169257],
[0.720391, 0.870350, 0.162603],
[0.730889, 0.871916, 0.156029],
[0.741388, 0.873449, 0.149561],
[0.751884, 0.874951, 0.143228],
[0.762373, 0.876424, 0.137064],
[0.772852, 0.877868, 0.131109],
[0.783315, 0.879285, 0.125405],
[0.793760, 0.880678, 0.120005],
[0.804182, 0.882046, 0.114965],
[0.814576, 0.883393, 0.110347],
[0.824940, 0.884720, 0.106217],
[0.835270, 0.886029, 0.102646],
[0.845561, 0.887322, 0.099702],
[0.855810, 0.888601, 0.097452],
[0.866013, 0.889868, 0.095953],
[0.876168, 0.891125, 0.095250],
[0.886271, 0.892374, 0.095374],
[0.896320, 0.893616, 0.096335],
[0.906311, 0.894855, 0.098125],
[0.916242, 0.896091, 0.100717],
[0.926106, 0.897330, 0.104071],
[0.935904, 0.898570, 0.108131],
[0.945636, 0.899815, 0.112838],
[0.955300, 0.901065, 0.118128],
[0.964894, 0.902323, 0.123941],
[0.974417, 0.903590, 0.130215],
[0.983868, 0.904867, 0.136897],
[0.993248, 0.906157, 0.143936]]
from matplotlib.colors import ListedColormap
from matplotlib.colors import LinearSegmentedColormap

# Build quantized (8-level) versions of the perceptually-uniform matplotlib
# colormaps from the raw 256-entry RGB lookup tables defined above.
cmaps = {}
for (name, data) in (('magma', _magma_data),
                     ('inferno', _inferno_data),
                     ('plasma', _plasma_data),
                     ('viridis', _viridis_data)):
    # N=8 collapses the 256-color table into 8 discrete bands (linear
    # interpolation between entries), giving a banded / contour-style map.
    cmaps[name] = LinearSegmentedColormap.from_list(name, data, N=8)

# Module-level aliases so callers can do e.g. `from colormaps import viridis`.
magma = cmaps['magma']
inferno = cmaps['inferno']
plasma = cmaps['plasma']
viridis = cmaps['viridis']
|
orloxREPO_NAMEmesa_input_dataPATH_START.@mesa_input_data_extracted@mesa_input_data-master@2016_ULX@scripts@NSBH@colormaps.py@.PATH_END.py
|
{
"filename": "exceptions.py",
"repo_name": "stub42/pytz",
"repo_path": "pytz_extracted/pytz-master/src/pytz/exceptions.py",
"type": "Python"
}
|
'''
Custom exceptions raised by pytz.
'''
# Public API: the concrete exception classes defined below. The shared
# base class ``Error`` is intentionally not exported via ``__all__``.
__all__ = [
    'UnknownTimeZoneError', 'InvalidTimeError', 'AmbiguousTimeError',
    'NonExistentTimeError',
]
class Error(Exception):
    '''Base class for all exceptions raised by the pytz library.

    Catch this to handle any pytz-specific error in a single clause.
    '''
class UnknownTimeZoneError(KeyError, Error):
    '''Exception raised when pytz is passed an unknown timezone.
    >>> isinstance(UnknownTimeZoneError(), LookupError)
    True
    This class is actually a subclass of KeyError to provide backwards
    compatibility with code relying on the undocumented behavior of earlier
    pytz releases.
    >>> isinstance(UnknownTimeZoneError(), KeyError)
    True
    And also a subclass of pytz.exceptions.Error, as are other pytz
    exceptions.
    >>> isinstance(UnknownTimeZoneError(), Error)
    True
    '''
    pass  # no behaviour of its own; the class exists for its type hierarchy
class InvalidTimeError(Error):
    '''Base class for invalid time exceptions.

    Parent of AmbiguousTimeError and NonExistentTimeError.
    '''
class AmbiguousTimeError(InvalidTimeError):
    '''Exception raised when attempting to create an ambiguous wallclock time.

    At the end of a DST transition period, a particular wallclock time will
    occur twice (once before the clocks are set back, once after). Both
    possibilities may be correct, unless further information is supplied.

    See DstTzInfo.normalize() for more info
    '''
class NonExistentTimeError(InvalidTimeError):
    '''Exception raised when attempting to create a wallclock time that
    cannot exist.

    At the start of a DST transition period, the wallclock time jumps forward.
    The instants jumped over never occur.
    '''
|
stub42REPO_NAMEpytzPATH_START.@pytz_extracted@pytz-master@src@pytz@exceptions.py@.PATH_END.py
|
{
"filename": "RHT_example_workflow-checkpoint.ipynb",
"repo_name": "seclark/RHT",
"repo_path": "RHT_extracted/RHT-master/.ipynb_checkpoints/RHT_example_workflow-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
# RHT example workflow
### by S. E. Clark
Imports. Note we are importing `rht` and `RHT_tools` from this repo.
```python
from astropy.io import fits
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import rht, RHT_tools
%matplotlib inline
```
File "/Users/jegpeek/Repositories/RHT-fork/rht.py", line 110
print announcement(strings)
^
SyntaxError: invalid syntax
Load some test data. Let's use a fits version of a tesla coil image from <a href="https://commons.wikimedia.org/wiki/File:225W_Zeus_Tesla_coil_-_arcs2_(cropped).jpg">Wikimedia commons</a>.
```python
data_fn = "testim_tesla_small"
tesla_data = fits.getdata(data_fn+".fits")
```
Let's take a look at the original image.
```python
fig = plt.figure(figsize=(6,6))
plt.imshow(tesla_data, cmap="Greys")
```
<matplotlib.image.AxesImage at 0x1821b9e590>

Run the RHT! It's as simple as this. Note that depending on your setup, this may run quite slowly in a Jupyter notebook. The following should only take a few seconds from the command line. From the command line, simply do
~~~
python rht.py data_fn --wlen=21 --smr=2
~~~
Here `wlen` is the window length and `smr` is the unsharp-mask smoothing radius. For details, please refer to <a href="http://adsabs.harvard.edu/abs/2014ApJ...789...82C">the RHT paper</a>.
```python
rht.main(data_fn, smr=2, wlen=21)
```
**********************************************************************
Fast Rolling Hough Transform by Susan Clark
Started for: testim_tesla_small.fits
**********************************************************************
1/4:: Retrieving Data from: testim_tesla_small.fits
Finished Masking: 3sec
2/4:: Size: 300x300, Wlen: 21, Smr: 2, Frac: 0.7, Standard (half-polar) RHT: True
3/4:: Running RHT... 2min
4/4:: Successfully Saved Data As /Users/susanclark/Projects/RHT/testim_tesla_small_xyt01.fits
**********************************************************************
testim_tesla_small.fits: Passed
Complete!
**********************************************************************
True
By default, the data are saved as a fits file of the same name, with "_xytNN" appended, where NN is the RHT run number.
```python
rht_data_fn = data_fn+"_xyt01.fits"
rht_tesla = fits.getdata(rht_data_fn)
```
The backprojection is stored as the first hdu. This is total RHT linear intensity integrated over orientation. More prominent features in the backprojection indicate regions with greater total linear power.
```python
fig = plt.figure(figsize=(6,6))
plt.imshow(rht_tesla, cmap="Greys")
```
<matplotlib.image.AxesImage at 0x1822860c50>

Some helper functions are provided in `RHT_tools.py`. Let's use them to grab the total RHT output (pixel indices and R(x, y, theta)) from the second header object.
```python
ipoints, jpoints, hthets, naxis1, naxis2, wlen, smr, thresh = RHT_tools.get_RHT_data(rht_data_fn)
```
loading data from testim_tesla_small_xyt01.fits
Just to demonstrate, let's grab a random point. We'll also get the array of theta bins using `RHT_tools`.
```python
indx = 20000
ipoint_example = ipoints[indx]
jpoint_example = jpoints[indx]
hthets_example = hthets[indx]
thets_arr = RHT_tools.get_thets(wlen, save=False)
```
Plot the RHT spectrum at this random point.
```python
fig=plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
ax1.plot(np.degrees(thets_arr), hthets_example)
ax1.set_xlabel("theta [degrees]")
ax1.set_ylabel("RHT intensity")
ax1.set_title("RHT spectrum at point ({}, {})".format(ipoint_example, jpoint_example))
ax2.imshow(rht_tesla, cmap="Greys")
ax2.plot(ipoint_example, jpoint_example, '+', color="pink", ms=15, mew=3)
```
[<matplotlib.lines.Line2D at 0x182f692c10>]

Let's now plot all of the RHT spectra that lie in a given row in our image.
```python
row_js = jpoints[np.where(jpoints == 250)]
row_is = ipoints[np.where(jpoints == 250)]
row_hthets = hthets[np.where(jpoints == 250)]
cmap = matplotlib.cm.get_cmap('Reds_r')
fig=plt.figure(figsize=(12,6))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
for _i in range(len(row_js)):
ax1.plot(np.degrees(thets_arr), row_hthets[_i, :,], color=cmap(_i*1./len(row_js)))
ax1.set_xlabel("theta [degrees]")
ax1.set_ylabel("RHT intensity")
ax1.set_title("RHT spectra where jpoint = {}".format(250))
ax2.imshow(rht_tesla, cmap="Greys")
plt.scatter(row_is, row_js, color=cmap(np.arange(len(row_js)*1.)/len(row_js)))
```
<matplotlib.collections.PathCollection at 0x1830dfe650>

```python
```
21
```python
```
|
seclarkREPO_NAMERHTPATH_START.@RHT_extracted@RHT-master@.ipynb_checkpoints@RHT_example_workflow-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "test_ephemeris_functions.py",
"repo_name": "spacetelescope/mirage",
"repo_path": "mirage_extracted/mirage-master/tests/test_ephemeris_functions.py",
"type": "Python"
}
|
"""Unit tests for working with ephemeris files
Authors
-------
- Bryan Hilbert
Use
---
Ensure you have pytest installed. Then, simply run pytest in any
parent directory of mirage/tests/:
>>> pytest
"""
import datetime
import numpy as np
import os
import pkg_resources
from mirage.seed_image import ephemeris_tools
package_path = pkg_resources.resource_filename('mirage', '')
data_dir = os.path.join( os.path.dirname(__file__), 'test_data/ephemeris/')
CONFIG_DIR = os.path.join(package_path, 'config')
def test_create_interpol_function():
    """Check that RA/Dec interpolators built from an ephemeris table
    reproduce the expected coordinates at a known epoch.
    """
    ephem = ephemeris_tools.read_ephemeris_file(
        os.path.join(data_dir, 'horizons_results.txt'))
    interp_ra, interp_dec = ephemeris_tools.create_interpol_function(ephem)

    # Evaluate both interpolators at 2020-10-03 00:00:00
    epoch = ephemeris_tools.to_timestamp(datetime.datetime(2020, 10, 3))
    assert np.isclose(interp_ra([epoch])[0], 23.74433333333333, atol=1e-9)
    assert np.isclose(interp_dec([epoch])[0], 6.01483333, atol=1e-9)
def test_read_ephemeris_file():
    """Read in ephemeris files and check the parsed table contents.

    Development was based on ephemeris files from Horizons. Each file is
    checked at one epoch against known RA/Dec values, and the parsed table
    must expose the expected columns.
    """
    files = ['horizons_results.txt', 'horizons_results_jupiter.txt']
    times = [datetime.datetime(2020, 10, 1), datetime.datetime(2022, 7, 11, 0, 2, 0)]
    ras = [24.299791666666664, 7.885875]
    decs = [6.131916666666666, 1.98519444444]
    for efile, time, ra, dec in zip(files, times, ras, decs):
        ephemeris_file = os.path.join(data_dir, efile)
        ephem = ephemeris_tools.read_ephemeris_file(ephemeris_file)
        # Select the table row matching this file's check epoch
        match = ephem['Time'] == time
        assert np.isclose(ephem[match]['RA'].data[0], ra, atol=1e-9)
        assert np.isclose(ephem[match]['Dec'].data[0], dec, atol=1e-9)
        cols = ['Time', 'RA', 'Dec']
        for col in cols:
            assert col in ephem.colnames
|
spacetelescopeREPO_NAMEmiragePATH_START.@mirage_extracted@mirage-master@tests@test_ephemeris_functions.py@.PATH_END.py
|
{
"filename": "mri.py",
"repo_name": "hmuellergoe/mrbeam",
"repo_path": "mrbeam_extracted/mrbeam-main/mr_beam/itreg/examples/mri.py",
"type": "Python"
}
|
# Example script: regularized parallel-MRI reconstruction with regpy.
# Synthetic coil profiles and a square density are used to generate noisy
# undersampled k-space data, which is then reconstructed with an IRGNM-CG
# solver while plotting intermediate iterates.
import logging
import matplotlib.colorbar as cbar
import matplotlib.pyplot as plt
import numpy as np
import regpy.stoprules as rules
import regpy.util as util
from regpy.operators.mri import cartesian_sampling, normalize, parallel_mri, sobolev_smoother
from regpy.solvers import HilbertSpaceSetting
from regpy.solvers.irgnm import IrgnmCG
from regpy.discrs import UniformGrid
from regpy.hilbert import L2
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(name)-40s :: %(message)s'
)
# TODO dtype=complex?
# Image domain: a 100x100 complex grid on [-1, 1]^2.
grid = UniformGrid((-1, 1, 100), (-1, 1, 100), dtype=complex)
sobolev_index = 32
noiselevel = 0.05
# In real applications with data known before constructing the operator, estimate_sampling_pattern
# can be used to determine the mask.
# Undersampling mask: every other row, plus fully sampled bands at both edges.
mask = grid.zeros(dtype=bool)
mask[::2] = True
mask[:10] = True
mask[-10:] = True
# Forward operator: 10-coil parallel MRI restricted to the sampled k-space rows.
full_mri_op = parallel_mri(grid=grid, ncoils=10)
sampling = cartesian_sampling(full_mri_op.codomain, mask=mask)
mri_op = sampling * full_mri_op
# Substitute Sobolev weights into coil profiles
smoother = sobolev_smoother(mri_op.domain, sobolev_index, factor=220.)
smoothed_op = mri_op * smoother
exact_solution = mri_op.domain.zeros()
exact_density, exact_coils = mri_op.domain.split(exact_solution) # returns views into exact_solution in this case
# Exact density is just a square shape
exact_density[...] = (np.max(np.abs(grid.coords), axis=0) < 0.4)
# Exact coils are Gaussians centered on points on a circle
centers = util.linspace_circle(exact_coils.shape[0]) / np.sqrt(2)
for coil, center in zip(exact_coils, centers):
    r = np.linalg.norm(grid.coords - center[:, np.newaxis, np.newaxis], axis=0)
    coil[...] = np.exp(-r**2 / 2)
# Construct data (criminally), add noise
exact_data = mri_op(exact_solution)
data = exact_data + noiselevel * mri_op.codomain.randn()
# Initial guess: constant density, zero coils
init = smoothed_op.domain.zeros()
init_density, _ = smoothed_op.domain.split(init)
init_density[...] = 1
setting = HilbertSpaceSetting(op=smoothed_op, Hdomain=L2, Hcodomain=L2)
# Iteratively regularized Gauss-Newton method with a CG inner solver.
solver = IrgnmCG(
    setting=setting,
    data=data,
    regpar=10,
    regpar_step=0.8,
    init=init
)
# Stop after 100 iterations or once the discrepancy principle (tau=1.1) holds.
stoprule = (
    rules.CountIterations(max_iterations=100) +
    rules.Discrepancy(
        setting.Hcodomain.norm, data,
        noiselevel=setting.Hcodomain.norm(exact_data - data),
        tau=1.1
    )
)
# Plotting setup
plt.ion()
fig, axes = plt.subplots(ncols=2, constrained_layout=True)
bars = [cbar.make_axes(ax)[0] for ax in axes]
axes[0].set_title('exact solution')
axes[1].set_title('reconstruction')
# Plot exact solution
im = axes[0].imshow(np.abs(normalize(*mri_op.domain.split(exact_solution))))
fig.colorbar(im, cax=bars[0])
# Run the solver, plot iterates
for reco, reco_data in solver.until(stoprule):
    # Map the smoothed iterate back to the physical (coil-weighted) variables
    reco2 = smoother(reco)
    im = axes[1].imshow(np.abs(normalize(*mri_op.domain.split(reco2))))
    bars[1].clear()
    fig.colorbar(im, cax=bars[1])
    plt.pause(0.5)
plt.ioff()
plt.show()
|
hmuellergoeREPO_NAMEmrbeamPATH_START.@mrbeam_extracted@mrbeam-main@mr_beam@itreg@examples@mri.py@.PATH_END.py
|
{
"filename": "adamw.py",
"repo_name": "neuraloperator/neuraloperator",
"repo_path": "neuraloperator_extracted/neuraloperator-main/neuralop/training/adamw.py",
"type": "Python"
}
|
# copy dependencies from transformers/optimization.py
import math
import warnings
from typing import Callable, Iterable, Tuple, Union, List
import torch
from torch import nn
from torch.optim import Optimizer
class AdamW(Optimizer):
    """
    Implements the Adam algorithm with the decoupled weight decay fix
    introduced in `Decoupled Weight Decay Regularization
    <https://arxiv.org/abs/1711.05101>`_.

    Parameters
    ----------
    params : Iterable[nn.parameter.Parameter]
        Iterable of parameters to optimize or dictionaries defining parameter groups.
    lr : float, optional, defaults to 1e-3
        The learning rate to use.
    betas : Tuple[float, float], optional, defaults to (0.9, 0.999)
        Adam's (b1, b2) exponential decay rates for the moment estimates.
    eps : float, optional, defaults to 1e-6
        Adam's epsilon for numerical stability.
    weight_decay : float, optional, defaults to 0.0
        Decoupled weight decay to apply.
    correct_bias : bool, optional, defaults to True
        Whether or not to correct bias in Adam (for instance, in the Bert TF
        repository they use ``False``).
    """

    def __init__(
        self,
        params: Iterable[nn.parameter.Parameter],
        lr: float = 1e-3,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-6,
        weight_decay: float = 0.0,
        correct_bias: bool = True,
    ):
        if lr < 0.0:
            raise ValueError(f"Invalid learning rate: {lr} - should be >= 0.0")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[0]} - should be in [0.0, 1.0)")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter: {betas[1]} - should be in [0.0, 1.0)")
        if not 0.0 <= eps:
            raise ValueError(f"Invalid epsilon value: {eps} - should be >= 0.0")
        # Fix: weight_decay was previously accepted unchecked; a negative value
        # silently *grows* the weights every step. Mirror torch.optim.AdamW's
        # own validation.
        if weight_decay < 0.0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay} - should be >= 0.0")
        defaults = {"lr": lr, "betas": betas, "eps": eps, "weight_decay": weight_decay, "correct_bias": correct_bias}
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure: Callable = None):
        """
        Performs a single optimization step.

        Parameters
        ----------
        closure : Callable, optional
            A closure that reevaluates the model and returns the loss.

        Returns
        -------
        The value returned by ``closure``, or ``None`` if no closure was given.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group["params"]:
                if p.grad is None:
                    continue
                grad = p.grad
                if grad.is_sparse:
                    raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead")
                state = self.state[p]
                if "step" not in state:
                    state["step"] = 0
                # 'dim' is not used by this optimizer itself; kept as a group
                # default for external consumers -- TODO confirm who reads it.
                if 'dim' not in group:
                    group['dim'] = 2
                # State initialization
                if "exp_avg" not in state:
                    # Exponential moving average of gradient values
                    state["exp_avg"] = torch.zeros_like(grad)
                    # Exponential moving average of squared gradient values
                    state["exp_avg_sq"] = torch.zeros_like(grad)
                exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
                beta1, beta2 = group["betas"]
                state["step"] += 1
                # Decay the first and second moment running average coefficient
                # In-place operations to update the averages at the same time
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
                if torch.is_complex(grad):
                    # grad * conj(grad) keeps the second moment's magnitude real
                    exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1.0 - beta2)
                else:
                    exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2)
                denom = exp_avg_sq.sqrt().add_(group["eps"])
                step_size = group["lr"]
                if group["correct_bias"]:  # No bias correction for Bert
                    bias_correction1 = 1.0 - beta1 ** state["step"]
                    bias_correction2 = 1.0 - beta2 ** state["step"]
                    step_size = step_size * math.sqrt(bias_correction2) / bias_correction1
                # Adam update direction: m_t / (sqrt(v_t) + eps)
                norm_grad = exp_avg / denom
                p.add_(norm_grad, alpha=-step_size)
                # Just adding the square of the weights to the loss function is *not*
                # the correct way of using L2 regularization/weight decay with Adam,
                # since that will interact with the m and v parameters in strange ways.
                #
                # Instead we want to decay the weights in a manner that doesn't interact
                # with the m/v parameters. This is equivalent to adding the square
                # of the weights to the loss with plain (non-momentum) SGD.
                # Add weight decay at the end (fixed version); note it uses the
                # raw lr, not the bias-corrected step_size.
                if group["weight_decay"] > 0.0:
                    p.add_(p, alpha=(-group["lr"] * group["weight_decay"]))
        return loss
|
neuraloperatorREPO_NAMEneuraloperatorPATH_START.@neuraloperator_extracted@neuraloperator-main@neuralop@training@adamw.py@.PATH_END.py
|
{
"filename": "_style.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/marker/colorbar/tickfont/_style.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StyleValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``scatter.marker.colorbar.tickfont.style``.

    Accepts ``"normal"`` or ``"italic"``; edits trigger a colorbar redraw
    unless the caller overrides ``edit_type``.
    """

    def __init__(
        self,
        plotly_name="style",
        parent_name="scatter.marker.colorbar.tickfont",
        **kwargs,
    ):
        # Pull out overridable defaults; anything left is forwarded untouched.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["normal", "italic"])
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@marker@colorbar@tickfont@_style.py@.PATH_END.py
|
{
"filename": "test_ipac_definitions.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/io/ascii/tests/test_ipac_definitions.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import re
from io import StringIO
import pytest
from astropy.io import ascii
from astropy.io.ascii.core import masked
from astropy.io.ascii.ipac import IpacFormatError, IpacFormatErrorDBMS
from astropy.io.ascii.ui import read
from astropy.table import Column, Table
DATA = """
| a | b |
| char | char |
ABBBBBBABBBBBBBA
"""
def test_ipac_default():
    """Omitting ``definition`` must behave exactly like definition='ignore'."""
    tbl = read(DATA, format="ipac")
    assert tbl["a"][0] == "BBBBBB"
    assert tbl["b"][0] == "BBBBBBB"
def test_ipac_ignore():
    """definition='ignore': the delimiter columns belong to neither field."""
    tbl = read(DATA, format="ipac", definition="ignore")
    assert tbl["a"][0] == "BBBBBB"
    assert tbl["b"][0] == "BBBBBBB"
def test_ipac_left():
    """definition='left': each pipe column counts toward the field to its left."""
    tbl = read(DATA, format="ipac", definition="left")
    assert tbl["a"][0] == "BBBBBBA"
    assert tbl["b"][0] == "BBBBBBBA"
def test_ipac_right():
    """definition='right': each pipe column counts toward the field to its right."""
    tbl = read(DATA, format="ipac", definition="right")
    assert tbl["a"][0] == "ABBBBBB"
    assert tbl["b"][0] == "ABBBBBBB"
def test_too_long_colname_default():
    """A 41-character column name exceeds the plain-IPAC limit and must raise."""
    tbl = Table([[3]], names=["a1234567890123456789012345678901234567890"])
    buf = StringIO()
    with pytest.raises(IpacFormatError):
        ascii.write(tbl, buf, format="ipac")
def test_too_long_colname_strict():
    """In DBMS mode, even a 17-character column name is already too long."""
    tbl = Table([[3]], names=["a1234567890123456"])
    buf = StringIO()
    with pytest.raises(IpacFormatErrorDBMS):
        ascii.write(tbl, buf, format="ipac", DBMS=True)
def test_too_long_colname_notstrict():
    """With DBMS=False the looser plain-IPAC length limit still applies."""
    tbl = Table([[3]], names=["a1234567890123456789012345678901234567890"])
    buf = StringIO()
    with pytest.raises(IpacFormatError):
        ascii.write(tbl, buf, format="ipac", DBMS=False)
@pytest.mark.parametrize(
    ("strict_", "Err"), [(True, IpacFormatErrorDBMS), (False, IpacFormatError)]
)
def test_non_alfnum_colname(strict_, Err):
    """A column name containing a space is rejected in both modes."""
    tbl = Table([[3]], names=["a123456789 01234"])
    buf = StringIO()
    with pytest.raises(Err):
        ascii.write(tbl, buf, format="ipac", DBMS=strict_)
def test_colname_starswithnumber_strict():
    """DBMS mode rejects column names that start with a digit.

    Fix: the previous fixture name 'a123456789 01234' started with a letter
    (and merely contained a space), so this test duplicated
    ``test_non_alfnum_colname`` instead of exercising the digit-start rule.
    """
    table = Table([[3]], names=["1column"])
    out = StringIO()
    with pytest.raises(IpacFormatErrorDBMS):
        ascii.write(table, out, format="ipac", DBMS=True)
def test_double_colname_strict():
    """DBMS mode compares column names case-insensitively: 'DEC'/'dec' collide."""
    tbl = Table([[3], [1]], names=["DEC", "dec"])
    buf = StringIO()
    with pytest.raises(IpacFormatErrorDBMS):
        ascii.write(tbl, buf, format="ipac", DBMS=True)
@pytest.mark.parametrize("colname", ["x", "y", "z", "X", "Y", "Z"])
def test_reserved_colname_strict(colname):
    """x/y/z in any case are reserved names under DBMS rules and must raise."""
    tbl = Table([["reg"]], names=[colname])
    buf = StringIO()
    with pytest.raises(IpacFormatErrorDBMS):
        ascii.write(tbl, buf, format="ipac", DBMS=True)
def test_too_long_comment():
    """Comments longer than 78 characters are wrapped and a warning is issued."""
    msg = "Wrapping comment lines > 78 characters produced 1 extra line(s)"
    with pytest.warns(UserWarning, match=re.escape(msg)):
        tbl = Table([[3]])
        tbl.meta["comments"] = ["a" * 79]
        buf = StringIO()
        ascii.write(tbl, buf, format="ipac")
    expected_out = """\
\\ aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
\\ a
|col0|
|long|
| |
|null|
3
"""
    assert buf.getvalue().strip().splitlines() == expected_out.splitlines()
def test_out_with_nonstring_null():
    """A non-string fill value must appear in the header null row.

    This holds even for an unmasked (but masked-capable) table.
    """
    tbl = Table([[3]], masked=True)
    buf = StringIO()
    ascii.write(tbl, buf, format="ipac", fill_values=[(masked, -99999)])
    expected_out = """\
| col0|
| long|
| |
|-99999|
3
"""
    assert buf.getvalue().strip().splitlines() == expected_out.splitlines()
def test_include_exclude_names():
    """exclude_names wins over include_names, leaving only column B."""
    tbl = Table([[1], [2], [3]], names=("A", "B", "C"))
    buf = StringIO()
    ascii.write(
        tbl, buf, format="ipac", include_names=("A", "B"), exclude_names=("A",)
    )
    # column B should be the only included column in output
    expected_out = """\
| B|
|long|
| |
|null|
2
"""
    assert buf.getvalue().strip().splitlines() == expected_out.splitlines()
def test_short_dtypes():
    """Short dtypes (f4/i2) are written with the generic 'float'/'int' types."""
    tbl = Table(
        [Column([1.0], dtype="f4"), Column([2], dtype="i2")],
        names=("float_col", "int_col"),
    )
    buf = StringIO()
    ascii.write(tbl, buf, format="ipac")
    expected_out = """\
|float_col|int_col|
| float| int|
| | |
| null| null|
1.0 2
"""
    assert buf.getvalue().strip().splitlines() == expected_out.splitlines()
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@io@ascii@tests@test_ipac_definitions.py@.PATH_END.py
|
{
"filename": "table.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/matplotlib/table.py",
"type": "Python"
}
|
"""
Place a table below the x-axis at location loc.
The table consists of a grid of cells.
The grid need not be rectangular and can have holes.
Cells are added by specifying their row and column.
For the purposes of positioning the cell at (0, 0) is
assumed to be at the top left and the cell at (max_row, max_col)
is assumed to be at bottom right.
You can add additional cells outside this range to have convenient
ways of positioning more interesting grids.
Author : John Gill <jng@europe.renre.com>
Copyright : 2004 John Gill and John Hunter
License : matplotlib license
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import warnings
from . import artist
from .artist import Artist, allow_rasterization
from .patches import Rectangle
from matplotlib import docstring
from .text import Text
from .transforms import Bbox
from matplotlib.path import Path
class Cell(Rectangle):
    """
    A cell is a Rectangle with some associated text.

    The text is held in a `Text` artist and laid out inside the rectangle
    according to *loc* ('left', 'center' or 'right').
    """
    PAD = 0.1  # padding between text and rectangle

    def __init__(self, xy, width, height,
                 edgecolor='k', facecolor='w',
                 fill=True,
                 text='',
                 loc=None,
                 fontproperties=None
                 ):
        # Call base
        # NOTE(review): *fill* is accepted but not forwarded to Rectangle --
        # TODO confirm whether it should be.
        Rectangle.__init__(self, xy, width=width, height=height,
                           edgecolor=edgecolor, facecolor=facecolor)
        self.set_clip_on(False)
        # Create text object; alignment defaults to 'right'
        if loc is None:
            loc = 'right'
        self._loc = loc
        self._text = Text(x=xy[0], y=xy[1], text=text,
                          fontproperties=fontproperties)
        self._text.set_clip_on(False)

    def set_transform(self, trans):
        Rectangle.set_transform(self, trans)
        # the text does not get the transform!
        self.stale = True

    def set_figure(self, fig):
        # Keep the child Text on the same figure as the rectangle.
        Rectangle.set_figure(self, fig)
        self._text.set_figure(fig)

    def get_text(self):
        'Return the cell Text instance'
        return self._text

    def set_fontsize(self, size):
        'Set the cell text fontsize (points).'
        self._text.set_fontsize(size)
        self.stale = True

    def get_fontsize(self):
        'Return the cell fontsize'
        return self._text.get_fontsize()

    def auto_set_font_size(self, renderer):
        """ Shrink font size until text fits. """
        fontsize = self.get_fontsize()
        required = self.get_required_width(renderer)
        # Decrease one point at a time; never go below 1pt.
        while fontsize > 1 and required > self.get_width():
            fontsize -= 1
            self.set_fontsize(fontsize)
            required = self.get_required_width(renderer)
        return fontsize

    @allow_rasterization
    def draw(self, renderer):
        if not self.get_visible():
            return
        # draw the rectangle
        Rectangle.draw(self, renderer)
        # position the text
        self._set_text_position(renderer)
        self._text.draw(renderer)
        self.stale = False

    def _set_text_position(self, renderer):
        """ Set text up so it draws in the right place.

        Currently support 'left', 'center' and 'right'
        """
        bbox = self.get_window_extent(renderer)
        l, b, w, h = bbox.bounds
        # draw in center vertically
        self._text.set_verticalalignment('center')
        y = b + (h / 2.0)
        # now position horizontally
        if self._loc == 'center':
            self._text.set_horizontalalignment('center')
            x = l + (w / 2.0)
        elif self._loc == 'left':
            self._text.set_horizontalalignment('left')
            x = l + (w * self.PAD)
        else:
            self._text.set_horizontalalignment('right')
            x = l + (w * (1.0 - self.PAD))
        self._text.set_position((x, y))

    def get_text_bounds(self, renderer):
        """ Get text bounds in axes co-ordinates. """
        bbox = self._text.get_window_extent(renderer)
        bboxa = bbox.inverse_transformed(self.get_data_transform())
        return bboxa.bounds

    def get_required_width(self, renderer):
        """ Get width required for this cell. """
        l, b, w, h = self.get_text_bounds(renderer)
        # text width plus PAD on both sides
        return w * (1.0 + (2.0 * self.PAD))

    def set_text_props(self, **kwargs):
        'update the text properties with kwargs'
        self._text.update(kwargs)
        self.stale = True
class CustomCell(Cell):
    """
    A subclass of Cell where the sides may be visibly toggled.
    """
    _edges = 'BRTL'  # bottom, right, top, left -- order matches get_path()
    _edge_aliases = {'open': '',
                     'closed': _edges,  # default
                     'horizontal': 'BT',
                     'vertical': 'RL'
                     }

    def __init__(self, *args, **kwargs):
        # visible_edges is mandatory and consumed here; everything else goes
        # to Cell.
        visible_edges = kwargs.pop('visible_edges')
        Cell.__init__(self, *args, **kwargs)
        self.visible_edges = visible_edges

    @property
    def visible_edges(self):
        # String of edge letters (subset of 'BRTL') currently drawn.
        return self._visible_edges

    @visible_edges.setter
    def visible_edges(self, value):
        # Accepts None (all edges), an alias key, or a string of edge letters.
        if value is None:
            self._visible_edges = self._edges
        elif value in self._edge_aliases:
            self._visible_edges = self._edge_aliases[value]
        else:
            for edge in value:
                if edge not in self._edges:
                    msg = ('Invalid edge param {0}, must only be one of'
                           ' {1} or string of {2}.').format(
                               value,
                               ", ".join(self._edge_aliases),
                               ", ".join(self._edges),
                               )
                    raise ValueError(msg)
            self._visible_edges = value
        self.stale = True

    def get_path(self):
        'Return a path where the edges specified by _visible_edges are drawn'
        codes = [Path.MOVETO]
        # Emit LINETO for visible edges, MOVETO (pen up) for hidden ones.
        for edge in self._edges:
            if edge in self._visible_edges:
                codes.append(Path.LINETO)
            else:
                codes.append(Path.MOVETO)
        if Path.MOVETO not in codes[1:]:  # All sides are visible
            codes[-1] = Path.CLOSEPOLY
        return Path(
            [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]],
            codes,
            readonly=True
            )
class Table(Artist):
    """
    Create a table of cells.

    Table can have (optional) row and column headers.

    Each entry in the table can be either text or patches.

    Column widths and row heights for the table can be specified.

    Return value is a sequence of text, line and patch instances that make
    up the table
    """
    # String location names mapped to integer codes; __init__ accepts either.
    codes = {'best': 0,
             'upper right': 1,  # default
             'upper left': 2,
             'lower left': 3,
             'lower right': 4,
             'center left': 5,
             'center right': 6,
             'lower center': 7,
             'upper center': 8,
             'center': 9,
             'top right': 10,
             'top left': 11,
             'bottom left': 12,
             'bottom right': 13,
             'right': 14,
             'left': 15,
             'top': 16,
             'bottom': 17,
             }

    FONTSIZE = 10
    AXESPAD = 0.02  # the border between the axes and table edge

    def __init__(self, ax, loc=None, bbox=None, **kwargs):
        # ax: parent axes; loc: string or integer location code;
        # bbox: if given, the table is scaled/positioned to fill it instead
        # of being placed via loc.
        Artist.__init__(self)

        if isinstance(loc, six.string_types) and loc not in self.codes:
            warnings.warn('Unrecognized location %s. Falling back on '
                          'bottom; valid locations are\n%s\t' %
                          (loc, '\n\t'.join(self.codes)))
            loc = 'bottom'
        if isinstance(loc, six.string_types):
            loc = self.codes.get(loc, 1)
        self.set_figure(ax.figure)
        self._axes = ax
        self._loc = loc
        self._bbox = bbox

        # use axes coords
        self.set_transform(ax.transAxes)

        self._texts = []
        self._cells = {}           # (row, col) -> CustomCell
        self._edges = None
        self._autoRows = []
        self._autoColumns = []     # columns whose width is fit to content
        self._autoFontsize = True
        self.update(kwargs)

        self.set_clip_on(False)

    def add_cell(self, row, col, *args, **kwargs):
        """ Add a cell to the table. """
        # Position is (0, 0) for now; real layout happens in
        # _do_cell_alignment at draw time.
        xy = (0, 0)

        cell = CustomCell(xy, visible_edges=self.edges, *args, **kwargs)
        cell.set_figure(self.figure)
        cell.set_transform(self.get_transform())

        cell.set_clip_on(False)
        self._cells[row, col] = cell
        self.stale = True

    @property
    def edges(self):
        # Default edge-visibility spec applied to newly added cells.
        return self._edges

    @edges.setter
    def edges(self, value):
        self._edges = value
        self.stale = True

    def _approx_text_height(self):
        # Rough cell height: font height converted to axes coords, +20% leading.
        return (self.FONTSIZE / 72.0 * self.figure.dpi /
                self._axes.bbox.height * 1.2)

    @allow_rasterization
    def draw(self, renderer):
        # Need a renderer to do hit tests on mouseevent; assume the last one
        # will do
        if renderer is None:
            renderer = self.figure._cachedRenderer
        if renderer is None:
            raise RuntimeError('No renderer defined')

        if not self.get_visible():
            return
        renderer.open_group('table')
        self._update_positions(renderer)

        for key in sorted(self._cells):
            self._cells[key].draw(renderer)

        renderer.close_group('table')
        self.stale = False

    def _get_grid_bbox(self, renderer):
        """Get a bbox, in axes co-ordinates for the cells.

        Only include those in the range (0,0) to (maxRow, maxCol)"""
        boxes = [cell.get_window_extent(renderer)
                 for (row, col), cell in six.iteritems(self._cells)
                 if row >= 0 and col >= 0]

        bbox = Bbox.union(boxes)
        return bbox.inverse_transformed(self.get_transform())

    def contains(self, mouseevent):
        """Test whether the mouse event occurred in the table.

        Returns T/F, {}
        """
        if callable(self._contains):
            return self._contains(self, mouseevent)

        # TODO: Return index of the cell containing the cursor so that the user
        # doesn't have to bind to each one individually.
        renderer = self.figure._cachedRenderer
        if renderer is not None:
            boxes = [cell.get_window_extent(renderer)
                     for (row, col), cell in six.iteritems(self._cells)
                     if row >= 0 and col >= 0]
            bbox = Bbox.union(boxes)
            return bbox.contains(mouseevent.x, mouseevent.y), {}
        else:
            # No renderer yet: cannot hit-test.
            return False, {}

    def get_children(self):
        'Return the Artists contained by the table'
        return list(six.itervalues(self._cells))
    get_child_artists = get_children  # backward compatibility

    def get_window_extent(self, renderer):
        'Return the bounding box of the table in window coords'
        boxes = [cell.get_window_extent(renderer)
                 for cell in six.itervalues(self._cells)]

        return Bbox.union(boxes)

    def _do_cell_alignment(self):
        """ Calculate row heights and column widths.

        Position cells accordingly.
        """
        # Calculate row/column widths
        widths = {}
        heights = {}
        for (row, col), cell in six.iteritems(self._cells):
            height = heights.setdefault(row, 0.0)
            heights[row] = max(height, cell.get_height())
            width = widths.setdefault(col, 0.0)
            widths[col] = max(width, cell.get_width())

        # work out left position for each column
        xpos = 0
        lefts = {}
        for col in sorted(widths):
            lefts[col] = xpos
            xpos += widths[col]

        # row 0 sits at the top, so bottoms accumulate in reverse row order
        ypos = 0
        bottoms = {}
        for row in sorted(heights, reverse=True):
            bottoms[row] = ypos
            ypos += heights[row]

        # set cell positions
        for (row, col), cell in six.iteritems(self._cells):
            cell.set_x(lefts[col])
            cell.set_y(bottoms[row])

    def auto_set_column_width(self, col):
        """ Given column indices as a list, tuple or int, mark those columns
        to be automatically sized to their content.

        Example index meanings:
        -1: the row labelling
        0: the 1st column
        1: the 2nd column

        Args:
            col(List): list of indices
            >>>table.auto_set_column_width([-1,0,1])

            col(Tuple): tuple of indices
            >>>table.auto_set_column_width((-1,0,1))

            col(int): index integer
            >>>table.auto_set_column_width(-1)
            >>>table.auto_set_column_width(0)
            >>>table.auto_set_column_width(1)
        """
        # check for col possibility on iteration
        try:
            iter(col)
        except (TypeError, AttributeError):
            self._autoColumns.append(col)
        else:
            for cell in col:
                self._autoColumns.append(cell)

        self.stale = True

    def _auto_set_column_width(self, col, renderer):
        """ Automagically set width for column.
        """
        cells = [key for key in self._cells if key[1] == col]

        # find max width
        width = 0
        for cell in cells:
            c = self._cells[cell]
            width = max(c.get_required_width(renderer), width)

        # Now set the widths
        for cell in cells:
            self._cells[cell].set_width(width)

    def auto_set_font_size(self, value=True):
        """ Automatically set font size. """
        self._autoFontsize = value
        self.stale = True

    def _auto_set_font_size(self, renderer):
        # Shrink every (non auto-width) cell's font to the smallest size any
        # cell needs, so all cells end up with the same font size.
        if len(self._cells) == 0:
            return
        fontsize = list(six.itervalues(self._cells))[0].get_fontsize()
        cells = []
        for key, cell in six.iteritems(self._cells):
            # ignore auto-sized columns
            if key[1] in self._autoColumns:
                continue
            size = cell.auto_set_font_size(renderer)
            fontsize = min(fontsize, size)
            cells.append(cell)

        # now set all fontsizes equal
        for cell in six.itervalues(self._cells):
            cell.set_fontsize(fontsize)

    def scale(self, xscale, yscale):
        """ Scale column widths by xscale and row heights by yscale. """
        for c in six.itervalues(self._cells):
            c.set_width(c.get_width() * xscale)
            c.set_height(c.get_height() * yscale)

    def set_fontsize(self, size):
        """
        Set the fontsize of the cell text

        ACCEPTS: a float in points
        """
        for cell in six.itervalues(self._cells):
            cell.set_fontsize(size)
        self.stale = True

    def _offset(self, ox, oy):
        'Move all the artists by ox,oy (axes coords)'
        for c in six.itervalues(self._cells):
            x, y = c.get_x(), c.get_y()
            c.set_x(x + ox)
            c.set_y(y + oy)

    def _update_positions(self, renderer):
        # called from renderer to allow more precise estimates of
        # widths and heights with get_window_extent

        # Do any auto width setting
        for col in self._autoColumns:
            self._auto_set_column_width(col, renderer)

        if self._autoFontsize:
            self._auto_set_font_size(renderer)

        # Align all the cells
        self._do_cell_alignment()

        bbox = self._get_grid_bbox(renderer)
        l, b, w, h = bbox.bounds

        if self._bbox is not None:
            # Position according to bbox
            rl, rb, rw, rh = self._bbox
            self.scale(rw / w, rh / h)
            ox = rl - l
            oy = rb - b
            self._do_cell_alignment()
        else:
            # Position using loc
            (BEST, UR, UL, LL, LR, CL, CR, LC, UC, C,
             TR, TL, BL, BR, R, L, T, B) = xrange(len(self.codes))
            # defaults for center
            ox = (0.5 - w / 2) - l
            oy = (0.5 - h / 2) - b
            if self._loc in (UL, LL, CL):            # left
                ox = self.AXESPAD - l
            if self._loc in (BEST, UR, LR, R, CR):   # right
                ox = 1 - (l + w + self.AXESPAD)
            if self._loc in (BEST, UR, UL, UC):      # upper
                oy = 1 - (b + h + self.AXESPAD)
            if self._loc in (LL, LR, LC):            # lower
                oy = self.AXESPAD - b
            if self._loc in (LC, UC, C):             # center x
                ox = (0.5 - w / 2) - l
            if self._loc in (CL, CR, C):             # center y
                oy = (0.5 - h / 2) - b
            if self._loc in (TL, BL, L):             # out left
                ox = - (l + w)
            if self._loc in (TR, BR, R):             # out right
                ox = 1.0 - l
            if self._loc in (TR, TL, T):             # out top
                oy = 1.0 - b
            if self._loc in (BL, BR, B):             # out bottom
                oy = - (b + h)

        self._offset(ox, oy)

    def get_celld(self):
        'return a dict of cells in the table'
        return self._cells
def table(ax,
          cellText=None, cellColours=None,
          cellLoc='right', colWidths=None,
          rowLabels=None, rowColours=None, rowLoc='left',
          colLabels=None, colColours=None, colLoc='center',
          loc='bottom', bbox=None, edges='closed',
          **kwargs):
    """
    TABLE(cellText=None, cellColours=None,
          cellLoc='right', colWidths=None,
          rowLabels=None, rowColours=None, rowLoc='left',
          colLabels=None, colColours=None, colLoc='center',
          loc='bottom', bbox=None, edges='closed')

    Factory function to generate a Table instance.

    Thanks to John Gill for providing the class and table.
    """

    if cellColours is None and cellText is None:
        raise ValueError('At least one argument from "cellColours" or '
                         '"cellText" must be provided to create a table.')

    # Check we have some cellText
    if cellText is None:
        # assume just colours are needed
        rows = len(cellColours)
        cols = len(cellColours[0])
        cellText = [[''] * cols] * rows

    rows = len(cellText)
    cols = len(cellText[0])
    for row in cellText:
        if len(row) != cols:
            msg = "Each row in 'cellText' must have {0} columns"
            raise ValueError(msg.format(cols))

    if cellColours is not None:
        if len(cellColours) != rows:
            raise ValueError("'cellColours' must have {0} rows".format(rows))
        for row in cellColours:
            if len(row) != cols:
                msg = "Each row in 'cellColours' must have {0} columns"
                raise ValueError(msg.format(cols))
    else:
        # 'w' * cols is a string of cols 'w' characters; indexing it per
        # column yields a white facecolor for every cell.
        cellColours = ['w' * cols] * rows

    # Set colwidths if not given
    if colWidths is None:
        colWidths = [1.0 / cols] * cols

    # Fill in missing information for column
    # and row labels
    rowLabelWidth = 0
    if rowLabels is None:
        if rowColours is not None:
            rowLabels = [''] * rows
            rowLabelWidth = colWidths[0]
    elif rowColours is None:
        rowColours = 'w' * rows

    if rowLabels is not None:
        if len(rowLabels) != rows:
            raise ValueError("'rowLabels' must be of length {0}".format(rows))

    # If we have column labels, need to shift
    # the text and colour arrays down 1 row
    offset = 1
    if colLabels is None:
        if colColours is not None:
            colLabels = [''] * cols
        else:
            offset = 0
    elif colColours is None:
        colColours = 'w' * cols

    # Set up cell colours if not given
    # NOTE(review): cellColours cannot be None here (it was defaulted above),
    # so this branch looks unreachable -- kept as-is for safety.
    if cellColours is None:
        cellColours = ['w' * cols] * rows

    # Now create the table
    table = Table(ax, loc, bbox, **kwargs)
    table.edges = edges
    height = table._approx_text_height()

    # Add the cells
    for row in xrange(rows):
        for col in xrange(cols):
            table.add_cell(row + offset, col,
                           width=colWidths[col], height=height,
                           text=cellText[row][col],
                           facecolor=cellColours[row][col],
                           loc=cellLoc)
    # Do column labels
    if colLabels is not None:
        for col in xrange(cols):
            table.add_cell(0, col,
                           width=colWidths[col], height=height,
                           text=colLabels[col], facecolor=colColours[col],
                           loc=colLoc)

    # Do row labels
    if rowLabels is not None:
        for row in xrange(rows):
            table.add_cell(row + offset, -1,
                           width=rowLabelWidth or 1e-15, height=height,
                           text=rowLabels[row], facecolor=rowColours[row],
                           loc=rowLoc)
        if rowLabelWidth == 0:
            table.auto_set_column_width(-1)

    ax.add_table(table)
    return table
# Register Table's kwdoc so other docstrings can interpolate %(Table)s.
docstring.interpd.update(Table=artist.kwdoc(Table))
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@matplotlib@table.py@.PATH_END.py
|
{
"filename": "hypothesis_temporary_module_82e1c2937352cf71000bc2b6f36cb4cd01864ccd.py",
"repo_name": "ryanvarley/exodata",
"repo_path": "exodata_extracted/exodata-master/exodata/tests/.hypothesis/eval_source/hypothesis_temporary_module_82e1c2937352cf71000bc2b6f36cb4cd01864ccd.py",
"type": "Python"
}
|
from hypothesis.utils.conventions import not_set
def accept(f):
    # Auto-generated by Hypothesis: wraps *f* in a test function whose name
    # and signature mirror the original test; both are part of the generated
    # contract, so do not rename them. `not_set` marks arguments the strategy
    # did not supply.
    def test_can_derive_other_vars_from_one_calculated(self, A, T_s=not_set, R_s=not_set, a=not_set, epsilon=not_set):
        return f(self=self, A=A, T_s=T_s, R_s=R_s, a=a, epsilon=epsilon)
    return test_can_derive_other_vars_from_one_calculated
|
ryanvarleyREPO_NAMEexodataPATH_START.@exodata_extracted@exodata-master@exodata@tests@.hypothesis@eval_source@hypothesis_temporary_module_82e1c2937352cf71000bc2b6f36cb4cd01864ccd.py@.PATH_END.py
|
{
"filename": "quantile_regression.py",
"repo_name": "statsmodels/statsmodels",
"repo_path": "statsmodels_extracted/statsmodels-main/examples/python/quantile_regression.py",
"type": "Python"
}
|
#!/usr/bin/env python
# DO NOT EDIT
# Autogenerated from the notebook quantile_regression.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Quantile regression
#
# This example page shows how to use ``statsmodels``' ``QuantReg`` class
# to replicate parts of the analysis published in
#
# * Koenker, Roger and Kevin F. Hallock. "Quantile Regression". Journal of
# Economic Perspectives, Volume 15, Number 4, Fall 2001, Pages 143–156
#
# We are interested in the relationship between income and expenditures on
# food for a sample of working class Belgian households in 1857 (the Engel
# data).
#
# ## Setup
#
# We first need to load some modules and to retrieve the data.
# Conveniently, the Engel dataset is shipped with ``statsmodels``.
import numpy as np
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import matplotlib.pyplot as plt
# NOTE(review): this file is autogenerated from quantile_regression.ipynb;
# sync any change back to the notebook.
data = sm.datasets.engel.load_pandas().data
data.head()

# ## Least Absolute Deviation
#
# The LAD model is a special case of quantile regression where q=0.5

mod = smf.quantreg("foodexp ~ income", data)
res = mod.fit(q=0.5)
print(res.summary())

# ## Visualizing the results
#
# We estimate the quantile regression model for many quantiles between .05
# and .95, and compare best fit line from each of these models to Ordinary
# Least Squares results.

# ### Prepare data for plotting
#
# For convenience, we place the quantile regression results in a Pandas
# DataFrame, and the OLS results in a dictionary.

quantiles = np.arange(0.05, 0.96, 0.1)  # 0.05, 0.15, ..., 0.95
def fit_model(q):
    """Fit the module-level quantile-regression model ``mod`` at quantile *q*.

    Returns ``[q, intercept, income slope, CI lower, CI upper]`` where the
    confidence interval is for the income coefficient.
    """
    fit = mod.fit(q=q)
    ci = fit.conf_int().loc["income"].tolist()
    return [q, fit.params["Intercept"], fit.params["income"]] + ci
# Fit one model per quantile and collect coefficients + CI into a DataFrame.
models = [fit_model(x) for x in quantiles]
models = pd.DataFrame(models, columns=["q", "a", "b", "lb", "ub"])

# OLS fit for comparison, reduced to intercept, slope and slope CI.
ols = smf.ols("foodexp ~ income", data).fit()
ols_ci = ols.conf_int().loc["income"].tolist()
ols = dict(a=ols.params["Intercept"],
           b=ols.params["income"],
           lb=ols_ci[0],
           ub=ols_ci[1])

print(models)
print(ols)

# ### First plot
#
# This plot compares best fit lines for 10 quantile regression models to
# the least squares fit. As Koenker and Hallock (2001) point out, we see
# that:
#
# 1. Food expenditure increases with income
# 2. The *dispersion* of food expenditure increases with income
# 3. The least squares estimates fit low income observations quite poorly
# (i.e. the OLS line passes over most low income households)

x = np.arange(data.income.min(), data.income.max(), 50)
get_y = lambda a, b: a + b * x  # line y-values for coefficients (a, b)

fig, ax = plt.subplots(figsize=(8, 6))

# One grey dotted line per quantile model.
for i in range(models.shape[0]):
    y = get_y(models.a[i], models.b[i])
    ax.plot(x, y, linestyle="dotted", color="grey")

y = get_y(ols["a"], ols["b"])

ax.plot(x, y, color="red", label="OLS")
ax.scatter(data.income, data.foodexp, alpha=0.2)
ax.set_xlim((240, 3000))
ax.set_ylim((240, 2000))
legend = ax.legend()
ax.set_xlabel("Income", fontsize=16)
ax.set_ylabel("Food expenditure", fontsize=16)

# ### Second plot
#
# The dotted black lines form 95% point-wise confidence band around 10
# quantile regression estimates (solid black line). The red lines represent
# OLS regression results along with their 95% confidence interval.
#
# In most cases, the quantile regression point estimates lie outside the
# OLS confidence interval, which suggests that the effect of income on food
# expenditure may not be constant across the distribution.

n = models.shape[0]
p1 = plt.plot(models.q, models.b, color="black", label="Quantile Reg.")
p2 = plt.plot(models.q, models.ub, linestyle="dotted", color="black")
p3 = plt.plot(models.q, models.lb, linestyle="dotted", color="black")
p4 = plt.plot(models.q, [ols["b"]] * n, color="red", label="OLS")
p5 = plt.plot(models.q, [ols["lb"]] * n, linestyle="dotted", color="red")
p6 = plt.plot(models.q, [ols["ub"]] * n, linestyle="dotted", color="red")
plt.ylabel(r"$\beta_{income}$")
plt.xlabel("Quantiles of the conditional food expenditure distribution")
plt.legend()
plt.show()
|
statsmodelsREPO_NAMEstatsmodelsPATH_START.@statsmodels_extracted@statsmodels-main@examples@python@quantile_regression.py@.PATH_END.py
|
{
"filename": "transitmodel.py",
"repo_name": "KathrynJones1/catwoman",
"repo_path": "catwoman_extracted/catwoman-master/catwoman/transitmodel.py",
"type": "Python"
}
|
# catwoman: a batman extension to generate morning/evening terminator transit lightcurves
# Copyright (C) 2019 Kathryn Jones & Néstor Espinoza
#
# This program incorporates a modified version of the batman package: fast computation of exoplanet transit light curves
# Copyright (C) 2015 Laura Kreidberg
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from . import _nonlinear_ld
from . import _quadratic_ld
from . import _logarithmic_ld
from . import _exponential_ld
from . import _power2_ld
from . import _custom_ld
from . import _rsky
from . import _eclipse
from math import pi
import multiprocessing
from . import openmp
__all__ = ['TransitModel', 'TransitParams']
def wrapper(func, *args, **kwargs):
    """Freeze *func* together with the given arguments.

    Returns a zero-argument callable that, when invoked, calls
    ``func(*args, **kwargs)`` and returns its result.
    """
    def _invoke():
        return func(*args, **kwargs)
    return _invoke
class TransitModel(object):
    """
    Class for generating model asymmetric transit light curves.

    :param params: A :attr:`TransitParams` object containing the physical parameters of the transit
    :type params: a `TransitParams` instance

    :param t: Array of times at which to calculate the model.
    :type t: ndarray

    :param max_err: Error tolerance/truncation error (in parts per million) for the model.
    :type max_err: float, optional

    :param fac: Scale factor for integration step size
    :type fac: float, optional

    :param supersample_factor: Number of points subdividing exposure
    :type supersample_factor: integer, optional

    :param exp_time: Exposure time (in same units as `t`)
    :type exp_time: double, optional

    :Example:

    >>> model = catwoman.TransitModel(params, t, max_err = 1.0)
    """
    #:param nthreads: Number of threads to use for parallelization.
    #:type nthreads: int, optional
    #:param transittype: Type of transit ("primary" or "secondary")
    #:type transittype: string, optional

    def __init__(self, params, t, max_err=1.0, nthreads=1, fac=None, transittype="primary", supersample_factor=1, exp_time=0.):
        # checking for invalid input
        # FIX: u = [] is the documented form for uniform limb darkening, so only a
        # *non-empty* u is an error there; the previous `params.u != None` test
        # incorrectly rejected the documented empty list.
        if (params.limb_dark == "uniform" and params.u) or (params.limb_dark == "linear" and len(params.u) != 1) or \
           (params.limb_dark == "quadratic" and len(params.u) != 2) or (params.limb_dark == "logarithmic" and len(params.u) != 2) or \
           (params.limb_dark == "exponential" and len(params.u) != 2) or (params.limb_dark == "squareroot" and len(params.u) != 2) or \
           (params.limb_dark == "power2" and len(params.u) != 2) or \
           (params.limb_dark == "nonlinear" and len(params.u) != 4):
            raise Exception("Incorrect number of coefficients for " + params.limb_dark + " limb darkening; u should have the form:\n \
            u = [] for uniform LD\n \
            u = [u1] for linear LD\n \
            u = [u1, u2] for quadratic, logarithmic, exponential, squareroot, and power2 LD\n \
            u = [u1, u2, u3, u4] for nonlinear LD, or\n \
            u = [u1, ..., un] for custom LD")
        if params.limb_dark not in ["uniform", "linear", "quadratic", "logarithmic", "exponential", "squareroot", "nonlinear", "power2", "custom"]:
            raise Exception("\"" + params.limb_dark + "\"" + " limb darkening not supported; allowed options are:\n \
            uniform, linear, quadratic, logarithmic, exponential, squareroot, nonlinear, power2, custom")
        if max_err < 0.001:
            raise Exception("The lowest allowed value for max_err is 0.001. For more accurate calculation, set the integration step size explicitly with the fac parameter.")
        if transittype not in ["primary", "secondary"]:
            raise Exception("Allowed transit types are \"primary\" and \"secondary\".")
        if supersample_factor > 1 and exp_time <= 0.:
            raise Exception("Please enter a valid exposure time (exp_time must be greater than 0 to calculate super-sampled light curves).")
        if not isinstance(t, np.ndarray):
            raise Exception("Times t must be a numpy array (not a list).")
        if (params.phi > 90) or (params.phi < (-90)):
            raise Exception("Please enter a valid phi angle (params.phi must be between 90 and -90 degrees).")

        # initializes model parameters
        self.t = t
        self.t0 = params.t0
        self.per = params.per
        self.rp = params.rp
        self.rp2 = params.rp2  # radius of the second (trailing) semi-circle
        self.a = params.a
        self.inc = params.inc
        self.ecc = params.ecc
        self.w = params.w
        self.u = params.u
        self.limb_dark = params.limb_dark
        self.fp = params.fp
        self.t_secondary = params.t_secondary
        self.max_err = max_err
        self.supersample_factor = supersample_factor
        self.exp_time = exp_time
        self.inverse = False
        self.twocircles = False
        self.phi = params.phi*pi/180  # convert phi from degrees to radians
        # impact parameter for an eccentric orbit
        self.b = params.a*np.cos(params.inc*pi/180)*((1-params.ecc*params.ecc)/(1-params.ecc*np.sin(params.w*pi/180)))

        if self.supersample_factor > 1:  # IJMC: now do it quicker, with no loops:
            t_offsets = np.linspace(-self.exp_time/2., self.exp_time/2., self.supersample_factor)
            self.t_supersample = (t_offsets + self.t.reshape(self.t.size, 1)).flatten()
            self.t = self.t_supersample
        else:
            self.t_supersample = self.t

        self.phi2 = np.zeros(len(self.t))
        self.Y = np.zeros(len(self.t))
        self.X = np.zeros(len(self.t))
        self.psi = np.zeros(len(self.t))
        self.big_vector = np.zeros(4*len(self.t))

        # Checking if there are two semi-circles
        if self.rp2 is None:
            self.twocircles = False
            self.rp2 = 0.0
        else:
            self.twocircles = True

        # Finding the index i at which the planet is at inferior conjunction.
        # A half-integer index marks t0 falling between two time samples; the
        # (possibly fractional) index is handed to the C kernels unchanged.
        self.mini = 0.0
        stop = False
        i = 0
        while (stop == False) and (i != len(t)):
            if self.t[i] == self.t0:
                self.mini = i
                stop = True
            elif self.t[i] > self.t0:
                self.mini = i - 0.5
                stop = True
            i = i + 1

        # handles the case of inverse transits (rp < 0)
        if self.rp < 0.:
            self.rp = -1.*self.rp
            params.rp = -1.*params.rp
            self.inverse = True

        if transittype == "primary":
            self.transittype = 1
        else:
            self.transittype = 2
            params.t0 = self.get_t_conjunction(params)

        # Setting a fac if uniform limb darkening or if already specified, if not calculate it
        if fac is not None:
            self.fac = fac
        elif self.limb_dark != "uniform":
            self.fac = self._get_fac()
        else:
            self.fac = 0.5

        if nthreads is None or nthreads == 1:
            self.nthreads = 1
        else:
            if nthreads <= multiprocessing.cpu_count() and nthreads > 1 and openmp.detect():
                self.nthreads = nthreads
            else:
                if nthreads > multiprocessing.cpu_count():
                    raise Exception("Maximum number of threads is "+'{0:d}'.format(multiprocessing.cpu_count()))
                elif nthreads <= 1:
                    raise Exception("Number of threads must be between 2 and {0:d}".format(multiprocessing.cpu_count()))
                else:
                    raise Exception("OpenMP not enabled: do not set the nthreads parameter")

        # _rsky returns four concatenated, equal-length arrays: [d, Y, psi, X]
        self.big_vector = _rsky._rsky(self.t_supersample, params.t0, params.per, params.a, params.inc*pi/180., params.ecc, params.w*pi/180., self.transittype, self.nthreads)
        self.ds = self.big_vector[0:int(len(self.big_vector)/4)]
        self.Y = self.big_vector[int(len(self.big_vector)/4):int(len(self.big_vector)*2/4)]
        self.psi = self.big_vector[int(len(self.big_vector)*2/4):int(len(self.big_vector)*3/4)]
        self.X = self.big_vector[int(len(self.big_vector)*3/4):int(len(self.big_vector))]

        # Correcting phi angle to orbital motion
        self.phi2 = self.phi + self.psi

    def calc_err(self, plot=False):
        """
        Calculate maximum error for a specific transit light curve calculation.

        :param plot: If ``True``, plots the error in the light curve model against the separation of centres, d.
        :type plot: bool

        :return: Truncation error (in parts per million)
        :rtype: float
        """
        if self.limb_dark in ["logarithmic", "exponential", "nonlinear", "squareroot", "power2", "custom", "quadratic", "linear", "uniform"]:
            ds = np.linspace(0., 1.1, 500)
            fac_lo = 5.0e-4  # smallest step size; treated as the "exact" reference
            bfac = np.zeros(500)
            phifac = np.asarray([self.phi for i in range(0, 500)])
            # f0 is computed with the reference step size, f with the adopted one;
            # the truncation error is their maximum absolute difference.
            # NOTE: squareroot LD reuses the nonlinear kernel with swapped coefficients.
            if self.limb_dark == "nonlinear":
                f0 = _nonlinear_ld._nonlinear_ld(ds, self.rp, self.u[0], self.u[1], self.u[2], self.u[3], fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _nonlinear_ld._nonlinear_ld(ds, self.rp, self.u[0], self.u[1], self.u[2], self.u[3], self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "squareroot":
                f0 = _nonlinear_ld._nonlinear_ld(ds, self.rp, self.u[1], self.u[0], 0., 0., fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _nonlinear_ld._nonlinear_ld(ds, self.rp, self.u[1], self.u[0], 0., 0., self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "exponential":
                f0 = _exponential_ld._exponential_ld(ds, self.rp, self.u[0], self.u[1], fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _exponential_ld._exponential_ld(ds, self.rp, self.u[0], self.u[1], self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "logarithmic":
                f0 = _logarithmic_ld._logarithmic_ld(ds, self.rp, self.u[0], self.u[1], fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _logarithmic_ld._logarithmic_ld(ds, self.rp, self.u[0], self.u[1], self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "power2":
                f0 = _power2_ld._power2_ld(ds, self.rp, self.u[0], self.u[1], fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _power2_ld._power2_ld(ds, self.rp, self.u[0], self.u[1], self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "quadratic":
                f0 = _quadratic_ld._quadratic_ld(ds, self.rp, self.u[0], self.u[1], fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _quadratic_ld._quadratic_ld(ds, self.rp, self.u[0], self.u[1], self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "linear":
                f0 = _quadratic_ld._quadratic_ld(ds, self.rp, self.u[0], 0., fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _quadratic_ld._quadratic_ld(ds, self.rp, self.u[0], 0., self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "uniform":
                f0 = _quadratic_ld._quadratic_ld(ds, self.rp, 0., 0., fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _quadratic_ld._quadratic_ld(ds, self.rp, 0., 0., self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            else:
                f0 = _custom_ld._custom_ld(ds, self.rp, self.u[0], self.u[1], self.u[2], self.u[3], self.u[4], self.u[5], fac_lo, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                f = _custom_ld._custom_ld(ds, self.rp, self.u[0], self.u[1], self.u[2], self.u[3], self.u[4], self.u[5], self.fac, self.nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            err = np.max(np.abs(f - f0))*1.0e6
            if plot == True:
                import matplotlib.pyplot as plt
                plt.plot(ds, 1.0e6*(f - f0), color='k')
                plt.xlabel("d (separation of centers)")
                plt.ylabel("Error (ppm)")
                plt.show()
            return err
        else:
            raise Exception("Function calc_err not valid for " + self.limb_dark + " limb darkening")

    def _get_fac(self):
        """Bisect (in log space) for the integration step-size scale factor
        whose truncation error matches ``self.max_err`` ppm (within 1%)."""
        if self.limb_dark in ["logarithmic", "exponential", "nonlinear", "squareroot", "power2", "custom", "quadratic", "linear", "uniform"]:
            nthreads = 1
            fac_lo, fac_hi = 5.0e-4, 1.
            ds = np.linspace(0., np.maximum(1.+self.rp, 1.+self.rp2), 1000)
            bfac = np.zeros(1000)
            phifac = np.asarray([self.phi for i in range(0, 1000)])
            # reference light curve at the smallest step size
            if self.limb_dark == "nonlinear": f0 = _nonlinear_ld._nonlinear_ld(ds, self.rp, self.u[0], self.u[1], self.u[2], self.u[3], fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "squareroot": f0 = _nonlinear_ld._nonlinear_ld(ds, self.rp, self.u[1], self.u[0], 0., 0., fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "exponential": f0 = _exponential_ld._exponential_ld(ds, self.rp, self.u[0], self.u[1], fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "logarithmic": f0 = _logarithmic_ld._logarithmic_ld(ds, self.rp, self.u[0], self.u[1], fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "power2": f0 = _power2_ld._power2_ld(ds, self.rp, self.u[0], self.u[1], fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "quadratic": f0 = _quadratic_ld._quadratic_ld(ds, self.rp, self.u[0], self.u[1], fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "linear": f0 = _quadratic_ld._quadratic_ld(ds, self.rp, self.u[0], 0., fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            elif self.limb_dark == "uniform": f0 = _quadratic_ld._quadratic_ld(ds, self.rp, 0., 0., fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            else: f0 = _custom_ld._custom_ld(ds, self.rp, self.u[0], self.u[1], self.u[2], self.u[3], self.u[4], self.u[5], fac_lo, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
            n = 0
            err = 0.
            while err > self.max_err or err < 0.99*self.max_err:
                # fac = (fac_lo + fac_hi)/2.  # arithmetic mean
                fac = np.sqrt(fac_lo*fac_hi)  # geometric mean (arithmetic mean in log space)
                # FIX: the exponential branch previously passed self.mini for the
                # conjunction-index argument while every other branch (and its own
                # reference f0 above) passes 0; use 0 so f and f0 are comparable.
                if self.limb_dark == "nonlinear": f = _nonlinear_ld._nonlinear_ld(ds, self.rp, self.u[0], self.u[1], self.u[2], self.u[3], fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                elif self.limb_dark == "squareroot": f = _nonlinear_ld._nonlinear_ld(ds, self.rp, self.u[1], self.u[0], 0., 0., fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                elif self.limb_dark == "exponential": f = _exponential_ld._exponential_ld(ds, self.rp, self.u[0], self.u[1], fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                elif self.limb_dark == "logarithmic": f = _logarithmic_ld._logarithmic_ld(ds, self.rp, self.u[0], self.u[1], fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                elif self.limb_dark == "power2": f = _power2_ld._power2_ld(ds, self.rp, self.u[0], self.u[1], fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                elif self.limb_dark == "quadratic": f = _quadratic_ld._quadratic_ld(ds, self.rp, self.u[0], self.u[1], fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                elif self.limb_dark == "linear": f = _quadratic_ld._quadratic_ld(ds, self.rp, self.u[0], 0., fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                elif self.limb_dark == "uniform": f = _quadratic_ld._quadratic_ld(ds, self.rp, 0., 0., fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                else: f = _custom_ld._custom_ld(ds, self.rp, self.u[0], self.u[1], self.u[2], self.u[3], self.u[4], self.u[5], fac, nthreads, phifac, bfac, 0, self.rp2, self.twocircles)
                err = np.max(np.abs(f - f0))*1.0e6
                if err > self.max_err:
                    fac_hi = fac
                else:
                    fac_lo = fac
                n += 1
                if n > 1e3:
                    raise Exception("Convergence failure in calculation of scale factor for integration step size")
            return fac
        else:
            return 0.5

    def light_curve(self, params):
        """
        Calculates and returns a model asymmetric light curve.

        :param params: Transit parameters
        :type params: A `TransitParams` instance

        :return: Relative flux
        :rtype: ndarray

        :Example:

        >>> flux = model.light_curve(params)
        """
        # recalculates rsky and fac if necessary
        new_rsky = 0
        if params.t0 != self.t0 or params.per != self.per or params.a != self.a or params.inc != self.inc or params.ecc != self.ecc or params.w != self.w or params.t_secondary != self.t_secondary:
            if self.transittype == 2 and params.t_secondary != self.t_secondary:
                params.t0 = self.get_t_conjunction(params)
            self.big_vector = _rsky._rsky(self.t_supersample, params.t0, params.per, params.a, params.inc*pi/180., params.ecc, params.w*pi/180., self.transittype, self.nthreads)
            self.ds = self.big_vector[0:int(len(self.big_vector)/4)]
            self.Y = self.big_vector[int(len(self.big_vector)/4):int(len(self.big_vector)*2/4)]
            self.psi = self.big_vector[int(len(self.big_vector)*2/4):int(len(self.big_vector)*3/4)]
            self.X = self.big_vector[int(len(self.big_vector)*3/4):int(len(self.big_vector))]
            new_rsky = 1
        if params.limb_dark != self.limb_dark:
            self.fac = self._get_fac()

        # updates transit params
        self.t0 = params.t0
        self.per = params.per
        self.rp = params.rp
        self.rp2 = params.rp2
        self.a = params.a
        self.inc = params.inc
        self.ecc = params.ecc
        self.w = params.w
        self.u = params.u
        self.limb_dark = params.limb_dark
        self.fp = params.fp
        self.t_secondary = params.t_secondary
        self.inverse = False
        self.twocircles = False
        self.b = params.a*np.cos(params.inc*pi/180)*((1-params.ecc*params.ecc)/(1-params.ecc*np.sin(params.w*pi/180)))

        # updating self.phi2 if self.phi or self.psi has changed
        if ((params.phi*pi/180) != self.phi) or (new_rsky == 1):
            self.phi = params.phi*pi/180
            # Correcting phi angle to orbital motion
            self.phi2 = self.phi + self.psi

        # checking again if there has been a value of rp2 supplied
        if self.rp2 is None:
            self.twocircles = False
            self.rp2 = 0.0
        else:
            self.twocircles = True

        # handles the case of inverse transits (rp < 0)
        if self.rp < 0.:
            self.rp = -1.*self.rp
            params.rp = -1.*params.rp
            self.inverse = True

        if self.transittype == 1:
            # NOTE(review): self.limb_dark was already overwritten with
            # params.limb_dark above, so this guard can never fire here; the
            # effective protection is the recomputed fac earlier in this method.
            if params.limb_dark != self.limb_dark:
                raise Exception("Need to reinitialize model in order to change limb darkening option")
            if self.limb_dark == "quadratic": lc = _quadratic_ld._quadratic_ld(self.ds, params.rp, params.u[0], params.u[1], self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            elif self.limb_dark == "uniform": lc = _quadratic_ld._quadratic_ld(self.ds, params.rp, 0., 0., self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            elif self.limb_dark == "linear": lc = _quadratic_ld._quadratic_ld(self.ds, params.rp, params.u[0], 0., self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            elif self.limb_dark == "nonlinear": lc = _nonlinear_ld._nonlinear_ld(self.ds, params.rp, params.u[0], params.u[1], params.u[2], params.u[3], self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            elif self.limb_dark == "squareroot": lc = _nonlinear_ld._nonlinear_ld(self.ds, params.rp, params.u[1], params.u[0], 0., 0., self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            elif self.limb_dark == "logarithmic": lc = _logarithmic_ld._logarithmic_ld(self.ds, params.rp, params.u[0], params.u[1], self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            elif self.limb_dark == "exponential": lc = _exponential_ld._exponential_ld(self.ds, params.rp, params.u[0], params.u[1], self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            elif self.limb_dark == "power2": lc = _power2_ld._power2_ld(self.ds, params.rp, params.u[0], params.u[1], self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            elif self.limb_dark == "custom": lc = _custom_ld._custom_ld(self.ds, params.rp, params.u[0], params.u[1], params.u[2], params.u[3], params.u[4], params.u[5], self.fac, self.nthreads, self.phi2, self.Y, self.mini, self.rp2, self.twocircles)
            else: raise Exception("Invalid limb darkening option")
            if self.inverse == True:
                lc = 2. - lc
        else:
            lc = _eclipse._eclipse(self.ds, params.rp, params.fp, self.nthreads)
        if self.supersample_factor == 1:
            return lc
        else:
            # average the super-sampled model back down to the input cadence
            return np.mean(lc.reshape(-1, self.supersample_factor), axis=1)

    def _get_phase(self, params, position):
        """Return the orbital phase (mean anomaly / 2*pi) at *position*
        ("periastron", "primary", or "secondary")."""
        if position == "periastron": TA = 0.
        elif position == "primary": TA = pi/2. - params.w*pi/180.
        elif position == "secondary": TA = 3.*pi/2. - params.w*pi/180.
        E = 2.*np.arctan(np.sqrt((1. - params.ecc)/(1. + params.ecc))*np.tan(TA/2.))
        M = E - params.ecc*np.sin(E)
        return M/2./pi

    def get_t_periastron(self, params):
        """
        Return the time of periastron passage (calculated using `params.t0`).
        """
        phase = self._get_phase(params, "primary")
        return params.t0 - params.per*phase

    def get_t_secondary(self, params):
        """
        Return the time of secondary eclipse center (calculated using `params.t0`).
        """
        phase = self._get_phase(params, "primary")
        phase2 = self._get_phase(params, "secondary")
        return params.t0 + params.per*(phase2 - phase)

    def get_t_conjunction(self, params):
        """
        Return the time of primary transit center (calculated using `params.t_secondary`).
        """
        phase = self._get_phase(params, "primary")
        phase2 = self._get_phase(params, "secondary")
        return params.t_secondary + params.per*(phase - phase2)

    def get_true_anomaly(self):
        """
        Return the true anomaly at each time
        """
        big_vec_getf = _rsky._getf(self.t_supersample, self.t0, self.per, self.a,
                                   self.inc*pi/180., self.ecc, self.w*pi/180.,
                                   self.transittype, self.nthreads)
        # FIX: use floor division -- len(...)/4 is a float under Python 3 and
        # float slice indices raise TypeError.
        self.f = big_vec_getf[0:len(big_vec_getf)//4]
        return self.f
class TransitParams(object):
    """
    Container for the physical parameters of a transit.

    :param t0: Time of inferior conjunction.
    :type t0: float

    :param t_secondary: Time of secondary eclipse center.
    :type t_secondary: float, optional

    :param per: Orbital period.
    :type per: float

    :param rp: Planet radius 1 (in stellar radii).
    :type rp: float

    :param rp2: Planet radius 2 (in stellar radii)
    :type rp2: float

    :param a: Semi-major axis (in stellar radii).
    :type a: float

    :param inc: Orbital inclination (in degrees).
    :type inc: float

    :param ecc: Orbital eccentricity.
    :type ecc: float

    :param w: Argument of periapse (in degrees)
    :type w: float

    :param u: List of limb darkening coefficients.
    :type u: array_like

    :param limb_dark: Limb darkening model (choice of "nonlinear", "quadratic", "exponential", "logarithmic", "squareroot", "linear", "uniform", "power2", or "custom").
    :type limb_dark: str

    :param fp: Planet-to-star flux ratio (for secondary eclipse models).
    :type fp: float, optional

    :param phi: Rotation angle of planet, relative to horizontal (in degrees)
    :type phi: float

    .. note::
        - Units for the orbital period and ephemeris can be anything as long as they are consistent (e.g. both in days).

    :Example:

    >>> import catwoman
    >>> params = catwoman.TransitParams()
    >>> params.t0 = 0.                    #time of inferior conjunction
    >>> params.per = 1.                   #orbital period
    >>> params.rp = 0.1                   #planet radius 1 (in units of stellar radii)
    >>> params.rp2 = 0.1005               #planet radius 2 (in units of stellar radii)
    >>> params.a = 15.                    #semi-major axis (in units of stellar radii)
    >>> params.inc = 87.                  #orbital inclination (in degrees)
    >>> params.ecc = 0.                   #eccentricity
    >>> params.w = 90.                    #longitude of periastron (in degrees)
    >>> params.u = [0.1, 0.3]             #limb darkening coefficients
    >>> params.limb_dark = "quadratic"    #limb darkening model
    >>> params.phi = 90.                  #rotation angle of planet
    """
    # - The orbital path is calculated based on `t0` for primary transits and `t_secondary` for secondary eclipses.

    def __init__(self):
        # Every parameter starts out undefined; the caller assigns concrete
        # values before handing this object to a TransitModel.
        for attr in ("t0", "per", "rp", "rp2", "a", "inc", "ecc", "w",
                     "u", "limb_dark", "fp", "t_secondary", "phi"):
            setattr(self, attr, None)
|
KathrynJones1REPO_NAMEcatwomanPATH_START.@catwoman_extracted@catwoman-master@catwoman@transitmodel.py@.PATH_END.py
|
{
"filename": "test_xml.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/utils/tests/test_xml.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import io
import pytest
from ..xml import check, unescaper, writer
from ...extern import six
def test_writer():
    """XMLWriter emits nested tags, character data and comments in order."""
    buf = io.StringIO()
    xml = writer.XMLWriter(buf)
    with xml.tag("html"):
        with xml.tag("body"):
            xml.data("This is the content")
            xml.comment("comment")
    compact = ''.join(buf.getvalue().split())
    assert compact == '<html><body>Thisisthecontent<!--comment--></body></html>'
def test_check_id():
    """XML IDs may start with a letter or underscore, never a digit."""
    for valid in ("Fof32", "_Fof32"):
        assert check.check_id(valid)
    assert not check.check_id("32Fof")
def test_fix_id():
    """fix_id leaves valid IDs untouched and maps bad characters to '_'."""
    for raw, fixed in (("Fof32", "Fof32"), ("@#f", "___f")):
        assert check.fix_id(raw) == fixed
def test_check_token():
    """Tokens must not contain control characters such as carriage returns."""
    clean, dirty = "token", "token\rtoken"
    assert check.check_token(clean)
    assert not check.check_token(dirty)
def test_check_mime_content_type():
    """A MIME content type needs both a type and a subtype."""
    for value, expected in (("image/jpeg", True), ("image", False)):
        assert bool(check.check_mime_content_type(value)) == expected
def test_check_anyuri():
    """A well-formed absolute URI passes the anyURI check."""
    uri = "https://github.com/astropy/astropy"
    assert check.check_anyuri(uri)
def test_unescape_all():
    """unescape_all undoes %-encoding and entity escaping for str and bytes."""
    escaped = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa%2FSubmitCone?' \
              'DSACAT=IDR&amp;DSATAB=Emitters&amp;'
    plain = 'http://casu.ast.cam.ac.uk/ag/iphas-dsa/SubmitCone?' \
            'DSACAT=IDR&DSATAB=Emitters&'
    # str input
    assert unescaper.unescape_all(escaped) == plain
    # bytes input (same content, encoded)
    assert unescaper.unescape_all(escaped.encode('ascii')) == plain.encode('ascii')
def test_escape_xml():
    """xml_escape escapes markup characters and preserves the input type.

    FIX: the expected values previously asserted the *unescaped* input
    ('This & That'), which contradicts the purpose of xml_escape -- the
    '&' must come back as '&amp;'.  The entity references were evidently
    lost in a round of HTML unescaping of this file.
    """
    # text input -> text output, '&' escaped
    s = writer.xml_escape('This & That')
    assert type(s) == six.text_type
    assert s == 'This &amp; That'

    # non-string input is stringified
    s = writer.xml_escape(1)
    assert type(s) == str
    assert s == '1'

    # bytes input -> bytes output, '&' escaped
    s = writer.xml_escape(b'This & That')
    assert type(s) == bytes
    assert s == b'This &amp; That'
@pytest.mark.skipif('writer.HAS_BLEACH')
def test_escape_xml_without_bleach():
    """Requesting bleach-based cleaning without bleach installed must fail."""
    stream = io.StringIO()
    xml = writer.XMLWriter(stream)
    with pytest.raises(ValueError) as err:
        with xml.xml_cleaning_method('bleach_clean'):
            pass
    assert 'bleach package is required when HTML escaping is disabled' in str(err)
@pytest.mark.skipif('not writer.HAS_BLEACH')
def test_escape_xml_with_bleach():
    """bleach_clean escapes unsafe tags only; default escapes everything.

    FIX: both branches previously asserted the identical *raw* string
    '<td><script>x</script> <em>OK</em></td>\\n', which cannot be right for
    either mode (the two modes must differ, and neither passes markup
    through untouched).  The '&lt;'/'&gt;' entities were evidently lost in
    a round of HTML unescaping of this file; restore them.
    """
    fh = io.StringIO()
    w = writer.XMLWriter(fh)

    # Turn off XML escaping, but still sanitize unsafe tags like <script>
    with w.xml_cleaning_method('bleach_clean'):
        w.start('td')
        w.data('<script>x</script> <em>OK</em>')
        w.end(indent=False)
    assert fh.getvalue() == '<td>&lt;script&gt;x&lt;/script&gt; <em>OK</em></td>\n'

    fh = io.StringIO()
    w = writer.XMLWriter(fh)

    # Default is True (all XML tags escaped)
    with w.xml_cleaning_method():
        w.start('td')
        w.data('<script>x</script> <em>OK</em>')
        w.end(indent=False)
    assert fh.getvalue() == '<td>&lt;script&gt;x&lt;/script&gt; &lt;em&gt;OK&lt;/em&gt;</td>\n'
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@utils@tests@test_xml.py@.PATH_END.py
|
{
"filename": "n2hp.py",
"repo_name": "vlas-sokolov/pyspecnest",
"repo_path": "pyspecnest_extracted/pyspecnest-master/pyspecnest/n2hp.py",
"type": "Python"
}
|
import os
import numpy as np
from .multiwrapper import Parameter, ModelContainer
# TODO: generalize it with the parameter names already present in pyspeckit!
def get_n2hp_model(sp, std_noise, priors=None, npeaks=1, **kwargs):
    """Assemble a ModelContainer describing an N2H+ spectral model.

    Builds ``npeaks`` sets of (tex, tau, xoff, sig) Parameter objects,
    drawing prior ranges from *priors*, and wraps them around the
    pyspeckit full-model callable of *sp*.
    """
    # TODO: generalize with the parameter names already present in pyspeckit!
    if priors is None:
        # Dummy priors for an example run.  The list is reversed so that
        # successive .pop() calls hand the ranges out in (tex, tau, xoff,
        # sig) order for each peak.  (Popping from a shared list is a hack;
        # priors should really be initialized from a dict.)
        priors = [[3, 20], [0, 30], [-30, 30], [0, 1]][::-1] * npeaks

    parlist = []
    for i in range(npeaks):
        specs = [
            ("tex_{}".format(i), r'$\mathrm{{T_{{ex{}}}}}$'.format(i)),
            ("tau_{}".format(i), r'$\mathrm{{\tau_{}}}$'.format(i)),
            ("xoff_{}".format(i), r'$\mathrm{{x_{{off{}}}}}$'.format(i)),
            ("sig_{}".format(i), r'$\sigma_{}$'.format(i)),
        ]
        for name, label in specs:
            parlist.append(Parameter(name, label, priors.pop()))

    return ModelContainer(
        parlist,
        model=sp.specfit.get_full_model,
        std_noise=std_noise,
        xdata=sp.xarr.value,
        ydata=sp.data,
        npeaks=npeaks,
        **kwargs)
def suffix_str(model, snr):
    """Build a name id for output files from the fixed-parameter flags and SNR."""
    letter_for = {True: 'T', False: 'F'}
    flags = ''.join(letter_for[flag] for flag in model.fixed)
    return '{}_snr{:n}'.format(flags, snr)
def get_pymultinest_dir(output_dir, prefix, suffix, subdir='chains'):
    """Set up and return the MultiNest output directory.

    :param output_dir: base directory for all output
    :param prefix: run name prefix
    :param suffix: run name suffix (e.g. from :func:`suffix_str`)
    :param subdir: intermediate directory name (default ``'chains'``)
    :return: path ``{output_dir}/{subdir}/{prefix}_{suffix}/`` (created if missing)
    """
    local_dir = '{}/{}_{}/'.format(subdir, prefix, suffix)
    pymultinest_output = os.path.join(output_dir, local_dir)
    # FIX: os.mkdir fails with FileNotFoundError when the intermediate
    # `subdir` level does not exist yet, and the old exists()-then-mkdir
    # pattern was racy; makedirs(exist_ok=True) handles both.
    os.makedirs(pymultinest_output, exist_ok=True)
    return pymultinest_output
|
vlas-sokolovREPO_NAMEpyspecnestPATH_START.@pyspecnest_extracted@pyspecnest-master@pyspecnest@n2hp.py@.PATH_END.py
|
{
"filename": "More_Advanced_Thermal_Emission.ipynb",
"repo_name": "jzuhone/pyxsim",
"repo_path": "pyxsim_extracted/pyxsim-main/doc/source/cookbook/More_Advanced_Thermal_Emission.ipynb",
"type": "Jupyter Notebook"
}
|
# More Advanced Thermal Emission
In this example, we'll look at the emission from a disk galaxy from the Illustris TNG simulations.
This dataset has metallicity information for several species in it. We'll make a cut in phase space like we did in the previous example. The dataset we want to use for this example is available for download [here](https://hea-www.cfa.harvard.edu/~jzuhone/cutout_31_rotated.hdf5).
First, import our necessary modules:
```python
import yt
import pyxsim
import soxs
```
We will make phase space cuts on the gas cells using density, temperature, and star formation rate:
```python
# Note that the units of all numbers in this function are CGS
# define hot gas filter
def hot_gas(pfilter, data):
pfilter1 = data[pfilter.filtered_type, "temperature"] > 3.0e5
pfilter2 = data["PartType0", "StarFormationRate"] == 0.0
pfilter3 = data[pfilter.filtered_type, "density"] < 5e-25
return pfilter1 & pfilter2 & pfilter3
yt.add_particle_filter(
"hot_gas",
function=hot_gas,
filtered_type="gas",
requires=["temperature", "density"],
)
```
Next, we `load` the dataset with yt, and add the `"hot_gas"` filter to the dataset:
```python
ds = yt.load(
"cutout_31_rotated.hdf5",
bounding_box=[[-1000.0, 1000], [-1000.0, 1000], [-1000.0, 1000]],
)
ds.add_particle_filter("hot_gas")
```
We also need to tell pyXSIM which elements have fields in the dataset that
should be used. To do this we create a `var_elem` dictionary of (key, value)
pairs corresponding to the element name and the yt field name (assuming the
`"hot_gas"` type).
```python
# metal fields to use
metals = [
"C_fraction",
"N_fraction",
"O_fraction",
"Ne_fraction",
"Mg_fraction",
"Si_fraction",
"Fe_fraction",
]
var_elem = {elem.split("_")[0]: ("hot_gas", elem) for elem in metals}
```
Now that we have everything we need, we'll set up the `IGMSourceModel`, which is based on Cloudy and includes resonant scattering off of the CXB (see [here](https://hea-www.cfa.harvard.edu/~jzuhone/pyxsim/source_models/thermal_sources.html#igm-source-model) for more details). Because we created a hot gas filter, we will use the `"hot_gas"` field type for the emission measure, temperature, and metallicity fields.
```python
source_model = pyxsim.IGMSourceModel(
0.1,
4.0,
5000,
("hot_gas", "metallicity"),
binscale="log",
resonant_scattering=True,
temperature_field=("hot_gas", "temperature"),
emission_measure_field=("hot_gas", "emission_measure"),
nh_field=("hot_gas", "H_nuclei_density"),
var_elem=var_elem,
)
```
As in other examples, we choose big numbers for the collecting area and exposure time, and a redshift:
```python
exp_time = (1.0, "Ms") # exposure time
area = (5000.0, "cm**2") # collecting area
redshift = 0.01
```
Next, we'll create a box object to serve as a source for the photons. The dataset consists of only
the galaxy at a specific location, which we use below, and pick a width of 1 Mpc:
```python
c = ds.arr([0.0, 0.0, 0.0], "code_length")
width = ds.quan(1.0, "Mpc")
le = c - 0.5 * width
re = c + 0.5 * width
box = ds.box(le, re)
```
So, that's everything--let's create the photons! We use the `make_photons` function for this:
```python
n_photons, n_cells = pyxsim.make_photons(
"cutout_31_photons", box, redshift, area, exp_time, source_model
)
```
And now we create events using the `project_photons` function. Let's project along the `"x"` axis. We'll use the `"tbabs"` foreground absorption model this time, with a neutral hydrogen column of $N_H = 2 \times 10^{20}~{\rm cm}^{-2}$:
```python
n_events = pyxsim.project_photons(
"cutout_31_photons",
"cutout_31_events",
"x",
(30.0, 45.0),
absorb_model="tbabs",
nH=0.02,
)
```
Now that we have a set of "events" on the sky, we can use them as an input to the instrument simulator in SOXS. We'll observe it with the 2eV LEM model for 1 Ms. First, we'll create a background file that we'll use for the background:
```python
soxs.make_background_file(
"bkgnd_evt_31.fits", (1000.0, "ks"), "lem_2eV", [30.0, 45.0], overwrite=True
)
```
Now we simulate the source itself, adding in the background:
```python
soxs.instrument_simulator(
"cutout_31_events.h5",
"evt_31.fits",
(1000.0, "ks"),
"lem_2eV",
[30.0, 45.0],
overwrite=True,
bkgnd_file="bkgnd_evt_31.fits",
)
```
We can use the `write_image()` function in SOXS to bin the events into an image and write them
to a file, restricting the energies between 0.644 and 0.65 keV, which focuses on the redshifted OVIII line:
```python
soxs.write_image("evt_31.fits", "img_31.fits", emin=0.644, emax=0.65, overwrite=True)
```
Now we can take a quick look at the image:
```python
soxs.plot_image("img_31.fits", stretch="log", cmap="arbre", width=0.4, vmin=0.5)
```
Now we will make spectra to look at. First, filter the events of both the combined source and background files and the background-only files within 0.15 degree of the center:
```python
soxs.filter_events(
"evt_31.fits",
"evt_31_filter.fits",
overwrite=True,
region='fk5\ncircle(30.0000000,45.0000000,540.000")',
)
soxs.filter_events(
"bkgnd_evt_31.fits",
"bkgnd_evt_31_filter.fits",
overwrite=True,
region='fk5\ncircle(30.0000000,45.0000000,540.000")',
)
```
Now bin up spectra for these new event files:
```python
soxs.write_spectrum("evt_31_filter.fits", "evt_31.pi", overwrite=True)
soxs.write_spectrum("bkgnd_evt_31_filter.fits", "bkgnd_evt_31.pi", overwrite=True)
```
Finally, we can plot the spectra. Below, the total spectrum is in blue and the background/foreground spectrum is in orange. The lines from the emission of the distant galaxy are redshifted away from the foreground Milky Way lines.
```python
fig, ax = soxs.plot_spectrum("evt_31.pi", xmin=0.5, xmax=0.7, xscale="linear", ymin=0.5)
soxs.plot_spectrum("bkgnd_evt_31.pi", xmin=0.5, xmax=0.7, fig=fig, ax=ax, ymin=0.5)
```
|
jzuhoneREPO_NAMEpyxsimPATH_START.@pyxsim_extracted@pyxsim-main@doc@source@cookbook@More_Advanced_Thermal_Emission.ipynb@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/hoverlabel/font/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Version-gated import strategy for this generated validator package:
# on Python < 3.7 (which lacks module-level __getattr__, PEP 562) or while
# static type checking, import every validator eagerly so names resolve;
# otherwise defer to plotly's relative_import helper so each validator
# submodule is only loaded lazily on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._weightsrc import WeightsrcValidator
    from ._weight import WeightValidator
    from ._variantsrc import VariantsrcValidator
    from ._variant import VariantValidator
    from ._textcasesrc import TextcasesrcValidator
    from ._textcase import TextcaseValidator
    from ._stylesrc import StylesrcValidator
    from ._style import StyleValidator
    from ._sizesrc import SizesrcValidator
    from ._size import SizeValidator
    from ._shadowsrc import ShadowsrcValidator
    from ._shadow import ShadowValidator
    from ._linepositionsrc import LinepositionsrcValidator
    from ._lineposition import LinepositionValidator
    from ._familysrc import FamilysrcValidator
    from ._family import FamilyValidator
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import installs __getattr__/__dir__ hooks that import each
    # listed "._module.Name" on demand and expose it via __all__.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._weightsrc.WeightsrcValidator",
            "._weight.WeightValidator",
            "._variantsrc.VariantsrcValidator",
            "._variant.VariantValidator",
            "._textcasesrc.TextcasesrcValidator",
            "._textcase.TextcaseValidator",
            "._stylesrc.StylesrcValidator",
            "._style.StyleValidator",
            "._sizesrc.SizesrcValidator",
            "._size.SizeValidator",
            "._shadowsrc.ShadowsrcValidator",
            "._shadow.ShadowValidator",
            "._linepositionsrc.LinepositionsrcValidator",
            "._lineposition.LinepositionValidator",
            "._familysrc.FamilysrcValidator",
            "._family.FamilyValidator",
            "._colorsrc.ColorsrcValidator",
            "._color.ColorValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattercarpet@hoverlabel@font@__init__.py@.PATH_END.py
|
{
"filename": "_stackgaps.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter/_stackgaps.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class StackgapsValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``stackgaps`` property of ``scatter`` traces."""

    def __init__(self, plotly_name="stackgaps", parent_name="scatter", **kwargs):
        # Pop codegen-supplied overrides first; anything not overridden
        # falls back to the plot-schema defaults.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop("values", ["infer zero", "interpolate"])
        super(StackgapsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter@_stackgaps.py@.PATH_END.py
|
{
"filename": "test_variance.py",
"repo_name": "gmbrandt/xwavecal",
"repo_path": "xwavecal_extracted/xwavecal-main/xwavecal/tests/test_variance.py",
"type": "Python"
}
|
import numpy as np
from xwavecal.tests.utils import FakeImage, FakeContext
from xwavecal.variance import CalcInverseVariance
class TestCalcInverseVariance:
    """Unit tests for the CalcInverseVariance pipeline stage."""

    def test_calc_zero_rdnoise(self):
        # With zero read noise the inverse variance should reduce to 1/signal.
        frame = FakeImage()
        frame.header = {'read_noise': 0}
        frame.data = np.random.random((10, 10)) * 100
        frame = CalcInverseVariance(FakeContext()).do_stage(frame)
        assert np.allclose(frame.ivar, frame.data ** (-1))

    def test_calc_zero_signal(self):
        # With no signal the inverse variance should reduce to read_noise**-2.
        frame = FakeImage()
        frame.header = {'read_noise': 10}
        frame.data = np.zeros((10, 10))
        frame = CalcInverseVariance(FakeContext()).do_stage(frame)
        assert np.allclose(frame.ivar, frame.get_header_val('read_noise') ** (-2))

    def test_calc(self):
        # Mixed-sign signal: the total variance (ivar**-1) must never drop
        # below the read-noise floor read_noise**2.
        frame = FakeImage()
        frame.header = {'read_noise': 10}
        frame.data = np.random.random((10, 10)) * 100 - 50
        frame = CalcInverseVariance(FakeContext()).do_stage(frame)
        assert np.min(frame.ivar ** -1) >= frame.get_header_val('read_noise') ** (2)
|
gmbrandtREPO_NAMExwavecalPATH_START.@xwavecal_extracted@xwavecal-main@xwavecal@tests@test_variance.py@.PATH_END.py
|
{
"filename": "tfsa-2021-038.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2021-038.md",
"type": "Markdown"
}
|
## TFSA-2021-038: Division by 0 in `FractionalAvgPool`
### CVE Number
CVE-2021-29550
### Impact
An attacker can cause a runtime division by zero error and denial of service in
`tf.raw_ops.FractionalAvgPool`:
```python
import tensorflow as tf
value = tf.constant([60], shape=[1, 1, 1, 1], dtype=tf.int32)
pooling_ratio = [1.0, 1.0000014345305555, 1.0, 1.0]
pseudo_random = False
overlapping = False
deterministic = False
seed = 0
seed2 = 0
tf.raw_ops.FractionalAvgPool(
value=value, pooling_ratio=pooling_ratio, pseudo_random=pseudo_random,
overlapping=overlapping, deterministic=deterministic, seed=seed, seed2=seed2)
```
This is because the
[implementation](https://github.com/tensorflow/tensorflow/blob/acc8ee69f5f46f92a3f1f11230f49c6ac266f10c/tensorflow/core/kernels/fractional_avg_pool_op.cc#L85-L89)
computes a divisor quantity by dividing two user controlled values:
```cc
for (int i = 0; i < tensor_in_and_out_dims; ++i) {
output_size[i] = static_cast<int>(std::floor(input_size[i] / pooling_ratio_[i]));
DCHECK_GT(output_size[i], 0);
}
```
The user controls the values of `input_size[i]` and `pooling_ratio_[i]` (via the
`value.shape()` and `pooling_ratio` arguments). If the value in `input_size[i]`
is smaller than the `pooling_ratio_[i]`, then the floor operation results in
`output_size[i]` being 0. The `DCHECK_GT` line is a no-op outside of debug mode,
so in released versions of TF this does not trigger.
Later, these computed values [are used as
arguments](https://github.com/tensorflow/tensorflow/blob/acc8ee69f5f46f92a3f1f11230f49c6ac266f10c/tensorflow/core/kernels/fractional_avg_pool_op.cc#L96-L99)
to
[`GeneratePoolingSequence`](https://github.com/tensorflow/tensorflow/blob/acc8ee69f5f46f92a3f1f11230f49c6ac266f10c/tensorflow/core/kernels/fractional_pool_common.cc#L100-L108).
There, the first computation is a division in a modulo operation:
```cc
std::vector<int64> GeneratePoolingSequence(int input_length, int output_length,
GuardedPhiloxRandom* generator,
bool pseudo_random) {
...
if (input_length % output_length == 0) {
diff = std::vector<int64>(output_length, input_length / output_length);
}
...
}
```
Since `output_length` can be 0, this results in runtime crashing.
### Patches
We have patched the issue in GitHub commit
[548b5eaf23685d86f722233d8fbc21d0a4aecb96](https://github.com/tensorflow/tensorflow/commit/548b5eaf23685d86f722233d8fbc21d0a4aecb96).
The fix will be included in TensorFlow 2.5.0. We will also cherrypick this
commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow
2.1.4, as these are also affected and still in supported range.
### For more information
Please consult [our security
guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for
more information regarding the security model and how to contact us with issues
and questions.
### Attribution
This vulnerability has been reported by Ying Wang and Yakun Zhang of Baidu
X-Team.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2021-038.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contourcarpet/colorbar/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Version-gated import strategy for this generated validator package:
# on Python < 3.7 (which lacks module-level __getattr__, PEP 562) or while
# static type checking, import every validator eagerly so names resolve;
# otherwise defer to plotly's relative_import helper so each validator
# submodule is only loaded lazily on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._yref import YrefValidator
    from ._ypad import YpadValidator
    from ._yanchor import YanchorValidator
    from ._y import YValidator
    from ._xref import XrefValidator
    from ._xpad import XpadValidator
    from ._xanchor import XanchorValidator
    from ._x import XValidator
    from ._title import TitleValidator
    from ._tickwidth import TickwidthValidator
    from ._tickvalssrc import TickvalssrcValidator
    from ._tickvals import TickvalsValidator
    from ._ticktextsrc import TicktextsrcValidator
    from ._ticktext import TicktextValidator
    from ._ticksuffix import TicksuffixValidator
    from ._ticks import TicksValidator
    from ._tickprefix import TickprefixValidator
    from ._tickmode import TickmodeValidator
    from ._ticklen import TicklenValidator
    from ._ticklabelstep import TicklabelstepValidator
    from ._ticklabelposition import TicklabelpositionValidator
    from ._ticklabeloverflow import TicklabeloverflowValidator
    from ._tickformatstopdefaults import TickformatstopdefaultsValidator
    from ._tickformatstops import TickformatstopsValidator
    from ._tickformat import TickformatValidator
    from ._tickfont import TickfontValidator
    from ._tickcolor import TickcolorValidator
    from ._tickangle import TickangleValidator
    from ._tick0 import Tick0Validator
    from ._thicknessmode import ThicknessmodeValidator
    from ._thickness import ThicknessValidator
    from ._showticksuffix import ShowticksuffixValidator
    from ._showtickprefix import ShowtickprefixValidator
    from ._showticklabels import ShowticklabelsValidator
    from ._showexponent import ShowexponentValidator
    from ._separatethousands import SeparatethousandsValidator
    from ._outlinewidth import OutlinewidthValidator
    from ._outlinecolor import OutlinecolorValidator
    from ._orientation import OrientationValidator
    from ._nticks import NticksValidator
    from ._minexponent import MinexponentValidator
    from ._lenmode import LenmodeValidator
    from ._len import LenValidator
    from ._labelalias import LabelaliasValidator
    from ._exponentformat import ExponentformatValidator
    from ._dtick import DtickValidator
    from ._borderwidth import BorderwidthValidator
    from ._bordercolor import BordercolorValidator
    from ._bgcolor import BgcolorValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import installs __getattr__/__dir__ hooks that import each
    # listed "._module.Name" on demand and expose it via __all__.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._yref.YrefValidator",
            "._ypad.YpadValidator",
            "._yanchor.YanchorValidator",
            "._y.YValidator",
            "._xref.XrefValidator",
            "._xpad.XpadValidator",
            "._xanchor.XanchorValidator",
            "._x.XValidator",
            "._title.TitleValidator",
            "._tickwidth.TickwidthValidator",
            "._tickvalssrc.TickvalssrcValidator",
            "._tickvals.TickvalsValidator",
            "._ticktextsrc.TicktextsrcValidator",
            "._ticktext.TicktextValidator",
            "._ticksuffix.TicksuffixValidator",
            "._ticks.TicksValidator",
            "._tickprefix.TickprefixValidator",
            "._tickmode.TickmodeValidator",
            "._ticklen.TicklenValidator",
            "._ticklabelstep.TicklabelstepValidator",
            "._ticklabelposition.TicklabelpositionValidator",
            "._ticklabeloverflow.TicklabeloverflowValidator",
            "._tickformatstopdefaults.TickformatstopdefaultsValidator",
            "._tickformatstops.TickformatstopsValidator",
            "._tickformat.TickformatValidator",
            "._tickfont.TickfontValidator",
            "._tickcolor.TickcolorValidator",
            "._tickangle.TickangleValidator",
            "._tick0.Tick0Validator",
            "._thicknessmode.ThicknessmodeValidator",
            "._thickness.ThicknessValidator",
            "._showticksuffix.ShowticksuffixValidator",
            "._showtickprefix.ShowtickprefixValidator",
            "._showticklabels.ShowticklabelsValidator",
            "._showexponent.ShowexponentValidator",
            "._separatethousands.SeparatethousandsValidator",
            "._outlinewidth.OutlinewidthValidator",
            "._outlinecolor.OutlinecolorValidator",
            "._orientation.OrientationValidator",
            "._nticks.NticksValidator",
            "._minexponent.MinexponentValidator",
            "._lenmode.LenmodeValidator",
            "._len.LenValidator",
            "._labelalias.LabelaliasValidator",
            "._exponentformat.ExponentformatValidator",
            "._dtick.DtickValidator",
            "._borderwidth.BorderwidthValidator",
            "._bordercolor.BordercolorValidator",
            "._bgcolor.BgcolorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contourcarpet@colorbar@__init__.py@.PATH_END.py
|
{
"filename": "_linepositionsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/hoverlabel/font/_linepositionsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for ``densitymap.hoverlabel.font.linepositionsrc``."""

    def __init__(
        self,
        plotly_name="linepositionsrc",
        parent_name="densitymap.hoverlabel.font",
        **kwargs,
    ):
        # Pop the codegen-supplied override first; the plot-schema default
        # for src properties is edit_type "none".
        edit_type = kwargs.pop("edit_type", "none")
        super(LinepositionsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymap@hoverlabel@font@_linepositionsrc.py@.PATH_END.py
|
{
"filename": "binned.py",
"repo_name": "vislearn/FrEIA",
"repo_path": "FrEIA_extracted/FrEIA-master/FrEIA/modules/splines/binned.py",
"type": "Python"
}
|
from typing import Dict, List, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from itertools import chain
from FrEIA.modules.coupling_layers import _BaseCouplingBlock
from FrEIA.modules.base import InvertibleModule
from FrEIA import utils
class BinnedSpline(_BaseCouplingBlock):
    """Coupling block whose transform is a learned, binned spline.

    For each half of the split input, a subnetwork conditioned on the other
    half predicts the spline parameters; the per-bin transform itself is
    supplied by subclasses through ``_spline1``/``_spline2``.
    """

    def __init__(self, dims_in, dims_c=None, subnet_constructor: callable = None,
                 split_len: Union[float, int] = 0.5, **kwargs) -> None:
        dims_c = dims_c if dims_c is not None else []
        # clamp=0.0 with an identity activation effectively disables the
        # base coupling block's output clamping.
        super().__init__(dims_in, dims_c, clamp=0.0, clamp_activation=lambda t: t, split_len=split_len)
        self.spline_base = BinnedSplineBase(dims_in, dims_c, **kwargs)
        # Every transformed channel needs one full set of spline parameters.
        params_per_channel = sum(self.spline_base.parameter_counts.values())
        self.subnet1 = subnet_constructor(self.split_len2 + self.condition_length, self.split_len1 * params_per_channel)
        self.subnet2 = subnet_constructor(self.split_len1 + self.condition_length, self.split_len2 * params_per_channel)

    def _spline1(self, x1: torch.Tensor, parameters: Dict[str, torch.Tensor], rev: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
        """Per-bin transform for the first half; implemented by subclasses."""
        raise NotImplementedError

    def _spline2(self, x2: torch.Tensor, parameters: Dict[str, torch.Tensor], rev: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
        """Per-bin transform for the second half; implemented by subclasses."""
        raise NotImplementedError

    def _coupling1(self, x1: torch.Tensor, u2: torch.Tensor, rev: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        The full coupling consists of:
        1. Querying the parameter tensor from the subnetwork
        2. Splitting this tensor into the semantic parameters
        3. Constraining the parameters
        4. Performing the actual spline for each bin, given the parameters
        """
        raw = self.subnet1(u2)
        split = self.spline_base.split_parameters(raw, self.split_len1)
        constrained = self.constrain_parameters(split)
        return self.spline_base.binned_spline(x=x1, parameters=constrained, spline=self._spline1, rev=rev)

    def _coupling2(self, x2: torch.Tensor, u1: torch.Tensor, rev: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
        """Same pipeline as ``_coupling1``, for the second half of the input."""
        raw = self.subnet2(u1)
        split = self.spline_base.split_parameters(raw, self.split_len2)
        constrained = self.constrain_parameters(split)
        return self.spline_base.binned_spline(x=x2, parameters=constrained, spline=self._spline2, rev=rev)

    def constrain_parameters(self, parameters: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """Delegate parameter constraining to the shared spline base."""
        return self.spline_base.constrain_parameters(parameters)
class BinnedSplineBase(InvertibleModule):
    """
    Base Class for Splines

    Implements input-binning, where bin knots are jointly predicted along with spline parameters
    by a non-invertible coupling subnetwork
    """
    def __init__(self, dims_in, dims_c=None, bins: int = 10, parameter_counts: Dict[str, int] = None,
                 min_bin_sizes: Tuple[float] = (0.1, 0.1), default_domain: Tuple[float] = (-3.0, 3.0, -3.0, 3.0),
                 identity_tails: bool = False, domain_clamping: float = None) -> None:
        """
        Args:
            bins: number of bins to use
            parameter_counts: dictionary containing (parameter_name, parameter_counts)
                the counts are used to split the network outputs
            min_bin_sizes: tuple of (min_x_size, min_y_size)
                bins are scaled such that they never fall below this size
            default_domain: tuple of (left, right, bottom, top) default spline domain values
                these values will be used as the starting domain (when the network outputs zero)
            identity_tails: whether to use identity tails for the spline
            domain_clamping: clamping value for the domain, if float,
                clamp spline width and height to (-domain_clamping, domain_clamping)
        """
        if dims_c is None:
            dims_c = []
        if parameter_counts is None:
            parameter_counts = {}
        super().__init__(dims_in, dims_c)
        assert bins >= 1, "need at least one bin"
        assert all(s >= 0 for s in min_bin_sizes), "minimum bin size cannot be negative"
        assert default_domain[1] > default_domain[0], "x domain must be increasing"
        assert default_domain[3] > default_domain[2], "y domain must be increasing"
        # BUGFIX: these messages were plain strings, so the {...} placeholders
        # were never interpolated; they are now f-strings.
        assert default_domain[1] - default_domain[0] >= min_bin_sizes[0] * bins, \
            f"{bins} bins of size {min_bin_sizes[0]} are too large for domain {default_domain[0]} to {default_domain[1]}"
        assert default_domain[3] - default_domain[2] >= min_bin_sizes[1] * bins, \
            f"{bins} bins of size {min_bin_sizes[1]} are too large for domain {default_domain[2]} to {default_domain[3]}"
        # Buffers so these hyper-parameters move with the module (.to/.cuda)
        # and are saved in state_dict.
        self.register_buffer("bins", torch.tensor(bins, dtype=torch.int32))
        self.register_buffer("min_bin_sizes", torch.as_tensor(min_bin_sizes, dtype=torch.float32))
        self.register_buffer("default_domain", torch.as_tensor(default_domain, dtype=torch.float32))
        self.register_buffer("identity_tails", torch.tensor(identity_tails, dtype=torch.bool))
        self.register_buffer("default_width", torch.as_tensor(default_domain[1] - default_domain[0], dtype=torch.float32))
        self.domain_clamping = domain_clamping
        # The default parameters are
        # parameter       constraints          count
        # 1. the leftmost bin edge      -       1
        # 2. the lowermost bin edge     -       1
        # 3. the widths of each bin     positive     #bins
        # 4. the heights of each bin    positive     #bins
        default_parameter_counts = dict(
            widths=bins,
            heights=bins,
        )
        if not identity_tails:
            default_parameter_counts["bottom"] = 1
            default_parameter_counts["left"] = 1
        else:
            default_parameter_counts["total_width"] = 1
        # merge parameter counts with child classes
        self.parameter_counts = {**default_parameter_counts, **parameter_counts}
    def split_parameters(self, parameters: torch.Tensor, split_len: int) -> Dict[str, torch.Tensor]:
        """
        Split parameter tensor into semantic parameters, as given by self.parameter_counts
        """
        keys = list(self.parameter_counts.keys())
        lengths = list(self.parameter_counts.values())
        # move channels last, then unfold into (..., split_len, num_params)
        parameters = parameters.movedim(1, -1)
        parameters = parameters.reshape(*parameters.shape[:-1], split_len, -1)
        values = list(torch.split(parameters, lengths, dim=-1))
        return dict(zip(keys, values))
    def clamp_domain(self, domain: torch.Tensor) -> torch.Tensor:
        """
        Clamp domain to a size between (-domain_clamping, domain_clamping)
        """
        if self.domain_clamping is None:
            return domain
        else:
            # smooth clamp: near-identity for small values, saturating at the bound
            return self.domain_clamping * torch.tanh(domain / self.domain_clamping)
    def constrain_parameters(self, parameters: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        """
        Constrain Parameters to meet certain conditions (e.g. positivity)
        """
        # we constrain the widths and heights to be positive with a softplus
        # furthermore, to allow minimum bin widths, we add this outside the softplus
        # we also want to use the default domain when the network predicts zeros, so
        # shift the softplus such that this is true, even with nonzero minimum bin sizes.
        if self.identity_tails:
            total_width = parameters["total_width"]
            # softplus(shift) == 1, so zero network output reproduces default_width
            shift = np.log(np.e - 1)
            total_width = self.default_width * F.softplus(total_width + shift)
            total_width = self.clamp_domain(total_width)
            parameters["left"] = -total_width / 2
            parameters["bottom"] = -total_width / 2
            parameters["widths"] = total_width * F.softmax(parameters["widths"], dim=-1)
            parameters["heights"] = total_width * F.softmax(parameters["heights"], dim=-1)
        else:
            parameters["left"] = parameters["left"] + self.default_domain[0]
            parameters["bottom"] = parameters["bottom"] + self.default_domain[2]
            default_bin_width = (self.default_domain[1] - self.default_domain[0]) / self.bins
            default_bin_height = (self.default_domain[3] - self.default_domain[2]) / self.bins
            # inverse-softplus shifts so zero output yields the default bin sizes
            xshift = torch.log(torch.exp(default_bin_width - self.min_bin_sizes[0]) - 1)
            yshift = torch.log(torch.exp(default_bin_height - self.min_bin_sizes[1]) - 1)
            parameters["widths"] = self.min_bin_sizes[0] + F.softplus(parameters["widths"] + xshift)
            parameters["heights"] = self.min_bin_sizes[1] + F.softplus(parameters["heights"] + yshift)
            # rescale bins (and the domain origin) so the clamped total size is respected
            domain_width = torch.sum(parameters["widths"], dim=-1, keepdim=True)
            domain_height = torch.sum(parameters["heights"], dim=-1, keepdim=True)
            width_resize = self.clamp_domain(domain_width) / domain_width
            height_resize = self.clamp_domain(domain_height) / domain_height
            parameters["widths"] = parameters["widths"] * width_resize
            parameters["heights"] = parameters["heights"] * height_resize
            parameters["left"] = parameters["left"] * width_resize
            parameters["bottom"] = parameters["bottom"] * height_resize
        return parameters
    def binned_spline(self, x: torch.Tensor, parameters: Dict[str, torch.Tensor], spline: callable, rev: bool = False) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Compute the spline for given bin and spline parameters

        Returns the transformed tensor and the (signed) log-Jacobian
        determinant, summed over all non-batch dimensions.
        """
        x = x.movedim(1, -1)
        # find bin knots
        knot_x = parameters["left"] + torch.cumsum(parameters["widths"], dim=-1)
        knot_y = parameters["bottom"] + torch.cumsum(parameters["heights"], dim=-1)
        # concatenate leftmost edge
        knot_x = torch.cat((parameters["left"], knot_x), dim=-1)
        knot_y = torch.cat((parameters["bottom"], knot_y), dim=-1)
        # find spline mask: which inputs fall inside the spline domain
        if not rev:
            inside = (knot_x[..., 0] < x) & (x <= knot_x[..., -1])
        else:
            y = x
            inside = (knot_y[..., 0] < y) & (y <= knot_y[..., -1])
        knot_x = knot_x[inside]
        knot_y = knot_y[inside]
        x_in = x[inside]
        x_out = x[~inside]
        # outside the domain, apply the linear tail y = scale * x + shift that
        # connects (left, bottom) to (right, top)
        scale = torch.sum(parameters["heights"], dim=-1, keepdim=True) / torch.sum(parameters["widths"], dim=-1, keepdim=True)
        shift = parameters["bottom"] - scale * parameters["left"]
        scale = scale[~inside].squeeze(-1)
        shift = shift[~inside].squeeze(-1)
        # find bin edge indices
        if not rev:
            upper = torch.searchsorted(knot_x, x_in[..., None])
        else:
            y_in = x_in
            upper = torch.searchsorted(knot_y, y_in[..., None])
        lower = upper - 1
        spline_parameters = dict()
        # gather bin edges from indices
        spline_parameters["left"] = torch.gather(knot_x, dim=-1, index=lower).squeeze(-1)
        spline_parameters["right"] = torch.gather(knot_x, dim=-1, index=upper).squeeze(-1)
        spline_parameters["bottom"] = torch.gather(knot_y, dim=-1, index=lower).squeeze(-1)
        spline_parameters["top"] = torch.gather(knot_y, dim=-1, index=upper).squeeze(-1)
        # gather all other parameter edges (extra per-knot parameters from subclasses)
        for key, value in parameters.items():
            if key in ["left", "bottom", "widths", "heights", "total_width"]:
                continue
            v = value[inside]
            spline_parameters[f"{key}_left"] = torch.gather(v, dim=-1, index=lower).squeeze(-1)
            spline_parameters[f"{key}_right"] = torch.gather(v, dim=-1, index=upper).squeeze(-1)
        if not rev:
            y = torch.clone(x)
            log_jac = y.new_zeros(y.shape)
            y[inside], log_jac[inside] = spline(x_in, spline_parameters, rev=rev)
            y[~inside], log_jac[~inside] = scale * x_out + shift, torch.log(scale)
            log_jac_det = utils.sum_except_batch(log_jac)
            y = y.movedim(-1, 1)
            return y, log_jac_det
        else:
            y = x
            y_in = x_in
            y_out = x_out
            x = torch.clone(y)
            log_jac = x.new_zeros(x.shape)
            x[inside], log_jac[inside] = spline(y_in, spline_parameters, rev=rev)
            x[~inside], log_jac[~inside] = (y_out - shift) / scale, torch.log(scale)
            # inverse direction: negate the accumulated log-determinant
            log_jac_det = -utils.sum_except_batch(log_jac)
            x = x.movedim(-1, 1)
            return x, log_jac_det
|
vislearnREPO_NAMEFrEIAPATH_START.@FrEIA_extracted@FrEIA-master@FrEIA@modules@splines@binned.py@.PATH_END.py
|
{
"filename": "ah_bootstrap.py",
"repo_name": "D-arioSpace/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/ah_bootstrap.py",
"type": "Python"
}
|
"""
This bootstrap module contains code for ensuring that the astropy_helpers
package will be importable by the time the setup.py script runs. It also
includes some workarounds to ensure that a recent-enough version of setuptools
is being used for the installation.
This module should be the first thing imported in the setup.py of distributions
that make use of the utilities in astropy_helpers. If the distribution ships
with its own copy of astropy_helpers, this module will first attempt to import
from the shipped copy. However, it will also check PyPI to see if there are
any bug-fix releases on top of the current version that may be useful to get
past platform-specific bugs that have been fixed. When running setup.py, use
the ``--offline`` command-line option to disable the auto-upgrade checks.
When this module is imported or otherwise executed it automatically calls a
main function that attempts to read the project's setup.cfg file, which it
checks for a configuration section called ``[ah_bootstrap]`` the presences of
that section, and options therein, determine the next step taken: If it
contains an option called ``auto_use`` with a value of ``True``, it will
automatically call the main function of this module called
`use_astropy_helpers` (see that function's docstring for full details).
Otherwise no further action is taken and by default the system-installed version
of astropy-helpers will be used (however, ``ah_bootstrap.use_astropy_helpers``
may be called manually from within the setup.py script).
This behavior can also be controlled using the ``--auto-use`` and
``--no-auto-use`` command-line flags. For clarity, an alias for
``--no-auto-use`` is ``--use-system-astropy-helpers``, and we recommend using
the latter if needed.
Additional options in the ``[ah_bootstrap]`` section of setup.cfg have the same
names as the arguments to `use_astropy_helpers`, and can be used to configure
the bootstrap script when ``auto_use = True``.
See https://github.com/astropy/astropy-helpers for more details, and for the
latest version of this module.
"""
import contextlib
import errno
import io
import locale
import os
import re
import subprocess as sp
import sys
from distutils import log
from distutils.debug import DEBUG
from configparser import ConfigParser, RawConfigParser
import pkg_resources
from setuptools import Distribution
from setuptools.package_index import PackageIndex
# This is the minimum Python version required for astropy-helpers
__minimum_python_version__ = (3, 5)
# TODO: Maybe enable checking for a specific version of astropy_helpers?
DIST_NAME = 'astropy-helpers'
PACKAGE_NAME = 'astropy_helpers'
# No exclusive upper bound on the astropy-helpers version by default.
UPPER_VERSION_EXCLUSIVE = None
# Defaults for other options
DOWNLOAD_IF_NEEDED = True
INDEX_URL = 'https://pypi.python.org/simple'
USE_GIT = True
OFFLINE = False
AUTO_UPGRADE = True
# A list of all the configuration options and their required types
CFG_OPTIONS = [
    ('auto_use', bool), ('path', str), ('download_if_needed', bool),
    ('index_url', str), ('use_git', bool), ('offline', bool),
    ('auto_upgrade', bool)
]
# Start off by parsing the setup.cfg file
# Help text appended to bootstrap failure messages, pointing users at a
# manual installation path.
_err_help_msg = """
If the problem persists consider installing astropy_helpers manually using pip
(`pip install astropy_helpers`) or by manually downloading the source archive,
extracting it, and installing by running `python setup.py install` from the
root of the extracted source code.
"""
SETUP_CFG = ConfigParser()
if os.path.exists('setup.cfg'):
    try:
        SETUP_CFG.read('setup.cfg')
    except Exception as e:
        if DEBUG:
            raise
        # A broken setup.cfg is non-fatal here: warn and continue without
        # auto-bootstrapping.
        log.error(
            "Error reading setup.cfg: {0!r}\n{1} will not be "
            "automatically bootstrapped and package installation may fail."
            "\n{2}".format(e, PACKAGE_NAME, _err_help_msg))
# We used package_name in the package template for a while instead of name
if SETUP_CFG.has_option('metadata', 'name'):
    parent_package = SETUP_CFG.get('metadata', 'name')
elif SETUP_CFG.has_option('metadata', 'package_name'):
    parent_package = SETUP_CFG.get('metadata', 'package_name')
else:
    parent_package = None
if SETUP_CFG.has_option('options', 'python_requires'):
    python_requires = SETUP_CFG.get('options', 'python_requires')
    # The python_requires key has a syntax that can be parsed by SpecifierSet
    # in the packaging package. However, we don't want to have to depend on that
    # package, so instead we can use setuptools (which bundles packaging). We
    # have to add 'python' to parse it with Requirement.
    from pkg_resources import Requirement
    req = Requirement.parse('python' + python_requires)
    # We want the Python version as a string, which we can get from the platform module
    import platform
    # strip off trailing '+' in case this is a dev install of python
    python_version = platform.python_version().strip('+')
    # allow pre-releases to count as 'new enough'
    if not req.specifier.contains(python_version, True):
        if parent_package is None:
            message = "ERROR: Python {} is required by this package\n".format(req.specifier)
        else:
            message = "ERROR: Python {} is required by {}\n".format(req.specifier, parent_package)
        sys.stderr.write(message)
        sys.exit(1)
# Enforce the minimum Python version required by astropy-helpers itself.
if sys.version_info < __minimum_python_version__:
    if parent_package is None:
        message = "ERROR: Python {} or later is required by astropy-helpers\n".format(
            __minimum_python_version__)
    else:
        message = "ERROR: Python {} or later is required by astropy-helpers for {}\n".format(
            __minimum_python_version__, parent_package)
    sys.stderr.write(message)
    sys.exit(1)
# Accepted string-like types for the bootstrap `path` option.
_str_types = (str, bytes)
# What follows are several import statements meant to deal with install-time
# issues with either missing or misbehaving packages (including making sure
# setuptools itself is installed):
# Check that setuptools 30.3 or later is present
from distutils.version import LooseVersion
try:
    import setuptools
    assert LooseVersion(setuptools.__version__) >= LooseVersion('30.3')
except (ImportError, AssertionError):
    sys.stderr.write("ERROR: setuptools 30.3 or later is required by astropy-helpers\n")
    sys.exit(1)
SETUPTOOLS_LT_42 = LooseVersion(setuptools.__version__) < LooseVersion('42')
# typing as a dependency for 1.6.1+ Sphinx causes issues when imported after
# initializing submodule with ah_bootstrap.py
# See discussion and references in
# https://github.com/astropy/astropy-helpers/issues/302
try:
    import typing   # noqa
except ImportError:
    pass
# Note: The following import is required as a workaround to
# https://github.com/astropy/astropy-helpers/issues/89; if we don't import this
# module now, it will get cleaned up after `run_setup` is called, but that will
# later cause the TemporaryDirectory class defined in it to stop working when
# used later on by setuptools
try:
    import setuptools.py31compat   # noqa
except ImportError:
    pass
# matplotlib can cause problems if it is imported from within a call of
# run_setup(), because in some circumstances it will try to write to the user's
# home directory, resulting in a SandboxViolation. See
# https://github.com/matplotlib/matplotlib/pull/4165
# Making sure matplotlib, if it is available, is imported early in the setup
# process can mitigate this (note importing matplotlib.pyplot has the same
# issue)
try:
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot
except:  # noqa: E722 -- deliberately broad: this pre-import is best-effort only
    # Ignore if this fails for *any* reason*
    pass
# End compatibility imports...
class _Bootstrapper(object):
    """
    Bootstrapper implementation.  See ``use_astropy_helpers`` for parameter
    documentation.
    """

    def __init__(self, path=None, index_url=None, use_git=None, offline=None,
                 download_if_needed=None, auto_upgrade=None):
        if path is None:
            path = PACKAGE_NAME

        if not (isinstance(path, _str_types) or path is False):
            raise TypeError('path must be a string or False')

        # Python 2 compatibility: bytes paths are decoded using the
        # filesystem encoding.
        if not isinstance(path, str):
            fs_encoding = sys.getfilesystemencoding()
            path = path.decode(fs_encoding)  # path to unicode

        self.path = path

        # Set other option attributes, using defaults where necessary
        self.index_url = index_url if index_url is not None else INDEX_URL
        self.offline = offline if offline is not None else OFFLINE

        # If offline=True, override download and auto-upgrade
        if self.offline:
            download_if_needed = False
            auto_upgrade = False

        self.download = (download_if_needed
                         if download_if_needed is not None
                         else DOWNLOAD_IF_NEEDED)
        self.auto_upgrade = (auto_upgrade
                             if auto_upgrade is not None else AUTO_UPGRADE)

        # If this is a release then the .git directory will not exist so we
        # should not use git.
        git_dir_exists = os.path.exists(os.path.join(os.path.dirname(__file__), '.git'))
        if use_git is None and not git_dir_exists:
            use_git = False

        self.use_git = use_git if use_git is not None else USE_GIT
        # Declared as False by default--later we check if astropy-helpers can be
        # upgraded from PyPI, but only if not using a source distribution (as in
        # the case of import from a git submodule)
        self.is_submodule = False

    @classmethod
    def main(cls, argv=None):
        """Build a bootstrapper from setup.cfg options plus command-line
        overrides and, when ``auto_use`` is configured, run it immediately.

        Returns the (possibly already-run) bootstrapper instance.
        """
        if argv is None:
            argv = sys.argv

        config = cls.parse_config()
        config.update(cls.parse_command_line(argv))

        auto_use = config.pop('auto_use', False)
        bootstrapper = cls(**config)

        if auto_use:
            # Run the bootstrapper, otherwise the setup.py is using the old
            # use_astropy_helpers() interface, in which case it will run the
            # bootstrapper manually after reconfiguring it.
            bootstrapper.run()

        return bootstrapper

    @classmethod
    def parse_config(cls):
        """Read the ``[ah_bootstrap]`` section of setup.cfg (already parsed
        into the module-level ``SETUP_CFG``) into a dict of constructor
        options."""
        if not SETUP_CFG.has_section('ah_bootstrap'):
            return {}

        config = {}

        for option, type_ in CFG_OPTIONS:
            if not SETUP_CFG.has_option('ah_bootstrap', option):
                continue

            if type_ is bool:
                value = SETUP_CFG.getboolean('ah_bootstrap', option)
            else:
                value = SETUP_CFG.get('ah_bootstrap', option)

            config[option] = value

        return config

    @classmethod
    def parse_command_line(cls, argv=None):
        """Pop the bootstrap-specific flags out of ``argv`` (mutating it in
        place so setup.py never sees them) and return the corresponding
        option dict."""
        if argv is None:
            argv = sys.argv

        config = {}

        # For now we just pop recognized ah_bootstrap options out of the
        # arg list. This is imperfect; in the unlikely case that a setup.py
        # custom command or even custom Distribution class defines an argument
        # of the same name then we will break that. However there's a catch22
        # here that we can't just do full argument parsing right here, because
        # we don't yet know *how* to parse all possible command-line arguments.
        if '--no-git' in argv:
            config['use_git'] = False
            argv.remove('--no-git')

        if '--offline' in argv:
            config['offline'] = True
            argv.remove('--offline')

        if '--auto-use' in argv:
            config['auto_use'] = True
            argv.remove('--auto-use')

        if '--no-auto-use' in argv:
            config['auto_use'] = False
            argv.remove('--no-auto-use')

        if '--use-system-astropy-helpers' in argv:
            config['auto_use'] = False
            argv.remove('--use-system-astropy-helpers')

        return config

    def run(self):
        """Locate an astropy_helpers distribution (trying a local directory,
        then a local archive, then the package index) and activate it on
        ``sys.path``.  Raises ``_AHBootstrapSystemExit`` if none is found."""
        strategies = ['local_directory', 'local_file', 'index']
        dist = None

        # First, remove any previously imported versions of astropy_helpers;
        # this is necessary for nested installs where one package's installer
        # is installing another package via setuptools.sandbox.run_setup, as in
        # the case of setup_requires
        for key in list(sys.modules):
            try:
                if key == PACKAGE_NAME or key.startswith(PACKAGE_NAME + '.'):
                    del sys.modules[key]
            except AttributeError:
                # Sometimes mysterious non-string things can turn up in
                # sys.modules
                continue

        # Check to see if the path is a submodule
        self.is_submodule = self._check_submodule()

        for strategy in strategies:
            method = getattr(self, 'get_{0}_dist'.format(strategy))
            dist = method()
            if dist is not None:
                break
        else:
            raise _AHBootstrapSystemExit(
                "No source found for the {0!r} package; {0} must be "
                "available and importable as a prerequisite to building "
                "or installing this package.".format(PACKAGE_NAME))

        # This is a bit hacky, but if astropy_helpers was loaded from a
        # directory/submodule its Distribution object gets a "precedence" of
        # "DEVELOP_DIST". However, in other cases it gets a precedence of
        # "EGG_DIST". However, when activing the distribution it will only be
        # placed early on sys.path if it is treated as an EGG_DIST, so always
        # do that
        dist = dist.clone(precedence=pkg_resources.EGG_DIST)

        # Otherwise we found a version of astropy-helpers, so we're done
        # Just active the found distribution on sys.path--if we did a
        # download this usually happens automatically but it doesn't hurt to
        # do it again
        # Note: Adding the dist to the global working set also activates it
        # (makes it importable on sys.path) by default.
        try:
            pkg_resources.working_set.add(dist, replace=True)
        except TypeError:
            # Some (much) older versions of setuptools do not have the
            # replace=True option here. These versions are old enough that all
            # bets may be off anyways, but it's easy enough to work around just
            # in case...
            if dist.key in pkg_resources.working_set.by_key:
                del pkg_resources.working_set.by_key[dist.key]
            pkg_resources.working_set.add(dist)

    @property
    def config(self):
        """
        A `dict` containing the options this `_Bootstrapper` was configured
        with.
        """
        return dict((optname, getattr(self, optname))
                    for optname, _ in CFG_OPTIONS if hasattr(self, optname))

    def get_local_directory_dist(self):
        """
        Handle importing a vendored package from a subdirectory of the source
        distribution.
        """
        if not os.path.isdir(self.path):
            return

        log.info('Attempting to import astropy_helpers from {0} {1!r}'.format(
                 'submodule' if self.is_submodule else 'directory',
                 self.path))

        dist = self._directory_import()

        if dist is None:
            log.warn(
                'The requested path {0!r} for importing {1} does not '
                'exist, or does not contain a copy of the {1} '
                'package.'.format(self.path, PACKAGE_NAME))
        elif self.auto_upgrade and not self.is_submodule:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade

        return dist

    def get_local_file_dist(self):
        """
        Handle importing from a source archive; this also uses setup_requires
        but points easy_install directly to the source archive.
        """
        if not os.path.isfile(self.path):
            return

        log.info('Attempting to unpack and import astropy_helpers from '
                 '{0!r}'.format(self.path))

        try:
            dist = self._do_download(find_links=[self.path])
        except Exception as e:
            if DEBUG:
                raise

            log.warn(
                'Failed to import {0} from the specified archive {1!r}: '
                '{2}'.format(PACKAGE_NAME, self.path, str(e)))
            dist = None

        if dist is not None and self.auto_upgrade:
            # A version of astropy-helpers was found on the available path, but
            # check to see if a bugfix release is available on PyPI
            upgrade = self._do_upgrade(dist)
            if upgrade is not None:
                dist = upgrade

        return dist

    def get_index_dist(self):
        """Download the package from the package index, unless downloads are
        disabled; returns the Distribution or `None` on failure."""
        if not self.download:
            log.warn('Downloading {0!r} disabled.'.format(DIST_NAME))
            return None

        log.warn(
            "Downloading {0!r}; run setup.py with the --offline option to "
            "force offline installation.".format(DIST_NAME))

        try:
            dist = self._do_download()
        except Exception as e:
            if DEBUG:
                raise
            log.warn(
                'Failed to download and/or install {0!r} from {1!r}:\n'
                '{2}'.format(DIST_NAME, self.index_url, str(e)))
            dist = None

        # No need to run auto-upgrade here since we've already presumably
        # gotten the most up-to-date version from the package index
        return dist

    def _directory_import(self):
        """
        Import astropy_helpers from the given path, which will be added to
        sys.path.

        Returns the corresponding `pkg_resources` Distribution on success, or
        `None` if no distribution could be found or generated at the path.
        """
        # Returns a Distribution on success and None on failure (the caller
        # decides whether a download fallback is allowed)
        path = os.path.abspath(self.path)

        # Use an empty WorkingSet rather than the main
        # pkg_resources.working_set, since on older versions of setuptools this
        # will invoke a VersionConflict when trying to install an upgrade
        ws = pkg_resources.WorkingSet([])
        ws.add_entry(path)
        dist = ws.by_key.get(DIST_NAME)

        if dist is None:
            # We didn't find an egg-info/dist-info in the given path, but if a
            # setup.py exists we can generate it
            setup_py = os.path.join(path, 'setup.py')
            if os.path.isfile(setup_py):
                # We use subprocess instead of run_setup from setuptools to
                # avoid segmentation faults - see the following for more details:
                # https://github.com/cython/cython/issues/2104
                sp.check_output([sys.executable, 'setup.py', 'egg_info'], cwd=path)

                for dist in pkg_resources.find_distributions(path, True):
                    # There should be only one...
                    return dist

        return dist

    def _do_download(self, version='', find_links=None):
        """Install the package via setuptools' ``setup_requires`` machinery,
        either from ``find_links`` (a local archive) or from the configured
        package index, and return the resulting Distribution."""
        if find_links:
            allow_hosts = ''
            index_url = None
        else:
            allow_hosts = None
            index_url = self.index_url

        # Annoyingly, setuptools will not handle other arguments to
        # Distribution (such as options) before handling setup_requires, so it
        # is not straightforward to programmatically augment the arguments which
        # are passed to easy_install
        class _Distribution(Distribution):
            def get_option_dict(self, command_name):
                opts = Distribution.get_option_dict(self, command_name)
                if command_name == 'easy_install':
                    if find_links is not None:
                        opts['find_links'] = ('setup script', find_links)
                    if index_url is not None:
                        opts['index_url'] = ('setup script', index_url)
                    # For setuptools>=42, the allow_hosts option can't
                    # be used because pip doesn't support it.
                    if allow_hosts is not None and SETUPTOOLS_LT_42:
                        opts['allow_hosts'] = ('setup script', allow_hosts)
                return opts

        if version:
            req = '{0}=={1}'.format(DIST_NAME, version)
        else:
            if UPPER_VERSION_EXCLUSIVE is None:
                req = DIST_NAME
            else:
                req = '{0}<{1}'.format(DIST_NAME, UPPER_VERSION_EXCLUSIVE)

        attrs = {'setup_requires': [req]}

        # NOTE: we need to parse the config file (e.g. setup.cfg) to make sure
        # it honours the options set in the [easy_install] section, and we need
        # to explicitly fetch the requirement eggs as setup_requires does not
        # get honored in recent versions of setuptools:
        # https://github.com/pypa/setuptools/issues/1273
        try:
            context = _verbose if DEBUG else _silence
            with context():
                dist = _Distribution(attrs=attrs)
                try:
                    dist.parse_config_files(ignore_option_errors=True)
                    dist.fetch_build_eggs(req)
                except TypeError:
                    # On older versions of setuptools, ignore_option_errors
                    # doesn't exist, and the above two lines are not needed
                    # so we can just continue
                    pass

            # If the setup_requires succeeded it will have added the new dist to
            # the main working_set
            return pkg_resources.working_set.by_key.get(DIST_NAME)
        except Exception as e:
            if DEBUG:
                raise

            msg = 'Error retrieving {0} from {1}:\n{2}'
            if find_links:
                source = find_links[0]
            elif index_url != INDEX_URL:
                source = index_url
            else:
                source = 'PyPI'

            raise Exception(msg.format(DIST_NAME, source, repr(e)))

    def _do_upgrade(self, dist):
        """Look on the package index for a newer bugfix release within the
        same minor series as ``dist`` and download it if one exists."""
        # Build up a requirement for a higher bugfix release but a lower minor
        # release (so API compatibility is guaranteed)
        next_version = _next_version(dist.parsed_version)

        req = pkg_resources.Requirement.parse(
            '{0}>{1},<{2}'.format(DIST_NAME, dist.version, next_version))

        package_index = PackageIndex(index_url=self.index_url)

        upgrade = package_index.obtain(req)

        if upgrade is not None:
            return self._do_download(version=upgrade.version)

    def _check_submodule(self):
        """
        Check if the given path is a git submodule.

        See the docstrings for ``_check_submodule_using_git`` and
        ``_check_submodule_no_git`` for further details.
        """
        if (self.path is None or
                (os.path.exists(self.path) and not os.path.isdir(self.path))):
            return False

        if self.use_git:
            return self._check_submodule_using_git()
        else:
            return self._check_submodule_no_git()

    def _check_submodule_using_git(self):
        """
        Check if the given path is a git submodule.  If so, attempt to initialize
        and/or update the submodule if needed.

        This function makes calls to the ``git`` command in subprocesses.  The
        ``_check_submodule_no_git`` option uses pure Python to check if the given
        path looks like a git submodule, but it cannot perform updates.
        """
        cmd = ['git', 'submodule', 'status', '--', self.path]

        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except _CommandNotFound:
            # The git command simply wasn't found; this is most likely the
            # case on user systems that don't have git and are simply
            # trying to install the package from PyPI or a source
            # distribution. Silently ignore this case and simply don't try
            # to use submodules
            return False

        stderr = stderr.strip()

        if returncode != 0 and stderr:
            # Unfortunately the return code alone cannot be relied on, as
            # earlier versions of git returned 0 even if the requested submodule
            # does not exist

            # This is a warning that occurs in perl (from running git submodule)
            # which only occurs with a malformatted locale setting which can
            # happen sometimes on OSX. See again
            # https://github.com/astropy/astropy/issues/2749
            perl_warning = ('perl: warning: Falling back to the standard locale '
                            '("C").')
            if not stderr.strip().endswith(perl_warning):
                # Some other unknown error condition occurred
                log.warn('git submodule command failed '
                         'unexpectedly:\n{0}'.format(stderr))
                return False

        # Output of `git submodule status` is as follows:
        #
        # 1: Status indicator: '-' for submodule is uninitialized, '+' if
        # submodule is initialized but is not at the commit currently indicated
        # in .gitmodules (and thus needs to be updated), or 'U' if the
        # submodule is in an unstable state (i.e. has merge conflicts)
        #
        # 2. SHA-1 hash of the current commit of the submodule (we don't really
        # need this information but it's useful for checking that the output is
        # correct)
        #
        # 3. The output of `git describe` for the submodule's current commit
        # hash (this includes for example what branches the commit is on) but
        # only if the submodule is initialized. We ignore this information for
        # now
        #
        # Note: '-' is placed first in the character class so it is a literal
        # hyphen; ``[+-U ]`` would be the *range* '+'..'U' and match far more
        # characters than the four intended status codes.
        _git_submodule_status_re = re.compile(
            r'^(?P<status>[-+U ])(?P<commit>[0-9a-f]{40}) '
            r'(?P<submodule>\S+)( .*)?$')

        # The stdout should only contain one line--the status of the
        # requested submodule
        m = _git_submodule_status_re.match(stdout)
        if m:
            # Yes, the path *is* a git submodule
            self._update_submodule(m.group('submodule'), m.group('status'))
            return True
        else:
            log.warn(
                'Unexpected output from `git submodule status`:\n{0}\n'
                'Will attempt import from {1!r} regardless.'.format(
                    stdout, self.path))
            return False

    def _check_submodule_no_git(self):
        """
        Like ``_check_submodule_using_git``, but simply parses the .gitmodules file
        to determine if the supplied path is a git submodule, and does not exec any
        subprocesses.

        This can only determine if a path is a submodule--it does not perform
        updates, etc.  This function may need to be updated if the format of the
        .gitmodules file is changed between git versions.
        """
        gitmodules_path = os.path.abspath('.gitmodules')

        if not os.path.isfile(gitmodules_path):
            return False

        # This is a minimal reader for gitconfig-style files.  It handles a few of
        # the quirks that make gitconfig files incompatible with ConfigParser-style
        # files, but does not support the full gitconfig syntax (just enough
        # needed to read a .gitmodules file).
        gitmodules_fileobj = io.StringIO()

        # Must use io.open for cross-Python-compatible behavior wrt unicode
        with io.open(gitmodules_path) as f:
            for line in f:
                # gitconfig files are more flexible with leading whitespace; just
                # go ahead and remove it
                line = line.lstrip()

                # comments can start with either # or ;
                if line and line[0] in ('#', ';'):
                    continue

                gitmodules_fileobj.write(line)

        gitmodules_fileobj.seek(0)

        cfg = RawConfigParser()

        try:
            # NOTE(review): readfp was removed in Python 3.12 in favor of
            # read_file; kept here because this bootstrap code also targets
            # old Python versions -- confirm before modernizing.
            cfg.readfp(gitmodules_fileobj)
        except Exception as exc:
            log.warn('Malformatted .gitmodules file: {0}\n'
                     '{1} cannot be assumed to be a git submodule.'.format(
                         exc, self.path))
            return False

        for section in cfg.sections():
            if not cfg.has_option(section, 'path'):
                continue

            submodule_path = cfg.get(section, 'path').rstrip(os.sep)

            if submodule_path == self.path.rstrip(os.sep):
                return True

        return False

    def _update_submodule(self, submodule, status):
        """Run the appropriate ``git submodule update`` command for the given
        single-character ``status`` code; raises ``_AHBootstrapSystemExit``
        for an offline init or unresolved merge conflicts, and merely logs
        other failures."""
        if status == ' ':
            # The submodule is up to date; no action necessary
            return
        elif status == '-':
            if self.offline:
                raise _AHBootstrapSystemExit(
                    "Cannot initialize the {0} submodule in --offline mode; "
                    "this requires being able to clone the submodule from an "
                    "online repository.".format(submodule))
            cmd = ['update', '--init']
            action = 'Initializing'
        elif status == '+':
            cmd = ['update']
            action = 'Updating'
            if self.offline:
                cmd.append('--no-fetch')
        elif status == 'U':
            raise _AHBootstrapSystemExit(
                'Error: Submodule {0} contains unresolved merge conflicts. '
                'Please complete or abandon any changes in the submodule so that '
                'it is in a usable state, then try again.'.format(submodule))
        else:
            log.warn('Unknown status {0!r} for git submodule {1!r}. Will '
                     'attempt to use the submodule as-is, but try to ensure '
                     'that the submodule is in a clean state and contains no '
                     'conflicts or errors.\n{2}'.format(status, submodule,
                                                        _err_help_msg))
            return

        err_msg = None
        cmd = ['git', 'submodule'] + cmd + ['--', submodule]
        log.warn('{0} {1} submodule with: `{2}`'.format(
            action, submodule, ' '.join(cmd)))

        try:
            log.info('Running `{0}`; use the --no-git option to disable git '
                     'commands'.format(' '.join(cmd)))
            returncode, stdout, stderr = run_cmd(cmd)
        except OSError as e:
            err_msg = str(e)
        else:
            if returncode != 0:
                err_msg = stderr

        if err_msg is not None:
            log.warn('An unexpected error occurred updating the git submodule '
                     '{0!r}:\n{1}\n{2}'.format(submodule, err_msg,
                                               _err_help_msg))
class _CommandNotFound(OSError):
"""
An exception raised when a command run with run_cmd is not found on the
system.
"""
def run_cmd(cmd):
    """
    Run a command in a subprocess, given as a list of command-line
    arguments.

    Returns a ``(returncode, stdout, stderr)`` tuple, with the output streams
    decoded to text.
    """
    try:
        proc = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE)
        # XXX: May block if either stdout or stderr fill their buffers;
        # however for the commands this is currently used for that is
        # unlikely (they should have very brief output)
        stdout, stderr = proc.communicate()
    except OSError as e:
        if DEBUG:
            raise

        if e.errno == errno.ENOENT:
            msg = 'Command not found: `{0}`'.format(' '.join(cmd))
            raise _CommandNotFound(msg, cmd)

        raise _AHBootstrapSystemExit(
            'An unexpected error occurred when running the '
            '`{0}` command:\n{1}'.format(' '.join(cmd), str(e)))

    # Can fail of the default locale is not configured properly. See
    # https://github.com/astropy/astropy/issues/2749. For the purposes under
    # consideration 'latin1' is an acceptable fallback.
    try:
        stdio_encoding = locale.getdefaultlocale()[1] or 'latin1'
    except ValueError:
        # Due to an OSX oddity locale.getdefaultlocale() can also crash
        # depending on the user's locale/language settings. See:
        # https://bugs.python.org/issue18378
        stdio_encoding = 'latin1'

    # Unlikely to fail at this point but even then let's be flexible
    if not isinstance(stdout, str):
        stdout = stdout.decode(stdio_encoding, 'replace')
    if not isinstance(stderr, str):
        stderr = stderr.decode(stdio_encoding, 'replace')

    return (proc.returncode, stdout, stderr)
def _next_version(version):
"""
Given a parsed version from pkg_resources.parse_version, returns a new
version string with the next minor version.
Examples
========
>>> _next_version(pkg_resources.parse_version('1.2.3'))
'1.3.0'
"""
if hasattr(version, 'base_version'):
# New version parsing from setuptools >= 8.0
if version.base_version:
parts = version.base_version.split('.')
else:
parts = []
else:
parts = []
for part in version:
if part.startswith('*'):
break
parts.append(part)
parts = [int(p) for p in parts]
if len(parts) < 3:
parts += [0] * (3 - len(parts))
major, minor, micro = parts[:3]
return '{0}.{1}.{2}'.format(major, minor + 1, 0)
class _DummyFile(object):
"""A noop writeable object."""
errors = '' # Required for Python 3.x
encoding = 'utf-8'
def write(self, s):
pass
def flush(self):
pass
@contextlib.contextmanager
def _verbose():
yield
@contextlib.contextmanager
def _silence():
    """A context manager that silences sys.stdout and sys.stderr."""
    saved_stdout = sys.stdout
    saved_stderr = sys.stderr
    sys.stdout = _DummyFile()
    sys.stderr = _DummyFile()

    try:
        yield
    finally:
        # Restore the real streams whether or not the body raised, so
        # exception handling and later output work normally.
        sys.stdout = saved_stdout
        sys.stderr = saved_stderr
class _AHBootstrapSystemExit(SystemExit):
    """SystemExit subclass whose message always carries the help epilogue
    from the module-level ``_err_help_msg``."""

    def __init__(self, *args):
        msg = (args[0] if args
               else 'An unknown problem occurred bootstrapping astropy_helpers.')
        msg += '\n' + _err_help_msg
        super(_AHBootstrapSystemExit, self).__init__(msg, *args[1:])
# Instantiate (and, when auto_use is configured, run) the bootstrapper at
# import time, so that merely importing this module can set up astropy_helpers.
BOOTSTRAPPER = _Bootstrapper.main()
def use_astropy_helpers(**kwargs):
    """
    Ensure that the `astropy_helpers` module is available and is importable.
    This supports automatic submodule initialization if astropy_helpers is
    included in a project as a git submodule, or will download it from PyPI if
    necessary.

    Parameters
    ----------

    path : str or None, optional
        A filesystem path relative to the root of the project's source code
        that should be added to `sys.path` so that `astropy_helpers` can be
        imported from that path.

        If the path is a git submodule it will automatically be initialized
        and/or updated.

        The path may also be to a ``.tar.gz`` archive of the astropy_helpers
        source distribution.  In this case the archive is automatically
        unpacked and made temporarily available on `sys.path` as a ``.egg``
        archive.

        If `None` skip straight to downloading.

    download_if_needed : bool, optional
        If the provided filesystem path is not found an attempt will be made to
        download astropy_helpers from PyPI.  It will then be made temporarily
        available on `sys.path` as a ``.egg`` archive (using the
        ``setup_requires`` feature of setuptools).  If the ``--offline`` option
        is given at the command line the value of this argument is overridden
        to `False`.

    index_url : str, optional
        If provided, use a different URL for the Python package index than the
        main PyPI server.

    use_git : bool, optional
        If `False` no git commands will be used--this effectively disables
        support for git submodules.  If the ``--no-git`` option is given at the
        command line the value of this argument is overridden to `False`.

    auto_upgrade : bool, optional
        By default, when installing a package from a non-development source
        distribution ah_bootstrap will try to automatically check for patch
        releases to astropy-helpers on PyPI and use the patched version over
        any bundled versions.  Setting this to `False` will disable that
        functionality.  If the ``--offline`` option is given at the command line
        the value of this argument is overridden to `False`.

    offline : bool, optional
        If `True`, disable all actions that require an internet connection,
        including downloading packages from the package index and fetching
        updates to any git submodule.  The module-level ``OFFLINE`` constant
        supplies the default.
    """

    global BOOTSTRAPPER

    config = BOOTSTRAPPER.config
    config.update(**kwargs)

    # Create a new bootstrapper with the updated configuration and run it
    BOOTSTRAPPER = _Bootstrapper(**config)
    BOOTSTRAPPER.run()
|
D-arioSpaceREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@ah_bootstrap.py@.PATH_END.py
|
{
"filename": "unit_normalization_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/normalization/unit_normalization_test.py",
"type": "Python"
}
|
import numpy as np
import pytest
from keras.src import backend
from keras.src import layers
from keras.src import testing
def squared_l2_norm(x):
    """Return the sum of squared elements of ``x`` after converting it to a
    NumPy array via the active Keras backend."""
    arr = backend.convert_to_numpy(x)
    return (arr**2).sum()
class UnitNormalizationTest(testing.TestCase):
    """Unit tests for ``layers.UnitNormalization``."""

    @pytest.mark.requires_trainable_backend
    def test_un_basics(self):
        # Exercise the generic layer contract (output shape, masking support,
        # build-on-instantiation) for a single-axis configuration...
        self.run_layer_test(
            layers.UnitNormalization,
            init_kwargs={"axis": -1},
            input_shape=(2, 3),
            expected_output_shape=(2, 3),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )
        # ...and for a multi-axis configuration.
        self.run_layer_test(
            layers.UnitNormalization,
            init_kwargs={"axis": (1, 2)},
            input_shape=(1, 3, 3),
            expected_output_shape=(1, 3, 3),
            supports_masking=True,
            assert_built_after_instantiation=True,
        )

    def test_invalid_axis(self):
        # A dict is neither an int nor a list/tuple of ints, so construction
        # must fail with a TypeError carrying this message.
        with self.assertRaisesRegex(
            TypeError,
            (
                "Invalid value for `axis` argument: expected an int or a "
                "list/tuple of ints."
            ),
        ):
            layers.UnitNormalization(axis={"axis": -1})

    def test_correctness(self):
        # Normalizing over the last axis: each row should have unit L2 norm.
        layer = layers.UnitNormalization(axis=-1)
        inputs = np.random.normal(size=(2, 3))
        outputs = layer(inputs)
        self.assertAllClose(squared_l2_norm(outputs[0, :]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[1, :]), 1.0)

        # Normalizing jointly over two axes: each per-sample slab is unit.
        layer = layers.UnitNormalization(axis=(1, 2))
        inputs = np.random.normal(size=(2, 3, 3))
        outputs = layer(inputs)
        self.assertAllClose(squared_l2_norm(outputs[0, :, :]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[1, :, :]), 1.0)

        # Normalizing over a middle axis: every (sample, channel) column is unit.
        layer = layers.UnitNormalization(axis=1)
        inputs = np.random.normal(size=(2, 3, 2))
        outputs = layer(inputs)
        self.assertAllClose(squared_l2_norm(outputs[0, :, 0]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[1, :, 0]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[0, :, 1]), 1.0)
        self.assertAllClose(squared_l2_norm(outputs[1, :, 1]), 1.0)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@normalization@unit_normalization_test.py@.PATH_END.py
|
{
"filename": "Spectrum.py",
"repo_name": "chianti-atomic/ChiantiPy",
"repo_path": "ChiantiPy_extracted/ChiantiPy-master/ChiantiPy/core/Spectrum.py",
"type": "Python"
}
|
import copy
from datetime import datetime
import numpy as np
import ChiantiPy
import ChiantiPy.tools.data as chdata
import ChiantiPy.tools.constants as const
import ChiantiPy.tools.filters as chfilters
import ChiantiPy.tools.util as util
import ChiantiPy.tools.io as chio
from ChiantiPy.base import ionTrails
from ChiantiPy.base import specTrails
class spectrum(ionTrails, specTrails):
    '''
    Calculate the emission spectrum as a function of temperature and density.

    one of the convenient things is that all of the instantiated ion classes, determined
    through such keywords as 'elementList', 'ionList', and 'minAbund' are kept in a
    dictionary self.IonInstances where self.IonInstances['mg_7'] is the class instance of
    ChiantiPy.core.ion for 'mg_7'.  All its methods and attributes are available.

    includes elemental abundances and ionization equilibria

    the set of abundances, a file in $XUVTOP/abundance, can be set with the keyword argument 'abundanceName'

    temperature and density can be arrays but, unless the size of either is unity (1),
    the two must have the same size

    the returned spectrum will be convolved with a filter of the specified width on the
    specified wavelength array

    the default filter is gaussianR with a resolving power of 1000.  Other filters,
    such as gaussian, box and lorentz, are available in ChiantiPy.tools.filters.  When
    using the box filter, the width should equal the wavelength interval to keep the units
    of the continuum and line spectrum the same.

    Inherited methods include 'intensityList', 'intensityRatio' (between lines of different ions),
    'intensityRatioSave' and 'convolve'

    A selection of elements can be made with elementList, a list containing the names of elements
    that are desired to be included, e.g., ['fe','ni']

    A selection of ions can be made with ionList containing the names of
    the desired lines in CHIANTI notation, i.e. C VI = c_6

    Both elementList and ionList can not be specified at the same time

    a minimum abundance can be specified so that the calculation can be sped up
    by excluding elements with a low abundance.  The default of minAbund is 1.e-6

    It is necessary to specify at least an elementList, an ionList, or a minAbund to select any ions
    for a spectrum calculation

    With solar photospheric abundances

    setting minAbund = 1.e-4 will include H, He, C, O, Ne
    setting minAbund = 2.e-5 adds N, Mg, Si, S, Fe
    setting minAbund = 1.e-6 adds Na, Al, Ar, Ca, Ni

    Setting doLines = 0 will skip the calculation of spectral lines.
    Setting doContinuum = 0 will skip the continuum calculation.

    Setting em [for emission measure] will multiply the spectrum at each temperature
    by the value of em.

    em [for emission measure] can be a float or an array of the same length as the
    temperature/density

    keepIons:  set this to keep the ion instances that have been calculated in a dictionary
    self.IonInstances with the keywords being the CHIANTI-style ion names

    abundance - to select a particular set of abundances, set abundance to the name of a
    CHIANTI abundance file, without the '.abund' suffix, e.g. 'sun_photospheric_1998_grevesse'
    If set to a blank (''), a gui selection menu will popup and allow the selection of a
    set of abundances

    Parameters
    --------------

    temperature: `float`, `list`, `ndarray`
        the temperature(s) in K

    eDensity: float, ndarray
        eDensity: electron density in :math:`\\mathrm{cm^{-3}}`

    wavelength: `list` or `ndarray`
        wavelength:  array of wavelengths, generally in Angstroms

    elementList:  `list`
        elementList:  list of elements to include, such as 'fe', 'ne', 's'

    ionList:  `list`
        ionList:  list of ions to include, such as 'fe_16', 'ne_10'

    minAbund:  `float`
        minAbund:  minimum abundance (relative to H) to include

    doLines: `bool`
        doLines: if true, line intensities are calculated

    doContinuum: `bool`
        doContinuum:  if true, continuum intensities are calculated only if wavelengths are in angstroms

    keepIons:  `bool`
        keepIons:  keep the ion instances used in the calculation
            should be used with caution otherwise the bunch instance
            can become quite large

    em: `float`, `list`, `ndarray`
        em:  the emission measure

    abundance: `str`
        abundance:  the file name of the abundance set to be used
            must be one in the $XUVTOP/abund directory

    allLines: `bool`
        allLines:  whether or not to include unobserved lines

    verbose: `bool`
        verbose:  whether to allow certain print statements
    '''
    def __init__(self, temperature, eDensity, wavelength, filter=(chfilters.gaussianR, 1000.), label=None,
                 elementList = None, ionList = None, minAbund=None, doLines = True, doContinuum = True, em=None, keepIons=0,
                 abundance=None, verbose=0, allLines=1):
        #
        self.Defaults = chdata.Defaults
        # The continuum routines assume wavelengths in angstroms; bail out
        # early (leaving the instance only partially constructed) otherwise.
        if doContinuum and self.Defaults['wavelength'] != 'angstrom':
            print(' the continuum can only be calculated for wavelengths in angstroms')
            print(' set doContinuum = False to continue')
            return

        wavelength = np.atleast_1d(wavelength)
        if wavelength.size < 2:
            print(' wavelength must have at least two values, current length %3i'%(wavelength.size))
            return

        t1 = datetime.now()
        # creates Intensity dict from first ion calculated
        setupIntensity = False
        #
        self.Wavelength = np.asarray(wavelength, np.float64)
        self.WvlRange = np.asarray([self.Wavelength.min(), self.Wavelength.max()], np.float64)
        #
        # validates temperature/eDensity/em shapes and sets self.NTempDens etc.
        self.argCheck(temperature=temperature, eDensity=eDensity, pDensity=None, em=em)
        nTempDens = self.NTempDens

        self.Labels = util.units(chdata.Defaults)
        xlabel = self.Labels['xlabel']
        ylabel = self.Labels['spectrumYlabel']
        # With a unit emission measure, note the normalization in the label.
        if np.array_equal(self.Em, np.ones_like(self.Em)):
            ylabel += '($\\int\\,$ N$_e\\,$N$_H\\,$d${\\it l}$)$^{-1}$'
        #
        if abundance is not None:
            ab = chio.abundanceRead(abundance)
            abundAll = ab['abundance']
            self.AbundanceName = abundance
        else:
            self.AbundanceName = self.Defaults['abundfile']
            abundAll = chdata.Abundance[self.AbundanceName]['abundance']
        # needed by ionGate
        self.AbundAll = abundAll
        self.Abundance = abundAll
        #
        self.MinAbund = minAbund

        wavelength = np.asarray(wavelength)
        nWvl = wavelength.size
        self.Wavelength = wavelength
        #
        # Accumulators for each emission component; squeeze() collapses the
        # leading axis when there is a single temperature/density pair.
        freeFree = np.zeros((nTempDens, nWvl), np.float64).squeeze()
        freeBound = np.zeros((nTempDens, nWvl), np.float64).squeeze()
        twoPhoton = np.zeros((nTempDens, nWvl), np.float64).squeeze()
        lineSpectrum = np.zeros((nTempDens, nWvl), np.float64).squeeze()
        #
        self.IonsCalculated = []
        if keepIons:
            self.IonInstances = {}
            self.FfInstances = {}
            self.FbInstances = {}
        self.Finished = []
        #
        # Populates self.Todo: a dict mapping ion name -> processes to compute.
        self.ionGate(elementList = elementList, ionList = ionList, minAbund=minAbund, doLines=doLines,
                     doContinuum=doContinuum)
        #
        for akey in sorted(self.Todo.keys()):
            zStuff = util.convertName(akey)
            Z = zStuff['Z']
            ionstage = zStuff['Ion']
            dielectronic = zStuff['Dielectronic']
            abundance = self.Abundance[Z - 1]
            if verbose:
                print(' %5i %5s abundance = %10.2e '%(Z, const.El[Z-1], abundance))
                print(' doing ion %s for the following processes %s'%(akey, self.Todo[akey]))
            if 'ff' in self.Todo[akey]:
                if verbose:
                    print(' calculating ff continuum for :  %s'%(akey))
                FF = ChiantiPy.core.continuum(akey, temperature, abundance=abundance, em=em, verbose=verbose)
                FF.freeFree(wavelength)
                freeFree += FF.FreeFree['intensity'].squeeze()
                if keepIons:
                    self.FfInstances[akey] = copy.deepcopy(FF)
            if 'fb' in self.Todo[akey]:
                if verbose:
                    print(' calculating fb continuum for :  %s'%(akey))
                FB = ChiantiPy.core.continuum(akey, temperature, abundance=abundance, em=em, verbose=verbose)
                FB.freeBound(wavelength)
                # Free-bound can fail for ions lacking the needed data; the
                # result then carries an errorMessage instead of an intensity.
                if 'errorMessage' not in FB.FreeBound.keys():
                    freeBound += FB.FreeBound['intensity'].squeeze()
                    if keepIons:
                        self.FbInstances[akey] = copy.deepcopy(FB)
                else:
                    if verbose:
                        print(FB.FreeBound['errorMessage'])
            if 'line' in self.Todo[akey]:
                if verbose:
                    print(' calculating spectrum for  :  %s'%(akey))
                thisIon = ChiantiPy.core.ion(akey, temperature, eDensity, pDensity='default', abundance=abundance, em=em, verbose=verbose)
                thisIon.intensity(allLines=allLines)
                self.IonsCalculated.append(akey)
                if 'errorMessage' not in list(thisIon.Intensity.keys()):
                    self.Finished.append(akey)
                    thisIon.spectrum(wavelength, filter=filter, allLines=allLines)
                    if keepIons:
                        self.IonInstances[akey] = copy.deepcopy(thisIon)
                    if setupIntensity:
                        # Append this ion's line data onto the accumulated
                        # Intensity arrays, key by key.
                        for bkey in self.Intensity:
                            self.Intensity[bkey] = np.hstack((copy.copy(self.Intensity[bkey]),
                                                              thisIon.Intensity[bkey]))
                    else:
                        setupIntensity = True
                        self.Intensity = thisIon.Intensity
                    lineSpectrum += thisIon.Spectrum['intensity'].squeeze()
                else:
                    if verbose:
                        print(thisIon.Intensity['errorMessage'])
                # get 2 photon emission for H and He sequences
                if doContinuum:
                    if (Z - ionstage) in [0, 1] and not dielectronic:
                        thisIon.twoPhoton(wavelength)
                        twoPhoton += thisIon.TwoPhoton['intensity'].squeeze()
                        if verbose:
                            print(' doing two photon')

        self.FreeFree = {'wavelength':wavelength, 'intensity':freeFree.squeeze()}
        self.FreeBound = {'wavelength':wavelength, 'intensity':freeBound.squeeze()}
        self.LineSpectrum = {'wavelength':wavelength, 'intensity':lineSpectrum.squeeze()}
        self.TwoPhoton = {'wavelength':wavelength, 'intensity':twoPhoton.squeeze()}

        cont = freeFree.squeeze() + freeBound.squeeze() + twoPhoton.squeeze()
        self.Continuum = {'wavelength':wavelength, 'intensity':cont}
        #
        #
        total = freeFree + freeBound + lineSpectrum + twoPhoton
        self.Total = total

        t2 = datetime.now()
        dt = t2 - t1
        print(' elapsed seconds = %12.3f'%(dt.seconds))

        if nTempDens == 1:
            integrated = total
        else:
            integrated = total.sum(axis=0)
        #
        # When a label is given, spectra are stored in a dict keyed by label so
        # repeated calculations on the same instance can be kept side by side.
        if isinstance(label, str):
            if hasattr(self, 'Spectrum'):
                self.Spectrum[label] = {'wavelength':wavelength, 'intensity':total.squeeze(),
                                        'filter':filter[0],   'filterWidth':filter[1], 'integrated':integrated, 'em':em,
                                        'ions':self.IonsCalculated, 'Abundance':self.AbundanceName, 'xlabel':xlabel,
                                        'ylabel':ylabel, 'minAbund':minAbund}
            else:
                self.Spectrum = {label:{'wavelength':wavelength, 'intensity':total.squeeze(),
                                        'filter':filter[0],   'filterWidth':filter[1], 'integrated':integrated, 'em':em,
                                        'ions':self.IonsCalculated, 'Abundance':self.AbundanceName, 'xlabel':xlabel,
                                        'ylabel':ylabel, 'minAbund':minAbund}}
        else:
            self.Spectrum = {'wavelength':wavelength, 'intensity':total.squeeze(),
                             'filter':filter[0],   'filterWidth':filter[1], 'integrated':integrated,
                             'ions':self.IonsCalculated, 'Abundance':self.AbundanceName, 'xlabel':xlabel,
                             'ylabel':ylabel, 'minAbund':minAbund}
|
chianti-atomicREPO_NAMEChiantiPyPATH_START.@ChiantiPy_extracted@ChiantiPy-master@ChiantiPy@core@Spectrum.py@.PATH_END.py
|
{
"filename": "_cheatertype.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/carpet/baxis/_cheatertype.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CheatertypeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for the ``carpet.baxis.cheatertype`` property.

    Accepted values are ``"index"`` and ``"value"``; the defaults below may be
    overridden through ``kwargs``.
    """

    def __init__(self, plotly_name="cheatertype", parent_name="carpet.baxis", **kwargs):
        # Resolve overridable defaults before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "calc")
        values = kwargs.pop("values", ["index", "value"])
        super(CheatertypeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@carpet@baxis@_cheatertype.py@.PATH_END.py
|
{
"filename": "_family.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/histogram2dcontour/contours/labelfont/_family.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilyValidator(_plotly_utils.basevalidators.StringValidator):
    """String validator for the ``family`` font property of
    ``histogram2dcontour.contours.labelfont``.
    """

    def __init__(
        self,
        plotly_name="family",
        parent_name="histogram2dcontour.contours.labelfont",
        **kwargs,
    ):
        # Resolve overridable defaults before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "plot")
        no_blank = kwargs.pop("no_blank", True)
        strict = kwargs.pop("strict", True)
        super(FamilyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            no_blank=no_blank,
            strict=strict,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@histogram2dcontour@contours@labelfont@_family.py@.PATH_END.py
|
{
"filename": "__main__.py",
"repo_name": "j0r1/GRALE2",
"repo_path": "GRALE2_extracted/GRALE2-master/pygrale/grale/editor/__main__.py",
"type": "Python"
}
|
# Entry point for the GRALE editor GUI (`python -m grale.editor`).
import os
import sys

# Prepend this package's directory so the bare `import mainwindow` below
# resolves regardless of the caller's working directory.
sys.path = [ os.path.dirname(__file__) ] + sys.path

import mainwindow

# Launch the editor's main window / event loop.
mainwindow.main()
|
j0r1REPO_NAMEGRALE2PATH_START.@GRALE2_extracted@GRALE2-master@pygrale@grale@editor@__main__.py@.PATH_END.py
|
{
"filename": "tools_human.ipynb",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/docs/docs/how_to/tools_human.ipynb",
"type": "Jupyter Notebook"
}
|
# How to add a human-in-the-loop for tools
There are certain tools that we don't trust a model to execute on its own. One thing we can do in such situations is require human approval before the tool is invoked.
:::info
This how-to guide shows a simple way to add human-in-the-loop for code running in a jupyter notebook or in a terminal.
To build a production application, you will need to do more work to keep track of application state appropriately.
We recommend using `langgraph` for powering such a capability. For more details, please see this [guide](https://langchain-ai.github.io/langgraph/how-tos/human-in-the-loop/).
:::
## Setup
We'll need to install the following packages:
```python
%pip install --upgrade --quiet langchain
```
And set these environment variables:
```python
import getpass
import os
# If you'd like to use LangSmith, uncomment the below:
# os.environ["LANGCHAIN_TRACING_V2"] = "true"
# os.environ["LANGCHAIN_API_KEY"] = getpass.getpass()
```
## Chain
Let's create a few simple (dummy) tools and a tool-calling chain:
import ChatModelTabs from "@theme/ChatModelTabs";
<ChatModelTabs customVarName="llm"/>
```python
# | output: false
# | echo: false
from langchain_anthropic import ChatAnthropic
llm = ChatAnthropic(model="claude-3-sonnet-20240229", temperature=0)
```
```python
from typing import Dict, List
from langchain_core.messages import AIMessage
from langchain_core.runnables import Runnable, RunnablePassthrough
from langchain_core.tools import tool
@tool
def count_emails(last_n_days: int) -> int:
"""Dummy function to count number of e-mails. Returns 2 * last_n_days."""
return last_n_days * 2
@tool
def send_email(message: str, recipient: str) -> str:
"""Dummy function for sending an e-mail."""
return f"Successfully sent email to {recipient}."
tools = [count_emails, send_email]
llm_with_tools = llm.bind_tools(tools)
def call_tools(msg: AIMessage) -> List[Dict]:
"""Simple sequential tool calling helper."""
tool_map = {tool.name: tool for tool in tools}
tool_calls = msg.tool_calls.copy()
for tool_call in tool_calls:
tool_call["output"] = tool_map[tool_call["name"]].invoke(tool_call["args"])
return tool_calls
chain = llm_with_tools | call_tools
chain.invoke("how many emails did i get in the last 5 days?")
```
[{'name': 'count_emails',
'args': {'last_n_days': 5},
'id': 'toolu_01QYZdJ4yPiqsdeENWHqioFW',
'output': 10}]
## Adding human approval
Let's add a step in the chain that will ask a person to approve or reject the tool call request.
On rejection, the step will raise an exception which will stop execution of the rest of the chain.
```python
import json
class NotApproved(Exception):
"""Custom exception."""
def human_approval(msg: AIMessage) -> AIMessage:
"""Responsible for passing through its input or raising an exception.
Args:
msg: output from the chat model
Returns:
        msg: the original output from the chat model, passed through unchanged
"""
tool_strs = "\n\n".join(
json.dumps(tool_call, indent=2) for tool_call in msg.tool_calls
)
input_msg = (
f"Do you approve of the following tool invocations\n\n{tool_strs}\n\n"
"Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.\n >>>"
)
resp = input(input_msg)
if resp.lower() not in ("yes", "y"):
raise NotApproved(f"Tool invocations not approved:\n\n{tool_strs}")
return msg
```
```python
chain = llm_with_tools | human_approval | call_tools
chain.invoke("how many emails did i get in the last 5 days?")
```
Do you approve of the following tool invocations
{
"name": "count_emails",
"args": {
"last_n_days": 5
},
"id": "toolu_01WbD8XeMoQaRFtsZezfsHor"
}
Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
>>> yes
[{'name': 'count_emails',
'args': {'last_n_days': 5},
'id': 'toolu_01WbD8XeMoQaRFtsZezfsHor',
'output': 10}]
```python
try:
chain.invoke("Send sally@gmail.com an email saying 'What's up homie'")
except NotApproved as e:
print()
print(e)
```
Do you approve of the following tool invocations
{
"name": "send_email",
"args": {
"recipient": "sally@gmail.com",
"message": "What's up homie"
},
"id": "toolu_014XccHFzBiVcc9GV1harV9U"
}
Anything except 'Y'/'Yes' (case-insensitive) will be treated as a no.
>>> no
Tool invocations not approved:
{
"name": "send_email",
"args": {
"recipient": "sally@gmail.com",
"message": "What's up homie"
},
"id": "toolu_014XccHFzBiVcc9GV1harV9U"
}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@docs@docs@how_to@tools_human.ipynb@.PATH_END.py
|
{
"filename": "version.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/numpy/version.py",
"type": "Python"
}
|
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
#
# To compare versions robustly, use `numpy.lib.NumpyVersion`

# Version strings baked in at build time by numpy's setup.py.
short_version = '1.14.0'
version = '1.14.0'
full_version = '1.14.0'
# Git commit hash this build was produced from.
git_revision = '6914bb41f0fb3c1ba500bae4e7d671da9536786f'
# True for release builds; for dev builds `version` is replaced below.
release = True

if not release:
    # Non-release builds expose the full (dev-suffixed) version string.
    version = full_version
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@numpy@version.py@.PATH_END.py
|
{
"filename": "debugger_test.py",
"repo_name": "jax-ml/jax",
"repo_path": "jax_extracted/jax-main/tests/debugger_test.py",
"type": "Python"
}
|
# Copyright 2022 The JAX Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections.abc import Sequence
import contextlib
import io
import re
import textwrap
from typing import IO
import unittest
from absl.testing import absltest
import jax
from jax.experimental import pjit
from jax._src import debugger
from jax._src import test_util as jtu
import jax.numpy as jnp
import numpy as np
jax.config.parse_flags_with_absl()
def make_fake_stdin_stdout(commands: Sequence[str]) -> tuple[IO[str], io.StringIO]:
  """Build a scripted stdin and a fresh stdout buffer for driving the debugger.

  Each command becomes one input line; the returned stdin is positioned at the
  start so the commands are consumed in order. The second element is an empty
  buffer for capturing the debugger's output.
  """
  script = "".join(f"{command}\n" for command in commands)
  fake_stdin = io.StringIO(script)
  return fake_stdin, io.StringIO()
def _format_multiline(text):
return textwrap.dedent(text).lstrip()
_exit_stack = contextlib.ExitStack()
def setUpModule():
  # Force a host platform device count of 2 (the pmap test below requires
  # at least 2 devices); undone in tearDownModule via the exit stack.
  _exit_stack.enter_context(jtu.set_host_platform_device_count(2))
def tearDownModule():
  # Unwind the device-count override installed in setUpModule.
  _exit_stack.close()
foo = 2
class CliDebuggerTest(jtu.JaxTestCase):
  """Tests for the CLI backend of the JAX debugger.

  Each test feeds a scripted command sequence to `debugger.breakpoint` via a
  fake stdin (see `make_fake_stdin_stdout`) and compares the captured stdout
  against an expected transcript — exactly with `assertEqual`, or by regex
  with `assertRegex` when the output contains file paths or line numbers.
  """

  def setUp(self):
    super().setUp()
    # The debugger relies on host callbacks, which only these backends support.
    if not jtu.test_device_matches(["cpu", "gpu", "tpu"]):
      self.skipTest(f"Host callback not supported on {jtu.device_under_test()}")

  def test_debugger_eof(self):
    # No scripted commands: the debugger hits EOF on stdin and exits.
    stdin, stdout = make_fake_stdin_stdout([])
    def f(x):
      y = jnp.sin(x)
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    with self.assertRaises(SystemExit):
      f(2.)
      jax.effects_barrier()

  def test_debugger_can_continue(self):
    stdin, stdout = make_fake_stdin_stdout(["c"])
    def f(x):
      y = jnp.sin(x)
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    f(2.)
    jax.effects_barrier()
    expected = _format_multiline(r"""
    Entering jdb:
    (jdb) """)
    self.assertEqual(stdout.getvalue(), expected)

  def test_debugger_can_print_value(self):
    stdin, stdout = make_fake_stdin_stdout(["p x", "c"])
    def f(x):
      y = jnp.sin(x)
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    expected = _format_multiline(r"""
    Entering jdb:
    (jdb) Array(2., dtype=float32)
    (jdb) """)
    f(jnp.array(2., jnp.float32))
    jax.effects_barrier()
    self.assertEqual(stdout.getvalue(), expected)

  def test_debugger_can_print_value_in_jit(self):
    stdin, stdout = make_fake_stdin_stdout(["p x", "c"])
    @jax.jit
    def f(x):
      y = jnp.sin(x)
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    expected = _format_multiline(r"""
    Entering jdb:
    (jdb) Array(2., dtype=float32)
    (jdb) """)
    f(jnp.array(2., jnp.float32))
    jax.effects_barrier()
    self.assertEqual(stdout.getvalue(), expected)

  def test_debugger_can_print_multiple_values(self):
    stdin, stdout = make_fake_stdin_stdout(["p x, y", "c"])
    @jax.jit
    def f(x):
      y = x + 1.
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    expected = _format_multiline(r"""
    Entering jdb:
    (jdb) (Array(2., dtype=float32), Array(3., dtype=float32))
    (jdb) """)
    f(jnp.array(2., jnp.float32))
    jax.effects_barrier()
    self.assertEqual(stdout.getvalue(), expected)

  def test_debugger_can_print_context(self):
    # `l` lists the source context, so the expected output is a regex.
    stdin, stdout = make_fake_stdin_stdout(["l", "c"])
    @jax.jit
    def f(x):
      y = jnp.sin(x)
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    f(2.)
    jax.effects_barrier()
    expected = _format_multiline(r"""
    Entering jdb:
    \(jdb\) > .*debugger_test\.py\([0-9]+\)
    @jax\.jit
    def f\(x\):
    y = jnp\.sin\(x\)
    -> debugger\.breakpoint\(stdin=stdin, stdout=stdout, backend="cli"\)
    return y
    .*
    \(jdb\) """)
    self.assertRegex(stdout.getvalue(), expected)

  def test_debugger_can_print_backtrace(self):
    stdin, stdout = make_fake_stdin_stdout(["bt", "c"])
    @jax.jit
    def f(x):
      y = jnp.sin(x)
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    expected = _format_multiline(r"""
    Entering jdb:.*
    \(jdb\) Traceback:.*
    """)
    f(2.)
    jax.effects_barrier()
    self.assertRegex(stdout.getvalue(), expected)

  def test_debugger_can_work_with_multiple_stack_frames(self):
    # `u`/`d` move up and down between the frames of f and g.
    stdin, stdout = make_fake_stdin_stdout(["l", "u", "p x", "d", "c"])
    def f(x):
      y = jnp.sin(x)
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    @jax.jit
    def g(x):
      y = f(x)
      return jnp.exp(y)
    expected = _format_multiline(r"""
    Entering jdb:
    \(jdb\) > .*debugger_test\.py\([0-9]+\)
    def f\(x\):
    y = jnp\.sin\(x\)
    -> debugger\.breakpoint\(stdin=stdin, stdout=stdout, backend="cli"\)
    return y
    .*
    \(jdb\) > .*debugger_test\.py\([0-9]+\).*
    @jax\.jit
    def g\(x\):
    -> y = f\(x\)
    return jnp\.exp\(y\)
    .*
    \(jdb\) Array\(2\., dtype=float32\)
    \(jdb\) > .*debugger_test\.py\([0-9]+\)
    def f\(x\):
    y = jnp\.sin\(x\)
    -> debugger\.breakpoint\(stdin=stdin, stdout=stdout, backend="cli"\)
    return y
    .*
    \(jdb\) """)
    g(jnp.array(2., jnp.float32))
    jax.effects_barrier()
    self.assertRegex(stdout.getvalue(), expected)

  def test_can_use_multiple_breakpoints(self):
    stdin, stdout = make_fake_stdin_stdout(["p y", "c", "p y", "c"])
    def f(x):
      y = x + 1.
      # ordered=True keeps the two breakpoints' transcripts in sequence.
      debugger.breakpoint(stdin=stdin, stdout=stdout, ordered=True,
                          backend="cli")
      return y
    @jax.jit
    def g(x):
      y = f(x) * 2.
      debugger.breakpoint(stdin=stdin, stdout=stdout, ordered=True,
                          backend="cli")
      return jnp.exp(y)
    expected = _format_multiline(r"""
    Entering jdb:
    (jdb) Array(3., dtype=float32)
    (jdb) Entering jdb:
    (jdb) Array(6., dtype=float32)
    (jdb) """)
    g(jnp.array(2., jnp.float32))
    jax.effects_barrier()
    self.assertEqual(stdout.getvalue(), expected)

  def test_debugger_works_with_vmap(self):
    stdin, stdout = make_fake_stdin_stdout(["p y", "c", "p y", "c"])
    def f(x):
      y = x + 1.
      debugger.breakpoint(stdin=stdin, stdout=stdout, ordered=True,
                          backend="cli")
      return 2. * y
    @jax.jit
    @jax.vmap
    def g(x):
      y = f(x)
      return jnp.exp(y)
    expected = _format_multiline(r"""
    Entering jdb:
    (jdb) Array(1., dtype=float32)
    (jdb) Entering jdb:
    (jdb) Array(2., dtype=float32)
    (jdb) """)
    g(jnp.arange(2., dtype=jnp.float32))
    jax.effects_barrier()
    self.assertEqual(stdout.getvalue(), expected)

  def test_debugger_works_with_pmap(self):
    if jax.local_device_count() < 2:
      raise unittest.SkipTest("Test requires >= 2 devices.")
    stdin, stdout = make_fake_stdin_stdout(["p y", "c", "p y", "c"])
    def f(x):
      y = jnp.sin(x)
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    @jax.pmap
    def g(x):
      y = f(x)
      return jnp.exp(y)
    expected = _format_multiline(r"""
    Entering jdb:
    \(jdb\) Array\(.*, dtype=float32\)
    \(jdb\) Entering jdb:
    \(jdb\) Array\(.*, dtype=float32\)
    \(jdb\) """)
    g(jnp.arange(2., dtype=jnp.float32))
    jax.effects_barrier()
    self.assertRegex(stdout.getvalue(), expected)

  def test_debugger_works_with_pjit(self):
    if jax.default_backend() != "tpu":
      raise unittest.SkipTest("`pjit` doesn't work with CustomCall.")
    stdin, stdout = make_fake_stdin_stdout(["p y", "c"])
    def f(x):
      y = x + 1
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      return y
    def g(x):
      y = f(x)
      return jnp.exp(y)
    g = pjit.pjit(
        g,
        in_shardings=jax.sharding.PartitionSpec("dev"),
        out_shardings=jax.sharding.PartitionSpec("dev"),
    )
    with jax.sharding.Mesh(np.array(jax.devices()), ["dev"]):
      arr = (1 + jnp.arange(8)).astype(np.int32)
      expected = _format_multiline(r"""
      Entering jdb:
      \(jdb\) {}
      \(jdb\) """.format(re.escape(repr(arr))))
      g(jnp.arange(8, dtype=jnp.int32))
      jax.effects_barrier()
      self.assertRegex(stdout.getvalue(), expected)

  def test_debugger_uses_local_before_global_scope(self):
    stdin, stdout = make_fake_stdin_stdout(["p foo", "c"])
    foo = "outer"
    def f(x):
      foo = "inner"
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      del foo
      return x
    del foo
    expected = _format_multiline(r"""
    Entering jdb:
    \(jdb\) 'inner'
    \(jdb\) """)
    f(2.)
    jax.effects_barrier()
    self.assertRegex(stdout.getvalue(), expected)

  def test_debugger_accesses_globals(self):
    stdin, stdout = make_fake_stdin_stdout(["p foo", "c"])
    @jax.jit
    def g():
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
    expected = _format_multiline(r"""
    Entering jdb:
    \(jdb\) \*\*\* NameError: name 'foo' is not defined
    \(jdb\) """)
    g()
    jax.effects_barrier()
    self.assertRegex(stdout.getvalue(), expected)

  def test_can_limit_num_frames(self):
    stdin, stdout = make_fake_stdin_stdout(["u", "p x", "c"])
    def g():
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli",
                          num_frames=2)
    @jax.jit
    def f():
      x = 2
      g()
      return x
    _ = f()
    expected = _format_multiline(r"""
    Entering jdb:
    \(jdb\) .*
    .*
    .*
    .*
    .*
    .*
    .*
    \(jdb\) 2
    \(jdb\) """)
    jax.effects_barrier()
    self.assertRegex(stdout.getvalue(), expected)
    # Second part: moving up past the frame limit reports "At topmost frame".
    stdin, stdout = make_fake_stdin_stdout(["u", "u", "c"])
    def g2():
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli",
                          num_frames=2)
    @jax.jit
    def f2():
      x = 2
      g2()
      return x
    expected = ".*At topmost frame.*"
    _ = f2()
    jax.effects_barrier()
    self.assertRegex(stdout.getvalue(), expected)

  def test_can_handle_dictionaries_with_unsortable_keys(self):
    stdin, stdout = make_fake_stdin_stdout(["p x", "p weird_dict",
                                            "p weirder_dict", "c"])
    @jax.jit
    def f():
      # Dicts keyed by lambdas cannot be flattened/sorted by the debugger.
      weird_dict = {(lambda x: x): 2., (lambda x: x * 2): 3}
      weirder_dict = {(lambda x: x): weird_dict}
      x = 2.
      debugger.breakpoint(stdin=stdin, stdout=stdout, backend="cli")
      del weirder_dict
      return x
    expected = _format_multiline(r"""
    Entering jdb:
    \(jdb\) 2.0
    \(jdb\) <cant_flatten>
    \(jdb\) <cant_flatten>
    \(jdb\) """)
    _ = f()
    jax.effects_barrier()
    self.assertRegex(stdout.getvalue(), expected)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
jax-mlREPO_NAMEjaxPATH_START.@jax_extracted@jax-main@tests@debugger_test.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "PeterKamphuis/pyFAT-astro",
"repo_path": "pyFAT-astro_extracted/pyFAT-astro-main/pyFAT_astro/main.py",
"type": "Python"
}
|
# -*- coding: future_fstrings -*-
# This is the python version of FAT
import numpy as np
import os
import psutil
import pyFAT_astro
import pyFAT_astro.Support.support_functions as sf
import pyFAT_astro.Support.read_functions as rf
import sys
import traceback
import warnings
import threading
from datetime import datetime
from multiprocessing import Pool,get_context,Lock,Manager
from omegaconf import OmegaConf
from pyFAT_astro.FAT_Galaxy_Loop import FAT_Galaxy_Loop,MP_initialize_sofia,\
MP_Fitting_Loop
from pyFAT_astro.config.defaults import defaults
from pyFAT_astro.Support.fat_errors import ProgramError,BadCatalogueError
from pyFAT_astro.Support.write_functions import reorder_output_catalogue
from pyFAT_astro.Support.log_functions import full_system_tracking
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
    """Replacement for `warnings.showwarning` that also prints a stack trace.

    Writes the current stack followed by the formatted warning to `file`
    when it is writable, otherwise to stderr.
    """
    if hasattr(file, 'write'):
        log = file
    else:
        log = sys.stderr
    traceback.print_stack(file=log)
    log.write(warnings.formatwarning(message, category, filename, lineno, line))
# Prefer the stdlib `importlib.resources.files`; fall back to the
# `importlib_resources` backport on interpreters where it is unavailable.
try:
    from importlib.resources import files as import_pack_files
except ImportError:
    # Try backported to PY<37 `importlib_resources`.
    # For Py<3.9 files is not available
    from importlib_resources import files as import_pack_files
# String syntax ''' '''for multiline strings. " " for string without break and ' ' for indexing dictionaries
#from memory_profiler import profile
#@profile
def main(argv):
    # Entry point for pyFAT: parse CLI/config, build the galaxy catalogue and
    # run the fitting loop (multiprocessing or sequential). The user-facing
    # calling sequence is documented in `main.__doc__`, assigned below.
    try:
        #Get default settings
        print(f"This is version {pyFAT_astro.__version__} of pyFAT.")
        if pyFAT_astro.__branch__:
            print(f"This is a github distribution and we are on the branch {pyFAT_astro.__branch__}.")
        if '-v' in argv or '--version' in argv:
            # Version was already printed above; nothing more to do.
            #print(f"This is version {pyFAT_astro.__version__} of the program.")
            #if pyFAT_astro.__branch__:
            #    print(f"This is a github distribution and we are on the branch {pyFAT_astro.__branch__}.")
            sys.exit()
        help_message = '''
Use pyFAT in this way for batch fitting:
pyFAT configuration_file=FAT_Input.yml
where configuration_file specifies a yaml file with specific settings
such as the catalog.
For fitting use pyFAT in this way:
pyFAT cube_name=Input_Cube.fits
Where Input_Cube.fits is the observation to be fitted. In this mode
configuration_file can still be used to specify fit settings but
catalogue and location setting will be ignored.
pyFAT -h
prints this message.
pyFAT print_examples=True
prints a yaml file (FAT_defaults.yml) with the default values for all
possible fitting parameters and an example input catalogue (FAT_Example_Catalogue.txt).
The files are printed in the current working directory. In the yaml
file values designated ??? indicated values without defaults.
All config parameters can be set directly from the command e.g:
pyFAT file_name=Input_Cube.fits fitting.ring_size=1.5 'fitting.fixed_parameters=[INCL,SDIS]'
You can test your installation with:
pyFAT installation_check=True
'''
        if '-h' in argv or '--help' in argv:
            print(help_message)
            sys.exit()
        # Defaults from the structured schema; leave one cpu free by default.
        cfg = OmegaConf.structured(defaults)
        if cfg.ncpu == psutil.cpu_count():
            cfg.ncpu -= 1
        # read command line arguments anything list input should be set in '' e.g. pyROTMOD 'rotmass.MD=[1.4,True,True]'
        inputconf = OmegaConf.from_cli(argv)
        cfg_input = OmegaConf.merge(cfg,inputconf)
        if cfg_input.print_examples:
            # Dump the default config and the packaged example catalogue, then exit.
            no_cube = OmegaConf.masked_copy(cfg, ['ncpu','input','output',\
                'fitting','advanced'])
            with open('FAT_defaults.yml','w') as default_write:
                default_write.write(OmegaConf.to_yaml(no_cube))
            my_resources = import_pack_files('pyFAT_astro.config')
            data = (my_resources / 'FAT_Input_Catalogue.txt').read_bytes()
            with open('FAT_Example_Catalogue.txt','w+b') as default_write:
                default_write.write(data)
            print(f'''We have printed the file FAT_defaults.yml FAT_Input_Catalogue.txt in {os.getcwd()}.
''')
            sys.exit()
        if cfg_input.configuration_file:
            # Keep prompting until a loadable config file is supplied.
            succes = False
            while not succes:
                try:
                    yaml_config = OmegaConf.load(cfg_input.configuration_file)
                    #merge yml file with defaults
                    cfg = OmegaConf.merge(cfg,yaml_config)
                    succes = True
                except FileNotFoundError:
                    cfg_input.configuration_file = input(f'''
You have provided a config file ({cfg_input.configuration_file}) but it can't be found.
If you want to provide a config file please give the correct name.
Else press CTRL-C to abort.
configuration_file = ''')
            # Re-apply CLI arguments so they override the config file.
            cfg = OmegaConf.merge(cfg,inputconf)
        if not any([cfg.cube_name, cfg.configuration_file, cfg.installation_check\
                    ,cfg.print_examples,cfg.input.catalogue]):
            # Nothing to do: no cube, config, check or catalogue was given.
            print(help_message)
            sys.exit()
        # if we set more cpus than available we limit to the available cpus
        try:
            if cfg.ncpu > len(psutil.Process().cpu_affinity()):
                cfg.ncpu = len(psutil.Process().cpu_affinity())
        except AttributeError:
            # cpu_affinity is not available on all platforms (e.g. macOS).
            if cfg.ncpu > psutil.cpu_count():
                cfg.ncpu = psutil.cpu_count()
        #Let's write and input example to the main directory
        if cfg.output.debug:
            with open(f'{cfg.input.main_directory}/FAT_Inputs-Run_{datetime.now().strftime("%d-%m-%Y")}.yml','w') as default_write:
                default_write.write(OmegaConf.to_yaml(cfg))
        #Transform all to a Configuration dictionary
        Original_Configuration = sf.setup_configuration(cfg)
        if cfg.output.debug:
            # In debug mode every warning also prints a stack trace.
            warnings.showwarning = warn_with_traceback
        #First we check for sofia and TiRiFiC
        Original_Configuration['SOFIA2'] = sf.find_program(Original_Configuration['SOFIA2'], "SoFiA 2")
        Original_Configuration['TIRIFIC'] = sf.find_program(Original_Configuration['TIRIFIC'], "TiRiFiC")
        if cfg.cube_name:
            # Single-cube mode: build a one-entry catalogue for the given cube.
            Full_Catalogue = sf.Proper_Dictionary({})
            Full_Catalogue['ENTRIES'] = ['ENTRIES','ID','DISTANCE','DIRECTORYNAME','CUBENAME']
            Full_Catalogue['ID'] = [f"{os.path.splitext(cfg.cube_name.split('/')[-1])[0]}"]
            Full_Catalogue['DISTANCE'] = [-1.]
            Full_Catalogue['DIRECTORYNAME'] = ['./']
            Full_Catalogue['CUBENAME'] = [f"{os.path.splitext(cfg.cube_name.split('/')[-1])[0]}"]
        elif 'sofia_catalogue' in Original_Configuration['FITTING_STAGES']:
            Full_Catalogue = rf.sofia_input_catalogue(Original_Configuration)
        else:
            Full_Catalogue = rf.catalogue(Original_Configuration['CATALOGUE'],split_char= cfg.advanced.catalogue_split_character)
        # Get the longest directory name to format the output directory properlyFit_Tirific_OSC
        for directory in Full_Catalogue['DIRECTORYNAME']:
            if directory == './':
                directory = Original_Configuration['MAIN_DIRECTORY'].split('/')[-2]
            if len(directory) > Original_Configuration['MAXIMUM_DIRECTORY_LENGTH']:
                Original_Configuration['MAXIMUM_DIRECTORY_LENGTH'] = len(directory)
        # Create a file to write the results to if if required
        if Original_Configuration['OUTPUT_CATALOGUE']:
            if not os.path.exists(Original_Configuration['OUTPUT_CATALOGUE']) or Original_Configuration['NEW_OUTPUT']:
                if os.path.exists(Original_Configuration['OUTPUT_CATALOGUE']) and Original_Configuration['NEW_OUTPUT']:
                    # Preserve the previous catalogue before starting fresh.
                    os.rename(Original_Configuration['OUTPUT_CATALOGUE'],f"{os.path.splitext(Original_Configuration['OUTPUT_CATALOGUE'])[0]}_Prev.txt")
                with open(Original_Configuration['OUTPUT_CATALOGUE'],'w') as output_catalogue:
                    comment = 'Comments on Fit Result'
                    AC1 = 'OS'
                    output_catalogue.write(f"{'Directory Name':<{Original_Configuration['MAXIMUM_DIRECTORY_LENGTH']}s} {AC1:>6s} {comment}\n")
        if Original_Configuration['TIMING']:
            with open(Original_Configuration['MAIN_DIRECTORY']+'Timing_Result.txt','w') as timing_result:
                timing_result.write("Timing results for every section of the fit process for all galaxies. \n")
            # If we do this we should have 1 cpu to keep going
            Original_Configuration['NCPU'] -= 1
            # Background thread that samples system usage during the run.
            system_monitor = full_system_tracking(Original_Configuration)
            fst = threading.Thread(target=system_monitor.start_monitoring)
            fst.start()
            print(f"We are using {Original_Configuration['NCPU']} cpus for fitting and 1 for timing.")
        else:
            print(f"We are using {Original_Configuration['NCPU']} cpus.")
        #if start_galaxy not negative then it is catalogue ID
        if Original_Configuration['CATALOGUE_START_ID'] in ['-1','-1.']:
            Original_Configuration['CATALOGUE_START_ID'] = int(0)
        else:
            Original_Configuration['CATALOGUE_START_ID'] = int(np.where(Original_Configuration['CATALOGUE_START_ID'] == np.array(Full_Catalogue['ID'],dtype=str))[0][0])
        # If the end galaxy is -1 fit the whole catalogue
        if Original_Configuration['CATALOGUE_END_ID'] in ['-1','-1.']:
            Original_Configuration['CATALOGUE_END_ID'] = int(len(Full_Catalogue['ID']))
            if Original_Configuration['CATALOGUE_END_ID'] == 0:
                Original_Configuration['CATALOGUE_END_ID'] = 1
        else:
            Original_Configuration['CATALOGUE_END_ID'] = int(np.where(Original_Configuration['CATALOGUE_END_ID'] == np.array(Full_Catalogue['ID'],dtype=str))[0][0])+1
        # start the main fitting loop
        if float(Original_Configuration['CATALOGUE_START_ID']) > float(Original_Configuration['CATALOGUE_END_ID']):
            raise BadCatalogueError(f''' Your starting galaxy (Line nr = {Original_Configuration['CATALOGUE_START_ID']}) is listed after your ending galaxy (Line nr = {Original_Configuration['CATALOGUE_END_ID']}), maybe you have double catalogue ids?''')
            # NOTE(review): this sys.exit is unreachable — the raise above
            # already leaves this frame.
            sys.exit(1)
        if Original_Configuration['MULTIPROCESSING']:
            # Parallel path: size-estimate every galaxy with SoFiA first, then
            # fit the largest galaxies first to balance the worker pool.
            Original_Configuration['VERBOSE_SCREEN'] = False
            #output_catalogue = copy.deepcopy(Original_Configuration['OUTPUT_CATALOGUE'])
            #Original_Configuration['OUTPUT_CATALOGUE'] = None
            no_processes,sofia_processes = sf.calculate_number_processes(Original_Configuration)
            Configs_and_Locks = []
            with Manager() as loop_manager:
                timing_lock = loop_manager.Lock()
                catalogue_lock = loop_manager.Lock()
                #In case of multiprocessing we want to make sure to start with
                #The big galaxies
                #Setup an array of configs with locks
                for current_galaxy_index in range(Original_Configuration['CATALOGUE_START_ID'], Original_Configuration['CATALOGUE_END_ID']):
                    Configs_and_Locks.append([sf.set_individual_configuration(current_galaxy_index,Full_Catalogue,Original_Configuration),timing_lock,catalogue_lock])
                #Get all intitial setups
                with get_context("spawn").Pool(processes=sofia_processes) as pool:
                    print(f'Starting size estimates with {sofia_processes} processes')
                    initial_setups = pool.starmap(MP_initialize_sofia, Configs_and_Locks)
                # Drop galaxies whose initial SoFiA setup failed.
                initial_setups = [x for x in initial_setups if x['Succes']]
                sizes = np.array([np.mean(x['Size']) for x in initial_setups]\
                    ,dtype=float)
                if len(sizes) > 0.:
                    # Fit in descending size order.
                    sorted_ind = np.flip(sizes.argsort())
                    sorted_initial_setups = [[initial_setups[x],timing_lock,catalogue_lock] \
                        for x in sorted_ind]
                    initial_setups =[]
                    with get_context("spawn").Pool(processes=no_processes) as pool:
                        print(f'Starting fitting with {no_processes} processes')
                        finals = pool.starmap(MP_Fitting_Loop, sorted_initial_setups)
                else:
                    print(f'All galaxies can not be fitted')
            #For clarity we reorder the output results to match the input
            reorder_output_catalogue(Original_Configuration,Full_Catalogue)
            #Stitch all temporary outpu catalogues back together
            #with open(output_catalogue,'a') as catalogue:
            #    for x in results:
            #        catalogue.writelines(x)
        else:
            # Sequential path: fit one galaxy at a time with all cpus.
            Original_Configuration['PER_GALAXY_NCPU'] = sf.set_limits(Original_Configuration['NCPU'],1,20)
            for current_galaxy_index in range(Original_Configuration['CATALOGUE_START_ID'], Original_Configuration['CATALOGUE_END_ID']):
                Configuration = sf.set_individual_configuration(current_galaxy_index,Full_Catalogue,Original_Configuration)
                catalogue_line = FAT_Galaxy_Loop(Configuration)
        if Original_Configuration['TIMING']:
            # Shut down the system-usage monitor thread cleanly.
            system_monitor.stop_monitoring()
            fst.join()
    except SystemExit:
        # Normal early exits (help/version/examples): best-effort monitor cleanup.
        try:
            system_monitor.stop_monitoring()
            fst.join()
        except:
            pass
        pass
    except KeyboardInterrupt:
        traceback.print_exception(*sys.exc_info())
        try:
            system_monitor.stop_monitoring()
            fst.join()
        except:
            pass
        pass
    except:
        # Unexpected failure: clean up the monitor, then surface a ProgramError.
        try:
            system_monitor.stop_monitoring()
            fst.join()
        except:
            pass
        raise ProgramError(f'''Something went wrong in the main. This should not happen. Please list an issue on github.''')
main.__doc__ = '''
NAME:
main
PURPOSE:
Fit Tilted Ring Models with Tirific in a fully automated manner
CATEGORY:
Main for fitting galaxies. Tirific still requires interactive fitting this code attempts
to remedy that
CALLING SEQUENCE:
see pyFAT -h
INPUTS:
see pyFAT -h
OUTPUTS:
See Readme or just run the code
EXAMPLE:
pyFAT configuration_file=/home/your_computer/FAT_dir/FAT_INPUT.yml'
'''
|
PeterKamphuisREPO_NAMEpyFAT-astroPATH_START.@pyFAT-astro_extracted@pyFAT-astro-main@pyFAT_astro@main.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "spacetelescope/pystortion",
"repo_path": "pystortion_extracted/pystortion-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# setup.py for the pystortion package, built on the (legacy)
# ah_bootstrap / astropy_helpers machinery.  Most metadata is read from
# setup.cfg; the literals below are only fallbacks when a key is missing.
import glob
import os
import sys
from configparser import ConfigParser
# Get some values from the setup.cfg
conf = ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'pystortion')
DESCRIPTION = metadata.get('description', 'Support for distortion measurements in astronomical imagers.')
AUTHOR = metadata.get('author', 'Johannes Sahlmann STScI/AURA')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://stsci.edu')
__minimum_python_version__ = metadata.get("minimum_python_version", "3.5")
# Enforce Python version check - this is the same check as in __init__.py but
# this one has to happen before importing ah_bootstrap.
if sys.version_info < tuple((int(val) for val in __minimum_python_version__.split('.'))):
    sys.stderr.write("ERROR: pystortion requires Python {} or later\n".format(__minimum_python_version__))
    sys.exit(1)
# Import ah_bootstrap after the python version validation
import ah_bootstrap
from setuptools import setup
# Flag read by astropy_helpers (and typically by the package __init__) to
# signal that we are inside setup.py, so heavy imports can be skipped.
import builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (register_commands, get_debug_option,
get_package_info)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# order of priority for long_description:
# (1) set in setup.cfg,
# (2) load LONG_DESCRIPTION.rst,
# (3) load README.rst,
# (4) package docstring
readme_glob = 'README*'
_cfg_long_description = metadata.get('long_description', '')
if _cfg_long_description:
    LONG_DESCRIPTION = _cfg_long_description
elif os.path.exists('LONG_DESCRIPTION.rst'):
    with open('LONG_DESCRIPTION.rst') as f:
        LONG_DESCRIPTION = f.read()
elif len(glob.glob(readme_glob)) > 0:
    with open(glob.glob(readme_glob)[0]) as f:
        LONG_DESCRIPTION = f.read()
else:
    # Get the long description from the package's docstring
    __import__(PACKAGENAME)
    package = sys.modules[PACKAGENAME]
    LONG_DESCRIPTION = package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP440 compatible (http://www.python.org/dev/peps/pep-0440)
VERSION = metadata.get('version', '0.0.dev')
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
    # Append the git revision string so every dev build gets a unique version.
    VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
get_debug_option(PACKAGENAME))
# Treat everything in scripts except README* as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
if not os.path.basename(fname).startswith('README')]
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
entry_points = {'console_scripts': []}
if conf.has_section('entry_points'):
    entry_point_list = conf.items('entry_points')
    for entry_point in entry_point_list:
        entry_points['console_scripts'].append('{0} = {1}'.format(
entry_point[0], entry_point[1]))
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
    for filename in files:
        if filename.endswith('.c'):
            c_files.append(
                os.path.join(
                    os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
# Note that requires and provides should not be included in the call to
# ``setup``, since these are now deprecated. See this link for more details:
# https://groups.google.com/forum/#!topic/astropy-dev/urYO8ckB2uM
setup(name=PACKAGENAME,
      version=VERSION,
      description=DESCRIPTION,
      scripts=scripts,
      install_requires=[s.strip() for s in metadata.get('install_requires', 'astropy').split(',')],
      author=AUTHOR,
      author_email=AUTHOR_EMAIL,
      license=LICENSE,
      url=URL,
      long_description=LONG_DESCRIPTION,
      cmdclass=cmdclassd,
      zip_safe=False,
      use_2to3=False,
      entry_points=entry_points,
      python_requires='>={}'.format(__minimum_python_version__),
      **package_info
)
|
spacetelescopeREPO_NAMEpystortionPATH_START.@pystortion_extracted@pystortion-master@setup.py@.PATH_END.py
|
{
"filename": "TestSampleList.py",
"repo_name": "dokester/BayesicFitting",
"repo_path": "BayesicFitting_extracted/BayesicFitting-master/BayesicFitting/test/TestSampleList.py",
"type": "Python"
}
|
# run with : python3 -m unittest TestSampleList
import unittest
import numpy as numpy
import sys
from numpy.testing import assert_array_almost_equal as assertAAE
from astropy import units
import math
from BayesicFitting import *
from BayesicFitting import formatter as fmt
# Module metadata, BayesicFitting house style.
__author__ = "Do Kester"
__year__ = 2017
__license__ = "GPL3"
__version__ = "0.9"
__maintainer__ = "Do"
__status__ = "Development"
# *
# * This file is part of the BayesicFitting package.
# *
# * BayesicFitting is free software: you can redistribute it and/or modify
# * it under the terms of the GNU Lesser General Public License as
# * published by the Free Software Foundation, either version 3 of
# * the License, or ( at your option ) any later version.
# *
# * BayesicFitting is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU Lesser General Public License for more details.
# *
# * The GPL3 license can be found at <http://www.gnu.org/licenses/>.
# *
# * 2002 Do Kester
class TestSampleList( unittest.TestCase ):
"""
Test harness for Fitter class.
Author Do Kester
"""
# Define x independent variable
x = numpy.asarray( [ -1.0, -0.8, -0.6, -0.4, -0.2, 0.0, 0.2, 0.4, 0.6, 0.8, 1.0], dtype=float )
# Define noise: random Gaussian noise sig=0.3
noise = numpy.asarray( [ -0.000996, -0.046035, 0.013656, 0.418449, 0.0295155, 0.273705,
-0.204794, 0.275843, -0.415945, -0.373516, -0.158084], dtype=float )
wgt = numpy.asarray( [1,2,3,4,5,6,7,4,3,2,1], dtype=float ) # total 38
par = numpy.asarray( [3,2,1,0.3], dtype=float )
len = 11
# **************************************************************
def testSampleList( self ):
print( "========= SampleListTest =======================" )
gm = GaussModel( )
gm += PolynomialModel( 0 )
problem = ClassicProblem( gm, xdata=self.x, ydata=self.noise )
# errdis = GaussErrorDistribution( )
lnZ = 1.234
sl0 = SampleList( gm, self.len )
k = 0
for s in sl0 :
self.assertTrue( s.id == k )
self.assertTrue( s.parent == -1 )
self.assertTrue( isinstance( s.model, GaussModel ) )
self.assertTrue( s.logW == 0 )
self.assertTrue( s.logL == 0 )
self.assertTrue( s.parameters[0] == 1 )
self.assertTrue( s.parameters[1] == 0 )
self.assertTrue( s.parameters[2] == 1 )
self.assertTrue( s.parameters[3] == 0 )
self.assertTrue( s.fitIndex is None or len( s.fitIndex ) == 4 )
k += 1
ap = numpy.append( gm.parameters, [0.5] )
fi = numpy.asarray( [0,1,2,3,-1] )
sl = SampleList( gm, self.len, parameters=ap, fitIndex=fi )
k = 0
for s in sl:
s.id = k + 1
s.parent = ( k + 2 ) % self.len + 1
sup = 0.3 + 0.01 * self.noise[k]
s.hyper = sup
pars = self.par + 0.01 * self.noise[k]
s.parameters = pars
s.logL = -1213.0 + self.x[k]
s.logW = math.log( self.wgt[k] / 38.0 ) + lnZ
print( s )
print( " allpars ", fmt( s.allpars ) )
print( " par sup ", fmt( s.parameters ), fmt( s.hypars ) )
print( " fitindx ", fmt( s.fitIndex ) )
k += 1
sl.logZ = lnZ
sl.info = 30
self.assertTrue( sl.evidence == sl.logZ / math.log( 10) )
self.assertTrue( sl.info == 30 )
sumw = 0
for s in sl :
sumw += s.weight
print( "wgt evo ", sl.getWeightEvolution( ) )
print( "sum wgt ", sumw )
self.assertFalse( abs(sumw - 1.0 ) < 1e-8 )
sl.normalize()
sumw = 0
for s in sl :
sumw += s.weight
print( sl.getWeightEvolution( ) )
print( sumw )
self.assertTrue( abs(sumw - 1.0 ) < 1e-8 )
for s in sl0 :
s.logW = -sys.float_info.max
sl0.sample( 4, sl.sample( 4 ) )
sl0[5] = sl[5]
sl0.logZ = sl.logZ
print( sl0.sample( 4 ) )
print( sl0[4].parameters )
print( sl0[4].hypars )
print( sl.sample( 4 ) )
print( sl[4].parameters )
print( sl[4].hypars )
print( sl0.sample( 5 ) )
print( sl.sample( 5 ) )
self.assertTrue( sl0[4].id == sl[4].id )
self.assertTrue( sl0[4].parent == sl[4].parent )
self.assertTrue( sl0[4].hypars == sl[4].hypars )
self.assertTrue( sl0[4].logW == sl[4].logW )
self.assertTrue( sl0.sample( 4 ).logL == sl[4].logL )
print( "sl0 Id ", sl0.getGeneration( ) )
print( "sl0 par ", sl0.getParentEvolution( ) )
print( "sl Id ", sl.getGeneration( ) )
print( "sl par ", sl.getParentEvolution( ) )
sl0.add( sl[10] )
# print( "sl0 Id ", sl0.getGeneration( ) )
self.assertTrue( len( sl0 ) == 12 )
sl0.copy( 11, 0 )
# print( "sl0 Id ", sl0.getGeneration( ) )
self.assertTrue( sl0[0].id == 0 )
self.assertTrue( sl0[11].id == 11 )
self.assertTrue( sl0[0].logW == sl0[11].logW )
self.assertTrue( sl0.sample( 0 ).logL == sl0.sample( 11 ).logL )
for s,t in zip( sl0, sl ) :
s.logL = t.logL
s.logW = t.logW
print( "SL ", len( sl0 ) )
# Tools.printclass( sl0[0] )
# Tools.printclass( sl0[7] )
for s in sl0 :
print( s.id, s.logW, s.logL )
k = 1
while k < 3:
sl0.weed( maxsize=8 )
print( "SL0 ", k, len( sl0 ) )
for s in sl0 :
print( s.id, s.logW, s.logL )
# Tools.printclass( sl0[0] )
# Tools.printclass( sl0[7] )
self.assertTrue( len( sl0 ) == 8 )
# self.assertTrue( sl0[0].logL == sl0[5].logL )
k += 1
print( "SL ", k, len( sl ) )
for s in sl :
print( s.id, s.logW, s.logL )
print( "par ", sl.parameters )
print( "stdev ", sl.stdevs )
print( "hypars ", sl.hypars )
print( "stdscl ", sl.stdevHypars )
print( sl.medianIndex, sl.modusIndex, sl.maxLikelihoodIndex )
sss = numpy.zeros( gm.npchain, dtype=float ) + 0.003
assertAAE( sl.parameters, self.par, 2 )
assertAAE( sl.standardDeviations, sss, 1 )
assertAAE( sl.scale, 0.3, 2 )
assertAAE( sl.stdevScale, 0.003, 1 )
self.assertTrue( sl.medianIndex == 5 )
self.assertTrue( sl.modusIndex == 6 )
self.assertTrue( sl.maxLikelihoodIndex == -1 )
assertAAE( sl.maxLikelihoodParameters, sl[10].parameters )
self.assertTrue( sl.maxLikelihoodScale == sl[10].hypars[0] )
assertAAE( sl.medianParameters, sl[5].parameters )
self.assertTrue( sl.medianScale == sl[5].hypars[0] )
assertAAE( sl.modusParameters, sl[6].parameters )
self.assertTrue( sl.modusScale == sl[6].hypars[0] )
param = sl.getParameterEvolution( )
print( param.shape )
self.assertTrue( param.shape[0] == 11 )
self.assertTrue( param.shape[1] == 4 )
par0 = sl.getParameterEvolution( 0 )
par1 = sl.getParameterEvolution( 1 )
par2 = sl.getParameterEvolution( 2 )
par3 = sl.getParameterEvolution( 3 )
nrp = sl.getNumberOfParametersEvolution( )
for i in range( self.len ):
assertAAE( param[i], self.par + 0.01 * self.noise[i] )
assertAAE( param[i, 0], par0[i] )
assertAAE( param[i, 1], par1[i] )
assertAAE( param[i, 2], par2[i] )
assertAAE( param[i, 3], par3[i] )
self.assertTrue( nrp[i] == 4 )
xx = numpy.arange( 10, dtype=float ) * 0.2
yf1 = sl.average( xx )
yf2 = gm.result( xx, sl.getParameters( ) )
assertAAE( yf1, yf2, 5 )
err = sl.monteCarloError( xx )
assertAAE( err, numpy.zeros( 10, dtype=float ), 2 )
zz = numpy.arange( 20, dtype=float ) * 0.2
assertAAE( sl.monteCarloError( zz ), numpy.zeros( 20, dtype=float ), 2 )
@classmethod
def suite( cls ):
return unittest.TestCase.suite( TestSampleList.__class__ )
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
|
dokesterREPO_NAMEBayesicFittingPATH_START.@BayesicFitting_extracted@BayesicFitting-master@BayesicFitting@test@TestSampleList.py@.PATH_END.py
|
{
"filename": "precalc.py",
"repo_name": "TRASAL/frbpoppy",
"repo_path": "frbpoppy_extracted/frbpoppy-master/frbpoppy/precalc.py",
"type": "Python"
}
|
"""Create a lookup tables for redshift and the NE2001, YMW16 dispersion measure."""
import os
import numpy as np
#import numexpr as ne
import time
import bisect
import sys
from scipy.integrate import quad
from tqdm import tqdm
from joblib import Parallel, delayed
import astropy.units as u
from astropy.cosmology import Planck13, Planck18, z_at_value
import frbpoppy.galacticops as go
from frbpoppy.misc import pprint
from frbpoppy.paths import paths
class NE2001Table:
    """Create/use a NE2001 lookup table for galactic dispersion measure.

    The table is a nested dict {gl: {gb: dm}} on a 1-degree grid, computed
    once via galacticops.ne2001_dist_to_dm and cached to a .npy file.
    """

    def __init__(self, test=False):
        """Set up (or load) the lookup table.

        Args:
            test (bool): Use a test table which is rebuilt on every run.
        """
        self.test = test
        self.set_file_name()
        # Setup database
        self.db = False
        self.step = 0.1
        self.rounding = 1
        # Path of a temporary file used by parallel processes (if any).
        self.temp_path = None
        if self.test:
            self.step = 0.1
            # Always rebuild in test mode.
            if os.path.exists(self.file_name):
                os.remove(self.file_name)
        if os.path.exists(self.file_name) and self.test is False:
            self.db = True
        else:
            # Calculations take quite some time
            # Provide a way for people to quit
            try:
                self.create_table()
            except KeyboardInterrupt:
                pprint('Losing all progress in calculations')
                os.remove(self.file_name)
                # Bug fix: this previously read `self.temp`, an attribute
                # that is never set, so the cleanup itself raised
                # AttributeError on interrupt.
                if self.temp_path:
                    os.remove(self.temp_path)
                sys.exit()

    def set_file_name(self):
        """Determine filename."""
        uni_mods = os.path.join(paths.models(), 'universe/')
        self.file_name = uni_mods + 'dm_mw_ne2001.npy'
        if self.test:
            uni_mods = os.path.join(paths.models(), 'universe/')
            self.file_name = uni_mods + 'test_dm_mw_ne2001.npy'

    def create_table(self, parallel=True):
        """Create a lookup table for dispersion measure.

        Args:
            parallel (bool): Currently unused; kept for interface
                compatibility.
        """
        step = 1
        gls = np.arange(-180., 180. + step, step).round(1)
        gbs = np.arange(-90., 90. + step, step).round(1)
        dist = 0.1 # [Gpc]
        gls = gls.astype(np.float32)
        gbs = gbs.astype(np.float32)
        DM_MW_Table = {}
        for gl in gls:
            for gb in gbs:
                if gl in DM_MW_Table:
                    DM_MW_Table[gl].update({gb: go.ne2001_dist_to_dm(dist, gl, gb)})
                else:
                    DM_MW_Table.update({gl: {gb: go.ne2001_dist_to_dm(dist, gl, gb)}})
        np.save(self.file_name, DM_MW_Table)

    def lookup(self, gal, gab):
        """Look up associated milky way dispersion measure with gal coords.

        Args:
            gal (array): Galactic longitude [fractional degrees]
            gab (array): Galactic latitude [fractional degrees]

        Returns:
            dm_mw (array): Galactic dispersion measure [pc*cm^-3]
        """
        # Load dm table (a nested dict pickled inside a 0-d object array)
        dm_mw_table = np.load(self.file_name, allow_pickle=True)
        dm_mw_table = dict(enumerate(dm_mw_table.flatten()))[0]
        # Flatten the nested dict into an (index, dm) table sorted by index
        coor_index = range(0, 361*181)
        dm_mw_table_1d = []
        for i in range(-180, 181):
            for j in range(-90, 91):
                dm_mw_table_1d.append(dm_mw_table[i][j])
        dm_mw_table_1d = np.stack((np.array(coor_index), np.array(dm_mw_table_1d)), axis=-1)
        # Round coordinates onto the table's 1-degree grid
        gal = np.round(gal)
        gab = np.round(gab)
        index = (gal - (-180))*181 + (gab - (-90))
        dm_mw = dm_mw_table_1d[np.searchsorted(dm_mw_table_1d[:, 0], index)][:, 1]
        return dm_mw
class YMW16Table:
    """Create/use a YMW16 lookup table for galactic dispersion measure.

    (The original docstring said "NE2001" — a copy-paste slip; this class
    uses galacticops.ymw16_dist_to_dm.) The table is a nested dict
    {gl: {gb: dm}} on a 1-degree grid, cached to a .npy file.
    """

    def __init__(self, test=False):
        """Set up (or load) the lookup table.

        Args:
            test (bool): Use a test table which is rebuilt on every run.
        """
        self.test = test
        self.set_file_name()
        # Setup database
        self.db = False
        self.step = 0.1
        self.rounding = 1
        # Path of a temporary file used by parallel processes (if any).
        self.temp_path = None
        if self.test:
            self.step = 0.1
            # Always rebuild in test mode.
            if os.path.exists(self.file_name):
                os.remove(self.file_name)
        if os.path.exists(self.file_name) and self.test is False:
            self.db = True
        else:
            # Calculations take quite some time
            # Provide a way for people to quit
            try:
                self.create_table()
            except KeyboardInterrupt:
                pprint('Losing all progress in calculations')
                os.remove(self.file_name)
                # Bug fix: this previously read `self.temp`, an attribute
                # that is never set, so the cleanup itself raised
                # AttributeError on interrupt.
                if self.temp_path:
                    os.remove(self.temp_path)
                sys.exit()

    def set_file_name(self):
        """Determine filename."""
        uni_mods = os.path.join(paths.models(), 'universe/')
        self.file_name = uni_mods + 'dm_mw_ymw16.npy'
        if self.test:
            uni_mods = os.path.join(paths.models(), 'universe/')
            self.file_name = uni_mods + 'test_dm_mw_ymw16.npy'

    def create_table(self, parallel=True):
        """Create a lookup table for dispersion measure.

        Args:
            parallel (bool): Currently unused; kept for interface
                compatibility.
        """
        step = 1
        gls = np.arange(-180., 180. + step, step).round(1)
        gbs = np.arange(-90., 90. + step, step).round(1)
        dist = 0.1 # [Gpc]
        gls = gls.astype(np.float32)
        gbs = gbs.astype(np.float32)
        DM_MW_Table = {}
        for gl in gls:
            for gb in gbs:
                if gl in DM_MW_Table:
                    DM_MW_Table[gl].update({gb: go.ymw16_dist_to_dm(dist, gl, gb)})
                else:
                    DM_MW_Table.update({gl: {gb: go.ymw16_dist_to_dm(dist, gl, gb)}})
        np.save(self.file_name, DM_MW_Table)
        pprint('Finished DM table')

    def lookup(self, gal, gab):
        """Look up associated milky way dispersion measure with gal coords.

        Args:
            gal (array): Galactic longitude [fractional degrees]
            gab (array): Galactic latitude [fractional degrees]

        Returns:
            dm_mw (array): Galactic dispersion measure [pc*cm^-3]
        """
        # Load dm table (a nested dict pickled inside a 0-d object array)
        dm_mw_table = np.load(self.file_name, allow_pickle=True)
        dm_mw_table = dict(enumerate(dm_mw_table.flatten()))[0]
        # Flatten the nested dict into an (index, dm) table sorted by index
        coor_index = range(0, 361*181)
        dm_mw_table_1d = []
        for i in range(-180, 181):
            for j in range(-90, 91):
                dm_mw_table_1d.append(dm_mw_table[i][j])
        dm_mw_table_1d = np.stack((np.array(coor_index), np.array(dm_mw_table_1d)), axis=-1)
        # Round coordinates onto the table's 1-degree grid
        gal = np.round(gal)
        gab = np.round(gab)
        index = (gal - (-180))*181 + (gab - (-90))
        dm_mw = dm_mw_table_1d[np.searchsorted(dm_mw_table_1d[:, 0], index)][:, 1]
        return dm_mw
class DistanceTable:
    """
    Create/use a lookup table for comoving distance, volume, redshift etc.
    Create a list of tuples to lookup the corresponding redshift for a comoving
    distance [Gpc] (or the other way around). Uses formulas from
    Hoggs et al. (1999) for the cosmological calculations. To avoid long
    calculation times, it will check if a previous run with the same parameters
    has been done, which it will then load it. If not, it will calculate a new
    table, and save the table for later runs. Covers z, dist, vol, dvol,
    cdf_sfr and cdf_smd and several delayed cdf_sfr.

    Args:
        H_0 (float, optional): Hubble parameter. Defaults to 67.74 km/s/Mpc
        W_m (float, optional): Omega matter. Defaults to 0.3089
        W_v (float, optional): Omega vacuum. Defaults to 0.6911
    """

    def __init__(self, H_0=67.74, W_m=0.3089, W_v=0.6911, test=False):
        """Initializing."""
        self.H_0 = H_0
        self.W_m = W_m
        self.W_v = W_v
        self.test = test
        self.set_file_name()
        # Setup database
        self.db = False
        self.step = 0.00001
        self.z_max = 6.5
        if self.test:
            self.step = 0.001
            self.z_max = 6.5
            # Bug fix: the removal of a stale table was unconditional, so
            # the cached table was deleted on *every* construction and the
            # `self.db` shortcut below could never trigger, forcing a full
            # (expensive) rebuild each run. Only discard the file in test
            # mode, mirroring NE2001Table / YMW16Table.
            if os.path.exists(self.file_name):
                os.remove(self.file_name)
        if os.path.exists(self.file_name) and self.test is False:
            self.db = True
        else:
            # Calculations take quite some time
            # Provide a way for people to quit
            try:
                self.create_table()
            except KeyboardInterrupt:
                pprint('Losing all progress in calculations')
                os.remove(self.file_name)
                sys.exit()

    def set_file_name(self):
        """Determine filename (encodes the cosmological parameters)."""
        uni_mods = os.path.join(paths.models(), 'universe/')

        def cvt(value):
            """Convert a float to a string without a period."""
            return str(value).replace('.', 'd')
        # Convert
        paras = ['h0', cvt(self.H_0),
                 'wm', cvt(self.W_m),
                 'wv', cvt(self.W_v)]
        f = '-'.join(paras)
        self.file_name = uni_mods + f'{f}.npy'
        if self.test:
            self.file_name = uni_mods + 'cosmo_test.npy'

    def create_table(self):
        """Create a lookup table for distances."""
        m = ['Creating a distance table',
' - Only needs to happen once',
' - May take up to 2m on a single core']
        for n in m:
            pprint(n)
        H_0 = self.H_0
        W_m = self.W_m
        W_v = self.W_v
        W_k = 1.0 - W_m - W_v # Omega curvature
        if W_k != 0.0:
            pprint('Careful - Your cosmological parameters do not sum to 1.0')
        # Use the available cores, capped at the previous hard-coded 128.
        n_cpus = min(128, os.cpu_count() or 1)
        zs = np.arange(0, self.z_max+self.step, self.step)
        pprint(' - Calculating parameters at various redshifts')
        conv = go.Redshift(zs, H_0=H_0, W_m=W_m, W_v=W_v)
        dists = conv.dist_co()
        vols = conv.vol_co()
        # Get dV (shell volume per redshift bin)
        dvols = np.zeros_like(vols)
        dvols[1:] = np.diff(vols)
        pprint(' - Calculating Star Formation Rate')
        # Get pdf sfr
        pdf_sfr = np.array(Parallel(n_jobs=n_cpus)(delayed(sfr)(i) for i in zs))*dvols
        cdf_sfr = np.cumsum(pdf_sfr)
        cdf_sfr /= cdf_sfr[-1] # Normalize
        pprint(' - Calculating Stellar Mass Density')
        # Get pdf csmd
        pdf_smd = np.array(Parallel(n_jobs=n_cpus)(delayed(smd)(i, H_0=H_0, W_m=W_m, W_v=W_v) for i in zs))*dvols
        cdf_smd = np.cumsum(pdf_smd)
        cdf_smd /= cdf_smd[-1] # Normalize
        pprint(' - Calculating Delayed Star Formation Rate - 0.1 Gyr')
        # Get pdf delayed sfr 0.1 Gyr
        pdf_dsfr0d1 = np.array(Parallel(n_jobs=n_cpus)(delayed(delayed_sfr)(i, 0.1) for i in zs))*dvols
        cdf_dsfr0d1 = np.cumsum(pdf_dsfr0d1)
        cdf_dsfr0d1 /= cdf_dsfr0d1[-1] # Normalize
        pprint(' - Calculating Delayed Star Formation Rate - 0.5 Gyr')
        # Get pdf delayed sfr 0.5 Gyr
        pdf_dsfr0d5 = np.array(Parallel(n_jobs=n_cpus)(delayed(delayed_sfr)(i, 0.5) for i in zs))*dvols
        cdf_dsfr0d5 = np.cumsum(pdf_dsfr0d5)
        cdf_dsfr0d5 /= cdf_dsfr0d5[-1] # Normalize
        pprint(' - Calculating Delayed Star Formation Rate - 1 Gyr')
        # Get pdf delayed sfr 1 Gyr
        pdf_dsfr1 = np.array(Parallel(n_jobs=n_cpus)(delayed(delayed_sfr)(i, 1) for i in zs))*dvols
        cdf_dsfr1 = np.cumsum(pdf_dsfr1)
        cdf_dsfr1 /= cdf_dsfr1[-1] # Normalize
        lookback_times = Planck18.lookback_time(zs).value
        # One row per redshift step; column order must match lookup()'s kw dict.
        results = np.stack((zs, dists, vols, dvols, cdf_sfr, cdf_smd, cdf_dsfr0d1, cdf_dsfr0d5, cdf_dsfr1, lookback_times)).T
        pprint(' - Saving values to database')
        np.save(self.file_name, results)
        pprint('Finished distance table')

    def lookup(self, z=None, dist_co=None, vol_co=None, dvol_co=None,
cdf_sfr=None, cdf_smd=None,
cdf_dsfr0d1=None, cdf_dsfr0d5=None, cdf_dsfr1=None,
lookback_time=None):
        """Look up associated values with input values.

        Exactly one keyword argument should be given; the first non-None
        keyword (in signature order) is used as the lookup column, and the
        matching rows (found with np.searchsorted on that pre-sorted
        column) supply all other columns.

        Returns:
            list: One array per column, in the same order as the keywords.
        """
        distance_table = np.load(self.file_name, allow_pickle=True)
        # Check what's being looked up, set all other keywords to same length
        kw = {'z': z,
'dist': dist_co,
'vol': vol_co,
'dvol': dvol_co,
'cdf_sfr': cdf_sfr,
'cdf_smd': cdf_smd,
'cdf_dsfr0d1': cdf_dsfr0d1,
'cdf_dsfr0d5': cdf_dsfr0d5,
'cdf_dsfr1': cdf_dsfr1,
'lookback_time': lookback_time}
        for key, value in kw.items():
            if value is not None:
                in_par = key
                break
        for key, value in kw.items():
            if key != in_par:
                kw[key] = np.ones_like(kw[in_par])
        keys = list(kw.keys())
        # Search database
        d = distance_table[np.searchsorted(distance_table[:, keys.index(in_par)], kw[in_par])]
        for key in keys:
            if key == in_par:
                continue
            kw[key] = d[:, keys.index(key)]
        return list(kw.values())
def delayed_sfr(z, delay_time):
    """Return the number density of star forming rate at redshift z,
    shifted by a formation delay.

    Follows Madau & Dickinson (2014), eq. 15. For more info see
    https://arxiv.org/pdf/1403.0007.pdf

    Args:
        z (array): Redshifts at which to evaluate the delayed SFR.
        delay_time (float): Delay time [Gyr].
    """
    #Make sure we do not exceed the z_at_value limit
    # z_lim is the redshift whose cosmic age equals delay_time; beyond it
    # the delayed epoch would predate the Big Bang. The 0.002 offset keeps
    # z_at_value away from its bracketing limit — presumably a numerical
    # safety margin; TODO confirm.
    z_lim = z_at_value(Planck18.age, delay_time * u.Gyr) - 0.002
    # Below z_lim: map each z to the redshift whose age equals
    # age_of_universe - lookback_time(z) - delay_time.
    # NOTE(review): 13.7869 Gyr is hard-coded as the age of the universe —
    # verify it matches Planck18.age(0).
    # At/above z_lim the replacement redshift is set to 999, which makes
    # the returned SFR negligibly small.
    z = np.piecewise(z, [z < z_lim, z >= z_lim],
[lambda z:np.array([z_at_value(Planck18.age, 13.7869 * u.Gyr - Planck18.lookback_time(i) - delay_time * u.Gyr) for i in z]),
lambda z:999])
    return (1+z)**2.7/(1+((1+z)/2.9)**5.6)
def sfr(z):
    """Return the number density of star forming rate at redshift z.
    Follows Madau & Dickinson (2014), eq. 15. For more info see
    https://arxiv.org/pdf/1403.0007.pdf
    """
    # psi(z) ∝ (1+z)^2.7 / (1 + ((1+z)/2.9)^5.6)
    zp1 = 1 + z
    return zp1 ** 2.7 / (1 + (zp1 / 2.9) ** 5.6)
def smd(z, H_0=67.74, W_m=0.3089, W_v=0.6911):
    """Return the number density of Stellar Mass Density at redshift z.
    Follows Madau & Dickinson (2014), eq. 2 & 15. For more info see
    https://arxiv.org/pdf/1403.0007.pdf
    """
    def _integrand(zz):
        # SFR density divided by the Hubble term H_0*E(z).
        u = zz + 1
        return u ** 1.7 / (1 + (u / 2.9) ** 5.6) * (1 / (H_0 * (W_m * u ** 3 + W_v) ** 0.5))

    def _csmd(zz):
        # Cumulative stellar mass density: integral from zz to infinity.
        return 0.01095 * quad(_integrand, zz, np.inf)[0]

    return np.vectorize(_csmd)(z)
|
TRASALREPO_NAMEfrbpoppyPATH_START.@frbpoppy_extracted@frbpoppy-master@frbpoppy@precalc.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "astropy/astroquery",
"repo_path": "astroquery_extracted/astroquery-main/astroquery/skyview/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from astropy import config as _config
class Conf(_config.ConfigNamespace):
    """
    Configuration parameters for `astroquery.skyview`.
    """
    # Base URL of the SkyView query form; overridable through astropy's
    # configuration system.
    url = _config.ConfigItem(
'http://skyview.gsfc.nasa.gov/current/cgi/basicform.pl',
'SkyView URL')
# Module-level singleton holding the configuration values.
conf = Conf()
from .core import SkyView, SkyViewClass
__all__ = ['SkyView', 'SkyViewClass',
'Conf', 'conf',
]
|
astropyREPO_NAMEastroqueryPATH_START.@astroquery_extracted@astroquery-main@astroquery@skyview@__init__.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "tardis-sn/tardis",
"repo_path": "tardis_extracted/tardis-main/docs/conf.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
# NOTE(review): despite the warning above, the sys.path.insert two lines up is
# active (not commented out) — confirm this is intentional for this project.
# Load all of the global Astropy configuration
intersphinx_mapping = {
    'python': ('http://docs.python.org/', None),
    'numpy': ('http://docs.scipy.org/doc/numpy/', None),
    'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
    'matplotlib': ('http://matplotlib.sourceforge.net/', None),
    'astropy': ('http://docs.astropy.org/en/stable/', None),
    'h5py': ('http://docs.h5py.org/en/latest/', None),
    'pandas': ('http://pandas.pydata.org/pandas-docs/dev/', None),
}
# import sphinx_bootstrap_theme
import sphinx_rtd_theme
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
    'sphinx.ext.todo',
    'sphinx.ext.mathjax',
    'sphinx.ext.graphviz',
    'sphinx.ext.viewcode',
    'numpydoc',
    'nbsphinx',
    'sphinx-jsonschema',
    'recommonmark',
    'sphinxcontrib.apidoc'
]
source_suffix = {
    '.rst': 'restructuredtext',
    # '.txt': 'markdown',
    '.md': 'markdown',
}
## get's rid of many toctree contains errors: see https://github.com/phn/pytpm/issues/3#issuecomment-12133978
numpydoc_show_class_members = False
extensions += ['matplotlib.sphinxext.plot_directive',
'sphinxcontrib.bibtex']
# Notebook execution: 'auto' runs notebooks that have no stored outputs.
nbsphinx_execute = 'auto'
nbsphinx_execute_arguments = [
    "--InlineBackend.figure_formats={'svg', 'pdf'}",
    "--InlineBackend.rc={'figure.dpi': 96}",
]
nbsphinx_prolog = """
This notebook is available at
https://github.com/tardis-sn/tardis/tree/master/docs/{{ env.doc2path(env.docname, base=None) }}
----
"""
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.1'
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', '_templates', '**.ipynb_checkpoints']
#exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog = """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = u'TARDIS'
author = u'TARDIS collaboration'
copyright = u'2013, ' + author
master_doc = 'index'
#default_role = 'obj'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
import tardis
# The short X.Y version.
version = tardis.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = tardis.__version__
# -- APIDoc configuration -----------------------------------------------------
apidoc_module_dir = '../tardis'
apidoc_output_dir = 'api'
apidoc_excluded_paths = ['*tests*', '*setup_package*']
apidoc_separate_modules = True
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
html_static_path = ['_static']
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'tardis_logo.ico'
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
#html_extra_path = ['research/code_comparison/plasma_compare/tardis_example.yml']
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
|
tardis-snREPO_NAMEtardisPATH_START.@tardis_extracted@tardis-main@docs@conf.py@.PATH_END.py
|
{
"filename": "data_syn.py",
"repo_name": "quatrope/feets",
"repo_path": "feets_extracted/feets-master/res/paper/reports/experiments/data_syn.py",
"type": "Python"
}
|
"""Synthetic light-curve fixtures for the feets experiments.

Bug fix: `np` was used before `import numpy as np`, which only appeared
halfway down the script (in the PERIODIC section), so running the module
raised NameError on the first line. The import is now hoisted to the top.
"""
import numpy as np

# NORMAL: unseeded Gaussian light curve with small Gaussian errors.
time_normal = np.arange(10000)
mag_normal = np.random.normal(size=10000)
error_normal = np.random.normal(loc=1, scale=0.008, size=10000)
mag_normal2 = np.random.normal(size=10000)
error_normal2 = np.random.normal(loc=1, scale=0.008, size=10000)
lc_normal = {
    "time": time_normal,
    "magnitude": mag_normal,
    "error": error_normal,
    "magnitude2": mag_normal2,
    "aligned_time": time_normal,
    "aligned_magnitude": mag_normal,
    "aligned_magnitude2": mag_normal2,
    "aligned_error": error_normal,
    "aligned_error2": error_normal2}

# PERIODIC: seeded (RandomState(42)) sinusoid with 10% noise.
rand = np.random.RandomState(42)
time_periodic = 100 * rand.rand(100)
mag_periodic = np.sin(2 * np.pi * time_periodic) + 0.1 * rand.randn(100)
lc_periodic = {"time": time_periodic, "magnitude": mag_periodic}

# UNIFORM: magnitudes drawn uniformly on [0, 1).
lc_uniform = {
    "time": np.arange(10000),
    "magnitude": np.random.uniform(size=10000)}
quatropeREPO_NAMEfeetsPATH_START.@feets_extracted@feets-master@res@paper@reports@experiments@data_syn.py@.PATH_END.py
|
{
"filename": "reciprocal_kprime.py",
"repo_name": "CaymanUnterborn/ExoPlex",
"repo_path": "ExoPlex_extracted/ExoPlex-master/ExoPlex/burnman/eos/reciprocal_kprime.py",
"type": "Python"
}
|
from __future__ import absolute_import
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2017 by the BurnMan team, released under the GNU
# GPL v2 or later.
import scipy.optimize as opt
from scipy.special import gamma, gammainc
from . import equation_of_state as eos
from ..utils.math import bracket
import warnings
import numpy as np
# Try to import the jit from numba. If it is
# not available, just go with the standard
# python interpreter
try:
    from numba import jit
except ImportError:
    # numba not installed: make @jit a no-op decorator so the module still
    # works, just without JIT compilation.
    def jit(fn):
        return fn
@jit
def _delta_PoverK_from_P(PoverK, pressure, K_0, Kprime_0, Kprime_inf):
    """Residual of eq. 58 in the RKprime EoS; zero when PoverK equals P/K at `pressure`."""
    return PoverK - (pressure/K_0)*np.power((1. - Kprime_inf*PoverK), Kprime_0/Kprime_inf) # eq. 58
@jit
def _delta_PoverK_from_V(PoverK, V, V_0, K_0, Kprime_0, Kprime_inf):
    """Residual of eq. 61 in the RKprime EoS; zero at the PoverK consistent with volume V."""
    Kprime_ratio = Kprime_0 / Kprime_inf
    return ( np.log( V_0 / V ) +
Kprime_ratio / Kprime_inf * np.log(1. - Kprime_inf * PoverK) +
(Kprime_ratio - 1.) * PoverK ) # eq. 61
def _upper_incomplete_gamma(z, a):
"""
An implementation of the non-regularised upper incomplete gamma
function. Computed using the relationship with the regularised
lower incomplete gamma function (scipy.special.gammainc).
Uses the recurrence relation wherever z<0.
"""
n = int(-np.floor(z))
if n > 0:
z = z + n
u_gamma = (1. - gammainc(z, a))*gamma(z)
for i in range(n):
z = z - 1.
u_gamma = (u_gamma - np.power(a, z)*np.exp(-a))/z
return u_gamma
else:
return (1. - gammainc(z, a))*gamma(z)
def _PoverK_from_P(pressure, params):
    """
    Calculates the pressure:bulk modulus ratio
    from a given pressure using brentq optimization
    """
    args = ((pressure - params['P_0']), params['K_0'],
            params['Kprime_0'], params['Kprime_inf'])
    # P/K is bracketed between 1/(K'_inf - K'_0) and 1/K'_inf; the eps
    # offsets keep brentq strictly inside the open interval where the
    # residual of eq. 58 is well defined.
    return opt.brentq(_delta_PoverK_from_P,
                      1./(params['Kprime_inf'] - params['Kprime_0']) + np.finfo(float).eps,
                      1./params['Kprime_inf'] - np.finfo(float).eps,
                      args=args)
def _PoverK_from_V(volume, params):
    """
    Calculates the pressure:bulk modulus ratio
    from a given volume using brentq optimization
    """
    args = (volume, params['V_0'], params['K_0'],
            params['Kprime_0'], params['Kprime_inf'])
    # Same bracket as _PoverK_from_P, but the root of the eq. 61 residual.
    return opt.brentq(_delta_PoverK_from_V,
                      1./(params['Kprime_inf'] - params['Kprime_0']) + np.finfo(float).eps,
                      1./params['Kprime_inf'] - np.finfo(float).eps,
                      args=args)
def bulk_modulus(pressure, params):
    """
    Returns the bulk modulus at a given pressure

    :param pressure: pressure [Pa]
    :param params: parameter dictionary with at least 'K_0', 'Kprime_0',
        'Kprime_inf' (and 'P_0', used when solving for P/K)
    :return: bulk modulus [Pa]
    """
    PoverK = _PoverK_from_P(pressure, params)
    # K = K_0 * (1 - K'_inf * P/K)^(-K'_0/K'_inf)
    K = params['K_0']*np.power((1. - params['Kprime_inf']*PoverK), -
                               params['Kprime_0']/params['Kprime_inf'])
    return K
def shear_modulus(pressure, params):
    """
    Returns the shear modulus at a given pressure

    :param pressure: pressure [Pa]
    :param params: parameter dictionary with 'G_0', 'K_0', 'Kprime_inf'
        and 'Gprime_inf'
    :return: shear modulus [Pa]
    """
    # Shear modulus scaled from the bulk modulus with a pressure correction.
    G = ( params['G_0']/params['K_0'] * bulk_modulus(pressure, params) -
          (params['G_0']/params['K_0']*params['Kprime_inf'] - params['Gprime_inf']) * pressure )
    return G # eq. 78
class RKprime(eos.EquationOfState):
    # NOTE: docstrings in this class contain LaTeX backslash sequences
    # (\infty, \exp, \mathcal); they are raw strings (r""") to avoid
    # invalid-escape-sequence warnings in Python >= 3.6.
    r"""
    Class for the isothermal reciprocal K-prime equation of state
    detailed in :cite:`StaceyDavis2004`. This equation of state is
    a development of work by :cite:`Keane1954` and :cite:`Stacey2000`,
    making use of the fact that :math:`K'` typically varies smoothly
    as a function of :math:`P/K`, and is thermodynamically required to
    exceed 5/3 at infinite pressure.

    It is worth noting that this equation of state rapidly becomes
    unstable at negative pressures, so should not be trusted to provide
    a good *HT-LP* equation of state using a thermal pressure
    formulation. The negative root of :math:`dP/dK`
    can be found at :math:`K/P = K'_{\infty} - K'_0`,
    which corresponds to a bulk modulus of
    :math:`K = K_0 ( 1 - K'_{\infty}/K'_0 )^{K'_0/K'_{\infty}}`
    and a volume of
    :math:`V = V_0 ( K'_0 / (K'_0 - K'_{\infty}) )^{K'_0/{K'}^2_{\infty}} \exp{(-1/K'_{\infty})}`.

    This equation of state has no temperature dependence.
    """

    def volume(self, pressure, temperature, params):
        """
        Returns volume :math:`[m^3]` as a function of pressure :math:`[Pa]`.
        """
        Kprime_ratio = params['Kprime_0']/params['Kprime_inf']
        PoverK = _PoverK_from_P(pressure, params)
        V = params['V_0'] * np.exp( Kprime_ratio/params['Kprime_inf'] *
                                    np.log(1. - params['Kprime_inf'] * PoverK) +
                                    (Kprime_ratio - 1.) * PoverK ) # Eq. 61
        return V

    def pressure(self, temperature, volume, params):
        """
        Returns pressure :math:`[Pa]` as a function of volume :math:`[m^3]`.
        """
        PoverK = _PoverK_from_V(volume, params)
        # P = P_0 + K * (P/K), with K expanded via eq. 58.
        return params['P_0'] + ( params['K_0'] * PoverK *
                                 np.power(1. - params['Kprime_inf'] * PoverK,
                                          -params['Kprime_0']/params['Kprime_inf']) )

    def isothermal_bulk_modulus(self, pressure, temperature, volume, params):
        """
        Returns isothermal bulk modulus :math:`K_T` :math:`[Pa]` as a function of pressure :math:`[Pa]`,
        temperature :math:`[K]` and volume :math:`[m^3]`.
        """
        return bulk_modulus(pressure, params)

    def adiabatic_bulk_modulus(self, pressure, temperature, volume, params):
        """
        Returns adiabatic bulk modulus :math:`K_s` of the mineral. :math:`[Pa]`.

        Identical to the isothermal value here, as this equation of state
        has no temperature dependence.
        """
        return bulk_modulus(pressure, params)

    def shear_modulus(self, pressure, temperature, volume, params):
        """
        Returns shear modulus :math:`G` of the mineral. :math:`[Pa]`
        """
        return shear_modulus(pressure, params)

    def entropy(self, pressure, temperature, volume, params):
        r"""
        Returns the molar entropy :math:`\mathcal{S}` of the mineral. :math:`[J/K/mol]`

        Zero by construction for this athermal equation of state.
        """
        return 0.

    def _intVdP(self, xi, params):
        # Indefinite integral of V dP, expressed in terms of the
        # non-regularised upper incomplete gamma function; used by
        # gibbs_free_energy.
        a = params['Kprime_inf']
        b = (params['Kprime_0']/params['Kprime_inf']/params['Kprime_inf'] -
             params['Kprime_0']/params['Kprime_inf'] - 1.)
        c = params['Kprime_0'] - params['Kprime_inf']
        f = (params['Kprime_0']/params['Kprime_inf'] - 1.)
        i1 = float( params['V_0'] * params['K_0'] *
                    np.exp(f / a) * np.power(a, b - 1.) /
                    np.power(f, b + 2.) *
                    ( f * params['Kprime_0'] * _upper_incomplete_gamma( b + 1. ,
                                                                       f * (1./a - xi) ) -
                      a * c * _upper_incomplete_gamma( b + 2., f * (1./a - xi) ) ) )
        return i1

    def gibbs_free_energy(self, pressure, temperature, volume, params):
        r"""
        Returns the Gibbs free energy :math:`\mathcal{G}` of the mineral. :math:`[J/mol]`
        """
        # G = E0 + int VdP (when S = 0)
        K = self.isothermal_bulk_modulus(pressure, temperature, volume, params)
        return params['E_0'] + params['P_0']*params['V_0'] + self._intVdP((pressure - params['P_0'])/K, params) - self._intVdP(0., params)

    def molar_internal_energy(self, pressure, temperature, volume, params):
        r"""
        Returns the internal energy :math:`\mathcal{E}` of the mineral. :math:`[J/mol]`
        """
        # E = G - PV (+ TS)
        return ( self.gibbs_free_energy(pressure, temperature, volume, params) - pressure*volume)

    def molar_heat_capacity_v(self, pressure, temperature, volume, params):
        """
        Since this equation of state does not contain temperature effects, simply return a very large number. :math:`[J/K/mol]`
        """
        return 1.e99

    def molar_heat_capacity_p(self, pressure, temperature, volume, params):
        """
        Since this equation of state does not contain temperature effects, simply return a very large number. :math:`[J/K/mol]`
        """
        return 1.e99

    def thermal_expansivity(self, pressure, temperature, volume, params):
        """
        Since this equation of state does not contain temperature effects, simply return zero. :math:`[1/K]`
        """
        return 0.

    def grueneisen_parameter(self, pressure, temperature, volume, params):
        """
        Since this equation of state does not contain temperature effects, simply return zero. :math:`[unitless]`
        """
        return 0.

    def validate_parameters(self, params):
        r"""
        Check for existence and validity of the parameters.
        The value for :math:`K'_{\infty}` is thermodynamically bounded
        between 5/3 and :math:`K'_0` :cite:`StaceyDavis2004`.
        """
        if 'E_0' not in params:
            params['E_0'] = 0.
        if 'P_0' not in params:
            params['P_0'] = 0.

        # If G and Gprime_inf are not included this is presumably deliberate,
        # as we can model density and bulk modulus just fine without them,
        # so just add them to the dictionary as nans
        if 'G_0' not in params:
            params['G_0'] = float('nan')
        if 'Gprime_inf' not in params:
            params['Gprime_inf'] = float('nan')

        # Check that all the required keys are in the dictionary
        expected_keys = ['V_0', 'K_0', 'Kprime_0', 'Kprime_inf', 'G_0', 'Gprime_inf']
        for k in expected_keys:
            if k not in params:
                raise KeyError('params object missing parameter : ' + k)

        # Finally, check that the values are reasonable.
        # (Comparisons with NaN are False, so the nan defaults above never warn.)
        if params['P_0'] < 0.:
            warnings.warn('Unusual value for P_0', stacklevel=2)
        if params['V_0'] < 1.e-7 or params['V_0'] > 1.e-3:
            warnings.warn('Unusual value for V_0', stacklevel=2)
        if params['K_0'] < 1.e9 or params['K_0'] > 1.e13:
            warnings.warn('Unusual value for K_0', stacklevel=2)
        if params['Kprime_0'] < 0. or params['Kprime_0'] > 10.:
            warnings.warn('Unusual value for Kprime_0', stacklevel=2)
        if params['Kprime_inf'] < 5./3. or params['Kprime_inf'] > params['Kprime_0']:
            warnings.warn('Unusual value for Kprime_inf', stacklevel=2) # eq. 17
        if params['G_0'] < 0.0 or params['G_0'] > 1.e13:
            warnings.warn('Unusual value for G_0', stacklevel=2)
        if params['Gprime_inf'] < -5. or params['Gprime_inf'] > 10.:
            warnings.warn('Unusual value for Gprime_inf', stacklevel=2)
|
CaymanUnterbornREPO_NAMEExoPlexPATH_START.@ExoPlex_extracted@ExoPlex-master@ExoPlex@burnman@eos@reciprocal_kprime.py@.PATH_END.py
|
{
"filename": "_outlinecolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/marker/colorbar/_outlinecolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OutlinecolorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Color validator for the ``scattergl.marker.colorbar.outlinecolor`` property."""
    def __init__(
        self,
        plotly_name="outlinecolor",
        parent_name="scattergl.marker.colorbar",
        **kwargs,
    ):
        super(OutlinecolorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Default edit type is "calc"; callers may override via kwargs.
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@marker@colorbar@_outlinecolor.py@.PATH_END.py
|
{
"filename": "06-Using_a_community_code.ipynb",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/doc/interactive_tutorial/06-Using_a_community_code.ipynb",
"type": "Jupyter Notebook"
}
|
```bash
%%bash
# preamble script to check and install AMUSE components if necessary
# required packages for this tutorial:
PACKAGES="mpi4py amuse-framework amuse-bhtree amuse-sse amuse-seba amuse-sphray"
# skip in case a full development install is present
pip show amuse-devel && exit 0
for package in ${PACKAGES}
do
pip show ${package} || pip install ${package}
done
```
```python
# the following fixes are highly recommended
#allow oversubscription for openMPI
import os
os.environ["OMPI_MCA_rmaps_base_oversubscribe"]="true"
# use lower cpu resources for idle codes
from amuse.support import options
options.GlobalOptions.instance().override_value_for_option("polling_interval_in_milliseconds", 10)
```
```python
%matplotlib inline
from matplotlib import pyplot
import numpy
```
So far we have been looking at some of the basic tools provided with the AMUSE framework. These tools are generally useful but mainly meant to ease working with *community* codes. *Community* codes form the core of the AMUSE framework and are the functional components to evolve astrophysical models in time or calculate properties of the physical models.
*Community* codes are existing and newly developed applications, implemented in C/C++, Fortran or another language. The AMUSE framework provides an interface to these codes so that you can interact with every code in a standard way.
```python
from amuse.lab import *
```
For example, we can calculate the HR diagram of a star by first creating a stellar evolution code.
```python
code = Seba()
print(code)
```
The interface to a *Community* code is implemented as a class with a name based on the name of the original application. In this case we will use SeBa, a stellar evolution code based on precalculated tracks.
Every code in AMUSE supports particle collections or grids. SeBa supports two particle collections, one for single stars (called `particles`) and one for binaries (called `binaries`). We will add a particle to the single stars collection.
```python
code.particles.add_particle(Particle(mass=2 | units.MSun))
print(code.particles)
```
Next, we will evolve the code for 2 billion years and store the luminosity and temperature in 1 Myr intervals:
```python
luminosity = [] | units.LSun
temperature = [] | units.K
t = 0 | units.Myr
while t < 2 | units.Gyr:
t += 1. | units.Myr
code.evolve_model(t)
luminosity.append(code.particles[0].luminosity)
temperature.append(code.particles[0].temperature)
if int(t.value_in(units.Myr)) % 100 == 0:
print("evolved until t=", t)
```
After evolving we can save the data or plot it if we want.
```python
pyplot.loglog(temperature.value_in(units.K), luminosity.value_in(units.LSun))
pyplot.xlim(pyplot.xlim()[::-1])
pyplot.ylim(.1,1.e4)
```
Now we are done, and need to stop the code. After stopping the code we cannot interact with it any longer and requesting any particle attributes of the particles in the code will result in an error.
```python
code.stop()
```
Code interaction sequence
-------------------------
All interaction with codes in amuse follows the same general sequence. An instance of the code interface is created, parameters controlling the code are set, particles are added or grids filled, the model is evolved, data is retrieved and the code is stopped.
```python
code = Bhtree() # 1 code is created and started
code.parameters.epsilon_squared = (0.01 | nbody_system.length)**2 # 2 parameters are set
code.particles.add_particles(new_plummer_model(100)) # 3 particles are added
code.evolve_model(1 | nbody_system.time) # 4 model is evolved in the code
write_set_to_file(
code.particles,
'evolved_cluster.amuse',
'amuse',
overwrite_file=True,
) # 5 data is retrieved
print('evolved until', code.model_time)
code.stop() # 6 code is stopped
```
All codes follow this pattern; let's do a radiative transport example.
```python
code = Sphray() # 1 code is created and started
code.parameters.box_size = 2.0 | units.kpc # 2 parameters are set
code.parameters.number_of_rays = 10000 | units.Myr**-1
gas = new_ism_cube(5000, 1 | units.kpc, 0.01 | (units.amu / units.cm**3))
gas.h_smooth = 0.1 | units.kpc
gas.xion = 0.00
source = Particle(position=[0, 0, 0] | units.parsec, luminosity=1 | 1.e50 / units.s)
code.gas_particles.add_particles(gas) # 3 particles are added (sphray has two collections, gas and src)
code.src_particles.add_particle(source)
code.evolve_model(0.5 | units.Myr) # 4 model is evolved in the code
pyplot.scatter(
gas.position.lengths().value_in(units.kpc),
code.gas_particles.xion, # 5 data is retrieved
)
code.stop() # 6 code is stopped
```
Some steps are optional, for example in a lot of codes you do not need to set any parameters if you have a simple problem. Others are critical, for example, you'll need to add particles to the code or else it will not evolve anything and some codes will return with an error.
In AMUSE, codes are combined by having multiple `evolve_model` steps and changing attribute values or adding and removing particles in between these steps. A first order combination of stellar dynamics and stellar evolutions simply copies the evolved masses from the stellar evolution code to the stellar dynamics code. The combined script contains the 6 steps of the generic sequence for each code and creates an interaction between the codes.
```python
converter = nbody_system.nbody_to_si(100 | units.MSun, 1 | units.parsec)
stars = new_plummer_model(100, converter)
stars.mass = new_salpeter_mass_distribution(100, mass_min=1 | units.MSun)
code1 = Hermite(converter) # 1 code is created and started
code1.parameters.epsilon_squared = (0.01 | nbody_system.length)**2 # 2 parameters are set
code1.particles.add_particles(stars) # 3 particles are added
code2 = Seba() # 1 code is created and started
code2.particles.add_particles(stars) # 3 particles are added
dt = 0.5 | units.Myr
t = 0.0 | units.Myr
print(
"total mass (at t = %s): %s"
% (t, code1.particles.mass.sum().in_(units.MSun))
)
while t < 10 | units.Myr:
t += dt
code1.evolve_model(t) # 4 model is evolved in the code
code2.evolve_model(t) # 4 model is evolved in the code
code1.particles.mass = code2.particles.mass
print(
"total mass (at t = %s): %s"
% (t, code1.particles.mass.sum().in_(units.MSun)) # 5 data is retrieved
)
code1.stop() # 6 code is stopped
code2.stop() # 6 code is stopped
```
In the AMUSE primer many more scripts are shown and the primer shows how to create more difficult and interesting interactions on a step by step basis. In this tutorial we will continue with showing the individual AMUSE framework tools and the general structure of interacting with a single code. The examples in the AMUSE primer focus on getting the science correct, where the tutorial will show you how to get the AMUSE syntax correct. For example in the above script we should think about when and how to take the timesteps, the mass loss should probably be a smooth function in time for the stellar dynamics to make sense (so smaller steps when interesting physics happens in a star). Considerations like these are handled in detail in the AMUSE primer.
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@doc@interactive_tutorial@06-Using_a_community_code.ipynb@.PATH_END.py
|
{
"filename": "mesh_validation.py",
"repo_name": "astertaylor/SAMUS",
"repo_path": "SAMUS_extracted/SAMUS-main/SAMUS/testing/mesh_validation.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 11 17:03:40 2022
@author: astertaylor
This script is created to demonstrate the validity of the use of SAMUS's
meshes. We run twice, once with n=0, the default, and once with n=1, which has
~2x the precision. The outputs demonstrate their equivalence, and thus the
convergence of this system.
"""
import SAMUS
# Baseline run: n=0 is the default (coarser) mesh refinement level.
# create class
standard_class = SAMUS.model("coarse_mesh", a=20, b=50, c=110, mu=10**7, n=0)
# runs simulation with only 5 time steps per rotation
standard_class.run_model(5, rtol=0.05, data_name='hyperbolic_traj')
# Comparison run: n=1 gives ~2x the precision (see module docstring);
# equivalent outputs demonstrate convergence of the mesh.
# create class
halved_class = SAMUS.model("finer_mesh", a=20, b=50, c=110, mu=10**7, n=1)
# runs simulation with only 5 time steps per rotation
halved_class.run_model(5, rtol=0.05, data_name='hyperbolic_traj')
|
astertaylorREPO_NAMESAMUSPATH_START.@SAMUS_extracted@SAMUS-main@SAMUS@testing@mesh_validation.py@.PATH_END.py
|
{
"filename": "paramnames.py",
"repo_name": "cmbant/getdist",
"repo_path": "getdist_extracted/getdist-master/getdist/paramnames.py",
"type": "Python"
}
|
import os
import fnmatch
from itertools import chain
def makeList(roots):
    """
    Ensure the argument is list-like.

    Lists and tuples are returned unchanged; any other value is wrapped
    in a single-element list.

    :param roots: the value to normalise
    :return: ``roots`` itself if it is a list or tuple, otherwise ``[roots]``
    """
    return roots if isinstance(roots, (list, tuple)) else [roots]
def escapeLatex(text):
    """
    Escape underscores for LaTeX rendering when matplotlib's
    ``text.usetex`` option is enabled; otherwise return the text unchanged.
    Falsy inputs (empty string, None) are returned as-is without importing
    matplotlib.
    """
    if not text:
        return text
    import matplotlib
    if not matplotlib.rcParams['text.usetex']:
        return text
    return text.replace('_', '{\\textunderscore}')
def mergeRenames(*dicts, **kwargs):
    """
    Joins several dicts of renames.

    Names connected through any chain of renames across the inputs are
    grouped together; each group is keyed by a name taken from the
    left-most input dictionary that mentions it.

    If `keep_names_1st=True` (default: `False`), keeps empty entries when possible
    in order to preserve the parameter names of the first input dictionary.

    Returns a merged dictionary of renames,
    whose keys are chosen from the left-most input.
    """
    keep_names_1st = kwargs.pop("keep_names_1st", False)
    if kwargs:
        raise ValueError("kwargs not recognized: %r" % kwargs)
    # One set per (key, renames) entry: the key together with its aliases.
    sets = list(chain(*[[set([k] + (makeList(v or [])))
                         for k, v in dic.items()] for dic in dicts]))
    # If two sets have elements in common, join them.
    # Repeatedly merge sets[0] with any overlapping set; once disjoint from
    # all others it is moved to `out` (transitive closure of the relation).
    something_changed = True
    out = []
    while something_changed:
        something_changed = False
        for i in range(1, len(sets)):
            if sets[0].intersection(sets[i]):
                sets[0] = sets[0].union(sets.pop(i))
                something_changed = True
                break
        if not something_changed and sets:
            out += [sets.pop(0)]
            if len(sets):
                something_changed = True
    merged = {}
    # Choose each group's key from the left-most input dict containing one
    # of the group's names; the remaining names become that key's renames.
    for params in out:
        for dic in dicts:
            p = set(dic).intersection(params)
            if p and (params != p or keep_names_1st):
                key = p.pop()
                params.remove(key)
                merged[key] = list(params)
                break
    return merged
class ParamInfo:
    """
    Parameter information object.

    :ivar name: the parameter name tag (no spacing or punctuation)
    :ivar label: latex label (without enclosing $)
    :ivar comment: any descriptive comment describing the parameter
    :ivar isDerived: True if a derived parameter, False otherwise (e.g. for MCMC parameters)
    """

    def __init__(self, line=None, name='', label='', comment='', derived=False,
                 renames=None, number=None):
        self.setName(name)
        self.isDerived = derived
        self.label = label or name
        self.comment = comment
        self.filenameLoadedFrom = ''
        self.number = number
        self.renames = makeList(renames or [])
        if line is not None:
            # A full .paramnames-style line overrides name/label/comment.
            self.setFromString(line)

    def nameEquals(self, name):
        """
        Tests whether this parameter has the given name.

        :param name: a name string or another :class:`ParamInfo` instance
        :return: True if the names match
        """
        # Bug fix: previously compared the argument with itself
        # (``name == name``, always True for strings) instead of with
        # ``self.name``.
        if isinstance(name, ParamInfo):
            return self.name == name.name
        else:
            return self.name == name

    def setFromString(self, line):
        """
        Sets name, label and comment from a .paramnames-style line of the
        form ``name[*] [label [# comment]]``; a trailing '*' on the name
        marks the parameter as derived.
        """
        items = line.split(None, 1)
        name = items[0]
        if name.endswith('*'):
            name = name.strip('*')
            self.isDerived = True
        self.setName(name)
        if len(items) > 1:
            tmp = items[1].split('#', 1)
            # '!' is stored as an escaped form of '\' in labels
            self.label = tmp[0].strip().replace('!', '\\')
            if len(tmp) > 1:
                self.comment = tmp[1].strip()
            else:
                self.comment = ''
        return self

    def setName(self, name):
        """Sets the name tag, validating that it is a legal parameter name."""
        if not isinstance(name, str):
            raise ValueError('"name" must be a parameter name string not %s: %s' % (type(name), name))
        if '*' in name or '?' in name or ' ' in name or '\t' in name:
            raise ValueError('Parameter names must not contain spaces, * or ?')
        self.name = name

    def getLabel(self):
        """Returns the latex label if set, otherwise the plain name."""
        if self.label:
            return self.label
        else:
            return self.name

    def latexLabel(self):
        """Returns the label wrapped in $...$, or the plain name if no label."""
        if self.label:
            return '$' + self.label + '$'
        else:
            return self.name

    def setFromStringWithComment(self, items):
        """Sets from a (line, comment) pair; a comment of 'NULL' means none."""
        self.setFromString(items[0])
        if items[1] != 'NULL':
            self.comment = items[1]

    def string(self, wantComments=True):
        """Formats as a .paramnames line: name[*] <tab> label [<tab>#comment]."""
        res = self.name
        if self.isDerived:
            res += '*'
        res = res + '\t' + self.label
        if wantComments and self.comment != '':
            res = res + '\t#' + self.comment
        return res

    def __str__(self):
        return self.string()
class ParamList:
    """
    Holds an ordered list of :class:`ParamInfo` objects describing a set of parameters.

    :ivar names: list of :class:`ParamInfo` objects
    """
    # Provided by subclasses that support loading from file
    # (see :class:`ParamNames`).
    loadFromFile: callable

    def __init__(self, fileName=None, setParamNameFile=None, default=0, names=None, labels=None):
        """
        :param fileName: name of .paramnames file to load from
        :param setParamNameFile: override specific parameter names' labels using another file
        :param default: set to int>0 to automatically generate that number of default names and labels
            (param1, p_{1}, etc.)
        :param names: a list of name strings to use
        :param labels: optional list of latex labels to assign to the names
        """
        self.names = []
        self.info_dict = None  # if read from yaml file, saved here
        if default:
            self.setDefault(default)
        if names is not None:
            self.setWithNames(names)
        if fileName is not None:
            self.loadFromFile(fileName)
        if setParamNameFile is not None:
            self.setLabelsFromParamNames(setParamNameFile)
        if labels is not None:
            self.setLabels(labels)

    def setDefault(self, n):
        # Generate n placeholder parameters: param1 (p_{1}), param2 (p_{2}), ...
        self.names = [ParamInfo(name='param' + str(i + 1), label='p_{' + str(i + 1) + '}') for i in range(n)]
        return self

    def setWithNames(self, names):
        # Each string is parsed as a .paramnames line (ParamInfo's first
        # positional argument is `line`), so a trailing '*' marks derived.
        self.names = [ParamInfo(name) for name in names]
        return self

    def setLabels(self, labels):
        """Assigns latex labels to the stored parameters, in order."""
        for name, label in zip(self.names, labels):
            name.label = label

    def numDerived(self):
        """Number of derived parameters."""
        return len([1 for info in self.names if info.isDerived])

    def list(self):
        """
        Gets a list of parameter name strings
        """
        return [name.name for name in self.names]

    def labels(self):
        """
        Gets a list of parameter labels
        """
        return [name.label for name in self.names]

    def listString(self):
        """Space-separated string of all parameter names."""
        return " ".join(self.list())

    def numParams(self):
        """Total number of parameters."""
        return len(self.names)

    def numNonDerived(self):
        """Number of non-derived (running) parameters."""
        return len([1 for info in self.names if not info.isDerived])

    def parWithNumber(self, num):
        """Gets the parameter whose `number` attribute equals num, or None."""
        for par in self.names:
            if par.number == num:
                return par
        return None

    def _check_name_str(self, name):
        # Guard against accidentally passing a ParamInfo or a list here.
        if not isinstance(name, str):
            raise ValueError('"name" must be a parameter name string not %s: %s' % (type(name), name))

    def parWithName(self, name, error=False, renames=None):
        """
        Gets the :class:`ParamInfo` object for the parameter with the given name

        :param name: name of the parameter
        :param error: if True raise an error if parameter not found, otherwise return None
        :param renames: a dictionary that is used to provide optional name mappings
            to the stored names
        """
        self._check_name_str(name)
        given_names = {name}
        if renames:
            given_names.update(makeList(renames.get(name, [])))
        for par in self.names:
            # Match on any known alias: the stored renames plus any
            # caller-supplied mapping for this parameter.
            known_names = set([par.name] + makeList(getattr(par, 'renames', [])) +
                              (makeList(renames.get(par.name, [])) if renames else []))
            if known_names.intersection(given_names):
                return par
        if error:
            raise Exception("parameter name not found: %s" % name)
        return None

    def numberOfName(self, name):
        """
        Gets the parameter number of the given parameter name

        :param name: parameter name tag
        :return: index of the parameter, or -1 if not found
        """
        self._check_name_str(name)
        for i, par in enumerate(self.names):
            if par.name == name:
                return i
        return -1

    def hasParam(self, name):
        """Whether a parameter with the given (exact) name exists."""
        return self.numberOfName(name) != -1

    def parsWithNames(self, names, error=False, renames=None):
        """
        gets the list of :class:`ParamInfo` instances for given list of name strings.
        Also expands any names that are globs into list with matching parameter names

        :param names: list of name strings
        :param error: if True, raise an error if any name not found,
            otherwise returns None items. Can be a list of length `len(names)`
        :param renames: optional dictionary giving mappings of parameter names
        """
        res = []
        if isinstance(names, str):
            names = [names]
        errors = makeList(error)
        if len(errors) < len(names):
            # Repeat the error flags so each name has one (zip truncates).
            errors = len(names) * errors
        for name, error in zip(names, errors):
            if isinstance(name, ParamInfo):
                res.append(name)
            else:
                if '?' in name or '*' in name:
                    # glob pattern: expand to all matching parameters
                    res += self.getMatches(name)
                else:
                    res.append(self.parWithName(name, error, renames))
        return res

    def getMatches(self, pattern, strings=False):
        """Gets parameters (or name strings if strings=True) matching a glob pattern."""
        pars = []
        for par in self.names:
            if fnmatch.fnmatchcase(par.name, pattern):
                if strings:
                    pars.append(par.name)
                else:
                    pars.append(par)
        return pars

    def setLabelsFromParamNames(self, fname):
        """Sets labels from another .paramnames file or ParamNames instance."""
        self.setLabelsAndDerivedFromParamNames(fname, False)

    def setLabelsAndDerivedFromParamNames(self, fname, set_derived=True):
        """Sets labels (and optionally the derived flags) from another name file."""
        if isinstance(fname, ParamNames):
            p = fname
        else:
            p = ParamNames(fname)
        for par in p.names:
            param = self.parWithName(par.name)
            if param is not None:
                param.label = par.label
                if set_derived:
                    param.isDerived = par.isDerived

    def getRenames(self, keep_empty=False):
        """
        Gets dictionary of renames known to each parameter.
        """
        return {param.name: getattr(param, "renames", [])
                for param in self.names
                if (getattr(param, "renames", False) or keep_empty)}

    def updateRenames(self, renames):
        """
        Updates the renames known to each parameter with the given dictionary of renames.
        """
        merged_renames = mergeRenames(
            self.getRenames(keep_empty=True), renames, keep_names_1st=True)
        known_names = self.list()
        for name, rename in merged_renames.items():
            if name in known_names:
                self.parWithName(name).renames = rename

    def fileList(self, fname):
        """Reads a text file and returns its list of lines."""
        with open(fname, encoding='utf-8-sig') as f:
            textFileLines = f.readlines()
        return textFileLines

    def deleteIndices(self, indices):
        """Removes the parameters at the given list of indices."""
        self.names = [name for i, name in enumerate(self.names) if i not in indices]

    def filteredCopy(self, params):
        """Returns a copy keeping only parameters also present in `params`
        (a list of names or another ParamList)."""
        usedNames = self.__class__()
        for name in self.names:
            if isinstance(params, list):
                p = name.name in params
            else:
                p = params.parWithName(name.name)
            if p:
                usedNames.names.append(name)
        return usedNames

    def addDerived(self, name, **kwargs):
        """
        adds a new parameter

        :param name: name tag for the new parameter
        :param kwargs: other arguments for constructing the new :class:`ParamInfo`
        """
        if kwargs.get('derived') is None:
            kwargs['derived'] = True
        self._check_name_str(name)
        kwargs['name'] = name
        self.names.append(ParamInfo(**kwargs))
        return self.names[-1]

    def maxNameLen(self):
        """Length of the longest parameter name."""
        return max([len(name.name) for name in self.names])

    def parFormat(self):
        # "%-Ns" format for left-aligned, padded name columns (min width 10).
        maxLen = max(9, self.maxNameLen()) + 1
        return "%-" + str(maxLen) + "s"

    def name(self, ix, tag_derived=False):
        """Name of parameter ix, optionally with a '*' suffix if derived."""
        par = self.names[ix]
        if tag_derived and par.isDerived:
            return par.name + '*'
        else:
            return par.name

    def __str__(self):
        text = ''
        for par in self.names:
            text += par.string() + '\n'
        return text

    def saveAsText(self, filename):
        """
        Saves to a plain text .paramnames file

        :param filename: filename to save to
        """
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(str(self))

    def getDerivedNames(self):
        """
        Get the names of all derived parameters
        """
        return [name.name for name in self.names if name.isDerived]

    def getRunningNames(self):
        """
        Get the names of all running (non-derived) parameters
        """
        return [name.name for name in self.names if not name.isDerived]
class ParamNames(ParamList):
    """
    Holds an ordered list of :class:`ParamInfo` objects describing a set of parameters,
    inheriting from :class:`ParamList`.

    Can be constructed programmatically, and also loaded and saved to a .paramnames files, which is a plain text file
    giving the names and optional label and comment for each parameter, in order.

    :ivar names: list of :class:`ParamInfo` objects describing each parameter
    :ivar filenameLoadedFrom: if loaded from file, the file name
    """

    def loadFromFile(self, fileName):
        """
        loads from fileName, a plain text .paramnames file or a "full" yaml file
        """
        self.filenameLoadedFrom = os.path.split(fileName)[1]
        extension = os.path.splitext(fileName)[-1]
        if extension == '.paramnames':
            # One ParamInfo per non-empty line of the text file.
            with open(fileName, encoding='utf-8-sig') as f:
                self.names = [ParamInfo(line) for line in [s.strip() for s in f] if line != '']
        elif extension.lower() in ('.yaml', '.yml'):
            from getdist import yaml_tools
            from getdist.cobaya_interface import get_info_params, is_sampled_param
            from getdist.cobaya_interface import is_derived_param, _p_label, _p_renames
            self.info_dict = yaml_tools.yaml_load_file(fileName)
            info_params = get_info_params(self.info_dict)
            # first sampled, then derived
            self.names = [ParamInfo(name=param, label=(info or {}).get(_p_label, param),
                                    renames=(info or {}).get(_p_renames))
                          for param, info in info_params.items() if is_sampled_param(info)]
            self.names += [ParamInfo(name=param, label=(info or {}).get(_p_label, param),
                                     renames=(info or {}).get(_p_renames), derived=True)
                           for param, info in info_params.items() if is_derived_param(info)]
        else:
            # Fixed typo in the error message ("ParanNames" -> "ParamNames").
            raise ValueError('ParamNames must be loaded from .paramnames or .yaml/.yml file, '
                             'found %s' % fileName)

    def loadFromKeyWords(self, keywordProvider):
        """
        Loads parameters from an object exposing keyWord_int/keyWordAndComment
        accessors; reads 'num_params_used', 'num_derived_params' and one
        'param_i' entry per parameter.

        :return: total number of parameters read
        """
        num_params_used = keywordProvider.keyWord_int('num_params_used')
        num_derived_params = keywordProvider.keyWord_int('num_derived_params')
        nparam = num_params_used + num_derived_params
        for i in range(nparam):
            info = ParamInfo()
            info.setFromStringWithComment(keywordProvider.keyWordAndComment('param_' + str(i + 1)))
            self.names.append(info)
        return nparam

    def saveKeyWords(self, keywordProvider):
        """
        Writes the parameter set back through a keyword provider; the
        inverse of :meth:`loadFromKeyWords`.
        """
        keywordProvider.setKeyWord_int('num_params_used', len(self.names) - self.numDerived())
        keywordProvider.setKeyWord_int('num_derived_params', self.numDerived())
        for i, name in enumerate(self.names):
            # '\' is stored as '!' in keyword values
            # (undone by ParamInfo.setFromString on load).
            keywordProvider.setKeyWord('param_' + str(i + 1), name.string(False).replace('\\', '!'),
                                       name.comment)
|
cmbantREPO_NAMEgetdistPATH_START.@getdist_extracted@getdist-master@getdist@paramnames.py@.PATH_END.py
|
{
"filename": "cored_steep_ellipsoid.py",
"repo_name": "lenstronomy/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/LensModel/Profiles/cored_steep_ellipsoid.py",
"type": "Python"
}
|
__author__ = "sibirrer"
from lenstronomy.LensModel.Profiles.base_profile import LensProfileBase
import numpy as np
from lenstronomy.Util import param_util
from lenstronomy.Util import util
# Public lens-profile classes exported by this module.
__all__ = [
    "CSE",
    "CSEMajorAxis",
    "CSEMajorAxisSet",
    "CSEProductAvg",
    "CSEProductAvgSet",
]
class CSE(LensProfileBase):
    """
    Cored steep ellipsoid (CSE)

    :param axis: 'major' or 'product_avg' ; whether to evaluate corresponding to r= major axis or r= sqrt(ab)

    source:
    Keeton and Kochanek (1998)
    Oguri 2021: https://arxiv.org/pdf/2106.11464.pdf

    .. math::
        \\kappa(u;s) = \\frac{A}{2(s^2 + \\xi^2)^{3/2}}

    with

    .. math::
        \\xi(x, y) = \\sqrt{x^2 + \\frac{y^2}{q^2}}
    """
    # A: amplitude, s: core size, e1/e2: ellipticity components,
    # center_x/center_y: profile centroid.
    param_names = ["A", "s", "e1", "e2", "center_x", "center_y"]
    lower_limit_default = {
        "A": -1000,
        "s": 0,
        "e1": -0.5,
        "e2": -0.5,
        "center_x": -100,
        "center_y": -100,
    }
    upper_limit_default = {
        "A": 1000,
        "s": 10000,
        "e1": 0.5,
        "e2": 0.5,
        "center_x": 100,
        "center_y": 100,
    }
    def __init__(self, axis="product_avg"):
        # Ellipticity is handled in this class by rotating coordinates into
        # the ellipse frame; the selected model evaluates the aligned profile.
        if axis == "major":
            self.major_axis_model = CSEMajorAxis()
        elif axis == "product_avg":
            self.major_axis_model = CSEProductAvg()
        else:
            raise ValueError(
                "axis must be set to 'major' or 'product_avg'. Input is %s ." % axis
            )
        super(CSE, self).__init__()
    def function(self, x, y, a, s, e1, e2, center_x, center_y):
        """
        :param x: coordinate in image plane (angle)
        :param y: coordinate in image plane (angle)
        :param a: lensing strength
        :param s: core radius
        :param e1: eccentricity
        :param e2: eccentricity
        :param center_x: center of profile
        :param center_y: center of profile
        :return: lensing potential
        """
        phi_q, q = param_util.ellipticity2phi_q(e1, e2)
        # shift
        x_ = x - center_x
        y_ = y - center_y
        # rotate into the frame aligned with the ellipse axes
        x__, y__ = util.rotate(x_, y_, phi_q)
        # potential calculation
        f_ = self.major_axis_model.function(x__, y__, a, s, q)
        return f_
    def derivatives(self, x, y, a, s, e1, e2, center_x, center_y):
        """
        :param x: coordinate in image plane (angle)
        :param y: coordinate in image plane (angle)
        :param a: lensing strength
        :param s: core radius
        :param e1: eccentricity
        :param e2: eccentricity
        :param center_x: center of profile
        :param center_y: center of profile
        :return: deflection in x- and y-direction
        """
        phi_q, q = param_util.ellipticity2phi_q(e1, e2)
        # shift
        x_ = x - center_x
        y_ = y - center_y
        # rotate into the frame aligned with the ellipse axes
        x__, y__ = util.rotate(x_, y_, phi_q)
        f__x, f__y = self.major_axis_model.derivatives(x__, y__, a, s, q)
        # rotate deflections back
        f_x, f_y = util.rotate(f__x, f__y, -phi_q)
        return f_x, f_y
    def hessian(self, x, y, a, s, e1, e2, center_x, center_y):
        """
        :param x: coordinate in image plane (angle)
        :param y: coordinate in image plane (angle)
        :param a: lensing strength
        :param s: core radius
        :param e1: eccentricity
        :param e2: eccentricity
        :param center_x: center of profile
        :param center_y: center of profile
        :return: hessian elements f_xx, f_xy, f_yx, f_yy
        """
        phi_q, q = param_util.ellipticity2phi_q(e1, e2)
        # shift
        x_ = x - center_x
        y_ = y - center_y
        # rotate into the frame aligned with the ellipse axes
        x__, y__ = util.rotate(x_, y_, phi_q)
        f__xx, f__xy, __, f__yy = self.major_axis_model.hessian(x__, y__, a, s, q)
        # rotate back: decompose into convergence (rotation-invariant) and
        # shear, rotate the shear components by 2*phi_q, then reassemble.
        kappa = 1.0 / 2 * (f__xx + f__yy)
        gamma1__ = 1.0 / 2 * (f__xx - f__yy)
        gamma2__ = f__xy
        gamma1 = np.cos(2 * phi_q) * gamma1__ - np.sin(2 * phi_q) * gamma2__
        gamma2 = +np.sin(2 * phi_q) * gamma1__ + np.cos(2 * phi_q) * gamma2__
        f_xx = kappa + gamma1
        f_yy = kappa - gamma1
        f_xy = gamma2
        return f_xx, f_xy, f_xy, f_yy
class CSEMajorAxis(LensProfileBase):
"""
Cored steep ellipsoid (CSE) along the major axis
source:
Keeton and Kochanek (1998)
Oguri 2021: https://arxiv.org/pdf/2106.11464.pdf
.. math::
\\kappa(u;s) = \\frac{A}{2(s^2 + \\xi^2)^{3/2}}
with
.. math::
\\xi(x, y) = \\sqrt{x^2 + \\frac{y^2}{q^2}}
"""
param_names = ["A", "s", "q", "center_x", "center_y"]
lower_limit_default = {
"A": -1000,
"s": 0,
"q": 0.001,
"center_x": -100,
"center_y": -100,
}
upper_limit_default = {
"A": 1000,
"s": 10000,
"q": 0.99999,
"center_x": 100,
"center_y": 100,
}
def function(self, x, y, a, s, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a: lensing strength
:param s: core radius
:param q: axis ratio
:return: lensing potential
"""
# potential calculation
psi = np.sqrt(q**2 * (s**2 + x**2) + y**2)
Phi = (psi + s) ** 2 + (1 - q**2) * x**2
phi = q / (2 * s) * np.log(Phi) - q / s * np.log((1 + q) * s)
return a * phi
def derivatives(self, x, y, a, s, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a: lensing strength
:param s: core radius
:param q: axis ratio
:return: deflection in x- and y-direction
"""
psi = np.sqrt(q**2 * (s**2 + x**2) + y**2)
Phi = (psi + s) ** 2 + (1 - q**2) * x**2
f_x = q * x * (psi + q**2 * s) / (s * psi * Phi)
f_y = q * y * (psi + s) / (s * psi * Phi)
return a * f_x, a * f_y
def hessian(self, x, y, a, s, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a: lensing strength
:param s: core radius
:param q: axis ratio
:return: hessian elements f_xx, f_xy, f_yx, f_yy
"""
# equations 21-23 in Oguri 2021
psi = np.sqrt(q**2 * (s**2 + x**2) + y**2)
Phi = (psi + s) ** 2 + (1 - q**2) * x**2
f_xx = (
q
/ (s * Phi)
* (
1
+ q**2 * s * (q**2 * s**2 + y**2) / psi**3
- 2 * x**2 * (psi + q**2 * s) ** 2 / (psi**2 * Phi)
)
)
f_yy = (
q
/ (s * Phi)
* (
1
+ q**2 * s * (s**2 + x**2) / psi**3
- 2 * y**2 * (psi + s) ** 2 / (psi**2 * Phi)
)
)
f_xy = (
-q
* x
* y
/ (s * Phi)
* (q**2 * s / psi**3 + 2 * (psi + q**2 * s) * (psi + s) / (psi**2 * Phi))
)
return a * f_xx, a * f_xy, a * f_xy, a * f_yy
class CSEMajorAxisSet(LensProfileBase):
"""A set of CSE profiles along a joint center and axis."""
def __init__(self):
self.major_axis_model = CSEMajorAxis()
super(CSEMajorAxisSet, self).__init__()
def function(self, x, y, a_list, s_list, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a_list: list of lensing strength
:param s_list: list of core radius
:param q: axis ratio
:return: lensing potential
"""
f_ = np.zeros_like(x, dtype=float)
for a, s in zip(a_list, s_list):
f_ += self.major_axis_model.function(x, y, a, s, q)
return f_
def derivatives(self, x, y, a_list, s_list, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a_list: list of lensing strength
:param s_list: list of core radius
:param q: axis ratio
:return: deflection in x- and y-direction
"""
f_x, f_y = np.zeros_like(x, dtype=float), np.zeros_like(y, dtype=float)
for a, s in zip(a_list, s_list):
f_x_, f_y_ = self.major_axis_model.derivatives(x, y, a, s, q)
f_x += f_x_
f_y += f_y_
return f_x, f_y
def hessian(self, x, y, a_list, s_list, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a_list: list of lensing strength
:param s_list: list of core radius
:param q: axis ratio
:return: hessian elements f_xx, f_xy, f_yx, f_yy
"""
f_xx, f_xy, f_yy = (
np.zeros_like(x, dtype=float),
np.zeros_like(x, dtype=float),
np.zeros_like(x, dtype=float),
)
for a, s in zip(a_list, s_list):
f_xx_, f_xy_, _, f_yy_ = self.major_axis_model.hessian(x, y, a, s, q)
f_xx += f_xx_
f_xy += f_xy_
f_yy += f_yy_
return f_xx, f_xy, f_xy, f_yy
class CSEProductAvg(LensProfileBase):
"""Cored steep ellipsoid (CSE) evaluated at the product-averaged radius sqrt(ab),
such that mass is not changed when increasing ellipticity.
Same as CSEMajorAxis but evaluated at r=sqrt(q)*r_original
Keeton and Kochanek (1998)
Oguri 2021: https://arxiv.org/pdf/2106.11464.pdf
.. math::
\\kappa(u;s) = \\frac{A}{2(s^2 + \\xi^2)^{3/2}}
with
.. math::
\\xi(x, y) = \\sqrt{qx^2 + \\frac{y^2}{q}}
"""
param_names = ["A", "s", "q", "center_x", "center_y"]
lower_limit_default = {
"A": -1000,
"s": 0,
"q": 0.001,
"center_x": -100,
"center_y": -100,
}
upper_limit_default = {
"A": 1000,
"s": 10000,
"q": 0.99999,
"center_x": 100,
"center_y": 100,
}
def __init__(self):
super(CSEProductAvg, self).__init__()
self.MA_class = CSEMajorAxis()
@staticmethod
def _convert2prodavg(x, y, a, s, q):
"""Converts coordinates and re-normalizes major-axis parameterization to instead
be wrt.
product-averaged
"""
a = a / q
x = x * np.sqrt(q)
y = y * np.sqrt(q)
return x, y, a, s, q
def function(self, x, y, a, s, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a: lensing strength
:param s: core radius
:param q: axis ratio
:return: lensing potential
"""
x, y, a, s, q = self._convert2prodavg(x, y, a, s, q)
return self.MA_class.function(x, y, a, s, q)
def derivatives(self, x, y, a, s, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a: lensing strength
:param s: core radius
:param q: axis ratio
:return: deflection in x- and y-direction
"""
x, y, a, s, q = self._convert2prodavg(x, y, a, s, q)
af_x, af_y = self.MA_class.derivatives(x, y, a, s, q)
# extra sqrt(q) factor from taking derivative of transformed coordinate
return np.sqrt(q) * af_x, np.sqrt(q) * af_y
def hessian(self, x, y, a, s, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a: lensing strength
:param s: core radius
:param q: axis ratio
:return: hessian elements f_xx, f_xy, f_yx, f_yy
"""
x, y, a, s, q = self._convert2prodavg(x, y, a, s, q)
af_xx, af_xy, af_xy, af_yy = self.MA_class.hessian(x, y, a, s, q)
# two sqrt(q) factors from taking derivatives of transformed coordinate
return q * af_xx, q * af_xy, q * af_xy, q * af_yy
class CSEProductAvgSet(LensProfileBase):
"""A set of CSE profiles along a joint center and axis."""
def __init__(self):
self.major_axis_model = CSEProductAvg()
super(CSEProductAvgSet, self).__init__()
def function(self, x, y, a_list, s_list, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a_list: list of lensing strength
:param s_list: list of core radius
:param q: axis ratio
:return: lensing potential
"""
f_ = np.zeros_like(x, dtype=float)
for a, s in zip(a_list, s_list):
f_ += self.major_axis_model.function(x, y, a, s, q)
return f_
def derivatives(self, x, y, a_list, s_list, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a_list: list of lensing strength
:param s_list: list of core radius
:param q: axis ratio
:return: deflection in x- and y-direction
"""
f_x, f_y = np.zeros_like(x, dtype=float), np.zeros_like(y, dtype=float)
for a, s in zip(a_list, s_list):
f_x_, f_y_ = self.major_axis_model.derivatives(x, y, a, s, q)
f_x += f_x_
f_y += f_y_
return f_x, f_y
def hessian(self, x, y, a_list, s_list, q):
"""
:param x: coordinate in image plane (angle)
:param y: coordinate in image plane (angle)
:param a_list: list of lensing strength
:param s_list: list of core radius
:param q: axis ratio
:return: hessian elements f_xx, f_xy, f_yx, f_yy
"""
f_xx, f_xy, f_yy = (
np.zeros_like(x, dtype=float),
np.zeros_like(x, dtype=float),
np.zeros_like(x, dtype=float),
)
for a, s in zip(a_list, s_list):
f_xx_, f_xy_, _, f_yy_ = self.major_axis_model.hessian(x, y, a, s, q)
f_xx += f_xx_
f_xy += f_xy_
f_yy += f_yy_
return f_xx, f_xy, f_xy, f_yy
|
lenstronomyREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@LensModel@Profiles@cored_steep_ellipsoid.py@.PATH_END.py
|
{
"filename": "test_datasets_video_utils.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/test/test_datasets_video_utils.py",
"type": "Python"
}
|
import pytest
import torch
from common_utils import assert_equal, get_list_of_videos
from torchvision import io
from torchvision.datasets.video_utils import unfold, VideoClips
class TestVideo:
def test_unfold(self):
a = torch.arange(7)
r = unfold(a, 3, 3, 1)
expected = torch.tensor(
[
[0, 1, 2],
[3, 4, 5],
]
)
assert_equal(r, expected)
r = unfold(a, 3, 2, 1)
expected = torch.tensor([[0, 1, 2], [2, 3, 4], [4, 5, 6]])
assert_equal(r, expected)
r = unfold(a, 3, 2, 2)
expected = torch.tensor(
[
[0, 2, 4],
[2, 4, 6],
]
)
assert_equal(r, expected)
@pytest.mark.skipif(not io.video._av_available(), reason="this test requires av")
def test_video_clips(self, tmpdir):
video_list = get_list_of_videos(tmpdir, num_videos=3)
video_clips = VideoClips(video_list, 5, 5, num_workers=2)
assert video_clips.num_clips() == 1 + 2 + 3
for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]):
video_idx, clip_idx = video_clips.get_clip_location(i)
assert video_idx == v_idx
assert clip_idx == c_idx
video_clips = VideoClips(video_list, 6, 6)
assert video_clips.num_clips() == 0 + 1 + 2
for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]):
video_idx, clip_idx = video_clips.get_clip_location(i)
assert video_idx == v_idx
assert clip_idx == c_idx
video_clips = VideoClips(video_list, 6, 1)
assert video_clips.num_clips() == 0 + (10 - 6 + 1) + (15 - 6 + 1)
for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]:
video_idx, clip_idx = video_clips.get_clip_location(i)
assert video_idx == v_idx
assert clip_idx == c_idx
@pytest.mark.skipif(not io.video._av_available(), reason="this test requires av")
def test_video_clips_custom_fps(self, tmpdir):
video_list = get_list_of_videos(tmpdir, num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6])
num_frames = 4
for fps in [1, 3, 4, 10]:
video_clips = VideoClips(video_list, num_frames, num_frames, fps)
for i in range(video_clips.num_clips()):
video, audio, info, video_idx = video_clips.get_clip(i)
assert video.shape[0] == num_frames
assert info["video_fps"] == fps
# TODO add tests checking that the content is right
def test_compute_clips_for_video(self):
video_pts = torch.arange(30)
# case 1: single clip
num_frames = 13
orig_fps = 30
duration = float(len(video_pts)) / orig_fps
new_fps = 13
clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames, orig_fps, new_fps)
resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps)
assert len(clips) == 1
assert_equal(clips, idxs)
assert_equal(idxs[0], resampled_idxs)
# case 2: all frames appear only once
num_frames = 4
orig_fps = 30
duration = float(len(video_pts)) / orig_fps
new_fps = 12
clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames, orig_fps, new_fps)
resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps)
assert len(clips) == 3
assert_equal(clips, idxs)
assert_equal(idxs.flatten(), resampled_idxs)
# case 3: frames aren't enough for a clip
num_frames = 32
orig_fps = 30
new_fps = 13
with pytest.warns(UserWarning):
clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames, orig_fps, new_fps)
assert len(clips) == 0
assert len(idxs) == 0
if __name__ == "__main__":
pytest.main([__file__])
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@test@test_datasets_video_utils.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattersmith/selected/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
if sys.version_info < (3, 7) or TYPE_CHECKING:
from ._textfont import TextfontValidator
from ._marker import MarkerValidator
else:
from _plotly_utils.importers import relative_import
__all__, __getattr__, __dir__ = relative_import(
__name__, [], ["._textfont.TextfontValidator", "._marker.MarkerValidator"]
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattersmith@selected@__init__.py@.PATH_END.py
|
{
"filename": "binaryformat.md",
"repo_name": "tigerchenlu98/rebound",
"repo_path": "rebound_extracted/rebound-main/docs/binaryformat.md",
"type": "Markdown"
}
|
# Binary Format
REBOUND comes with its own binary format.
The binary format allows you to store a current simulation state to a file or to memory.
The binary format is also used when you make a copy of a simulation or when you compare two simulations with each other.
The Simulationarchive is an extension of the binary format which allows you to store multiple snapshots of a simulation in one file.
This page explains the details of the binary format.
It is mainly intended for people who wish to extend the built-in REBOUND functionality.
You do not need to know those details if you're only working with binary files to save and load simulations.
REBOUND uses two structures for the binary files:
```c
struct reb_binary_field {
uint32_t type;
uint64_t size;
};
```
and
```c
struct reb_simulationarchive_blob {
int32_t index;
int32_t offset_prev;
int32_t offset_next;
};
```
!!! note
Before version 3.18, the offset datatype was `int16_t`. This caused problems for simulations with a large number of particles and has since been changed to `int32_t`.
## Binary file (one snapshot)
You create a binary file if you save a simulation
=== "C"
```c
struct reb_simulation* r = reb_simulation_create();
// ... setup simulation ...
reb_simulation_save_to_file(r, "snapshot.bin");
```
=== "Python"
```python
sim = rebound.Simulation()
// ... setup simulation ...
sim.save_to_file("snapshot.bin")
```
Such a binary file with one snapshot is simply a set of `reb_binaryfield`s followed by one `reb_simulationarchive_blob` at the end, for example:
```
reb_binary_field:
type: DT
size: 8 bytes
8 bytes of data representing the value of DT
reb_binary_field:
type: PARTICLES
size: 128 bytes
128 bytes of data representing the values of PARTICLES
...
reb_binary_field:
type: END
size: 0
reb_simulationarchive_blob:
index: 0
offset_prev: 0
offset_next: 0
```
Each of the binary fields provides the context (type and size) for the data that immediately follows the field.
The type is an integer defined in the `reb_binary_field_descriptor_list` (see below).
The last binary field is of type `9999` (`end`), indicating that the snapshot ends here.
!!! note
Before version 3.27 data was encoded using the enum `REB_BINARY_FIELD_TYPE` instead of `reb_binary_field_descriptor_list`.
## Simulationarchive file (multiple snapshots)
The binary file above can also be interpreted as a Simulationarchive with one snapshot.
You can append many (millions!) of snapshots to a binary file.
REBOUND only stores data that has changed since the original snapshot (typically the particle data, time, etc).
This allows for a very compact file size, while still maintaining bit-wise reproducibility.
Each snapshot is separated by a `reb_simulationarchive_blob`.
The blob contains the offset to the previous and next blobs.
This allows REBOUND to quickly jump from one blob in the archive to the next.
Between the blobs are the same `reb_binary_field`s we already encountered for a binary file with one snapshot.
Thus, a Simulationarchive file with multiple snapshots looks something like this:
```
reb_binary_field:
type: DT
size: 8 bytes
8 bytes of data representing the value of DT
... more reb_binary_fields ...
reb_binary_field:
type: END
size: 0
reb_simulationarchive_blob:
index: 0
offset_prev: 0
offset_next: 256 (offset to the next blob)
reb_binary_field:
type: DT
size: 8 bytes
8 bytes of data representing the value of DT
... more reb_binary_fields ...
reb_binary_field:
type: END
size: 0
reb_simulationarchive_blob:
index: 1
offset_prev: 256 (offset to the previous blob)
offset_next: 256 (offset to the next blob)
reb_binary_field:
type: DT
size: 8 bytes
8 bytes of data representing the value of DT
... more reb_binary_fields ...
reb_binary_field:
type: END
size: 0
reb_simulationarchive_blob:
index: 2
offset_prev: 256 (offset to the previous blob)
offset_next: 0
```
The offsets are also used as a sort of checksum to detect if a binary file has been corrupted (for example because a user ran out of disk space).
If a binary file is corrupted, REBOUND attempts some magic and will recover the last snapshot which does not appear corrupted.
You will see a warning message when that happens and should proceed with caution (make a backup!).
## Binary Field Descriptor
REBOUND maintains a list of fields it needs to input/output in order to restore a simulation.
This list is of type `struct reb_binary_field_descriptor[]` and defined in `output.c` as `reb_binary_field_descriptor_list`.
A single struct `reb_binary_field_descriptor` contains the information to input/output one REBOUND field, for example the current simulation time `t`:
```c
struct reb_binary_field_descriptor fd_t = { 0, REB_DOUBLE, "t", offsetof(struct reb_simulation, t), 0, 0};
```
The first number is a unique identifier (in this case 0). The second entry is the type of data, in this case a single double precision floating point number. The third entry is a string used to identify the field. This is only used when generating human-readable output and is typically the same as the variable name in C. The next entry is the offset of where this variable is stored relative to the beginning of the simulation structure.
REBOUND also supports array like fields. For example consider the `particles` field:
```c
struct reb_binary_field_descriptor fd_particles = { 85, REB_POINTER, "particles", offsetof(struct reb_simulation, particles), offsetof(struct reb_simulation, N), sizeof(struct reb_particle)};
```
The second to last entry lists the offset of the variable in the `reb_simulation` structure that determines the number of array elements. In this case the number of particles. The last entry is the size of a single element. In this case, the size of one `reb_particle`.
If you add an additional field to the `reb_simulation` struct and you want to write it to a binary file and read it back in, then you need to add an entry to `reb_binary_field_descriptor_list`.
|
tigerchenlu98REPO_NAMEreboundPATH_START.@rebound_extracted@rebound-main@docs@binaryformat.md@.PATH_END.py
|
{
"filename": "_values.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/xaxis/rangebreak/_values.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ValuesValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for ``layout.xaxis.rangebreak.values``.

    Appears auto-generated from the plotly schema — prefer changing the
    code generator over editing this file by hand.
    """

    def __init__(
        self, plotly_name="values", parent_name="layout.xaxis.rangebreak", **kwargs
    ):
        super(ValuesValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # defaults come from the schema; callers may override via kwargs
            edit_type=kwargs.pop("edit_type", "calc"),
            free_length=kwargs.pop("free_length", True),
            items=kwargs.pop("items", {"editType": "calc", "valType": "any"}),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@xaxis@rangebreak@_values.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterpolargl/unselected/marker/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for ``scatterpolargl.unselected.marker.color``.

    Appears auto-generated from the plotly schema (py2 variant — note the
    ``role`` kwarg); prefer changing the code generator over hand edits.
    """

    def __init__(
        self,
        plotly_name="color",
        parent_name="scatterpolargl.unselected.marker",
        **kwargs
    ):
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # defaults come from the schema; callers may override via kwargs
            edit_type=kwargs.pop("edit_type", "style"),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterpolargl@unselected@marker@_color.py@.PATH_END.py
|
{
"filename": "_end.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/contourcarpet/contours/_end.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class EndValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``contourcarpet.contours.end``.

    Appears auto-generated from the plotly schema — prefer changing the
    code generator over editing this file by hand.
    """

    def __init__(
        self, plotly_name="end", parent_name="contourcarpet.contours", **kwargs
    ):
        super(EndValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            # setting an explicit contour end implies autocontour is disabled
            implied_edits=kwargs.pop("implied_edits", {"^autocontour": False}),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@contourcarpet@contours@_end.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/astropy/io/votable/validator/__init__.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from .main import make_validation_report
from . import main

# Re-export the submodule's docstring as the package docstring, then remove
# the module reference so it does not leak into the package namespace.
__doc__ = main.__doc__
del main
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@astropy@io@votable@validator@__init__.py@.PATH_END.py
|
{
"filename": "test_plane_parallel_fft.py",
"repo_name": "cosmodesi/pyrecon",
"repo_path": "pyrecon_extracted/pyrecon-main/pyrecon/tests/test_plane_parallel_fft.py",
"type": "Python"
}
|
import numpy as np
from pyrecon import PlaneParallelFFTReconstruction
from pyrecon.utils import MemoryMonitor
from utils import get_random_catalog, Catalog
def test_dtype():
    """Reconstruction run in float32 and float64 should expose meshes/shifts
    with the requested itemsize and agree to ~1e-2."""
    data = get_random_catalog(seed=42)
    randoms = get_random_catalog(seed=81)
    for los in ['x']:
        # float32 pipeline
        recon_f4 = PlaneParallelFFTReconstruction(f=0.8, bias=2., nthreads=4, positions=randoms['Position'], nmesh=64, los=los, dtype='f4')
        recon_f4.assign_data(data['Position'], data['Weight'])
        recon_f4.assign_randoms(randoms['Position'], randoms['Weight'])
        recon_f4.set_density_contrast()
        assert recon_f4.mesh_delta.dtype.itemsize == 4
        recon_f4.run()
        assert recon_f4.mesh_psi[0].dtype.itemsize == 4
        # output shift dtype follows the input position dtype
        shifts_f4 = recon_f4.read_shifts(data['Position'].astype('f8'), field='disp+rsd')
        assert shifts_f4.dtype.itemsize == 8
        shifts_f4 = recon_f4.read_shifts(data['Position'].astype('f4'), field='disp+rsd')
        assert shifts_f4.dtype.itemsize == 4
        # float64 pipeline
        recon_f8 = PlaneParallelFFTReconstruction(f=0.8, bias=2., nthreads=4, positions=randoms['Position'], nmesh=64, los=los, dtype='f8')
        recon_f8.assign_data(data['Position'], data['Weight'])
        recon_f8.assign_randoms(randoms['Position'], randoms['Weight'])
        recon_f8.set_density_contrast()
        assert recon_f8.mesh_delta.dtype.itemsize == 8
        recon_f8.run()
        assert recon_f8.mesh_psi[0].dtype.itemsize == 8
        shifts_f8 = recon_f8.read_shifts(data['Position'], field='disp+rsd')
        assert shifts_f8.dtype.itemsize == 8
        # results differ bitwise but must be close within float32 precision
        assert not np.all(shifts_f4 == shifts_f8)
        assert np.allclose(shifts_f4, shifts_f8, atol=1e-2, rtol=1e-2)
def test_mem():
    """Log memory usage at each stage of the reconstruction pipeline."""
    data = get_random_catalog(seed=42)
    randoms = get_random_catalog(seed=84)
    with MemoryMonitor() as mem:
        recon = PlaneParallelFFTReconstruction(f=0.8, bias=2., nthreads=4, positions=randoms['Position'], nmesh=256, los='x', dtype='f8')
        mem('init')
        recon.assign_data(data['Position'], data['Weight'])
        mem('data')
        recon.assign_randoms(randoms['Position'], randoms['Weight'])
        mem('randoms')
        recon.set_density_contrast()
        mem('delta')
        recon.run()
        mem('recon')  # 3 meshes
def test_wrap():
    """Periodic wrapping: positions slightly outside the box must be wrapped
    back in, for several box origins."""
    size = 100000
    boxsize = 1000
    for origin in [-500, 0, 500]:
        boxcenter = boxsize / 2 + origin
        data = get_random_catalog(size, boxsize, seed=42)
        # set one of the data positions to be outside the fiducial box by hand
        data['Position'][-1] = np.array([boxsize, boxsize, boxsize]) + 1
        data['Position'] += boxcenter
        randoms = get_random_catalog(size, boxsize, seed=42)
        # set one of the random positions to be outside the fiducial box by hand
        randoms['Position'][-1] = np.array([0, 0, 0]) - 1
        randoms['Position'] += boxcenter
        recon = PlaneParallelFFTReconstruction(f=0.8, bias=2, los='z', boxsize=boxsize, boxcenter=boxcenter, nmesh=64, wrap=True)
        # following steps should run without error if wrapping is correctly implemented
        recon.assign_data(data['Position'], data['Weight'])
        recon.assign_randoms(randoms['Position'], randoms['Weight'])
        recon.set_density_contrast()
        recon.run()
        # following steps test the implementation coded into standalone pyrecon code
        for field in ['rsd', 'disp', 'disp+rsd']:
            shifts = recon.read_shifts(data['Position'], field=field)
            diff = data['Position'] - shifts
            # manual periodic wrap must land every position inside the box
            positions_rec = (diff - recon.offset) % recon.boxsize + recon.offset
            assert np.all(positions_rec <= origin + boxsize) and np.all(positions_rec >= origin)
            # and must match the library's own shifted-position computation
            assert np.allclose(recon.read_shifted_positions(data['Position'], field=field), positions_rec)
def test_ref(data_fn, randoms_fn, data_fn_rec=None, randoms_fn_rec=None):
    """End-to-end check against reference catalogs.

    If the input catalogs already carry a 'Position_rec' column, the freshly
    computed shifted positions are asserted to match it; otherwise the column
    is added and optionally written to data_fn_rec / randoms_fn_rec. Finally
    pre- and post-reconstruction power spectrum multipoles are plotted.
    """
    boxsize = 1200.
    boxcenter = [1754, 0., 0.]
    data = Catalog.read(data_fn)
    randoms = Catalog.read(randoms_fn)
    recon = PlaneParallelFFTReconstruction(f=0.8, bias=2., los='x', fft_engine='fftw', nthreads=4, boxcenter=boxcenter, boxsize=boxsize, nmesh=128, dtype='f8')
    recon.assign_data(data['Position'], data['Weight'])
    recon.assign_randoms(randoms['Position'], randoms['Weight'])
    recon.set_density_contrast()
    recon.run()
    from pypower import CatalogFFTPower
    from matplotlib import pyplot as plt
    for cat, fn in zip([data, randoms], [data_fn_rec, randoms_fn_rec]):
        rec = recon.read_shifted_positions(cat['Position'])
        if 'Position_rec' in cat:
            # reference run: compare against the stored reconstructed positions
            print('Checking...')
            assert np.allclose(rec, cat['Position_rec'])
        else:
            cat['Position_rec'] = rec
        if fn is not None:
            cat.write(fn)
    kwargs = dict(edges={'min': 0., 'step': 0.01}, ells=(0, 2, 4), boxsize=1000., nmesh=64, resampler='tsc', interlacing=3, position_type='pos')
    power = CatalogFFTPower(data_positions1=data['Position'], randoms_positions1=randoms['Position'], **kwargs)
    poles = power.poles
    power = CatalogFFTPower(data_positions1=data['Position_rec'], randoms_positions1=randoms['Position_rec'], **kwargs)
    poles_rec = power.poles
    for ill, ell in enumerate(poles.ells):
        # solid: pre-reconstruction, dashed: post-reconstruction
        plt.plot(poles.k, poles.k * poles(ell=ell), color='C{:d}'.format(ill), linestyle='-')
        plt.plot(poles_rec.k, poles_rec.k * poles_rec(ell=ell), color='C{:d}'.format(ill), linestyle='--')
    if power.mpicomm.rank == 0:
        plt.show()
if __name__ == '__main__':
    from utils import data_fn, randoms_fn, catalog_rec_fn
    from pyrecon.utils import setup_logging

    setup_logging()

    # test_mem()
    test_dtype()
    test_wrap()
    #test_ref(data_fn, randoms_fn)
    # Re-run test_ref reading the previously written reconstructed catalogs
    # as inputs (see the commented-out first pass above), so the stored
    # 'Position_rec' columns are re-checked; nothing is written this time.
    data_fn_rec, randoms_fn_rec = [catalog_rec_fn(fn, 'plane_parallel_fft') for fn in [data_fn, randoms_fn]]
    data_fn, randoms_fn = data_fn_rec, randoms_fn_rec
    data_fn_rec, randoms_fn_rec = None, None
    test_ref(data_fn, randoms_fn, data_fn_rec, randoms_fn_rec)
|
cosmodesiREPO_NAMEpyreconPATH_START.@pyrecon_extracted@pyrecon-main@pyrecon@tests@test_plane_parallel_fft.py@.PATH_END.py
|
{
"filename": "zero_padding2d_test.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/layers/reshaping/zero_padding2d_test.py",
"type": "Python"
}
|
import numpy as np
from absl.testing import parameterized
from keras.src import backend
from keras.src import dtype_policies
from keras.src import layers
from keras.src import testing
class ZeroPadding2DTest(testing.TestCase):
    @parameterized.parameters(
        {"data_format": "channels_first"},
        {"data_format": "channels_last"},
    )
    def test_zero_padding_2d(self, data_format):
        """Asymmetric padding ((1,2),(3,4)): padded rows/cols are zero and
        the interior slice equals the input, for both data formats."""
        inputs = np.random.rand(1, 2, 3, 4)
        outputs = layers.ZeroPadding2D(
            padding=((1, 2), (3, 4)), data_format=data_format
        )(inputs)

        if data_format == "channels_first":
            # 1 zero row on top, 2 at the bottom; 3 zero cols left, 4 right
            for index in [0, -1, -2]:
                self.assertAllClose(outputs[:, :, index, :], 0.0)
            for index in [0, 1, 2, -1, -2, -3, -4]:
                self.assertAllClose(outputs[:, :, :, index], 0.0)
            self.assertAllClose(outputs[:, :, 1:-2, 3:-4], inputs)
        else:
            for index in [0, -1, -2]:
                self.assertAllClose(outputs[:, index, :, :], 0.0)
            for index in [0, 1, 2, -1, -2, -3, -4]:
                self.assertAllClose(outputs[:, :, index, :], 0.0)
            self.assertAllClose(outputs[:, 1:-2, 3:-4, :], inputs)

    @parameterized.product(
        (
            {"padding": ((2, 2), (2, 2))},  # 2 tuples
            {"padding": (2, 2)},  # 1 tuple
            {"padding": 2},  # 1 int
        ),
        (
            {"data_format": "channels_first"},
            {"data_format": "channels_last"},
        ),
    )
    def test_zero_padding_2d_with_same_padding(self, padding, data_format):
        """All three padding spellings of a symmetric pad of 2 behave the
        same way."""
        inputs = np.random.rand(1, 2, 3, 4)
        outputs = layers.ZeroPadding2D(
            padding=padding, data_format=data_format
        )(inputs)

        if data_format == "channels_first":
            for index in [0, 1, -1, -2]:
                self.assertAllClose(outputs[:, :, index, :], 0.0)
                self.assertAllClose(outputs[:, :, :, index], 0.0)
            self.assertAllClose(outputs[:, :, 2:-2, 2:-2], inputs)
        else:
            for index in [0, 1, -1, -2]:
                self.assertAllClose(outputs[:, index, :, :], 0.0)
                self.assertAllClose(outputs[:, :, index, :], 0.0)
            self.assertAllClose(outputs[:, 2:-2, 2:-2, :], inputs)

    def test_zero_padding_2d_with_dynamic_spatial_dim(self):
        """A dynamic (None) spatial dimension stays dynamic; the static one
        grows by the total padding."""
        if backend.config.image_data_format() == "channels_last":
            input_layer = layers.Input(batch_shape=(1, 2, None, 4))
        else:
            input_layer = layers.Input(batch_shape=(1, 4, 2, None))
        padded = layers.ZeroPadding2D(((1, 2), (3, 4)))(input_layer)
        if backend.config.image_data_format() == "channels_last":
            self.assertEqual(padded.shape, (1, 5, None, 4))
        else:
            self.assertEqual(padded.shape, (1, 4, 5, None))

    @parameterized.parameters(
        {"padding": (1,)},
        {"padding": (1, 2, 3)},
        {"padding": "1"},
        {"padding": ((1, 2), (3, 4, 5))},
        {"padding": ((1, 2), (3, -4))},
        {"padding": ((1, 2), "3")},
    )
    def test_zero_padding_2d_errors_if_padding_argument_invalid(self, padding):
        # wrong arity, wrong type, or negative pad amounts must all raise
        with self.assertRaises(ValueError):
            layers.ZeroPadding2D(padding)

    @parameterized.parameters(
        {"data_format": "channels_first"},
        {"data_format": "channels_last"},
    )
    def test_zero_padding_2d_get_config(self, data_format):
        """get_config normalizes scalar-per-axis padding into pair-of-pairs."""
        layer = layers.ZeroPadding2D(padding=(1, 2), data_format=data_format)
        expected_config = {
            "data_format": data_format,
            "dtype": dtype_policies.serialize(layer.dtype_policy),
            "name": layer.name,
            "padding": ((1, 1), (2, 2)),
            "trainable": layer.trainable,
        }
        self.assertEqual(layer.get_config(), expected_config)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@layers@reshaping@zero_padding2d_test.py@.PATH_END.py
|
{
"filename": "johnsnowlabs.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/langchain/embeddings/johnsnowlabs.py",
"type": "Python"
}
|
from typing import TYPE_CHECKING, Any

from langchain._api import create_importer

if TYPE_CHECKING:
    from langchain_community.embeddings import JohnSnowLabsEmbeddings

# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"JohnSnowLabsEmbeddings": "langchain_community.embeddings"}

_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)


# Module-level __getattr__ (PEP 562): forwards deprecated attribute access
# to the importer built above, which resolves names from langchain_community.
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically."""
    return _import_attribute(name)


__all__ = [
    "JohnSnowLabsEmbeddings",
]
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@langchain@embeddings@johnsnowlabs.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.